From dfd402a4c4baae42398ce9180ff424d589b8bffc Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Thu, 14 Nov 2019 19:02:54 +0100 Subject: [PATCH 0001/1971] kcsan: Add Kernel Concurrency Sanitizer infrastructure Kernel Concurrency Sanitizer (KCSAN) is a dynamic data-race detector for kernel space. KCSAN is a sampling watchpoint-based data-race detector. See the included Documentation/dev-tools/kcsan.rst for more details. This patch adds basic infrastructure, but does not yet enable KCSAN for any architecture. Signed-off-by: Marco Elver Acked-by: Paul E. McKenney Signed-off-by: Paul E. McKenney --- MAINTAINERS | 11 + Makefile | 3 +- include/linux/compiler-clang.h | 9 + include/linux/compiler-gcc.h | 7 + include/linux/compiler.h | 37 +- include/linux/kcsan-checks.h | 97 +++++ include/linux/kcsan.h | 115 ++++++ include/linux/sched.h | 4 + init/init_task.c | 8 + init/main.c | 2 + kernel/Makefile | 1 + kernel/kcsan/Makefile | 11 + kernel/kcsan/atomic.h | 27 ++ kernel/kcsan/core.c | 626 +++++++++++++++++++++++++++++++++ kernel/kcsan/debugfs.c | 275 +++++++++++++++ kernel/kcsan/encoding.h | 94 +++++ kernel/kcsan/kcsan.h | 108 ++++++ kernel/kcsan/report.c | 320 +++++++++++++++++ kernel/kcsan/test.c | 121 +++++++ lib/Kconfig.debug | 2 + lib/Kconfig.kcsan | 118 +++++++ lib/Makefile | 3 + scripts/Makefile.kcsan | 6 + scripts/Makefile.lib | 10 + 24 files changed, 2006 insertions(+), 9 deletions(-) create mode 100644 include/linux/kcsan-checks.h create mode 100644 include/linux/kcsan.h create mode 100644 kernel/kcsan/Makefile create mode 100644 kernel/kcsan/atomic.h create mode 100644 kernel/kcsan/core.c create mode 100644 kernel/kcsan/debugfs.c create mode 100644 kernel/kcsan/encoding.h create mode 100644 kernel/kcsan/kcsan.h create mode 100644 kernel/kcsan/report.c create mode 100644 kernel/kcsan/test.c create mode 100644 lib/Kconfig.kcsan create mode 100644 scripts/Makefile.kcsan diff --git a/MAINTAINERS b/MAINTAINERS index eb19fad370d79..1879aace26575 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8848,6 +8848,17 @@ F: Documentation/kbuild/kconfig* F: scripts/kconfig/ F: scripts/Kconfig.include +KCSAN +M: Marco Elver +R: Dmitry Vyukov +L: kasan-dev@googlegroups.com +S: Maintained +F: Documentation/dev-tools/kcsan.rst +F: include/linux/kcsan*.h +F: kernel/kcsan/ +F: lib/Kconfig.kcsan +F: scripts/Makefile.kcsan + KDUMP M: Dave Young M: Baoquan He diff --git a/Makefile b/Makefile index 1d5298356ea8c..6fe2889d23092 100644 --- a/Makefile +++ b/Makefile @@ -478,7 +478,7 @@ export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS KBUILD_LDFLAGS export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE -export CFLAGS_KASAN CFLAGS_KASAN_NOSANITIZE CFLAGS_UBSAN +export CFLAGS_KASAN CFLAGS_KASAN_NOSANITIZE CFLAGS_UBSAN CFLAGS_KCSAN export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL @@ -900,6 +900,7 @@ endif include scripts/Makefile.kasan include scripts/Makefile.extrawarn include scripts/Makefile.ubsan +include scripts/Makefile.kcsan # Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments KBUILD_CPPFLAGS += $(KCPPFLAGS) diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index 333a6695a918c..a213eb55e725f 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -24,6 +24,15 @@ #define __no_sanitize_address #endif +#if __has_feature(thread_sanitizer) +/* 
emulate gcc's __SANITIZE_THREAD__ flag */ +#define __SANITIZE_THREAD__ +#define __no_sanitize_thread \ + __attribute__((no_sanitize("thread"))) +#else +#define __no_sanitize_thread +#endif + /* * Not all versions of clang implement the the type-generic versions * of the builtin overflow checkers. Fortunately, clang implements diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index d7ee4c6bad482..0eb2a1cc411dc 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -145,6 +145,13 @@ #define __no_sanitize_address #endif +#if defined(__SANITIZE_THREAD__) && __has_attribute(__no_sanitize_thread__) +#define __no_sanitize_thread \ + __attribute__((__noinline__)) __attribute__((no_sanitize_thread)) +#else +#define __no_sanitize_thread +#endif + #if GCC_VERSION >= 50100 #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1 #endif diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 5e88e7e33abec..c42fa83f23fb8 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -178,6 +178,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, #endif #include +#include #define __READ_ONCE_SIZE \ ({ \ @@ -193,12 +194,6 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, } \ }) -static __always_inline -void __read_once_size(const volatile void *p, void *res, int size) -{ - __READ_ONCE_SIZE; -} - #ifdef CONFIG_KASAN /* * We can't declare function 'inline' because __no_sanitize_address confilcts @@ -207,18 +202,44 @@ void __read_once_size(const volatile void *p, void *res, int size) * '__maybe_unused' allows us to avoid defined-but-not-used warnings. */ # define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused +# define __no_sanitize_or_inline __no_kasan_or_inline #else # define __no_kasan_or_inline __always_inline #endif -static __no_kasan_or_inline +#ifdef __SANITIZE_THREAD__ +/* + * Rely on __SANITIZE_THREAD__ instead of CONFIG_KCSAN, to avoid not inlining in + * compilation units where instrumentation is disabled. + */ +# define __no_kcsan_or_inline __no_sanitize_thread notrace __maybe_unused +# define __no_sanitize_or_inline __no_kcsan_or_inline +#else +# define __no_kcsan_or_inline __always_inline +#endif + +#ifndef __no_sanitize_or_inline +#define __no_sanitize_or_inline __always_inline +#endif + +static __no_kcsan_or_inline +void __read_once_size(const volatile void *p, void *res, int size) +{ + kcsan_check_atomic_read(p, size); + __READ_ONCE_SIZE; +} + +static __no_sanitize_or_inline void __read_once_size_nocheck(const volatile void *p, void *res, int size) { __READ_ONCE_SIZE; } -static __always_inline void __write_once_size(volatile void *p, void *res, int size) +static __no_kcsan_or_inline +void __write_once_size(volatile void *p, void *res, int size) { + kcsan_check_atomic_write(p, size); + switch (size) { case 1: *(volatile __u8 *)p = *(__u8 *)res; break; case 2: *(volatile __u16 *)p = *(__u16 *)res; break; diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h new file mode 100644 index 0000000000000..e78220661086b --- /dev/null +++ b/include/linux/kcsan-checks.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _LINUX_KCSAN_CHECKS_H +#define _LINUX_KCSAN_CHECKS_H + +#include + +/* + * Access type modifiers. + */ +#define KCSAN_ACCESS_WRITE 0x1 +#define KCSAN_ACCESS_ATOMIC 0x2 + +/* + * __kcsan_*: Always calls into runtime when KCSAN is enabled. 
This may be used + * even in compilation units that selectively disable KCSAN, but must use KCSAN + * to validate access to an address. Never use these in header files! + */ +#ifdef CONFIG_KCSAN +/** + * __kcsan_check_access - check generic access for data race + * + * @ptr address of access + * @size size of access + * @type access type modifier + */ +void __kcsan_check_access(const volatile void *ptr, size_t size, int type); + +#else +static inline void __kcsan_check_access(const volatile void *ptr, size_t size, + int type) { } +#endif + +/* + * kcsan_*: Only calls into runtime when the particular compilation unit has + * KCSAN instrumentation enabled. May be used in header files. + */ +#ifdef __SANITIZE_THREAD__ +#define kcsan_check_access __kcsan_check_access +#else +static inline void kcsan_check_access(const volatile void *ptr, size_t size, + int type) { } +#endif + +/** + * __kcsan_check_read - check regular read access for data races + * + * @ptr address of access + * @size size of access + */ +#define __kcsan_check_read(ptr, size) __kcsan_check_access(ptr, size, 0) + +/** + * __kcsan_check_write - check regular write access for data races + * + * @ptr address of access + * @size size of access + */ +#define __kcsan_check_write(ptr, size) \ + __kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE) + +/** + * kcsan_check_read - check regular read access for data races + * + * @ptr address of access + * @size size of access + */ +#define kcsan_check_read(ptr, size) kcsan_check_access(ptr, size, 0) + +/** + * kcsan_check_write - check regular write access for data races + * + * @ptr address of access + * @size size of access + */ +#define kcsan_check_write(ptr, size) \ + kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE) + +/* + * Check for atomic accesses: if atomic access are not ignored, this simply + * aliases to kcsan_check_access, otherwise becomes a no-op. + */ +#ifdef CONFIG_KCSAN_IGNORE_ATOMICS +#define kcsan_check_atomic_read(...) \ + do { \ + } while (0) +#define kcsan_check_atomic_write(...) \ + do { \ + } while (0) +#else +#define kcsan_check_atomic_read(ptr, size) \ + kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC) +#define kcsan_check_atomic_write(ptr, size) \ + kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE) +#endif + +#endif /* _LINUX_KCSAN_CHECKS_H */ diff --git a/include/linux/kcsan.h b/include/linux/kcsan.h new file mode 100644 index 0000000000000..9047048fee842 --- /dev/null +++ b/include/linux/kcsan.h @@ -0,0 +1,115 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _LINUX_KCSAN_H +#define _LINUX_KCSAN_H + +#include +#include + +#ifdef CONFIG_KCSAN + +/* + * Context for each thread of execution: for tasks, this is stored in + * task_struct, and interrupts access internal per-CPU storage. + */ +struct kcsan_ctx { + int disable_count; /* disable counter */ + int atomic_next; /* number of following atomic ops */ + + /* + * We distinguish between: (a) nestable atomic regions that may contain + * other nestable regions; and (b) flat atomic regions that do not keep + * track of nesting. Both (a) and (b) are entirely independent of each + * other, and a flat region may be started in a nestable region or + * vice-versa. 
+ * + * This is required because, for example, in the annotations for + * seqlocks, we declare seqlock writer critical sections as (a) nestable + * atomic regions, but reader critical sections as (b) flat atomic + * regions, but have encountered cases where seqlock reader critical + * sections are contained within writer critical sections (the opposite + * may be possible, too). + * + * To support these cases, we independently track the depth of nesting + * for (a), and whether the leaf level is flat for (b). + */ + int atomic_nest_count; + bool in_flat_atomic; +}; + +/** + * kcsan_init - initialize KCSAN runtime + */ +void kcsan_init(void); + +/** + * kcsan_disable_current - disable KCSAN for the current context + * + * Supports nesting. + */ +void kcsan_disable_current(void); + +/** + * kcsan_enable_current - re-enable KCSAN for the current context + * + * Supports nesting. + */ +void kcsan_enable_current(void); + +/** + * kcsan_nestable_atomic_begin - begin nestable atomic region + * + * Accesses within the atomic region may appear to race with other accesses but + * should be considered atomic. + */ +void kcsan_nestable_atomic_begin(void); + +/** + * kcsan_nestable_atomic_end - end nestable atomic region + */ +void kcsan_nestable_atomic_end(void); + +/** + * kcsan_flat_atomic_begin - begin flat atomic region + * + * Accesses within the atomic region may appear to race with other accesses but + * should be considered atomic. + */ +void kcsan_flat_atomic_begin(void); + +/** + * kcsan_flat_atomic_end - end flat atomic region + */ +void kcsan_flat_atomic_end(void); + +/** + * kcsan_atomic_next - consider following accesses as atomic + * + * Force treating the next n memory accesses for the current context as atomic + * operations. + * + * @n number of following memory accesses to treat as atomic. 
+ */ +void kcsan_atomic_next(int n); + +#else /* CONFIG_KCSAN */ + +static inline void kcsan_init(void) { } + +static inline void kcsan_disable_current(void) { } + +static inline void kcsan_enable_current(void) { } + +static inline void kcsan_nestable_atomic_begin(void) { } + +static inline void kcsan_nestable_atomic_end(void) { } + +static inline void kcsan_flat_atomic_begin(void) { } + +static inline void kcsan_flat_atomic_end(void) { } + +static inline void kcsan_atomic_next(int n) { } + +#endif /* CONFIG_KCSAN */ + +#endif /* _LINUX_KCSAN_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 67a1d86981a9d..ae4f341c1db43 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -31,6 +31,7 @@ #include #include #include +#include /* task_struct member predeclarations (sorted alphabetically): */ struct audit_context; @@ -1172,6 +1173,9 @@ struct task_struct { #ifdef CONFIG_KASAN unsigned int kasan_depth; #endif +#ifdef CONFIG_KCSAN + struct kcsan_ctx kcsan_ctx; +#endif #ifdef CONFIG_FUNCTION_GRAPH_TRACER /* Index of current stored address in ret_stack: */ diff --git a/init/init_task.c b/init/init_task.c index 9e5cbe5eab7b1..2b4fe98b0f095 100644 --- a/init/init_task.c +++ b/init/init_task.c @@ -161,6 +161,14 @@ struct task_struct init_task #ifdef CONFIG_KASAN .kasan_depth = 1, #endif +#ifdef CONFIG_KCSAN + .kcsan_ctx = { + .disable_count = 0, + .atomic_next = 0, + .atomic_nest_count = 0, + .in_flat_atomic = false, + }, +#endif #ifdef CONFIG_TRACE_IRQFLAGS .softirqs_enabled = 1, #endif diff --git a/init/main.c b/init/main.c index 91f6ebb30ef04..4d814de017ee0 100644 --- a/init/main.c +++ b/init/main.c @@ -93,6 +93,7 @@ #include #include #include +#include #include #include @@ -779,6 +780,7 @@ asmlinkage __visible void __init start_kernel(void) acpi_subsystem_init(); arch_post_acpi_subsys_init(); sfi_init_late(); + kcsan_init(); /* Do the rest non-__init'ed, we're now alive */ arch_call_rest_init(); diff --git a/kernel/Makefile b/kernel/Makefile index daad787fb795d..74ab46e2ebd14 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -102,6 +102,7 @@ obj-$(CONFIG_TRACEPOINTS) += trace/ obj-$(CONFIG_IRQ_WORK) += irq_work.o obj-$(CONFIG_CPU_PM) += cpu_pm.o obj-$(CONFIG_BPF) += bpf/ +obj-$(CONFIG_KCSAN) += kcsan/ obj-$(CONFIG_PERF_EVENTS) += events/ diff --git a/kernel/kcsan/Makefile b/kernel/kcsan/Makefile new file mode 100644 index 0000000000000..dd15b62ec0b58 --- /dev/null +++ b/kernel/kcsan/Makefile @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0 +KCSAN_SANITIZE := n +KCOV_INSTRUMENT := n + +CFLAGS_REMOVE_core.o = $(CC_FLAGS_FTRACE) + +CFLAGS_core.o := $(call cc-option,-fno-conserve-stack,) \ + $(call cc-option,-fno-stack-protector,) + +obj-y := core.o debugfs.o report.o +obj-$(CONFIG_KCSAN_SELFTEST) += test.o diff --git a/kernel/kcsan/atomic.h b/kernel/kcsan/atomic.h new file mode 100644 index 0000000000000..c9c3fe6280114 --- /dev/null +++ b/kernel/kcsan/atomic.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _KERNEL_KCSAN_ATOMIC_H +#define _KERNEL_KCSAN_ATOMIC_H + +#include + +/* + * Helper that returns true if access to ptr should be considered as an atomic + * access, even though it is not explicitly atomic. + * + * List all volatile globals that have been observed in races, to suppress + * data race reports between accesses to these variables. + * + * For now, we assume that volatile accesses of globals are as strong as atomic + * accesses (READ_ONCE, WRITE_ONCE cast to volatile). 
The situation is still not + * entirely clear, as on some architectures (Alpha) READ_ONCE/WRITE_ONCE do more + * than cast to volatile. Eventually, we hope to be able to remove this + * function. + */ +static inline bool kcsan_is_atomic(const volatile void *ptr) +{ + /* only jiffies for now */ + return ptr == &jiffies; +} + +#endif /* _KERNEL_KCSAN_ATOMIC_H */ diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c new file mode 100644 index 0000000000000..d9410d58c93ec --- /dev/null +++ b/kernel/kcsan/core.c @@ -0,0 +1,626 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "atomic.h" +#include "encoding.h" +#include "kcsan.h" + +bool kcsan_enabled; + +/* Per-CPU kcsan_ctx for interrupts */ +static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = { + .disable_count = 0, + .atomic_next = 0, + .atomic_nest_count = 0, + .in_flat_atomic = false, +}; + +/* + * Helper macros to index into adjacent slots slots, starting from address slot + * itself, followed by the right and left slots. + * + * The purpose is 2-fold: + * + * 1. if during insertion the address slot is already occupied, check if + * any adjacent slots are free; + * 2. accesses that straddle a slot boundary due to size that exceeds a + * slot's range may check adjacent slots if any watchpoint matches. + * + * Note that accesses with very large size may still miss a watchpoint; however, + * given this should be rare, this is a reasonable trade-off to make, since this + * will avoid: + * + * 1. excessive contention between watchpoint checks and setup; + * 2. larger number of simultaneous watchpoints without sacrificing + * performance. + * + * Example: SLOT_IDX values for KCSAN_CHECK_ADJACENT=1, where i is [0, 1, 2]: + * + * slot=0: [ 1, 2, 0] + * slot=9: [10, 11, 9] + * slot=63: [64, 65, 63] + */ +#define NUM_SLOTS (1 + 2 * KCSAN_CHECK_ADJACENT) +#define SLOT_IDX(slot, i) (slot + ((i + KCSAN_CHECK_ADJACENT) % NUM_SLOTS)) + +/* + * SLOT_IDX_FAST is used in fast-path. Not first checking the address's primary + * slot (middle) is fine if we assume that data races occur rarely. The set of + * indices {SLOT_IDX(slot, i) | i in [0, NUM_SLOTS)} is equivalent to + * {SLOT_IDX_FAST(slot, i) | i in [0, NUM_SLOTS)}. + */ +#define SLOT_IDX_FAST(slot, i) (slot + i) + +/* + * Watchpoints, with each entry encoded as defined in encoding.h: in order to be + * able to safely update and access a watchpoint without introducing locking + * overhead, we encode each watchpoint as a single atomic long. The initial + * zero-initialized state matches INVALID_WATCHPOINT. + * + * Add NUM_SLOTS-1 entries to account for overflow; this helps avoid having to + * use more complicated SLOT_IDX_FAST calculation with modulo in fast-path. + */ +static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS - 1]; + +/* + * Instructions to skip watching counter, used in should_watch(). We use a + * per-CPU counter to avoid excessive contention. 
+ */ +static DEFINE_PER_CPU(long, kcsan_skip); + +static inline atomic_long_t *find_watchpoint(unsigned long addr, size_t size, + bool expect_write, + long *encoded_watchpoint) +{ + const int slot = watchpoint_slot(addr); + const unsigned long addr_masked = addr & WATCHPOINT_ADDR_MASK; + atomic_long_t *watchpoint; + unsigned long wp_addr_masked; + size_t wp_size; + bool is_write; + int i; + + BUILD_BUG_ON(CONFIG_KCSAN_NUM_WATCHPOINTS < NUM_SLOTS); + + for (i = 0; i < NUM_SLOTS; ++i) { + watchpoint = &watchpoints[SLOT_IDX_FAST(slot, i)]; + *encoded_watchpoint = atomic_long_read(watchpoint); + if (!decode_watchpoint(*encoded_watchpoint, &wp_addr_masked, + &wp_size, &is_write)) + continue; + + if (expect_write && !is_write) + continue; + + /* Check if the watchpoint matches the access. */ + if (matching_access(wp_addr_masked, wp_size, addr_masked, size)) + return watchpoint; + } + + return NULL; +} + +static inline atomic_long_t *insert_watchpoint(unsigned long addr, size_t size, + bool is_write) +{ + const int slot = watchpoint_slot(addr); + const long encoded_watchpoint = encode_watchpoint(addr, size, is_write); + atomic_long_t *watchpoint; + int i; + + /* Check slot index logic, ensuring we stay within array bounds. */ + BUILD_BUG_ON(SLOT_IDX(0, 0) != KCSAN_CHECK_ADJACENT); + BUILD_BUG_ON(SLOT_IDX(0, KCSAN_CHECK_ADJACENT + 1) != 0); + BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS - 1, + KCSAN_CHECK_ADJACENT) != + ARRAY_SIZE(watchpoints) - 1); + BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS - 1, + KCSAN_CHECK_ADJACENT + 1) != + ARRAY_SIZE(watchpoints) - NUM_SLOTS); + + for (i = 0; i < NUM_SLOTS; ++i) { + long expect_val = INVALID_WATCHPOINT; + + /* Try to acquire this slot. */ + watchpoint = &watchpoints[SLOT_IDX(slot, i)]; + if (atomic_long_try_cmpxchg_relaxed(watchpoint, &expect_val, + encoded_watchpoint)) + return watchpoint; + } + + return NULL; +} + +/* + * Return true if watchpoint was successfully consumed, false otherwise. + * + * This may return false if: + * + * 1. another thread already consumed the watchpoint; + * 2. the thread that set up the watchpoint already removed it; + * 3. the watchpoint was removed and then re-used. + */ +static inline bool try_consume_watchpoint(atomic_long_t *watchpoint, + long encoded_watchpoint) +{ + return atomic_long_try_cmpxchg_relaxed(watchpoint, &encoded_watchpoint, + CONSUMED_WATCHPOINT); +} + +/* + * Return true if watchpoint was not touched, false if consumed. + */ +static inline bool remove_watchpoint(atomic_long_t *watchpoint) +{ + return atomic_long_xchg_relaxed(watchpoint, INVALID_WATCHPOINT) != + CONSUMED_WATCHPOINT; +} + +static inline struct kcsan_ctx *get_ctx(void) +{ + /* + * In interrupt, use raw_cpu_ptr to avoid unnecessary checks, that would + * also result in calls that generate warnings in uaccess regions. + */ + return in_task() ? ¤t->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx); +} + +static inline bool is_atomic(const volatile void *ptr) +{ + struct kcsan_ctx *ctx = get_ctx(); + + if (unlikely(ctx->atomic_next > 0)) { + /* + * Because we do not have separate contexts for nested + * interrupts, in case atomic_next is set, we simply assume that + * the outer interrupt set atomic_next. In the worst case, we + * will conservatively consider operations as atomic. This is a + * reasonable trade-off to make, since this case should be + * extremely rare; however, even if extremely rare, it could + * lead to false positives otherwise. 
+ */ + if ((hardirq_count() >> HARDIRQ_SHIFT) < 2) + --ctx->atomic_next; /* in task, or outer interrupt */ + return true; + } + if (unlikely(ctx->atomic_nest_count > 0 || ctx->in_flat_atomic)) + return true; + + return kcsan_is_atomic(ptr); +} + +static inline bool should_watch(const volatile void *ptr, int type) +{ + /* + * Never set up watchpoints when memory operations are atomic. + * + * Need to check this first, before kcsan_skip check below: (1) atomics + * should not count towards skipped instructions, and (2) to actually + * decrement kcsan_atomic_next for consecutive instruction stream. + */ + if ((type & KCSAN_ACCESS_ATOMIC) != 0 || is_atomic(ptr)) + return false; + + if (this_cpu_dec_return(kcsan_skip) >= 0) + return false; + + /* + * NOTE: If we get here, kcsan_skip must always be reset in slow path + * via reset_kcsan_skip() to avoid underflow. + */ + + /* this operation should be watched */ + return true; +} + +static inline void reset_kcsan_skip(void) +{ + long skip_count = CONFIG_KCSAN_SKIP_WATCH - + (IS_ENABLED(CONFIG_KCSAN_SKIP_WATCH_RANDOMIZE) ? + prandom_u32_max(CONFIG_KCSAN_SKIP_WATCH) : + 0); + this_cpu_write(kcsan_skip, skip_count); +} + +static inline bool kcsan_is_enabled(void) +{ + return READ_ONCE(kcsan_enabled) && get_ctx()->disable_count == 0; +} + +static inline unsigned int get_delay(void) +{ + unsigned int delay = in_task() ? CONFIG_KCSAN_UDELAY_TASK : + CONFIG_KCSAN_UDELAY_INTERRUPT; + return delay - (IS_ENABLED(CONFIG_KCSAN_DELAY_RANDOMIZE) ? + prandom_u32_max(delay) : + 0); +} + +/* + * Pull everything together: check_access() below contains the performance + * critical operations; the fast-path (including check_access) functions should + * all be inlinable by the instrumentation functions. + * + * The slow-path (kcsan_found_watchpoint, kcsan_setup_watchpoint) are + * non-inlinable -- note that, we prefix these with "kcsan_" to ensure they can + * be filtered from the stacktrace, as well as give them unique names for the + * UACCESS whitelist of objtool. Each function uses user_access_save/restore(), + * since they do not access any user memory, but instrumentation is still + * emitted in UACCESS regions. + */ + +static noinline void kcsan_found_watchpoint(const volatile void *ptr, + size_t size, bool is_write, + atomic_long_t *watchpoint, + long encoded_watchpoint) +{ + unsigned long flags; + bool consumed; + + if (!kcsan_is_enabled()) + return; + /* + * Consume the watchpoint as soon as possible, to minimize the chances + * of !consumed. Consuming the watchpoint must always be guarded by + * kcsan_is_enabled() check, as otherwise we might erroneously + * triggering reports when disabled. + */ + consumed = try_consume_watchpoint(watchpoint, encoded_watchpoint); + + /* keep this after try_consume_watchpoint */ + flags = user_access_save(); + + if (consumed) { + kcsan_report(ptr, size, is_write, true, raw_smp_processor_id(), + KCSAN_REPORT_CONSUMED_WATCHPOINT); + } else { + /* + * The other thread may not print any diagnostics, as it has + * already removed the watchpoint, or another thread consumed + * the watchpoint before this thread. 
+ */ + kcsan_counter_inc(KCSAN_COUNTER_REPORT_RACES); + } + kcsan_counter_inc(KCSAN_COUNTER_DATA_RACES); + + user_access_restore(flags); +} + +static noinline void kcsan_setup_watchpoint(const volatile void *ptr, + size_t size, bool is_write) +{ + atomic_long_t *watchpoint; + union { + u8 _1; + u16 _2; + u32 _4; + u64 _8; + } expect_value; + bool value_change = false; + unsigned long ua_flags = user_access_save(); + unsigned long irq_flags; + + /* + * Always reset kcsan_skip counter in slow-path to avoid underflow; see + * should_watch(). + */ + reset_kcsan_skip(); + + if (!kcsan_is_enabled()) + goto out; + + if (!check_encodable((unsigned long)ptr, size)) { + kcsan_counter_inc(KCSAN_COUNTER_UNENCODABLE_ACCESSES); + goto out; + } + + /* + * Disable interrupts & preemptions to avoid another thread on the same + * CPU accessing memory locations for the set up watchpoint; this is to + * avoid reporting races to e.g. CPU-local data. + * + * An alternative would be adding the source CPU to the watchpoint + * encoding, and checking that watchpoint-CPU != this-CPU. There are + * several problems with this: + * 1. we should avoid stealing more bits from the watchpoint encoding + * as it would affect accuracy, as well as increase performance + * overhead in the fast-path; + * 2. if we are preempted, but there *is* a genuine data race, we + * would *not* report it -- since this is the common case (vs. + * CPU-local data accesses), it makes more sense (from a data race + * detection point of view) to simply disable preemptions to ensure + * as many tasks as possible run on other CPUs. + */ + local_irq_save(irq_flags); + + watchpoint = insert_watchpoint((unsigned long)ptr, size, is_write); + if (watchpoint == NULL) { + /* + * Out of capacity: the size of `watchpoints`, and the frequency + * with which `should_watch()` returns true should be tweaked so + * that this case happens very rarely. + */ + kcsan_counter_inc(KCSAN_COUNTER_NO_CAPACITY); + goto out_unlock; + } + + kcsan_counter_inc(KCSAN_COUNTER_SETUP_WATCHPOINTS); + kcsan_counter_inc(KCSAN_COUNTER_USED_WATCHPOINTS); + + /* + * Read the current value, to later check and infer a race if the data + * was modified via a non-instrumented access, e.g. from a device. + */ + switch (size) { + case 1: + expect_value._1 = READ_ONCE(*(const u8 *)ptr); + break; + case 2: + expect_value._2 = READ_ONCE(*(const u16 *)ptr); + break; + case 4: + expect_value._4 = READ_ONCE(*(const u32 *)ptr); + break; + case 8: + expect_value._8 = READ_ONCE(*(const u64 *)ptr); + break; + default: + break; /* ignore; we do not diff the values */ + } + + if (IS_ENABLED(CONFIG_KCSAN_DEBUG)) { + kcsan_disable_current(); + pr_err("KCSAN: watching %s, size: %zu, addr: %px [slot: %d, encoded: %lx]\n", + is_write ? "write" : "read", size, ptr, + watchpoint_slot((unsigned long)ptr), + encode_watchpoint((unsigned long)ptr, size, is_write)); + kcsan_enable_current(); + } + + /* + * Delay this thread, to increase probability of observing a racy + * conflicting access. + */ + udelay(get_delay()); + + /* + * Re-read value, and check if it is as expected; if not, we infer a + * racy access. 
+ */ + switch (size) { + case 1: + value_change = expect_value._1 != READ_ONCE(*(const u8 *)ptr); + break; + case 2: + value_change = expect_value._2 != READ_ONCE(*(const u16 *)ptr); + break; + case 4: + value_change = expect_value._4 != READ_ONCE(*(const u32 *)ptr); + break; + case 8: + value_change = expect_value._8 != READ_ONCE(*(const u64 *)ptr); + break; + default: + break; /* ignore; we do not diff the values */ + } + + /* Check if this access raced with another. */ + if (!remove_watchpoint(watchpoint)) { + /* + * No need to increment 'data_races' counter, as the racing + * thread already did. + */ + kcsan_report(ptr, size, is_write, size > 8 || value_change, + smp_processor_id(), KCSAN_REPORT_RACE_SIGNAL); + } else if (value_change) { + /* Inferring a race, since the value should not have changed. */ + kcsan_counter_inc(KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN); + if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN)) + kcsan_report(ptr, size, is_write, true, + smp_processor_id(), + KCSAN_REPORT_RACE_UNKNOWN_ORIGIN); + } + + kcsan_counter_dec(KCSAN_COUNTER_USED_WATCHPOINTS); +out_unlock: + local_irq_restore(irq_flags); +out: + user_access_restore(ua_flags); +} + +static __always_inline void check_access(const volatile void *ptr, size_t size, + int type) +{ + const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0; + atomic_long_t *watchpoint; + long encoded_watchpoint; + + /* + * Avoid user_access_save in fast-path: find_watchpoint is safe without + * user_access_save, as the address that ptr points to is only used to + * check if a watchpoint exists; ptr is never dereferenced. + */ + watchpoint = find_watchpoint((unsigned long)ptr, size, !is_write, + &encoded_watchpoint); + /* + * It is safe to check kcsan_is_enabled() after find_watchpoint in the + * slow-path, as long as no state changes that cause a data race to be + * detected and reported have occurred until kcsan_is_enabled() is + * checked. + */ + + if (unlikely(watchpoint != NULL)) + kcsan_found_watchpoint(ptr, size, is_write, watchpoint, + encoded_watchpoint); + else if (unlikely(should_watch(ptr, type))) + kcsan_setup_watchpoint(ptr, size, is_write); +} + +/* === Public interface ===================================================== */ + +void __init kcsan_init(void) +{ + BUG_ON(!in_task()); + + kcsan_debugfs_init(); + + /* + * We are in the init task, and no other tasks should be running; + * WRITE_ONCE without memory barrier is sufficient. + */ + if (IS_ENABLED(CONFIG_KCSAN_EARLY_ENABLE)) + WRITE_ONCE(kcsan_enabled, true); +} + +/* === Exported interface =================================================== */ + +void kcsan_disable_current(void) +{ + ++get_ctx()->disable_count; +} +EXPORT_SYMBOL(kcsan_disable_current); + +void kcsan_enable_current(void) +{ + if (get_ctx()->disable_count-- == 0) { + /* + * Warn if kcsan_enable_current() calls are unbalanced with + * kcsan_disable_current() calls, which causes disable_count to + * become negative and should not happen. + */ + kcsan_disable_current(); /* restore to 0, KCSAN still enabled */ + kcsan_disable_current(); /* disable to generate warning */ + WARN(1, "Unbalanced %s()", __func__); + kcsan_enable_current(); + } +} +EXPORT_SYMBOL(kcsan_enable_current); + +void kcsan_nestable_atomic_begin(void) +{ + /* + * Do *not* check and warn if we are in a flat atomic region: nestable + * and flat atomic regions are independent from each other. + * See include/linux/kcsan.h: struct kcsan_ctx comments for more + * comments. 
+ */ + + ++get_ctx()->atomic_nest_count; +} +EXPORT_SYMBOL(kcsan_nestable_atomic_begin); + +void kcsan_nestable_atomic_end(void) +{ + if (get_ctx()->atomic_nest_count-- == 0) { + /* + * Warn if kcsan_nestable_atomic_end() calls are unbalanced with + * kcsan_nestable_atomic_begin() calls, which causes + * atomic_nest_count to become negative and should not happen. + */ + kcsan_nestable_atomic_begin(); /* restore to 0 */ + kcsan_disable_current(); /* disable to generate warning */ + WARN(1, "Unbalanced %s()", __func__); + kcsan_enable_current(); + } +} +EXPORT_SYMBOL(kcsan_nestable_atomic_end); + +void kcsan_flat_atomic_begin(void) +{ + get_ctx()->in_flat_atomic = true; +} +EXPORT_SYMBOL(kcsan_flat_atomic_begin); + +void kcsan_flat_atomic_end(void) +{ + get_ctx()->in_flat_atomic = false; +} +EXPORT_SYMBOL(kcsan_flat_atomic_end); + +void kcsan_atomic_next(int n) +{ + get_ctx()->atomic_next = n; +} +EXPORT_SYMBOL(kcsan_atomic_next); + +void __kcsan_check_access(const volatile void *ptr, size_t size, int type) +{ + check_access(ptr, size, type); +} +EXPORT_SYMBOL(__kcsan_check_access); + +/* + * KCSAN uses the same instrumentation that is emitted by supported compilers + * for ThreadSanitizer (TSAN). + * + * When enabled, the compiler emits instrumentation calls (the functions + * prefixed with "__tsan" below) for all loads and stores that it generated; + * inline asm is not instrumented. + * + * Note that, not all supported compiler versions distinguish aligned/unaligned + * accesses, but e.g. recent versions of Clang do. We simply alias the unaligned + * version to the generic version, which can handle both. + */ + +#define DEFINE_TSAN_READ_WRITE(size) \ + void __tsan_read##size(void *ptr) \ + { \ + check_access(ptr, size, 0); \ + } \ + EXPORT_SYMBOL(__tsan_read##size); \ + void __tsan_unaligned_read##size(void *ptr) \ + __alias(__tsan_read##size); \ + EXPORT_SYMBOL(__tsan_unaligned_read##size); \ + void __tsan_write##size(void *ptr) \ + { \ + check_access(ptr, size, KCSAN_ACCESS_WRITE); \ + } \ + EXPORT_SYMBOL(__tsan_write##size); \ + void __tsan_unaligned_write##size(void *ptr) \ + __alias(__tsan_write##size); \ + EXPORT_SYMBOL(__tsan_unaligned_write##size) + +DEFINE_TSAN_READ_WRITE(1); +DEFINE_TSAN_READ_WRITE(2); +DEFINE_TSAN_READ_WRITE(4); +DEFINE_TSAN_READ_WRITE(8); +DEFINE_TSAN_READ_WRITE(16); + +void __tsan_read_range(void *ptr, size_t size) +{ + check_access(ptr, size, 0); +} +EXPORT_SYMBOL(__tsan_read_range); + +void __tsan_write_range(void *ptr, size_t size) +{ + check_access(ptr, size, KCSAN_ACCESS_WRITE); +} +EXPORT_SYMBOL(__tsan_write_range); + +/* + * The below are not required by KCSAN, but can still be emitted by the + * compiler. + */ +void __tsan_func_entry(void *call_pc) +{ +} +EXPORT_SYMBOL(__tsan_func_entry); +void __tsan_func_exit(void) +{ +} +EXPORT_SYMBOL(__tsan_func_exit); +void __tsan_init(void) +{ +} +EXPORT_SYMBOL(__tsan_init); diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c new file mode 100644 index 0000000000000..041d520a01837 --- /dev/null +++ b/kernel/kcsan/debugfs.c @@ -0,0 +1,275 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "kcsan.h" + +/* + * Statistics counters. + */ +static atomic_long_t counters[KCSAN_COUNTER_COUNT]; + +/* + * Addresses for filtering functions from reporting. This list can be used as a + * whitelist or blacklist. 
+ */ +static struct { + unsigned long *addrs; /* array of addresses */ + size_t size; /* current size */ + int used; /* number of elements used */ + bool sorted; /* if elements are sorted */ + bool whitelist; /* if list is a blacklist or whitelist */ +} report_filterlist = { + .addrs = NULL, + .size = 8, /* small initial size */ + .used = 0, + .sorted = false, + .whitelist = false, /* default is blacklist */ +}; +static DEFINE_SPINLOCK(report_filterlist_lock); + +static const char *counter_to_name(enum kcsan_counter_id id) +{ + switch (id) { + case KCSAN_COUNTER_USED_WATCHPOINTS: + return "used_watchpoints"; + case KCSAN_COUNTER_SETUP_WATCHPOINTS: + return "setup_watchpoints"; + case KCSAN_COUNTER_DATA_RACES: + return "data_races"; + case KCSAN_COUNTER_NO_CAPACITY: + return "no_capacity"; + case KCSAN_COUNTER_REPORT_RACES: + return "report_races"; + case KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN: + return "races_unknown_origin"; + case KCSAN_COUNTER_UNENCODABLE_ACCESSES: + return "unencodable_accesses"; + case KCSAN_COUNTER_ENCODING_FALSE_POSITIVES: + return "encoding_false_positives"; + case KCSAN_COUNTER_COUNT: + BUG(); + } + return NULL; +} + +void kcsan_counter_inc(enum kcsan_counter_id id) +{ + atomic_long_inc(&counters[id]); +} + +void kcsan_counter_dec(enum kcsan_counter_id id) +{ + atomic_long_dec(&counters[id]); +} + +/* + * The microbenchmark allows benchmarking KCSAN core runtime only. To run + * multiple threads, pipe 'microbench=' from multiple tasks into the + * debugfs file. + */ +static void microbenchmark(unsigned long iters) +{ + cycles_t cycles; + + pr_info("KCSAN: %s begin | iters: %lu\n", __func__, iters); + + cycles = get_cycles(); + while (iters--) { + /* + * We can run this benchmark from multiple tasks; this address + * calculation increases likelyhood of some accesses overlapping + * (they still won't conflict because all are reads). + */ + unsigned long addr = + iters % (CONFIG_KCSAN_NUM_WATCHPOINTS * PAGE_SIZE); + __kcsan_check_read((void *)addr, sizeof(long)); + } + cycles = get_cycles() - cycles; + + pr_info("KCSAN: %s end | cycles: %llu\n", __func__, cycles); +} + +static int cmp_filterlist_addrs(const void *rhs, const void *lhs) +{ + const unsigned long a = *(const unsigned long *)rhs; + const unsigned long b = *(const unsigned long *)lhs; + + return a < b ? -1 : a == b ? 0 : 1; +} + +bool kcsan_skip_report_debugfs(unsigned long func_addr) +{ + unsigned long symbolsize, offset; + unsigned long flags; + bool ret = false; + + if (!kallsyms_lookup_size_offset(func_addr, &symbolsize, &offset)) + return false; + func_addr -= offset; /* get function start */ + + spin_lock_irqsave(&report_filterlist_lock, flags); + if (report_filterlist.used == 0) + goto out; + + /* Sort array if it is unsorted, and then do a binary search. */ + if (!report_filterlist.sorted) { + sort(report_filterlist.addrs, report_filterlist.used, + sizeof(unsigned long), cmp_filterlist_addrs, NULL); + report_filterlist.sorted = true; + } + ret = !!bsearch(&func_addr, report_filterlist.addrs, + report_filterlist.used, sizeof(unsigned long), + cmp_filterlist_addrs); + if (report_filterlist.whitelist) + ret = !ret; + +out: + spin_unlock_irqrestore(&report_filterlist_lock, flags); + return ret; +} + +static void set_report_filterlist_whitelist(bool whitelist) +{ + unsigned long flags; + + spin_lock_irqsave(&report_filterlist_lock, flags); + report_filterlist.whitelist = whitelist; + spin_unlock_irqrestore(&report_filterlist_lock, flags); +} + +/* Returns 0 on success, error-code otherwise. 
*/ +static ssize_t insert_report_filterlist(const char *func) +{ + unsigned long flags; + unsigned long addr = kallsyms_lookup_name(func); + ssize_t ret = 0; + + if (!addr) { + pr_err("KCSAN: could not find function: '%s'\n", func); + return -ENOENT; + } + + spin_lock_irqsave(&report_filterlist_lock, flags); + + if (report_filterlist.addrs == NULL) { + /* initial allocation */ + report_filterlist.addrs = + kmalloc_array(report_filterlist.size, + sizeof(unsigned long), GFP_KERNEL); + if (report_filterlist.addrs == NULL) { + ret = -ENOMEM; + goto out; + } + } else if (report_filterlist.used == report_filterlist.size) { + /* resize filterlist */ + size_t new_size = report_filterlist.size * 2; + unsigned long *new_addrs = + krealloc(report_filterlist.addrs, + new_size * sizeof(unsigned long), GFP_KERNEL); + + if (new_addrs == NULL) { + /* leave filterlist itself untouched */ + ret = -ENOMEM; + goto out; + } + + report_filterlist.size = new_size; + report_filterlist.addrs = new_addrs; + } + + /* Note: deduplicating should be done in userspace. */ + report_filterlist.addrs[report_filterlist.used++] = + kallsyms_lookup_name(func); + report_filterlist.sorted = false; + +out: + spin_unlock_irqrestore(&report_filterlist_lock, flags); + return ret; +} + +static int show_info(struct seq_file *file, void *v) +{ + int i; + unsigned long flags; + + /* show stats */ + seq_printf(file, "enabled: %i\n", READ_ONCE(kcsan_enabled)); + for (i = 0; i < KCSAN_COUNTER_COUNT; ++i) + seq_printf(file, "%s: %ld\n", counter_to_name(i), + atomic_long_read(&counters[i])); + + /* show filter functions, and filter type */ + spin_lock_irqsave(&report_filterlist_lock, flags); + seq_printf(file, "\n%s functions: %s\n", + report_filterlist.whitelist ? "whitelisted" : "blacklisted", + report_filterlist.used == 0 ? "none" : ""); + for (i = 0; i < report_filterlist.used; ++i) + seq_printf(file, " %ps\n", (void *)report_filterlist.addrs[i]); + spin_unlock_irqrestore(&report_filterlist_lock, flags); + + return 0; +} + +static int debugfs_open(struct inode *inode, struct file *file) +{ + return single_open(file, show_info, NULL); +} + +static ssize_t debugfs_write(struct file *file, const char __user *buf, + size_t count, loff_t *off) +{ + char kbuf[KSYM_NAME_LEN]; + char *arg; + int read_len = count < (sizeof(kbuf) - 1) ? 
count : (sizeof(kbuf) - 1); + + if (copy_from_user(kbuf, buf, read_len)) + return -EFAULT; + kbuf[read_len] = '\0'; + arg = strstrip(kbuf); + + if (!strcmp(arg, "on")) { + WRITE_ONCE(kcsan_enabled, true); + } else if (!strcmp(arg, "off")) { + WRITE_ONCE(kcsan_enabled, false); + } else if (!strncmp(arg, "microbench=", sizeof("microbench=") - 1)) { + unsigned long iters; + + if (kstrtoul(&arg[sizeof("microbench=") - 1], 0, &iters)) + return -EINVAL; + microbenchmark(iters); + } else if (!strcmp(arg, "whitelist")) { + set_report_filterlist_whitelist(true); + } else if (!strcmp(arg, "blacklist")) { + set_report_filterlist_whitelist(false); + } else if (arg[0] == '!') { + ssize_t ret = insert_report_filterlist(&arg[1]); + + if (ret < 0) + return ret; + } else { + return -EINVAL; + } + + return count; +} + +static const struct file_operations debugfs_ops = { .read = seq_read, + .open = debugfs_open, + .write = debugfs_write, + .release = single_release }; + +void __init kcsan_debugfs_init(void) +{ + debugfs_create_file("kcsan", 0644, NULL, NULL, &debugfs_ops); +} diff --git a/kernel/kcsan/encoding.h b/kernel/kcsan/encoding.h new file mode 100644 index 0000000000000..e17bdac0e54bd --- /dev/null +++ b/kernel/kcsan/encoding.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _KERNEL_KCSAN_ENCODING_H +#define _KERNEL_KCSAN_ENCODING_H + +#include +#include +#include + +#include "kcsan.h" + +#define SLOT_RANGE PAGE_SIZE +#define INVALID_WATCHPOINT 0 +#define CONSUMED_WATCHPOINT 1 + +/* + * The maximum useful size of accesses for which we set up watchpoints is the + * max range of slots we check on an access. + */ +#define MAX_ENCODABLE_SIZE (SLOT_RANGE * (1 + KCSAN_CHECK_ADJACENT)) + +/* + * Number of bits we use to store size info. + */ +#define WATCHPOINT_SIZE_BITS bits_per(MAX_ENCODABLE_SIZE) +/* + * This encoding for addresses discards the upper (1 for is-write + SIZE_BITS); + * however, most 64-bit architectures do not use the full 64-bit address space. + * Also, in order for a false positive to be observable 2 things need to happen: + * + * 1. different addresses but with the same encoded address race; + * 2. and both map onto the same watchpoint slots; + * + * Both these are assumed to be very unlikely. However, in case it still happens + * happens, the report logic will filter out the false positive (see report.c). + */ +#define WATCHPOINT_ADDR_BITS (BITS_PER_LONG - 1 - WATCHPOINT_SIZE_BITS) + +/* + * Masks to set/retrieve the encoded data. + */ +#define WATCHPOINT_WRITE_MASK BIT(BITS_PER_LONG - 1) +#define WATCHPOINT_SIZE_MASK \ + GENMASK(BITS_PER_LONG - 2, BITS_PER_LONG - 2 - WATCHPOINT_SIZE_BITS) +#define WATCHPOINT_ADDR_MASK \ + GENMASK(BITS_PER_LONG - 3 - WATCHPOINT_SIZE_BITS, 0) + +static inline bool check_encodable(unsigned long addr, size_t size) +{ + return size <= MAX_ENCODABLE_SIZE; +} + +static inline long encode_watchpoint(unsigned long addr, size_t size, + bool is_write) +{ + return (long)((is_write ? 
WATCHPOINT_WRITE_MASK : 0) | + (size << WATCHPOINT_ADDR_BITS) | + (addr & WATCHPOINT_ADDR_MASK)); +} + +static inline bool decode_watchpoint(long watchpoint, + unsigned long *addr_masked, size_t *size, + bool *is_write) +{ + if (watchpoint == INVALID_WATCHPOINT || + watchpoint == CONSUMED_WATCHPOINT) + return false; + + *addr_masked = (unsigned long)watchpoint & WATCHPOINT_ADDR_MASK; + *size = ((unsigned long)watchpoint & WATCHPOINT_SIZE_MASK) >> + WATCHPOINT_ADDR_BITS; + *is_write = !!((unsigned long)watchpoint & WATCHPOINT_WRITE_MASK); + + return true; +} + +/* + * Return watchpoint slot for an address. + */ +static inline int watchpoint_slot(unsigned long addr) +{ + return (addr / PAGE_SIZE) % CONFIG_KCSAN_NUM_WATCHPOINTS; +} + +static inline bool matching_access(unsigned long addr1, size_t size1, + unsigned long addr2, size_t size2) +{ + unsigned long end_range1 = addr1 + size1 - 1; + unsigned long end_range2 = addr2 + size2 - 1; + + return addr1 <= end_range2 && addr2 <= end_range1; +} + +#endif /* _KERNEL_KCSAN_ENCODING_H */ diff --git a/kernel/kcsan/kcsan.h b/kernel/kcsan/kcsan.h new file mode 100644 index 0000000000000..1bb2f1c0d61e4 --- /dev/null +++ b/kernel/kcsan/kcsan.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * The Kernel Concurrency Sanitizer (KCSAN) infrastructure. For more info please + * see Documentation/dev-tools/kcsan.rst. + */ + +#ifndef _KERNEL_KCSAN_KCSAN_H +#define _KERNEL_KCSAN_KCSAN_H + +#include + +/* The number of adjacent watchpoints to check. */ +#define KCSAN_CHECK_ADJACENT 1 + +/* + * Globally enable and disable KCSAN. + */ +extern bool kcsan_enabled; + +/* + * Initialize debugfs file. + */ +void kcsan_debugfs_init(void); + +enum kcsan_counter_id { + /* + * Number of watchpoints currently in use. + */ + KCSAN_COUNTER_USED_WATCHPOINTS, + + /* + * Total number of watchpoints set up. + */ + KCSAN_COUNTER_SETUP_WATCHPOINTS, + + /* + * Total number of data races. + */ + KCSAN_COUNTER_DATA_RACES, + + /* + * Number of times no watchpoints were available. + */ + KCSAN_COUNTER_NO_CAPACITY, + + /* + * A thread checking a watchpoint raced with another checking thread; + * only one will be reported. + */ + KCSAN_COUNTER_REPORT_RACES, + + /* + * Observed data value change, but writer thread unknown. + */ + KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN, + + /* + * The access cannot be encoded to a valid watchpoint. + */ + KCSAN_COUNTER_UNENCODABLE_ACCESSES, + + /* + * Watchpoint encoding caused a watchpoint to fire on mismatching + * accesses. + */ + KCSAN_COUNTER_ENCODING_FALSE_POSITIVES, + + KCSAN_COUNTER_COUNT, /* number of counters */ +}; + +/* + * Increment/decrement counter with given id; avoid calling these in fast-path. + */ +void kcsan_counter_inc(enum kcsan_counter_id id); +void kcsan_counter_dec(enum kcsan_counter_id id); + +/* + * Returns true if data races in the function symbol that maps to func_addr + * (offsets are ignored) should *not* be reported. + */ +bool kcsan_skip_report_debugfs(unsigned long func_addr); + +enum kcsan_report_type { + /* + * The thread that set up the watchpoint and briefly stalled was + * signalled that another thread triggered the watchpoint. + */ + KCSAN_REPORT_RACE_SIGNAL, + + /* + * A thread found and consumed a matching watchpoint. + */ + KCSAN_REPORT_CONSUMED_WATCHPOINT, + + /* + * No other thread was observed to race with the access, but the data + * value before and after the stall differs. + */ + KCSAN_REPORT_RACE_UNKNOWN_ORIGIN, +}; +/* + * Print a race report from thread that encountered the race. 
+ */ +void kcsan_report(const volatile void *ptr, size_t size, bool is_write, + bool value_change, int cpu_id, enum kcsan_report_type type); + +#endif /* _KERNEL_KCSAN_KCSAN_H */ diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c new file mode 100644 index 0000000000000..ead5610bafa71 --- /dev/null +++ b/kernel/kcsan/report.c @@ -0,0 +1,320 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include +#include + +#include "kcsan.h" +#include "encoding.h" + +/* + * Max. number of stack entries to show in the report. + */ +#define NUM_STACK_ENTRIES 64 + +/* + * Other thread info: communicated from other racing thread to thread that set + * up the watchpoint, which then prints the complete report atomically. Only + * need one struct, as all threads should to be serialized regardless to print + * the reports, with reporting being in the slow-path. + */ +static struct { + const volatile void *ptr; + size_t size; + bool is_write; + int task_pid; + int cpu_id; + unsigned long stack_entries[NUM_STACK_ENTRIES]; + int num_stack_entries; +} other_info = { .ptr = NULL }; + +/* + * This spinlock protects reporting and other_info, since other_info is usually + * required when reporting. + */ +static DEFINE_SPINLOCK(report_lock); + +/* + * Special rules to skip reporting. + */ +static bool skip_report(bool is_write, bool value_change, + unsigned long top_frame) +{ + if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY) && is_write && + !value_change) { + /* + * The access is a write, but the data value did not change. + * + * We opt-out of this filter for certain functions at request of + * maintainers. + */ + char buf[64]; + + snprintf(buf, sizeof(buf), "%ps", (void *)top_frame); + if (!strnstr(buf, "rcu_", sizeof(buf)) && + !strnstr(buf, "_rcu", sizeof(buf)) && + !strnstr(buf, "_srcu", sizeof(buf))) + return true; + } + + return kcsan_skip_report_debugfs(top_frame); +} + +static inline const char *get_access_type(bool is_write) +{ + return is_write ? "write" : "read"; +} + +/* Return thread description: in task or interrupt. */ +static const char *get_thread_desc(int task_id) +{ + if (task_id != -1) { + static char buf[32]; /* safe: protected by report_lock */ + + snprintf(buf, sizeof(buf), "task %i", task_id); + return buf; + } + return "interrupt"; +} + +/* Helper to skip KCSAN-related functions in stack-trace. */ +static int get_stack_skipnr(unsigned long stack_entries[], int num_entries) +{ + char buf[64]; + int skip = 0; + + for (; skip < num_entries; ++skip) { + snprintf(buf, sizeof(buf), "%ps", (void *)stack_entries[skip]); + if (!strnstr(buf, "csan_", sizeof(buf)) && + !strnstr(buf, "tsan_", sizeof(buf)) && + !strnstr(buf, "_once_size", sizeof(buf))) { + break; + } + } + return skip; +} + +/* Compares symbolized strings of addr1 and addr2. */ +static int sym_strcmp(void *addr1, void *addr2) +{ + char buf1[64]; + char buf2[64]; + + snprintf(buf1, sizeof(buf1), "%pS", addr1); + snprintf(buf2, sizeof(buf2), "%pS", addr2); + return strncmp(buf1, buf2, sizeof(buf1)); +} + +/* + * Returns true if a report was generated, false otherwise. 
+ */ +static bool print_report(const volatile void *ptr, size_t size, bool is_write, + bool value_change, int cpu_id, + enum kcsan_report_type type) +{ + unsigned long stack_entries[NUM_STACK_ENTRIES] = { 0 }; + int num_stack_entries = + stack_trace_save(stack_entries, NUM_STACK_ENTRIES, 1); + int skipnr = get_stack_skipnr(stack_entries, num_stack_entries); + int other_skipnr; + + /* + * Must check report filter rules before starting to print. + */ + if (skip_report(is_write, true, stack_entries[skipnr])) + return false; + + if (type == KCSAN_REPORT_RACE_SIGNAL) { + other_skipnr = get_stack_skipnr(other_info.stack_entries, + other_info.num_stack_entries); + + /* value_change is only known for the other thread */ + if (skip_report(other_info.is_write, value_change, + other_info.stack_entries[other_skipnr])) + return false; + } + + /* Print report header. */ + pr_err("==================================================================\n"); + switch (type) { + case KCSAN_REPORT_RACE_SIGNAL: { + void *this_fn = (void *)stack_entries[skipnr]; + void *other_fn = (void *)other_info.stack_entries[other_skipnr]; + int cmp; + + /* + * Order functions lexographically for consistent bug titles. + * Do not print offset of functions to keep title short. + */ + cmp = sym_strcmp(other_fn, this_fn); + pr_err("BUG: KCSAN: data-race in %ps / %ps\n", + cmp < 0 ? other_fn : this_fn, + cmp < 0 ? this_fn : other_fn); + } break; + + case KCSAN_REPORT_RACE_UNKNOWN_ORIGIN: + pr_err("BUG: KCSAN: data-race in %pS\n", + (void *)stack_entries[skipnr]); + break; + + default: + BUG(); + } + + pr_err("\n"); + + /* Print information about the racing accesses. */ + switch (type) { + case KCSAN_REPORT_RACE_SIGNAL: + pr_err("%s to 0x%px of %zu bytes by %s on cpu %i:\n", + get_access_type(other_info.is_write), other_info.ptr, + other_info.size, get_thread_desc(other_info.task_pid), + other_info.cpu_id); + + /* Print the other thread's stack trace. */ + stack_trace_print(other_info.stack_entries + other_skipnr, + other_info.num_stack_entries - other_skipnr, + 0); + + pr_err("\n"); + pr_err("%s to 0x%px of %zu bytes by %s on cpu %i:\n", + get_access_type(is_write), ptr, size, + get_thread_desc(in_task() ? task_pid_nr(current) : -1), + cpu_id); + break; + + case KCSAN_REPORT_RACE_UNKNOWN_ORIGIN: + pr_err("race at unknown origin, with %s to 0x%px of %zu bytes by %s on cpu %i:\n", + get_access_type(is_write), ptr, size, + get_thread_desc(in_task() ? task_pid_nr(current) : -1), + cpu_id); + break; + + default: + BUG(); + } + /* Print stack trace of this thread. */ + stack_trace_print(stack_entries + skipnr, num_stack_entries - skipnr, + 0); + + /* Print report footer. */ + pr_err("\n"); + pr_err("Reported by Kernel Concurrency Sanitizer on:\n"); + dump_stack_print_info(KERN_DEFAULT); + pr_err("==================================================================\n"); + + return true; +} + +static void release_report(unsigned long *flags, enum kcsan_report_type type) +{ + if (type == KCSAN_REPORT_RACE_SIGNAL) + other_info.ptr = NULL; /* mark for reuse */ + + spin_unlock_irqrestore(&report_lock, *flags); +} + +/* + * Depending on the report type either sets other_info and returns false, or + * acquires the matching other_info and returns true. If other_info is not + * required for the report type, simply acquires report_lock and returns true. 
+ */ +static bool prepare_report(unsigned long *flags, const volatile void *ptr, + size_t size, bool is_write, int cpu_id, + enum kcsan_report_type type) +{ + if (type != KCSAN_REPORT_CONSUMED_WATCHPOINT && + type != KCSAN_REPORT_RACE_SIGNAL) { + /* other_info not required; just acquire report_lock */ + spin_lock_irqsave(&report_lock, *flags); + return true; + } + +retry: + spin_lock_irqsave(&report_lock, *flags); + + switch (type) { + case KCSAN_REPORT_CONSUMED_WATCHPOINT: + if (other_info.ptr != NULL) + break; /* still in use, retry */ + + other_info.ptr = ptr; + other_info.size = size; + other_info.is_write = is_write; + other_info.task_pid = in_task() ? task_pid_nr(current) : -1; + other_info.cpu_id = cpu_id; + other_info.num_stack_entries = stack_trace_save( + other_info.stack_entries, NUM_STACK_ENTRIES, 1); + + spin_unlock_irqrestore(&report_lock, *flags); + + /* + * The other thread will print the summary; other_info may now + * be consumed. + */ + return false; + + case KCSAN_REPORT_RACE_SIGNAL: + if (other_info.ptr == NULL) + break; /* no data available yet, retry */ + + /* + * First check if this is the other_info we are expecting, i.e. + * matches based on how watchpoint was encoded. + */ + if (!matching_access((unsigned long)other_info.ptr & + WATCHPOINT_ADDR_MASK, + other_info.size, + (unsigned long)ptr & WATCHPOINT_ADDR_MASK, + size)) + break; /* mismatching watchpoint, retry */ + + if (!matching_access((unsigned long)other_info.ptr, + other_info.size, (unsigned long)ptr, + size)) { + /* + * If the actual accesses to not match, this was a false + * positive due to watchpoint encoding. + */ + kcsan_counter_inc( + KCSAN_COUNTER_ENCODING_FALSE_POSITIVES); + + /* discard this other_info */ + release_report(flags, KCSAN_REPORT_RACE_SIGNAL); + return false; + } + + /* + * Matching & usable access in other_info: keep other_info_lock + * locked, as this thread consumes it to print the full report; + * unlocked in release_report. + */ + return true; + + default: + BUG(); + } + + spin_unlock_irqrestore(&report_lock, *flags); + goto retry; +} + +void kcsan_report(const volatile void *ptr, size_t size, bool is_write, + bool value_change, int cpu_id, enum kcsan_report_type type) +{ + unsigned long flags = 0; + + kcsan_disable_current(); + if (prepare_report(&flags, ptr, size, is_write, cpu_id, type)) { + if (print_report(ptr, size, is_write, value_change, cpu_id, + type) && + panic_on_warn) + panic("panic_on_warn set ...\n"); + + release_report(&flags, type); + } + kcsan_enable_current(); +} diff --git a/kernel/kcsan/test.c b/kernel/kcsan/test.c new file mode 100644 index 0000000000000..0bae63c5ca65a --- /dev/null +++ b/kernel/kcsan/test.c @@ -0,0 +1,121 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include + +#include "encoding.h" + +#define ITERS_PER_TEST 2000 + +/* Test requirements. */ +static bool test_requires(void) +{ + /* random should be initialized for the below tests */ + return prandom_u32() + prandom_u32() != 0; +} + +/* + * Test watchpoint encode and decode: check that encoding some access's info, + * and then subsequent decode preserves the access's info. 
+ */ +static bool test_encode_decode(void) +{ + int i; + + for (i = 0; i < ITERS_PER_TEST; ++i) { + size_t size = prandom_u32_max(MAX_ENCODABLE_SIZE) + 1; + bool is_write = !!prandom_u32_max(2); + unsigned long addr; + + prandom_bytes(&addr, sizeof(addr)); + if (WARN_ON(!check_encodable(addr, size))) + return false; + + /* encode and decode */ + { + const long encoded_watchpoint = + encode_watchpoint(addr, size, is_write); + unsigned long verif_masked_addr; + size_t verif_size; + bool verif_is_write; + + /* check special watchpoints */ + if (WARN_ON(decode_watchpoint( + INVALID_WATCHPOINT, &verif_masked_addr, + &verif_size, &verif_is_write))) + return false; + if (WARN_ON(decode_watchpoint( + CONSUMED_WATCHPOINT, &verif_masked_addr, + &verif_size, &verif_is_write))) + return false; + + /* check decoding watchpoint returns same data */ + if (WARN_ON(!decode_watchpoint( + encoded_watchpoint, &verif_masked_addr, + &verif_size, &verif_is_write))) + return false; + if (WARN_ON(verif_masked_addr != + (addr & WATCHPOINT_ADDR_MASK))) + goto fail; + if (WARN_ON(verif_size != size)) + goto fail; + if (WARN_ON(is_write != verif_is_write)) + goto fail; + + continue; +fail: + pr_err("%s fail: %s %zu bytes @ %lx -> encoded: %lx -> %s %zu bytes @ %lx\n", + __func__, is_write ? "write" : "read", size, + addr, encoded_watchpoint, + verif_is_write ? "write" : "read", verif_size, + verif_masked_addr); + return false; + } + } + + return true; +} + +/* Test access matching function. */ +static bool test_matching_access(void) +{ + if (WARN_ON(!matching_access(10, 1, 10, 1))) + return false; + if (WARN_ON(!matching_access(10, 2, 11, 1))) + return false; + if (WARN_ON(!matching_access(10, 1, 9, 2))) + return false; + if (WARN_ON(matching_access(10, 1, 11, 1))) + return false; + if (WARN_ON(matching_access(9, 1, 10, 1))) + return false; + return true; +} + +static int __init kcsan_selftest(void) +{ + int passed = 0; + int total = 0; + +#define RUN_TEST(do_test) \ + do { \ + ++total; \ + if (do_test()) \ + ++passed; \ + else \ + pr_err("KCSAN selftest: " #do_test " failed"); \ + } while (0) + + RUN_TEST(test_requires); + RUN_TEST(test_encode_decode); + RUN_TEST(test_matching_access); + + pr_info("KCSAN selftest: %d/%d tests passed\n", passed, total); + if (passed != total) + panic("KCSAN selftests failed"); + return 0; +} +postcore_initcall(kcsan_selftest); diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 93d97f9b01571..35accd1d93ded 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -2086,6 +2086,8 @@ source "lib/Kconfig.kgdb" source "lib/Kconfig.ubsan" +source "lib/Kconfig.kcsan" + config ARCH_HAS_DEVMEM_IS_ALLOWED bool diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan new file mode 100644 index 0000000000000..5dd464e52ab43 --- /dev/null +++ b/lib/Kconfig.kcsan @@ -0,0 +1,118 @@ +# SPDX-License-Identifier: GPL-2.0-only + +config HAVE_ARCH_KCSAN + bool + +menuconfig KCSAN + bool "KCSAN: watchpoint-based dynamic data race detector" + depends on HAVE_ARCH_KCSAN && !KASAN && STACKTRACE + default n + help + Kernel Concurrency Sanitizer is a dynamic data race detector, which + uses a watchpoint-based sampling approach to detect races. See + for more details. + +if KCSAN + +config KCSAN_DEBUG + bool "Debugging of KCSAN internals" + default n + +config KCSAN_SELFTEST + bool "Perform short selftests on boot" + default y + help + Run KCSAN selftests on boot. On test failure, causes kernel to panic. 
+ +config KCSAN_EARLY_ENABLE + bool "Early enable during boot" + default y + help + If KCSAN should be enabled globally as soon as possible. KCSAN can + later be enabled/disabled via debugfs. + +config KCSAN_NUM_WATCHPOINTS + int "Number of available watchpoints" + default 64 + help + Total number of available watchpoints. An address range maps into a + specific watchpoint slot as specified in kernel/kcsan/encoding.h. + Although larger number of watchpoints may not be usable due to + limited number of CPUs, a larger value helps to improve performance + due to reducing cache-line contention. The chosen default is a + conservative value; we should almost never observe "no_capacity" + events (see /sys/kernel/debug/kcsan). + +config KCSAN_UDELAY_TASK + int "Delay in microseconds (for tasks)" + default 80 + help + For tasks, the microsecond delay after setting up a watchpoint. + +config KCSAN_UDELAY_INTERRUPT + int "Delay in microseconds (for interrupts)" + default 20 + help + For interrupts, the microsecond delay after setting up a watchpoint. + Interrupts have tighter latency requirements, and their delay should + be lower than for tasks. + +config KCSAN_DELAY_RANDOMIZE + bool "Randomize above delays" + default y + help + If delays should be randomized, where the maximum is KCSAN_UDELAY_*. + If false, the chosen delays are always KCSAN_UDELAY_* defined above. + +config KCSAN_SKIP_WATCH + int "Skip instructions before setting up watchpoint" + default 4000 + help + The number of per-CPU memory operations to skip, before another + watchpoint is set up, i.e. one in KCSAN_WATCH_SKIP per-CPU + memory operations are used to set up a watchpoint. A smaller value + results in more aggressive race detection, whereas a larger value + improves system performance at the cost of missing some races. + +config KCSAN_SKIP_WATCH_RANDOMIZE + bool "Randomize watchpoint instruction skip count" + default y + help + If instruction skip count should be randomized, where the maximum is + KCSAN_WATCH_SKIP. If false, the chosen value is always + KCSAN_WATCH_SKIP. + +# Note that, while some of the below options could be turned into boot +# parameters, to optimize for the common use-case, we avoid this because: (a) +# it would impact performance (and we want to avoid static branch for all +# {READ,WRITE}_ONCE, atomic_*, bitops, etc.), and (b) complicate the design +# without real benefit. The main purpose of the below options are for use in +# fuzzer configs to control reported data races, and are not expected to be +# switched frequently by a user. + +config KCSAN_REPORT_RACE_UNKNOWN_ORIGIN + bool "Report races of unknown origin" + default y + help + If KCSAN should report races where only one access is known, and the + conflicting access is of unknown origin. This type of race is + reported if it was only possible to infer a race due to a data value + change while an access is being delayed on a watchpoint. + +config KCSAN_REPORT_VALUE_CHANGE_ONLY + bool "Only report races where watcher observed a data value change" + default y + help + If enabled and a conflicting write is observed via watchpoint, but + the data value of the memory location was observed to remain + unchanged, do not report the data race. + +config KCSAN_IGNORE_ATOMICS + bool "Do not instrument marked atomic accesses" + default n + help + If enabled, never instruments marked atomic accesses. This results in + not reporting data races where one access is atomic and the other is + a plain access. 
+ +endif # KCSAN diff --git a/lib/Makefile b/lib/Makefile index c5892807e06fa..778ab704e3ad5 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -24,6 +24,9 @@ KASAN_SANITIZE_string.o := n CFLAGS_string.o := $(call cc-option, -fno-stack-protector) endif +# Used by KCSAN while enabled, avoid recursion. +KCSAN_SANITIZE_random32.o := n + lib-y := ctype.o string.o vsprintf.o cmdline.o \ rbtree.o radix-tree.o timerqueue.o xarray.o \ idr.o extable.o \ diff --git a/scripts/Makefile.kcsan b/scripts/Makefile.kcsan new file mode 100644 index 0000000000000..caf1111a28ae5 --- /dev/null +++ b/scripts/Makefile.kcsan @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +ifdef CONFIG_KCSAN + +CFLAGS_KCSAN := -fsanitize=thread + +endif # CONFIG_KCSAN diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index 179d55af58529..8952f909f7c9c 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib @@ -152,6 +152,16 @@ _c_flags += $(if $(patsubst n%,, \ $(CFLAGS_KCOV)) endif +# +# Enable KCSAN flags except some files or directories we don't want to check +# (depends on variables KCSAN_SANITIZE_obj.o, KCSAN_SANITIZE) +# +ifeq ($(CONFIG_KCSAN),y) +_c_flags += $(if $(patsubst n%,, \ + $(KCSAN_SANITIZE_$(basetarget).o)$(KCSAN_SANITIZE)y), \ + $(CFLAGS_KCSAN)) +endif + # $(srctree)/$(src) for including checkin headers from generated source files # $(objtree)/$(obj) for including generated headers from checkin source files ifeq ($(KBUILD_EXTMOD),) -- GitLab From c48981eeb0d56e107691df590007d6699441a689 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Thu, 14 Nov 2019 19:02:55 +0100 Subject: [PATCH 0002/1971] include/linux/compiler.h: Introduce data_race(expr) macro This introduces the data_race(expr) macro, which can be used to annotate expressions for purposes of (1) documenting, and (2) giving tooling such as KCSAN information about which data races are deemed "safe". More context: http://lkml.kernel.org/r/CAHk-=wg5CkOEF8DTez1Qu0XTEFw_oHhxN98bDnFqbY7HL5AB2g@mail.gmail.com Signed-off-by: Marco Elver Cc: Alan Stern Cc: Eric Dumazet Cc: Linus Torvalds Cc: Paul E. McKenney Signed-off-by: Paul E. McKenney --- include/linux/compiler.h | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/include/linux/compiler.h b/include/linux/compiler.h index c42fa83f23fb8..7d3e777815782 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -310,6 +310,26 @@ unsigned long read_word_at_a_time(const void *addr) __u.__val; \ }) +#include + +/* + * data_race: macro to document that accesses in an expression may conflict with + * other concurrent accesses resulting in data races, but the resulting + * behaviour is deemed safe regardless. + * + * This macro *does not* affect normal code generation, but is a hint to tooling + * that data races here should be ignored. + */ +#define data_race(expr) \ + ({ \ + typeof(({ expr; })) __val; \ + kcsan_nestable_atomic_begin(); \ + __val = ({ expr; }); \ + kcsan_nestable_atomic_end(); \ + __val; \ + }) +#else + #endif /* __KERNEL__ */ /* -- GitLab From 905e672b3af5d2305f8ed58d68e13843217eaa99 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Thu, 14 Nov 2019 19:02:56 +0100 Subject: [PATCH 0003/1971] kcsan: Add Documentation entry in dev-tools Signed-off-by: Marco Elver Acked-by: Paul E. McKenney Signed-off-by: Paul E. 
McKenney --- Documentation/dev-tools/index.rst | 1 + Documentation/dev-tools/kcsan.rst | 256 ++++++++++++++++++++++++++++++ 2 files changed, 257 insertions(+) create mode 100644 Documentation/dev-tools/kcsan.rst diff --git a/Documentation/dev-tools/index.rst b/Documentation/dev-tools/index.rst index b0522a4dd1073..1b756a7014e0f 100644 --- a/Documentation/dev-tools/index.rst +++ b/Documentation/dev-tools/index.rst @@ -21,6 +21,7 @@ whole; patches welcome! kasan ubsan kmemleak + kcsan gdb-kernel-debugging kgdb kselftest diff --git a/Documentation/dev-tools/kcsan.rst b/Documentation/dev-tools/kcsan.rst new file mode 100644 index 0000000000000..a6f4f92df2fa0 --- /dev/null +++ b/Documentation/dev-tools/kcsan.rst @@ -0,0 +1,256 @@ +The Kernel Concurrency Sanitizer (KCSAN) +======================================== + +Overview +-------- + +*Kernel Concurrency Sanitizer (KCSAN)* is a dynamic data race detector for +kernel space. KCSAN is a sampling watchpoint-based data race detector. Key +priorities in KCSAN's design are lack of false positives, scalability, and +simplicity. More details can be found in `Implementation Details`_. + +KCSAN uses compile-time instrumentation to instrument memory accesses. KCSAN is +supported in both GCC and Clang. With GCC it requires version 7.3.0 or later. +With Clang it requires version 7.0.0 or later. + +Usage +----- + +To enable KCSAN configure kernel with:: + + CONFIG_KCSAN = y + +KCSAN provides several other configuration options to customize behaviour (see +their respective help text for more info). + +Error reports +~~~~~~~~~~~~~ + +A typical data race report looks like this:: + + ================================================================== + BUG: KCSAN: data-race in generic_permission / kernfs_refresh_inode + + write to 0xffff8fee4c40700c of 4 bytes by task 175 on cpu 4: + kernfs_refresh_inode+0x70/0x170 + kernfs_iop_permission+0x4f/0x90 + inode_permission+0x190/0x200 + link_path_walk.part.0+0x503/0x8e0 + path_lookupat.isra.0+0x69/0x4d0 + filename_lookup+0x136/0x280 + user_path_at_empty+0x47/0x60 + vfs_statx+0x9b/0x130 + __do_sys_newlstat+0x50/0xb0 + __x64_sys_newlstat+0x37/0x50 + do_syscall_64+0x85/0x260 + entry_SYSCALL_64_after_hwframe+0x44/0xa9 + + read to 0xffff8fee4c40700c of 4 bytes by task 166 on cpu 6: + generic_permission+0x5b/0x2a0 + kernfs_iop_permission+0x66/0x90 + inode_permission+0x190/0x200 + link_path_walk.part.0+0x503/0x8e0 + path_lookupat.isra.0+0x69/0x4d0 + filename_lookup+0x136/0x280 + user_path_at_empty+0x47/0x60 + do_faccessat+0x11a/0x390 + __x64_sys_access+0x3c/0x50 + do_syscall_64+0x85/0x260 + entry_SYSCALL_64_after_hwframe+0x44/0xa9 + + Reported by Kernel Concurrency Sanitizer on: + CPU: 6 PID: 166 Comm: systemd-journal Not tainted 5.3.0-rc7+ #1 + Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.12.0-1 04/01/2014 + ================================================================== + +The header of the report provides a short summary of the functions involved in +the race. It is followed by the access types and stack traces of the 2 threads +involved in the data race. 
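+
+As a purely illustrative sketch (not taken from the report above; the variable
+and function names below are made up and not actual kernel code), a report of
+this kind typically results from two threads performing conflicting plain
+accesses to the same location without synchronization::
+
+  /* Hypothetical example only. */
+  static int shared_counter;
+
+  void writer_task(void)          /* e.g. runs in one task */
+  {
+          shared_counter = 1;     /* plain write */
+  }
+
+  int reader_task(void)           /* runs concurrently in another task */
+  {
+          return shared_counter;  /* plain read: races with writer_task() */
+  }
+
+Marking both accesses (for example with ``READ_ONCE()``/``WRITE_ONCE()``, or by
+converting the variable to an ``atomic_t``) tells KCSAN the concurrency is
+intentional, and no data race is reported.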
+ +The other less common type of data race report looks like this:: + + ================================================================== + BUG: KCSAN: data-race in e1000_clean_rx_irq+0x551/0xb10 + + race at unknown origin, with read to 0xffff933db8a2ae6c of 1 bytes by interrupt on cpu 0: + e1000_clean_rx_irq+0x551/0xb10 + e1000_clean+0x533/0xda0 + net_rx_action+0x329/0x900 + __do_softirq+0xdb/0x2db + irq_exit+0x9b/0xa0 + do_IRQ+0x9c/0xf0 + ret_from_intr+0x0/0x18 + default_idle+0x3f/0x220 + arch_cpu_idle+0x21/0x30 + do_idle+0x1df/0x230 + cpu_startup_entry+0x14/0x20 + rest_init+0xc5/0xcb + arch_call_rest_init+0x13/0x2b + start_kernel+0x6db/0x700 + + Reported by Kernel Concurrency Sanitizer on: + CPU: 0 PID: 0 Comm: swapper/0 Not tainted 5.3.0-rc7+ #2 + Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.12.0-1 04/01/2014 + ================================================================== + +This report is generated where it was not possible to determine the other +racing thread, but a race was inferred due to the data value of the watched +memory location having changed. These can occur either due to missing +instrumentation or e.g. DMA accesses. + +Selective analysis +~~~~~~~~~~~~~~~~~~ + +To disable KCSAN data race detection for an entire subsystem, add to the +respective ``Makefile``:: + + KCSAN_SANITIZE := n + +To disable KCSAN on a per-file basis, add to the ``Makefile``:: + + KCSAN_SANITIZE_file.o := n + +KCSAN also understands the ``data_race(expr)`` annotation, which tells KCSAN +that any data races due to accesses in ``expr`` should be ignored and resulting +behaviour when encountering a data race is deemed safe. + +debugfs +~~~~~~~ + +* The file ``/sys/kernel/debug/kcsan`` can be read to get stats. + +* KCSAN can be turned on or off by writing ``on`` or ``off`` to + ``/sys/kernel/debug/kcsan``. + +* Writing ``!some_func_name`` to ``/sys/kernel/debug/kcsan`` adds + ``some_func_name`` to the report filter list, which (by default) blacklists + reporting data races where either one of the top stackframes are a function + in the list. + +* Writing either ``blacklist`` or ``whitelist`` to ``/sys/kernel/debug/kcsan`` + changes the report filtering behaviour. For example, the blacklist feature + can be used to silence frequently occurring data races; the whitelist feature + can help with reproduction and testing of fixes. + +Data Races +---------- + +Informally, two operations *conflict* if they access the same memory location, +and at least one of them is a write operation. In an execution, two memory +operations from different threads form a **data race** if they *conflict*, at +least one of them is a *plain access* (non-atomic), and they are *unordered* in +the "happens-before" order according to the `LKMM +<../../tools/memory-model/Documentation/explanation.txt>`_. + +Relationship with the Linux Kernel Memory Model (LKMM) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The LKMM defines the propagation and ordering rules of various memory +operations, which gives developers the ability to reason about concurrent code. +Ultimately this allows to determine the possible executions of concurrent code, +and if that code is free from data races. + +KCSAN is aware of *atomic* accesses (``READ_ONCE``, ``WRITE_ONCE``, +``atomic_*``, etc.), but is oblivious of any ordering guarantees. In other +words, KCSAN assumes that as long as a plain access is not observed to race +with another conflicting access, memory operations are correctly ordered. 
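+
+For instance (an illustrative sketch only; ``shared_state``, ``update()`` and
+``check()`` are made-up names, not kernel code), both accesses below are marked
+and therefore treated as atomic by KCSAN, so they are never reported as a data
+race; KCSAN does not additionally check whether such marked accesses are also
+correctly ordered::
+
+  static int shared_state;
+
+  void update(void)
+  {
+          WRITE_ONCE(shared_state, 1);    /* marked write: assumed atomic */
+  }
+
+  int check(void)
+  {
+          return READ_ONCE(shared_state); /* marked read: assumed atomic */
+  }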
+ +This means that KCSAN will not report *potential* data races due to missing +memory ordering. If, however, missing memory ordering (that is observable with +a particular compiler and architecture) leads to an observable data race (e.g. +entering a critical section erroneously), KCSAN would report the resulting +data race. + +Race conditions vs. data races +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Race conditions are logic bugs, where unexpected interleaving of racing +concurrent operations result in an erroneous state. + +Data races on the other hand are defined at the *memory model/language level*. +Many data races are also harmful race conditions, which a tool like KCSAN +reports! However, not all data races are race conditions and vice-versa. +KCSAN's intent is to report data races according to the LKMM. A data race +detector can only work at the memory model/language level. + +Deeper analysis, to find high-level race conditions only, requires conveying +the intended kernel logic to a tool. This requires (1) the developer writing a +specification or model of their code, and then (2) the tool verifying that the +implementation matches. This has been done for small bits of code using model +checkers and other formal methods, but does not scale to the level of what can +be covered with a dynamic analysis based data race detector such as KCSAN. + +For reasons outlined in this `article `_, +data races can be much more subtle, but can cause no less harm than high-level +race conditions. + +Implementation Details +---------------------- + +The general approach is inspired by `DataCollider +`_. +Unlike DataCollider, KCSAN does not use hardware watchpoints, but instead +relies on compiler instrumentation. Watchpoints are implemented using an +efficient encoding that stores access type, size, and address in a long; the +benefits of using "soft watchpoints" are portability and greater flexibility in +limiting which accesses trigger a watchpoint. + +More specifically, KCSAN requires instrumenting plain (unmarked, non-atomic) +memory operations; for each instrumented plain access: + +1. Check if a matching watchpoint exists; if yes, and at least one access is a + write, then we encountered a racing access. + +2. Periodically, if no matching watchpoint exists, set up a watchpoint and + stall for a small delay. + +3. Also check the data value before the delay, and re-check the data value + after delay; if the values mismatch, we infer a race of unknown origin. + +To detect data races between plain and atomic memory operations, KCSAN also +annotates atomic accesses, but only to check if a watchpoint exists +(``kcsan_check_atomic_*``); i.e. KCSAN never sets up a watchpoint on atomic +accesses. + +Key Properties +~~~~~~~~~~~~~~ + +1. **Memory Overhead:** The current implementation uses a small array of longs + to encode watchpoint information, which is negligible. + +2. **Performance Overhead:** KCSAN's runtime aims to be minimal, using an + efficient watchpoint encoding that does not require acquiring any shared + locks in the fast-path. For kernel boot on a system with 8 CPUs: + + - 5.0x slow-down with the default KCSAN config; + - 2.8x slow-down from runtime fast-path overhead only (set very large + ``KCSAN_SKIP_WATCH`` and unset ``KCSAN_SKIP_WATCH_RANDOMIZE``). + +3. **Annotation Overheads:** Minimal annotations are required outside the KCSAN + runtime. As a result, maintenance overheads are minimal as the kernel + evolves. + +4. 
**Detects Racy Writes from Devices:** Due to checking data values upon + setting up watchpoints, racy writes from devices can also be detected. + +5. **Memory Ordering:** KCSAN is *not* explicitly aware of the LKMM's ordering + rules; this may result in missed data races (false negatives). + +6. **Analysis Accuracy:** For observed executions, due to using a sampling + strategy, the analysis is *unsound* (false negatives possible), but aims to + be complete (no false positives). + +Alternatives Considered +----------------------- + +An alternative data race detection approach for the kernel can be found in +`Kernel Thread Sanitizer (KTSAN) `_. +KTSAN is a happens-before data race detector, which explicitly establishes the +happens-before order between memory operations, which can then be used to +determine data races as defined in `Data Races`_. To build a correct +happens-before relation, KTSAN must be aware of all ordering rules of the LKMM +and synchronization primitives. Unfortunately, any omission leads to false +positives, which is especially important in the context of the kernel which +includes numerous custom synchronization mechanisms. Furthermore, KTSAN's +implementation requires metadata for each memory location (shadow memory); +currently, for each page, KTSAN requires 4 pages of shadow memory. -- GitLab From 5f5c971292b43fed68273d5cba7202f6bd953df9 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Thu, 14 Nov 2019 19:02:57 +0100 Subject: [PATCH 0004/1971] objtool, kcsan: Add KCSAN runtime functions to whitelist This patch adds KCSAN runtime functions to the objtool whitelist. Signed-off-by: Marco Elver Acked-by: Paul E. McKenney Signed-off-by: Paul E. McKenney --- tools/objtool/check.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 044c9a3cb2472..e022a9a00ca12 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -466,6 +466,24 @@ static const char *uaccess_safe_builtin[] = { "__asan_report_store4_noabort", "__asan_report_store8_noabort", "__asan_report_store16_noabort", + /* KCSAN */ + "kcsan_found_watchpoint", + "kcsan_setup_watchpoint", + /* KCSAN/TSAN */ + "__tsan_func_entry", + "__tsan_func_exit", + "__tsan_read_range", + "__tsan_write_range", + "__tsan_read1", + "__tsan_read2", + "__tsan_read4", + "__tsan_read8", + "__tsan_read16", + "__tsan_write1", + "__tsan_write2", + "__tsan_write4", + "__tsan_write8", + "__tsan_write16", /* KCOV */ "write_comp_data", "__sanitizer_cov_trace_pc", -- GitLab From 0ebba7141eadc4804ec5b4bb5106331b0c3abf4c Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Thu, 14 Nov 2019 19:02:58 +0100 Subject: [PATCH 0005/1971] build, kcsan: Add KCSAN build exceptions This blacklists several compilation units from KCSAN. See the respective inline comments for the reasoning. Signed-off-by: Marco Elver Acked-by: Paul E. McKenney Signed-off-by: Paul E. McKenney --- drivers/firmware/efi/libstub/Makefile | 2 ++ kernel/Makefile | 5 +++++ kernel/sched/Makefile | 6 ++++++ mm/Makefile | 8 ++++++++ 4 files changed, 21 insertions(+) diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index ee0661ddb25bb..5d0a645c0de87 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -31,7 +31,9 @@ KBUILD_CFLAGS := $(cflags-y) -DDISABLE_BRANCH_PROFILING \ -D__DISABLE_EXPORTS GCOV_PROFILE := n +# Sanitizer runtimes are unavailable and cannot be linked here. 
KASAN_SANITIZE := n +KCSAN_SANITIZE := n UBSAN_SANITIZE := n OBJECT_FILES_NON_STANDARD := y diff --git a/kernel/Makefile b/kernel/Makefile index 74ab46e2ebd14..cc53f7c254463 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -23,6 +23,9 @@ endif # Prevents flicker of uninteresting __do_softirq()/__local_bh_disable_ip() # in coverage traces. KCOV_INSTRUMENT_softirq.o := n +# Avoid KCSAN instrumentation in softirq ("No shared variables, all the data +# are CPU local" => assume no data races), to reduce overhead in interrupts. +KCSAN_SANITIZE_softirq.o = n # These are called from save_stack_trace() on slub debug path, # and produce insane amounts of uninteresting coverage. KCOV_INSTRUMENT_module.o := n @@ -30,6 +33,7 @@ KCOV_INSTRUMENT_extable.o := n # Don't self-instrument. KCOV_INSTRUMENT_kcov.o := n KASAN_SANITIZE_kcov.o := n +KCSAN_SANITIZE_kcov.o := n CFLAGS_kcov.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) # cond_syscall is currently not LTO compatible @@ -118,6 +122,7 @@ obj-$(CONFIG_RSEQ) += rseq.o obj-$(CONFIG_GCC_PLUGIN_STACKLEAK) += stackleak.o KASAN_SANITIZE_stackleak.o := n +KCSAN_SANITIZE_stackleak.o := n KCOV_INSTRUMENT_stackleak.o := n $(obj)/configs.o: $(obj)/config_data.gz diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile index 21fb5a5662b51..e9307a9c54e77 100644 --- a/kernel/sched/Makefile +++ b/kernel/sched/Makefile @@ -7,6 +7,12 @@ endif # that is not a function of syscall inputs. E.g. involuntary context switches. KCOV_INSTRUMENT := n +# There are numerous races here, however, most of them due to plain accesses. +# This would make it even harder for syzbot to find reproducers, because these +# bugs trigger without specific input. Disable by default, but should re-enable +# eventually. +KCSAN_SANITIZE := n + ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y) # According to Alan Modra , the -fno-omit-frame-pointer is # needed for x86 only. Why this used to be enabled for all architectures is beyond diff --git a/mm/Makefile b/mm/Makefile index d996846697ef5..56c1964bb3a1d 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -7,6 +7,14 @@ KASAN_SANITIZE_slab_common.o := n KASAN_SANITIZE_slab.o := n KASAN_SANITIZE_slub.o := n +# These produce frequent data race reports: most of them are due to races on +# the same word but accesses to different bits of that word. Re-enable KCSAN +# for these when we have more consensus on what to do about them. +KCSAN_SANITIZE_slab_common.o := n +KCSAN_SANITIZE_slab.o := n +KCSAN_SANITIZE_slub.o := n +KCSAN_SANITIZE_page_alloc.o := n + # These files are disabled because they produce non-interesting and/or # flaky coverage that is not a function of syscall inputs. E.g. slab is out of # free pages, or a task is migrated between nodes. -- GitLab From 88ecd153be9519f259b87a9f6f4c8383a8b3bbf1 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Thu, 14 Nov 2019 19:02:59 +0100 Subject: [PATCH 0006/1971] seqlock, kcsan: Add annotations for KCSAN Since seqlocks in the Linux kernel do not require the use of marked atomic accesses in critical sections, we teach KCSAN to assume such accesses are atomic. KCSAN currently also pretends that writes to `sequence` are atomic, although currently plain writes are used (their corresponding reads are READ_ONCE). Further, to avoid false positives in the absence of clear ending of a seqlock reader critical section (only when using the raw interface), KCSAN assumes a fixed number of accesses after start of a seqlock critical section are atomic. 
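
To make the target usage concrete, the common seqcount reader pattern these
annotations are designed around looks roughly as follows (a sketch only;
"data_seq", "data_x", "data_y" and "read_data" are illustrative names, not
existing kernel code):

	/* Illustrative declarations only: */
	static seqcount_t data_seq;
	static int data_x, data_y;

	int read_data(void)
	{
		unsigned int seq;
		int snapshot;

		do {
			seq = read_seqcount_begin(&data_seq);
			/* Plain reads of the protected data: after begin,
			 * KCSAN treats up to KCSAN_SEQLOCK_REGION_MAX
			 * following accesses as atomic. */
			snapshot = data_x + data_y;
		} while (read_seqcount_retry(&data_seq, seq));
		/* read_seqcount_retry() ends the assumed-atomic region via
		 * kcsan_atomic_next(0). */

		return snapshot;
	}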
=== Commentary on design around absence of clear begin/end markings === Seqlock usage via seqlock_t follows a predictable usage pattern, where clear critical section begin/end is enforced. With subtle special cases for readers needing to be flat atomic regions, e.g. because usage such as in: - fs/namespace.c:__legitimize_mnt - unbalanced read_seqretry - fs/dcache.c:d_walk - unbalanced need_seqretry But, anything directly accessing seqcount_t seems to be unpredictable. Filtering for usage of read_seqcount_retry not following 'do { .. } while (read_seqcount_retry(..));': $ git grep 'read_seqcount_retry' | grep -Ev 'while \(|seqlock.h|Doc|\* ' => about 1/3 of the total read_seqcount_retry usage. Just looking at fs/namei.c, we conclude that it is non-trivial to prescribe and migrate to an interface that would force clear begin/end seqlock markings for critical sections. As such, we concluded that the best design currently, is to simply ensure that KCSAN works well with the existing code. Signed-off-by: Marco Elver Acked-by: Paul E. McKenney Signed-off-by: Paul E. McKenney --- include/linux/seqlock.h | 40 ++++++++++++++++++++++++++++++++++++++-- 1 file changed, 38 insertions(+), 2 deletions(-) diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index bcf4cf26b8c89..61232bc223fdf 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -37,8 +37,24 @@ #include #include #include +#include #include +/* + * The seqlock interface does not prescribe a precise sequence of read + * begin/retry/end. For readers, typically there is a call to + * read_seqcount_begin() and read_seqcount_retry(), however, there are more + * esoteric cases which do not follow this pattern. + * + * As a consequence, we take the following best-effort approach for raw usage + * via seqcount_t under KCSAN: upon beginning a seq-reader critical section, + * pessimistically mark then next KCSAN_SEQLOCK_REGION_MAX memory accesses as + * atomics; if there is a matching read_seqcount_retry() call, no following + * memory operations are considered atomic. Usage of seqlocks via seqlock_t + * interface is not affected. + */ +#define KCSAN_SEQLOCK_REGION_MAX 1000 + /* * Version using sequence counter only. 
* This can be used when code has its own mutex protecting the @@ -115,6 +131,7 @@ repeat: cpu_relax(); goto repeat; } + kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); return ret; } @@ -131,6 +148,7 @@ static inline unsigned raw_read_seqcount(const seqcount_t *s) { unsigned ret = READ_ONCE(s->sequence); smp_rmb(); + kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); return ret; } @@ -183,6 +201,7 @@ static inline unsigned raw_seqcount_begin(const seqcount_t *s) { unsigned ret = READ_ONCE(s->sequence); smp_rmb(); + kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); return ret & ~1; } @@ -202,7 +221,8 @@ static inline unsigned raw_seqcount_begin(const seqcount_t *s) */ static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start) { - return unlikely(s->sequence != start); + kcsan_atomic_next(0); + return unlikely(READ_ONCE(s->sequence) != start); } /** @@ -225,6 +245,7 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start) static inline void raw_write_seqcount_begin(seqcount_t *s) { + kcsan_nestable_atomic_begin(); s->sequence++; smp_wmb(); } @@ -233,6 +254,7 @@ static inline void raw_write_seqcount_end(seqcount_t *s) { smp_wmb(); s->sequence++; + kcsan_nestable_atomic_end(); } /** @@ -271,9 +293,11 @@ static inline void raw_write_seqcount_end(seqcount_t *s) */ static inline void raw_write_seqcount_barrier(seqcount_t *s) { + kcsan_nestable_atomic_begin(); s->sequence++; smp_wmb(); s->sequence++; + kcsan_nestable_atomic_end(); } static inline int raw_read_seqcount_latch(seqcount_t *s) @@ -398,7 +422,9 @@ static inline void write_seqcount_end(seqcount_t *s) static inline void write_seqcount_invalidate(seqcount_t *s) { smp_wmb(); + kcsan_nestable_atomic_begin(); s->sequence+=2; + kcsan_nestable_atomic_end(); } typedef struct { @@ -430,11 +456,21 @@ typedef struct { */ static inline unsigned read_seqbegin(const seqlock_t *sl) { - return read_seqcount_begin(&sl->seqcount); + unsigned ret = read_seqcount_begin(&sl->seqcount); + + kcsan_atomic_next(0); /* non-raw usage, assume closing read_seqretry */ + kcsan_flat_atomic_begin(); + return ret; } static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) { + /* + * Assume not nested: read_seqretry may be called multiple times when + * completing read critical section. + */ + kcsan_flat_atomic_end(); + return read_seqcount_retry(&sl->seqcount, start); } -- GitLab From bf07132f96d426bcbf2098227fb680915cf44498 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Thu, 14 Nov 2019 19:03:00 +0100 Subject: [PATCH 0007/1971] seqlock: Require WRITE_ONCE surrounding raw_seqcount_barrier This patch proposes to require marked atomic accesses surrounding raw_write_seqcount_barrier. We reason that otherwise there is no way to guarantee propagation nor atomicity of writes before/after the barrier [1]. For example, consider the compiler tears stores either before or after the barrier; in this case, readers may observe a partial value, and because readers are unaware that writes are going on (writes are not in a seq-writer critical section), will complete the seq-reader critical section while having observed some partial state. [1] https://lwn.net/Articles/793253/ This came up when designing and implementing KCSAN, because KCSAN would flag these accesses as data-races. After careful analysis, our reasoning as above led us to conclude that the best thing to do is to propose an amendment to the raw_seqcount_barrier usage. Signed-off-by: Marco Elver Acked-by: Paul E. McKenney Signed-off-by: Paul E. 
McKenney --- include/linux/seqlock.h | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 61232bc223fdf..f52c91be89397 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -265,6 +265,13 @@ static inline void raw_write_seqcount_end(seqcount_t *s) * usual consistency guarantee. It is one wmb cheaper, because we can * collapse the two back-to-back wmb()s. * + * Note that, writes surrounding the barrier should be declared atomic (e.g. + * via WRITE_ONCE): a) to ensure the writes become visible to other threads + * atomically, avoiding compiler optimizations; b) to document which writes are + * meant to propagate to the reader critical section. This is necessary because + * neither writes before and after the barrier are enclosed in a seq-writer + * critical section that would ensure readers are aware of ongoing writes. + * * seqcount_t seq; * bool X = true, Y = false; * @@ -284,11 +291,11 @@ static inline void raw_write_seqcount_end(seqcount_t *s) * * void write(void) * { - * Y = true; + * WRITE_ONCE(Y, true); * * raw_write_seqcount_barrier(seq); * - * X = false; + * WRITE_ONCE(X, false); * } */ static inline void raw_write_seqcount_barrier(seqcount_t *s) -- GitLab From e75a6795ed132f26f69d08dea958630d5993056d Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Thu, 14 Nov 2019 19:03:02 +0100 Subject: [PATCH 0008/1971] locking/atomics, kcsan: Add KCSAN instrumentation This adds KCSAN instrumentation to atomic-instrumented.h. Signed-off-by: Marco Elver Reviewed-by: Mark Rutland Acked-by: Paul E. McKenney Signed-off-by: Paul E. McKenney --- include/asm-generic/atomic-instrumented.h | 393 +++++++++++----------- scripts/atomic/gen-atomic-instrumented.sh | 17 +- 2 files changed, 218 insertions(+), 192 deletions(-) diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h index e8730c6b9fe2c..3dc0f38544f65 100644 --- a/include/asm-generic/atomic-instrumented.h +++ b/include/asm-generic/atomic-instrumented.h @@ -19,11 +19,24 @@ #include #include +#include + +static inline void __atomic_check_read(const volatile void *v, size_t size) +{ + kasan_check_read(v, size); + kcsan_check_atomic_read(v, size); +} + +static inline void __atomic_check_write(const volatile void *v, size_t size) +{ + kasan_check_write(v, size); + kcsan_check_atomic_write(v, size); +} static inline int atomic_read(const atomic_t *v) { - kasan_check_read(v, sizeof(*v)); + __atomic_check_read(v, sizeof(*v)); return arch_atomic_read(v); } #define atomic_read atomic_read @@ -32,7 +45,7 @@ atomic_read(const atomic_t *v) static inline int atomic_read_acquire(const atomic_t *v) { - kasan_check_read(v, sizeof(*v)); + __atomic_check_read(v, sizeof(*v)); return arch_atomic_read_acquire(v); } #define atomic_read_acquire atomic_read_acquire @@ -41,7 +54,7 @@ atomic_read_acquire(const atomic_t *v) static inline void atomic_set(atomic_t *v, int i) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); arch_atomic_set(v, i); } #define atomic_set atomic_set @@ -50,7 +63,7 @@ atomic_set(atomic_t *v, int i) static inline void atomic_set_release(atomic_t *v, int i) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); arch_atomic_set_release(v, i); } #define atomic_set_release atomic_set_release @@ -59,7 +72,7 @@ atomic_set_release(atomic_t *v, int i) static inline void atomic_add(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, 
sizeof(*v)); arch_atomic_add(i, v); } #define atomic_add atomic_add @@ -68,7 +81,7 @@ atomic_add(int i, atomic_t *v) static inline int atomic_add_return(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_add_return(i, v); } #define atomic_add_return atomic_add_return @@ -78,7 +91,7 @@ atomic_add_return(int i, atomic_t *v) static inline int atomic_add_return_acquire(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_add_return_acquire(i, v); } #define atomic_add_return_acquire atomic_add_return_acquire @@ -88,7 +101,7 @@ atomic_add_return_acquire(int i, atomic_t *v) static inline int atomic_add_return_release(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_add_return_release(i, v); } #define atomic_add_return_release atomic_add_return_release @@ -98,7 +111,7 @@ atomic_add_return_release(int i, atomic_t *v) static inline int atomic_add_return_relaxed(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_add_return_relaxed(i, v); } #define atomic_add_return_relaxed atomic_add_return_relaxed @@ -108,7 +121,7 @@ atomic_add_return_relaxed(int i, atomic_t *v) static inline int atomic_fetch_add(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_add(i, v); } #define atomic_fetch_add atomic_fetch_add @@ -118,7 +131,7 @@ atomic_fetch_add(int i, atomic_t *v) static inline int atomic_fetch_add_acquire(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_add_acquire(i, v); } #define atomic_fetch_add_acquire atomic_fetch_add_acquire @@ -128,7 +141,7 @@ atomic_fetch_add_acquire(int i, atomic_t *v) static inline int atomic_fetch_add_release(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_add_release(i, v); } #define atomic_fetch_add_release atomic_fetch_add_release @@ -138,7 +151,7 @@ atomic_fetch_add_release(int i, atomic_t *v) static inline int atomic_fetch_add_relaxed(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_add_relaxed(i, v); } #define atomic_fetch_add_relaxed atomic_fetch_add_relaxed @@ -147,7 +160,7 @@ atomic_fetch_add_relaxed(int i, atomic_t *v) static inline void atomic_sub(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); arch_atomic_sub(i, v); } #define atomic_sub atomic_sub @@ -156,7 +169,7 @@ atomic_sub(int i, atomic_t *v) static inline int atomic_sub_return(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_sub_return(i, v); } #define atomic_sub_return atomic_sub_return @@ -166,7 +179,7 @@ atomic_sub_return(int i, atomic_t *v) static inline int atomic_sub_return_acquire(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_sub_return_acquire(i, v); } #define atomic_sub_return_acquire atomic_sub_return_acquire @@ -176,7 +189,7 @@ atomic_sub_return_acquire(int i, atomic_t *v) static inline int atomic_sub_return_release(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_sub_return_release(i, v); } #define 
atomic_sub_return_release atomic_sub_return_release @@ -186,7 +199,7 @@ atomic_sub_return_release(int i, atomic_t *v) static inline int atomic_sub_return_relaxed(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_sub_return_relaxed(i, v); } #define atomic_sub_return_relaxed atomic_sub_return_relaxed @@ -196,7 +209,7 @@ atomic_sub_return_relaxed(int i, atomic_t *v) static inline int atomic_fetch_sub(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_sub(i, v); } #define atomic_fetch_sub atomic_fetch_sub @@ -206,7 +219,7 @@ atomic_fetch_sub(int i, atomic_t *v) static inline int atomic_fetch_sub_acquire(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_sub_acquire(i, v); } #define atomic_fetch_sub_acquire atomic_fetch_sub_acquire @@ -216,7 +229,7 @@ atomic_fetch_sub_acquire(int i, atomic_t *v) static inline int atomic_fetch_sub_release(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_sub_release(i, v); } #define atomic_fetch_sub_release atomic_fetch_sub_release @@ -226,7 +239,7 @@ atomic_fetch_sub_release(int i, atomic_t *v) static inline int atomic_fetch_sub_relaxed(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_sub_relaxed(i, v); } #define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed @@ -236,7 +249,7 @@ atomic_fetch_sub_relaxed(int i, atomic_t *v) static inline void atomic_inc(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); arch_atomic_inc(v); } #define atomic_inc atomic_inc @@ -246,7 +259,7 @@ atomic_inc(atomic_t *v) static inline int atomic_inc_return(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_inc_return(v); } #define atomic_inc_return atomic_inc_return @@ -256,7 +269,7 @@ atomic_inc_return(atomic_t *v) static inline int atomic_inc_return_acquire(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_inc_return_acquire(v); } #define atomic_inc_return_acquire atomic_inc_return_acquire @@ -266,7 +279,7 @@ atomic_inc_return_acquire(atomic_t *v) static inline int atomic_inc_return_release(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_inc_return_release(v); } #define atomic_inc_return_release atomic_inc_return_release @@ -276,7 +289,7 @@ atomic_inc_return_release(atomic_t *v) static inline int atomic_inc_return_relaxed(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_inc_return_relaxed(v); } #define atomic_inc_return_relaxed atomic_inc_return_relaxed @@ -286,7 +299,7 @@ atomic_inc_return_relaxed(atomic_t *v) static inline int atomic_fetch_inc(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_inc(v); } #define atomic_fetch_inc atomic_fetch_inc @@ -296,7 +309,7 @@ atomic_fetch_inc(atomic_t *v) static inline int atomic_fetch_inc_acquire(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_inc_acquire(v); } #define atomic_fetch_inc_acquire atomic_fetch_inc_acquire @@ -306,7 +319,7 @@ atomic_fetch_inc_acquire(atomic_t *v) static inline int 
atomic_fetch_inc_release(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_inc_release(v); } #define atomic_fetch_inc_release atomic_fetch_inc_release @@ -316,7 +329,7 @@ atomic_fetch_inc_release(atomic_t *v) static inline int atomic_fetch_inc_relaxed(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_inc_relaxed(v); } #define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed @@ -326,7 +339,7 @@ atomic_fetch_inc_relaxed(atomic_t *v) static inline void atomic_dec(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); arch_atomic_dec(v); } #define atomic_dec atomic_dec @@ -336,7 +349,7 @@ atomic_dec(atomic_t *v) static inline int atomic_dec_return(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_dec_return(v); } #define atomic_dec_return atomic_dec_return @@ -346,7 +359,7 @@ atomic_dec_return(atomic_t *v) static inline int atomic_dec_return_acquire(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_dec_return_acquire(v); } #define atomic_dec_return_acquire atomic_dec_return_acquire @@ -356,7 +369,7 @@ atomic_dec_return_acquire(atomic_t *v) static inline int atomic_dec_return_release(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_dec_return_release(v); } #define atomic_dec_return_release atomic_dec_return_release @@ -366,7 +379,7 @@ atomic_dec_return_release(atomic_t *v) static inline int atomic_dec_return_relaxed(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_dec_return_relaxed(v); } #define atomic_dec_return_relaxed atomic_dec_return_relaxed @@ -376,7 +389,7 @@ atomic_dec_return_relaxed(atomic_t *v) static inline int atomic_fetch_dec(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_dec(v); } #define atomic_fetch_dec atomic_fetch_dec @@ -386,7 +399,7 @@ atomic_fetch_dec(atomic_t *v) static inline int atomic_fetch_dec_acquire(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_dec_acquire(v); } #define atomic_fetch_dec_acquire atomic_fetch_dec_acquire @@ -396,7 +409,7 @@ atomic_fetch_dec_acquire(atomic_t *v) static inline int atomic_fetch_dec_release(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_dec_release(v); } #define atomic_fetch_dec_release atomic_fetch_dec_release @@ -406,7 +419,7 @@ atomic_fetch_dec_release(atomic_t *v) static inline int atomic_fetch_dec_relaxed(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_dec_relaxed(v); } #define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed @@ -415,7 +428,7 @@ atomic_fetch_dec_relaxed(atomic_t *v) static inline void atomic_and(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); arch_atomic_and(i, v); } #define atomic_and atomic_and @@ -424,7 +437,7 @@ atomic_and(int i, atomic_t *v) static inline int atomic_fetch_and(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_and(i, v); } #define atomic_fetch_and atomic_fetch_and @@ -434,7 +447,7 @@ atomic_fetch_and(int i, 
atomic_t *v) static inline int atomic_fetch_and_acquire(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_and_acquire(i, v); } #define atomic_fetch_and_acquire atomic_fetch_and_acquire @@ -444,7 +457,7 @@ atomic_fetch_and_acquire(int i, atomic_t *v) static inline int atomic_fetch_and_release(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_and_release(i, v); } #define atomic_fetch_and_release atomic_fetch_and_release @@ -454,7 +467,7 @@ atomic_fetch_and_release(int i, atomic_t *v) static inline int atomic_fetch_and_relaxed(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_and_relaxed(i, v); } #define atomic_fetch_and_relaxed atomic_fetch_and_relaxed @@ -464,7 +477,7 @@ atomic_fetch_and_relaxed(int i, atomic_t *v) static inline void atomic_andnot(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); arch_atomic_andnot(i, v); } #define atomic_andnot atomic_andnot @@ -474,7 +487,7 @@ atomic_andnot(int i, atomic_t *v) static inline int atomic_fetch_andnot(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_andnot(i, v); } #define atomic_fetch_andnot atomic_fetch_andnot @@ -484,7 +497,7 @@ atomic_fetch_andnot(int i, atomic_t *v) static inline int atomic_fetch_andnot_acquire(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_andnot_acquire(i, v); } #define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire @@ -494,7 +507,7 @@ atomic_fetch_andnot_acquire(int i, atomic_t *v) static inline int atomic_fetch_andnot_release(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_andnot_release(i, v); } #define atomic_fetch_andnot_release atomic_fetch_andnot_release @@ -504,7 +517,7 @@ atomic_fetch_andnot_release(int i, atomic_t *v) static inline int atomic_fetch_andnot_relaxed(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_andnot_relaxed(i, v); } #define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed @@ -513,7 +526,7 @@ atomic_fetch_andnot_relaxed(int i, atomic_t *v) static inline void atomic_or(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); arch_atomic_or(i, v); } #define atomic_or atomic_or @@ -522,7 +535,7 @@ atomic_or(int i, atomic_t *v) static inline int atomic_fetch_or(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_or(i, v); } #define atomic_fetch_or atomic_fetch_or @@ -532,7 +545,7 @@ atomic_fetch_or(int i, atomic_t *v) static inline int atomic_fetch_or_acquire(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_or_acquire(i, v); } #define atomic_fetch_or_acquire atomic_fetch_or_acquire @@ -542,7 +555,7 @@ atomic_fetch_or_acquire(int i, atomic_t *v) static inline int atomic_fetch_or_release(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_or_release(i, v); } #define atomic_fetch_or_release atomic_fetch_or_release @@ -552,7 +565,7 @@ atomic_fetch_or_release(int i, atomic_t *v) static 
inline int atomic_fetch_or_relaxed(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_or_relaxed(i, v); } #define atomic_fetch_or_relaxed atomic_fetch_or_relaxed @@ -561,7 +574,7 @@ atomic_fetch_or_relaxed(int i, atomic_t *v) static inline void atomic_xor(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); arch_atomic_xor(i, v); } #define atomic_xor atomic_xor @@ -570,7 +583,7 @@ atomic_xor(int i, atomic_t *v) static inline int atomic_fetch_xor(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_xor(i, v); } #define atomic_fetch_xor atomic_fetch_xor @@ -580,7 +593,7 @@ atomic_fetch_xor(int i, atomic_t *v) static inline int atomic_fetch_xor_acquire(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_xor_acquire(i, v); } #define atomic_fetch_xor_acquire atomic_fetch_xor_acquire @@ -590,7 +603,7 @@ atomic_fetch_xor_acquire(int i, atomic_t *v) static inline int atomic_fetch_xor_release(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_xor_release(i, v); } #define atomic_fetch_xor_release atomic_fetch_xor_release @@ -600,7 +613,7 @@ atomic_fetch_xor_release(int i, atomic_t *v) static inline int atomic_fetch_xor_relaxed(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_xor_relaxed(i, v); } #define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed @@ -610,7 +623,7 @@ atomic_fetch_xor_relaxed(int i, atomic_t *v) static inline int atomic_xchg(atomic_t *v, int i) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_xchg(v, i); } #define atomic_xchg atomic_xchg @@ -620,7 +633,7 @@ atomic_xchg(atomic_t *v, int i) static inline int atomic_xchg_acquire(atomic_t *v, int i) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_xchg_acquire(v, i); } #define atomic_xchg_acquire atomic_xchg_acquire @@ -630,7 +643,7 @@ atomic_xchg_acquire(atomic_t *v, int i) static inline int atomic_xchg_release(atomic_t *v, int i) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_xchg_release(v, i); } #define atomic_xchg_release atomic_xchg_release @@ -640,7 +653,7 @@ atomic_xchg_release(atomic_t *v, int i) static inline int atomic_xchg_relaxed(atomic_t *v, int i) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_xchg_relaxed(v, i); } #define atomic_xchg_relaxed atomic_xchg_relaxed @@ -650,7 +663,7 @@ atomic_xchg_relaxed(atomic_t *v, int i) static inline int atomic_cmpxchg(atomic_t *v, int old, int new) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_cmpxchg(v, old, new); } #define atomic_cmpxchg atomic_cmpxchg @@ -660,7 +673,7 @@ atomic_cmpxchg(atomic_t *v, int old, int new) static inline int atomic_cmpxchg_acquire(atomic_t *v, int old, int new) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_cmpxchg_acquire(v, old, new); } #define atomic_cmpxchg_acquire atomic_cmpxchg_acquire @@ -670,7 +683,7 @@ atomic_cmpxchg_acquire(atomic_t *v, int old, int new) static inline int atomic_cmpxchg_release(atomic_t *v, int old, int new) { - kasan_check_write(v, sizeof(*v)); + 
__atomic_check_write(v, sizeof(*v)); return arch_atomic_cmpxchg_release(v, old, new); } #define atomic_cmpxchg_release atomic_cmpxchg_release @@ -680,7 +693,7 @@ atomic_cmpxchg_release(atomic_t *v, int old, int new) static inline int atomic_cmpxchg_relaxed(atomic_t *v, int old, int new) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_cmpxchg_relaxed(v, old, new); } #define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed @@ -690,8 +703,8 @@ atomic_cmpxchg_relaxed(atomic_t *v, int old, int new) static inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new) { - kasan_check_write(v, sizeof(*v)); - kasan_check_write(old, sizeof(*old)); + __atomic_check_write(v, sizeof(*v)); + __atomic_check_write(old, sizeof(*old)); return arch_atomic_try_cmpxchg(v, old, new); } #define atomic_try_cmpxchg atomic_try_cmpxchg @@ -701,8 +714,8 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new) static inline bool atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) { - kasan_check_write(v, sizeof(*v)); - kasan_check_write(old, sizeof(*old)); + __atomic_check_write(v, sizeof(*v)); + __atomic_check_write(old, sizeof(*old)); return arch_atomic_try_cmpxchg_acquire(v, old, new); } #define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire @@ -712,8 +725,8 @@ atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) static inline bool atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) { - kasan_check_write(v, sizeof(*v)); - kasan_check_write(old, sizeof(*old)); + __atomic_check_write(v, sizeof(*v)); + __atomic_check_write(old, sizeof(*old)); return arch_atomic_try_cmpxchg_release(v, old, new); } #define atomic_try_cmpxchg_release atomic_try_cmpxchg_release @@ -723,8 +736,8 @@ atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) static inline bool atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) { - kasan_check_write(v, sizeof(*v)); - kasan_check_write(old, sizeof(*old)); + __atomic_check_write(v, sizeof(*v)); + __atomic_check_write(old, sizeof(*old)); return arch_atomic_try_cmpxchg_relaxed(v, old, new); } #define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed @@ -734,7 +747,7 @@ atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) static inline bool atomic_sub_and_test(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_sub_and_test(i, v); } #define atomic_sub_and_test atomic_sub_and_test @@ -744,7 +757,7 @@ atomic_sub_and_test(int i, atomic_t *v) static inline bool atomic_dec_and_test(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_dec_and_test(v); } #define atomic_dec_and_test atomic_dec_and_test @@ -754,7 +767,7 @@ atomic_dec_and_test(atomic_t *v) static inline bool atomic_inc_and_test(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_inc_and_test(v); } #define atomic_inc_and_test atomic_inc_and_test @@ -764,7 +777,7 @@ atomic_inc_and_test(atomic_t *v) static inline bool atomic_add_negative(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_add_negative(i, v); } #define atomic_add_negative atomic_add_negative @@ -774,7 +787,7 @@ atomic_add_negative(int i, atomic_t *v) static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_fetch_add_unless(v, a, u); } #define 
atomic_fetch_add_unless atomic_fetch_add_unless @@ -784,7 +797,7 @@ atomic_fetch_add_unless(atomic_t *v, int a, int u) static inline bool atomic_add_unless(atomic_t *v, int a, int u) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_add_unless(v, a, u); } #define atomic_add_unless atomic_add_unless @@ -794,7 +807,7 @@ atomic_add_unless(atomic_t *v, int a, int u) static inline bool atomic_inc_not_zero(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_inc_not_zero(v); } #define atomic_inc_not_zero atomic_inc_not_zero @@ -804,7 +817,7 @@ atomic_inc_not_zero(atomic_t *v) static inline bool atomic_inc_unless_negative(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_inc_unless_negative(v); } #define atomic_inc_unless_negative atomic_inc_unless_negative @@ -814,7 +827,7 @@ atomic_inc_unless_negative(atomic_t *v) static inline bool atomic_dec_unless_positive(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_dec_unless_positive(v); } #define atomic_dec_unless_positive atomic_dec_unless_positive @@ -824,7 +837,7 @@ atomic_dec_unless_positive(atomic_t *v) static inline int atomic_dec_if_positive(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic_dec_if_positive(v); } #define atomic_dec_if_positive atomic_dec_if_positive @@ -833,7 +846,7 @@ atomic_dec_if_positive(atomic_t *v) static inline s64 atomic64_read(const atomic64_t *v) { - kasan_check_read(v, sizeof(*v)); + __atomic_check_read(v, sizeof(*v)); return arch_atomic64_read(v); } #define atomic64_read atomic64_read @@ -842,7 +855,7 @@ atomic64_read(const atomic64_t *v) static inline s64 atomic64_read_acquire(const atomic64_t *v) { - kasan_check_read(v, sizeof(*v)); + __atomic_check_read(v, sizeof(*v)); return arch_atomic64_read_acquire(v); } #define atomic64_read_acquire atomic64_read_acquire @@ -851,7 +864,7 @@ atomic64_read_acquire(const atomic64_t *v) static inline void atomic64_set(atomic64_t *v, s64 i) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); arch_atomic64_set(v, i); } #define atomic64_set atomic64_set @@ -860,7 +873,7 @@ atomic64_set(atomic64_t *v, s64 i) static inline void atomic64_set_release(atomic64_t *v, s64 i) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); arch_atomic64_set_release(v, i); } #define atomic64_set_release atomic64_set_release @@ -869,7 +882,7 @@ atomic64_set_release(atomic64_t *v, s64 i) static inline void atomic64_add(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); arch_atomic64_add(i, v); } #define atomic64_add atomic64_add @@ -878,7 +891,7 @@ atomic64_add(s64 i, atomic64_t *v) static inline s64 atomic64_add_return(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_add_return(i, v); } #define atomic64_add_return atomic64_add_return @@ -888,7 +901,7 @@ atomic64_add_return(s64 i, atomic64_t *v) static inline s64 atomic64_add_return_acquire(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_add_return_acquire(i, v); } #define atomic64_add_return_acquire atomic64_add_return_acquire @@ -898,7 +911,7 @@ atomic64_add_return_acquire(s64 i, atomic64_t *v) static inline s64 
atomic64_add_return_release(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_add_return_release(i, v); } #define atomic64_add_return_release atomic64_add_return_release @@ -908,7 +921,7 @@ atomic64_add_return_release(s64 i, atomic64_t *v) static inline s64 atomic64_add_return_relaxed(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_add_return_relaxed(i, v); } #define atomic64_add_return_relaxed atomic64_add_return_relaxed @@ -918,7 +931,7 @@ atomic64_add_return_relaxed(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_add(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_add(i, v); } #define atomic64_fetch_add atomic64_fetch_add @@ -928,7 +941,7 @@ atomic64_fetch_add(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_add_acquire(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_add_acquire(i, v); } #define atomic64_fetch_add_acquire atomic64_fetch_add_acquire @@ -938,7 +951,7 @@ atomic64_fetch_add_acquire(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_add_release(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_add_release(i, v); } #define atomic64_fetch_add_release atomic64_fetch_add_release @@ -948,7 +961,7 @@ atomic64_fetch_add_release(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_add_relaxed(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_add_relaxed(i, v); } #define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed @@ -957,7 +970,7 @@ atomic64_fetch_add_relaxed(s64 i, atomic64_t *v) static inline void atomic64_sub(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); arch_atomic64_sub(i, v); } #define atomic64_sub atomic64_sub @@ -966,7 +979,7 @@ atomic64_sub(s64 i, atomic64_t *v) static inline s64 atomic64_sub_return(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_sub_return(i, v); } #define atomic64_sub_return atomic64_sub_return @@ -976,7 +989,7 @@ atomic64_sub_return(s64 i, atomic64_t *v) static inline s64 atomic64_sub_return_acquire(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_sub_return_acquire(i, v); } #define atomic64_sub_return_acquire atomic64_sub_return_acquire @@ -986,7 +999,7 @@ atomic64_sub_return_acquire(s64 i, atomic64_t *v) static inline s64 atomic64_sub_return_release(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_sub_return_release(i, v); } #define atomic64_sub_return_release atomic64_sub_return_release @@ -996,7 +1009,7 @@ atomic64_sub_return_release(s64 i, atomic64_t *v) static inline s64 atomic64_sub_return_relaxed(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_sub_return_relaxed(i, v); } #define atomic64_sub_return_relaxed atomic64_sub_return_relaxed @@ -1006,7 +1019,7 @@ atomic64_sub_return_relaxed(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_sub(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return 
arch_atomic64_fetch_sub(i, v); } #define atomic64_fetch_sub atomic64_fetch_sub @@ -1016,7 +1029,7 @@ atomic64_fetch_sub(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_sub_acquire(i, v); } #define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire @@ -1026,7 +1039,7 @@ atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_sub_release(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_sub_release(i, v); } #define atomic64_fetch_sub_release atomic64_fetch_sub_release @@ -1036,7 +1049,7 @@ atomic64_fetch_sub_release(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_sub_relaxed(i, v); } #define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed @@ -1046,7 +1059,7 @@ atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v) static inline void atomic64_inc(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); arch_atomic64_inc(v); } #define atomic64_inc atomic64_inc @@ -1056,7 +1069,7 @@ atomic64_inc(atomic64_t *v) static inline s64 atomic64_inc_return(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_inc_return(v); } #define atomic64_inc_return atomic64_inc_return @@ -1066,7 +1079,7 @@ atomic64_inc_return(atomic64_t *v) static inline s64 atomic64_inc_return_acquire(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_inc_return_acquire(v); } #define atomic64_inc_return_acquire atomic64_inc_return_acquire @@ -1076,7 +1089,7 @@ atomic64_inc_return_acquire(atomic64_t *v) static inline s64 atomic64_inc_return_release(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_inc_return_release(v); } #define atomic64_inc_return_release atomic64_inc_return_release @@ -1086,7 +1099,7 @@ atomic64_inc_return_release(atomic64_t *v) static inline s64 atomic64_inc_return_relaxed(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_inc_return_relaxed(v); } #define atomic64_inc_return_relaxed atomic64_inc_return_relaxed @@ -1096,7 +1109,7 @@ atomic64_inc_return_relaxed(atomic64_t *v) static inline s64 atomic64_fetch_inc(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_inc(v); } #define atomic64_fetch_inc atomic64_fetch_inc @@ -1106,7 +1119,7 @@ atomic64_fetch_inc(atomic64_t *v) static inline s64 atomic64_fetch_inc_acquire(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_inc_acquire(v); } #define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire @@ -1116,7 +1129,7 @@ atomic64_fetch_inc_acquire(atomic64_t *v) static inline s64 atomic64_fetch_inc_release(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_inc_release(v); } #define atomic64_fetch_inc_release atomic64_fetch_inc_release @@ -1126,7 +1139,7 @@ atomic64_fetch_inc_release(atomic64_t *v) static inline s64 atomic64_fetch_inc_relaxed(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + 
__atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_inc_relaxed(v); } #define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed @@ -1136,7 +1149,7 @@ atomic64_fetch_inc_relaxed(atomic64_t *v) static inline void atomic64_dec(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); arch_atomic64_dec(v); } #define atomic64_dec atomic64_dec @@ -1146,7 +1159,7 @@ atomic64_dec(atomic64_t *v) static inline s64 atomic64_dec_return(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_dec_return(v); } #define atomic64_dec_return atomic64_dec_return @@ -1156,7 +1169,7 @@ atomic64_dec_return(atomic64_t *v) static inline s64 atomic64_dec_return_acquire(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_dec_return_acquire(v); } #define atomic64_dec_return_acquire atomic64_dec_return_acquire @@ -1166,7 +1179,7 @@ atomic64_dec_return_acquire(atomic64_t *v) static inline s64 atomic64_dec_return_release(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_dec_return_release(v); } #define atomic64_dec_return_release atomic64_dec_return_release @@ -1176,7 +1189,7 @@ atomic64_dec_return_release(atomic64_t *v) static inline s64 atomic64_dec_return_relaxed(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_dec_return_relaxed(v); } #define atomic64_dec_return_relaxed atomic64_dec_return_relaxed @@ -1186,7 +1199,7 @@ atomic64_dec_return_relaxed(atomic64_t *v) static inline s64 atomic64_fetch_dec(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_dec(v); } #define atomic64_fetch_dec atomic64_fetch_dec @@ -1196,7 +1209,7 @@ atomic64_fetch_dec(atomic64_t *v) static inline s64 atomic64_fetch_dec_acquire(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_dec_acquire(v); } #define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire @@ -1206,7 +1219,7 @@ atomic64_fetch_dec_acquire(atomic64_t *v) static inline s64 atomic64_fetch_dec_release(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_dec_release(v); } #define atomic64_fetch_dec_release atomic64_fetch_dec_release @@ -1216,7 +1229,7 @@ atomic64_fetch_dec_release(atomic64_t *v) static inline s64 atomic64_fetch_dec_relaxed(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_dec_relaxed(v); } #define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed @@ -1225,7 +1238,7 @@ atomic64_fetch_dec_relaxed(atomic64_t *v) static inline void atomic64_and(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); arch_atomic64_and(i, v); } #define atomic64_and atomic64_and @@ -1234,7 +1247,7 @@ atomic64_and(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_and(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_and(i, v); } #define atomic64_fetch_and atomic64_fetch_and @@ -1244,7 +1257,7 @@ atomic64_fetch_and(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_and_acquire(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return 
arch_atomic64_fetch_and_acquire(i, v); } #define atomic64_fetch_and_acquire atomic64_fetch_and_acquire @@ -1254,7 +1267,7 @@ atomic64_fetch_and_acquire(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_and_release(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_and_release(i, v); } #define atomic64_fetch_and_release atomic64_fetch_and_release @@ -1264,7 +1277,7 @@ atomic64_fetch_and_release(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_and_relaxed(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_and_relaxed(i, v); } #define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed @@ -1274,7 +1287,7 @@ atomic64_fetch_and_relaxed(s64 i, atomic64_t *v) static inline void atomic64_andnot(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); arch_atomic64_andnot(i, v); } #define atomic64_andnot atomic64_andnot @@ -1284,7 +1297,7 @@ atomic64_andnot(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_andnot(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_andnot(i, v); } #define atomic64_fetch_andnot atomic64_fetch_andnot @@ -1294,7 +1307,7 @@ atomic64_fetch_andnot(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_andnot_acquire(i, v); } #define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire @@ -1304,7 +1317,7 @@ atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_andnot_release(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_andnot_release(i, v); } #define atomic64_fetch_andnot_release atomic64_fetch_andnot_release @@ -1314,7 +1327,7 @@ atomic64_fetch_andnot_release(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_andnot_relaxed(i, v); } #define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed @@ -1323,7 +1336,7 @@ atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) static inline void atomic64_or(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); arch_atomic64_or(i, v); } #define atomic64_or atomic64_or @@ -1332,7 +1345,7 @@ atomic64_or(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_or(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_or(i, v); } #define atomic64_fetch_or atomic64_fetch_or @@ -1342,7 +1355,7 @@ atomic64_fetch_or(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_or_acquire(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_or_acquire(i, v); } #define atomic64_fetch_or_acquire atomic64_fetch_or_acquire @@ -1352,7 +1365,7 @@ atomic64_fetch_or_acquire(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_or_release(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_or_release(i, v); } #define atomic64_fetch_or_release atomic64_fetch_or_release @@ -1362,7 +1375,7 @@ atomic64_fetch_or_release(s64 
i, atomic64_t *v) static inline s64 atomic64_fetch_or_relaxed(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_or_relaxed(i, v); } #define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed @@ -1371,7 +1384,7 @@ atomic64_fetch_or_relaxed(s64 i, atomic64_t *v) static inline void atomic64_xor(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); arch_atomic64_xor(i, v); } #define atomic64_xor atomic64_xor @@ -1380,7 +1393,7 @@ atomic64_xor(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_xor(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_xor(i, v); } #define atomic64_fetch_xor atomic64_fetch_xor @@ -1390,7 +1403,7 @@ atomic64_fetch_xor(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_xor_acquire(i, v); } #define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire @@ -1400,7 +1413,7 @@ atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_xor_release(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_xor_release(i, v); } #define atomic64_fetch_xor_release atomic64_fetch_xor_release @@ -1410,7 +1423,7 @@ atomic64_fetch_xor_release(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_xor_relaxed(i, v); } #define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed @@ -1420,7 +1433,7 @@ atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v) static inline s64 atomic64_xchg(atomic64_t *v, s64 i) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_xchg(v, i); } #define atomic64_xchg atomic64_xchg @@ -1430,7 +1443,7 @@ atomic64_xchg(atomic64_t *v, s64 i) static inline s64 atomic64_xchg_acquire(atomic64_t *v, s64 i) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_xchg_acquire(v, i); } #define atomic64_xchg_acquire atomic64_xchg_acquire @@ -1440,7 +1453,7 @@ atomic64_xchg_acquire(atomic64_t *v, s64 i) static inline s64 atomic64_xchg_release(atomic64_t *v, s64 i) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_xchg_release(v, i); } #define atomic64_xchg_release atomic64_xchg_release @@ -1450,7 +1463,7 @@ atomic64_xchg_release(atomic64_t *v, s64 i) static inline s64 atomic64_xchg_relaxed(atomic64_t *v, s64 i) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_xchg_relaxed(v, i); } #define atomic64_xchg_relaxed atomic64_xchg_relaxed @@ -1460,7 +1473,7 @@ atomic64_xchg_relaxed(atomic64_t *v, s64 i) static inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_cmpxchg(v, old, new); } #define atomic64_cmpxchg atomic64_cmpxchg @@ -1470,7 +1483,7 @@ atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) static inline s64 atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_cmpxchg_acquire(v, old, new); } #define atomic64_cmpxchg_acquire 
atomic64_cmpxchg_acquire @@ -1480,7 +1493,7 @@ atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) static inline s64 atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_cmpxchg_release(v, old, new); } #define atomic64_cmpxchg_release atomic64_cmpxchg_release @@ -1490,7 +1503,7 @@ atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) static inline s64 atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_cmpxchg_relaxed(v, old, new); } #define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed @@ -1500,8 +1513,8 @@ atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new) static inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) { - kasan_check_write(v, sizeof(*v)); - kasan_check_write(old, sizeof(*old)); + __atomic_check_write(v, sizeof(*v)); + __atomic_check_write(old, sizeof(*old)); return arch_atomic64_try_cmpxchg(v, old, new); } #define atomic64_try_cmpxchg atomic64_try_cmpxchg @@ -1511,8 +1524,8 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) static inline bool atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) { - kasan_check_write(v, sizeof(*v)); - kasan_check_write(old, sizeof(*old)); + __atomic_check_write(v, sizeof(*v)); + __atomic_check_write(old, sizeof(*old)); return arch_atomic64_try_cmpxchg_acquire(v, old, new); } #define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire @@ -1522,8 +1535,8 @@ atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) static inline bool atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) { - kasan_check_write(v, sizeof(*v)); - kasan_check_write(old, sizeof(*old)); + __atomic_check_write(v, sizeof(*v)); + __atomic_check_write(old, sizeof(*old)); return arch_atomic64_try_cmpxchg_release(v, old, new); } #define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release @@ -1533,8 +1546,8 @@ atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) static inline bool atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) { - kasan_check_write(v, sizeof(*v)); - kasan_check_write(old, sizeof(*old)); + __atomic_check_write(v, sizeof(*v)); + __atomic_check_write(old, sizeof(*old)); return arch_atomic64_try_cmpxchg_relaxed(v, old, new); } #define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed @@ -1544,7 +1557,7 @@ atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) static inline bool atomic64_sub_and_test(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_sub_and_test(i, v); } #define atomic64_sub_and_test atomic64_sub_and_test @@ -1554,7 +1567,7 @@ atomic64_sub_and_test(s64 i, atomic64_t *v) static inline bool atomic64_dec_and_test(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_dec_and_test(v); } #define atomic64_dec_and_test atomic64_dec_and_test @@ -1564,7 +1577,7 @@ atomic64_dec_and_test(atomic64_t *v) static inline bool atomic64_inc_and_test(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_inc_and_test(v); } #define atomic64_inc_and_test atomic64_inc_and_test @@ -1574,7 +1587,7 @@ atomic64_inc_and_test(atomic64_t *v) static inline bool atomic64_add_negative(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, 
sizeof(*v)); return arch_atomic64_add_negative(i, v); } #define atomic64_add_negative atomic64_add_negative @@ -1584,7 +1597,7 @@ atomic64_add_negative(s64 i, atomic64_t *v) static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_fetch_add_unless(v, a, u); } #define atomic64_fetch_add_unless atomic64_fetch_add_unless @@ -1594,7 +1607,7 @@ atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) static inline bool atomic64_add_unless(atomic64_t *v, s64 a, s64 u) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_add_unless(v, a, u); } #define atomic64_add_unless atomic64_add_unless @@ -1604,7 +1617,7 @@ atomic64_add_unless(atomic64_t *v, s64 a, s64 u) static inline bool atomic64_inc_not_zero(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_inc_not_zero(v); } #define atomic64_inc_not_zero atomic64_inc_not_zero @@ -1614,7 +1627,7 @@ atomic64_inc_not_zero(atomic64_t *v) static inline bool atomic64_inc_unless_negative(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_inc_unless_negative(v); } #define atomic64_inc_unless_negative atomic64_inc_unless_negative @@ -1624,7 +1637,7 @@ atomic64_inc_unless_negative(atomic64_t *v) static inline bool atomic64_dec_unless_positive(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_dec_unless_positive(v); } #define atomic64_dec_unless_positive atomic64_dec_unless_positive @@ -1634,7 +1647,7 @@ atomic64_dec_unless_positive(atomic64_t *v) static inline s64 atomic64_dec_if_positive(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + __atomic_check_write(v, sizeof(*v)); return arch_atomic64_dec_if_positive(v); } #define atomic64_dec_if_positive atomic64_dec_if_positive @@ -1644,7 +1657,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define xchg(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_xchg(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1653,7 +1666,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define xchg_acquire(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_xchg_acquire(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1662,7 +1675,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define xchg_release(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_xchg_release(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1671,7 +1684,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define xchg_relaxed(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_xchg_relaxed(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1680,7 +1693,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1689,7 +1702,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg_acquire(ptr, ...) 
\ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1698,7 +1711,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg_release(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg_release(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1707,7 +1720,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg_relaxed(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1716,7 +1729,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg64(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg64(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1725,7 +1738,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg64_acquire(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1734,7 +1747,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg64_release(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1743,7 +1756,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg64_relaxed(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1751,28 +1764,28 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg_local(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg_local(__ai_ptr, __VA_ARGS__); \ }) #define cmpxchg64_local(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg64_local(__ai_ptr, __VA_ARGS__); \ }) #define sync_cmpxchg(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_sync_cmpxchg(__ai_ptr, __VA_ARGS__); \ }) #define cmpxchg_double(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \ + __atomic_check_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \ arch_cmpxchg_double(__ai_ptr, __VA_ARGS__); \ }) @@ -1780,9 +1793,9 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg_double_local(ptr, ...) 
\ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \ + __atomic_check_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \ arch_cmpxchg_double_local(__ai_ptr, __VA_ARGS__); \ }) #endif /* _ASM_GENERIC_ATOMIC_INSTRUMENTED_H */ -// b29b625d5de9280f680e42c7be859b55b15e5f6a +// beea41c2a0f2c69e4958ed71bf26f59740fa4b12 diff --git a/scripts/atomic/gen-atomic-instrumented.sh b/scripts/atomic/gen-atomic-instrumented.sh index e09812372b17e..8b8b2a6f8d682 100755 --- a/scripts/atomic/gen-atomic-instrumented.sh +++ b/scripts/atomic/gen-atomic-instrumented.sh @@ -20,7 +20,7 @@ gen_param_check() # We don't write to constant parameters [ ${type#c} != ${type} ] && rw="read" - printf "\tkasan_check_${rw}(${name}, sizeof(*${name}));\n" + printf "\t__atomic_check_${rw}(${name}, sizeof(*${name}));\n" } #gen_param_check(arg...) @@ -107,7 +107,7 @@ cat < #include +#include + +static inline void __atomic_check_read(const volatile void *v, size_t size) +{ + kasan_check_read(v, size); + kcsan_check_atomic_read(v, size); +} + +static inline void __atomic_check_write(const volatile void *v, size_t size) +{ + kasan_check_write(v, size); + kcsan_check_atomic_write(v, size); +} EOF -- GitLab From 40d04110f87940b6a03bf0aa19cd29e84f465f20 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Thu, 14 Nov 2019 19:03:03 +0100 Subject: [PATCH 0009/1971] x86, kcsan: Enable KCSAN for x86 This patch enables KCSAN for x86, with updates to build rules to not use KCSAN for several incompatible compilation units. Signed-off-by: Marco Elver Acked-by: Paul E. McKenney Signed-off-by: Paul E. McKenney --- arch/x86/Kconfig | 1 + arch/x86/boot/Makefile | 2 ++ arch/x86/boot/compressed/Makefile | 2 ++ arch/x86/entry/vdso/Makefile | 3 +++ arch/x86/include/asm/bitops.h | 6 +++++- arch/x86/kernel/Makefile | 4 ++++ arch/x86/kernel/cpu/Makefile | 3 +++ arch/x86/lib/Makefile | 4 ++++ arch/x86/mm/Makefile | 4 ++++ arch/x86/purgatory/Makefile | 2 ++ arch/x86/realmode/Makefile | 3 +++ arch/x86/realmode/rm/Makefile | 3 +++ 12 files changed, 36 insertions(+), 1 deletion(-) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index d6e1faa28c58e..81859be4a005e 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -226,6 +226,7 @@ config X86 select VIRT_TO_BUS select X86_FEATURE_NAMES if PROC_FS select PROC_PID_ARCH_STATUS if PROC_FS + select HAVE_ARCH_KCSAN if X86_64 config INSTRUCTION_DECODER def_bool y diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile index e2839b5c246c2..9c79427941646 100644 --- a/arch/x86/boot/Makefile +++ b/arch/x86/boot/Makefile @@ -9,7 +9,9 @@ # Changed by many, many contributors over the years. # +# Sanitizer runtimes are unavailable and cannot be linked for early boot code. KASAN_SANITIZE := n +KCSAN_SANITIZE := n OBJECT_FILES_NON_STANDARD := y # Kernel does not boot with kcov instrumentation here. diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 6b84afdd75382..a1c248b8439f3 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -17,7 +17,9 @@ # (see scripts/Makefile.lib size_append) # compressed vmlinux.bin.all + u32 size of vmlinux.bin.all +# Sanitizer runtimes are unavailable and cannot be linked for early boot code. KASAN_SANITIZE := n +KCSAN_SANITIZE := n OBJECT_FILES_NON_STANDARD := y # Prevents link failures: __sanitizer_cov_trace_pc() is not linked in. 
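The net effect of the instrumented-atomic conversion above can be summarised with a short sketch: the generated wrappers keep their shape, and only the check helper changes, so every instrumented atomic is now validated by both KASAN and KCSAN before dispatching to the arch_ implementation. The following is a minimal illustration only (not part of the patch), using the helper and the atomic64_inc() wrapper exactly as they appear in the hunks above; the surrounding kernel context (atomic64_t, the arch_ ops, kasan/kcsan headers) is assumed and the snippet is not compilable on its own.

/* Combined check helper emitted at the top of the generated header: */
static inline void __atomic_check_write(const volatile void *v, size_t size)
{
	kasan_check_write(v, size);		/* KASAN validity check */
	kcsan_check_atomic_write(v, size);	/* tell KCSAN this access is an atomic write */
}

/* Each generated wrapper then checks the accessed object and forwards to the arch op: */
static inline void atomic64_inc(atomic64_t *v)
{
	__atomic_check_write(v, sizeof(*v));
	arch_atomic64_inc(v);
}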
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile index 0f2154106d016..a23debaad5b91 100644 --- a/arch/x86/entry/vdso/Makefile +++ b/arch/x86/entry/vdso/Makefile @@ -10,8 +10,11 @@ ARCH_REL_TYPE_ABS += R_386_GLOB_DAT|R_386_JMP_SLOT|R_386_RELATIVE include $(srctree)/lib/vdso/Makefile KBUILD_CFLAGS += $(DISABLE_LTO) + +# Sanitizer runtimes are unavailable and cannot be linked here. KASAN_SANITIZE := n UBSAN_SANITIZE := n +KCSAN_SANITIZE := n OBJECT_FILES_NON_STANDARD := y # Prevents link failures: __sanitizer_cov_trace_pc() is not linked in. diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h index 7d1f6a49bfae1..542b63ddc8aa4 100644 --- a/arch/x86/include/asm/bitops.h +++ b/arch/x86/include/asm/bitops.h @@ -201,8 +201,12 @@ arch_test_and_change_bit(long nr, volatile unsigned long *addr) return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), *addr, c, "Ir", nr); } -static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr) +static __no_kcsan_or_inline bool constant_test_bit(long nr, const volatile unsigned long *addr) { + /* + * Because this is a plain access, we need to disable KCSAN here to + * avoid double instrumentation via instrumented bitops. + */ return ((1UL << (nr & (BITS_PER_LONG-1))) & (addr[nr >> _BITOPS_LONG_SHIFT])) != 0; } diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 3578ad248bc98..a9a1cab437bca 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -28,6 +28,10 @@ KASAN_SANITIZE_dumpstack_$(BITS).o := n KASAN_SANITIZE_stacktrace.o := n KASAN_SANITIZE_paravirt.o := n +# With some compiler versions the generated code results in boot hangs, caused +# by several compilation units. To be safe, disable all instrumentation. +KCSAN_SANITIZE := n + OBJECT_FILES_NON_STANDARD_relocate_kernel_$(BITS).o := y OBJECT_FILES_NON_STANDARD_test_nx.o := y OBJECT_FILES_NON_STANDARD_paravirt_patch.o := y diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index d7a1e5a9331ca..1f1b0edc01873 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -13,6 +13,9 @@ endif KCOV_INSTRUMENT_common.o := n KCOV_INSTRUMENT_perf_event.o := n +# As above, instrumenting secondary CPU boot code causes boot hangs. +KCSAN_SANITIZE_common.o := n + # Make sure load_percpu_segment has no stackprotector nostackp := $(call cc-option, -fno-stack-protector) CFLAGS_common.o := $(nostackp) diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index 5246db42de457..432a077056775 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile @@ -6,10 +6,14 @@ # Produces uninteresting flaky coverage. KCOV_INSTRUMENT_delay.o := n +# KCSAN uses udelay for introducing watchpoint delay; avoid recursion. +KCSAN_SANITIZE_delay.o := n + # Early boot use of cmdline; don't instrument it ifdef CONFIG_AMD_MEM_ENCRYPT KCOV_INSTRUMENT_cmdline.o := n KASAN_SANITIZE_cmdline.o := n +KCSAN_SANITIZE_cmdline.o := n ifdef CONFIG_FUNCTION_TRACER CFLAGS_REMOVE_cmdline.o = -pg diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 84373dc9b341e..3559f4297ee1b 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -7,6 +7,10 @@ KCOV_INSTRUMENT_mem_encrypt_identity.o := n KASAN_SANITIZE_mem_encrypt.o := n KASAN_SANITIZE_mem_encrypt_identity.o := n +# Disable KCSAN entirely, because otherwise we get warnings that some functions +# reference __initdata sections. 
+KCSAN_SANITIZE := n + ifdef CONFIG_FUNCTION_TRACER CFLAGS_REMOVE_mem_encrypt.o = -pg CFLAGS_REMOVE_mem_encrypt_identity.o = -pg diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile index fb4ee54443799..69379bce9574c 100644 --- a/arch/x86/purgatory/Makefile +++ b/arch/x86/purgatory/Makefile @@ -17,7 +17,9 @@ CFLAGS_sha256.o := -D__DISABLE_EXPORTS LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib targets += purgatory.ro +# Sanitizer runtimes are unavailable and cannot be linked here. KASAN_SANITIZE := n +KCSAN_SANITIZE := n KCOV_INSTRUMENT := n # These are adjustments to the compiler flags used for objects that diff --git a/arch/x86/realmode/Makefile b/arch/x86/realmode/Makefile index 682c895753d96..6b1f3a4eeb44e 100644 --- a/arch/x86/realmode/Makefile +++ b/arch/x86/realmode/Makefile @@ -6,7 +6,10 @@ # for more details. # # + +# Sanitizer runtimes are unavailable and cannot be linked here. KASAN_SANITIZE := n +KCSAN_SANITIZE := n OBJECT_FILES_NON_STANDARD := y subdir- := rm diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile index f60501a384f94..fdbbb945c2168 100644 --- a/arch/x86/realmode/rm/Makefile +++ b/arch/x86/realmode/rm/Makefile @@ -6,7 +6,10 @@ # for more details. # # + +# Sanitizer runtimes are unavailable and cannot be linked here. KASAN_SANITIZE := n +KCSAN_SANITIZE := n OBJECT_FILES_NON_STANDARD := y # Prevents link failures: __sanitizer_cov_trace_pc() is not linked in. -- GitLab From 5cbaefe9743bf14c9d3106db0cc19f8cb0a3ca22 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 20 Nov 2019 10:41:43 +0100 Subject: [PATCH 0010/1971] kcsan: Improve various small stylistic details Tidy up a few bits: - Fix typos and grammar, improve wording. - Remove spurious newlines that are col80 warning artifacts where the resulting line-break is worse than the disease it's curing. - Use core kernel coding style to improve readability and reduce spurious code pattern variations. - Use better vertical alignment for structure definitions and initialization sequences. - Misc other small details. No change in functionality intended. Cc: linux-kernel@vger.kernel.org Cc: Marco Elver Cc: Paul E. McKenney Cc: Peter Zijlstra Cc: Linus Torvalds Cc: Andrew Morton Cc: Thomas Gleixner Cc: Paul E. 
McKenney Cc: Will Deacon Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 2 +- include/linux/compiler-clang.h | 2 +- include/linux/compiler.h | 2 +- include/linux/kcsan-checks.h | 22 +++++------- include/linux/kcsan.h | 23 +++++-------- include/linux/seqlock.h | 8 ++--- kernel/kcsan/atomic.h | 2 +- kernel/kcsan/core.c | 59 +++++++++++++++----------------- kernel/kcsan/debugfs.c | 62 ++++++++++++++++------------------ kernel/kcsan/encoding.h | 25 +++++++------- kernel/kcsan/kcsan.h | 11 +++--- kernel/kcsan/report.c | 42 +++++++++++------------ kernel/kcsan/test.c | 6 ++-- kernel/sched/Makefile | 2 +- lib/Kconfig.kcsan | 16 ++++----- 15 files changed, 131 insertions(+), 153 deletions(-) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 9933ca8ffe160..9cfa4a5c6f429 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -226,7 +226,7 @@ config X86 select VIRT_TO_BUS select X86_FEATURE_NAMES if PROC_FS select PROC_PID_ARCH_STATUS if PROC_FS - select HAVE_ARCH_KCSAN if X86_64 + select HAVE_ARCH_KCSAN if X86_64 config INSTRUCTION_DECODER def_bool y diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index a213eb55e725f..2cb42d8bdedc6 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -16,7 +16,7 @@ #define KASAN_ABI_VERSION 5 #if __has_feature(address_sanitizer) || __has_feature(hwaddress_sanitizer) -/* emulate gcc's __SANITIZE_ADDRESS__ flag */ +/* Emulate GCC's __SANITIZE_ADDRESS__ flag */ #define __SANITIZE_ADDRESS__ #define __no_sanitize_address \ __attribute__((no_sanitize("address", "hwaddress"))) diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 7d3e777815782..ad8c76144a3c5 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -313,7 +313,7 @@ unsigned long read_word_at_a_time(const void *addr) #include /* - * data_race: macro to document that accesses in an expression may conflict with + * data_race(): macro to document that accesses in an expression may conflict with * other concurrent accesses resulting in data races, but the resulting * behaviour is deemed safe regardless. * diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h index e78220661086b..ef3ee233a3fa9 100644 --- a/include/linux/kcsan-checks.h +++ b/include/linux/kcsan-checks.h @@ -8,17 +8,17 @@ /* * Access type modifiers. */ -#define KCSAN_ACCESS_WRITE 0x1 +#define KCSAN_ACCESS_WRITE 0x1 #define KCSAN_ACCESS_ATOMIC 0x2 /* - * __kcsan_*: Always calls into runtime when KCSAN is enabled. This may be used + * __kcsan_*: Always calls into the runtime when KCSAN is enabled. This may be used * even in compilation units that selectively disable KCSAN, but must use KCSAN - * to validate access to an address. Never use these in header files! + * to validate access to an address. Never use these in header files! */ #ifdef CONFIG_KCSAN /** - * __kcsan_check_access - check generic access for data race + * __kcsan_check_access - check generic access for data races * * @ptr address of access * @size size of access @@ -32,7 +32,7 @@ static inline void __kcsan_check_access(const volatile void *ptr, size_t size, #endif /* - * kcsan_*: Only calls into runtime when the particular compilation unit has + * kcsan_*: Only calls into the runtime when the particular compilation unit has * KCSAN instrumentation enabled. May be used in header files. 
*/ #ifdef __SANITIZE_THREAD__ @@ -77,16 +77,12 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size, kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE) /* - * Check for atomic accesses: if atomic access are not ignored, this simply - * aliases to kcsan_check_access, otherwise becomes a no-op. + * Check for atomic accesses: if atomic accesses are not ignored, this simply + * aliases to kcsan_check_access(), otherwise becomes a no-op. */ #ifdef CONFIG_KCSAN_IGNORE_ATOMICS -#define kcsan_check_atomic_read(...) \ - do { \ - } while (0) -#define kcsan_check_atomic_write(...) \ - do { \ - } while (0) +#define kcsan_check_atomic_read(...) do { } while (0) +#define kcsan_check_atomic_write(...) do { } while (0) #else #define kcsan_check_atomic_read(ptr, size) \ kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC) diff --git a/include/linux/kcsan.h b/include/linux/kcsan.h index 9047048fee842..1019e3a2c6897 100644 --- a/include/linux/kcsan.h +++ b/include/linux/kcsan.h @@ -94,21 +94,14 @@ void kcsan_atomic_next(int n); #else /* CONFIG_KCSAN */ -static inline void kcsan_init(void) { } - -static inline void kcsan_disable_current(void) { } - -static inline void kcsan_enable_current(void) { } - -static inline void kcsan_nestable_atomic_begin(void) { } - -static inline void kcsan_nestable_atomic_end(void) { } - -static inline void kcsan_flat_atomic_begin(void) { } - -static inline void kcsan_flat_atomic_end(void) { } - -static inline void kcsan_atomic_next(int n) { } +static inline void kcsan_init(void) { } +static inline void kcsan_disable_current(void) { } +static inline void kcsan_enable_current(void) { } +static inline void kcsan_nestable_atomic_begin(void) { } +static inline void kcsan_nestable_atomic_end(void) { } +static inline void kcsan_flat_atomic_begin(void) { } +static inline void kcsan_flat_atomic_end(void) { } +static inline void kcsan_atomic_next(int n) { } #endif /* CONFIG_KCSAN */ diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index f52c91be89397..f80d50cac1992 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -48,7 +48,7 @@ * * As a consequence, we take the following best-effort approach for raw usage * via seqcount_t under KCSAN: upon beginning a seq-reader critical section, - * pessimistically mark then next KCSAN_SEQLOCK_REGION_MAX memory accesses as + * pessimistically mark the next KCSAN_SEQLOCK_REGION_MAX memory accesses as * atomics; if there is a matching read_seqcount_retry() call, no following * memory operations are considered atomic. Usage of seqlocks via seqlock_t * interface is not affected. @@ -265,7 +265,7 @@ static inline void raw_write_seqcount_end(seqcount_t *s) * usual consistency guarantee. It is one wmb cheaper, because we can * collapse the two back-to-back wmb()s. * - * Note that, writes surrounding the barrier should be declared atomic (e.g. + * Note that writes surrounding the barrier should be declared atomic (e.g. * via WRITE_ONCE): a) to ensure the writes become visible to other threads * atomically, avoiding compiler optimizations; b) to document which writes are * meant to propagate to the reader critical section. 
This is necessary because @@ -465,7 +465,7 @@ static inline unsigned read_seqbegin(const seqlock_t *sl) { unsigned ret = read_seqcount_begin(&sl->seqcount); - kcsan_atomic_next(0); /* non-raw usage, assume closing read_seqretry */ + kcsan_atomic_next(0); /* non-raw usage, assume closing read_seqretry() */ kcsan_flat_atomic_begin(); return ret; } @@ -473,7 +473,7 @@ static inline unsigned read_seqbegin(const seqlock_t *sl) static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) { /* - * Assume not nested: read_seqretry may be called multiple times when + * Assume not nested: read_seqretry() may be called multiple times when * completing read critical section. */ kcsan_flat_atomic_end(); diff --git a/kernel/kcsan/atomic.h b/kernel/kcsan/atomic.h index c9c3fe6280114..576e03ddd6a31 100644 --- a/kernel/kcsan/atomic.h +++ b/kernel/kcsan/atomic.h @@ -6,7 +6,7 @@ #include /* - * Helper that returns true if access to ptr should be considered as an atomic + * Helper that returns true if access to @ptr should be considered an atomic * access, even though it is not explicitly atomic. * * List all volatile globals that have been observed in races, to suppress diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c index d9410d58c93ec..3314fc29e2369 100644 --- a/kernel/kcsan/core.c +++ b/kernel/kcsan/core.c @@ -19,10 +19,10 @@ bool kcsan_enabled; /* Per-CPU kcsan_ctx for interrupts */ static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = { - .disable_count = 0, - .atomic_next = 0, - .atomic_nest_count = 0, - .in_flat_atomic = false, + .disable_count = 0, + .atomic_next = 0, + .atomic_nest_count = 0, + .in_flat_atomic = false, }; /* @@ -50,11 +50,11 @@ static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = { * slot=9: [10, 11, 9] * slot=63: [64, 65, 63] */ -#define NUM_SLOTS (1 + 2 * KCSAN_CHECK_ADJACENT) +#define NUM_SLOTS (1 + 2*KCSAN_CHECK_ADJACENT) #define SLOT_IDX(slot, i) (slot + ((i + KCSAN_CHECK_ADJACENT) % NUM_SLOTS)) /* - * SLOT_IDX_FAST is used in fast-path. Not first checking the address's primary + * SLOT_IDX_FAST is used in the fast-path. Not first checking the address's primary * slot (middle) is fine if we assume that data races occur rarely. The set of * indices {SLOT_IDX(slot, i) | i in [0, NUM_SLOTS)} is equivalent to * {SLOT_IDX_FAST(slot, i) | i in [0, NUM_SLOTS)}. @@ -68,9 +68,9 @@ static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = { * zero-initialized state matches INVALID_WATCHPOINT. * * Add NUM_SLOTS-1 entries to account for overflow; this helps avoid having to - * use more complicated SLOT_IDX_FAST calculation with modulo in fast-path. + * use more complicated SLOT_IDX_FAST calculation with modulo in the fast-path. */ -static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS - 1]; +static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1]; /* * Instructions to skip watching counter, used in should_watch(). 
We use a @@ -78,7 +78,8 @@ static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS - 1]; */ static DEFINE_PER_CPU(long, kcsan_skip); -static inline atomic_long_t *find_watchpoint(unsigned long addr, size_t size, +static inline atomic_long_t *find_watchpoint(unsigned long addr, + size_t size, bool expect_write, long *encoded_watchpoint) { @@ -110,8 +111,8 @@ static inline atomic_long_t *find_watchpoint(unsigned long addr, size_t size, return NULL; } -static inline atomic_long_t *insert_watchpoint(unsigned long addr, size_t size, - bool is_write) +static inline atomic_long_t * +insert_watchpoint(unsigned long addr, size_t size, bool is_write) { const int slot = watchpoint_slot(addr); const long encoded_watchpoint = encode_watchpoint(addr, size, is_write); @@ -120,21 +121,16 @@ static inline atomic_long_t *insert_watchpoint(unsigned long addr, size_t size, /* Check slot index logic, ensuring we stay within array bounds. */ BUILD_BUG_ON(SLOT_IDX(0, 0) != KCSAN_CHECK_ADJACENT); - BUILD_BUG_ON(SLOT_IDX(0, KCSAN_CHECK_ADJACENT + 1) != 0); - BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS - 1, - KCSAN_CHECK_ADJACENT) != - ARRAY_SIZE(watchpoints) - 1); - BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS - 1, - KCSAN_CHECK_ADJACENT + 1) != - ARRAY_SIZE(watchpoints) - NUM_SLOTS); + BUILD_BUG_ON(SLOT_IDX(0, KCSAN_CHECK_ADJACENT+1) != 0); + BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT) != ARRAY_SIZE(watchpoints)-1); + BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT+1) != ARRAY_SIZE(watchpoints) - NUM_SLOTS); for (i = 0; i < NUM_SLOTS; ++i) { long expect_val = INVALID_WATCHPOINT; /* Try to acquire this slot. */ watchpoint = &watchpoints[SLOT_IDX(slot, i)]; - if (atomic_long_try_cmpxchg_relaxed(watchpoint, &expect_val, - encoded_watchpoint)) + if (atomic_long_try_cmpxchg_relaxed(watchpoint, &expect_val, encoded_watchpoint)) return watchpoint; } @@ -150,11 +146,10 @@ static inline atomic_long_t *insert_watchpoint(unsigned long addr, size_t size, * 2. the thread that set up the watchpoint already removed it; * 3. the watchpoint was removed and then re-used. */ -static inline bool try_consume_watchpoint(atomic_long_t *watchpoint, - long encoded_watchpoint) +static inline bool +try_consume_watchpoint(atomic_long_t *watchpoint, long encoded_watchpoint) { - return atomic_long_try_cmpxchg_relaxed(watchpoint, &encoded_watchpoint, - CONSUMED_WATCHPOINT); + return atomic_long_try_cmpxchg_relaxed(watchpoint, &encoded_watchpoint, CONSUMED_WATCHPOINT); } /* @@ -162,14 +157,13 @@ static inline bool try_consume_watchpoint(atomic_long_t *watchpoint, */ static inline bool remove_watchpoint(atomic_long_t *watchpoint) { - return atomic_long_xchg_relaxed(watchpoint, INVALID_WATCHPOINT) != - CONSUMED_WATCHPOINT; + return atomic_long_xchg_relaxed(watchpoint, INVALID_WATCHPOINT) != CONSUMED_WATCHPOINT; } static inline struct kcsan_ctx *get_ctx(void) { /* - * In interrupt, use raw_cpu_ptr to avoid unnecessary checks, that would + * In interrupts, use raw_cpu_ptr to avoid unnecessary checks, that would * also result in calls that generate warnings in uaccess regions. */ return in_task() ? 
¤t->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx); @@ -260,7 +254,8 @@ static inline unsigned int get_delay(void) */ static noinline void kcsan_found_watchpoint(const volatile void *ptr, - size_t size, bool is_write, + size_t size, + bool is_write, atomic_long_t *watchpoint, long encoded_watchpoint) { @@ -296,8 +291,8 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr, user_access_restore(flags); } -static noinline void kcsan_setup_watchpoint(const volatile void *ptr, - size_t size, bool is_write) +static noinline void +kcsan_setup_watchpoint(const volatile void *ptr, size_t size, bool is_write) { atomic_long_t *watchpoint; union { @@ -346,8 +341,8 @@ static noinline void kcsan_setup_watchpoint(const volatile void *ptr, watchpoint = insert_watchpoint((unsigned long)ptr, size, is_write); if (watchpoint == NULL) { /* - * Out of capacity: the size of `watchpoints`, and the frequency - * with which `should_watch()` returns true should be tweaked so + * Out of capacity: the size of 'watchpoints', and the frequency + * with which should_watch() returns true should be tweaked so * that this case happens very rarely. */ kcsan_counter_inc(KCSAN_COUNTER_NO_CAPACITY); diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c index 041d520a01837..bec42dab32ee8 100644 --- a/kernel/kcsan/debugfs.c +++ b/kernel/kcsan/debugfs.c @@ -24,39 +24,31 @@ static atomic_long_t counters[KCSAN_COUNTER_COUNT]; * whitelist or blacklist. */ static struct { - unsigned long *addrs; /* array of addresses */ - size_t size; /* current size */ - int used; /* number of elements used */ - bool sorted; /* if elements are sorted */ - bool whitelist; /* if list is a blacklist or whitelist */ + unsigned long *addrs; /* array of addresses */ + size_t size; /* current size */ + int used; /* number of elements used */ + bool sorted; /* if elements are sorted */ + bool whitelist; /* if list is a blacklist or whitelist */ } report_filterlist = { - .addrs = NULL, - .size = 8, /* small initial size */ - .used = 0, - .sorted = false, - .whitelist = false, /* default is blacklist */ + .addrs = NULL, + .size = 8, /* small initial size */ + .used = 0, + .sorted = false, + .whitelist = false, /* default is blacklist */ }; static DEFINE_SPINLOCK(report_filterlist_lock); static const char *counter_to_name(enum kcsan_counter_id id) { switch (id) { - case KCSAN_COUNTER_USED_WATCHPOINTS: - return "used_watchpoints"; - case KCSAN_COUNTER_SETUP_WATCHPOINTS: - return "setup_watchpoints"; - case KCSAN_COUNTER_DATA_RACES: - return "data_races"; - case KCSAN_COUNTER_NO_CAPACITY: - return "no_capacity"; - case KCSAN_COUNTER_REPORT_RACES: - return "report_races"; - case KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN: - return "races_unknown_origin"; - case KCSAN_COUNTER_UNENCODABLE_ACCESSES: - return "unencodable_accesses"; - case KCSAN_COUNTER_ENCODING_FALSE_POSITIVES: - return "encoding_false_positives"; + case KCSAN_COUNTER_USED_WATCHPOINTS: return "used_watchpoints"; + case KCSAN_COUNTER_SETUP_WATCHPOINTS: return "setup_watchpoints"; + case KCSAN_COUNTER_DATA_RACES: return "data_races"; + case KCSAN_COUNTER_NO_CAPACITY: return "no_capacity"; + case KCSAN_COUNTER_REPORT_RACES: return "report_races"; + case KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN: return "races_unknown_origin"; + case KCSAN_COUNTER_UNENCODABLE_ACCESSES: return "unencodable_accesses"; + case KCSAN_COUNTER_ENCODING_FALSE_POSITIVES: return "encoding_false_positives"; case KCSAN_COUNTER_COUNT: BUG(); } @@ -116,7 +108,7 @@ bool kcsan_skip_report_debugfs(unsigned long func_addr) if 
(!kallsyms_lookup_size_offset(func_addr, &symbolsize, &offset)) return false; - func_addr -= offset; /* get function start */ + func_addr -= offset; /* Get function start */ spin_lock_irqsave(&report_filterlist_lock, flags); if (report_filterlist.used == 0) @@ -195,6 +187,7 @@ static ssize_t insert_report_filterlist(const char *func) out: spin_unlock_irqrestore(&report_filterlist_lock, flags); + return ret; } @@ -226,8 +219,8 @@ static int debugfs_open(struct inode *inode, struct file *file) return single_open(file, show_info, NULL); } -static ssize_t debugfs_write(struct file *file, const char __user *buf, - size_t count, loff_t *off) +static ssize_t +debugfs_write(struct file *file, const char __user *buf, size_t count, loff_t *off) { char kbuf[KSYM_NAME_LEN]; char *arg; @@ -264,10 +257,13 @@ static ssize_t debugfs_write(struct file *file, const char __user *buf, return count; } -static const struct file_operations debugfs_ops = { .read = seq_read, - .open = debugfs_open, - .write = debugfs_write, - .release = single_release }; +static const struct file_operations debugfs_ops = +{ + .read = seq_read, + .open = debugfs_open, + .write = debugfs_write, + .release = single_release +}; void __init kcsan_debugfs_init(void) { diff --git a/kernel/kcsan/encoding.h b/kernel/kcsan/encoding.h index e17bdac0e54bd..b63890e864493 100644 --- a/kernel/kcsan/encoding.h +++ b/kernel/kcsan/encoding.h @@ -10,7 +10,8 @@ #include "kcsan.h" #define SLOT_RANGE PAGE_SIZE -#define INVALID_WATCHPOINT 0 + +#define INVALID_WATCHPOINT 0 #define CONSUMED_WATCHPOINT 1 /* @@ -34,24 +35,24 @@ * Both these are assumed to be very unlikely. However, in case it still happens * happens, the report logic will filter out the false positive (see report.c). */ -#define WATCHPOINT_ADDR_BITS (BITS_PER_LONG - 1 - WATCHPOINT_SIZE_BITS) +#define WATCHPOINT_ADDR_BITS (BITS_PER_LONG-1 - WATCHPOINT_SIZE_BITS) /* * Masks to set/retrieve the encoded data. */ -#define WATCHPOINT_WRITE_MASK BIT(BITS_PER_LONG - 1) +#define WATCHPOINT_WRITE_MASK BIT(BITS_PER_LONG-1) #define WATCHPOINT_SIZE_MASK \ - GENMASK(BITS_PER_LONG - 2, BITS_PER_LONG - 2 - WATCHPOINT_SIZE_BITS) + GENMASK(BITS_PER_LONG-2, BITS_PER_LONG-2 - WATCHPOINT_SIZE_BITS) #define WATCHPOINT_ADDR_MASK \ - GENMASK(BITS_PER_LONG - 3 - WATCHPOINT_SIZE_BITS, 0) + GENMASK(BITS_PER_LONG-3 - WATCHPOINT_SIZE_BITS, 0) static inline bool check_encodable(unsigned long addr, size_t size) { return size <= MAX_ENCODABLE_SIZE; } -static inline long encode_watchpoint(unsigned long addr, size_t size, - bool is_write) +static inline long +encode_watchpoint(unsigned long addr, size_t size, bool is_write) { return (long)((is_write ? 
WATCHPOINT_WRITE_MASK : 0) | (size << WATCHPOINT_ADDR_BITS) | @@ -59,17 +60,17 @@ static inline long encode_watchpoint(unsigned long addr, size_t size, } static inline bool decode_watchpoint(long watchpoint, - unsigned long *addr_masked, size_t *size, + unsigned long *addr_masked, + size_t *size, bool *is_write) { if (watchpoint == INVALID_WATCHPOINT || watchpoint == CONSUMED_WATCHPOINT) return false; - *addr_masked = (unsigned long)watchpoint & WATCHPOINT_ADDR_MASK; - *size = ((unsigned long)watchpoint & WATCHPOINT_SIZE_MASK) >> - WATCHPOINT_ADDR_BITS; - *is_write = !!((unsigned long)watchpoint & WATCHPOINT_WRITE_MASK); + *addr_masked = (unsigned long)watchpoint & WATCHPOINT_ADDR_MASK; + *size = ((unsigned long)watchpoint & WATCHPOINT_SIZE_MASK) >> WATCHPOINT_ADDR_BITS; + *is_write = !!((unsigned long)watchpoint & WATCHPOINT_WRITE_MASK); return true; } diff --git a/kernel/kcsan/kcsan.h b/kernel/kcsan/kcsan.h index 1bb2f1c0d61e4..d3b9a96ac8a43 100644 --- a/kernel/kcsan/kcsan.h +++ b/kernel/kcsan/kcsan.h @@ -72,14 +72,14 @@ enum kcsan_counter_id { /* * Increment/decrement counter with given id; avoid calling these in fast-path. */ -void kcsan_counter_inc(enum kcsan_counter_id id); -void kcsan_counter_dec(enum kcsan_counter_id id); +extern void kcsan_counter_inc(enum kcsan_counter_id id); +extern void kcsan_counter_dec(enum kcsan_counter_id id); /* * Returns true if data races in the function symbol that maps to func_addr * (offsets are ignored) should *not* be reported. */ -bool kcsan_skip_report_debugfs(unsigned long func_addr); +extern bool kcsan_skip_report_debugfs(unsigned long func_addr); enum kcsan_report_type { /* @@ -99,10 +99,11 @@ enum kcsan_report_type { */ KCSAN_REPORT_RACE_UNKNOWN_ORIGIN, }; + /* * Print a race report from thread that encountered the race. */ -void kcsan_report(const volatile void *ptr, size_t size, bool is_write, - bool value_change, int cpu_id, enum kcsan_report_type type); +extern void kcsan_report(const volatile void *ptr, size_t size, bool is_write, + bool value_change, int cpu_id, enum kcsan_report_type type); #endif /* _KERNEL_KCSAN_KCSAN_H */ diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c index ead5610bafa71..0eea05a3135b7 100644 --- a/kernel/kcsan/report.c +++ b/kernel/kcsan/report.c @@ -22,13 +22,13 @@ * the reports, with reporting being in the slow-path. */ static struct { - const volatile void *ptr; - size_t size; - bool is_write; - int task_pid; - int cpu_id; - unsigned long stack_entries[NUM_STACK_ENTRIES]; - int num_stack_entries; + const volatile void *ptr; + size_t size; + bool is_write; + int task_pid; + int cpu_id; + unsigned long stack_entries[NUM_STACK_ENTRIES]; + int num_stack_entries; } other_info = { .ptr = NULL }; /* @@ -40,8 +40,8 @@ static DEFINE_SPINLOCK(report_lock); /* * Special rules to skip reporting. 
*/ -static bool skip_report(bool is_write, bool value_change, - unsigned long top_frame) +static bool +skip_report(bool is_write, bool value_change, unsigned long top_frame) { if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY) && is_write && !value_change) { @@ -105,6 +105,7 @@ static int sym_strcmp(void *addr1, void *addr2) snprintf(buf1, sizeof(buf1), "%pS", addr1); snprintf(buf2, sizeof(buf2), "%pS", addr2); + return strncmp(buf1, buf2, sizeof(buf1)); } @@ -116,8 +117,7 @@ static bool print_report(const volatile void *ptr, size_t size, bool is_write, enum kcsan_report_type type) { unsigned long stack_entries[NUM_STACK_ENTRIES] = { 0 }; - int num_stack_entries = - stack_trace_save(stack_entries, NUM_STACK_ENTRIES, 1); + int num_stack_entries = stack_trace_save(stack_entries, NUM_STACK_ENTRIES, 1); int skipnr = get_stack_skipnr(stack_entries, num_stack_entries); int other_skipnr; @@ -131,7 +131,7 @@ static bool print_report(const volatile void *ptr, size_t size, bool is_write, other_skipnr = get_stack_skipnr(other_info.stack_entries, other_info.num_stack_entries); - /* value_change is only known for the other thread */ + /* @value_change is only known for the other thread */ if (skip_report(other_info.is_write, value_change, other_info.stack_entries[other_skipnr])) return false; @@ -241,13 +241,12 @@ retry: if (other_info.ptr != NULL) break; /* still in use, retry */ - other_info.ptr = ptr; - other_info.size = size; - other_info.is_write = is_write; - other_info.task_pid = in_task() ? task_pid_nr(current) : -1; - other_info.cpu_id = cpu_id; - other_info.num_stack_entries = stack_trace_save( - other_info.stack_entries, NUM_STACK_ENTRIES, 1); + other_info.ptr = ptr; + other_info.size = size; + other_info.is_write = is_write; + other_info.task_pid = in_task() ? 
task_pid_nr(current) : -1; + other_info.cpu_id = cpu_id; + other_info.num_stack_entries = stack_trace_save(other_info.stack_entries, NUM_STACK_ENTRIES, 1); spin_unlock_irqrestore(&report_lock, *flags); @@ -299,6 +298,7 @@ retry: } spin_unlock_irqrestore(&report_lock, *flags); + goto retry; } @@ -309,9 +309,7 @@ void kcsan_report(const volatile void *ptr, size_t size, bool is_write, kcsan_disable_current(); if (prepare_report(&flags, ptr, size, is_write, cpu_id, type)) { - if (print_report(ptr, size, is_write, value_change, cpu_id, - type) && - panic_on_warn) + if (print_report(ptr, size, is_write, value_change, cpu_id, type) && panic_on_warn) panic("panic_on_warn set ...\n"); release_report(&flags, type); diff --git a/kernel/kcsan/test.c b/kernel/kcsan/test.c index 0bae63c5ca65a..cc6000239dc01 100644 --- a/kernel/kcsan/test.c +++ b/kernel/kcsan/test.c @@ -34,7 +34,7 @@ static bool test_encode_decode(void) if (WARN_ON(!check_encodable(addr, size))) return false; - /* encode and decode */ + /* Encode and decode */ { const long encoded_watchpoint = encode_watchpoint(addr, size, is_write); @@ -42,7 +42,7 @@ static bool test_encode_decode(void) size_t verif_size; bool verif_is_write; - /* check special watchpoints */ + /* Check special watchpoints */ if (WARN_ON(decode_watchpoint( INVALID_WATCHPOINT, &verif_masked_addr, &verif_size, &verif_is_write))) @@ -52,7 +52,7 @@ static bool test_encode_decode(void) &verif_size, &verif_is_write))) return false; - /* check decoding watchpoint returns same data */ + /* Check decoding watchpoint returns same data */ if (WARN_ON(!decode_watchpoint( encoded_watchpoint, &verif_masked_addr, &verif_size, &verif_is_write))) diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile index e9307a9c54e77..5fc9c9b70862f 100644 --- a/kernel/sched/Makefile +++ b/kernel/sched/Makefile @@ -7,7 +7,7 @@ endif # that is not a function of syscall inputs. E.g. involuntary context switches. KCOV_INSTRUMENT := n -# There are numerous races here, however, most of them due to plain accesses. +# There are numerous data races here, however, most of them are due to plain accesses. # This would make it even harder for syzbot to find reproducers, because these # bugs trigger without specific input. Disable by default, but should re-enable # eventually. diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan index 5dd464e52ab43..3f78b1434375d 100644 --- a/lib/Kconfig.kcsan +++ b/lib/Kconfig.kcsan @@ -6,7 +6,6 @@ config HAVE_ARCH_KCSAN menuconfig KCSAN bool "KCSAN: watchpoint-based dynamic data race detector" depends on HAVE_ARCH_KCSAN && !KASAN && STACKTRACE - default n help Kernel Concurrency Sanitizer is a dynamic data race detector, which uses a watchpoint-based sampling approach to detect races. See @@ -16,13 +15,12 @@ if KCSAN config KCSAN_DEBUG bool "Debugging of KCSAN internals" - default n config KCSAN_SELFTEST bool "Perform short selftests on boot" default y help - Run KCSAN selftests on boot. On test failure, causes kernel to panic. + Run KCSAN selftests on boot. On test failure, causes the kernel to panic. config KCSAN_EARLY_ENABLE bool "Early enable during boot" @@ -62,7 +60,8 @@ config KCSAN_DELAY_RANDOMIZE default y help If delays should be randomized, where the maximum is KCSAN_UDELAY_*. - If false, the chosen delays are always KCSAN_UDELAY_* defined above. + If false, the chosen delays are always the KCSAN_UDELAY_* values + as defined above. 
config KCSAN_SKIP_WATCH int "Skip instructions before setting up watchpoint" @@ -86,9 +85,9 @@ config KCSAN_SKIP_WATCH_RANDOMIZE # parameters, to optimize for the common use-case, we avoid this because: (a) # it would impact performance (and we want to avoid static branch for all # {READ,WRITE}_ONCE, atomic_*, bitops, etc.), and (b) complicate the design -# without real benefit. The main purpose of the below options are for use in -# fuzzer configs to control reported data races, and are not expected to be -# switched frequently by a user. +# without real benefit. The main purpose of the below options is for use in +# fuzzer configs to control reported data races, and they are not expected +# to be switched frequently by a user. config KCSAN_REPORT_RACE_UNKNOWN_ORIGIN bool "Report races of unknown origin" @@ -103,13 +102,12 @@ config KCSAN_REPORT_VALUE_CHANGE_ONLY bool "Only report races where watcher observed a data value change" default y help - If enabled and a conflicting write is observed via watchpoint, but + If enabled and a conflicting write is observed via a watchpoint, but the data value of the memory location was observed to remain unchanged, do not report the data race. config KCSAN_IGNORE_ATOMICS bool "Do not instrument marked atomic accesses" - default n help If enabled, never instruments marked atomic accesses. This results in not reporting data races where one access is atomic and the other is -- GitLab From d47715f50e833f12c5e829ce9dcc4a65104fa74f Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Tue, 19 Nov 2019 19:57:42 +0100 Subject: [PATCH 0011/1971] kcsan, ubsan: Make KCSAN+UBSAN work together Context: http://lkml.kernel.org/r/fb7e25d8-aba4-3dcf-7761-cb7ecb3ebb71@infradead.org Reported-by: Randy Dunlap Signed-off-by: Marco Elver Acked-by: Randy Dunlap # build-tested Signed-off-by: Paul E. McKenney --- kernel/kcsan/Makefile | 1 + lib/Makefile | 1 + 2 files changed, 2 insertions(+) diff --git a/kernel/kcsan/Makefile b/kernel/kcsan/Makefile index dd15b62ec0b58..df6b7799e4927 100644 --- a/kernel/kcsan/Makefile +++ b/kernel/kcsan/Makefile @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 KCSAN_SANITIZE := n KCOV_INSTRUMENT := n +UBSAN_SANITIZE := n CFLAGS_REMOVE_core.o = $(CC_FLAGS_FTRACE) diff --git a/lib/Makefile b/lib/Makefile index 778ab704e3ad5..9d5bda950f5f0 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -279,6 +279,7 @@ obj-$(CONFIG_UBSAN) += ubsan.o UBSAN_SANITIZE_ubsan.o := n KASAN_SANITIZE_ubsan.o := n +KCSAN_SANITIZE_ubsan.o := n CFLAGS_ubsan.o := $(call cc-option, -fno-stack-protector) $(DISABLE_STACKLEAK_PLUGIN) obj-$(CONFIG_SBITMAP) += sbitmap.o -- GitLab From c020395b6634b7a674ee6aa91a971b08e268caba Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Tue, 26 Nov 2019 15:04:04 +0100 Subject: [PATCH 0012/1971] asm-generic/atomic: Use __always_inline for pure wrappers Prefer __always_inline for atomic wrappers. When building for size (CC_OPTIMIZE_FOR_SIZE), some compilers appear to be less inclined to inline even relatively small static inline functions that are assumed to be inlinable such as atomic ops. This can cause problems, for example in UACCESS regions. By using __always_inline, we let the real implementation and not the wrapper determine the final inlining preference. 
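As an illustration of the pattern (a minimal user-space sketch, not kernel code: the wrapper name, the arch_*() stand-in and main() are invented for this example, and the __always_inline definition only mirrors what the kernel's compiler headers provide):

  #include <stdio.h>

  /* Roughly what the kernel's __always_inline expands to. */
  #define __always_inline inline __attribute__((__always_inline__))

  /* Stand-in for the real arch implementation; it decides its own inlining. */
  static inline int arch_atomic_read_example(const volatile int *v)
  {
          return *v;
  }

  /*
   * Pure wrapper: it adds no code of its own.  With plain "inline" a compiler
   * building with -Os may still emit it out of line; __always_inline takes
   * that choice away from the wrapper, so only the real implementation
   * determines the final inlining.
   */
  static __always_inline int atomic_read_example(const volatile int *v)
  {
          return arch_atomic_read_example(v);
  }

  int main(void)
  {
          volatile int x = 42;

          printf("%d\n", atomic_read_example(&x));
          return 0;
  }
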
For x86 tinyconfig we observe: - vmlinux baseline: 1316204 - vmlinux with patch: 1315988 (-216 bytes) This came up when addressing UACCESS warnings with CC_OPTIMIZE_FOR_SIZE in the KCSAN runtime: http://lkml.kernel.org/r/58708908-84a0-0a81-a836-ad97e33dbb62@infradead.org Reported-by: Randy Dunlap Signed-off-by: Marco Elver Acked-by: Mark Rutland Signed-off-by: Paul E. McKenney --- include/asm-generic/atomic-instrumented.h | 335 +++++++++++----------- include/asm-generic/atomic-long.h | 331 ++++++++++----------- scripts/atomic/gen-atomic-instrumented.sh | 7 +- scripts/atomic/gen-atomic-long.sh | 3 +- 4 files changed, 340 insertions(+), 336 deletions(-) diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h index 3dc0f38544f65..63869ded73ac1 100644 --- a/include/asm-generic/atomic-instrumented.h +++ b/include/asm-generic/atomic-instrumented.h @@ -18,22 +18,23 @@ #define _ASM_GENERIC_ATOMIC_INSTRUMENTED_H #include +#include #include #include -static inline void __atomic_check_read(const volatile void *v, size_t size) +static __always_inline void __atomic_check_read(const volatile void *v, size_t size) { kasan_check_read(v, size); kcsan_check_atomic_read(v, size); } -static inline void __atomic_check_write(const volatile void *v, size_t size) +static __always_inline void __atomic_check_write(const volatile void *v, size_t size) { kasan_check_write(v, size); kcsan_check_atomic_write(v, size); } -static inline int +static __always_inline int atomic_read(const atomic_t *v) { __atomic_check_read(v, sizeof(*v)); @@ -42,7 +43,7 @@ atomic_read(const atomic_t *v) #define atomic_read atomic_read #if defined(arch_atomic_read_acquire) -static inline int +static __always_inline int atomic_read_acquire(const atomic_t *v) { __atomic_check_read(v, sizeof(*v)); @@ -51,7 +52,7 @@ atomic_read_acquire(const atomic_t *v) #define atomic_read_acquire atomic_read_acquire #endif -static inline void +static __always_inline void atomic_set(atomic_t *v, int i) { __atomic_check_write(v, sizeof(*v)); @@ -60,7 +61,7 @@ atomic_set(atomic_t *v, int i) #define atomic_set atomic_set #if defined(arch_atomic_set_release) -static inline void +static __always_inline void atomic_set_release(atomic_t *v, int i) { __atomic_check_write(v, sizeof(*v)); @@ -69,7 +70,7 @@ atomic_set_release(atomic_t *v, int i) #define atomic_set_release atomic_set_release #endif -static inline void +static __always_inline void atomic_add(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -78,7 +79,7 @@ atomic_add(int i, atomic_t *v) #define atomic_add atomic_add #if !defined(arch_atomic_add_return_relaxed) || defined(arch_atomic_add_return) -static inline int +static __always_inline int atomic_add_return(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -88,7 +89,7 @@ atomic_add_return(int i, atomic_t *v) #endif #if defined(arch_atomic_add_return_acquire) -static inline int +static __always_inline int atomic_add_return_acquire(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -98,7 +99,7 @@ atomic_add_return_acquire(int i, atomic_t *v) #endif #if defined(arch_atomic_add_return_release) -static inline int +static __always_inline int atomic_add_return_release(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -108,7 +109,7 @@ atomic_add_return_release(int i, atomic_t *v) #endif #if defined(arch_atomic_add_return_relaxed) -static inline int +static __always_inline int atomic_add_return_relaxed(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -118,7 +119,7 
@@ atomic_add_return_relaxed(int i, atomic_t *v) #endif #if !defined(arch_atomic_fetch_add_relaxed) || defined(arch_atomic_fetch_add) -static inline int +static __always_inline int atomic_fetch_add(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -128,7 +129,7 @@ atomic_fetch_add(int i, atomic_t *v) #endif #if defined(arch_atomic_fetch_add_acquire) -static inline int +static __always_inline int atomic_fetch_add_acquire(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -138,7 +139,7 @@ atomic_fetch_add_acquire(int i, atomic_t *v) #endif #if defined(arch_atomic_fetch_add_release) -static inline int +static __always_inline int atomic_fetch_add_release(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -148,7 +149,7 @@ atomic_fetch_add_release(int i, atomic_t *v) #endif #if defined(arch_atomic_fetch_add_relaxed) -static inline int +static __always_inline int atomic_fetch_add_relaxed(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -157,7 +158,7 @@ atomic_fetch_add_relaxed(int i, atomic_t *v) #define atomic_fetch_add_relaxed atomic_fetch_add_relaxed #endif -static inline void +static __always_inline void atomic_sub(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -166,7 +167,7 @@ atomic_sub(int i, atomic_t *v) #define atomic_sub atomic_sub #if !defined(arch_atomic_sub_return_relaxed) || defined(arch_atomic_sub_return) -static inline int +static __always_inline int atomic_sub_return(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -176,7 +177,7 @@ atomic_sub_return(int i, atomic_t *v) #endif #if defined(arch_atomic_sub_return_acquire) -static inline int +static __always_inline int atomic_sub_return_acquire(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -186,7 +187,7 @@ atomic_sub_return_acquire(int i, atomic_t *v) #endif #if defined(arch_atomic_sub_return_release) -static inline int +static __always_inline int atomic_sub_return_release(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -196,7 +197,7 @@ atomic_sub_return_release(int i, atomic_t *v) #endif #if defined(arch_atomic_sub_return_relaxed) -static inline int +static __always_inline int atomic_sub_return_relaxed(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -206,7 +207,7 @@ atomic_sub_return_relaxed(int i, atomic_t *v) #endif #if !defined(arch_atomic_fetch_sub_relaxed) || defined(arch_atomic_fetch_sub) -static inline int +static __always_inline int atomic_fetch_sub(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -216,7 +217,7 @@ atomic_fetch_sub(int i, atomic_t *v) #endif #if defined(arch_atomic_fetch_sub_acquire) -static inline int +static __always_inline int atomic_fetch_sub_acquire(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -226,7 +227,7 @@ atomic_fetch_sub_acquire(int i, atomic_t *v) #endif #if defined(arch_atomic_fetch_sub_release) -static inline int +static __always_inline int atomic_fetch_sub_release(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -236,7 +237,7 @@ atomic_fetch_sub_release(int i, atomic_t *v) #endif #if defined(arch_atomic_fetch_sub_relaxed) -static inline int +static __always_inline int atomic_fetch_sub_relaxed(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -246,7 +247,7 @@ atomic_fetch_sub_relaxed(int i, atomic_t *v) #endif #if defined(arch_atomic_inc) -static inline void +static __always_inline void atomic_inc(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -256,7 +257,7 @@ atomic_inc(atomic_t *v) #endif #if 
defined(arch_atomic_inc_return) -static inline int +static __always_inline int atomic_inc_return(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -266,7 +267,7 @@ atomic_inc_return(atomic_t *v) #endif #if defined(arch_atomic_inc_return_acquire) -static inline int +static __always_inline int atomic_inc_return_acquire(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -276,7 +277,7 @@ atomic_inc_return_acquire(atomic_t *v) #endif #if defined(arch_atomic_inc_return_release) -static inline int +static __always_inline int atomic_inc_return_release(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -286,7 +287,7 @@ atomic_inc_return_release(atomic_t *v) #endif #if defined(arch_atomic_inc_return_relaxed) -static inline int +static __always_inline int atomic_inc_return_relaxed(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -296,7 +297,7 @@ atomic_inc_return_relaxed(atomic_t *v) #endif #if defined(arch_atomic_fetch_inc) -static inline int +static __always_inline int atomic_fetch_inc(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -306,7 +307,7 @@ atomic_fetch_inc(atomic_t *v) #endif #if defined(arch_atomic_fetch_inc_acquire) -static inline int +static __always_inline int atomic_fetch_inc_acquire(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -316,7 +317,7 @@ atomic_fetch_inc_acquire(atomic_t *v) #endif #if defined(arch_atomic_fetch_inc_release) -static inline int +static __always_inline int atomic_fetch_inc_release(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -326,7 +327,7 @@ atomic_fetch_inc_release(atomic_t *v) #endif #if defined(arch_atomic_fetch_inc_relaxed) -static inline int +static __always_inline int atomic_fetch_inc_relaxed(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -336,7 +337,7 @@ atomic_fetch_inc_relaxed(atomic_t *v) #endif #if defined(arch_atomic_dec) -static inline void +static __always_inline void atomic_dec(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -346,7 +347,7 @@ atomic_dec(atomic_t *v) #endif #if defined(arch_atomic_dec_return) -static inline int +static __always_inline int atomic_dec_return(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -356,7 +357,7 @@ atomic_dec_return(atomic_t *v) #endif #if defined(arch_atomic_dec_return_acquire) -static inline int +static __always_inline int atomic_dec_return_acquire(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -366,7 +367,7 @@ atomic_dec_return_acquire(atomic_t *v) #endif #if defined(arch_atomic_dec_return_release) -static inline int +static __always_inline int atomic_dec_return_release(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -376,7 +377,7 @@ atomic_dec_return_release(atomic_t *v) #endif #if defined(arch_atomic_dec_return_relaxed) -static inline int +static __always_inline int atomic_dec_return_relaxed(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -386,7 +387,7 @@ atomic_dec_return_relaxed(atomic_t *v) #endif #if defined(arch_atomic_fetch_dec) -static inline int +static __always_inline int atomic_fetch_dec(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -396,7 +397,7 @@ atomic_fetch_dec(atomic_t *v) #endif #if defined(arch_atomic_fetch_dec_acquire) -static inline int +static __always_inline int atomic_fetch_dec_acquire(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -406,7 +407,7 @@ atomic_fetch_dec_acquire(atomic_t *v) #endif #if defined(arch_atomic_fetch_dec_release) -static inline int +static __always_inline int atomic_fetch_dec_release(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -416,7 +417,7 @@ 
atomic_fetch_dec_release(atomic_t *v) #endif #if defined(arch_atomic_fetch_dec_relaxed) -static inline int +static __always_inline int atomic_fetch_dec_relaxed(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -425,7 +426,7 @@ atomic_fetch_dec_relaxed(atomic_t *v) #define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed #endif -static inline void +static __always_inline void atomic_and(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -434,7 +435,7 @@ atomic_and(int i, atomic_t *v) #define atomic_and atomic_and #if !defined(arch_atomic_fetch_and_relaxed) || defined(arch_atomic_fetch_and) -static inline int +static __always_inline int atomic_fetch_and(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -444,7 +445,7 @@ atomic_fetch_and(int i, atomic_t *v) #endif #if defined(arch_atomic_fetch_and_acquire) -static inline int +static __always_inline int atomic_fetch_and_acquire(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -454,7 +455,7 @@ atomic_fetch_and_acquire(int i, atomic_t *v) #endif #if defined(arch_atomic_fetch_and_release) -static inline int +static __always_inline int atomic_fetch_and_release(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -464,7 +465,7 @@ atomic_fetch_and_release(int i, atomic_t *v) #endif #if defined(arch_atomic_fetch_and_relaxed) -static inline int +static __always_inline int atomic_fetch_and_relaxed(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -474,7 +475,7 @@ atomic_fetch_and_relaxed(int i, atomic_t *v) #endif #if defined(arch_atomic_andnot) -static inline void +static __always_inline void atomic_andnot(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -484,7 +485,7 @@ atomic_andnot(int i, atomic_t *v) #endif #if defined(arch_atomic_fetch_andnot) -static inline int +static __always_inline int atomic_fetch_andnot(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -494,7 +495,7 @@ atomic_fetch_andnot(int i, atomic_t *v) #endif #if defined(arch_atomic_fetch_andnot_acquire) -static inline int +static __always_inline int atomic_fetch_andnot_acquire(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -504,7 +505,7 @@ atomic_fetch_andnot_acquire(int i, atomic_t *v) #endif #if defined(arch_atomic_fetch_andnot_release) -static inline int +static __always_inline int atomic_fetch_andnot_release(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -514,7 +515,7 @@ atomic_fetch_andnot_release(int i, atomic_t *v) #endif #if defined(arch_atomic_fetch_andnot_relaxed) -static inline int +static __always_inline int atomic_fetch_andnot_relaxed(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -523,7 +524,7 @@ atomic_fetch_andnot_relaxed(int i, atomic_t *v) #define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed #endif -static inline void +static __always_inline void atomic_or(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -532,7 +533,7 @@ atomic_or(int i, atomic_t *v) #define atomic_or atomic_or #if !defined(arch_atomic_fetch_or_relaxed) || defined(arch_atomic_fetch_or) -static inline int +static __always_inline int atomic_fetch_or(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -542,7 +543,7 @@ atomic_fetch_or(int i, atomic_t *v) #endif #if defined(arch_atomic_fetch_or_acquire) -static inline int +static __always_inline int atomic_fetch_or_acquire(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -552,7 +553,7 @@ atomic_fetch_or_acquire(int i, atomic_t *v) #endif #if defined(arch_atomic_fetch_or_release) 
-static inline int +static __always_inline int atomic_fetch_or_release(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -562,7 +563,7 @@ atomic_fetch_or_release(int i, atomic_t *v) #endif #if defined(arch_atomic_fetch_or_relaxed) -static inline int +static __always_inline int atomic_fetch_or_relaxed(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -571,7 +572,7 @@ atomic_fetch_or_relaxed(int i, atomic_t *v) #define atomic_fetch_or_relaxed atomic_fetch_or_relaxed #endif -static inline void +static __always_inline void atomic_xor(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -580,7 +581,7 @@ atomic_xor(int i, atomic_t *v) #define atomic_xor atomic_xor #if !defined(arch_atomic_fetch_xor_relaxed) || defined(arch_atomic_fetch_xor) -static inline int +static __always_inline int atomic_fetch_xor(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -590,7 +591,7 @@ atomic_fetch_xor(int i, atomic_t *v) #endif #if defined(arch_atomic_fetch_xor_acquire) -static inline int +static __always_inline int atomic_fetch_xor_acquire(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -600,7 +601,7 @@ atomic_fetch_xor_acquire(int i, atomic_t *v) #endif #if defined(arch_atomic_fetch_xor_release) -static inline int +static __always_inline int atomic_fetch_xor_release(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -610,7 +611,7 @@ atomic_fetch_xor_release(int i, atomic_t *v) #endif #if defined(arch_atomic_fetch_xor_relaxed) -static inline int +static __always_inline int atomic_fetch_xor_relaxed(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -620,7 +621,7 @@ atomic_fetch_xor_relaxed(int i, atomic_t *v) #endif #if !defined(arch_atomic_xchg_relaxed) || defined(arch_atomic_xchg) -static inline int +static __always_inline int atomic_xchg(atomic_t *v, int i) { __atomic_check_write(v, sizeof(*v)); @@ -630,7 +631,7 @@ atomic_xchg(atomic_t *v, int i) #endif #if defined(arch_atomic_xchg_acquire) -static inline int +static __always_inline int atomic_xchg_acquire(atomic_t *v, int i) { __atomic_check_write(v, sizeof(*v)); @@ -640,7 +641,7 @@ atomic_xchg_acquire(atomic_t *v, int i) #endif #if defined(arch_atomic_xchg_release) -static inline int +static __always_inline int atomic_xchg_release(atomic_t *v, int i) { __atomic_check_write(v, sizeof(*v)); @@ -650,7 +651,7 @@ atomic_xchg_release(atomic_t *v, int i) #endif #if defined(arch_atomic_xchg_relaxed) -static inline int +static __always_inline int atomic_xchg_relaxed(atomic_t *v, int i) { __atomic_check_write(v, sizeof(*v)); @@ -660,7 +661,7 @@ atomic_xchg_relaxed(atomic_t *v, int i) #endif #if !defined(arch_atomic_cmpxchg_relaxed) || defined(arch_atomic_cmpxchg) -static inline int +static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new) { __atomic_check_write(v, sizeof(*v)); @@ -670,7 +671,7 @@ atomic_cmpxchg(atomic_t *v, int old, int new) #endif #if defined(arch_atomic_cmpxchg_acquire) -static inline int +static __always_inline int atomic_cmpxchg_acquire(atomic_t *v, int old, int new) { __atomic_check_write(v, sizeof(*v)); @@ -680,7 +681,7 @@ atomic_cmpxchg_acquire(atomic_t *v, int old, int new) #endif #if defined(arch_atomic_cmpxchg_release) -static inline int +static __always_inline int atomic_cmpxchg_release(atomic_t *v, int old, int new) { __atomic_check_write(v, sizeof(*v)); @@ -690,7 +691,7 @@ atomic_cmpxchg_release(atomic_t *v, int old, int new) #endif #if defined(arch_atomic_cmpxchg_relaxed) -static inline int +static __always_inline int 
atomic_cmpxchg_relaxed(atomic_t *v, int old, int new) { __atomic_check_write(v, sizeof(*v)); @@ -700,7 +701,7 @@ atomic_cmpxchg_relaxed(atomic_t *v, int old, int new) #endif #if defined(arch_atomic_try_cmpxchg) -static inline bool +static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new) { __atomic_check_write(v, sizeof(*v)); @@ -711,7 +712,7 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new) #endif #if defined(arch_atomic_try_cmpxchg_acquire) -static inline bool +static __always_inline bool atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) { __atomic_check_write(v, sizeof(*v)); @@ -722,7 +723,7 @@ atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) #endif #if defined(arch_atomic_try_cmpxchg_release) -static inline bool +static __always_inline bool atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) { __atomic_check_write(v, sizeof(*v)); @@ -733,7 +734,7 @@ atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) #endif #if defined(arch_atomic_try_cmpxchg_relaxed) -static inline bool +static __always_inline bool atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) { __atomic_check_write(v, sizeof(*v)); @@ -744,7 +745,7 @@ atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) #endif #if defined(arch_atomic_sub_and_test) -static inline bool +static __always_inline bool atomic_sub_and_test(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -754,7 +755,7 @@ atomic_sub_and_test(int i, atomic_t *v) #endif #if defined(arch_atomic_dec_and_test) -static inline bool +static __always_inline bool atomic_dec_and_test(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -764,7 +765,7 @@ atomic_dec_and_test(atomic_t *v) #endif #if defined(arch_atomic_inc_and_test) -static inline bool +static __always_inline bool atomic_inc_and_test(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -774,7 +775,7 @@ atomic_inc_and_test(atomic_t *v) #endif #if defined(arch_atomic_add_negative) -static inline bool +static __always_inline bool atomic_add_negative(int i, atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -784,7 +785,7 @@ atomic_add_negative(int i, atomic_t *v) #endif #if defined(arch_atomic_fetch_add_unless) -static inline int +static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) { __atomic_check_write(v, sizeof(*v)); @@ -794,7 +795,7 @@ atomic_fetch_add_unless(atomic_t *v, int a, int u) #endif #if defined(arch_atomic_add_unless) -static inline bool +static __always_inline bool atomic_add_unless(atomic_t *v, int a, int u) { __atomic_check_write(v, sizeof(*v)); @@ -804,7 +805,7 @@ atomic_add_unless(atomic_t *v, int a, int u) #endif #if defined(arch_atomic_inc_not_zero) -static inline bool +static __always_inline bool atomic_inc_not_zero(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -814,7 +815,7 @@ atomic_inc_not_zero(atomic_t *v) #endif #if defined(arch_atomic_inc_unless_negative) -static inline bool +static __always_inline bool atomic_inc_unless_negative(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -824,7 +825,7 @@ atomic_inc_unless_negative(atomic_t *v) #endif #if defined(arch_atomic_dec_unless_positive) -static inline bool +static __always_inline bool atomic_dec_unless_positive(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -834,7 +835,7 @@ atomic_dec_unless_positive(atomic_t *v) #endif #if defined(arch_atomic_dec_if_positive) -static inline int +static __always_inline int atomic_dec_if_positive(atomic_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -843,7 +844,7 @@ 
atomic_dec_if_positive(atomic_t *v) #define atomic_dec_if_positive atomic_dec_if_positive #endif -static inline s64 +static __always_inline s64 atomic64_read(const atomic64_t *v) { __atomic_check_read(v, sizeof(*v)); @@ -852,7 +853,7 @@ atomic64_read(const atomic64_t *v) #define atomic64_read atomic64_read #if defined(arch_atomic64_read_acquire) -static inline s64 +static __always_inline s64 atomic64_read_acquire(const atomic64_t *v) { __atomic_check_read(v, sizeof(*v)); @@ -861,7 +862,7 @@ atomic64_read_acquire(const atomic64_t *v) #define atomic64_read_acquire atomic64_read_acquire #endif -static inline void +static __always_inline void atomic64_set(atomic64_t *v, s64 i) { __atomic_check_write(v, sizeof(*v)); @@ -870,7 +871,7 @@ atomic64_set(atomic64_t *v, s64 i) #define atomic64_set atomic64_set #if defined(arch_atomic64_set_release) -static inline void +static __always_inline void atomic64_set_release(atomic64_t *v, s64 i) { __atomic_check_write(v, sizeof(*v)); @@ -879,7 +880,7 @@ atomic64_set_release(atomic64_t *v, s64 i) #define atomic64_set_release atomic64_set_release #endif -static inline void +static __always_inline void atomic64_add(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -888,7 +889,7 @@ atomic64_add(s64 i, atomic64_t *v) #define atomic64_add atomic64_add #if !defined(arch_atomic64_add_return_relaxed) || defined(arch_atomic64_add_return) -static inline s64 +static __always_inline s64 atomic64_add_return(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -898,7 +899,7 @@ atomic64_add_return(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_add_return_acquire) -static inline s64 +static __always_inline s64 atomic64_add_return_acquire(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -908,7 +909,7 @@ atomic64_add_return_acquire(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_add_return_release) -static inline s64 +static __always_inline s64 atomic64_add_return_release(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -918,7 +919,7 @@ atomic64_add_return_release(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_add_return_relaxed) -static inline s64 +static __always_inline s64 atomic64_add_return_relaxed(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -928,7 +929,7 @@ atomic64_add_return_relaxed(s64 i, atomic64_t *v) #endif #if !defined(arch_atomic64_fetch_add_relaxed) || defined(arch_atomic64_fetch_add) -static inline s64 +static __always_inline s64 atomic64_fetch_add(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -938,7 +939,7 @@ atomic64_fetch_add(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_fetch_add_acquire) -static inline s64 +static __always_inline s64 atomic64_fetch_add_acquire(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -948,7 +949,7 @@ atomic64_fetch_add_acquire(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_fetch_add_release) -static inline s64 +static __always_inline s64 atomic64_fetch_add_release(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -958,7 +959,7 @@ atomic64_fetch_add_release(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_fetch_add_relaxed) -static inline s64 +static __always_inline s64 atomic64_fetch_add_relaxed(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -967,7 +968,7 @@ atomic64_fetch_add_relaxed(s64 i, atomic64_t *v) #define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed #endif -static inline void +static __always_inline void atomic64_sub(s64 i, 
atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -976,7 +977,7 @@ atomic64_sub(s64 i, atomic64_t *v) #define atomic64_sub atomic64_sub #if !defined(arch_atomic64_sub_return_relaxed) || defined(arch_atomic64_sub_return) -static inline s64 +static __always_inline s64 atomic64_sub_return(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -986,7 +987,7 @@ atomic64_sub_return(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_sub_return_acquire) -static inline s64 +static __always_inline s64 atomic64_sub_return_acquire(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -996,7 +997,7 @@ atomic64_sub_return_acquire(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_sub_return_release) -static inline s64 +static __always_inline s64 atomic64_sub_return_release(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1006,7 +1007,7 @@ atomic64_sub_return_release(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_sub_return_relaxed) -static inline s64 +static __always_inline s64 atomic64_sub_return_relaxed(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1016,7 +1017,7 @@ atomic64_sub_return_relaxed(s64 i, atomic64_t *v) #endif #if !defined(arch_atomic64_fetch_sub_relaxed) || defined(arch_atomic64_fetch_sub) -static inline s64 +static __always_inline s64 atomic64_fetch_sub(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1026,7 +1027,7 @@ atomic64_fetch_sub(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_fetch_sub_acquire) -static inline s64 +static __always_inline s64 atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1036,7 +1037,7 @@ atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_fetch_sub_release) -static inline s64 +static __always_inline s64 atomic64_fetch_sub_release(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1046,7 +1047,7 @@ atomic64_fetch_sub_release(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_fetch_sub_relaxed) -static inline s64 +static __always_inline s64 atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1056,7 +1057,7 @@ atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_inc) -static inline void +static __always_inline void atomic64_inc(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1066,7 +1067,7 @@ atomic64_inc(atomic64_t *v) #endif #if defined(arch_atomic64_inc_return) -static inline s64 +static __always_inline s64 atomic64_inc_return(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1076,7 +1077,7 @@ atomic64_inc_return(atomic64_t *v) #endif #if defined(arch_atomic64_inc_return_acquire) -static inline s64 +static __always_inline s64 atomic64_inc_return_acquire(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1086,7 +1087,7 @@ atomic64_inc_return_acquire(atomic64_t *v) #endif #if defined(arch_atomic64_inc_return_release) -static inline s64 +static __always_inline s64 atomic64_inc_return_release(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1096,7 +1097,7 @@ atomic64_inc_return_release(atomic64_t *v) #endif #if defined(arch_atomic64_inc_return_relaxed) -static inline s64 +static __always_inline s64 atomic64_inc_return_relaxed(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1106,7 +1107,7 @@ atomic64_inc_return_relaxed(atomic64_t *v) #endif #if defined(arch_atomic64_fetch_inc) -static inline s64 +static __always_inline s64 atomic64_fetch_inc(atomic64_t *v) { 
__atomic_check_write(v, sizeof(*v)); @@ -1116,7 +1117,7 @@ atomic64_fetch_inc(atomic64_t *v) #endif #if defined(arch_atomic64_fetch_inc_acquire) -static inline s64 +static __always_inline s64 atomic64_fetch_inc_acquire(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1126,7 +1127,7 @@ atomic64_fetch_inc_acquire(atomic64_t *v) #endif #if defined(arch_atomic64_fetch_inc_release) -static inline s64 +static __always_inline s64 atomic64_fetch_inc_release(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1136,7 +1137,7 @@ atomic64_fetch_inc_release(atomic64_t *v) #endif #if defined(arch_atomic64_fetch_inc_relaxed) -static inline s64 +static __always_inline s64 atomic64_fetch_inc_relaxed(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1146,7 +1147,7 @@ atomic64_fetch_inc_relaxed(atomic64_t *v) #endif #if defined(arch_atomic64_dec) -static inline void +static __always_inline void atomic64_dec(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1156,7 +1157,7 @@ atomic64_dec(atomic64_t *v) #endif #if defined(arch_atomic64_dec_return) -static inline s64 +static __always_inline s64 atomic64_dec_return(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1166,7 +1167,7 @@ atomic64_dec_return(atomic64_t *v) #endif #if defined(arch_atomic64_dec_return_acquire) -static inline s64 +static __always_inline s64 atomic64_dec_return_acquire(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1176,7 +1177,7 @@ atomic64_dec_return_acquire(atomic64_t *v) #endif #if defined(arch_atomic64_dec_return_release) -static inline s64 +static __always_inline s64 atomic64_dec_return_release(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1186,7 +1187,7 @@ atomic64_dec_return_release(atomic64_t *v) #endif #if defined(arch_atomic64_dec_return_relaxed) -static inline s64 +static __always_inline s64 atomic64_dec_return_relaxed(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1196,7 +1197,7 @@ atomic64_dec_return_relaxed(atomic64_t *v) #endif #if defined(arch_atomic64_fetch_dec) -static inline s64 +static __always_inline s64 atomic64_fetch_dec(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1206,7 +1207,7 @@ atomic64_fetch_dec(atomic64_t *v) #endif #if defined(arch_atomic64_fetch_dec_acquire) -static inline s64 +static __always_inline s64 atomic64_fetch_dec_acquire(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1216,7 +1217,7 @@ atomic64_fetch_dec_acquire(atomic64_t *v) #endif #if defined(arch_atomic64_fetch_dec_release) -static inline s64 +static __always_inline s64 atomic64_fetch_dec_release(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1226,7 +1227,7 @@ atomic64_fetch_dec_release(atomic64_t *v) #endif #if defined(arch_atomic64_fetch_dec_relaxed) -static inline s64 +static __always_inline s64 atomic64_fetch_dec_relaxed(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1235,7 +1236,7 @@ atomic64_fetch_dec_relaxed(atomic64_t *v) #define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed #endif -static inline void +static __always_inline void atomic64_and(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1244,7 +1245,7 @@ atomic64_and(s64 i, atomic64_t *v) #define atomic64_and atomic64_and #if !defined(arch_atomic64_fetch_and_relaxed) || defined(arch_atomic64_fetch_and) -static inline s64 +static __always_inline s64 atomic64_fetch_and(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1254,7 +1255,7 @@ atomic64_fetch_and(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_fetch_and_acquire) 
-static inline s64 +static __always_inline s64 atomic64_fetch_and_acquire(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1264,7 +1265,7 @@ atomic64_fetch_and_acquire(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_fetch_and_release) -static inline s64 +static __always_inline s64 atomic64_fetch_and_release(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1274,7 +1275,7 @@ atomic64_fetch_and_release(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_fetch_and_relaxed) -static inline s64 +static __always_inline s64 atomic64_fetch_and_relaxed(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1284,7 +1285,7 @@ atomic64_fetch_and_relaxed(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_andnot) -static inline void +static __always_inline void atomic64_andnot(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1294,7 +1295,7 @@ atomic64_andnot(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_fetch_andnot) -static inline s64 +static __always_inline s64 atomic64_fetch_andnot(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1304,7 +1305,7 @@ atomic64_fetch_andnot(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_fetch_andnot_acquire) -static inline s64 +static __always_inline s64 atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1314,7 +1315,7 @@ atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_fetch_andnot_release) -static inline s64 +static __always_inline s64 atomic64_fetch_andnot_release(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1324,7 +1325,7 @@ atomic64_fetch_andnot_release(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_fetch_andnot_relaxed) -static inline s64 +static __always_inline s64 atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1333,7 +1334,7 @@ atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) #define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed #endif -static inline void +static __always_inline void atomic64_or(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1342,7 +1343,7 @@ atomic64_or(s64 i, atomic64_t *v) #define atomic64_or atomic64_or #if !defined(arch_atomic64_fetch_or_relaxed) || defined(arch_atomic64_fetch_or) -static inline s64 +static __always_inline s64 atomic64_fetch_or(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1352,7 +1353,7 @@ atomic64_fetch_or(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_fetch_or_acquire) -static inline s64 +static __always_inline s64 atomic64_fetch_or_acquire(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1362,7 +1363,7 @@ atomic64_fetch_or_acquire(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_fetch_or_release) -static inline s64 +static __always_inline s64 atomic64_fetch_or_release(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1372,7 +1373,7 @@ atomic64_fetch_or_release(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_fetch_or_relaxed) -static inline s64 +static __always_inline s64 atomic64_fetch_or_relaxed(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1381,7 +1382,7 @@ atomic64_fetch_or_relaxed(s64 i, atomic64_t *v) #define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed #endif -static inline void +static __always_inline void atomic64_xor(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1390,7 +1391,7 @@ atomic64_xor(s64 i, atomic64_t 
*v) #define atomic64_xor atomic64_xor #if !defined(arch_atomic64_fetch_xor_relaxed) || defined(arch_atomic64_fetch_xor) -static inline s64 +static __always_inline s64 atomic64_fetch_xor(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1400,7 +1401,7 @@ atomic64_fetch_xor(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_fetch_xor_acquire) -static inline s64 +static __always_inline s64 atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1410,7 +1411,7 @@ atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_fetch_xor_release) -static inline s64 +static __always_inline s64 atomic64_fetch_xor_release(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1420,7 +1421,7 @@ atomic64_fetch_xor_release(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_fetch_xor_relaxed) -static inline s64 +static __always_inline s64 atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1430,7 +1431,7 @@ atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v) #endif #if !defined(arch_atomic64_xchg_relaxed) || defined(arch_atomic64_xchg) -static inline s64 +static __always_inline s64 atomic64_xchg(atomic64_t *v, s64 i) { __atomic_check_write(v, sizeof(*v)); @@ -1440,7 +1441,7 @@ atomic64_xchg(atomic64_t *v, s64 i) #endif #if defined(arch_atomic64_xchg_acquire) -static inline s64 +static __always_inline s64 atomic64_xchg_acquire(atomic64_t *v, s64 i) { __atomic_check_write(v, sizeof(*v)); @@ -1450,7 +1451,7 @@ atomic64_xchg_acquire(atomic64_t *v, s64 i) #endif #if defined(arch_atomic64_xchg_release) -static inline s64 +static __always_inline s64 atomic64_xchg_release(atomic64_t *v, s64 i) { __atomic_check_write(v, sizeof(*v)); @@ -1460,7 +1461,7 @@ atomic64_xchg_release(atomic64_t *v, s64 i) #endif #if defined(arch_atomic64_xchg_relaxed) -static inline s64 +static __always_inline s64 atomic64_xchg_relaxed(atomic64_t *v, s64 i) { __atomic_check_write(v, sizeof(*v)); @@ -1470,7 +1471,7 @@ atomic64_xchg_relaxed(atomic64_t *v, s64 i) #endif #if !defined(arch_atomic64_cmpxchg_relaxed) || defined(arch_atomic64_cmpxchg) -static inline s64 +static __always_inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) { __atomic_check_write(v, sizeof(*v)); @@ -1480,7 +1481,7 @@ atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) #endif #if defined(arch_atomic64_cmpxchg_acquire) -static inline s64 +static __always_inline s64 atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) { __atomic_check_write(v, sizeof(*v)); @@ -1490,7 +1491,7 @@ atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) #endif #if defined(arch_atomic64_cmpxchg_release) -static inline s64 +static __always_inline s64 atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) { __atomic_check_write(v, sizeof(*v)); @@ -1500,7 +1501,7 @@ atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) #endif #if defined(arch_atomic64_cmpxchg_relaxed) -static inline s64 +static __always_inline s64 atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new) { __atomic_check_write(v, sizeof(*v)); @@ -1510,7 +1511,7 @@ atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new) #endif #if defined(arch_atomic64_try_cmpxchg) -static inline bool +static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) { __atomic_check_write(v, sizeof(*v)); @@ -1521,7 +1522,7 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) #endif #if defined(arch_atomic64_try_cmpxchg_acquire) -static inline bool +static __always_inline 
bool atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) { __atomic_check_write(v, sizeof(*v)); @@ -1532,7 +1533,7 @@ atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) #endif #if defined(arch_atomic64_try_cmpxchg_release) -static inline bool +static __always_inline bool atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) { __atomic_check_write(v, sizeof(*v)); @@ -1543,7 +1544,7 @@ atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) #endif #if defined(arch_atomic64_try_cmpxchg_relaxed) -static inline bool +static __always_inline bool atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) { __atomic_check_write(v, sizeof(*v)); @@ -1554,7 +1555,7 @@ atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) #endif #if defined(arch_atomic64_sub_and_test) -static inline bool +static __always_inline bool atomic64_sub_and_test(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1564,7 +1565,7 @@ atomic64_sub_and_test(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_dec_and_test) -static inline bool +static __always_inline bool atomic64_dec_and_test(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1574,7 +1575,7 @@ atomic64_dec_and_test(atomic64_t *v) #endif #if defined(arch_atomic64_inc_and_test) -static inline bool +static __always_inline bool atomic64_inc_and_test(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1584,7 +1585,7 @@ atomic64_inc_and_test(atomic64_t *v) #endif #if defined(arch_atomic64_add_negative) -static inline bool +static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1594,7 +1595,7 @@ atomic64_add_negative(s64 i, atomic64_t *v) #endif #if defined(arch_atomic64_fetch_add_unless) -static inline s64 +static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) { __atomic_check_write(v, sizeof(*v)); @@ -1604,7 +1605,7 @@ atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) #endif #if defined(arch_atomic64_add_unless) -static inline bool +static __always_inline bool atomic64_add_unless(atomic64_t *v, s64 a, s64 u) { __atomic_check_write(v, sizeof(*v)); @@ -1614,7 +1615,7 @@ atomic64_add_unless(atomic64_t *v, s64 a, s64 u) #endif #if defined(arch_atomic64_inc_not_zero) -static inline bool +static __always_inline bool atomic64_inc_not_zero(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1624,7 +1625,7 @@ atomic64_inc_not_zero(atomic64_t *v) #endif #if defined(arch_atomic64_inc_unless_negative) -static inline bool +static __always_inline bool atomic64_inc_unless_negative(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1634,7 +1635,7 @@ atomic64_inc_unless_negative(atomic64_t *v) #endif #if defined(arch_atomic64_dec_unless_positive) -static inline bool +static __always_inline bool atomic64_dec_unless_positive(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1644,7 +1645,7 @@ atomic64_dec_unless_positive(atomic64_t *v) #endif #if defined(arch_atomic64_dec_if_positive) -static inline s64 +static __always_inline s64 atomic64_dec_if_positive(atomic64_t *v) { __atomic_check_write(v, sizeof(*v)); @@ -1798,4 +1799,4 @@ atomic64_dec_if_positive(atomic64_t *v) }) #endif /* _ASM_GENERIC_ATOMIC_INSTRUMENTED_H */ -// beea41c2a0f2c69e4958ed71bf26f59740fa4b12 +// 7b7e2af0e75c8ecb6f02298a7075f503f30d244c diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h index 881c7e27af284..073cf40f431b8 100644 --- a/include/asm-generic/atomic-long.h +++ 
b/include/asm-generic/atomic-long.h @@ -6,6 +6,7 @@ #ifndef _ASM_GENERIC_ATOMIC_LONG_H #define _ASM_GENERIC_ATOMIC_LONG_H +#include #include #ifdef CONFIG_64BIT @@ -22,493 +23,493 @@ typedef atomic_t atomic_long_t; #ifdef CONFIG_64BIT -static inline long +static __always_inline long atomic_long_read(const atomic_long_t *v) { return atomic64_read(v); } -static inline long +static __always_inline long atomic_long_read_acquire(const atomic_long_t *v) { return atomic64_read_acquire(v); } -static inline void +static __always_inline void atomic_long_set(atomic_long_t *v, long i) { atomic64_set(v, i); } -static inline void +static __always_inline void atomic_long_set_release(atomic_long_t *v, long i) { atomic64_set_release(v, i); } -static inline void +static __always_inline void atomic_long_add(long i, atomic_long_t *v) { atomic64_add(i, v); } -static inline long +static __always_inline long atomic_long_add_return(long i, atomic_long_t *v) { return atomic64_add_return(i, v); } -static inline long +static __always_inline long atomic_long_add_return_acquire(long i, atomic_long_t *v) { return atomic64_add_return_acquire(i, v); } -static inline long +static __always_inline long atomic_long_add_return_release(long i, atomic_long_t *v) { return atomic64_add_return_release(i, v); } -static inline long +static __always_inline long atomic_long_add_return_relaxed(long i, atomic_long_t *v) { return atomic64_add_return_relaxed(i, v); } -static inline long +static __always_inline long atomic_long_fetch_add(long i, atomic_long_t *v) { return atomic64_fetch_add(i, v); } -static inline long +static __always_inline long atomic_long_fetch_add_acquire(long i, atomic_long_t *v) { return atomic64_fetch_add_acquire(i, v); } -static inline long +static __always_inline long atomic_long_fetch_add_release(long i, atomic_long_t *v) { return atomic64_fetch_add_release(i, v); } -static inline long +static __always_inline long atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) { return atomic64_fetch_add_relaxed(i, v); } -static inline void +static __always_inline void atomic_long_sub(long i, atomic_long_t *v) { atomic64_sub(i, v); } -static inline long +static __always_inline long atomic_long_sub_return(long i, atomic_long_t *v) { return atomic64_sub_return(i, v); } -static inline long +static __always_inline long atomic_long_sub_return_acquire(long i, atomic_long_t *v) { return atomic64_sub_return_acquire(i, v); } -static inline long +static __always_inline long atomic_long_sub_return_release(long i, atomic_long_t *v) { return atomic64_sub_return_release(i, v); } -static inline long +static __always_inline long atomic_long_sub_return_relaxed(long i, atomic_long_t *v) { return atomic64_sub_return_relaxed(i, v); } -static inline long +static __always_inline long atomic_long_fetch_sub(long i, atomic_long_t *v) { return atomic64_fetch_sub(i, v); } -static inline long +static __always_inline long atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) { return atomic64_fetch_sub_acquire(i, v); } -static inline long +static __always_inline long atomic_long_fetch_sub_release(long i, atomic_long_t *v) { return atomic64_fetch_sub_release(i, v); } -static inline long +static __always_inline long atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) { return atomic64_fetch_sub_relaxed(i, v); } -static inline void +static __always_inline void atomic_long_inc(atomic_long_t *v) { atomic64_inc(v); } -static inline long +static __always_inline long atomic_long_inc_return(atomic_long_t *v) { return atomic64_inc_return(v); } -static 
inline long +static __always_inline long atomic_long_inc_return_acquire(atomic_long_t *v) { return atomic64_inc_return_acquire(v); } -static inline long +static __always_inline long atomic_long_inc_return_release(atomic_long_t *v) { return atomic64_inc_return_release(v); } -static inline long +static __always_inline long atomic_long_inc_return_relaxed(atomic_long_t *v) { return atomic64_inc_return_relaxed(v); } -static inline long +static __always_inline long atomic_long_fetch_inc(atomic_long_t *v) { return atomic64_fetch_inc(v); } -static inline long +static __always_inline long atomic_long_fetch_inc_acquire(atomic_long_t *v) { return atomic64_fetch_inc_acquire(v); } -static inline long +static __always_inline long atomic_long_fetch_inc_release(atomic_long_t *v) { return atomic64_fetch_inc_release(v); } -static inline long +static __always_inline long atomic_long_fetch_inc_relaxed(atomic_long_t *v) { return atomic64_fetch_inc_relaxed(v); } -static inline void +static __always_inline void atomic_long_dec(atomic_long_t *v) { atomic64_dec(v); } -static inline long +static __always_inline long atomic_long_dec_return(atomic_long_t *v) { return atomic64_dec_return(v); } -static inline long +static __always_inline long atomic_long_dec_return_acquire(atomic_long_t *v) { return atomic64_dec_return_acquire(v); } -static inline long +static __always_inline long atomic_long_dec_return_release(atomic_long_t *v) { return atomic64_dec_return_release(v); } -static inline long +static __always_inline long atomic_long_dec_return_relaxed(atomic_long_t *v) { return atomic64_dec_return_relaxed(v); } -static inline long +static __always_inline long atomic_long_fetch_dec(atomic_long_t *v) { return atomic64_fetch_dec(v); } -static inline long +static __always_inline long atomic_long_fetch_dec_acquire(atomic_long_t *v) { return atomic64_fetch_dec_acquire(v); } -static inline long +static __always_inline long atomic_long_fetch_dec_release(atomic_long_t *v) { return atomic64_fetch_dec_release(v); } -static inline long +static __always_inline long atomic_long_fetch_dec_relaxed(atomic_long_t *v) { return atomic64_fetch_dec_relaxed(v); } -static inline void +static __always_inline void atomic_long_and(long i, atomic_long_t *v) { atomic64_and(i, v); } -static inline long +static __always_inline long atomic_long_fetch_and(long i, atomic_long_t *v) { return atomic64_fetch_and(i, v); } -static inline long +static __always_inline long atomic_long_fetch_and_acquire(long i, atomic_long_t *v) { return atomic64_fetch_and_acquire(i, v); } -static inline long +static __always_inline long atomic_long_fetch_and_release(long i, atomic_long_t *v) { return atomic64_fetch_and_release(i, v); } -static inline long +static __always_inline long atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) { return atomic64_fetch_and_relaxed(i, v); } -static inline void +static __always_inline void atomic_long_andnot(long i, atomic_long_t *v) { atomic64_andnot(i, v); } -static inline long +static __always_inline long atomic_long_fetch_andnot(long i, atomic_long_t *v) { return atomic64_fetch_andnot(i, v); } -static inline long +static __always_inline long atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) { return atomic64_fetch_andnot_acquire(i, v); } -static inline long +static __always_inline long atomic_long_fetch_andnot_release(long i, atomic_long_t *v) { return atomic64_fetch_andnot_release(i, v); } -static inline long +static __always_inline long atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) { return 
atomic64_fetch_andnot_relaxed(i, v); } -static inline void +static __always_inline void atomic_long_or(long i, atomic_long_t *v) { atomic64_or(i, v); } -static inline long +static __always_inline long atomic_long_fetch_or(long i, atomic_long_t *v) { return atomic64_fetch_or(i, v); } -static inline long +static __always_inline long atomic_long_fetch_or_acquire(long i, atomic_long_t *v) { return atomic64_fetch_or_acquire(i, v); } -static inline long +static __always_inline long atomic_long_fetch_or_release(long i, atomic_long_t *v) { return atomic64_fetch_or_release(i, v); } -static inline long +static __always_inline long atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) { return atomic64_fetch_or_relaxed(i, v); } -static inline void +static __always_inline void atomic_long_xor(long i, atomic_long_t *v) { atomic64_xor(i, v); } -static inline long +static __always_inline long atomic_long_fetch_xor(long i, atomic_long_t *v) { return atomic64_fetch_xor(i, v); } -static inline long +static __always_inline long atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) { return atomic64_fetch_xor_acquire(i, v); } -static inline long +static __always_inline long atomic_long_fetch_xor_release(long i, atomic_long_t *v) { return atomic64_fetch_xor_release(i, v); } -static inline long +static __always_inline long atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) { return atomic64_fetch_xor_relaxed(i, v); } -static inline long +static __always_inline long atomic_long_xchg(atomic_long_t *v, long i) { return atomic64_xchg(v, i); } -static inline long +static __always_inline long atomic_long_xchg_acquire(atomic_long_t *v, long i) { return atomic64_xchg_acquire(v, i); } -static inline long +static __always_inline long atomic_long_xchg_release(atomic_long_t *v, long i) { return atomic64_xchg_release(v, i); } -static inline long +static __always_inline long atomic_long_xchg_relaxed(atomic_long_t *v, long i) { return atomic64_xchg_relaxed(v, i); } -static inline long +static __always_inline long atomic_long_cmpxchg(atomic_long_t *v, long old, long new) { return atomic64_cmpxchg(v, old, new); } -static inline long +static __always_inline long atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) { return atomic64_cmpxchg_acquire(v, old, new); } -static inline long +static __always_inline long atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) { return atomic64_cmpxchg_release(v, old, new); } -static inline long +static __always_inline long atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) { return atomic64_cmpxchg_relaxed(v, old, new); } -static inline bool +static __always_inline bool atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) { return atomic64_try_cmpxchg(v, (s64 *)old, new); } -static inline bool +static __always_inline bool atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) { return atomic64_try_cmpxchg_acquire(v, (s64 *)old, new); } -static inline bool +static __always_inline bool atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) { return atomic64_try_cmpxchg_release(v, (s64 *)old, new); } -static inline bool +static __always_inline bool atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) { return atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new); } -static inline bool +static __always_inline bool atomic_long_sub_and_test(long i, atomic_long_t *v) { return atomic64_sub_and_test(i, v); } -static inline bool +static __always_inline bool atomic_long_dec_and_test(atomic_long_t *v) 
{ return atomic64_dec_and_test(v); } -static inline bool +static __always_inline bool atomic_long_inc_and_test(atomic_long_t *v) { return atomic64_inc_and_test(v); } -static inline bool +static __always_inline bool atomic_long_add_negative(long i, atomic_long_t *v) { return atomic64_add_negative(i, v); } -static inline long +static __always_inline long atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) { return atomic64_fetch_add_unless(v, a, u); } -static inline bool +static __always_inline bool atomic_long_add_unless(atomic_long_t *v, long a, long u) { return atomic64_add_unless(v, a, u); } -static inline bool +static __always_inline bool atomic_long_inc_not_zero(atomic_long_t *v) { return atomic64_inc_not_zero(v); } -static inline bool +static __always_inline bool atomic_long_inc_unless_negative(atomic_long_t *v) { return atomic64_inc_unless_negative(v); } -static inline bool +static __always_inline bool atomic_long_dec_unless_positive(atomic_long_t *v) { return atomic64_dec_unless_positive(v); } -static inline long +static __always_inline long atomic_long_dec_if_positive(atomic_long_t *v) { return atomic64_dec_if_positive(v); @@ -516,493 +517,493 @@ atomic_long_dec_if_positive(atomic_long_t *v) #else /* CONFIG_64BIT */ -static inline long +static __always_inline long atomic_long_read(const atomic_long_t *v) { return atomic_read(v); } -static inline long +static __always_inline long atomic_long_read_acquire(const atomic_long_t *v) { return atomic_read_acquire(v); } -static inline void +static __always_inline void atomic_long_set(atomic_long_t *v, long i) { atomic_set(v, i); } -static inline void +static __always_inline void atomic_long_set_release(atomic_long_t *v, long i) { atomic_set_release(v, i); } -static inline void +static __always_inline void atomic_long_add(long i, atomic_long_t *v) { atomic_add(i, v); } -static inline long +static __always_inline long atomic_long_add_return(long i, atomic_long_t *v) { return atomic_add_return(i, v); } -static inline long +static __always_inline long atomic_long_add_return_acquire(long i, atomic_long_t *v) { return atomic_add_return_acquire(i, v); } -static inline long +static __always_inline long atomic_long_add_return_release(long i, atomic_long_t *v) { return atomic_add_return_release(i, v); } -static inline long +static __always_inline long atomic_long_add_return_relaxed(long i, atomic_long_t *v) { return atomic_add_return_relaxed(i, v); } -static inline long +static __always_inline long atomic_long_fetch_add(long i, atomic_long_t *v) { return atomic_fetch_add(i, v); } -static inline long +static __always_inline long atomic_long_fetch_add_acquire(long i, atomic_long_t *v) { return atomic_fetch_add_acquire(i, v); } -static inline long +static __always_inline long atomic_long_fetch_add_release(long i, atomic_long_t *v) { return atomic_fetch_add_release(i, v); } -static inline long +static __always_inline long atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) { return atomic_fetch_add_relaxed(i, v); } -static inline void +static __always_inline void atomic_long_sub(long i, atomic_long_t *v) { atomic_sub(i, v); } -static inline long +static __always_inline long atomic_long_sub_return(long i, atomic_long_t *v) { return atomic_sub_return(i, v); } -static inline long +static __always_inline long atomic_long_sub_return_acquire(long i, atomic_long_t *v) { return atomic_sub_return_acquire(i, v); } -static inline long +static __always_inline long atomic_long_sub_return_release(long i, atomic_long_t *v) { return 
atomic_sub_return_release(i, v); } -static inline long +static __always_inline long atomic_long_sub_return_relaxed(long i, atomic_long_t *v) { return atomic_sub_return_relaxed(i, v); } -static inline long +static __always_inline long atomic_long_fetch_sub(long i, atomic_long_t *v) { return atomic_fetch_sub(i, v); } -static inline long +static __always_inline long atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) { return atomic_fetch_sub_acquire(i, v); } -static inline long +static __always_inline long atomic_long_fetch_sub_release(long i, atomic_long_t *v) { return atomic_fetch_sub_release(i, v); } -static inline long +static __always_inline long atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) { return atomic_fetch_sub_relaxed(i, v); } -static inline void +static __always_inline void atomic_long_inc(atomic_long_t *v) { atomic_inc(v); } -static inline long +static __always_inline long atomic_long_inc_return(atomic_long_t *v) { return atomic_inc_return(v); } -static inline long +static __always_inline long atomic_long_inc_return_acquire(atomic_long_t *v) { return atomic_inc_return_acquire(v); } -static inline long +static __always_inline long atomic_long_inc_return_release(atomic_long_t *v) { return atomic_inc_return_release(v); } -static inline long +static __always_inline long atomic_long_inc_return_relaxed(atomic_long_t *v) { return atomic_inc_return_relaxed(v); } -static inline long +static __always_inline long atomic_long_fetch_inc(atomic_long_t *v) { return atomic_fetch_inc(v); } -static inline long +static __always_inline long atomic_long_fetch_inc_acquire(atomic_long_t *v) { return atomic_fetch_inc_acquire(v); } -static inline long +static __always_inline long atomic_long_fetch_inc_release(atomic_long_t *v) { return atomic_fetch_inc_release(v); } -static inline long +static __always_inline long atomic_long_fetch_inc_relaxed(atomic_long_t *v) { return atomic_fetch_inc_relaxed(v); } -static inline void +static __always_inline void atomic_long_dec(atomic_long_t *v) { atomic_dec(v); } -static inline long +static __always_inline long atomic_long_dec_return(atomic_long_t *v) { return atomic_dec_return(v); } -static inline long +static __always_inline long atomic_long_dec_return_acquire(atomic_long_t *v) { return atomic_dec_return_acquire(v); } -static inline long +static __always_inline long atomic_long_dec_return_release(atomic_long_t *v) { return atomic_dec_return_release(v); } -static inline long +static __always_inline long atomic_long_dec_return_relaxed(atomic_long_t *v) { return atomic_dec_return_relaxed(v); } -static inline long +static __always_inline long atomic_long_fetch_dec(atomic_long_t *v) { return atomic_fetch_dec(v); } -static inline long +static __always_inline long atomic_long_fetch_dec_acquire(atomic_long_t *v) { return atomic_fetch_dec_acquire(v); } -static inline long +static __always_inline long atomic_long_fetch_dec_release(atomic_long_t *v) { return atomic_fetch_dec_release(v); } -static inline long +static __always_inline long atomic_long_fetch_dec_relaxed(atomic_long_t *v) { return atomic_fetch_dec_relaxed(v); } -static inline void +static __always_inline void atomic_long_and(long i, atomic_long_t *v) { atomic_and(i, v); } -static inline long +static __always_inline long atomic_long_fetch_and(long i, atomic_long_t *v) { return atomic_fetch_and(i, v); } -static inline long +static __always_inline long atomic_long_fetch_and_acquire(long i, atomic_long_t *v) { return atomic_fetch_and_acquire(i, v); } -static inline long +static __always_inline long 
atomic_long_fetch_and_release(long i, atomic_long_t *v) { return atomic_fetch_and_release(i, v); } -static inline long +static __always_inline long atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) { return atomic_fetch_and_relaxed(i, v); } -static inline void +static __always_inline void atomic_long_andnot(long i, atomic_long_t *v) { atomic_andnot(i, v); } -static inline long +static __always_inline long atomic_long_fetch_andnot(long i, atomic_long_t *v) { return atomic_fetch_andnot(i, v); } -static inline long +static __always_inline long atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) { return atomic_fetch_andnot_acquire(i, v); } -static inline long +static __always_inline long atomic_long_fetch_andnot_release(long i, atomic_long_t *v) { return atomic_fetch_andnot_release(i, v); } -static inline long +static __always_inline long atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) { return atomic_fetch_andnot_relaxed(i, v); } -static inline void +static __always_inline void atomic_long_or(long i, atomic_long_t *v) { atomic_or(i, v); } -static inline long +static __always_inline long atomic_long_fetch_or(long i, atomic_long_t *v) { return atomic_fetch_or(i, v); } -static inline long +static __always_inline long atomic_long_fetch_or_acquire(long i, atomic_long_t *v) { return atomic_fetch_or_acquire(i, v); } -static inline long +static __always_inline long atomic_long_fetch_or_release(long i, atomic_long_t *v) { return atomic_fetch_or_release(i, v); } -static inline long +static __always_inline long atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) { return atomic_fetch_or_relaxed(i, v); } -static inline void +static __always_inline void atomic_long_xor(long i, atomic_long_t *v) { atomic_xor(i, v); } -static inline long +static __always_inline long atomic_long_fetch_xor(long i, atomic_long_t *v) { return atomic_fetch_xor(i, v); } -static inline long +static __always_inline long atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) { return atomic_fetch_xor_acquire(i, v); } -static inline long +static __always_inline long atomic_long_fetch_xor_release(long i, atomic_long_t *v) { return atomic_fetch_xor_release(i, v); } -static inline long +static __always_inline long atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) { return atomic_fetch_xor_relaxed(i, v); } -static inline long +static __always_inline long atomic_long_xchg(atomic_long_t *v, long i) { return atomic_xchg(v, i); } -static inline long +static __always_inline long atomic_long_xchg_acquire(atomic_long_t *v, long i) { return atomic_xchg_acquire(v, i); } -static inline long +static __always_inline long atomic_long_xchg_release(atomic_long_t *v, long i) { return atomic_xchg_release(v, i); } -static inline long +static __always_inline long atomic_long_xchg_relaxed(atomic_long_t *v, long i) { return atomic_xchg_relaxed(v, i); } -static inline long +static __always_inline long atomic_long_cmpxchg(atomic_long_t *v, long old, long new) { return atomic_cmpxchg(v, old, new); } -static inline long +static __always_inline long atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) { return atomic_cmpxchg_acquire(v, old, new); } -static inline long +static __always_inline long atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) { return atomic_cmpxchg_release(v, old, new); } -static inline long +static __always_inline long atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) { return atomic_cmpxchg_relaxed(v, old, new); } -static inline bool +static __always_inline bool 
atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) { return atomic_try_cmpxchg(v, (int *)old, new); } -static inline bool +static __always_inline bool atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) { return atomic_try_cmpxchg_acquire(v, (int *)old, new); } -static inline bool +static __always_inline bool atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) { return atomic_try_cmpxchg_release(v, (int *)old, new); } -static inline bool +static __always_inline bool atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) { return atomic_try_cmpxchg_relaxed(v, (int *)old, new); } -static inline bool +static __always_inline bool atomic_long_sub_and_test(long i, atomic_long_t *v) { return atomic_sub_and_test(i, v); } -static inline bool +static __always_inline bool atomic_long_dec_and_test(atomic_long_t *v) { return atomic_dec_and_test(v); } -static inline bool +static __always_inline bool atomic_long_inc_and_test(atomic_long_t *v) { return atomic_inc_and_test(v); } -static inline bool +static __always_inline bool atomic_long_add_negative(long i, atomic_long_t *v) { return atomic_add_negative(i, v); } -static inline long +static __always_inline long atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) { return atomic_fetch_add_unless(v, a, u); } -static inline bool +static __always_inline bool atomic_long_add_unless(atomic_long_t *v, long a, long u) { return atomic_add_unless(v, a, u); } -static inline bool +static __always_inline bool atomic_long_inc_not_zero(atomic_long_t *v) { return atomic_inc_not_zero(v); } -static inline bool +static __always_inline bool atomic_long_inc_unless_negative(atomic_long_t *v) { return atomic_inc_unless_negative(v); } -static inline bool +static __always_inline bool atomic_long_dec_unless_positive(atomic_long_t *v) { return atomic_dec_unless_positive(v); } -static inline long +static __always_inline long atomic_long_dec_if_positive(atomic_long_t *v) { return atomic_dec_if_positive(v); @@ -1010,4 +1011,4 @@ atomic_long_dec_if_positive(atomic_long_t *v) #endif /* CONFIG_64BIT */ #endif /* _ASM_GENERIC_ATOMIC_LONG_H */ -// 77558968132ce4f911ad53f6f52ce423006f6268 +// a624200981f552b2c6be4f32fe44da8289f30d87 diff --git a/scripts/atomic/gen-atomic-instrumented.sh b/scripts/atomic/gen-atomic-instrumented.sh index 8b8b2a6f8d682..fb4222548b22e 100755 --- a/scripts/atomic/gen-atomic-instrumented.sh +++ b/scripts/atomic/gen-atomic-instrumented.sh @@ -84,7 +84,7 @@ gen_proto_order_variant() [ ! 
-z "${guard}" ] && printf "#if ${guard}\n" cat < +#include #include #include -static inline void __atomic_check_read(const volatile void *v, size_t size) +static __always_inline void __atomic_check_read(const volatile void *v, size_t size) { kasan_check_read(v, size); kcsan_check_atomic_read(v, size); } -static inline void __atomic_check_write(const volatile void *v, size_t size) +static __always_inline void __atomic_check_write(const volatile void *v, size_t size) { kasan_check_write(v, size); kcsan_check_atomic_write(v, size); diff --git a/scripts/atomic/gen-atomic-long.sh b/scripts/atomic/gen-atomic-long.sh index c240a7231b2ef..e318d3f92e53b 100755 --- a/scripts/atomic/gen-atomic-long.sh +++ b/scripts/atomic/gen-atomic-long.sh @@ -46,7 +46,7 @@ gen_proto_order_variant() local retstmt="$(gen_ret_stmt "${meta}")" cat < #include #ifdef CONFIG_64BIT -- GitLab From 944bc9cca7c392879fa2c3f911bbef7422707679 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Tue, 26 Nov 2019 15:04:05 +0100 Subject: [PATCH 0013/1971] asm-generic/atomic: Use __always_inline for fallback wrappers Use __always_inline for atomic fallback wrappers. When building for size (CC_OPTIMIZE_FOR_SIZE), some compilers appear to be less inclined to inline even relatively small static inline functions that are assumed to be inlinable such as atomic ops. This can cause problems, for example in UACCESS regions. While the fallback wrappers aren't pure wrappers, they are trivial nonetheless, and the function they wrap should determine the final inlining policy. For x86 tinyconfig we observe: - vmlinux baseline: 1315988 - vmlinux with patch: 1315928 (-60 bytes) Suggested-by: Mark Rutland Signed-off-by: Marco Elver Acked-by: Mark Rutland Signed-off-by: Paul E. McKenney --- include/linux/atomic-fallback.h | 340 ++++++++++--------- scripts/atomic/fallbacks/acquire | 2 +- scripts/atomic/fallbacks/add_negative | 2 +- scripts/atomic/fallbacks/add_unless | 2 +- scripts/atomic/fallbacks/andnot | 2 +- scripts/atomic/fallbacks/dec | 2 +- scripts/atomic/fallbacks/dec_and_test | 2 +- scripts/atomic/fallbacks/dec_if_positive | 2 +- scripts/atomic/fallbacks/dec_unless_positive | 2 +- scripts/atomic/fallbacks/fence | 2 +- scripts/atomic/fallbacks/fetch_add_unless | 2 +- scripts/atomic/fallbacks/inc | 2 +- scripts/atomic/fallbacks/inc_and_test | 2 +- scripts/atomic/fallbacks/inc_not_zero | 2 +- scripts/atomic/fallbacks/inc_unless_negative | 2 +- scripts/atomic/fallbacks/read_acquire | 2 +- scripts/atomic/fallbacks/release | 2 +- scripts/atomic/fallbacks/set_release | 2 +- scripts/atomic/fallbacks/sub_and_test | 2 +- scripts/atomic/fallbacks/try_cmpxchg | 2 +- scripts/atomic/gen-atomic-fallback.sh | 2 + 21 files changed, 192 insertions(+), 188 deletions(-) diff --git a/include/linux/atomic-fallback.h b/include/linux/atomic-fallback.h index a7d240e465c07..656b5489b6737 100644 --- a/include/linux/atomic-fallback.h +++ b/include/linux/atomic-fallback.h @@ -6,6 +6,8 @@ #ifndef _LINUX_ATOMIC_FALLBACK_H #define _LINUX_ATOMIC_FALLBACK_H +#include + #ifndef xchg_relaxed #define xchg_relaxed xchg #define xchg_acquire xchg @@ -76,7 +78,7 @@ #endif /* cmpxchg64_relaxed */ #ifndef atomic_read_acquire -static inline int +static __always_inline int atomic_read_acquire(const atomic_t *v) { return smp_load_acquire(&(v)->counter); @@ -85,7 +87,7 @@ atomic_read_acquire(const atomic_t *v) #endif #ifndef atomic_set_release -static inline void +static __always_inline void atomic_set_release(atomic_t *v, int i) { smp_store_release(&(v)->counter, i); @@ -100,7 +102,7 @@ 
atomic_set_release(atomic_t *v, int i) #else /* atomic_add_return_relaxed */ #ifndef atomic_add_return_acquire -static inline int +static __always_inline int atomic_add_return_acquire(int i, atomic_t *v) { int ret = atomic_add_return_relaxed(i, v); @@ -111,7 +113,7 @@ atomic_add_return_acquire(int i, atomic_t *v) #endif #ifndef atomic_add_return_release -static inline int +static __always_inline int atomic_add_return_release(int i, atomic_t *v) { __atomic_release_fence(); @@ -121,7 +123,7 @@ atomic_add_return_release(int i, atomic_t *v) #endif #ifndef atomic_add_return -static inline int +static __always_inline int atomic_add_return(int i, atomic_t *v) { int ret; @@ -142,7 +144,7 @@ atomic_add_return(int i, atomic_t *v) #else /* atomic_fetch_add_relaxed */ #ifndef atomic_fetch_add_acquire -static inline int +static __always_inline int atomic_fetch_add_acquire(int i, atomic_t *v) { int ret = atomic_fetch_add_relaxed(i, v); @@ -153,7 +155,7 @@ atomic_fetch_add_acquire(int i, atomic_t *v) #endif #ifndef atomic_fetch_add_release -static inline int +static __always_inline int atomic_fetch_add_release(int i, atomic_t *v) { __atomic_release_fence(); @@ -163,7 +165,7 @@ atomic_fetch_add_release(int i, atomic_t *v) #endif #ifndef atomic_fetch_add -static inline int +static __always_inline int atomic_fetch_add(int i, atomic_t *v) { int ret; @@ -184,7 +186,7 @@ atomic_fetch_add(int i, atomic_t *v) #else /* atomic_sub_return_relaxed */ #ifndef atomic_sub_return_acquire -static inline int +static __always_inline int atomic_sub_return_acquire(int i, atomic_t *v) { int ret = atomic_sub_return_relaxed(i, v); @@ -195,7 +197,7 @@ atomic_sub_return_acquire(int i, atomic_t *v) #endif #ifndef atomic_sub_return_release -static inline int +static __always_inline int atomic_sub_return_release(int i, atomic_t *v) { __atomic_release_fence(); @@ -205,7 +207,7 @@ atomic_sub_return_release(int i, atomic_t *v) #endif #ifndef atomic_sub_return -static inline int +static __always_inline int atomic_sub_return(int i, atomic_t *v) { int ret; @@ -226,7 +228,7 @@ atomic_sub_return(int i, atomic_t *v) #else /* atomic_fetch_sub_relaxed */ #ifndef atomic_fetch_sub_acquire -static inline int +static __always_inline int atomic_fetch_sub_acquire(int i, atomic_t *v) { int ret = atomic_fetch_sub_relaxed(i, v); @@ -237,7 +239,7 @@ atomic_fetch_sub_acquire(int i, atomic_t *v) #endif #ifndef atomic_fetch_sub_release -static inline int +static __always_inline int atomic_fetch_sub_release(int i, atomic_t *v) { __atomic_release_fence(); @@ -247,7 +249,7 @@ atomic_fetch_sub_release(int i, atomic_t *v) #endif #ifndef atomic_fetch_sub -static inline int +static __always_inline int atomic_fetch_sub(int i, atomic_t *v) { int ret; @@ -262,7 +264,7 @@ atomic_fetch_sub(int i, atomic_t *v) #endif /* atomic_fetch_sub_relaxed */ #ifndef atomic_inc -static inline void +static __always_inline void atomic_inc(atomic_t *v) { atomic_add(1, v); @@ -278,7 +280,7 @@ atomic_inc(atomic_t *v) #endif /* atomic_inc_return */ #ifndef atomic_inc_return -static inline int +static __always_inline int atomic_inc_return(atomic_t *v) { return atomic_add_return(1, v); @@ -287,7 +289,7 @@ atomic_inc_return(atomic_t *v) #endif #ifndef atomic_inc_return_acquire -static inline int +static __always_inline int atomic_inc_return_acquire(atomic_t *v) { return atomic_add_return_acquire(1, v); @@ -296,7 +298,7 @@ atomic_inc_return_acquire(atomic_t *v) #endif #ifndef atomic_inc_return_release -static inline int +static __always_inline int atomic_inc_return_release(atomic_t *v) { 
return atomic_add_return_release(1, v); @@ -305,7 +307,7 @@ atomic_inc_return_release(atomic_t *v) #endif #ifndef atomic_inc_return_relaxed -static inline int +static __always_inline int atomic_inc_return_relaxed(atomic_t *v) { return atomic_add_return_relaxed(1, v); @@ -316,7 +318,7 @@ atomic_inc_return_relaxed(atomic_t *v) #else /* atomic_inc_return_relaxed */ #ifndef atomic_inc_return_acquire -static inline int +static __always_inline int atomic_inc_return_acquire(atomic_t *v) { int ret = atomic_inc_return_relaxed(v); @@ -327,7 +329,7 @@ atomic_inc_return_acquire(atomic_t *v) #endif #ifndef atomic_inc_return_release -static inline int +static __always_inline int atomic_inc_return_release(atomic_t *v) { __atomic_release_fence(); @@ -337,7 +339,7 @@ atomic_inc_return_release(atomic_t *v) #endif #ifndef atomic_inc_return -static inline int +static __always_inline int atomic_inc_return(atomic_t *v) { int ret; @@ -359,7 +361,7 @@ atomic_inc_return(atomic_t *v) #endif /* atomic_fetch_inc */ #ifndef atomic_fetch_inc -static inline int +static __always_inline int atomic_fetch_inc(atomic_t *v) { return atomic_fetch_add(1, v); @@ -368,7 +370,7 @@ atomic_fetch_inc(atomic_t *v) #endif #ifndef atomic_fetch_inc_acquire -static inline int +static __always_inline int atomic_fetch_inc_acquire(atomic_t *v) { return atomic_fetch_add_acquire(1, v); @@ -377,7 +379,7 @@ atomic_fetch_inc_acquire(atomic_t *v) #endif #ifndef atomic_fetch_inc_release -static inline int +static __always_inline int atomic_fetch_inc_release(atomic_t *v) { return atomic_fetch_add_release(1, v); @@ -386,7 +388,7 @@ atomic_fetch_inc_release(atomic_t *v) #endif #ifndef atomic_fetch_inc_relaxed -static inline int +static __always_inline int atomic_fetch_inc_relaxed(atomic_t *v) { return atomic_fetch_add_relaxed(1, v); @@ -397,7 +399,7 @@ atomic_fetch_inc_relaxed(atomic_t *v) #else /* atomic_fetch_inc_relaxed */ #ifndef atomic_fetch_inc_acquire -static inline int +static __always_inline int atomic_fetch_inc_acquire(atomic_t *v) { int ret = atomic_fetch_inc_relaxed(v); @@ -408,7 +410,7 @@ atomic_fetch_inc_acquire(atomic_t *v) #endif #ifndef atomic_fetch_inc_release -static inline int +static __always_inline int atomic_fetch_inc_release(atomic_t *v) { __atomic_release_fence(); @@ -418,7 +420,7 @@ atomic_fetch_inc_release(atomic_t *v) #endif #ifndef atomic_fetch_inc -static inline int +static __always_inline int atomic_fetch_inc(atomic_t *v) { int ret; @@ -433,7 +435,7 @@ atomic_fetch_inc(atomic_t *v) #endif /* atomic_fetch_inc_relaxed */ #ifndef atomic_dec -static inline void +static __always_inline void atomic_dec(atomic_t *v) { atomic_sub(1, v); @@ -449,7 +451,7 @@ atomic_dec(atomic_t *v) #endif /* atomic_dec_return */ #ifndef atomic_dec_return -static inline int +static __always_inline int atomic_dec_return(atomic_t *v) { return atomic_sub_return(1, v); @@ -458,7 +460,7 @@ atomic_dec_return(atomic_t *v) #endif #ifndef atomic_dec_return_acquire -static inline int +static __always_inline int atomic_dec_return_acquire(atomic_t *v) { return atomic_sub_return_acquire(1, v); @@ -467,7 +469,7 @@ atomic_dec_return_acquire(atomic_t *v) #endif #ifndef atomic_dec_return_release -static inline int +static __always_inline int atomic_dec_return_release(atomic_t *v) { return atomic_sub_return_release(1, v); @@ -476,7 +478,7 @@ atomic_dec_return_release(atomic_t *v) #endif #ifndef atomic_dec_return_relaxed -static inline int +static __always_inline int atomic_dec_return_relaxed(atomic_t *v) { return atomic_sub_return_relaxed(1, v); @@ -487,7 +489,7 @@ 
atomic_dec_return_relaxed(atomic_t *v) #else /* atomic_dec_return_relaxed */ #ifndef atomic_dec_return_acquire -static inline int +static __always_inline int atomic_dec_return_acquire(atomic_t *v) { int ret = atomic_dec_return_relaxed(v); @@ -498,7 +500,7 @@ atomic_dec_return_acquire(atomic_t *v) #endif #ifndef atomic_dec_return_release -static inline int +static __always_inline int atomic_dec_return_release(atomic_t *v) { __atomic_release_fence(); @@ -508,7 +510,7 @@ atomic_dec_return_release(atomic_t *v) #endif #ifndef atomic_dec_return -static inline int +static __always_inline int atomic_dec_return(atomic_t *v) { int ret; @@ -530,7 +532,7 @@ atomic_dec_return(atomic_t *v) #endif /* atomic_fetch_dec */ #ifndef atomic_fetch_dec -static inline int +static __always_inline int atomic_fetch_dec(atomic_t *v) { return atomic_fetch_sub(1, v); @@ -539,7 +541,7 @@ atomic_fetch_dec(atomic_t *v) #endif #ifndef atomic_fetch_dec_acquire -static inline int +static __always_inline int atomic_fetch_dec_acquire(atomic_t *v) { return atomic_fetch_sub_acquire(1, v); @@ -548,7 +550,7 @@ atomic_fetch_dec_acquire(atomic_t *v) #endif #ifndef atomic_fetch_dec_release -static inline int +static __always_inline int atomic_fetch_dec_release(atomic_t *v) { return atomic_fetch_sub_release(1, v); @@ -557,7 +559,7 @@ atomic_fetch_dec_release(atomic_t *v) #endif #ifndef atomic_fetch_dec_relaxed -static inline int +static __always_inline int atomic_fetch_dec_relaxed(atomic_t *v) { return atomic_fetch_sub_relaxed(1, v); @@ -568,7 +570,7 @@ atomic_fetch_dec_relaxed(atomic_t *v) #else /* atomic_fetch_dec_relaxed */ #ifndef atomic_fetch_dec_acquire -static inline int +static __always_inline int atomic_fetch_dec_acquire(atomic_t *v) { int ret = atomic_fetch_dec_relaxed(v); @@ -579,7 +581,7 @@ atomic_fetch_dec_acquire(atomic_t *v) #endif #ifndef atomic_fetch_dec_release -static inline int +static __always_inline int atomic_fetch_dec_release(atomic_t *v) { __atomic_release_fence(); @@ -589,7 +591,7 @@ atomic_fetch_dec_release(atomic_t *v) #endif #ifndef atomic_fetch_dec -static inline int +static __always_inline int atomic_fetch_dec(atomic_t *v) { int ret; @@ -610,7 +612,7 @@ atomic_fetch_dec(atomic_t *v) #else /* atomic_fetch_and_relaxed */ #ifndef atomic_fetch_and_acquire -static inline int +static __always_inline int atomic_fetch_and_acquire(int i, atomic_t *v) { int ret = atomic_fetch_and_relaxed(i, v); @@ -621,7 +623,7 @@ atomic_fetch_and_acquire(int i, atomic_t *v) #endif #ifndef atomic_fetch_and_release -static inline int +static __always_inline int atomic_fetch_and_release(int i, atomic_t *v) { __atomic_release_fence(); @@ -631,7 +633,7 @@ atomic_fetch_and_release(int i, atomic_t *v) #endif #ifndef atomic_fetch_and -static inline int +static __always_inline int atomic_fetch_and(int i, atomic_t *v) { int ret; @@ -646,7 +648,7 @@ atomic_fetch_and(int i, atomic_t *v) #endif /* atomic_fetch_and_relaxed */ #ifndef atomic_andnot -static inline void +static __always_inline void atomic_andnot(int i, atomic_t *v) { atomic_and(~i, v); @@ -662,7 +664,7 @@ atomic_andnot(int i, atomic_t *v) #endif /* atomic_fetch_andnot */ #ifndef atomic_fetch_andnot -static inline int +static __always_inline int atomic_fetch_andnot(int i, atomic_t *v) { return atomic_fetch_and(~i, v); @@ -671,7 +673,7 @@ atomic_fetch_andnot(int i, atomic_t *v) #endif #ifndef atomic_fetch_andnot_acquire -static inline int +static __always_inline int atomic_fetch_andnot_acquire(int i, atomic_t *v) { return atomic_fetch_and_acquire(~i, v); @@ -680,7 +682,7 @@ 
atomic_fetch_andnot_acquire(int i, atomic_t *v) #endif #ifndef atomic_fetch_andnot_release -static inline int +static __always_inline int atomic_fetch_andnot_release(int i, atomic_t *v) { return atomic_fetch_and_release(~i, v); @@ -689,7 +691,7 @@ atomic_fetch_andnot_release(int i, atomic_t *v) #endif #ifndef atomic_fetch_andnot_relaxed -static inline int +static __always_inline int atomic_fetch_andnot_relaxed(int i, atomic_t *v) { return atomic_fetch_and_relaxed(~i, v); @@ -700,7 +702,7 @@ atomic_fetch_andnot_relaxed(int i, atomic_t *v) #else /* atomic_fetch_andnot_relaxed */ #ifndef atomic_fetch_andnot_acquire -static inline int +static __always_inline int atomic_fetch_andnot_acquire(int i, atomic_t *v) { int ret = atomic_fetch_andnot_relaxed(i, v); @@ -711,7 +713,7 @@ atomic_fetch_andnot_acquire(int i, atomic_t *v) #endif #ifndef atomic_fetch_andnot_release -static inline int +static __always_inline int atomic_fetch_andnot_release(int i, atomic_t *v) { __atomic_release_fence(); @@ -721,7 +723,7 @@ atomic_fetch_andnot_release(int i, atomic_t *v) #endif #ifndef atomic_fetch_andnot -static inline int +static __always_inline int atomic_fetch_andnot(int i, atomic_t *v) { int ret; @@ -742,7 +744,7 @@ atomic_fetch_andnot(int i, atomic_t *v) #else /* atomic_fetch_or_relaxed */ #ifndef atomic_fetch_or_acquire -static inline int +static __always_inline int atomic_fetch_or_acquire(int i, atomic_t *v) { int ret = atomic_fetch_or_relaxed(i, v); @@ -753,7 +755,7 @@ atomic_fetch_or_acquire(int i, atomic_t *v) #endif #ifndef atomic_fetch_or_release -static inline int +static __always_inline int atomic_fetch_or_release(int i, atomic_t *v) { __atomic_release_fence(); @@ -763,7 +765,7 @@ atomic_fetch_or_release(int i, atomic_t *v) #endif #ifndef atomic_fetch_or -static inline int +static __always_inline int atomic_fetch_or(int i, atomic_t *v) { int ret; @@ -784,7 +786,7 @@ atomic_fetch_or(int i, atomic_t *v) #else /* atomic_fetch_xor_relaxed */ #ifndef atomic_fetch_xor_acquire -static inline int +static __always_inline int atomic_fetch_xor_acquire(int i, atomic_t *v) { int ret = atomic_fetch_xor_relaxed(i, v); @@ -795,7 +797,7 @@ atomic_fetch_xor_acquire(int i, atomic_t *v) #endif #ifndef atomic_fetch_xor_release -static inline int +static __always_inline int atomic_fetch_xor_release(int i, atomic_t *v) { __atomic_release_fence(); @@ -805,7 +807,7 @@ atomic_fetch_xor_release(int i, atomic_t *v) #endif #ifndef atomic_fetch_xor -static inline int +static __always_inline int atomic_fetch_xor(int i, atomic_t *v) { int ret; @@ -826,7 +828,7 @@ atomic_fetch_xor(int i, atomic_t *v) #else /* atomic_xchg_relaxed */ #ifndef atomic_xchg_acquire -static inline int +static __always_inline int atomic_xchg_acquire(atomic_t *v, int i) { int ret = atomic_xchg_relaxed(v, i); @@ -837,7 +839,7 @@ atomic_xchg_acquire(atomic_t *v, int i) #endif #ifndef atomic_xchg_release -static inline int +static __always_inline int atomic_xchg_release(atomic_t *v, int i) { __atomic_release_fence(); @@ -847,7 +849,7 @@ atomic_xchg_release(atomic_t *v, int i) #endif #ifndef atomic_xchg -static inline int +static __always_inline int atomic_xchg(atomic_t *v, int i) { int ret; @@ -868,7 +870,7 @@ atomic_xchg(atomic_t *v, int i) #else /* atomic_cmpxchg_relaxed */ #ifndef atomic_cmpxchg_acquire -static inline int +static __always_inline int atomic_cmpxchg_acquire(atomic_t *v, int old, int new) { int ret = atomic_cmpxchg_relaxed(v, old, new); @@ -879,7 +881,7 @@ atomic_cmpxchg_acquire(atomic_t *v, int old, int new) #endif #ifndef 
atomic_cmpxchg_release -static inline int +static __always_inline int atomic_cmpxchg_release(atomic_t *v, int old, int new) { __atomic_release_fence(); @@ -889,7 +891,7 @@ atomic_cmpxchg_release(atomic_t *v, int old, int new) #endif #ifndef atomic_cmpxchg -static inline int +static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new) { int ret; @@ -911,7 +913,7 @@ atomic_cmpxchg(atomic_t *v, int old, int new) #endif /* atomic_try_cmpxchg */ #ifndef atomic_try_cmpxchg -static inline bool +static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new) { int r, o = *old; @@ -924,7 +926,7 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new) #endif #ifndef atomic_try_cmpxchg_acquire -static inline bool +static __always_inline bool atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) { int r, o = *old; @@ -937,7 +939,7 @@ atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) #endif #ifndef atomic_try_cmpxchg_release -static inline bool +static __always_inline bool atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) { int r, o = *old; @@ -950,7 +952,7 @@ atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) #endif #ifndef atomic_try_cmpxchg_relaxed -static inline bool +static __always_inline bool atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) { int r, o = *old; @@ -965,7 +967,7 @@ atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) #else /* atomic_try_cmpxchg_relaxed */ #ifndef atomic_try_cmpxchg_acquire -static inline bool +static __always_inline bool atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) { bool ret = atomic_try_cmpxchg_relaxed(v, old, new); @@ -976,7 +978,7 @@ atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) #endif #ifndef atomic_try_cmpxchg_release -static inline bool +static __always_inline bool atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) { __atomic_release_fence(); @@ -986,7 +988,7 @@ atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) #endif #ifndef atomic_try_cmpxchg -static inline bool +static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new) { bool ret; @@ -1010,7 +1012,7 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new) * true if the result is zero, or false for all * other cases. */ -static inline bool +static __always_inline bool atomic_sub_and_test(int i, atomic_t *v) { return atomic_sub_return(i, v) == 0; @@ -1027,7 +1029,7 @@ atomic_sub_and_test(int i, atomic_t *v) * returns true if the result is 0, or false for all other * cases. */ -static inline bool +static __always_inline bool atomic_dec_and_test(atomic_t *v) { return atomic_dec_return(v) == 0; @@ -1044,7 +1046,7 @@ atomic_dec_and_test(atomic_t *v) * and returns true if the result is zero, or false for all * other cases. */ -static inline bool +static __always_inline bool atomic_inc_and_test(atomic_t *v) { return atomic_inc_return(v) == 0; @@ -1062,7 +1064,7 @@ atomic_inc_and_test(atomic_t *v) * if the result is negative, or false when * result is greater than or equal to zero. */ -static inline bool +static __always_inline bool atomic_add_negative(int i, atomic_t *v) { return atomic_add_return(i, v) < 0; @@ -1080,7 +1082,7 @@ atomic_add_negative(int i, atomic_t *v) * Atomically adds @a to @v, so long as @v was not already @u. 
* Returns original value of @v */ -static inline int +static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) { int c = atomic_read(v); @@ -1105,7 +1107,7 @@ atomic_fetch_add_unless(atomic_t *v, int a, int u) * Atomically adds @a to @v, if @v was not already @u. * Returns true if the addition was done. */ -static inline bool +static __always_inline bool atomic_add_unless(atomic_t *v, int a, int u) { return atomic_fetch_add_unless(v, a, u) != u; @@ -1121,7 +1123,7 @@ atomic_add_unless(atomic_t *v, int a, int u) * Atomically increments @v by 1, if @v is non-zero. * Returns true if the increment was done. */ -static inline bool +static __always_inline bool atomic_inc_not_zero(atomic_t *v) { return atomic_add_unless(v, 1, 0); @@ -1130,7 +1132,7 @@ atomic_inc_not_zero(atomic_t *v) #endif #ifndef atomic_inc_unless_negative -static inline bool +static __always_inline bool atomic_inc_unless_negative(atomic_t *v) { int c = atomic_read(v); @@ -1146,7 +1148,7 @@ atomic_inc_unless_negative(atomic_t *v) #endif #ifndef atomic_dec_unless_positive -static inline bool +static __always_inline bool atomic_dec_unless_positive(atomic_t *v) { int c = atomic_read(v); @@ -1162,7 +1164,7 @@ atomic_dec_unless_positive(atomic_t *v) #endif #ifndef atomic_dec_if_positive -static inline int +static __always_inline int atomic_dec_if_positive(atomic_t *v) { int dec, c = atomic_read(v); @@ -1186,7 +1188,7 @@ atomic_dec_if_positive(atomic_t *v) #endif #ifndef atomic64_read_acquire -static inline s64 +static __always_inline s64 atomic64_read_acquire(const atomic64_t *v) { return smp_load_acquire(&(v)->counter); @@ -1195,7 +1197,7 @@ atomic64_read_acquire(const atomic64_t *v) #endif #ifndef atomic64_set_release -static inline void +static __always_inline void atomic64_set_release(atomic64_t *v, s64 i) { smp_store_release(&(v)->counter, i); @@ -1210,7 +1212,7 @@ atomic64_set_release(atomic64_t *v, s64 i) #else /* atomic64_add_return_relaxed */ #ifndef atomic64_add_return_acquire -static inline s64 +static __always_inline s64 atomic64_add_return_acquire(s64 i, atomic64_t *v) { s64 ret = atomic64_add_return_relaxed(i, v); @@ -1221,7 +1223,7 @@ atomic64_add_return_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_add_return_release -static inline s64 +static __always_inline s64 atomic64_add_return_release(s64 i, atomic64_t *v) { __atomic_release_fence(); @@ -1231,7 +1233,7 @@ atomic64_add_return_release(s64 i, atomic64_t *v) #endif #ifndef atomic64_add_return -static inline s64 +static __always_inline s64 atomic64_add_return(s64 i, atomic64_t *v) { s64 ret; @@ -1252,7 +1254,7 @@ atomic64_add_return(s64 i, atomic64_t *v) #else /* atomic64_fetch_add_relaxed */ #ifndef atomic64_fetch_add_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_add_acquire(s64 i, atomic64_t *v) { s64 ret = atomic64_fetch_add_relaxed(i, v); @@ -1263,7 +1265,7 @@ atomic64_fetch_add_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_add_release -static inline s64 +static __always_inline s64 atomic64_fetch_add_release(s64 i, atomic64_t *v) { __atomic_release_fence(); @@ -1273,7 +1275,7 @@ atomic64_fetch_add_release(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_add -static inline s64 +static __always_inline s64 atomic64_fetch_add(s64 i, atomic64_t *v) { s64 ret; @@ -1294,7 +1296,7 @@ atomic64_fetch_add(s64 i, atomic64_t *v) #else /* atomic64_sub_return_relaxed */ #ifndef atomic64_sub_return_acquire -static inline s64 +static __always_inline s64 atomic64_sub_return_acquire(s64 i, atomic64_t *v) { s64 
ret = atomic64_sub_return_relaxed(i, v); @@ -1305,7 +1307,7 @@ atomic64_sub_return_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_sub_return_release -static inline s64 +static __always_inline s64 atomic64_sub_return_release(s64 i, atomic64_t *v) { __atomic_release_fence(); @@ -1315,7 +1317,7 @@ atomic64_sub_return_release(s64 i, atomic64_t *v) #endif #ifndef atomic64_sub_return -static inline s64 +static __always_inline s64 atomic64_sub_return(s64 i, atomic64_t *v) { s64 ret; @@ -1336,7 +1338,7 @@ atomic64_sub_return(s64 i, atomic64_t *v) #else /* atomic64_fetch_sub_relaxed */ #ifndef atomic64_fetch_sub_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) { s64 ret = atomic64_fetch_sub_relaxed(i, v); @@ -1347,7 +1349,7 @@ atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_sub_release -static inline s64 +static __always_inline s64 atomic64_fetch_sub_release(s64 i, atomic64_t *v) { __atomic_release_fence(); @@ -1357,7 +1359,7 @@ atomic64_fetch_sub_release(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_sub -static inline s64 +static __always_inline s64 atomic64_fetch_sub(s64 i, atomic64_t *v) { s64 ret; @@ -1372,7 +1374,7 @@ atomic64_fetch_sub(s64 i, atomic64_t *v) #endif /* atomic64_fetch_sub_relaxed */ #ifndef atomic64_inc -static inline void +static __always_inline void atomic64_inc(atomic64_t *v) { atomic64_add(1, v); @@ -1388,7 +1390,7 @@ atomic64_inc(atomic64_t *v) #endif /* atomic64_inc_return */ #ifndef atomic64_inc_return -static inline s64 +static __always_inline s64 atomic64_inc_return(atomic64_t *v) { return atomic64_add_return(1, v); @@ -1397,7 +1399,7 @@ atomic64_inc_return(atomic64_t *v) #endif #ifndef atomic64_inc_return_acquire -static inline s64 +static __always_inline s64 atomic64_inc_return_acquire(atomic64_t *v) { return atomic64_add_return_acquire(1, v); @@ -1406,7 +1408,7 @@ atomic64_inc_return_acquire(atomic64_t *v) #endif #ifndef atomic64_inc_return_release -static inline s64 +static __always_inline s64 atomic64_inc_return_release(atomic64_t *v) { return atomic64_add_return_release(1, v); @@ -1415,7 +1417,7 @@ atomic64_inc_return_release(atomic64_t *v) #endif #ifndef atomic64_inc_return_relaxed -static inline s64 +static __always_inline s64 atomic64_inc_return_relaxed(atomic64_t *v) { return atomic64_add_return_relaxed(1, v); @@ -1426,7 +1428,7 @@ atomic64_inc_return_relaxed(atomic64_t *v) #else /* atomic64_inc_return_relaxed */ #ifndef atomic64_inc_return_acquire -static inline s64 +static __always_inline s64 atomic64_inc_return_acquire(atomic64_t *v) { s64 ret = atomic64_inc_return_relaxed(v); @@ -1437,7 +1439,7 @@ atomic64_inc_return_acquire(atomic64_t *v) #endif #ifndef atomic64_inc_return_release -static inline s64 +static __always_inline s64 atomic64_inc_return_release(atomic64_t *v) { __atomic_release_fence(); @@ -1447,7 +1449,7 @@ atomic64_inc_return_release(atomic64_t *v) #endif #ifndef atomic64_inc_return -static inline s64 +static __always_inline s64 atomic64_inc_return(atomic64_t *v) { s64 ret; @@ -1469,7 +1471,7 @@ atomic64_inc_return(atomic64_t *v) #endif /* atomic64_fetch_inc */ #ifndef atomic64_fetch_inc -static inline s64 +static __always_inline s64 atomic64_fetch_inc(atomic64_t *v) { return atomic64_fetch_add(1, v); @@ -1478,7 +1480,7 @@ atomic64_fetch_inc(atomic64_t *v) #endif #ifndef atomic64_fetch_inc_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_inc_acquire(atomic64_t *v) { return atomic64_fetch_add_acquire(1, v); @@ -1487,7 +1489,7 
@@ atomic64_fetch_inc_acquire(atomic64_t *v) #endif #ifndef atomic64_fetch_inc_release -static inline s64 +static __always_inline s64 atomic64_fetch_inc_release(atomic64_t *v) { return atomic64_fetch_add_release(1, v); @@ -1496,7 +1498,7 @@ atomic64_fetch_inc_release(atomic64_t *v) #endif #ifndef atomic64_fetch_inc_relaxed -static inline s64 +static __always_inline s64 atomic64_fetch_inc_relaxed(atomic64_t *v) { return atomic64_fetch_add_relaxed(1, v); @@ -1507,7 +1509,7 @@ atomic64_fetch_inc_relaxed(atomic64_t *v) #else /* atomic64_fetch_inc_relaxed */ #ifndef atomic64_fetch_inc_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_inc_acquire(atomic64_t *v) { s64 ret = atomic64_fetch_inc_relaxed(v); @@ -1518,7 +1520,7 @@ atomic64_fetch_inc_acquire(atomic64_t *v) #endif #ifndef atomic64_fetch_inc_release -static inline s64 +static __always_inline s64 atomic64_fetch_inc_release(atomic64_t *v) { __atomic_release_fence(); @@ -1528,7 +1530,7 @@ atomic64_fetch_inc_release(atomic64_t *v) #endif #ifndef atomic64_fetch_inc -static inline s64 +static __always_inline s64 atomic64_fetch_inc(atomic64_t *v) { s64 ret; @@ -1543,7 +1545,7 @@ atomic64_fetch_inc(atomic64_t *v) #endif /* atomic64_fetch_inc_relaxed */ #ifndef atomic64_dec -static inline void +static __always_inline void atomic64_dec(atomic64_t *v) { atomic64_sub(1, v); @@ -1559,7 +1561,7 @@ atomic64_dec(atomic64_t *v) #endif /* atomic64_dec_return */ #ifndef atomic64_dec_return -static inline s64 +static __always_inline s64 atomic64_dec_return(atomic64_t *v) { return atomic64_sub_return(1, v); @@ -1568,7 +1570,7 @@ atomic64_dec_return(atomic64_t *v) #endif #ifndef atomic64_dec_return_acquire -static inline s64 +static __always_inline s64 atomic64_dec_return_acquire(atomic64_t *v) { return atomic64_sub_return_acquire(1, v); @@ -1577,7 +1579,7 @@ atomic64_dec_return_acquire(atomic64_t *v) #endif #ifndef atomic64_dec_return_release -static inline s64 +static __always_inline s64 atomic64_dec_return_release(atomic64_t *v) { return atomic64_sub_return_release(1, v); @@ -1586,7 +1588,7 @@ atomic64_dec_return_release(atomic64_t *v) #endif #ifndef atomic64_dec_return_relaxed -static inline s64 +static __always_inline s64 atomic64_dec_return_relaxed(atomic64_t *v) { return atomic64_sub_return_relaxed(1, v); @@ -1597,7 +1599,7 @@ atomic64_dec_return_relaxed(atomic64_t *v) #else /* atomic64_dec_return_relaxed */ #ifndef atomic64_dec_return_acquire -static inline s64 +static __always_inline s64 atomic64_dec_return_acquire(atomic64_t *v) { s64 ret = atomic64_dec_return_relaxed(v); @@ -1608,7 +1610,7 @@ atomic64_dec_return_acquire(atomic64_t *v) #endif #ifndef atomic64_dec_return_release -static inline s64 +static __always_inline s64 atomic64_dec_return_release(atomic64_t *v) { __atomic_release_fence(); @@ -1618,7 +1620,7 @@ atomic64_dec_return_release(atomic64_t *v) #endif #ifndef atomic64_dec_return -static inline s64 +static __always_inline s64 atomic64_dec_return(atomic64_t *v) { s64 ret; @@ -1640,7 +1642,7 @@ atomic64_dec_return(atomic64_t *v) #endif /* atomic64_fetch_dec */ #ifndef atomic64_fetch_dec -static inline s64 +static __always_inline s64 atomic64_fetch_dec(atomic64_t *v) { return atomic64_fetch_sub(1, v); @@ -1649,7 +1651,7 @@ atomic64_fetch_dec(atomic64_t *v) #endif #ifndef atomic64_fetch_dec_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_dec_acquire(atomic64_t *v) { return atomic64_fetch_sub_acquire(1, v); @@ -1658,7 +1660,7 @@ atomic64_fetch_dec_acquire(atomic64_t *v) #endif #ifndef 
atomic64_fetch_dec_release -static inline s64 +static __always_inline s64 atomic64_fetch_dec_release(atomic64_t *v) { return atomic64_fetch_sub_release(1, v); @@ -1667,7 +1669,7 @@ atomic64_fetch_dec_release(atomic64_t *v) #endif #ifndef atomic64_fetch_dec_relaxed -static inline s64 +static __always_inline s64 atomic64_fetch_dec_relaxed(atomic64_t *v) { return atomic64_fetch_sub_relaxed(1, v); @@ -1678,7 +1680,7 @@ atomic64_fetch_dec_relaxed(atomic64_t *v) #else /* atomic64_fetch_dec_relaxed */ #ifndef atomic64_fetch_dec_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_dec_acquire(atomic64_t *v) { s64 ret = atomic64_fetch_dec_relaxed(v); @@ -1689,7 +1691,7 @@ atomic64_fetch_dec_acquire(atomic64_t *v) #endif #ifndef atomic64_fetch_dec_release -static inline s64 +static __always_inline s64 atomic64_fetch_dec_release(atomic64_t *v) { __atomic_release_fence(); @@ -1699,7 +1701,7 @@ atomic64_fetch_dec_release(atomic64_t *v) #endif #ifndef atomic64_fetch_dec -static inline s64 +static __always_inline s64 atomic64_fetch_dec(atomic64_t *v) { s64 ret; @@ -1720,7 +1722,7 @@ atomic64_fetch_dec(atomic64_t *v) #else /* atomic64_fetch_and_relaxed */ #ifndef atomic64_fetch_and_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_and_acquire(s64 i, atomic64_t *v) { s64 ret = atomic64_fetch_and_relaxed(i, v); @@ -1731,7 +1733,7 @@ atomic64_fetch_and_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_and_release -static inline s64 +static __always_inline s64 atomic64_fetch_and_release(s64 i, atomic64_t *v) { __atomic_release_fence(); @@ -1741,7 +1743,7 @@ atomic64_fetch_and_release(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_and -static inline s64 +static __always_inline s64 atomic64_fetch_and(s64 i, atomic64_t *v) { s64 ret; @@ -1756,7 +1758,7 @@ atomic64_fetch_and(s64 i, atomic64_t *v) #endif /* atomic64_fetch_and_relaxed */ #ifndef atomic64_andnot -static inline void +static __always_inline void atomic64_andnot(s64 i, atomic64_t *v) { atomic64_and(~i, v); @@ -1772,7 +1774,7 @@ atomic64_andnot(s64 i, atomic64_t *v) #endif /* atomic64_fetch_andnot */ #ifndef atomic64_fetch_andnot -static inline s64 +static __always_inline s64 atomic64_fetch_andnot(s64 i, atomic64_t *v) { return atomic64_fetch_and(~i, v); @@ -1781,7 +1783,7 @@ atomic64_fetch_andnot(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_andnot_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) { return atomic64_fetch_and_acquire(~i, v); @@ -1790,7 +1792,7 @@ atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_andnot_release -static inline s64 +static __always_inline s64 atomic64_fetch_andnot_release(s64 i, atomic64_t *v) { return atomic64_fetch_and_release(~i, v); @@ -1799,7 +1801,7 @@ atomic64_fetch_andnot_release(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_andnot_relaxed -static inline s64 +static __always_inline s64 atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) { return atomic64_fetch_and_relaxed(~i, v); @@ -1810,7 +1812,7 @@ atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) #else /* atomic64_fetch_andnot_relaxed */ #ifndef atomic64_fetch_andnot_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) { s64 ret = atomic64_fetch_andnot_relaxed(i, v); @@ -1821,7 +1823,7 @@ atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_andnot_release -static inline s64 +static __always_inline s64 
atomic64_fetch_andnot_release(s64 i, atomic64_t *v) { __atomic_release_fence(); @@ -1831,7 +1833,7 @@ atomic64_fetch_andnot_release(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_andnot -static inline s64 +static __always_inline s64 atomic64_fetch_andnot(s64 i, atomic64_t *v) { s64 ret; @@ -1852,7 +1854,7 @@ atomic64_fetch_andnot(s64 i, atomic64_t *v) #else /* atomic64_fetch_or_relaxed */ #ifndef atomic64_fetch_or_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_or_acquire(s64 i, atomic64_t *v) { s64 ret = atomic64_fetch_or_relaxed(i, v); @@ -1863,7 +1865,7 @@ atomic64_fetch_or_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_or_release -static inline s64 +static __always_inline s64 atomic64_fetch_or_release(s64 i, atomic64_t *v) { __atomic_release_fence(); @@ -1873,7 +1875,7 @@ atomic64_fetch_or_release(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_or -static inline s64 +static __always_inline s64 atomic64_fetch_or(s64 i, atomic64_t *v) { s64 ret; @@ -1894,7 +1896,7 @@ atomic64_fetch_or(s64 i, atomic64_t *v) #else /* atomic64_fetch_xor_relaxed */ #ifndef atomic64_fetch_xor_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) { s64 ret = atomic64_fetch_xor_relaxed(i, v); @@ -1905,7 +1907,7 @@ atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_xor_release -static inline s64 +static __always_inline s64 atomic64_fetch_xor_release(s64 i, atomic64_t *v) { __atomic_release_fence(); @@ -1915,7 +1917,7 @@ atomic64_fetch_xor_release(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_xor -static inline s64 +static __always_inline s64 atomic64_fetch_xor(s64 i, atomic64_t *v) { s64 ret; @@ -1936,7 +1938,7 @@ atomic64_fetch_xor(s64 i, atomic64_t *v) #else /* atomic64_xchg_relaxed */ #ifndef atomic64_xchg_acquire -static inline s64 +static __always_inline s64 atomic64_xchg_acquire(atomic64_t *v, s64 i) { s64 ret = atomic64_xchg_relaxed(v, i); @@ -1947,7 +1949,7 @@ atomic64_xchg_acquire(atomic64_t *v, s64 i) #endif #ifndef atomic64_xchg_release -static inline s64 +static __always_inline s64 atomic64_xchg_release(atomic64_t *v, s64 i) { __atomic_release_fence(); @@ -1957,7 +1959,7 @@ atomic64_xchg_release(atomic64_t *v, s64 i) #endif #ifndef atomic64_xchg -static inline s64 +static __always_inline s64 atomic64_xchg(atomic64_t *v, s64 i) { s64 ret; @@ -1978,7 +1980,7 @@ atomic64_xchg(atomic64_t *v, s64 i) #else /* atomic64_cmpxchg_relaxed */ #ifndef atomic64_cmpxchg_acquire -static inline s64 +static __always_inline s64 atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) { s64 ret = atomic64_cmpxchg_relaxed(v, old, new); @@ -1989,7 +1991,7 @@ atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) #endif #ifndef atomic64_cmpxchg_release -static inline s64 +static __always_inline s64 atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) { __atomic_release_fence(); @@ -1999,7 +2001,7 @@ atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) #endif #ifndef atomic64_cmpxchg -static inline s64 +static __always_inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) { s64 ret; @@ -2021,7 +2023,7 @@ atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) #endif /* atomic64_try_cmpxchg */ #ifndef atomic64_try_cmpxchg -static inline bool +static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) { s64 r, o = *old; @@ -2034,7 +2036,7 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) #endif #ifndef atomic64_try_cmpxchg_acquire -static inline bool 
+static __always_inline bool atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) { s64 r, o = *old; @@ -2047,7 +2049,7 @@ atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) #endif #ifndef atomic64_try_cmpxchg_release -static inline bool +static __always_inline bool atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) { s64 r, o = *old; @@ -2060,7 +2062,7 @@ atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) #endif #ifndef atomic64_try_cmpxchg_relaxed -static inline bool +static __always_inline bool atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) { s64 r, o = *old; @@ -2075,7 +2077,7 @@ atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) #else /* atomic64_try_cmpxchg_relaxed */ #ifndef atomic64_try_cmpxchg_acquire -static inline bool +static __always_inline bool atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) { bool ret = atomic64_try_cmpxchg_relaxed(v, old, new); @@ -2086,7 +2088,7 @@ atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) #endif #ifndef atomic64_try_cmpxchg_release -static inline bool +static __always_inline bool atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) { __atomic_release_fence(); @@ -2096,7 +2098,7 @@ atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) #endif #ifndef atomic64_try_cmpxchg -static inline bool +static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) { bool ret; @@ -2120,7 +2122,7 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) * true if the result is zero, or false for all * other cases. */ -static inline bool +static __always_inline bool atomic64_sub_and_test(s64 i, atomic64_t *v) { return atomic64_sub_return(i, v) == 0; @@ -2137,7 +2139,7 @@ atomic64_sub_and_test(s64 i, atomic64_t *v) * returns true if the result is 0, or false for all other * cases. */ -static inline bool +static __always_inline bool atomic64_dec_and_test(atomic64_t *v) { return atomic64_dec_return(v) == 0; @@ -2154,7 +2156,7 @@ atomic64_dec_and_test(atomic64_t *v) * and returns true if the result is zero, or false for all * other cases. */ -static inline bool +static __always_inline bool atomic64_inc_and_test(atomic64_t *v) { return atomic64_inc_return(v) == 0; @@ -2172,7 +2174,7 @@ atomic64_inc_and_test(atomic64_t *v) * if the result is negative, or false when * result is greater than or equal to zero. */ -static inline bool +static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v) { return atomic64_add_return(i, v) < 0; @@ -2190,7 +2192,7 @@ atomic64_add_negative(s64 i, atomic64_t *v) * Atomically adds @a to @v, so long as @v was not already @u. * Returns original value of @v */ -static inline s64 +static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) { s64 c = atomic64_read(v); @@ -2215,7 +2217,7 @@ atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) * Atomically adds @a to @v, if @v was not already @u. * Returns true if the addition was done. */ -static inline bool +static __always_inline bool atomic64_add_unless(atomic64_t *v, s64 a, s64 u) { return atomic64_fetch_add_unless(v, a, u) != u; @@ -2231,7 +2233,7 @@ atomic64_add_unless(atomic64_t *v, s64 a, s64 u) * Atomically increments @v by 1, if @v is non-zero. * Returns true if the increment was done. 
*/ -static inline bool +static __always_inline bool atomic64_inc_not_zero(atomic64_t *v) { return atomic64_add_unless(v, 1, 0); @@ -2240,7 +2242,7 @@ atomic64_inc_not_zero(atomic64_t *v) #endif #ifndef atomic64_inc_unless_negative -static inline bool +static __always_inline bool atomic64_inc_unless_negative(atomic64_t *v) { s64 c = atomic64_read(v); @@ -2256,7 +2258,7 @@ atomic64_inc_unless_negative(atomic64_t *v) #endif #ifndef atomic64_dec_unless_positive -static inline bool +static __always_inline bool atomic64_dec_unless_positive(atomic64_t *v) { s64 c = atomic64_read(v); @@ -2272,7 +2274,7 @@ atomic64_dec_unless_positive(atomic64_t *v) #endif #ifndef atomic64_dec_if_positive -static inline s64 +static __always_inline s64 atomic64_dec_if_positive(atomic64_t *v) { s64 dec, c = atomic64_read(v); @@ -2292,4 +2294,4 @@ atomic64_dec_if_positive(atomic64_t *v) #define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) #endif /* _LINUX_ATOMIC_FALLBACK_H */ -// 25de4a2804d70f57e994fe3b419148658bb5378a +// baaf45f4c24ed88ceae58baca39d7fd80bb8101b diff --git a/scripts/atomic/fallbacks/acquire b/scripts/atomic/fallbacks/acquire index e38871e64db65..ea489acc285e5 100755 --- a/scripts/atomic/fallbacks/acquire +++ b/scripts/atomic/fallbacks/acquire @@ -1,5 +1,5 @@ cat <counter); diff --git a/scripts/atomic/fallbacks/release b/scripts/atomic/fallbacks/release index 3f628a3802d98..730d2a6d3e072 100755 --- a/scripts/atomic/fallbacks/release +++ b/scripts/atomic/fallbacks/release @@ -1,5 +1,5 @@ cat <counter, i); diff --git a/scripts/atomic/fallbacks/sub_and_test b/scripts/atomic/fallbacks/sub_and_test index 289ef17a2d7a2..6cfe4ed497467 100755 --- a/scripts/atomic/fallbacks/sub_and_test +++ b/scripts/atomic/fallbacks/sub_and_test @@ -8,7 +8,7 @@ cat < + EOF for xchg in "xchg" "cmpxchg" "cmpxchg64"; do -- GitLab From 7161177481d521e725a7bc6c9308ac2968fee038 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Thu, 12 Dec 2019 01:07:08 +0100 Subject: [PATCH 0014/1971] kcsan: Document static blacklisting options Updates the section on "Selective analysis", listing all available options to blacklist reporting data races for: specific accesses, functions, compilation units, and entire directories. These options should provide adequate control for maintainers to opt out of KCSAN analysis at varying levels of granularity. It is hoped to provide the required control to reflect preferences for handling data races across the kernel. Signed-off-by: Marco Elver Signed-off-by: Paul E. McKenney --- Documentation/dev-tools/kcsan.rst | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/Documentation/dev-tools/kcsan.rst b/Documentation/dev-tools/kcsan.rst index a6f4f92df2fa0..65a0be513b7de 100644 --- a/Documentation/dev-tools/kcsan.rst +++ b/Documentation/dev-tools/kcsan.rst @@ -101,18 +101,28 @@ instrumentation or e.g. DMA accesses. Selective analysis ~~~~~~~~~~~~~~~~~~ -To disable KCSAN data race detection for an entire subsystem, add to the -respective ``Makefile``:: +It may be desirable to disable data race detection for specific accesses, +functions, compilation units, or entire subsystems. For static blacklisting, +the below options are available: - KCSAN_SANITIZE := n +* KCSAN understands the ``data_race(expr)`` annotation, which tells KCSAN that + any data races due to accesses in ``expr`` should be ignored and resulting + behaviour when encountering a data race is deemed safe. 
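For illustration only (a sketch, not part of this patch), the ``data_race(expr)`` annotation described above might be used like this; the ``stats`` structure and ``show_stats()`` helper are hypothetical:

    /*
     * Illustrative sketch: the lockless read below may race with
     * concurrent writers, which is acceptable for a diagnostic
     * printout, so it is wrapped in data_race() to tell KCSAN the
     * race is intentional.
     */
    static void show_stats(const struct stats *s)
    {
            pr_info("errors so far: %lu\n", data_race(s->nr_errors));
    }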
+ +* Disabling data race detection for entire functions can be accomplished by + using the function attribute ``__no_kcsan`` (or ``__no_kcsan_or_inline`` for + ``__always_inline`` functions). To dynamically control for which functions + data races are reported, see the `debugfs`_ blacklist/whitelist feature. -To disable KCSAN on a per-file basis, add to the ``Makefile``:: +* To disable data race detection for a particular compilation unit, add to the + ``Makefile``:: KCSAN_SANITIZE_file.o := n -KCSAN also understands the ``data_race(expr)`` annotation, which tells KCSAN -that any data races due to accesses in ``expr`` should be ignored and resulting -behaviour when encountering a data race is deemed safe. +* To disable data race detection for all compilation units listed in a + ``Makefile``, add to the respective ``Makefile``:: + + KCSAN_SANITIZE := n debugfs ~~~~~~~ -- GitLab From e33f9a169747880a008dd5e7b934fc592e91cd63 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Thu, 12 Dec 2019 01:07:09 +0100 Subject: [PATCH 0015/1971] kcsan: Add __no_kcsan function attribute Since the use of -fsanitize=thread is an implementation detail of KCSAN, the name __no_sanitize_thread could be misleading if used widely. Instead, we introduce the __no_kcsan attribute which is shorter and more accurate in the context of KCSAN. This matches the attribute name __no_kcsan_or_inline. The use of __kcsan_or_inline itself is still required for __always_inline functions to retain compatibility with older compilers. Signed-off-by: Marco Elver Signed-off-by: Paul E. McKenney --- include/linux/compiler-gcc.h | 3 +-- include/linux/compiler.h | 7 +++++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 0eb2a1cc411dc..cf294faec2f87 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -146,8 +146,7 @@ #endif #if defined(__SANITIZE_THREAD__) && __has_attribute(__no_sanitize_thread__) -#define __no_sanitize_thread \ - __attribute__((__noinline__)) __attribute__((no_sanitize_thread)) +#define __no_sanitize_thread __attribute__((no_sanitize_thread)) #else #define __no_sanitize_thread #endif diff --git a/include/linux/compiler.h b/include/linux/compiler.h index ad8c76144a3c5..8c0beb10c1ddb 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -207,12 +207,15 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, # define __no_kasan_or_inline __always_inline #endif +#define __no_kcsan __no_sanitize_thread #ifdef __SANITIZE_THREAD__ /* * Rely on __SANITIZE_THREAD__ instead of CONFIG_KCSAN, to avoid not inlining in - * compilation units where instrumentation is disabled. + * compilation units where instrumentation is disabled. The attribute 'noinline' + * is required for older compilers, where implicit inlining of very small + * functions renders __no_sanitize_thread ineffective. 
*/ -# define __no_kcsan_or_inline __no_sanitize_thread notrace __maybe_unused +# define __no_kcsan_or_inline __no_kcsan noinline notrace __maybe_unused # define __no_sanitize_or_inline __no_kcsan_or_inline #else # define __no_kcsan_or_inline __always_inline -- GitLab From c29a59e43829beabc4c26036ebcc6a32dd0b6a01 Mon Sep 17 00:00:00 2001 From: Jann Horn Date: Mon, 6 Jan 2020 21:02:04 +0100 Subject: [PATCH 0016/1971] x86/vdso: Enable sanitizers for vma.o The vDSO makefile opts out of all sanitizers (and objtool validation); however, vma.o is a normal kernel object file (and already has objtool validation selectively enabled), so turn the sanitizers back on for that file. Signed-off-by: Jann Horn Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Marco Elver Cc: Paul E. McKenney Link: https://lkml.kernel.org/r/20200106200204.94782-1-jannh@google.com Signed-off-by: Ingo Molnar --- arch/x86/entry/vdso/Makefile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile index 0f46273b45565..50a1c90088644 100644 --- a/arch/x86/entry/vdso/Makefile +++ b/arch/x86/entry/vdso/Makefile @@ -30,6 +30,9 @@ vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o # files to link into kernel obj-y += vma.o +KASAN_SANITIZE_vma.o := y +UBSAN_SANITIZE_vma.o := y +KCSAN_SANITIZE_vma.o := y OBJECT_FILES_NON_STANDARD_vma.o := n # vDSO images to build -- GitLab From 8efbc518b884e1db2dd6a6fce62d0112ab871dcf Mon Sep 17 00:00:00 2001 From: Dave Young Date: Wed, 12 Feb 2020 19:04:24 +0800 Subject: [PATCH 0017/1971] x86/kexec: Do not reserve EFI setup_data in the kexec e820 table The e820 table for the kexec kernel unconditionally marks setup_data as reserved because the second kernel can reuse setup_data passed by the 1st kernel's boot loader, for example SETUP_PCI marked regions like PCI BIOS, etc. SETUP_EFI types, however, are used by kexec itself to enable EFI in the 2nd kernel. Thus, it is pointless to add this type of setup_data to the kexec e820 table as reserved. IOW, what happens is this: - 1st physical boot: no SETUP_EFI. - kexec loads a new kernel and prepares a SETUP_EFI setup_data blob, then reboots the machine. - 2nd kernel sees SETUP_EFI, reserves it both in the e820 and in the kexec e820 table. - If another kexec load is executed, it prepares a new SETUP_EFI blob and then reboots the machine into the new kernel. 5. The 3rd kexec-ed kernel has two SETUP_EFI ranges reserved. And so on... Thus skip SETUP_EFI while reserving setup_data in the e820_table_kexec table because it is not needed. [ bp: Heavily massage commit message, shorten line and improve comment. ] Signed-off-by: Dave Young Signed-off-by: Borislav Petkov Link: https://lkml.kernel.org/r/20200212110424.GA2938@dhcp-128-65.nay.redhat.com --- arch/x86/kernel/e820.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index c5399e80c59c5..c92029651b85b 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -999,7 +999,15 @@ void __init e820__reserve_setup_data(void) while (pa_data) { data = early_memremap(pa_data, sizeof(*data)); e820__range_update(pa_data, sizeof(*data)+data->len, E820_TYPE_RAM, E820_TYPE_RESERVED_KERN); - e820__range_update_kexec(pa_data, sizeof(*data)+data->len, E820_TYPE_RAM, E820_TYPE_RESERVED_KERN); + + /* + * SETUP_EFI is supplied by kexec and does not need to be + * reserved. 
+ */ + if (data->type != SETUP_EFI) + e820__range_update_kexec(pa_data, + sizeof(*data) + data->len, + E820_TYPE_RAM, E820_TYPE_RESERVED_KERN); if (data->type == SETUP_INDIRECT && ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) { -- GitLab From e2ac07c06058ae2d58b45bbf2a2a352771d76fcb Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Tue, 17 Mar 2020 14:08:40 +0100 Subject: [PATCH 0018/1971] x86/purgatory: Disable various profiling and sanitizing options Since the purgatory is a special stand-alone binary, various profiling and sanitizing options must be disabled. Having these options enabled typically will cause dependencies on various special symbols exported by special libs / stubs used by these frameworks. Since the purgatory is special, it is not linked against these stubs causing missing symbols in the purgatory if these options are not disabled. Sync the set of disabled profiling and sanitizing options with that from drivers/firmware/efi/libstub/Makefile, adding -DDISABLE_BRANCH_PROFILING to the CFLAGS and setting: GCOV_PROFILE := n UBSAN_SANITIZE := n This fixes broken references to ftrace_likely_update() when CONFIG_TRACE_BRANCH_PROFILING is enabled and to __gcov_init() and __gcov_exit() when CONFIG_GCOV_KERNEL is enabled. Signed-off-by: Hans de Goede Signed-off-by: Borislav Petkov Link: https://lkml.kernel.org/r/20200317130841.290418-1-hdegoede@redhat.com --- arch/x86/purgatory/Makefile | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile index fb4ee54443799..9733d1cc791dd 100644 --- a/arch/x86/purgatory/Makefile +++ b/arch/x86/purgatory/Makefile @@ -17,7 +17,10 @@ CFLAGS_sha256.o := -D__DISABLE_EXPORTS LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib targets += purgatory.ro +# Sanitizer, etc. runtimes are unavailable and cannot be linked here. +GCOV_PROFILE := n KASAN_SANITIZE := n +UBSAN_SANITIZE := n KCOV_INSTRUMENT := n # These are adjustments to the compiler flags used for objects that @@ -25,7 +28,7 @@ KCOV_INSTRUMENT := n PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss -PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN) +PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN) -DDISABLE_BRANCH_PROFILING # Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That # in turn leaves some undefined symbols like __fentry__ in purgatory and not -- GitLab From e4160b2e4b02377c67f8ecd05786811598f39acd Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Tue, 17 Mar 2020 14:08:41 +0100 Subject: [PATCH 0019/1971] x86/purgatory: Fail the build if purgatory.ro has missing symbols Linking purgatory.ro with -r enables "incremental linking"; this means no checks for unresolved symbols are done while linking purgatory.ro. A change to the sha256 code has caused the purgatory in 5.4-rc1 to have a missing symbol on memzero_explicit(), yet things still happily build. Add an extra check for unresolved symbols by calling ld without -r before running bin2c to generate kexec-purgatory.c. 
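As background (a toy sketch, not taken from the patch), the difference between an incremental link with -r and a final link can be seen with an object that calls an undefined function; the file names and the exact linker output below are illustrative:

    $ cat use.c
    extern void missing_helper(void);
    void caller(void) { missing_helper(); }
    $ gcc -c use.c
    $ ld -r -o use.ro use.o            # incremental link: succeeds silently
    $ ld -e caller -o use.chk use.ro   # final link: the error surfaces
    ld: use.ro: in function `caller':
    use.c:(.text+0x...): undefined reference to `missing_helper'

The purgatory check added in this patch works the same way: purgatory.chk is linked from purgatory.ro without -r purely so that any unresolved symbol fails the build.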
This causes a build of 5.4-rc1 with this patch added to fail as it should: CHK arch/x86/purgatory/purgatory.ro ld: arch/x86/purgatory/purgatory.ro: in function `sha256_transform': sha256.c:(.text+0x1c0c): undefined reference to `memzero_explicit' make[2]: *** [arch/x86/purgatory/Makefile:72: arch/x86/purgatory/kexec-purgatory.c] Error 1 make[1]: *** [scripts/Makefile.build:509: arch/x86/purgatory] Error 2 make: *** [Makefile:1650: arch/x86] Error 2 Also remove --no-undefined from LDFLAGS_purgatory.ro as that has no effect. Signed-off-by: Hans de Goede Signed-off-by: Borislav Petkov Link: https://lkml.kernel.org/r/20200317130841.290418-2-hdegoede@redhat.com --- arch/x86/purgatory/.gitignore | 1 + arch/x86/purgatory/Makefile | 13 ++++++++++--- 2 files changed, 11 insertions(+), 3 deletions(-) create mode 100644 arch/x86/purgatory/.gitignore diff --git a/arch/x86/purgatory/.gitignore b/arch/x86/purgatory/.gitignore new file mode 100644 index 0000000000000..d2be1500671de --- /dev/null +++ b/arch/x86/purgatory/.gitignore @@ -0,0 +1 @@ +purgatory.chk diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile index 9733d1cc791dd..54aadbb7e0b69 100644 --- a/arch/x86/purgatory/Makefile +++ b/arch/x86/purgatory/Makefile @@ -14,8 +14,12 @@ $(obj)/sha256.o: $(srctree)/lib/crypto/sha256.c FORCE CFLAGS_sha256.o := -D__DISABLE_EXPORTS -LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib -targets += purgatory.ro +# When linking purgatory.ro with -r unresolved symbols are not checked, +# also link a purgatory.chk binary without -r to check for unresolved symbols. +PURGATORY_LDFLAGS := -e purgatory_start -nostdlib -z nodefaultlib +LDFLAGS_purgatory.ro := -r $(PURGATORY_LDFLAGS) +LDFLAGS_purgatory.chk := $(PURGATORY_LDFLAGS) +targets += purgatory.ro purgatory.chk # Sanitizer, etc. runtimes are unavailable and cannot be linked here. GCOV_PROFILE := n @@ -61,12 +65,15 @@ CFLAGS_string.o += $(PURGATORY_CFLAGS) $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE $(call if_changed,ld) +$(obj)/purgatory.chk: $(obj)/purgatory.ro FORCE + $(call if_changed,ld) + targets += kexec-purgatory.c quiet_cmd_bin2c = BIN2C $@ cmd_bin2c = $(objtree)/scripts/bin2c kexec_purgatory < $< > $@ -$(obj)/kexec-purgatory.c: $(obj)/purgatory.ro FORCE +$(obj)/kexec-purgatory.c: $(obj)/purgatory.ro $(obj)/purgatory.chk FORCE $(call if_changed,bin2c) obj-$(CONFIG_KEXEC_FILE) += kexec-purgatory.o -- GitLab From 5c361425744d1e3b03d835dde659708683ca27d1 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Tue, 7 Jan 2020 17:31:04 +0100 Subject: [PATCH 0020/1971] kcsan: Prefer __always_inline for fast-path Prefer __always_inline for fast-path functions that are called outside of user_access_save, to avoid generating UACCESS warnings when optimizing for size (CC_OPTIMIZE_FOR_SIZE). It will also avoid future surprises with compiler versions that change the inlining heuristic even when optimizing for performance. Reported-by: Randy Dunlap Acked-by: Randy Dunlap # build-tested Signed-off-by: Marco Elver Signed-off-by: Paul E. McKenney Signed-off-by: Ingo Molnar Link: http://lkml.kernel.org/r/58708908-84a0-0a81-a836-ad97e33dbb62@infradead.org --- kernel/kcsan/atomic.h | 2 +- kernel/kcsan/core.c | 18 +++++++++--------- kernel/kcsan/encoding.h | 14 +++++++------- 3 files changed, 17 insertions(+), 17 deletions(-) diff --git a/kernel/kcsan/atomic.h b/kernel/kcsan/atomic.h index 576e03ddd6a31..a9c1930534914 100644 --- a/kernel/kcsan/atomic.h +++ b/kernel/kcsan/atomic.h @@ -18,7 +18,7 @@ * than cast to volatile. 
Eventually, we hope to be able to remove this * function. */ -static inline bool kcsan_is_atomic(const volatile void *ptr) +static __always_inline bool kcsan_is_atomic(const volatile void *ptr) { /* only jiffies for now */ return ptr == &jiffies; diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c index 3314fc29e2369..4d4ab5c5dc53d 100644 --- a/kernel/kcsan/core.c +++ b/kernel/kcsan/core.c @@ -78,10 +78,10 @@ static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1]; */ static DEFINE_PER_CPU(long, kcsan_skip); -static inline atomic_long_t *find_watchpoint(unsigned long addr, - size_t size, - bool expect_write, - long *encoded_watchpoint) +static __always_inline atomic_long_t *find_watchpoint(unsigned long addr, + size_t size, + bool expect_write, + long *encoded_watchpoint) { const int slot = watchpoint_slot(addr); const unsigned long addr_masked = addr & WATCHPOINT_ADDR_MASK; @@ -146,7 +146,7 @@ insert_watchpoint(unsigned long addr, size_t size, bool is_write) * 2. the thread that set up the watchpoint already removed it; * 3. the watchpoint was removed and then re-used. */ -static inline bool +static __always_inline bool try_consume_watchpoint(atomic_long_t *watchpoint, long encoded_watchpoint) { return atomic_long_try_cmpxchg_relaxed(watchpoint, &encoded_watchpoint, CONSUMED_WATCHPOINT); @@ -160,7 +160,7 @@ static inline bool remove_watchpoint(atomic_long_t *watchpoint) return atomic_long_xchg_relaxed(watchpoint, INVALID_WATCHPOINT) != CONSUMED_WATCHPOINT; } -static inline struct kcsan_ctx *get_ctx(void) +static __always_inline struct kcsan_ctx *get_ctx(void) { /* * In interrupts, use raw_cpu_ptr to avoid unnecessary checks, that would @@ -169,7 +169,7 @@ static inline struct kcsan_ctx *get_ctx(void) return in_task() ? ¤t->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx); } -static inline bool is_atomic(const volatile void *ptr) +static __always_inline bool is_atomic(const volatile void *ptr) { struct kcsan_ctx *ctx = get_ctx(); @@ -193,7 +193,7 @@ static inline bool is_atomic(const volatile void *ptr) return kcsan_is_atomic(ptr); } -static inline bool should_watch(const volatile void *ptr, int type) +static __always_inline bool should_watch(const volatile void *ptr, int type) { /* * Never set up watchpoints when memory operations are atomic. @@ -226,7 +226,7 @@ static inline void reset_kcsan_skip(void) this_cpu_write(kcsan_skip, skip_count); } -static inline bool kcsan_is_enabled(void) +static __always_inline bool kcsan_is_enabled(void) { return READ_ONCE(kcsan_enabled) && get_ctx()->disable_count == 0; } diff --git a/kernel/kcsan/encoding.h b/kernel/kcsan/encoding.h index b63890e864493..f03562aaf2eb4 100644 --- a/kernel/kcsan/encoding.h +++ b/kernel/kcsan/encoding.h @@ -59,10 +59,10 @@ encode_watchpoint(unsigned long addr, size_t size, bool is_write) (addr & WATCHPOINT_ADDR_MASK)); } -static inline bool decode_watchpoint(long watchpoint, - unsigned long *addr_masked, - size_t *size, - bool *is_write) +static __always_inline bool decode_watchpoint(long watchpoint, + unsigned long *addr_masked, + size_t *size, + bool *is_write) { if (watchpoint == INVALID_WATCHPOINT || watchpoint == CONSUMED_WATCHPOINT) @@ -78,13 +78,13 @@ static inline bool decode_watchpoint(long watchpoint, /* * Return watchpoint slot for an address. 
*/ -static inline int watchpoint_slot(unsigned long addr) +static __always_inline int watchpoint_slot(unsigned long addr) { return (addr / PAGE_SIZE) % CONFIG_KCSAN_NUM_WATCHPOINTS; } -static inline bool matching_access(unsigned long addr1, size_t size1, - unsigned long addr2, size_t size2) +static __always_inline bool matching_access(unsigned long addr1, size_t size1, + unsigned long addr2, size_t size2) { unsigned long end_range1 = addr1 + size1 - 1; unsigned long end_range2 = addr2 + size2 - 1; -- GitLab From 47144eca282189afcf34ef25aee8408c168765d4 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Fri, 10 Jan 2020 19:48:33 +0100 Subject: [PATCH 0021/1971] kcsan: Show full access type in report This commit adds access-type information to KCSAN's reports as follows: "read", "read (marked)", "write", and "write (marked)". Suggested-by: Paul E. McKenney Signed-off-by: Marco Elver Signed-off-by: Paul E. McKenney Signed-off-by: Ingo Molnar --- kernel/kcsan/core.c | 15 ++++++++------- kernel/kcsan/kcsan.h | 2 +- kernel/kcsan/report.c | 43 ++++++++++++++++++++++++++++--------------- 3 files changed, 37 insertions(+), 23 deletions(-) diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c index 4d4ab5c5dc53d..87bf857c88935 100644 --- a/kernel/kcsan/core.c +++ b/kernel/kcsan/core.c @@ -255,7 +255,7 @@ static inline unsigned int get_delay(void) static noinline void kcsan_found_watchpoint(const volatile void *ptr, size_t size, - bool is_write, + int type, atomic_long_t *watchpoint, long encoded_watchpoint) { @@ -276,7 +276,7 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr, flags = user_access_save(); if (consumed) { - kcsan_report(ptr, size, is_write, true, raw_smp_processor_id(), + kcsan_report(ptr, size, type, true, raw_smp_processor_id(), KCSAN_REPORT_CONSUMED_WATCHPOINT); } else { /* @@ -292,8 +292,9 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr, } static noinline void -kcsan_setup_watchpoint(const volatile void *ptr, size_t size, bool is_write) +kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type) { + const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0; atomic_long_t *watchpoint; union { u8 _1; @@ -415,13 +416,13 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, bool is_write) * No need to increment 'data_races' counter, as the racing * thread already did. */ - kcsan_report(ptr, size, is_write, size > 8 || value_change, + kcsan_report(ptr, size, type, size > 8 || value_change, smp_processor_id(), KCSAN_REPORT_RACE_SIGNAL); } else if (value_change) { /* Inferring a race, since the value should not have changed. 
*/ kcsan_counter_inc(KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN); if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN)) - kcsan_report(ptr, size, is_write, true, + kcsan_report(ptr, size, type, true, smp_processor_id(), KCSAN_REPORT_RACE_UNKNOWN_ORIGIN); } @@ -455,10 +456,10 @@ static __always_inline void check_access(const volatile void *ptr, size_t size, */ if (unlikely(watchpoint != NULL)) - kcsan_found_watchpoint(ptr, size, is_write, watchpoint, + kcsan_found_watchpoint(ptr, size, type, watchpoint, encoded_watchpoint); else if (unlikely(should_watch(ptr, type))) - kcsan_setup_watchpoint(ptr, size, is_write); + kcsan_setup_watchpoint(ptr, size, type); } /* === Public interface ===================================================== */ diff --git a/kernel/kcsan/kcsan.h b/kernel/kcsan/kcsan.h index d3b9a96ac8a43..8492da45494bf 100644 --- a/kernel/kcsan/kcsan.h +++ b/kernel/kcsan/kcsan.h @@ -103,7 +103,7 @@ enum kcsan_report_type { /* * Print a race report from thread that encountered the race. */ -extern void kcsan_report(const volatile void *ptr, size_t size, bool is_write, +extern void kcsan_report(const volatile void *ptr, size_t size, int access_type, bool value_change, int cpu_id, enum kcsan_report_type type); #endif /* _KERNEL_KCSAN_KCSAN_H */ diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c index 0eea05a3135b7..9f503ca2ff7a3 100644 --- a/kernel/kcsan/report.c +++ b/kernel/kcsan/report.c @@ -24,7 +24,7 @@ static struct { const volatile void *ptr; size_t size; - bool is_write; + int access_type; int task_pid; int cpu_id; unsigned long stack_entries[NUM_STACK_ENTRIES]; @@ -41,8 +41,10 @@ static DEFINE_SPINLOCK(report_lock); * Special rules to skip reporting. */ static bool -skip_report(bool is_write, bool value_change, unsigned long top_frame) +skip_report(int access_type, bool value_change, unsigned long top_frame) { + const bool is_write = (access_type & KCSAN_ACCESS_WRITE) != 0; + if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY) && is_write && !value_change) { /* @@ -63,9 +65,20 @@ skip_report(bool is_write, bool value_change, unsigned long top_frame) return kcsan_skip_report_debugfs(top_frame); } -static inline const char *get_access_type(bool is_write) +static const char *get_access_type(int type) { - return is_write ? "write" : "read"; + switch (type) { + case 0: + return "read"; + case KCSAN_ACCESS_ATOMIC: + return "read (marked)"; + case KCSAN_ACCESS_WRITE: + return "write"; + case KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC: + return "write (marked)"; + default: + BUG(); + } } /* Return thread description: in task or interrupt. */ @@ -112,7 +125,7 @@ static int sym_strcmp(void *addr1, void *addr2) /* * Returns true if a report was generated, false otherwise. */ -static bool print_report(const volatile void *ptr, size_t size, bool is_write, +static bool print_report(const volatile void *ptr, size_t size, int access_type, bool value_change, int cpu_id, enum kcsan_report_type type) { @@ -124,7 +137,7 @@ static bool print_report(const volatile void *ptr, size_t size, bool is_write, /* * Must check report filter rules before starting to print. 
*/ - if (skip_report(is_write, true, stack_entries[skipnr])) + if (skip_report(access_type, true, stack_entries[skipnr])) return false; if (type == KCSAN_REPORT_RACE_SIGNAL) { @@ -132,7 +145,7 @@ static bool print_report(const volatile void *ptr, size_t size, bool is_write, other_info.num_stack_entries); /* @value_change is only known for the other thread */ - if (skip_report(other_info.is_write, value_change, + if (skip_report(other_info.access_type, value_change, other_info.stack_entries[other_skipnr])) return false; } @@ -170,7 +183,7 @@ static bool print_report(const volatile void *ptr, size_t size, bool is_write, switch (type) { case KCSAN_REPORT_RACE_SIGNAL: pr_err("%s to 0x%px of %zu bytes by %s on cpu %i:\n", - get_access_type(other_info.is_write), other_info.ptr, + get_access_type(other_info.access_type), other_info.ptr, other_info.size, get_thread_desc(other_info.task_pid), other_info.cpu_id); @@ -181,14 +194,14 @@ static bool print_report(const volatile void *ptr, size_t size, bool is_write, pr_err("\n"); pr_err("%s to 0x%px of %zu bytes by %s on cpu %i:\n", - get_access_type(is_write), ptr, size, + get_access_type(access_type), ptr, size, get_thread_desc(in_task() ? task_pid_nr(current) : -1), cpu_id); break; case KCSAN_REPORT_RACE_UNKNOWN_ORIGIN: pr_err("race at unknown origin, with %s to 0x%px of %zu bytes by %s on cpu %i:\n", - get_access_type(is_write), ptr, size, + get_access_type(access_type), ptr, size, get_thread_desc(in_task() ? task_pid_nr(current) : -1), cpu_id); break; @@ -223,7 +236,7 @@ static void release_report(unsigned long *flags, enum kcsan_report_type type) * required for the report type, simply acquires report_lock and returns true. */ static bool prepare_report(unsigned long *flags, const volatile void *ptr, - size_t size, bool is_write, int cpu_id, + size_t size, int access_type, int cpu_id, enum kcsan_report_type type) { if (type != KCSAN_REPORT_CONSUMED_WATCHPOINT && @@ -243,7 +256,7 @@ retry: other_info.ptr = ptr; other_info.size = size; - other_info.is_write = is_write; + other_info.access_type = access_type; other_info.task_pid = in_task() ? task_pid_nr(current) : -1; other_info.cpu_id = cpu_id; other_info.num_stack_entries = stack_trace_save(other_info.stack_entries, NUM_STACK_ENTRIES, 1); @@ -302,14 +315,14 @@ retry: goto retry; } -void kcsan_report(const volatile void *ptr, size_t size, bool is_write, +void kcsan_report(const volatile void *ptr, size_t size, int access_type, bool value_change, int cpu_id, enum kcsan_report_type type) { unsigned long flags = 0; kcsan_disable_current(); - if (prepare_report(&flags, ptr, size, is_write, cpu_id, type)) { - if (print_report(ptr, size, is_write, value_change, cpu_id, type) && panic_on_warn) + if (prepare_report(&flags, ptr, size, access_type, cpu_id, type)) { + if (print_report(ptr, size, access_type, value_change, cpu_id, type) && panic_on_warn) panic("panic_on_warn set ...\n"); release_report(&flags, type); -- GitLab From 05f9a4067964e3f864210271a6299f13d2eeea55 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Fri, 10 Jan 2020 19:48:34 +0100 Subject: [PATCH 0022/1971] kcsan: Rate-limit reporting per data races KCSAN data-race reports can occur quite frequently, so much so as to render the system useless. This commit therefore adds support for time-based rate-limiting KCSAN reports, with the time interval specified by a new KCSAN_REPORT_ONCE_IN_MS Kconfig option. The default is 3000 milliseconds, also known as three seconds. 
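For example (illustrative value, not part of the patch), the reporting window can be widened, or rate limiting disabled entirely, via the new Kconfig option:

    # Report each unique data race at most once per 10 seconds
    # (a value of 0 disables rate limiting entirely):
    CONFIG_KCSAN_REPORT_ONCE_IN_MS=10000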
Because KCSAN must detect data races in allocators and in other contexts where use of allocation is ill-advised, a fixed-size array is used to buffer reports during each reporting interval. To reduce the number of reports lost due to array overflow, this commit stores only one instance of duplicate reports, which has the benefit of further reducing KCSAN's console output rate. Reported-by: Qian Cai Suggested-by: Paul E. McKenney Signed-off-by: Marco Elver Signed-off-by: Paul E. McKenney Signed-off-by: Ingo Molnar --- kernel/kcsan/report.c | 110 ++++++++++++++++++++++++++++++++++++++---- lib/Kconfig.kcsan | 10 ++++ 2 files changed, 110 insertions(+), 10 deletions(-) diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c index 9f503ca2ff7a3..b5b4feea49de5 100644 --- a/kernel/kcsan/report.c +++ b/kernel/kcsan/report.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 +#include #include #include #include @@ -31,12 +32,99 @@ static struct { int num_stack_entries; } other_info = { .ptr = NULL }; +/* + * Information about reported data races; used to rate limit reporting. + */ +struct report_time { + /* + * The last time the data race was reported. + */ + unsigned long time; + + /* + * The frames of the 2 threads; if only 1 thread is known, one frame + * will be 0. + */ + unsigned long frame1; + unsigned long frame2; +}; + +/* + * Since we also want to be able to debug allocators with KCSAN, to avoid + * deadlock, report_times cannot be dynamically resized with krealloc in + * rate_limit_report. + * + * Therefore, we use a fixed-size array, which at most will occupy a page. This + * still adequately rate limits reports, assuming that a) number of unique data + * races is not excessive, and b) occurrence of unique data races within the + * same time window is limited. + */ +#define REPORT_TIMES_MAX (PAGE_SIZE / sizeof(struct report_time)) +#define REPORT_TIMES_SIZE \ + (CONFIG_KCSAN_REPORT_ONCE_IN_MS > REPORT_TIMES_MAX ? \ + REPORT_TIMES_MAX : \ + CONFIG_KCSAN_REPORT_ONCE_IN_MS) +static struct report_time report_times[REPORT_TIMES_SIZE]; + /* * This spinlock protects reporting and other_info, since other_info is usually * required when reporting. */ static DEFINE_SPINLOCK(report_lock); +/* + * Checks if the data race identified by thread frames frame1 and frame2 has + * been reported since (now - KCSAN_REPORT_ONCE_IN_MS). + */ +static bool rate_limit_report(unsigned long frame1, unsigned long frame2) +{ + struct report_time *use_entry = &report_times[0]; + unsigned long invalid_before; + int i; + + BUILD_BUG_ON(CONFIG_KCSAN_REPORT_ONCE_IN_MS != 0 && REPORT_TIMES_SIZE == 0); + + if (CONFIG_KCSAN_REPORT_ONCE_IN_MS == 0) + return false; + + invalid_before = jiffies - msecs_to_jiffies(CONFIG_KCSAN_REPORT_ONCE_IN_MS); + + /* Check if a matching data race report exists. */ + for (i = 0; i < REPORT_TIMES_SIZE; ++i) { + struct report_time *rt = &report_times[i]; + + /* + * Must always select an entry for use to store info as we + * cannot resize report_times; at the end of the scan, use_entry + * will be the oldest entry, which ideally also happened before + * KCSAN_REPORT_ONCE_IN_MS ago. + */ + if (time_before(rt->time, use_entry->time)) + use_entry = rt; + + /* + * Initially, no need to check any further as this entry as well + * as following entries have never been used. + */ + if (rt->time == 0) + break; + + /* Check if entry expired. */ + if (time_before(rt->time, invalid_before)) + continue; /* before KCSAN_REPORT_ONCE_IN_MS ago */ + + /* Reported recently, check if data race matches. 
*/ + if ((rt->frame1 == frame1 && rt->frame2 == frame2) || + (rt->frame1 == frame2 && rt->frame2 == frame1)) + return true; + } + + use_entry->time = jiffies; + use_entry->frame1 = frame1; + use_entry->frame2 = frame2; + return false; +} + /* * Special rules to skip reporting. */ @@ -132,7 +220,9 @@ static bool print_report(const volatile void *ptr, size_t size, int access_type, unsigned long stack_entries[NUM_STACK_ENTRIES] = { 0 }; int num_stack_entries = stack_trace_save(stack_entries, NUM_STACK_ENTRIES, 1); int skipnr = get_stack_skipnr(stack_entries, num_stack_entries); - int other_skipnr; + unsigned long this_frame = stack_entries[skipnr]; + unsigned long other_frame = 0; + int other_skipnr = 0; /* silence uninit warnings */ /* * Must check report filter rules before starting to print. @@ -143,34 +233,34 @@ static bool print_report(const volatile void *ptr, size_t size, int access_type, if (type == KCSAN_REPORT_RACE_SIGNAL) { other_skipnr = get_stack_skipnr(other_info.stack_entries, other_info.num_stack_entries); + other_frame = other_info.stack_entries[other_skipnr]; /* @value_change is only known for the other thread */ - if (skip_report(other_info.access_type, value_change, - other_info.stack_entries[other_skipnr])) + if (skip_report(other_info.access_type, value_change, other_frame)) return false; } + if (rate_limit_report(this_frame, other_frame)) + return false; + /* Print report header. */ pr_err("==================================================================\n"); switch (type) { case KCSAN_REPORT_RACE_SIGNAL: { - void *this_fn = (void *)stack_entries[skipnr]; - void *other_fn = (void *)other_info.stack_entries[other_skipnr]; int cmp; /* * Order functions lexographically for consistent bug titles. * Do not print offset of functions to keep title short. */ - cmp = sym_strcmp(other_fn, this_fn); + cmp = sym_strcmp((void *)other_frame, (void *)this_frame); pr_err("BUG: KCSAN: data-race in %ps / %ps\n", - cmp < 0 ? other_fn : this_fn, - cmp < 0 ? this_fn : other_fn); + (void *)(cmp < 0 ? other_frame : this_frame), + (void *)(cmp < 0 ? this_frame : other_frame)); } break; case KCSAN_REPORT_RACE_UNKNOWN_ORIGIN: - pr_err("BUG: KCSAN: data-race in %pS\n", - (void *)stack_entries[skipnr]); + pr_err("BUG: KCSAN: data-race in %pS\n", (void *)this_frame); break; default: diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan index 3f78b1434375d..3552990abcfe5 100644 --- a/lib/Kconfig.kcsan +++ b/lib/Kconfig.kcsan @@ -81,6 +81,16 @@ config KCSAN_SKIP_WATCH_RANDOMIZE KCSAN_WATCH_SKIP. If false, the chosen value is always KCSAN_WATCH_SKIP. +config KCSAN_REPORT_ONCE_IN_MS + int "Duration in milliseconds, in which any given data race is only reported once" + default 3000 + help + Any given data race is only reported once in the defined time window. + Different data races may still generate reports within a duration + that is smaller than the duration defined here. This allows rate + limiting reporting to avoid flooding the console with reports. + Setting this to 0 disables rate limiting. 
+ # Note that, while some of the below options could be turned into boot # parameters, to optimize for the common use-case, we avoid this because: (a) # it would impact performance (and we want to avoid static branch for all -- GitLab From f1bc96210c6a6b853b4b2eec808141956e8fbc5d Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Wed, 15 Jan 2020 17:25:12 +0100 Subject: [PATCH 0023/1971] kcsan: Make KCSAN compatible with lockdep We must avoid any recursion into lockdep if KCSAN is enabled on utilities used by lockdep. One manifestation of this is corruption of lockdep's IRQ trace state (if TRACE_IRQFLAGS), resulting in spurious warnings (see below). This commit fixes this by: 1. Using raw_local_irq{save,restore} in kcsan_setup_watchpoint(). 2. Disabling lockdep in kcsan_report(). Tested with: CONFIG_LOCKDEP=y CONFIG_DEBUG_LOCKDEP=y CONFIG_TRACE_IRQFLAGS=y This fix eliminates spurious warnings such as the following one: WARNING: CPU: 0 PID: 2 at kernel/locking/lockdep.c:4406 check_flags.part.0+0x101/0x220 Modules linked in: CPU: 0 PID: 2 Comm: kthreadd Not tainted 5.5.0-rc1+ #11 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.12.0-1 04/01/2014 RIP: 0010:check_flags.part.0+0x101/0x220 Call Trace: lock_is_held_type+0x69/0x150 freezer_fork+0x20b/0x370 cgroup_post_fork+0x2c9/0x5c0 copy_process+0x2675/0x3b40 _do_fork+0xbe/0xa30 ? _raw_spin_unlock_irqrestore+0x40/0x50 ? match_held_lock+0x56/0x250 ? kthread_park+0xf0/0xf0 kernel_thread+0xa6/0xd0 ? kthread_park+0xf0/0xf0 kthreadd+0x321/0x3d0 ? kthread_create_on_cpu+0x130/0x130 ret_from_fork+0x3a/0x50 irq event stamp: 64 hardirqs last enabled at (63): [] _raw_spin_unlock_irqrestore+0x40/0x50 hardirqs last disabled at (64): [] kcsan_setup_watchpoint+0x92/0x460 softirqs last enabled at (32): [] fpu__copy+0xe8/0x470 softirqs last disabled at (30): [] fpu__copy+0x69/0x470 Reported-by: Qian Cai Signed-off-by: Marco Elver Acked-by: Alexander Potapenko Tested-by: Qian Cai Signed-off-by: Paul E. McKenney Signed-off-by: Ingo Molnar --- kernel/kcsan/core.c | 6 ++++-- kernel/kcsan/report.c | 11 +++++++++++ kernel/locking/Makefile | 3 +++ 3 files changed, 18 insertions(+), 2 deletions(-) diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c index 87bf857c88935..64b30f7716a12 100644 --- a/kernel/kcsan/core.c +++ b/kernel/kcsan/core.c @@ -336,8 +336,10 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type) * CPU-local data accesses), it makes more sense (from a data race * detection point of view) to simply disable preemptions to ensure * as many tasks as possible run on other CPUs. + * + * Use raw versions, to avoid lockdep recursion via IRQ flags tracing. 
*/ - local_irq_save(irq_flags); + raw_local_irq_save(irq_flags); watchpoint = insert_watchpoint((unsigned long)ptr, size, is_write); if (watchpoint == NULL) { @@ -429,7 +431,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type) kcsan_counter_dec(KCSAN_COUNTER_USED_WATCHPOINTS); out_unlock: - local_irq_restore(irq_flags); + raw_local_irq_restore(irq_flags); out: user_access_restore(ua_flags); } diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c index b5b4feea49de5..33bdf8b229b53 100644 --- a/kernel/kcsan/report.c +++ b/kernel/kcsan/report.c @@ -2,6 +2,7 @@ #include #include +#include #include #include #include @@ -410,6 +411,14 @@ void kcsan_report(const volatile void *ptr, size_t size, int access_type, { unsigned long flags = 0; + /* + * With TRACE_IRQFLAGS, lockdep's IRQ trace state becomes corrupted if + * we do not turn off lockdep here; this could happen due to recursion + * into lockdep via KCSAN if we detect a data race in utilities used by + * lockdep. + */ + lockdep_off(); + kcsan_disable_current(); if (prepare_report(&flags, ptr, size, access_type, cpu_id, type)) { if (print_report(ptr, size, access_type, value_change, cpu_id, type) && panic_on_warn) @@ -418,4 +427,6 @@ void kcsan_report(const volatile void *ptr, size_t size, int access_type, release_report(&flags, type); } kcsan_enable_current(); + + lockdep_on(); } diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile index 45452facff3b4..6d11cfb9b41f2 100644 --- a/kernel/locking/Makefile +++ b/kernel/locking/Makefile @@ -5,6 +5,9 @@ KCOV_INSTRUMENT := n obj-y += mutex.o semaphore.o rwsem.o percpu-rwsem.o +# Avoid recursion lockdep -> KCSAN -> ... -> lockdep. +KCSAN_SANITIZE_lockdep.o := n + ifdef CONFIG_FUNCTION_TRACER CFLAGS_REMOVE_lockdep.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_lockdep_proc.o = $(CC_FLAGS_FTRACE) -- GitLab From ad4f8eeca8eaa24afb6059c241a2f4baf86378f3 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Wed, 29 Jan 2020 16:01:02 +0100 Subject: [PATCH 0024/1971] kcsan: Address missing case with KCSAN_REPORT_VALUE_CHANGE_ONLY Even with KCSAN_REPORT_VALUE_CHANGE_ONLY, KCSAN still reports data races between reads and watchpointed writes, even if the writes wrote values already present. This commit causes KCSAN to unconditionally skip reporting in this case. Signed-off-by: Marco Elver Signed-off-by: Paul E. McKenney Signed-off-by: Ingo Molnar --- kernel/kcsan/report.c | 27 ++++++++++++++++++++------- 1 file changed, 20 insertions(+), 7 deletions(-) diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c index 33bdf8b229b53..7cd34285df740 100644 --- a/kernel/kcsan/report.c +++ b/kernel/kcsan/report.c @@ -130,12 +130,25 @@ static bool rate_limit_report(unsigned long frame1, unsigned long frame2) * Special rules to skip reporting. */ static bool -skip_report(int access_type, bool value_change, unsigned long top_frame) +skip_report(bool value_change, unsigned long top_frame) { - const bool is_write = (access_type & KCSAN_ACCESS_WRITE) != 0; - - if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY) && is_write && - !value_change) { + /* + * The first call to skip_report always has value_change==true, since we + * cannot know the value written of an instrumented access. For the 2nd + * call there are 6 cases with CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY: + * + * 1. read watchpoint, conflicting write (value_change==true): report; + * 2. read watchpoint, conflicting write (value_change==false): skip; + * 3. write watchpoint, conflicting write (value_change==true): report; + * 4. 
write watchpoint, conflicting write (value_change==false): skip; + * 5. write watchpoint, conflicting read (value_change==false): skip; + * 6. write watchpoint, conflicting read (value_change==true): impossible; + * + * Cases 1-4 are intuitive and expected; case 5 ensures we do not report + * data races where the write may have rewritten the same value; and + * case 6 is simply impossible. + */ + if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY) && !value_change) { /* * The access is a write, but the data value did not change. * @@ -228,7 +241,7 @@ static bool print_report(const volatile void *ptr, size_t size, int access_type, /* * Must check report filter rules before starting to print. */ - if (skip_report(access_type, true, stack_entries[skipnr])) + if (skip_report(true, stack_entries[skipnr])) return false; if (type == KCSAN_REPORT_RACE_SIGNAL) { @@ -237,7 +250,7 @@ static bool print_report(const volatile void *ptr, size_t size, int access_type, other_frame = other_info.stack_entries[other_skipnr]; /* @value_change is only known for the other thread */ - if (skip_report(other_info.access_type, value_change, other_frame)) + if (skip_report(value_change, other_frame)) return false; } -- GitLab From 36e4d4dd4fc4f1d99e7966a460a2b12ce438abc2 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Tue, 21 Jan 2020 17:05:08 +0100 Subject: [PATCH 0025/1971] include/linux: Add instrumented.h infrastructure This adds instrumented.h, which provides generic wrappers for memory access instrumentation that the compiler cannot emit for various sanitizers. Currently this unifies KASAN and KCSAN instrumentation. In future this will also include KMSAN instrumentation. Note that, copy_{to,from}_user should use special instrumentation, since we should be able to instrument both source and destination memory accesses if both are kernel memory. The current patch only instruments the memory access where the address is always in kernel space, however, both may in fact be kernel addresses when a compat syscall passes an argument allocated in the kernel to a real syscall. In a future change, both KASAN and KCSAN should check both addresses in such cases, as well as KMSAN will make use of both addresses. [It made more sense to provide the completed function signature, rather than updating it and changing all locations again at a later time.] Suggested-by: Arnd Bergmann Signed-off-by: Marco Elver Acked-by: Alexander Potapenko Reviewed-by: Dmitry Vyukov Signed-off-by: Paul E. McKenney Signed-off-by: Ingo Molnar --- include/linux/instrumented.h | 109 +++++++++++++++++++++++++++++++++++ 1 file changed, 109 insertions(+) create mode 100644 include/linux/instrumented.h diff --git a/include/linux/instrumented.h b/include/linux/instrumented.h new file mode 100644 index 0000000000000..43e6ea5919759 --- /dev/null +++ b/include/linux/instrumented.h @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * This header provides generic wrappers for memory access instrumentation that + * the compiler cannot emit for: KASAN, KCSAN. + */ +#ifndef _LINUX_INSTRUMENTED_H +#define _LINUX_INSTRUMENTED_H + +#include +#include +#include +#include + +/** + * instrument_read - instrument regular read access + * + * Instrument a regular read access. The instrumentation should be inserted + * before the actual read happens. 
+ * + * @ptr address of access + * @size size of access + */ +static __always_inline void instrument_read(const volatile void *v, size_t size) +{ + kasan_check_read(v, size); + kcsan_check_read(v, size); +} + +/** + * instrument_write - instrument regular write access + * + * Instrument a regular write access. The instrumentation should be inserted + * before the actual write happens. + * + * @ptr address of access + * @size size of access + */ +static __always_inline void instrument_write(const volatile void *v, size_t size) +{ + kasan_check_write(v, size); + kcsan_check_write(v, size); +} + +/** + * instrument_atomic_read - instrument atomic read access + * + * Instrument an atomic read access. The instrumentation should be inserted + * before the actual read happens. + * + * @ptr address of access + * @size size of access + */ +static __always_inline void instrument_atomic_read(const volatile void *v, size_t size) +{ + kasan_check_read(v, size); + kcsan_check_atomic_read(v, size); +} + +/** + * instrument_atomic_write - instrument atomic write access + * + * Instrument an atomic write access. The instrumentation should be inserted + * before the actual write happens. + * + * @ptr address of access + * @size size of access + */ +static __always_inline void instrument_atomic_write(const volatile void *v, size_t size) +{ + kasan_check_write(v, size); + kcsan_check_atomic_write(v, size); +} + +/** + * instrument_copy_to_user - instrument reads of copy_to_user + * + * Instrument reads from kernel memory, that are due to copy_to_user (and + * variants). The instrumentation must be inserted before the accesses. + * + * @to destination address + * @from source address + * @n number of bytes to copy + */ +static __always_inline void +instrument_copy_to_user(void __user *to, const void *from, unsigned long n) +{ + kasan_check_read(from, n); + kcsan_check_read(from, n); +} + +/** + * instrument_copy_from_user - instrument writes of copy_from_user + * + * Instrument writes to kernel memory, that are due to copy_from_user (and + * variants). The instrumentation should be inserted before the accesses. + * + * @to destination address + * @from source address + * @n number of bytes to copy + */ +static __always_inline void +instrument_copy_from_user(const void *to, const void __user *from, unsigned long n) +{ + kasan_check_write(to, n); + kcsan_check_write(to, n); +} + +#endif /* _LINUX_INSTRUMENTED_H */ -- GitLab From ed8af2e4d2a71bd58f5776b7e5a477d136e32be4 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Tue, 21 Jan 2020 17:05:09 +0100 Subject: [PATCH 0026/1971] asm-generic, atomic-instrumented: Use generic instrumented.h This switches atomic-instrumented.h to use the generic instrumentation wrappers provided by instrumented.h. No functional change intended. Suggested-by: Arnd Bergmann Signed-off-by: Marco Elver Acked-by: Arnd Bergmann Signed-off-by: Paul E. 
McKenney Signed-off-by: Ingo Molnar --- include/asm-generic/atomic-instrumented.h | 395 +++++++++++----------- scripts/atomic/gen-atomic-instrumented.sh | 19 +- 2 files changed, 194 insertions(+), 220 deletions(-) diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h index 63869ded73ac1..379986e40159f 100644 --- a/include/asm-generic/atomic-instrumented.h +++ b/include/asm-generic/atomic-instrumented.h @@ -19,25 +19,12 @@ #include #include -#include -#include - -static __always_inline void __atomic_check_read(const volatile void *v, size_t size) -{ - kasan_check_read(v, size); - kcsan_check_atomic_read(v, size); -} - -static __always_inline void __atomic_check_write(const volatile void *v, size_t size) -{ - kasan_check_write(v, size); - kcsan_check_atomic_write(v, size); -} +#include static __always_inline int atomic_read(const atomic_t *v) { - __atomic_check_read(v, sizeof(*v)); + instrument_atomic_read(v, sizeof(*v)); return arch_atomic_read(v); } #define atomic_read atomic_read @@ -46,7 +33,7 @@ atomic_read(const atomic_t *v) static __always_inline int atomic_read_acquire(const atomic_t *v) { - __atomic_check_read(v, sizeof(*v)); + instrument_atomic_read(v, sizeof(*v)); return arch_atomic_read_acquire(v); } #define atomic_read_acquire atomic_read_acquire @@ -55,7 +42,7 @@ atomic_read_acquire(const atomic_t *v) static __always_inline void atomic_set(atomic_t *v, int i) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic_set(v, i); } #define atomic_set atomic_set @@ -64,7 +51,7 @@ atomic_set(atomic_t *v, int i) static __always_inline void atomic_set_release(atomic_t *v, int i) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic_set_release(v, i); } #define atomic_set_release atomic_set_release @@ -73,7 +60,7 @@ atomic_set_release(atomic_t *v, int i) static __always_inline void atomic_add(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic_add(i, v); } #define atomic_add atomic_add @@ -82,7 +69,7 @@ atomic_add(int i, atomic_t *v) static __always_inline int atomic_add_return(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_add_return(i, v); } #define atomic_add_return atomic_add_return @@ -92,7 +79,7 @@ atomic_add_return(int i, atomic_t *v) static __always_inline int atomic_add_return_acquire(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_add_return_acquire(i, v); } #define atomic_add_return_acquire atomic_add_return_acquire @@ -102,7 +89,7 @@ atomic_add_return_acquire(int i, atomic_t *v) static __always_inline int atomic_add_return_release(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_add_return_release(i, v); } #define atomic_add_return_release atomic_add_return_release @@ -112,7 +99,7 @@ atomic_add_return_release(int i, atomic_t *v) static __always_inline int atomic_add_return_relaxed(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_add_return_relaxed(i, v); } #define atomic_add_return_relaxed atomic_add_return_relaxed @@ -122,7 +109,7 @@ atomic_add_return_relaxed(int i, atomic_t *v) static __always_inline int atomic_fetch_add(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + 
instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_add(i, v); } #define atomic_fetch_add atomic_fetch_add @@ -132,7 +119,7 @@ atomic_fetch_add(int i, atomic_t *v) static __always_inline int atomic_fetch_add_acquire(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_add_acquire(i, v); } #define atomic_fetch_add_acquire atomic_fetch_add_acquire @@ -142,7 +129,7 @@ atomic_fetch_add_acquire(int i, atomic_t *v) static __always_inline int atomic_fetch_add_release(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_add_release(i, v); } #define atomic_fetch_add_release atomic_fetch_add_release @@ -152,7 +139,7 @@ atomic_fetch_add_release(int i, atomic_t *v) static __always_inline int atomic_fetch_add_relaxed(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_add_relaxed(i, v); } #define atomic_fetch_add_relaxed atomic_fetch_add_relaxed @@ -161,7 +148,7 @@ atomic_fetch_add_relaxed(int i, atomic_t *v) static __always_inline void atomic_sub(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic_sub(i, v); } #define atomic_sub atomic_sub @@ -170,7 +157,7 @@ atomic_sub(int i, atomic_t *v) static __always_inline int atomic_sub_return(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_sub_return(i, v); } #define atomic_sub_return atomic_sub_return @@ -180,7 +167,7 @@ atomic_sub_return(int i, atomic_t *v) static __always_inline int atomic_sub_return_acquire(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_sub_return_acquire(i, v); } #define atomic_sub_return_acquire atomic_sub_return_acquire @@ -190,7 +177,7 @@ atomic_sub_return_acquire(int i, atomic_t *v) static __always_inline int atomic_sub_return_release(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_sub_return_release(i, v); } #define atomic_sub_return_release atomic_sub_return_release @@ -200,7 +187,7 @@ atomic_sub_return_release(int i, atomic_t *v) static __always_inline int atomic_sub_return_relaxed(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_sub_return_relaxed(i, v); } #define atomic_sub_return_relaxed atomic_sub_return_relaxed @@ -210,7 +197,7 @@ atomic_sub_return_relaxed(int i, atomic_t *v) static __always_inline int atomic_fetch_sub(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_sub(i, v); } #define atomic_fetch_sub atomic_fetch_sub @@ -220,7 +207,7 @@ atomic_fetch_sub(int i, atomic_t *v) static __always_inline int atomic_fetch_sub_acquire(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_sub_acquire(i, v); } #define atomic_fetch_sub_acquire atomic_fetch_sub_acquire @@ -230,7 +217,7 @@ atomic_fetch_sub_acquire(int i, atomic_t *v) static __always_inline int atomic_fetch_sub_release(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_sub_release(i, v); } #define atomic_fetch_sub_release atomic_fetch_sub_release @@ -240,7 
+227,7 @@ atomic_fetch_sub_release(int i, atomic_t *v) static __always_inline int atomic_fetch_sub_relaxed(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_sub_relaxed(i, v); } #define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed @@ -250,7 +237,7 @@ atomic_fetch_sub_relaxed(int i, atomic_t *v) static __always_inline void atomic_inc(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic_inc(v); } #define atomic_inc atomic_inc @@ -260,7 +247,7 @@ atomic_inc(atomic_t *v) static __always_inline int atomic_inc_return(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_inc_return(v); } #define atomic_inc_return atomic_inc_return @@ -270,7 +257,7 @@ atomic_inc_return(atomic_t *v) static __always_inline int atomic_inc_return_acquire(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_inc_return_acquire(v); } #define atomic_inc_return_acquire atomic_inc_return_acquire @@ -280,7 +267,7 @@ atomic_inc_return_acquire(atomic_t *v) static __always_inline int atomic_inc_return_release(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_inc_return_release(v); } #define atomic_inc_return_release atomic_inc_return_release @@ -290,7 +277,7 @@ atomic_inc_return_release(atomic_t *v) static __always_inline int atomic_inc_return_relaxed(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_inc_return_relaxed(v); } #define atomic_inc_return_relaxed atomic_inc_return_relaxed @@ -300,7 +287,7 @@ atomic_inc_return_relaxed(atomic_t *v) static __always_inline int atomic_fetch_inc(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_inc(v); } #define atomic_fetch_inc atomic_fetch_inc @@ -310,7 +297,7 @@ atomic_fetch_inc(atomic_t *v) static __always_inline int atomic_fetch_inc_acquire(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_inc_acquire(v); } #define atomic_fetch_inc_acquire atomic_fetch_inc_acquire @@ -320,7 +307,7 @@ atomic_fetch_inc_acquire(atomic_t *v) static __always_inline int atomic_fetch_inc_release(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_inc_release(v); } #define atomic_fetch_inc_release atomic_fetch_inc_release @@ -330,7 +317,7 @@ atomic_fetch_inc_release(atomic_t *v) static __always_inline int atomic_fetch_inc_relaxed(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_inc_relaxed(v); } #define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed @@ -340,7 +327,7 @@ atomic_fetch_inc_relaxed(atomic_t *v) static __always_inline void atomic_dec(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic_dec(v); } #define atomic_dec atomic_dec @@ -350,7 +337,7 @@ atomic_dec(atomic_t *v) static __always_inline int atomic_dec_return(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_dec_return(v); } #define atomic_dec_return atomic_dec_return @@ -360,7 +347,7 @@ atomic_dec_return(atomic_t *v) static __always_inline int 
atomic_dec_return_acquire(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_dec_return_acquire(v); } #define atomic_dec_return_acquire atomic_dec_return_acquire @@ -370,7 +357,7 @@ atomic_dec_return_acquire(atomic_t *v) static __always_inline int atomic_dec_return_release(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_dec_return_release(v); } #define atomic_dec_return_release atomic_dec_return_release @@ -380,7 +367,7 @@ atomic_dec_return_release(atomic_t *v) static __always_inline int atomic_dec_return_relaxed(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_dec_return_relaxed(v); } #define atomic_dec_return_relaxed atomic_dec_return_relaxed @@ -390,7 +377,7 @@ atomic_dec_return_relaxed(atomic_t *v) static __always_inline int atomic_fetch_dec(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_dec(v); } #define atomic_fetch_dec atomic_fetch_dec @@ -400,7 +387,7 @@ atomic_fetch_dec(atomic_t *v) static __always_inline int atomic_fetch_dec_acquire(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_dec_acquire(v); } #define atomic_fetch_dec_acquire atomic_fetch_dec_acquire @@ -410,7 +397,7 @@ atomic_fetch_dec_acquire(atomic_t *v) static __always_inline int atomic_fetch_dec_release(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_dec_release(v); } #define atomic_fetch_dec_release atomic_fetch_dec_release @@ -420,7 +407,7 @@ atomic_fetch_dec_release(atomic_t *v) static __always_inline int atomic_fetch_dec_relaxed(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_dec_relaxed(v); } #define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed @@ -429,7 +416,7 @@ atomic_fetch_dec_relaxed(atomic_t *v) static __always_inline void atomic_and(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic_and(i, v); } #define atomic_and atomic_and @@ -438,7 +425,7 @@ atomic_and(int i, atomic_t *v) static __always_inline int atomic_fetch_and(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_and(i, v); } #define atomic_fetch_and atomic_fetch_and @@ -448,7 +435,7 @@ atomic_fetch_and(int i, atomic_t *v) static __always_inline int atomic_fetch_and_acquire(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_and_acquire(i, v); } #define atomic_fetch_and_acquire atomic_fetch_and_acquire @@ -458,7 +445,7 @@ atomic_fetch_and_acquire(int i, atomic_t *v) static __always_inline int atomic_fetch_and_release(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_and_release(i, v); } #define atomic_fetch_and_release atomic_fetch_and_release @@ -468,7 +455,7 @@ atomic_fetch_and_release(int i, atomic_t *v) static __always_inline int atomic_fetch_and_relaxed(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_and_relaxed(i, v); } #define atomic_fetch_and_relaxed atomic_fetch_and_relaxed @@ -478,7 +465,7 
@@ atomic_fetch_and_relaxed(int i, atomic_t *v) static __always_inline void atomic_andnot(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic_andnot(i, v); } #define atomic_andnot atomic_andnot @@ -488,7 +475,7 @@ atomic_andnot(int i, atomic_t *v) static __always_inline int atomic_fetch_andnot(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_andnot(i, v); } #define atomic_fetch_andnot atomic_fetch_andnot @@ -498,7 +485,7 @@ atomic_fetch_andnot(int i, atomic_t *v) static __always_inline int atomic_fetch_andnot_acquire(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_andnot_acquire(i, v); } #define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire @@ -508,7 +495,7 @@ atomic_fetch_andnot_acquire(int i, atomic_t *v) static __always_inline int atomic_fetch_andnot_release(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_andnot_release(i, v); } #define atomic_fetch_andnot_release atomic_fetch_andnot_release @@ -518,7 +505,7 @@ atomic_fetch_andnot_release(int i, atomic_t *v) static __always_inline int atomic_fetch_andnot_relaxed(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_andnot_relaxed(i, v); } #define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed @@ -527,7 +514,7 @@ atomic_fetch_andnot_relaxed(int i, atomic_t *v) static __always_inline void atomic_or(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic_or(i, v); } #define atomic_or atomic_or @@ -536,7 +523,7 @@ atomic_or(int i, atomic_t *v) static __always_inline int atomic_fetch_or(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_or(i, v); } #define atomic_fetch_or atomic_fetch_or @@ -546,7 +533,7 @@ atomic_fetch_or(int i, atomic_t *v) static __always_inline int atomic_fetch_or_acquire(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_or_acquire(i, v); } #define atomic_fetch_or_acquire atomic_fetch_or_acquire @@ -556,7 +543,7 @@ atomic_fetch_or_acquire(int i, atomic_t *v) static __always_inline int atomic_fetch_or_release(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_or_release(i, v); } #define atomic_fetch_or_release atomic_fetch_or_release @@ -566,7 +553,7 @@ atomic_fetch_or_release(int i, atomic_t *v) static __always_inline int atomic_fetch_or_relaxed(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_or_relaxed(i, v); } #define atomic_fetch_or_relaxed atomic_fetch_or_relaxed @@ -575,7 +562,7 @@ atomic_fetch_or_relaxed(int i, atomic_t *v) static __always_inline void atomic_xor(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic_xor(i, v); } #define atomic_xor atomic_xor @@ -584,7 +571,7 @@ atomic_xor(int i, atomic_t *v) static __always_inline int atomic_fetch_xor(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_xor(i, v); } #define 
atomic_fetch_xor atomic_fetch_xor @@ -594,7 +581,7 @@ atomic_fetch_xor(int i, atomic_t *v) static __always_inline int atomic_fetch_xor_acquire(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_xor_acquire(i, v); } #define atomic_fetch_xor_acquire atomic_fetch_xor_acquire @@ -604,7 +591,7 @@ atomic_fetch_xor_acquire(int i, atomic_t *v) static __always_inline int atomic_fetch_xor_release(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_xor_release(i, v); } #define atomic_fetch_xor_release atomic_fetch_xor_release @@ -614,7 +601,7 @@ atomic_fetch_xor_release(int i, atomic_t *v) static __always_inline int atomic_fetch_xor_relaxed(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_xor_relaxed(i, v); } #define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed @@ -624,7 +611,7 @@ atomic_fetch_xor_relaxed(int i, atomic_t *v) static __always_inline int atomic_xchg(atomic_t *v, int i) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_xchg(v, i); } #define atomic_xchg atomic_xchg @@ -634,7 +621,7 @@ atomic_xchg(atomic_t *v, int i) static __always_inline int atomic_xchg_acquire(atomic_t *v, int i) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_xchg_acquire(v, i); } #define atomic_xchg_acquire atomic_xchg_acquire @@ -644,7 +631,7 @@ atomic_xchg_acquire(atomic_t *v, int i) static __always_inline int atomic_xchg_release(atomic_t *v, int i) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_xchg_release(v, i); } #define atomic_xchg_release atomic_xchg_release @@ -654,7 +641,7 @@ atomic_xchg_release(atomic_t *v, int i) static __always_inline int atomic_xchg_relaxed(atomic_t *v, int i) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_xchg_relaxed(v, i); } #define atomic_xchg_relaxed atomic_xchg_relaxed @@ -664,7 +651,7 @@ atomic_xchg_relaxed(atomic_t *v, int i) static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_cmpxchg(v, old, new); } #define atomic_cmpxchg atomic_cmpxchg @@ -674,7 +661,7 @@ atomic_cmpxchg(atomic_t *v, int old, int new) static __always_inline int atomic_cmpxchg_acquire(atomic_t *v, int old, int new) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_cmpxchg_acquire(v, old, new); } #define atomic_cmpxchg_acquire atomic_cmpxchg_acquire @@ -684,7 +671,7 @@ atomic_cmpxchg_acquire(atomic_t *v, int old, int new) static __always_inline int atomic_cmpxchg_release(atomic_t *v, int old, int new) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_cmpxchg_release(v, old, new); } #define atomic_cmpxchg_release atomic_cmpxchg_release @@ -694,7 +681,7 @@ atomic_cmpxchg_release(atomic_t *v, int old, int new) static __always_inline int atomic_cmpxchg_relaxed(atomic_t *v, int old, int new) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_cmpxchg_relaxed(v, old, new); } #define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed @@ -704,8 +691,8 @@ atomic_cmpxchg_relaxed(atomic_t *v, int old, int new) 
static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new) { - __atomic_check_write(v, sizeof(*v)); - __atomic_check_write(old, sizeof(*old)); + instrument_atomic_write(v, sizeof(*v)); + instrument_atomic_write(old, sizeof(*old)); return arch_atomic_try_cmpxchg(v, old, new); } #define atomic_try_cmpxchg atomic_try_cmpxchg @@ -715,8 +702,8 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new) static __always_inline bool atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) { - __atomic_check_write(v, sizeof(*v)); - __atomic_check_write(old, sizeof(*old)); + instrument_atomic_write(v, sizeof(*v)); + instrument_atomic_write(old, sizeof(*old)); return arch_atomic_try_cmpxchg_acquire(v, old, new); } #define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire @@ -726,8 +713,8 @@ atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) static __always_inline bool atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) { - __atomic_check_write(v, sizeof(*v)); - __atomic_check_write(old, sizeof(*old)); + instrument_atomic_write(v, sizeof(*v)); + instrument_atomic_write(old, sizeof(*old)); return arch_atomic_try_cmpxchg_release(v, old, new); } #define atomic_try_cmpxchg_release atomic_try_cmpxchg_release @@ -737,8 +724,8 @@ atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) static __always_inline bool atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) { - __atomic_check_write(v, sizeof(*v)); - __atomic_check_write(old, sizeof(*old)); + instrument_atomic_write(v, sizeof(*v)); + instrument_atomic_write(old, sizeof(*old)); return arch_atomic_try_cmpxchg_relaxed(v, old, new); } #define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed @@ -748,7 +735,7 @@ atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) static __always_inline bool atomic_sub_and_test(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_sub_and_test(i, v); } #define atomic_sub_and_test atomic_sub_and_test @@ -758,7 +745,7 @@ atomic_sub_and_test(int i, atomic_t *v) static __always_inline bool atomic_dec_and_test(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_dec_and_test(v); } #define atomic_dec_and_test atomic_dec_and_test @@ -768,7 +755,7 @@ atomic_dec_and_test(atomic_t *v) static __always_inline bool atomic_inc_and_test(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_inc_and_test(v); } #define atomic_inc_and_test atomic_inc_and_test @@ -778,7 +765,7 @@ atomic_inc_and_test(atomic_t *v) static __always_inline bool atomic_add_negative(int i, atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_add_negative(i, v); } #define atomic_add_negative atomic_add_negative @@ -788,7 +775,7 @@ atomic_add_negative(int i, atomic_t *v) static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_add_unless(v, a, u); } #define atomic_fetch_add_unless atomic_fetch_add_unless @@ -798,7 +785,7 @@ atomic_fetch_add_unless(atomic_t *v, int a, int u) static __always_inline bool atomic_add_unless(atomic_t *v, int a, int u) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_add_unless(v, a, u); } #define atomic_add_unless atomic_add_unless @@ -808,7 +795,7 @@ 
atomic_add_unless(atomic_t *v, int a, int u) static __always_inline bool atomic_inc_not_zero(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_inc_not_zero(v); } #define atomic_inc_not_zero atomic_inc_not_zero @@ -818,7 +805,7 @@ atomic_inc_not_zero(atomic_t *v) static __always_inline bool atomic_inc_unless_negative(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_inc_unless_negative(v); } #define atomic_inc_unless_negative atomic_inc_unless_negative @@ -828,7 +815,7 @@ atomic_inc_unless_negative(atomic_t *v) static __always_inline bool atomic_dec_unless_positive(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_dec_unless_positive(v); } #define atomic_dec_unless_positive atomic_dec_unless_positive @@ -838,7 +825,7 @@ atomic_dec_unless_positive(atomic_t *v) static __always_inline int atomic_dec_if_positive(atomic_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_dec_if_positive(v); } #define atomic_dec_if_positive atomic_dec_if_positive @@ -847,7 +834,7 @@ atomic_dec_if_positive(atomic_t *v) static __always_inline s64 atomic64_read(const atomic64_t *v) { - __atomic_check_read(v, sizeof(*v)); + instrument_atomic_read(v, sizeof(*v)); return arch_atomic64_read(v); } #define atomic64_read atomic64_read @@ -856,7 +843,7 @@ atomic64_read(const atomic64_t *v) static __always_inline s64 atomic64_read_acquire(const atomic64_t *v) { - __atomic_check_read(v, sizeof(*v)); + instrument_atomic_read(v, sizeof(*v)); return arch_atomic64_read_acquire(v); } #define atomic64_read_acquire atomic64_read_acquire @@ -865,7 +852,7 @@ atomic64_read_acquire(const atomic64_t *v) static __always_inline void atomic64_set(atomic64_t *v, s64 i) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic64_set(v, i); } #define atomic64_set atomic64_set @@ -874,7 +861,7 @@ atomic64_set(atomic64_t *v, s64 i) static __always_inline void atomic64_set_release(atomic64_t *v, s64 i) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic64_set_release(v, i); } #define atomic64_set_release atomic64_set_release @@ -883,7 +870,7 @@ atomic64_set_release(atomic64_t *v, s64 i) static __always_inline void atomic64_add(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic64_add(i, v); } #define atomic64_add atomic64_add @@ -892,7 +879,7 @@ atomic64_add(s64 i, atomic64_t *v) static __always_inline s64 atomic64_add_return(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_add_return(i, v); } #define atomic64_add_return atomic64_add_return @@ -902,7 +889,7 @@ atomic64_add_return(s64 i, atomic64_t *v) static __always_inline s64 atomic64_add_return_acquire(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_add_return_acquire(i, v); } #define atomic64_add_return_acquire atomic64_add_return_acquire @@ -912,7 +899,7 @@ atomic64_add_return_acquire(s64 i, atomic64_t *v) static __always_inline s64 atomic64_add_return_release(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_add_return_release(i, v); } #define atomic64_add_return_release 
atomic64_add_return_release @@ -922,7 +909,7 @@ atomic64_add_return_release(s64 i, atomic64_t *v) static __always_inline s64 atomic64_add_return_relaxed(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_add_return_relaxed(i, v); } #define atomic64_add_return_relaxed atomic64_add_return_relaxed @@ -932,7 +919,7 @@ atomic64_add_return_relaxed(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_add(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_add(i, v); } #define atomic64_fetch_add atomic64_fetch_add @@ -942,7 +929,7 @@ atomic64_fetch_add(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_add_acquire(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_add_acquire(i, v); } #define atomic64_fetch_add_acquire atomic64_fetch_add_acquire @@ -952,7 +939,7 @@ atomic64_fetch_add_acquire(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_add_release(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_add_release(i, v); } #define atomic64_fetch_add_release atomic64_fetch_add_release @@ -962,7 +949,7 @@ atomic64_fetch_add_release(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_add_relaxed(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_add_relaxed(i, v); } #define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed @@ -971,7 +958,7 @@ atomic64_fetch_add_relaxed(s64 i, atomic64_t *v) static __always_inline void atomic64_sub(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic64_sub(i, v); } #define atomic64_sub atomic64_sub @@ -980,7 +967,7 @@ atomic64_sub(s64 i, atomic64_t *v) static __always_inline s64 atomic64_sub_return(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_sub_return(i, v); } #define atomic64_sub_return atomic64_sub_return @@ -990,7 +977,7 @@ atomic64_sub_return(s64 i, atomic64_t *v) static __always_inline s64 atomic64_sub_return_acquire(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_sub_return_acquire(i, v); } #define atomic64_sub_return_acquire atomic64_sub_return_acquire @@ -1000,7 +987,7 @@ atomic64_sub_return_acquire(s64 i, atomic64_t *v) static __always_inline s64 atomic64_sub_return_release(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_sub_return_release(i, v); } #define atomic64_sub_return_release atomic64_sub_return_release @@ -1010,7 +997,7 @@ atomic64_sub_return_release(s64 i, atomic64_t *v) static __always_inline s64 atomic64_sub_return_relaxed(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_sub_return_relaxed(i, v); } #define atomic64_sub_return_relaxed atomic64_sub_return_relaxed @@ -1020,7 +1007,7 @@ atomic64_sub_return_relaxed(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_sub(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_sub(i, v); } #define 
atomic64_fetch_sub atomic64_fetch_sub @@ -1030,7 +1017,7 @@ atomic64_fetch_sub(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_sub_acquire(i, v); } #define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire @@ -1040,7 +1027,7 @@ atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_sub_release(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_sub_release(i, v); } #define atomic64_fetch_sub_release atomic64_fetch_sub_release @@ -1050,7 +1037,7 @@ atomic64_fetch_sub_release(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_sub_relaxed(i, v); } #define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed @@ -1060,7 +1047,7 @@ atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v) static __always_inline void atomic64_inc(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic64_inc(v); } #define atomic64_inc atomic64_inc @@ -1070,7 +1057,7 @@ atomic64_inc(atomic64_t *v) static __always_inline s64 atomic64_inc_return(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_inc_return(v); } #define atomic64_inc_return atomic64_inc_return @@ -1080,7 +1067,7 @@ atomic64_inc_return(atomic64_t *v) static __always_inline s64 atomic64_inc_return_acquire(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_inc_return_acquire(v); } #define atomic64_inc_return_acquire atomic64_inc_return_acquire @@ -1090,7 +1077,7 @@ atomic64_inc_return_acquire(atomic64_t *v) static __always_inline s64 atomic64_inc_return_release(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_inc_return_release(v); } #define atomic64_inc_return_release atomic64_inc_return_release @@ -1100,7 +1087,7 @@ atomic64_inc_return_release(atomic64_t *v) static __always_inline s64 atomic64_inc_return_relaxed(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_inc_return_relaxed(v); } #define atomic64_inc_return_relaxed atomic64_inc_return_relaxed @@ -1110,7 +1097,7 @@ atomic64_inc_return_relaxed(atomic64_t *v) static __always_inline s64 atomic64_fetch_inc(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_inc(v); } #define atomic64_fetch_inc atomic64_fetch_inc @@ -1120,7 +1107,7 @@ atomic64_fetch_inc(atomic64_t *v) static __always_inline s64 atomic64_fetch_inc_acquire(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_inc_acquire(v); } #define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire @@ -1130,7 +1117,7 @@ atomic64_fetch_inc_acquire(atomic64_t *v) static __always_inline s64 atomic64_fetch_inc_release(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_inc_release(v); } #define atomic64_fetch_inc_release atomic64_fetch_inc_release @@ -1140,7 +1127,7 @@ 
atomic64_fetch_inc_release(atomic64_t *v) static __always_inline s64 atomic64_fetch_inc_relaxed(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_inc_relaxed(v); } #define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed @@ -1150,7 +1137,7 @@ atomic64_fetch_inc_relaxed(atomic64_t *v) static __always_inline void atomic64_dec(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic64_dec(v); } #define atomic64_dec atomic64_dec @@ -1160,7 +1147,7 @@ atomic64_dec(atomic64_t *v) static __always_inline s64 atomic64_dec_return(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_dec_return(v); } #define atomic64_dec_return atomic64_dec_return @@ -1170,7 +1157,7 @@ atomic64_dec_return(atomic64_t *v) static __always_inline s64 atomic64_dec_return_acquire(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_dec_return_acquire(v); } #define atomic64_dec_return_acquire atomic64_dec_return_acquire @@ -1180,7 +1167,7 @@ atomic64_dec_return_acquire(atomic64_t *v) static __always_inline s64 atomic64_dec_return_release(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_dec_return_release(v); } #define atomic64_dec_return_release atomic64_dec_return_release @@ -1190,7 +1177,7 @@ atomic64_dec_return_release(atomic64_t *v) static __always_inline s64 atomic64_dec_return_relaxed(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_dec_return_relaxed(v); } #define atomic64_dec_return_relaxed atomic64_dec_return_relaxed @@ -1200,7 +1187,7 @@ atomic64_dec_return_relaxed(atomic64_t *v) static __always_inline s64 atomic64_fetch_dec(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_dec(v); } #define atomic64_fetch_dec atomic64_fetch_dec @@ -1210,7 +1197,7 @@ atomic64_fetch_dec(atomic64_t *v) static __always_inline s64 atomic64_fetch_dec_acquire(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_dec_acquire(v); } #define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire @@ -1220,7 +1207,7 @@ atomic64_fetch_dec_acquire(atomic64_t *v) static __always_inline s64 atomic64_fetch_dec_release(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_dec_release(v); } #define atomic64_fetch_dec_release atomic64_fetch_dec_release @@ -1230,7 +1217,7 @@ atomic64_fetch_dec_release(atomic64_t *v) static __always_inline s64 atomic64_fetch_dec_relaxed(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_dec_relaxed(v); } #define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed @@ -1239,7 +1226,7 @@ atomic64_fetch_dec_relaxed(atomic64_t *v) static __always_inline void atomic64_and(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic64_and(i, v); } #define atomic64_and atomic64_and @@ -1248,7 +1235,7 @@ atomic64_and(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_and(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); 
return arch_atomic64_fetch_and(i, v); } #define atomic64_fetch_and atomic64_fetch_and @@ -1258,7 +1245,7 @@ atomic64_fetch_and(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_and_acquire(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_and_acquire(i, v); } #define atomic64_fetch_and_acquire atomic64_fetch_and_acquire @@ -1268,7 +1255,7 @@ atomic64_fetch_and_acquire(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_and_release(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_and_release(i, v); } #define atomic64_fetch_and_release atomic64_fetch_and_release @@ -1278,7 +1265,7 @@ atomic64_fetch_and_release(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_and_relaxed(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_and_relaxed(i, v); } #define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed @@ -1288,7 +1275,7 @@ atomic64_fetch_and_relaxed(s64 i, atomic64_t *v) static __always_inline void atomic64_andnot(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic64_andnot(i, v); } #define atomic64_andnot atomic64_andnot @@ -1298,7 +1285,7 @@ atomic64_andnot(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_andnot(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_andnot(i, v); } #define atomic64_fetch_andnot atomic64_fetch_andnot @@ -1308,7 +1295,7 @@ atomic64_fetch_andnot(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_andnot_acquire(i, v); } #define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire @@ -1318,7 +1305,7 @@ atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_andnot_release(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_andnot_release(i, v); } #define atomic64_fetch_andnot_release atomic64_fetch_andnot_release @@ -1328,7 +1315,7 @@ atomic64_fetch_andnot_release(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_andnot_relaxed(i, v); } #define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed @@ -1337,7 +1324,7 @@ atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) static __always_inline void atomic64_or(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic64_or(i, v); } #define atomic64_or atomic64_or @@ -1346,7 +1333,7 @@ atomic64_or(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_or(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_or(i, v); } #define atomic64_fetch_or atomic64_fetch_or @@ -1356,7 +1343,7 @@ atomic64_fetch_or(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_or_acquire(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return 
arch_atomic64_fetch_or_acquire(i, v); } #define atomic64_fetch_or_acquire atomic64_fetch_or_acquire @@ -1366,7 +1353,7 @@ atomic64_fetch_or_acquire(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_or_release(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_or_release(i, v); } #define atomic64_fetch_or_release atomic64_fetch_or_release @@ -1376,7 +1363,7 @@ atomic64_fetch_or_release(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_or_relaxed(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_or_relaxed(i, v); } #define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed @@ -1385,7 +1372,7 @@ atomic64_fetch_or_relaxed(s64 i, atomic64_t *v) static __always_inline void atomic64_xor(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic64_xor(i, v); } #define atomic64_xor atomic64_xor @@ -1394,7 +1381,7 @@ atomic64_xor(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_xor(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_xor(i, v); } #define atomic64_fetch_xor atomic64_fetch_xor @@ -1404,7 +1391,7 @@ atomic64_fetch_xor(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_xor_acquire(i, v); } #define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire @@ -1414,7 +1401,7 @@ atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_xor_release(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_xor_release(i, v); } #define atomic64_fetch_xor_release atomic64_fetch_xor_release @@ -1424,7 +1411,7 @@ atomic64_fetch_xor_release(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_xor_relaxed(i, v); } #define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed @@ -1434,7 +1421,7 @@ atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v) static __always_inline s64 atomic64_xchg(atomic64_t *v, s64 i) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_xchg(v, i); } #define atomic64_xchg atomic64_xchg @@ -1444,7 +1431,7 @@ atomic64_xchg(atomic64_t *v, s64 i) static __always_inline s64 atomic64_xchg_acquire(atomic64_t *v, s64 i) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_xchg_acquire(v, i); } #define atomic64_xchg_acquire atomic64_xchg_acquire @@ -1454,7 +1441,7 @@ atomic64_xchg_acquire(atomic64_t *v, s64 i) static __always_inline s64 atomic64_xchg_release(atomic64_t *v, s64 i) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_xchg_release(v, i); } #define atomic64_xchg_release atomic64_xchg_release @@ -1464,7 +1451,7 @@ atomic64_xchg_release(atomic64_t *v, s64 i) static __always_inline s64 atomic64_xchg_relaxed(atomic64_t *v, s64 i) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_xchg_relaxed(v, i); } #define 
atomic64_xchg_relaxed atomic64_xchg_relaxed @@ -1474,7 +1461,7 @@ atomic64_xchg_relaxed(atomic64_t *v, s64 i) static __always_inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_cmpxchg(v, old, new); } #define atomic64_cmpxchg atomic64_cmpxchg @@ -1484,7 +1471,7 @@ atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) static __always_inline s64 atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_cmpxchg_acquire(v, old, new); } #define atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire @@ -1494,7 +1481,7 @@ atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) static __always_inline s64 atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_cmpxchg_release(v, old, new); } #define atomic64_cmpxchg_release atomic64_cmpxchg_release @@ -1504,7 +1491,7 @@ atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) static __always_inline s64 atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_cmpxchg_relaxed(v, old, new); } #define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed @@ -1514,8 +1501,8 @@ atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new) static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) { - __atomic_check_write(v, sizeof(*v)); - __atomic_check_write(old, sizeof(*old)); + instrument_atomic_write(v, sizeof(*v)); + instrument_atomic_write(old, sizeof(*old)); return arch_atomic64_try_cmpxchg(v, old, new); } #define atomic64_try_cmpxchg atomic64_try_cmpxchg @@ -1525,8 +1512,8 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) static __always_inline bool atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) { - __atomic_check_write(v, sizeof(*v)); - __atomic_check_write(old, sizeof(*old)); + instrument_atomic_write(v, sizeof(*v)); + instrument_atomic_write(old, sizeof(*old)); return arch_atomic64_try_cmpxchg_acquire(v, old, new); } #define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire @@ -1536,8 +1523,8 @@ atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) static __always_inline bool atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) { - __atomic_check_write(v, sizeof(*v)); - __atomic_check_write(old, sizeof(*old)); + instrument_atomic_write(v, sizeof(*v)); + instrument_atomic_write(old, sizeof(*old)); return arch_atomic64_try_cmpxchg_release(v, old, new); } #define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release @@ -1547,8 +1534,8 @@ atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) static __always_inline bool atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) { - __atomic_check_write(v, sizeof(*v)); - __atomic_check_write(old, sizeof(*old)); + instrument_atomic_write(v, sizeof(*v)); + instrument_atomic_write(old, sizeof(*old)); return arch_atomic64_try_cmpxchg_relaxed(v, old, new); } #define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed @@ -1558,7 +1545,7 @@ atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) static __always_inline bool atomic64_sub_and_test(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_sub_and_test(i, v); } 
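For reference, the instrument_atomic_read()/instrument_atomic_write() calls used throughout this diff come from the new generic instrumented.h header rather than from local helpers. A minimal sketch of what those wrappers are expected to look like, assuming they simply fold together the KASAN and KCSAN checks that the deleted __atomic_check_read()/__atomic_check_write() helpers performed, is:

/* Sketch only; header names and bodies assumed from the deleted helpers above. */
#include <linux/kasan-checks.h>
#include <linux/kcsan-checks.h>

static __always_inline void instrument_atomic_read(const volatile void *v, size_t size)
{
	/* Same pair of checks the old __atomic_check_read() issued. */
	kasan_check_read(v, size);
	kcsan_check_atomic_read(v, size);
}

static __always_inline void instrument_atomic_write(const volatile void *v, size_t size)
{
	/* Same pair of checks the old __atomic_check_write() issued. */
	kasan_check_write(v, size);
	kcsan_check_atomic_write(v, size);
}

With wrappers of this shape, the mechanical s/__atomic_check_*/instrument_atomic_*/ substitution in this patch changes no behaviour for KASAN and keeps the KCSAN atomic annotations intact.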
#define atomic64_sub_and_test atomic64_sub_and_test @@ -1568,7 +1555,7 @@ atomic64_sub_and_test(s64 i, atomic64_t *v) static __always_inline bool atomic64_dec_and_test(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_dec_and_test(v); } #define atomic64_dec_and_test atomic64_dec_and_test @@ -1578,7 +1565,7 @@ atomic64_dec_and_test(atomic64_t *v) static __always_inline bool atomic64_inc_and_test(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_inc_and_test(v); } #define atomic64_inc_and_test atomic64_inc_and_test @@ -1588,7 +1575,7 @@ atomic64_inc_and_test(atomic64_t *v) static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_add_negative(i, v); } #define atomic64_add_negative atomic64_add_negative @@ -1598,7 +1585,7 @@ atomic64_add_negative(s64 i, atomic64_t *v) static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_add_unless(v, a, u); } #define atomic64_fetch_add_unless atomic64_fetch_add_unless @@ -1608,7 +1595,7 @@ atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) static __always_inline bool atomic64_add_unless(atomic64_t *v, s64 a, s64 u) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_add_unless(v, a, u); } #define atomic64_add_unless atomic64_add_unless @@ -1618,7 +1605,7 @@ atomic64_add_unless(atomic64_t *v, s64 a, s64 u) static __always_inline bool atomic64_inc_not_zero(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_inc_not_zero(v); } #define atomic64_inc_not_zero atomic64_inc_not_zero @@ -1628,7 +1615,7 @@ atomic64_inc_not_zero(atomic64_t *v) static __always_inline bool atomic64_inc_unless_negative(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_inc_unless_negative(v); } #define atomic64_inc_unless_negative atomic64_inc_unless_negative @@ -1638,7 +1625,7 @@ atomic64_inc_unless_negative(atomic64_t *v) static __always_inline bool atomic64_dec_unless_positive(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_dec_unless_positive(v); } #define atomic64_dec_unless_positive atomic64_dec_unless_positive @@ -1648,7 +1635,7 @@ atomic64_dec_unless_positive(atomic64_t *v) static __always_inline s64 atomic64_dec_if_positive(atomic64_t *v) { - __atomic_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_dec_if_positive(v); } #define atomic64_dec_if_positive atomic64_dec_if_positive @@ -1658,7 +1645,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define xchg(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_xchg(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1667,7 +1654,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define xchg_acquire(ptr, ...) 
\ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_xchg_acquire(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1676,7 +1663,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define xchg_release(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_xchg_release(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1685,7 +1672,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define xchg_relaxed(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_xchg_relaxed(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1694,7 +1681,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1703,7 +1690,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg_acquire(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1712,7 +1699,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg_release(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg_release(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1721,7 +1708,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg_relaxed(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1730,7 +1717,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg64(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg64(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1739,7 +1726,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg64_acquire(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1748,7 +1735,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg64_release(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1757,7 +1744,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg64_relaxed(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1765,28 +1752,28 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg_local(ptr, ...) 
\ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg_local(__ai_ptr, __VA_ARGS__); \ }) #define cmpxchg64_local(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg64_local(__ai_ptr, __VA_ARGS__); \ }) #define sync_cmpxchg(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - __atomic_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_sync_cmpxchg(__ai_ptr, __VA_ARGS__); \ }) #define cmpxchg_double(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - __atomic_check_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \ arch_cmpxchg_double(__ai_ptr, __VA_ARGS__); \ }) @@ -1794,9 +1781,9 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg_double_local(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - __atomic_check_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \ arch_cmpxchg_double_local(__ai_ptr, __VA_ARGS__); \ }) #endif /* _ASM_GENERIC_ATOMIC_INSTRUMENTED_H */ -// 7b7e2af0e75c8ecb6f02298a7075f503f30d244c +// 89bf97f3a7509b740845e51ddf31055b48a81f40 diff --git a/scripts/atomic/gen-atomic-instrumented.sh b/scripts/atomic/gen-atomic-instrumented.sh index fb4222548b22e..6afadf73da170 100755 --- a/scripts/atomic/gen-atomic-instrumented.sh +++ b/scripts/atomic/gen-atomic-instrumented.sh @@ -20,7 +20,7 @@ gen_param_check() # We don't write to constant parameters [ ${type#c} != ${type} ] && rw="read" - printf "\t__atomic_check_${rw}(${name}, sizeof(*${name}));\n" + printf "\tinstrument_atomic_${rw}(${name}, sizeof(*${name}));\n" } #gen_param_check(arg...) @@ -107,7 +107,7 @@ cat < #include -#include -#include - -static __always_inline void __atomic_check_read(const volatile void *v, size_t size) -{ - kasan_check_read(v, size); - kcsan_check_atomic_read(v, size); -} - -static __always_inline void __atomic_check_write(const volatile void *v, size_t size) -{ - kasan_check_write(v, size); - kcsan_check_atomic_write(v, size); -} +#include EOF -- GitLab From 27f937cc810aef63b0752f5bfbf383390dd076a1 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Tue, 21 Jan 2020 17:05:10 +0100 Subject: [PATCH 0027/1971] asm-generic, kcsan: Add KCSAN instrumentation for bitops Add explicit KCSAN checks for bitops. Acked-by: Arnd Bergmann Signed-off-by: Marco Elver Signed-off-by: Paul E. 
McKenney Signed-off-by: Ingo Molnar --- include/asm-generic/bitops/instrumented-atomic.h | 14 +++++++------- include/asm-generic/bitops/instrumented-lock.h | 10 +++++----- .../asm-generic/bitops/instrumented-non-atomic.h | 16 ++++++++-------- 3 files changed, 20 insertions(+), 20 deletions(-) diff --git a/include/asm-generic/bitops/instrumented-atomic.h b/include/asm-generic/bitops/instrumented-atomic.h index 18ce3c9e8eec7..fb2cb33a40138 100644 --- a/include/asm-generic/bitops/instrumented-atomic.h +++ b/include/asm-generic/bitops/instrumented-atomic.h @@ -11,7 +11,7 @@ #ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H #define _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H -#include +#include /** * set_bit - Atomically set a bit in memory @@ -25,7 +25,7 @@ */ static inline void set_bit(long nr, volatile unsigned long *addr) { - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long)); arch_set_bit(nr, addr); } @@ -38,7 +38,7 @@ static inline void set_bit(long nr, volatile unsigned long *addr) */ static inline void clear_bit(long nr, volatile unsigned long *addr) { - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long)); arch_clear_bit(nr, addr); } @@ -54,7 +54,7 @@ static inline void clear_bit(long nr, volatile unsigned long *addr) */ static inline void change_bit(long nr, volatile unsigned long *addr) { - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long)); arch_change_bit(nr, addr); } @@ -67,7 +67,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr) */ static inline bool test_and_set_bit(long nr, volatile unsigned long *addr) { - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long)); return arch_test_and_set_bit(nr, addr); } @@ -80,7 +80,7 @@ static inline bool test_and_set_bit(long nr, volatile unsigned long *addr) */ static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr) { - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long)); return arch_test_and_clear_bit(nr, addr); } @@ -93,7 +93,7 @@ static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr) */ static inline bool test_and_change_bit(long nr, volatile unsigned long *addr) { - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long)); return arch_test_and_change_bit(nr, addr); } diff --git a/include/asm-generic/bitops/instrumented-lock.h b/include/asm-generic/bitops/instrumented-lock.h index ec53fdeea9eca..b9bec468ae03c 100644 --- a/include/asm-generic/bitops/instrumented-lock.h +++ b/include/asm-generic/bitops/instrumented-lock.h @@ -11,7 +11,7 @@ #ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H #define _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H -#include +#include /** * clear_bit_unlock - Clear a bit in memory, for unlock @@ -22,7 +22,7 @@ */ static inline void clear_bit_unlock(long nr, volatile unsigned long *addr) { - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long)); arch_clear_bit_unlock(nr, addr); } @@ -37,7 +37,7 @@ static inline void clear_bit_unlock(long nr, volatile unsigned long *addr) */ static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr) { - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + instrument_write(addr + 
BIT_WORD(nr), sizeof(long)); arch___clear_bit_unlock(nr, addr); } @@ -52,7 +52,7 @@ static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr) */ static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr) { - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long)); return arch_test_and_set_bit_lock(nr, addr); } @@ -71,7 +71,7 @@ static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr) static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr) { - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long)); return arch_clear_bit_unlock_is_negative_byte(nr, addr); } /* Let everybody know we have it. */ diff --git a/include/asm-generic/bitops/instrumented-non-atomic.h b/include/asm-generic/bitops/instrumented-non-atomic.h index 95ff28d128a17..20f788a25ef95 100644 --- a/include/asm-generic/bitops/instrumented-non-atomic.h +++ b/include/asm-generic/bitops/instrumented-non-atomic.h @@ -11,7 +11,7 @@ #ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H #define _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H -#include +#include /** * __set_bit - Set a bit in memory @@ -24,7 +24,7 @@ */ static inline void __set_bit(long nr, volatile unsigned long *addr) { - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + instrument_write(addr + BIT_WORD(nr), sizeof(long)); arch___set_bit(nr, addr); } @@ -39,7 +39,7 @@ static inline void __set_bit(long nr, volatile unsigned long *addr) */ static inline void __clear_bit(long nr, volatile unsigned long *addr) { - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + instrument_write(addr + BIT_WORD(nr), sizeof(long)); arch___clear_bit(nr, addr); } @@ -54,7 +54,7 @@ static inline void __clear_bit(long nr, volatile unsigned long *addr) */ static inline void __change_bit(long nr, volatile unsigned long *addr) { - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + instrument_write(addr + BIT_WORD(nr), sizeof(long)); arch___change_bit(nr, addr); } @@ -68,7 +68,7 @@ static inline void __change_bit(long nr, volatile unsigned long *addr) */ static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr) { - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + instrument_write(addr + BIT_WORD(nr), sizeof(long)); return arch___test_and_set_bit(nr, addr); } @@ -82,7 +82,7 @@ static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr) */ static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr) { - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + instrument_write(addr + BIT_WORD(nr), sizeof(long)); return arch___test_and_clear_bit(nr, addr); } @@ -96,7 +96,7 @@ static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr) */ static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr) { - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + instrument_write(addr + BIT_WORD(nr), sizeof(long)); return arch___test_and_change_bit(nr, addr); } @@ -107,7 +107,7 @@ static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr) */ static inline bool test_bit(long nr, const volatile unsigned long *addr) { - kasan_check_read(addr + BIT_WORD(nr), sizeof(long)); + instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long)); return arch_test_bit(nr, addr); } -- GitLab From d0ef4c360f7ea33905539b9b36fa2273915703f0 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: 
Tue, 21 Jan 2020 17:05:11 +0100 Subject: [PATCH 0028/1971] iov_iter: Use generic instrumented.h This replaces the kasan instrumentation with generic instrumentation, implicitly adding KCSAN instrumentation support. For KASAN no functional change is intended. Suggested-by: Arnd Bergmann Signed-off-by: Marco Elver Signed-off-by: Paul E. McKenney Signed-off-by: Ingo Molnar --- lib/iov_iter.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/lib/iov_iter.c b/lib/iov_iter.c index 51595bf3af850..bf538c2bec777 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -8,6 +8,7 @@ #include #include #include +#include #define PIPE_PARANOIA /* for now */ @@ -138,7 +139,7 @@ static int copyout(void __user *to, const void *from, size_t n) { if (access_ok(to, n)) { - kasan_check_read(from, n); + instrument_copy_to_user(to, from, n); n = raw_copy_to_user(to, from, n); } return n; @@ -147,7 +148,7 @@ static int copyout(void __user *to, const void *from, size_t n) static int copyin(void *to, const void __user *from, size_t n) { if (access_ok(from, n)) { - kasan_check_write(to, n); + instrument_copy_from_user(to, from, n); n = raw_copy_from_user(to, from, n); } return n; @@ -639,7 +640,7 @@ EXPORT_SYMBOL(_copy_to_iter); static int copyout_mcsafe(void __user *to, const void *from, size_t n) { if (access_ok(to, n)) { - kasan_check_read(from, n); + instrument_copy_to_user(to, from, n); n = copy_to_user_mcsafe((__force void *) to, from, n); } return n; -- GitLab From 76d6f06c36a3b5cc402eeeb709613cb211fdaa8f Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Tue, 21 Jan 2020 17:05:12 +0100 Subject: [PATCH 0029/1971] copy_to_user, copy_from_user: Use generic instrumented.h This replaces the KASAN instrumentation with generic instrumentation, implicitly adding KCSAN instrumentation support. For KASAN no functional change is intended. Suggested-by: Arnd Bergmann Signed-off-by: Marco Elver Signed-off-by: Paul E. 
McKenney Signed-off-by: Ingo Molnar --- include/linux/uaccess.h | 14 +++++++------- lib/usercopy.c | 7 ++++--- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 67f016010aad5..8a215c5c1aed8 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -2,9 +2,9 @@ #ifndef __LINUX_UACCESS_H__ #define __LINUX_UACCESS_H__ +#include #include #include -#include #define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS) @@ -58,7 +58,7 @@ static __always_inline __must_check unsigned long __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) { - kasan_check_write(to, n); + instrument_copy_from_user(to, from, n); check_object_size(to, n, false); return raw_copy_from_user(to, from, n); } @@ -67,7 +67,7 @@ static __always_inline __must_check unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) { might_fault(); - kasan_check_write(to, n); + instrument_copy_from_user(to, from, n); check_object_size(to, n, false); return raw_copy_from_user(to, from, n); } @@ -88,7 +88,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n) static __always_inline __must_check unsigned long __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) { - kasan_check_read(from, n); + instrument_copy_to_user(to, from, n); check_object_size(from, n, true); return raw_copy_to_user(to, from, n); } @@ -97,7 +97,7 @@ static __always_inline __must_check unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n) { might_fault(); - kasan_check_read(from, n); + instrument_copy_to_user(to, from, n); check_object_size(from, n, true); return raw_copy_to_user(to, from, n); } @@ -109,7 +109,7 @@ _copy_from_user(void *to, const void __user *from, unsigned long n) unsigned long res = n; might_fault(); if (likely(access_ok(from, n))) { - kasan_check_write(to, n); + instrument_copy_from_user(to, from, n); res = raw_copy_from_user(to, from, n); } if (unlikely(res)) @@ -127,7 +127,7 @@ _copy_to_user(void __user *to, const void *from, unsigned long n) { might_fault(); if (access_ok(to, n)) { - kasan_check_read(from, n); + instrument_copy_to_user(to, from, n); n = raw_copy_to_user(to, from, n); } return n; diff --git a/lib/usercopy.c b/lib/usercopy.c index cbb4d9ec00f20..4bb1c5e7a3eb0 100644 --- a/lib/usercopy.c +++ b/lib/usercopy.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 -#include #include +#include +#include /* out-of-line parts */ @@ -10,7 +11,7 @@ unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n unsigned long res = n; might_fault(); if (likely(access_ok(from, n))) { - kasan_check_write(to, n); + instrument_copy_from_user(to, from, n); res = raw_copy_from_user(to, from, n); } if (unlikely(res)) @@ -25,7 +26,7 @@ unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n) { might_fault(); if (likely(access_ok(to, n))) { - kasan_check_read(from, n); + instrument_copy_to_user(to, from, n); n = raw_copy_to_user(to, from, n); } return n; -- GitLab From 7ad900d35b49af5a05f595d2274c32e69e01b055 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 3 Feb 2020 14:42:18 -0800 Subject: [PATCH 0030/1971] kcsan: Add docbook header for data_race() Signed-off-by: Paul E. 
McKenney Signed-off-by: Ingo Molnar Cc: Marco Elver Cc: Dmitry Vyukov --- include/linux/compiler.h | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 8c0beb10c1ddb..c1bdf37571cb8 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -315,13 +315,15 @@ unsigned long read_word_at_a_time(const void *addr) #include -/* - * data_race(): macro to document that accesses in an expression may conflict with - * other concurrent accesses resulting in data races, but the resulting - * behaviour is deemed safe regardless. +/** + * data_race - mark an expression as containing intentional data races + * + * This data_race() macro is useful for situations in which data races + * should be forgiven. One example is diagnostic code that accesses + * shared variables but is not a part of the core synchronization design. * - * This macro *does not* affect normal code generation, but is a hint to tooling - * that data races here should be ignored. + * This macro *does not* affect normal code generation, but is a hint + * to tooling that data races here are to be ignored. */ #define data_race(expr) \ ({ \ -- GitLab From 1e6ee2f0fe8ae682757960edf455e99f611268a0 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Tue, 4 Feb 2020 18:21:10 +0100 Subject: [PATCH 0031/1971] kcsan: Add option to assume plain aligned writes up to word size are atomic This adds option KCSAN_ASSUME_PLAIN_WRITES_ATOMIC. If enabled, plain aligned writes up to word size are assumed to be atomic, and also not subject to other unsafe compiler optimizations resulting in data races. This option has been enabled by default to reflect current kernel-wide preferences. Signed-off-by: Marco Elver Signed-off-by: Paul E. McKenney Signed-off-by: Ingo Molnar --- kernel/kcsan/core.c | 22 +++++++++++++++++----- lib/Kconfig.kcsan | 27 ++++++++++++++++++++------- 2 files changed, 37 insertions(+), 12 deletions(-) diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c index 64b30f7716a12..e3c7d8f34f2ff 100644 --- a/kernel/kcsan/core.c +++ b/kernel/kcsan/core.c @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include @@ -169,10 +170,20 @@ static __always_inline struct kcsan_ctx *get_ctx(void) return in_task() ? ¤t->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx); } -static __always_inline bool is_atomic(const volatile void *ptr) +static __always_inline bool +is_atomic(const volatile void *ptr, size_t size, int type) { - struct kcsan_ctx *ctx = get_ctx(); + struct kcsan_ctx *ctx; + + if ((type & KCSAN_ACCESS_ATOMIC) != 0) + return true; + if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC) && + (type & KCSAN_ACCESS_WRITE) != 0 && size <= sizeof(long) && + IS_ALIGNED((unsigned long)ptr, size)) + return true; /* Assume aligned writes up to word size are atomic. */ + + ctx = get_ctx(); if (unlikely(ctx->atomic_next > 0)) { /* * Because we do not have separate contexts for nested @@ -193,7 +204,8 @@ static __always_inline bool is_atomic(const volatile void *ptr) return kcsan_is_atomic(ptr); } -static __always_inline bool should_watch(const volatile void *ptr, int type) +static __always_inline bool +should_watch(const volatile void *ptr, size_t size, int type) { /* * Never set up watchpoints when memory operations are atomic. 
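(Illustration, not part of the patch; names are hypothetical. The sketch below shows the kind of conflict that CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC stops reporting: the plain, naturally aligned, word-sized write is assumed atomic, so its conflict with the marked read is no longer flagged, whereas a wider or unaligned plain write would still be.)

#include <linux/compiler.h>

static unsigned long hit_count;	/* naturally aligned, word-sized */

/* Writer: a plain store of at most sizeof(long), aligned. */
static void bump_hit_count(void)
{
	hit_count++;	/* plain write; assumed atomic with this option */
}

/* Reader on another CPU: a marked read. */
static unsigned long read_hit_count(void)
{
	return READ_ONCE(hit_count);
}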
@@ -202,7 +214,7 @@ static __always_inline bool should_watch(const volatile void *ptr, int type) * should not count towards skipped instructions, and (2) to actually * decrement kcsan_atomic_next for consecutive instruction stream. */ - if ((type & KCSAN_ACCESS_ATOMIC) != 0 || is_atomic(ptr)) + if (is_atomic(ptr, size, type)) return false; if (this_cpu_dec_return(kcsan_skip) >= 0) @@ -460,7 +472,7 @@ static __always_inline void check_access(const volatile void *ptr, size_t size, if (unlikely(watchpoint != NULL)) kcsan_found_watchpoint(ptr, size, type, watchpoint, encoded_watchpoint); - else if (unlikely(should_watch(ptr, type))) + else if (unlikely(should_watch(ptr, size, type))) kcsan_setup_watchpoint(ptr, size, type); } diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan index 3552990abcfe5..66126853dab02 100644 --- a/lib/Kconfig.kcsan +++ b/lib/Kconfig.kcsan @@ -91,13 +91,13 @@ config KCSAN_REPORT_ONCE_IN_MS limiting reporting to avoid flooding the console with reports. Setting this to 0 disables rate limiting. -# Note that, while some of the below options could be turned into boot -# parameters, to optimize for the common use-case, we avoid this because: (a) -# it would impact performance (and we want to avoid static branch for all -# {READ,WRITE}_ONCE, atomic_*, bitops, etc.), and (b) complicate the design -# without real benefit. The main purpose of the below options is for use in -# fuzzer configs to control reported data races, and they are not expected -# to be switched frequently by a user. +# The main purpose of the below options is to control reported data races (e.g. +# in fuzzer configs), and are not expected to be switched frequently by other +# users. We could turn some of them into boot parameters, but given they should +# not be switched normally, let's keep them here to simplify configuration. +# +# The defaults below are chosen to be very conservative, and may miss certain +# bugs. config KCSAN_REPORT_RACE_UNKNOWN_ORIGIN bool "Report races of unknown origin" @@ -116,6 +116,19 @@ config KCSAN_REPORT_VALUE_CHANGE_ONLY the data value of the memory location was observed to remain unchanged, do not report the data race. +config KCSAN_ASSUME_PLAIN_WRITES_ATOMIC + bool "Assume that plain aligned writes up to word size are atomic" + default y + help + Assume that plain aligned writes up to word size are atomic by + default, and also not subject to other unsafe compiler optimizations + resulting in data races. This will cause KCSAN to not report data + races due to conflicts where the only plain accesses are aligned + writes up to word size: conflicts between marked reads and plain + aligned writes up to word size will not be reported as data races; + notice that data races between two conflicting plain aligned writes + will also not be reported. + config KCSAN_IGNORE_ATOMICS bool "Do not instrument marked atomic accesses" help -- GitLab From a249a73231e2e30944b948c5351025e5ff65f6d1 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Tue, 4 Feb 2020 18:21:11 +0100 Subject: [PATCH 0032/1971] kcsan: Clarify Kconfig option KCSAN_IGNORE_ATOMICS Clarify difference between options KCSAN_IGNORE_ATOMICS and KCSAN_ASSUME_PLAIN_WRITES_ATOMIC in help text. Signed-off-by: Marco Elver Signed-off-by: Paul E. 
McKenney Signed-off-by: Ingo Molnar --- lib/Kconfig.kcsan | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan index 66126853dab02..020ac63e43617 100644 --- a/lib/Kconfig.kcsan +++ b/lib/Kconfig.kcsan @@ -132,8 +132,18 @@ config KCSAN_ASSUME_PLAIN_WRITES_ATOMIC config KCSAN_IGNORE_ATOMICS bool "Do not instrument marked atomic accesses" help - If enabled, never instruments marked atomic accesses. This results in - not reporting data races where one access is atomic and the other is - a plain access. + Never instrument marked atomic accesses. This option can be used for + additional filtering. Conflicting marked atomic reads and plain + writes will never be reported as a data race, however, will cause + plain reads and marked writes to result in "unknown origin" reports. + If combined with CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN=n, data + races where at least one access is marked atomic will never be + reported. + + Similar to KCSAN_ASSUME_PLAIN_WRITES_ATOMIC, but including unaligned + accesses, conflicting marked atomic reads and plain writes will not + be reported as data races; however, unlike that option, data races + due to two conflicting plain writes will be reported (aligned and + unaligned, if CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=n). endif # KCSAN -- GitLab From 8cfbb04fae75260eae07ab8c74c1dcd44294d739 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Tue, 4 Feb 2020 18:21:12 +0100 Subject: [PATCH 0033/1971] kcsan: Clean up the main KCSAN Kconfig option This patch cleans up the rules of the 'KCSAN' Kconfig option by: 1. implicitly selecting 'STACKTRACE' instead of depending on it; 2. depending on DEBUG_KERNEL, to avoid accidentally turning KCSAN on if the kernel is not meant to be a debug kernel; 3. updating the short and long summaries. Signed-off-by: Marco Elver Signed-off-by: Paul E. McKenney Signed-off-by: Ingo Molnar --- lib/Kconfig.kcsan | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan index 020ac63e43617..9785bbf9a1d11 100644 --- a/lib/Kconfig.kcsan +++ b/lib/Kconfig.kcsan @@ -4,12 +4,15 @@ config HAVE_ARCH_KCSAN bool menuconfig KCSAN - bool "KCSAN: watchpoint-based dynamic data race detector" - depends on HAVE_ARCH_KCSAN && !KASAN && STACKTRACE + bool "KCSAN: dynamic data race detector" + depends on HAVE_ARCH_KCSAN && DEBUG_KERNEL && !KASAN + select STACKTRACE help - Kernel Concurrency Sanitizer is a dynamic data race detector, which - uses a watchpoint-based sampling approach to detect races. See - for more details. + The Kernel Concurrency Sanitizer (KCSAN) is a dynamic data race + detector, which relies on compile-time instrumentation, and uses a + watchpoint-based sampling approach to detect data races. + + See for more details. if KCSAN -- GitLab From ed95f95c86cd53621103d865d62b5e1f96e60edb Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Wed, 5 Feb 2020 11:14:19 +0100 Subject: [PATCH 0034/1971] kcsan: Fix 0-sized checks Instrumentation of arbitrary memory-copy functions, such as user-copies, may be called with size of 0, which could lead to false positives. To avoid this, add a comparison in check_access() for size==0, which will be optimized out for constant sized instrumentation (__tsan_{read,write}N), and therefore not affect the common-case fast-path. Signed-off-by: Marco Elver Signed-off-by: Paul E. 
McKenney Signed-off-by: Ingo Molnar --- kernel/kcsan/core.c | 7 +++++++ kernel/kcsan/test.c | 10 ++++++++++ 2 files changed, 17 insertions(+) diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c index e3c7d8f34f2ff..82c2bef827d42 100644 --- a/kernel/kcsan/core.c +++ b/kernel/kcsan/core.c @@ -455,6 +455,13 @@ static __always_inline void check_access(const volatile void *ptr, size_t size, atomic_long_t *watchpoint; long encoded_watchpoint; + /* + * Do nothing for 0 sized check; this comparison will be optimized out + * for constant sized instrumentation (__tsan_{read,write}N). + */ + if (unlikely(size == 0)) + return; + /* * Avoid user_access_save in fast-path: find_watchpoint is safe without * user_access_save, as the address that ptr points to is only used to diff --git a/kernel/kcsan/test.c b/kernel/kcsan/test.c index cc6000239dc01..d26a052d33838 100644 --- a/kernel/kcsan/test.c +++ b/kernel/kcsan/test.c @@ -92,6 +92,16 @@ static bool test_matching_access(void) return false; if (WARN_ON(matching_access(9, 1, 10, 1))) return false; + + /* + * An access of size 0 could match another access, as demonstrated here. + * Rather than add more comparisons to 'matching_access()', which would + * end up in the fast-path for *all* checks, check_access() simply + * returns for all accesses of size 0. + */ + if (WARN_ON(!matching_access(8, 8, 12, 0))) + return false; + return true; } -- GitLab From d591ec3db75f9eadfa7976ff8796c674c0027715 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Thu, 6 Feb 2020 16:46:24 +0100 Subject: [PATCH 0035/1971] kcsan: Introduce KCSAN_ACCESS_ASSERT access type The KCSAN_ACCESS_ASSERT access type may be used to introduce dummy reads and writes to assert certain properties of concurrent code, where bugs could not be detected as normal data races. For example, a variable that is only meant to be written by a single CPU, but may be read (without locking) by other CPUs must still be marked properly to avoid data races. However, concurrent writes, regardless if WRITE_ONCE() or not, would be a bug. Using kcsan_check_access(&x, sizeof(x), KCSAN_ACCESS_ASSERT) would allow catching such bugs. To support KCSAN_ACCESS_ASSERT the following notable changes were made: * If an access is of type KCSAN_ASSERT_ACCESS, disable various filters that only apply to data races, so that all races that KCSAN observes are reported. * Bug reports that involve an ASSERT access type will be reported as "KCSAN: assert: race in ..." instead of "data-race"; this will help more easily distinguish them. * Update a few comments to just mention 'races' where we do not always mean pure data races. Signed-off-by: Marco Elver Signed-off-by: Paul E. McKenney Signed-off-by: Ingo Molnar --- include/linux/kcsan-checks.h | 18 ++++++++++----- kernel/kcsan/core.c | 44 +++++++++++++++++++++++++++++++----- kernel/kcsan/debugfs.c | 1 + kernel/kcsan/kcsan.h | 7 ++++++ kernel/kcsan/report.c | 43 +++++++++++++++++++++++++---------- lib/Kconfig.kcsan | 24 ++++++++++++-------- 6 files changed, 103 insertions(+), 34 deletions(-) diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h index ef3ee233a3fa9..5dcadc221026e 100644 --- a/include/linux/kcsan-checks.h +++ b/include/linux/kcsan-checks.h @@ -6,10 +6,16 @@ #include /* - * Access type modifiers. 
+ * ACCESS TYPE MODIFIERS + * + * : normal read access; + * WRITE : write access; + * ATOMIC: access is atomic; + * ASSERT: access is not a regular access, but an assertion; */ #define KCSAN_ACCESS_WRITE 0x1 #define KCSAN_ACCESS_ATOMIC 0x2 +#define KCSAN_ACCESS_ASSERT 0x4 /* * __kcsan_*: Always calls into the runtime when KCSAN is enabled. This may be used @@ -18,7 +24,7 @@ */ #ifdef CONFIG_KCSAN /** - * __kcsan_check_access - check generic access for data races + * __kcsan_check_access - check generic access for races * * @ptr address of access * @size size of access @@ -43,7 +49,7 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size, #endif /** - * __kcsan_check_read - check regular read access for data races + * __kcsan_check_read - check regular read access for races * * @ptr address of access * @size size of access @@ -51,7 +57,7 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size, #define __kcsan_check_read(ptr, size) __kcsan_check_access(ptr, size, 0) /** - * __kcsan_check_write - check regular write access for data races + * __kcsan_check_write - check regular write access for races * * @ptr address of access * @size size of access @@ -60,7 +66,7 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size, __kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE) /** - * kcsan_check_read - check regular read access for data races + * kcsan_check_read - check regular read access for races * * @ptr address of access * @size size of access @@ -68,7 +74,7 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size, #define kcsan_check_read(ptr, size) kcsan_check_access(ptr, size, 0) /** - * kcsan_check_write - check regular write access for data races + * kcsan_check_write - check regular write access for races * * @ptr address of access * @size size of access diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c index 82c2bef827d42..87ef01e40199d 100644 --- a/kernel/kcsan/core.c +++ b/kernel/kcsan/core.c @@ -56,7 +56,7 @@ static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = { /* * SLOT_IDX_FAST is used in the fast-path. Not first checking the address's primary - * slot (middle) is fine if we assume that data races occur rarely. The set of + * slot (middle) is fine if we assume that races occur rarely. The set of * indices {SLOT_IDX(slot, i) | i in [0, NUM_SLOTS)} is equivalent to * {SLOT_IDX_FAST(slot, i) | i in [0, NUM_SLOTS)}. */ @@ -178,6 +178,14 @@ is_atomic(const volatile void *ptr, size_t size, int type) if ((type & KCSAN_ACCESS_ATOMIC) != 0) return true; + /* + * Unless explicitly declared atomic, never consider an assertion access + * as atomic. This allows using them also in atomic regions, such as + * seqlocks, without implicitly changing their semantics. 
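(Illustration, not part of the patch; names are hypothetical. Following the changelog's example, a dummy ASSERT access next to the owning context's update makes KCSAN report any concurrent writer, including marked writes that would otherwise not constitute a data race.)

#include <linux/compiler.h>
#include <linux/kcsan-checks.h>

/* Only the owning CPU is supposed to update this; other CPUs may read it. */
static unsigned long owner_counter;

static void owner_set_counter(unsigned long val)
{
	/* Dummy access: assert that no other writes happen concurrently. */
	kcsan_check_access(&owner_counter, sizeof(owner_counter),
			   KCSAN_ACCESS_ASSERT);
	WRITE_ONCE(owner_counter, val);	/* readers use READ_ONCE() */
}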
+ */ + if ((type & KCSAN_ACCESS_ASSERT) != 0) + return false; + if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC) && (type & KCSAN_ACCESS_WRITE) != 0 && size <= sizeof(long) && IS_ALIGNED((unsigned long)ptr, size)) @@ -298,7 +306,11 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr, */ kcsan_counter_inc(KCSAN_COUNTER_REPORT_RACES); } - kcsan_counter_inc(KCSAN_COUNTER_DATA_RACES); + + if ((type & KCSAN_ACCESS_ASSERT) != 0) + kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES); + else + kcsan_counter_inc(KCSAN_COUNTER_DATA_RACES); user_access_restore(flags); } @@ -307,6 +319,7 @@ static noinline void kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type) { const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0; + const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0; atomic_long_t *watchpoint; union { u8 _1; @@ -429,13 +442,32 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type) /* * No need to increment 'data_races' counter, as the racing * thread already did. + * + * Count 'assert_failures' for each failed ASSERT access, + * therefore both this thread and the racing thread may + * increment this counter. */ - kcsan_report(ptr, size, type, size > 8 || value_change, - smp_processor_id(), KCSAN_REPORT_RACE_SIGNAL); + if (is_assert) + kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES); + + /* + * - If we were not able to observe a value change due to size + * constraints, always assume a value change. + * - If the access type is an assertion, we also always assume a + * value change to always report the race. + */ + value_change = value_change || size > 8 || is_assert; + + kcsan_report(ptr, size, type, value_change, smp_processor_id(), + KCSAN_REPORT_RACE_SIGNAL); } else if (value_change) { /* Inferring a race, since the value should not have changed. */ + kcsan_counter_inc(KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN); - if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN)) + if (is_assert) + kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES); + + if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert) kcsan_report(ptr, size, type, true, smp_processor_id(), KCSAN_REPORT_RACE_UNKNOWN_ORIGIN); @@ -471,7 +503,7 @@ static __always_inline void check_access(const volatile void *ptr, size_t size, &encoded_watchpoint); /* * It is safe to check kcsan_is_enabled() after find_watchpoint in the - * slow-path, as long as no state changes that cause a data race to be + * slow-path, as long as no state changes that cause a race to be * detected and reported have occurred until kcsan_is_enabled() is * checked. 
*/ diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c index bec42dab32ee8..a9dad44130e62 100644 --- a/kernel/kcsan/debugfs.c +++ b/kernel/kcsan/debugfs.c @@ -44,6 +44,7 @@ static const char *counter_to_name(enum kcsan_counter_id id) case KCSAN_COUNTER_USED_WATCHPOINTS: return "used_watchpoints"; case KCSAN_COUNTER_SETUP_WATCHPOINTS: return "setup_watchpoints"; case KCSAN_COUNTER_DATA_RACES: return "data_races"; + case KCSAN_COUNTER_ASSERT_FAILURES: return "assert_failures"; case KCSAN_COUNTER_NO_CAPACITY: return "no_capacity"; case KCSAN_COUNTER_REPORT_RACES: return "report_races"; case KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN: return "races_unknown_origin"; diff --git a/kernel/kcsan/kcsan.h b/kernel/kcsan/kcsan.h index 8492da45494bf..50078e7d43c32 100644 --- a/kernel/kcsan/kcsan.h +++ b/kernel/kcsan/kcsan.h @@ -39,6 +39,13 @@ enum kcsan_counter_id { */ KCSAN_COUNTER_DATA_RACES, + /* + * Total number of ASSERT failures due to races. If the observed race is + * due to two conflicting ASSERT type accesses, then both will be + * counted. + */ + KCSAN_COUNTER_ASSERT_FAILURES, + /* * Number of times no watchpoints were available. */ diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c index 7cd34285df740..3bc590e6be7e3 100644 --- a/kernel/kcsan/report.c +++ b/kernel/kcsan/report.c @@ -34,11 +34,11 @@ static struct { } other_info = { .ptr = NULL }; /* - * Information about reported data races; used to rate limit reporting. + * Information about reported races; used to rate limit reporting. */ struct report_time { /* - * The last time the data race was reported. + * The last time the race was reported. */ unsigned long time; @@ -57,7 +57,7 @@ struct report_time { * * Therefore, we use a fixed-size array, which at most will occupy a page. This * still adequately rate limits reports, assuming that a) number of unique data - * races is not excessive, and b) occurrence of unique data races within the + * races is not excessive, and b) occurrence of unique races within the * same time window is limited. */ #define REPORT_TIMES_MAX (PAGE_SIZE / sizeof(struct report_time)) @@ -74,7 +74,7 @@ static struct report_time report_times[REPORT_TIMES_SIZE]; static DEFINE_SPINLOCK(report_lock); /* - * Checks if the data race identified by thread frames frame1 and frame2 has + * Checks if the race identified by thread frames frame1 and frame2 has * been reported since (now - KCSAN_REPORT_ONCE_IN_MS). */ static bool rate_limit_report(unsigned long frame1, unsigned long frame2) @@ -90,7 +90,7 @@ static bool rate_limit_report(unsigned long frame1, unsigned long frame2) invalid_before = jiffies - msecs_to_jiffies(CONFIG_KCSAN_REPORT_ONCE_IN_MS); - /* Check if a matching data race report exists. */ + /* Check if a matching race report exists. */ for (i = 0; i < REPORT_TIMES_SIZE; ++i) { struct report_time *rt = &report_times[i]; @@ -114,7 +114,7 @@ static bool rate_limit_report(unsigned long frame1, unsigned long frame2) if (time_before(rt->time, invalid_before)) continue; /* before KCSAN_REPORT_ONCE_IN_MS ago */ - /* Reported recently, check if data race matches. */ + /* Reported recently, check if race matches. */ if ((rt->frame1 == frame1 && rt->frame2 == frame2) || (rt->frame1 == frame2 && rt->frame2 == frame1)) return true; @@ -142,11 +142,12 @@ skip_report(bool value_change, unsigned long top_frame) * 3. write watchpoint, conflicting write (value_change==true): report; * 4. write watchpoint, conflicting write (value_change==false): skip; * 5. 
write watchpoint, conflicting read (value_change==false): skip; - * 6. write watchpoint, conflicting read (value_change==true): impossible; + * 6. write watchpoint, conflicting read (value_change==true): report; * * Cases 1-4 are intuitive and expected; case 5 ensures we do not report - * data races where the write may have rewritten the same value; and - * case 6 is simply impossible. + * data races where the write may have rewritten the same value; case 6 + * is possible either if the size is larger than what we check value + * changes for or the access type is KCSAN_ACCESS_ASSERT. */ if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY) && !value_change) { /* @@ -178,11 +179,27 @@ static const char *get_access_type(int type) return "write"; case KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC: return "write (marked)"; + + /* + * ASSERT variants: + */ + case KCSAN_ACCESS_ASSERT: + case KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_ATOMIC: + return "assert no writes"; + case KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE: + case KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC: + return "assert no accesses"; + default: BUG(); } } +static const char *get_bug_type(int type) +{ + return (type & KCSAN_ACCESS_ASSERT) != 0 ? "assert: race" : "data-race"; +} + /* Return thread description: in task or interrupt. */ static const char *get_thread_desc(int task_id) { @@ -268,13 +285,15 @@ static bool print_report(const volatile void *ptr, size_t size, int access_type, * Do not print offset of functions to keep title short. */ cmp = sym_strcmp((void *)other_frame, (void *)this_frame); - pr_err("BUG: KCSAN: data-race in %ps / %ps\n", + pr_err("BUG: KCSAN: %s in %ps / %ps\n", + get_bug_type(access_type | other_info.access_type), (void *)(cmp < 0 ? other_frame : this_frame), (void *)(cmp < 0 ? this_frame : other_frame)); } break; case KCSAN_REPORT_RACE_UNKNOWN_ORIGIN: - pr_err("BUG: KCSAN: data-race in %pS\n", (void *)this_frame); + pr_err("BUG: KCSAN: %s in %pS\n", get_bug_type(access_type), + (void *)this_frame); break; default: @@ -427,7 +446,7 @@ void kcsan_report(const volatile void *ptr, size_t size, int access_type, /* * With TRACE_IRQFLAGS, lockdep's IRQ trace state becomes corrupted if * we do not turn off lockdep here; this could happen due to recursion - * into lockdep via KCSAN if we detect a data race in utilities used by + * into lockdep via KCSAN if we detect a race in utilities used by * lockdep. */ lockdep_off(); diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan index 9785bbf9a1d11..f0b791143c6ab 100644 --- a/lib/Kconfig.kcsan +++ b/lib/Kconfig.kcsan @@ -4,13 +4,17 @@ config HAVE_ARCH_KCSAN bool menuconfig KCSAN - bool "KCSAN: dynamic data race detector" + bool "KCSAN: dynamic race detector" depends on HAVE_ARCH_KCSAN && DEBUG_KERNEL && !KASAN select STACKTRACE help - The Kernel Concurrency Sanitizer (KCSAN) is a dynamic data race - detector, which relies on compile-time instrumentation, and uses a - watchpoint-based sampling approach to detect data races. + The Kernel Concurrency Sanitizer (KCSAN) is a dynamic race detector, + which relies on compile-time instrumentation, and uses a + watchpoint-based sampling approach to detect races. + + KCSAN's primary purpose is to detect data races. KCSAN can also be + used to check properties, with the help of provided assertions, of + concurrent code where bugs do not manifest as data races. See for more details. @@ -85,14 +89,14 @@ config KCSAN_SKIP_WATCH_RANDOMIZE KCSAN_WATCH_SKIP. 
config KCSAN_REPORT_ONCE_IN_MS - int "Duration in milliseconds, in which any given data race is only reported once" + int "Duration in milliseconds, in which any given race is only reported once" default 3000 help - Any given data race is only reported once in the defined time window. - Different data races may still generate reports within a duration - that is smaller than the duration defined here. This allows rate - limiting reporting to avoid flooding the console with reports. - Setting this to 0 disables rate limiting. + Any given race is only reported once in the defined time window. + Different races may still generate reports within a duration that is + smaller than the duration defined here. This allows rate limiting + reporting to avoid flooding the console with reports. Setting this + to 0 disables rate limiting. # The main purpose of the below options is to control reported data races (e.g. # in fuzzer configs), and are not expected to be switched frequently by other -- GitLab From f97f713dc25714ac13f3b5abdeffce2da3f6fe70 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Thu, 6 Feb 2020 16:46:25 +0100 Subject: [PATCH 0036/1971] kcsan: Introduce ASSERT_EXCLUSIVE_*() macros Introduces ASSERT_EXCLUSIVE_WRITER() and ASSERT_EXCLUSIVE_ACCESS(), which may be used to assert properties of synchronization logic, where violation cannot be detected as a normal data race. Examples of the reports that may be generated: ================================================================== BUG: KCSAN: assert: race in test_thread / test_thread write to 0xffffffffab3d1540 of 8 bytes by task 466 on cpu 2: test_thread+0x8d/0x111 debugfs_write.cold+0x32/0x44 ... assert no writes to 0xffffffffab3d1540 of 8 bytes by task 464 on cpu 0: test_thread+0xa3/0x111 debugfs_write.cold+0x32/0x44 ... ================================================================== ================================================================== BUG: KCSAN: assert: race in test_thread / test_thread assert no accesses to 0xffffffffab3d1540 of 8 bytes by task 465 on cpu 1: test_thread+0xb9/0x111 debugfs_write.cold+0x32/0x44 ... read to 0xffffffffab3d1540 of 8 bytes by task 464 on cpu 0: test_thread+0x77/0x111 debugfs_write.cold+0x32/0x44 ... ================================================================== Suggested-by: Paul E. McKenney Signed-off-by: Marco Elver Signed-off-by: Paul E. McKenney Signed-off-by: Ingo Molnar --- include/linux/kcsan-checks.h | 40 ++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h index 5dcadc221026e..cf6961794e9a1 100644 --- a/include/linux/kcsan-checks.h +++ b/include/linux/kcsan-checks.h @@ -96,4 +96,44 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size, kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE) #endif +/** + * ASSERT_EXCLUSIVE_WRITER - assert no other threads are writing @var + * + * Assert that there are no other threads writing @var; other readers are + * allowed. This assertion can be used to specify properties of concurrent code, + * where violation cannot be detected as a normal data race. + * + * For example, if a per-CPU variable is only meant to be written by a single + * CPU, but may be read from other CPUs; in this case, reads and writes must be + * marked properly, however, if an off-CPU WRITE_ONCE() races with the owning + * CPU's WRITE_ONCE(), would not constitute a data race but could be a harmful + * race condition. 
Using this macro allows specifying this property in the code + * and catch such bugs. + * + * @var variable to assert on + */ +#define ASSERT_EXCLUSIVE_WRITER(var) \ + __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT) + +/** + * ASSERT_EXCLUSIVE_ACCESS - assert no other threads are accessing @var + * + * Assert that no other thread is accessing @var (no readers nor writers). This + * assertion can be used to specify properties of concurrent code, where + * violation cannot be detected as a normal data race. + * + * For example, in a reference-counting algorithm where exclusive access is + * expected after the refcount reaches 0. We can check that this property + * actually holds as follows: + * + * if (refcount_dec_and_test(&obj->refcnt)) { + * ASSERT_EXCLUSIVE_ACCESS(*obj); + * safely_dispose_of(obj); + * } + * + * @var variable to assert on + */ +#define ASSERT_EXCLUSIVE_ACCESS(var) \ + __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT) + #endif /* _LINUX_KCSAN_CHECKS_H */ -- GitLab From a312013578e4775003689e31c1f487df11f362a3 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Thu, 6 Feb 2020 16:46:26 +0100 Subject: [PATCH 0037/1971] kcsan: Add test to generate conflicts via debugfs Add 'test=' option to KCSAN's debugfs interface to invoke KCSAN checks on a dummy variable. By writing 'test=' to the debugfs file from multiple tasks, we can generate real conflicts, and trigger data race reports. Signed-off-by: Marco Elver Signed-off-by: Paul E. McKenney Signed-off-by: Ingo Molnar --- kernel/kcsan/debugfs.c | 51 +++++++++++++++++++++++++++++++++++++----- 1 file changed, 46 insertions(+), 5 deletions(-) diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c index a9dad44130e62..9bbba0e57c9b3 100644 --- a/kernel/kcsan/debugfs.c +++ b/kernel/kcsan/debugfs.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -69,9 +70,9 @@ void kcsan_counter_dec(enum kcsan_counter_id id) /* * The microbenchmark allows benchmarking KCSAN core runtime only. To run * multiple threads, pipe 'microbench=' from multiple tasks into the - * debugfs file. + * debugfs file. This will not generate any conflicts, and tests fast-path only. */ -static void microbenchmark(unsigned long iters) +static noinline void microbenchmark(unsigned long iters) { cycles_t cycles; @@ -81,18 +82,52 @@ static void microbenchmark(unsigned long iters) while (iters--) { /* * We can run this benchmark from multiple tasks; this address - * calculation increases likelyhood of some accesses overlapping - * (they still won't conflict because all are reads). + * calculation increases likelyhood of some accesses + * overlapping. Make the access type an atomic read, to never + * set up watchpoints and test the fast-path only. */ unsigned long addr = iters % (CONFIG_KCSAN_NUM_WATCHPOINTS * PAGE_SIZE); - __kcsan_check_read((void *)addr, sizeof(long)); + __kcsan_check_access((void *)addr, sizeof(long), KCSAN_ACCESS_ATOMIC); } cycles = get_cycles() - cycles; pr_info("KCSAN: %s end | cycles: %llu\n", __func__, cycles); } +/* + * Simple test to create conflicting accesses. Write 'test=' to KCSAN's + * debugfs file from multiple tasks to generate real conflicts and show reports. + */ +static long test_dummy; +static noinline void test_thread(unsigned long iters) +{ + const struct kcsan_ctx ctx_save = current->kcsan_ctx; + cycles_t cycles; + + /* We may have been called from an atomic region; reset context. 
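(Illustration, not part of the patch; names are hypothetical. In ordinary code, the new macros express synchronization rules that cannot be checked as plain data races: here, 'hits' may be read locklessly, but must only be written under 'stats_lock', so any concurrent writer observed inside the critical section is a bug.)

#include <linux/compiler.h>
#include <linux/kcsan-checks.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(stats_lock);
static unsigned long hits;	/* written under stats_lock, read locklessly */

static void stats_inc(void)
{
	spin_lock(&stats_lock);
	ASSERT_EXCLUSIVE_WRITER(hits);	/* a concurrent writer indicates a locking bug */
	WRITE_ONCE(hits, hits + 1);
	spin_unlock(&stats_lock);
}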
*/ + memset(¤t->kcsan_ctx, 0, sizeof(current->kcsan_ctx)); + + pr_info("KCSAN: %s begin | iters: %lu\n", __func__, iters); + + cycles = get_cycles(); + while (iters--) { + __kcsan_check_read(&test_dummy, sizeof(test_dummy)); + __kcsan_check_write(&test_dummy, sizeof(test_dummy)); + ASSERT_EXCLUSIVE_WRITER(test_dummy); + ASSERT_EXCLUSIVE_ACCESS(test_dummy); + + /* not actually instrumented */ + WRITE_ONCE(test_dummy, iters); /* to observe value-change */ + } + cycles = get_cycles() - cycles; + + pr_info("KCSAN: %s end | cycles: %llu\n", __func__, cycles); + + /* restore context */ + current->kcsan_ctx = ctx_save; +} + static int cmp_filterlist_addrs(const void *rhs, const void *lhs) { const unsigned long a = *(const unsigned long *)rhs; @@ -242,6 +277,12 @@ debugfs_write(struct file *file, const char __user *buf, size_t count, loff_t *o if (kstrtoul(&arg[sizeof("microbench=") - 1], 0, &iters)) return -EINVAL; microbenchmark(iters); + } else if (!strncmp(arg, "test=", sizeof("test=") - 1)) { + unsigned long iters; + + if (kstrtoul(&arg[sizeof("test=") - 1], 0, &iters)) + return -EINVAL; + test_thread(iters); } else if (!strcmp(arg, "whitelist")) { set_report_filterlist_whitelist(true); } else if (!strcmp(arg, "blacklist")) { -- GitLab From 80d4c4775216602ccdc9e761ce251c8451d0c6ca Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Fri, 7 Feb 2020 19:59:10 +0100 Subject: [PATCH 0038/1971] kcsan: Expose core configuration parameters as module params This adds early_boot, udelay_{task,interrupt}, and skip_watch as module params. The latter parameters are useful to modify at runtime to tune KCSAN's performance on new systems. This will also permit auto-tuning these parameters to maximize overall system performance and KCSAN's race detection ability. None of the parameters are used in the fast-path and referring to them via static variables instead of CONFIG constants will not affect performance. Signed-off-by: Marco Elver Signed-off-by: Paul E. McKenney Signed-off-by: Ingo Molnar Cc: Qian Cai --- kernel/kcsan/core.c | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c index 87ef01e40199d..498b1eb3c1cda 100644 --- a/kernel/kcsan/core.c +++ b/kernel/kcsan/core.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -16,6 +17,20 @@ #include "encoding.h" #include "kcsan.h" +static bool kcsan_early_enable = IS_ENABLED(CONFIG_KCSAN_EARLY_ENABLE); +static unsigned int kcsan_udelay_task = CONFIG_KCSAN_UDELAY_TASK; +static unsigned int kcsan_udelay_interrupt = CONFIG_KCSAN_UDELAY_INTERRUPT; +static long kcsan_skip_watch = CONFIG_KCSAN_SKIP_WATCH; + +#ifdef MODULE_PARAM_PREFIX +#undef MODULE_PARAM_PREFIX +#endif +#define MODULE_PARAM_PREFIX "kcsan." +module_param_named(early_enable, kcsan_early_enable, bool, 0); +module_param_named(udelay_task, kcsan_udelay_task, uint, 0644); +module_param_named(udelay_interrupt, kcsan_udelay_interrupt, uint, 0644); +module_param_named(skip_watch, kcsan_skip_watch, long, 0644); + bool kcsan_enabled; /* Per-CPU kcsan_ctx for interrupts */ @@ -239,9 +254,9 @@ should_watch(const volatile void *ptr, size_t size, int type) static inline void reset_kcsan_skip(void) { - long skip_count = CONFIG_KCSAN_SKIP_WATCH - + long skip_count = kcsan_skip_watch - (IS_ENABLED(CONFIG_KCSAN_SKIP_WATCH_RANDOMIZE) ? 
- prandom_u32_max(CONFIG_KCSAN_SKIP_WATCH) : + prandom_u32_max(kcsan_skip_watch) : 0); this_cpu_write(kcsan_skip, skip_count); } @@ -253,8 +268,7 @@ static __always_inline bool kcsan_is_enabled(void) static inline unsigned int get_delay(void) { - unsigned int delay = in_task() ? CONFIG_KCSAN_UDELAY_TASK : - CONFIG_KCSAN_UDELAY_INTERRUPT; + unsigned int delay = in_task() ? kcsan_udelay_task : kcsan_udelay_interrupt; return delay - (IS_ENABLED(CONFIG_KCSAN_DELAY_RANDOMIZE) ? prandom_u32_max(delay) : 0); @@ -527,7 +541,7 @@ void __init kcsan_init(void) * We are in the init task, and no other tasks should be running; * WRITE_ONCE without memory barrier is sufficient. */ - if (IS_ENABLED(CONFIG_KCSAN_EARLY_ENABLE)) + if (kcsan_early_enable) WRITE_ONCE(kcsan_enabled, true); } -- GitLab From 3a5b45e5031fa395ab6e53545fb726b2d5b104f0 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Mon, 10 Feb 2020 15:56:39 +0100 Subject: [PATCH 0039/1971] kcsan: Fix misreporting if concurrent races on same address If there are at least 4 threads racing on the same address, it can happen that one of the readers may observe another matching reader in other_info. To avoid locking up, we have to consume 'other_info' regardless, but skip the report. See the added comment for more details. Signed-off-by: Marco Elver Signed-off-by: Paul E. McKenney Signed-off-by: Ingo Molnar --- kernel/kcsan/report.c | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c index 3bc590e6be7e3..abf6852dff72f 100644 --- a/kernel/kcsan/report.c +++ b/kernel/kcsan/report.c @@ -422,6 +422,44 @@ retry: return false; } + access_type |= other_info.access_type; + if ((access_type & KCSAN_ACCESS_WRITE) == 0) { + /* + * While the address matches, this is not the other_info + * from the thread that consumed our watchpoint, since + * neither this nor the access in other_info is a write. + * It is invalid to continue with the report, since we + * only have information about reads. + * + * This can happen due to concurrent races on the same + * address, with at least 4 threads. To avoid locking up + * other_info and all other threads, we have to consume + * it regardless. + * + * A concrete case to illustrate why we might lock up if + * we do not consume other_info: + * + * We have 4 threads, all accessing the same address + * (or matching address ranges). Assume the following + * watcher and watchpoint consumer pairs: + * write1-read1, read2-write2. The first to populate + * other_info is write2, however, write1 consumes it, + * resulting in a report of write1-write2. This report + * is valid, however, now read1 populates other_info; + * read2-read1 is an invalid conflict, yet, no other + * conflicting access is left. Therefore, we must + * consume read1's other_info. + * + * Since this case is assumed to be rare, it is + * reasonable to omit this report: one of the other + * reports includes information about the same shared + * data, and at this point the likelihood that we + * re-report the same race again is high. 
+ */ + release_report(flags, KCSAN_REPORT_RACE_SIGNAL); + return false; + } + /* * Matching & usable access in other_info: keep other_info_lock * locked, as this thread consumes it to print the full report; -- GitLab From f0f6928c2c4c19ab6171d4f468f542fac1888a8f Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Tue, 11 Feb 2020 17:04:19 +0100 Subject: [PATCH 0040/1971] kcsan: Move interfaces that affects checks to kcsan-checks.h This moves functions that affect state changing the behaviour of kcsan_check_access() to kcsan-checks.h. Since these are likely used with kcsan_check_access() it makes more sense to have them in kcsan-checks.h, to avoid including all of 'include/linux/kcsan.h'. No functional change intended. Acked-by: John Hubbard Signed-off-by: Marco Elver Signed-off-by: Paul E. McKenney Signed-off-by: Ingo Molnar --- include/linux/kcsan-checks.h | 48 ++++++++++++++++++++++++++++++++++-- include/linux/kcsan.h | 41 ------------------------------ 2 files changed, 46 insertions(+), 43 deletions(-) diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h index cf6961794e9a1..8675411c8dbcd 100644 --- a/include/linux/kcsan-checks.h +++ b/include/linux/kcsan-checks.h @@ -32,10 +32,54 @@ */ void __kcsan_check_access(const volatile void *ptr, size_t size, int type); -#else +/** + * kcsan_nestable_atomic_begin - begin nestable atomic region + * + * Accesses within the atomic region may appear to race with other accesses but + * should be considered atomic. + */ +void kcsan_nestable_atomic_begin(void); + +/** + * kcsan_nestable_atomic_end - end nestable atomic region + */ +void kcsan_nestable_atomic_end(void); + +/** + * kcsan_flat_atomic_begin - begin flat atomic region + * + * Accesses within the atomic region may appear to race with other accesses but + * should be considered atomic. + */ +void kcsan_flat_atomic_begin(void); + +/** + * kcsan_flat_atomic_end - end flat atomic region + */ +void kcsan_flat_atomic_end(void); + +/** + * kcsan_atomic_next - consider following accesses as atomic + * + * Force treating the next n memory accesses for the current context as atomic + * operations. + * + * @n number of following memory accesses to treat as atomic. + */ +void kcsan_atomic_next(int n); + +#else /* CONFIG_KCSAN */ + static inline void __kcsan_check_access(const volatile void *ptr, size_t size, int type) { } -#endif + +static inline void kcsan_nestable_atomic_begin(void) { } +static inline void kcsan_nestable_atomic_end(void) { } +static inline void kcsan_flat_atomic_begin(void) { } +static inline void kcsan_flat_atomic_end(void) { } +static inline void kcsan_atomic_next(int n) { } + +#endif /* CONFIG_KCSAN */ /* * kcsan_*: Only calls into the runtime when the particular compilation unit has diff --git a/include/linux/kcsan.h b/include/linux/kcsan.h index 1019e3a2c6897..7a614ca558f65 100644 --- a/include/linux/kcsan.h +++ b/include/linux/kcsan.h @@ -56,52 +56,11 @@ void kcsan_disable_current(void); */ void kcsan_enable_current(void); -/** - * kcsan_nestable_atomic_begin - begin nestable atomic region - * - * Accesses within the atomic region may appear to race with other accesses but - * should be considered atomic. - */ -void kcsan_nestable_atomic_begin(void); - -/** - * kcsan_nestable_atomic_end - end nestable atomic region - */ -void kcsan_nestable_atomic_end(void); - -/** - * kcsan_flat_atomic_begin - begin flat atomic region - * - * Accesses within the atomic region may appear to race with other accesses but - * should be considered atomic. 
- */ -void kcsan_flat_atomic_begin(void); - -/** - * kcsan_flat_atomic_end - end flat atomic region - */ -void kcsan_flat_atomic_end(void); - -/** - * kcsan_atomic_next - consider following accesses as atomic - * - * Force treating the next n memory accesses for the current context as atomic - * operations. - * - * @n number of following memory accesses to treat as atomic. - */ -void kcsan_atomic_next(int n); - #else /* CONFIG_KCSAN */ static inline void kcsan_init(void) { } static inline void kcsan_disable_current(void) { } static inline void kcsan_enable_current(void) { } -static inline void kcsan_nestable_atomic_begin(void) { } -static inline void kcsan_nestable_atomic_end(void) { } -static inline void kcsan_flat_atomic_begin(void) { } -static inline void kcsan_flat_atomic_end(void) { } -static inline void kcsan_atomic_next(int n) { } #endif /* CONFIG_KCSAN */ -- GitLab From b968a08f242d51982e46041c506115b5e11a7570 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Tue, 11 Feb 2020 17:04:20 +0100 Subject: [PATCH 0041/1971] compiler.h, seqlock.h: Remove unnecessary kcsan.h includes We no longer have to include kcsan.h, since the required KCSAN interfaces for both compiler.h and seqlock.h are now provided by kcsan-checks.h. Acked-by: John Hubbard Signed-off-by: Marco Elver Signed-off-by: Paul E. McKenney Signed-off-by: Ingo Molnar --- include/linux/compiler.h | 2 -- include/linux/seqlock.h | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/include/linux/compiler.h b/include/linux/compiler.h index c1bdf37571cb8..f504edebd5d71 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -313,8 +313,6 @@ unsigned long read_word_at_a_time(const void *addr) __u.__val; \ }) -#include - /** * data_race - mark an expression as containing intentional data races * diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 239701cae3764..8b97204f35a77 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -37,7 +37,7 @@ #include #include #include -#include +#include #include /* -- GitLab From b738f6169f1260b4ed5bd9f220b1c84d79f3ab8d Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Tue, 11 Feb 2020 17:04:21 +0100 Subject: [PATCH 0042/1971] kcsan: Introduce kcsan_value_change type Introduces kcsan_value_change type, which explicitly points out if we either observed a value-change (TRUE), or we could not observe one but cannot rule out a value-change happened (MAYBE). The MAYBE state can either be reported or not, depending on configuration preferences. A follow-up patch introduces the FALSE state, which should never be reported. No functional change intended. Acked-by: John Hubbard Signed-off-by: Marco Elver Signed-off-by: Paul E.
McKenney Signed-off-by: Ingo Molnar --- kernel/kcsan/core.c | 38 ++++++++++++++++++++++---------------- kernel/kcsan/kcsan.h | 19 ++++++++++++++++++- kernel/kcsan/report.c | 26 ++++++++++++++------------ 3 files changed, 54 insertions(+), 29 deletions(-) diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c index 498b1eb3c1cda..3f89801161d33 100644 --- a/kernel/kcsan/core.c +++ b/kernel/kcsan/core.c @@ -341,7 +341,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type) u32 _4; u64 _8; } expect_value; - bool value_change = false; + enum kcsan_value_change value_change = KCSAN_VALUE_CHANGE_MAYBE; unsigned long ua_flags = user_access_save(); unsigned long irq_flags; @@ -398,6 +398,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type) * Read the current value, to later check and infer a race if the data * was modified via a non-instrumented access, e.g. from a device. */ + expect_value._8 = 0; switch (size) { case 1: expect_value._1 = READ_ONCE(*(const u8 *)ptr); @@ -436,23 +437,36 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type) */ switch (size) { case 1: - value_change = expect_value._1 != READ_ONCE(*(const u8 *)ptr); + expect_value._1 ^= READ_ONCE(*(const u8 *)ptr); break; case 2: - value_change = expect_value._2 != READ_ONCE(*(const u16 *)ptr); + expect_value._2 ^= READ_ONCE(*(const u16 *)ptr); break; case 4: - value_change = expect_value._4 != READ_ONCE(*(const u32 *)ptr); + expect_value._4 ^= READ_ONCE(*(const u32 *)ptr); break; case 8: - value_change = expect_value._8 != READ_ONCE(*(const u64 *)ptr); + expect_value._8 ^= READ_ONCE(*(const u64 *)ptr); break; default: break; /* ignore; we do not diff the values */ } + /* Were we able to observe a value-change? */ + if (expect_value._8 != 0) + value_change = KCSAN_VALUE_CHANGE_TRUE; + /* Check if this access raced with another. */ if (!remove_watchpoint(watchpoint)) { + /* + * Depending on the access type, map a value_change of MAYBE to + * TRUE (require reporting). + */ + if (value_change == KCSAN_VALUE_CHANGE_MAYBE && (size > 8 || is_assert)) { + /* Always assume a value-change. */ + value_change = KCSAN_VALUE_CHANGE_TRUE; + } + /* * No need to increment 'data_races' counter, as the racing * thread already did. @@ -461,20 +475,12 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type) * therefore both this thread and the racing thread may * increment this counter. */ - if (is_assert) + if (is_assert && value_change == KCSAN_VALUE_CHANGE_TRUE) kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES); - /* - * - If we were not able to observe a value change due to size - * constraints, always assume a value change. - * - If the access type is an assertion, we also always assume a - * value change to always report the race. - */ - value_change = value_change || size > 8 || is_assert; - kcsan_report(ptr, size, type, value_change, smp_processor_id(), KCSAN_REPORT_RACE_SIGNAL); - } else if (value_change) { + } else if (value_change == KCSAN_VALUE_CHANGE_TRUE) { /* Inferring a race, since the value should not have changed. 
*/ kcsan_counter_inc(KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN); @@ -482,7 +488,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type) kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES); if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert) - kcsan_report(ptr, size, type, true, + kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_TRUE, smp_processor_id(), KCSAN_REPORT_RACE_UNKNOWN_ORIGIN); } diff --git a/kernel/kcsan/kcsan.h b/kernel/kcsan/kcsan.h index 50078e7d43c32..83a79b08b550e 100644 --- a/kernel/kcsan/kcsan.h +++ b/kernel/kcsan/kcsan.h @@ -88,6 +88,22 @@ extern void kcsan_counter_dec(enum kcsan_counter_id id); */ extern bool kcsan_skip_report_debugfs(unsigned long func_addr); +/* + * Value-change states. + */ +enum kcsan_value_change { + /* + * Did not observe a value-change, however, it is valid to report the + * race, depending on preferences. + */ + KCSAN_VALUE_CHANGE_MAYBE, + + /* + * The value was observed to change, and the race should be reported. + */ + KCSAN_VALUE_CHANGE_TRUE, +}; + enum kcsan_report_type { /* * The thread that set up the watchpoint and briefly stalled was @@ -111,6 +127,7 @@ enum kcsan_report_type { * Print a race report from thread that encountered the race. */ extern void kcsan_report(const volatile void *ptr, size_t size, int access_type, - bool value_change, int cpu_id, enum kcsan_report_type type); + enum kcsan_value_change value_change, int cpu_id, + enum kcsan_report_type type); #endif /* _KERNEL_KCSAN_KCSAN_H */ diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c index abf6852dff72f..d871476dc1348 100644 --- a/kernel/kcsan/report.c +++ b/kernel/kcsan/report.c @@ -130,26 +130,27 @@ static bool rate_limit_report(unsigned long frame1, unsigned long frame2) * Special rules to skip reporting. */ static bool -skip_report(bool value_change, unsigned long top_frame) +skip_report(enum kcsan_value_change value_change, unsigned long top_frame) { /* - * The first call to skip_report always has value_change==true, since we + * The first call to skip_report always has value_change==TRUE, since we * cannot know the value written of an instrumented access. For the 2nd * call there are 6 cases with CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY: * - * 1. read watchpoint, conflicting write (value_change==true): report; - * 2. read watchpoint, conflicting write (value_change==false): skip; - * 3. write watchpoint, conflicting write (value_change==true): report; - * 4. write watchpoint, conflicting write (value_change==false): skip; - * 5. write watchpoint, conflicting read (value_change==false): skip; - * 6. write watchpoint, conflicting read (value_change==true): report; + * 1. read watchpoint, conflicting write (value_change==TRUE): report; + * 2. read watchpoint, conflicting write (value_change==MAYBE): skip; + * 3. write watchpoint, conflicting write (value_change==TRUE): report; + * 4. write watchpoint, conflicting write (value_change==MAYBE): skip; + * 5. write watchpoint, conflicting read (value_change==MAYBE): skip; + * 6. write watchpoint, conflicting read (value_change==TRUE): report; * * Cases 1-4 are intuitive and expected; case 5 ensures we do not report * data races where the write may have rewritten the same value; case 6 * is possible either if the size is larger than what we check value * changes for or the access type is KCSAN_ACCESS_ASSERT. 
*/ - if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY) && !value_change) { + if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY) && + value_change == KCSAN_VALUE_CHANGE_MAYBE) { /* * The access is a write, but the data value did not change. * @@ -245,7 +246,7 @@ static int sym_strcmp(void *addr1, void *addr2) * Returns true if a report was generated, false otherwise. */ static bool print_report(const volatile void *ptr, size_t size, int access_type, - bool value_change, int cpu_id, + enum kcsan_value_change value_change, int cpu_id, enum kcsan_report_type type) { unsigned long stack_entries[NUM_STACK_ENTRIES] = { 0 }; @@ -258,7 +259,7 @@ static bool print_report(const volatile void *ptr, size_t size, int access_type, /* * Must check report filter rules before starting to print. */ - if (skip_report(true, stack_entries[skipnr])) + if (skip_report(KCSAN_VALUE_CHANGE_TRUE, stack_entries[skipnr])) return false; if (type == KCSAN_REPORT_RACE_SIGNAL) { @@ -477,7 +478,8 @@ retry: } void kcsan_report(const volatile void *ptr, size_t size, int access_type, - bool value_change, int cpu_id, enum kcsan_report_type type) + enum kcsan_value_change value_change, int cpu_id, + enum kcsan_report_type type) { unsigned long flags = 0; -- GitLab From 81af89e15862909881ff010a0adb67148487e88a Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Tue, 11 Feb 2020 17:04:22 +0100 Subject: [PATCH 0043/1971] kcsan: Add kcsan_set_access_mask() support When setting up an access mask with kcsan_set_access_mask(), KCSAN will only report races if concurrent changes to bits set in access_mask are observed. Conveying access_mask via a separate call avoids introducing overhead in the common-case fast-path. Acked-by: John Hubbard Signed-off-by: Marco Elver Signed-off-by: Paul E. McKenney Signed-off-by: Ingo Molnar --- include/linux/kcsan-checks.h | 11 +++++++++ include/linux/kcsan.h | 5 +++++ init/init_task.c | 1 + kernel/kcsan/core.c | 43 ++++++++++++++++++++++++++++++++---- kernel/kcsan/kcsan.h | 5 +++++ kernel/kcsan/report.c | 13 ++++++++++- 6 files changed, 73 insertions(+), 5 deletions(-) diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h index 8675411c8dbcd..4ef5233ff3f04 100644 --- a/include/linux/kcsan-checks.h +++ b/include/linux/kcsan-checks.h @@ -68,6 +68,16 @@ void kcsan_flat_atomic_end(void); */ void kcsan_atomic_next(int n); +/** + * kcsan_set_access_mask - set access mask + * + * Set the access mask for all accesses for the current context if non-zero. + * Only value changes to bits set in the mask will be reported. + * + * @mask bitmask + */ +void kcsan_set_access_mask(unsigned long mask); + #else /* CONFIG_KCSAN */ static inline void __kcsan_check_access(const volatile void *ptr, size_t size, @@ -78,6 +88,7 @@ static inline void kcsan_nestable_atomic_end(void) { } static inline void kcsan_flat_atomic_begin(void) { } static inline void kcsan_flat_atomic_end(void) { } static inline void kcsan_atomic_next(int n) { } +static inline void kcsan_set_access_mask(unsigned long mask) { } #endif /* CONFIG_KCSAN */ diff --git a/include/linux/kcsan.h b/include/linux/kcsan.h index 7a614ca558f65..3b84606e1e675 100644 --- a/include/linux/kcsan.h +++ b/include/linux/kcsan.h @@ -35,6 +35,11 @@ struct kcsan_ctx { */ int atomic_nest_count; bool in_flat_atomic; + + /* + * Access mask for all accesses if non-zero. 
+ */ + unsigned long access_mask; }; /** diff --git a/init/init_task.c b/init/init_task.c index 2b4fe98b0f095..096191d177d5c 100644 --- a/init/init_task.c +++ b/init/init_task.c @@ -167,6 +167,7 @@ struct task_struct init_task .atomic_next = 0, .atomic_nest_count = 0, .in_flat_atomic = false, + .access_mask = 0, }, #endif #ifdef CONFIG_TRACE_IRQFLAGS diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c index 3f89801161d33..589b1e7f0f253 100644 --- a/kernel/kcsan/core.c +++ b/kernel/kcsan/core.c @@ -39,6 +39,7 @@ static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = { .atomic_next = 0, .atomic_nest_count = 0, .in_flat_atomic = false, + .access_mask = 0, }; /* @@ -298,6 +299,15 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr, if (!kcsan_is_enabled()) return; + + /* + * The access_mask check relies on value-change comparison. To avoid + * reporting a race where e.g. the writer set up the watchpoint, but the + * reader has access_mask!=0, we have to ignore the found watchpoint. + */ + if (get_ctx()->access_mask != 0) + return; + /* * Consume the watchpoint as soon as possible, to minimize the chances * of !consumed. Consuming the watchpoint must always be guarded by @@ -341,6 +351,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type) u32 _4; u64 _8; } expect_value; + unsigned long access_mask; enum kcsan_value_change value_change = KCSAN_VALUE_CHANGE_MAYBE; unsigned long ua_flags = user_access_save(); unsigned long irq_flags; @@ -435,18 +446,27 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type) * Re-read value, and check if it is as expected; if not, we infer a * racy access. */ + access_mask = get_ctx()->access_mask; switch (size) { case 1: expect_value._1 ^= READ_ONCE(*(const u8 *)ptr); + if (access_mask) + expect_value._1 &= (u8)access_mask; break; case 2: expect_value._2 ^= READ_ONCE(*(const u16 *)ptr); + if (access_mask) + expect_value._2 &= (u16)access_mask; break; case 4: expect_value._4 ^= READ_ONCE(*(const u32 *)ptr); + if (access_mask) + expect_value._4 &= (u32)access_mask; break; case 8: expect_value._8 ^= READ_ONCE(*(const u64 *)ptr); + if (access_mask) + expect_value._8 &= (u64)access_mask; break; default: break; /* ignore; we do not diff the values */ @@ -460,11 +480,20 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type) if (!remove_watchpoint(watchpoint)) { /* * Depending on the access type, map a value_change of MAYBE to - * TRUE (require reporting). + * TRUE (always report) or FALSE (never report). */ - if (value_change == KCSAN_VALUE_CHANGE_MAYBE && (size > 8 || is_assert)) { - /* Always assume a value-change. */ - value_change = KCSAN_VALUE_CHANGE_TRUE; + if (value_change == KCSAN_VALUE_CHANGE_MAYBE) { + if (access_mask != 0) { + /* + * For access with access_mask, we require a + * value-change, as it is likely that races on + * ~access_mask bits are expected. + */ + value_change = KCSAN_VALUE_CHANGE_FALSE; + } else if (size > 8 || is_assert) { + /* Always assume a value-change. 
*/ + value_change = KCSAN_VALUE_CHANGE_TRUE; + } } /* @@ -622,6 +651,12 @@ void kcsan_atomic_next(int n) } EXPORT_SYMBOL(kcsan_atomic_next); +void kcsan_set_access_mask(unsigned long mask) +{ + get_ctx()->access_mask = mask; +} +EXPORT_SYMBOL(kcsan_set_access_mask); + void __kcsan_check_access(const volatile void *ptr, size_t size, int type) { check_access(ptr, size, type); diff --git a/kernel/kcsan/kcsan.h b/kernel/kcsan/kcsan.h index 83a79b08b550e..892de5120c1b6 100644 --- a/kernel/kcsan/kcsan.h +++ b/kernel/kcsan/kcsan.h @@ -98,6 +98,11 @@ enum kcsan_value_change { */ KCSAN_VALUE_CHANGE_MAYBE, + /* + * Did not observe a value-change, and it is invalid to report the race. + */ + KCSAN_VALUE_CHANGE_FALSE, + /* * The value was observed to change, and the race should be reported. */ diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c index d871476dc1348..11c791b886f3c 100644 --- a/kernel/kcsan/report.c +++ b/kernel/kcsan/report.c @@ -132,6 +132,9 @@ static bool rate_limit_report(unsigned long frame1, unsigned long frame2) static bool skip_report(enum kcsan_value_change value_change, unsigned long top_frame) { + /* Should never get here if value_change==FALSE. */ + WARN_ON_ONCE(value_change == KCSAN_VALUE_CHANGE_FALSE); + /* * The first call to skip_report always has value_change==TRUE, since we * cannot know the value written of an instrumented access. For the 2nd @@ -493,7 +496,15 @@ void kcsan_report(const volatile void *ptr, size_t size, int access_type, kcsan_disable_current(); if (prepare_report(&flags, ptr, size, access_type, cpu_id, type)) { - if (print_report(ptr, size, access_type, value_change, cpu_id, type) && panic_on_warn) + /* + * Never report if value_change is FALSE, only if we it is + * either TRUE or MAYBE. In case of MAYBE, further filtering may + * be done once we know the full stack trace in print_report(). + */ + bool reported = value_change != KCSAN_VALUE_CHANGE_FALSE && + print_report(ptr, size, access_type, value_change, cpu_id, type); + + if (reported && panic_on_warn) panic("panic_on_warn set ...\n"); release_report(&flags, type); -- GitLab From 703b321501c95c658275fd76775282fe45989641 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Tue, 11 Feb 2020 17:04:23 +0100 Subject: [PATCH 0044/1971] kcsan: Introduce ASSERT_EXCLUSIVE_BITS(var, mask) This introduces ASSERT_EXCLUSIVE_BITS(var, mask). ASSERT_EXCLUSIVE_BITS(var, mask) will cause KCSAN to assume that the following access is safe w.r.t. data races (however, please see the docbook comment for disclaimer here). For more context on why this was considered necessary, please see: http://lkml.kernel.org/r/1580995070-25139-1-git-send-email-cai@lca.pw In particular, before this patch, data races between reads (that use @mask bits of an access that should not be modified concurrently) and writes (that change ~@mask bits not used by the readers) would have been annotated with "data_race()" (or "READ_ONCE()"). However, doing so would then hide real problems: we would no longer be able to detect harmful races between reads to @mask bits and writes to @mask bits. Therefore, by using ASSERT_EXCLUSIVE_BITS(var, mask), we accomplish: 1. Avoid proliferation of specific macros at the call sites: by including a single mask in the argument list, we can use the same macro in a wide variety of call sites, regardless of how and which bits in a field each call site actually accesses. 2. 
The existing code does not need to be modified (although READ_ONCE() may still be advisable if we cannot prove that the data race is always safe). 3. We catch bugs where the exclusive bits are modified concurrently. 4. We document properties of the current code. Acked-by: John Hubbard Signed-off-by: Marco Elver Signed-off-by: Paul E. McKenney Signed-off-by: Ingo Molnar Cc: David Hildenbrand Cc: Jan Kara Cc: Qian Cai --- include/linux/kcsan-checks.h | 69 ++++++++++++++++++++++++++++++++---- kernel/kcsan/debugfs.c | 15 +++++++- 2 files changed, 77 insertions(+), 7 deletions(-) diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h index 4ef5233ff3f04..8f9f6e2191dc9 100644 --- a/include/linux/kcsan-checks.h +++ b/include/linux/kcsan-checks.h @@ -152,9 +152,9 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size, #endif /** - * ASSERT_EXCLUSIVE_WRITER - assert no other threads are writing @var + * ASSERT_EXCLUSIVE_WRITER - assert no concurrent writes to @var * - * Assert that there are no other threads writing @var; other readers are + * Assert that there are no concurrent writes to @var; other readers are * allowed. This assertion can be used to specify properties of concurrent code, * where violation cannot be detected as a normal data race. * @@ -171,11 +171,11 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size, __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT) /** - * ASSERT_EXCLUSIVE_ACCESS - assert no other threads are accessing @var + * ASSERT_EXCLUSIVE_ACCESS - assert no concurrent accesses to @var * - * Assert that no other thread is accessing @var (no readers nor writers). This - * assertion can be used to specify properties of concurrent code, where - * violation cannot be detected as a normal data race. + * Assert that there are no concurrent accesses to @var (no readers nor + * writers). This assertion can be used to specify properties of concurrent + * code, where violation cannot be detected as a normal data race. * * For example, in a reference-counting algorithm where exclusive access is * expected after the refcount reaches 0. We can check that this property @@ -191,4 +191,61 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size, #define ASSERT_EXCLUSIVE_ACCESS(var) \ __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT) +/** + * ASSERT_EXCLUSIVE_BITS - assert no concurrent writes to subset of bits in @var + * + * Bit-granular variant of ASSERT_EXCLUSIVE_WRITER(var). + * + * Assert that there are no concurrent writes to a subset of bits in @var; + * concurrent readers are permitted. This assertion captures more detailed + * bit-level properties, compared to the other (word granularity) assertions. + * Only the bits set in @mask are checked for concurrent modifications, while + * ignoring the remaining bits, i.e. concurrent writes (or reads) to ~@mask bits + * are ignored. + * + * Use this for variables, where some bits must not be modified concurrently, + * yet other bits are expected to be modified concurrently. + * + * For example, variables where, after initialization, some bits are read-only, + * but other bits may still be modified concurrently. 
A reader may wish to + * assert that this is true as follows: + * + * ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK); + * foo = (READ_ONCE(flags) & READ_ONLY_MASK) >> READ_ONLY_SHIFT; + * + * Note: The access that immediately follows ASSERT_EXCLUSIVE_BITS() is + * assumed to access the masked bits only, and KCSAN optimistically assumes it + * is therefore safe, even in the presence of data races, and marking it with + * READ_ONCE() is optional from KCSAN's point-of-view. We caution, however, + * that it may still be advisable to do so, since we cannot reason about all + * compiler optimizations when it comes to bit manipulations (on the reader + * and writer side). If you are sure nothing can go wrong, we can write the + * above simply as: + * + * ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK); + * foo = (flags & READ_ONLY_MASK) >> READ_ONLY_SHIFT; + * + * Another example, where this may be used, is when certain bits of @var may + * only be modified when holding the appropriate lock, but other bits may still + * be modified concurrently. Writers, where other bits may change concurrently, + * could use the assertion as follows: + * + * spin_lock(&foo_lock); + * ASSERT_EXCLUSIVE_BITS(flags, FOO_MASK); + * old_flags = READ_ONCE(flags); + * new_flags = (old_flags & ~FOO_MASK) | (new_foo << FOO_SHIFT); + * if (cmpxchg(&flags, old_flags, new_flags) != old_flags) { ... } + * spin_unlock(&foo_lock); + * + * @var variable to assert on + * @mask only check for modifications to bits set in @mask + */ +#define ASSERT_EXCLUSIVE_BITS(var, mask) \ + do { \ + kcsan_set_access_mask(mask); \ + __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT);\ + kcsan_set_access_mask(0); \ + kcsan_atomic_next(1); \ + } while (0) + #endif /* _LINUX_KCSAN_CHECKS_H */ diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c index 9bbba0e57c9b3..2ff1961239778 100644 --- a/kernel/kcsan/debugfs.c +++ b/kernel/kcsan/debugfs.c @@ -100,8 +100,10 @@ static noinline void microbenchmark(unsigned long iters) * debugfs file from multiple tasks to generate real conflicts and show reports. */ static long test_dummy; +static long test_flags; static noinline void test_thread(unsigned long iters) { + const long CHANGE_BITS = 0xff00ff00ff00ff00L; const struct kcsan_ctx ctx_save = current->kcsan_ctx; cycles_t cycles; @@ -109,16 +111,27 @@ static noinline void test_thread(unsigned long iters) memset(¤t->kcsan_ctx, 0, sizeof(current->kcsan_ctx)); pr_info("KCSAN: %s begin | iters: %lu\n", __func__, iters); + pr_info("test_dummy@%px, test_flags@%px\n", &test_dummy, &test_flags); cycles = get_cycles(); while (iters--) { + /* These all should generate reports. 
*/ __kcsan_check_read(&test_dummy, sizeof(test_dummy)); - __kcsan_check_write(&test_dummy, sizeof(test_dummy)); ASSERT_EXCLUSIVE_WRITER(test_dummy); ASSERT_EXCLUSIVE_ACCESS(test_dummy); + ASSERT_EXCLUSIVE_BITS(test_flags, ~CHANGE_BITS); /* no report */ + __kcsan_check_read(&test_flags, sizeof(test_flags)); /* no report */ + + ASSERT_EXCLUSIVE_BITS(test_flags, CHANGE_BITS); /* report */ + __kcsan_check_read(&test_flags, sizeof(test_flags)); /* no report */ + /* not actually instrumented */ WRITE_ONCE(test_dummy, iters); /* to observe value-change */ + __kcsan_check_write(&test_dummy, sizeof(test_dummy)); + + test_flags ^= CHANGE_BITS; /* generate value-change */ + __kcsan_check_write(&test_flags, sizeof(test_flags)); } cycles = get_cycles() - cycles; -- GitLab From f5d2313bd3c540be405c4977a63840cd6d0167b5 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Fri, 14 Feb 2020 22:10:35 +0100 Subject: [PATCH 0045/1971] kcsan, trace: Make KCSAN compatible with tracing Previously the system would lock up if ftrace was enabled together with KCSAN. This is due to recursion on reporting if the tracer code is instrumented with KCSAN. To avoid this for all types of tracing, disable KCSAN instrumentation for all of kernel/trace. Furthermore, since KCSAN relies on udelay() to introduce delay, we have to disable ftrace for udelay() (currently done for x86) in case KCSAN is used together with lockdep and ftrace. The reason is that it may corrupt lockdep IRQ flags tracing state due to a peculiar case of recursion (details in Makefile comment). Reported-by: Qian Cai Tested-by: Qian Cai Acked-by: Steven Rostedt (VMware) Signed-off-by: Marco Elver Signed-off-by: Paul E. McKenney Signed-off-by: Ingo Molnar --- arch/x86/lib/Makefile | 5 +++++ kernel/kcsan/Makefile | 2 ++ kernel/trace/Makefile | 3 +++ 3 files changed, 10 insertions(+) diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index 432a077056775..6110bce7237bd 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile @@ -8,6 +8,11 @@ KCOV_INSTRUMENT_delay.o := n # KCSAN uses udelay for introducing watchpoint delay; avoid recursion. KCSAN_SANITIZE_delay.o := n +ifdef CONFIG_KCSAN +# In case KCSAN+lockdep+ftrace are enabled, disable ftrace for delay.o to avoid +# lockdep -> [other libs] -> KCSAN -> udelay -> ftrace -> lockdep recursion. +CFLAGS_REMOVE_delay.o = $(CC_FLAGS_FTRACE) +endif # Early boot use of cmdline; don't instrument it ifdef CONFIG_AMD_MEM_ENCRYPT diff --git a/kernel/kcsan/Makefile b/kernel/kcsan/Makefile index df6b7799e4927..d4999b38d1be5 100644 --- a/kernel/kcsan/Makefile +++ b/kernel/kcsan/Makefile @@ -4,6 +4,8 @@ KCOV_INSTRUMENT := n UBSAN_SANITIZE := n CFLAGS_REMOVE_core.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_debugfs.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_report.o = $(CC_FLAGS_FTRACE) CFLAGS_core.o := $(call cc-option,-fno-conserve-stack,) \ $(call cc-option,-fno-stack-protector,) diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index f9dcd19165fa2..6b601d88bf71e 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile @@ -6,6 +6,9 @@ ifdef CONFIG_FUNCTION_TRACER ORIG_CFLAGS := $(KBUILD_CFLAGS) KBUILD_CFLAGS = $(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS)) +# Avoid recursion due to instrumentation. 
+KCSAN_SANITIZE := n + ifdef CONFIG_FTRACE_SELFTEST # selftest needs instrumentation CFLAGS_trace_selftest_dynamic.o = $(CC_FLAGS_FTRACE) -- GitLab From 48b1fc190a180d971fb69217c88c7247f4f2ca19 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Fri, 21 Feb 2020 23:02:09 +0100 Subject: [PATCH 0046/1971] kcsan: Add option to allow watcher interruptions Add option to allow interrupts while a watchpoint is set up. This can be enabled either via CONFIG_KCSAN_INTERRUPT_WATCHER or via the boot parameter 'kcsan.interrupt_watcher=1'. Note that, currently not all safe per-CPU access primitives and patterns are accounted for, which could result in false positives. For example, asm-generic/percpu.h uses plain operations, which by default are instrumented. On interrupts and subsequent accesses to the same variable, KCSAN would currently report a data race with this option. Therefore, this option should currently remain disabled by default, but may be enabled for specific test scenarios. To avoid new warnings, changes all uses of smp_processor_id() to use the raw version (as already done in kcsan_found_watchpoint()). The exact SMP processor id is for informational purposes in the report, and correctness is not affected. Signed-off-by: Marco Elver Signed-off-by: Paul E. McKenney --- kernel/kcsan/core.c | 34 ++++++++++------------------------ lib/Kconfig.kcsan | 11 +++++++++++ 2 files changed, 21 insertions(+), 24 deletions(-) diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c index 589b1e7f0f253..e7387fec66795 100644 --- a/kernel/kcsan/core.c +++ b/kernel/kcsan/core.c @@ -21,6 +21,7 @@ static bool kcsan_early_enable = IS_ENABLED(CONFIG_KCSAN_EARLY_ENABLE); static unsigned int kcsan_udelay_task = CONFIG_KCSAN_UDELAY_TASK; static unsigned int kcsan_udelay_interrupt = CONFIG_KCSAN_UDELAY_INTERRUPT; static long kcsan_skip_watch = CONFIG_KCSAN_SKIP_WATCH; +static bool kcsan_interrupt_watcher = IS_ENABLED(CONFIG_KCSAN_INTERRUPT_WATCHER); #ifdef MODULE_PARAM_PREFIX #undef MODULE_PARAM_PREFIX @@ -30,6 +31,7 @@ module_param_named(early_enable, kcsan_early_enable, bool, 0); module_param_named(udelay_task, kcsan_udelay_task, uint, 0644); module_param_named(udelay_interrupt, kcsan_udelay_interrupt, uint, 0644); module_param_named(skip_watch, kcsan_skip_watch, long, 0644); +module_param_named(interrupt_watcher, kcsan_interrupt_watcher, bool, 0444); bool kcsan_enabled; @@ -354,7 +356,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type) unsigned long access_mask; enum kcsan_value_change value_change = KCSAN_VALUE_CHANGE_MAYBE; unsigned long ua_flags = user_access_save(); - unsigned long irq_flags; + unsigned long irq_flags = 0; /* * Always reset kcsan_skip counter in slow-path to avoid underflow; see @@ -370,26 +372,9 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type) goto out; } - /* - * Disable interrupts & preemptions to avoid another thread on the same - * CPU accessing memory locations for the set up watchpoint; this is to - * avoid reporting races to e.g. CPU-local data. - * - * An alternative would be adding the source CPU to the watchpoint - * encoding, and checking that watchpoint-CPU != this-CPU. There are - * several problems with this: - * 1. we should avoid stealing more bits from the watchpoint encoding - * as it would affect accuracy, as well as increase performance - * overhead in the fast-path; - * 2. if we are preempted, but there *is* a genuine data race, we - * would *not* report it -- since this is the common case (vs. 
- * CPU-local data accesses), it makes more sense (from a data race - * detection point of view) to simply disable preemptions to ensure - * as many tasks as possible run on other CPUs. - * - * Use raw versions, to avoid lockdep recursion via IRQ flags tracing. - */ - raw_local_irq_save(irq_flags); + if (!kcsan_interrupt_watcher) + /* Use raw to avoid lockdep recursion via IRQ flags tracing. */ + raw_local_irq_save(irq_flags); watchpoint = insert_watchpoint((unsigned long)ptr, size, is_write); if (watchpoint == NULL) { @@ -507,7 +492,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type) if (is_assert && value_change == KCSAN_VALUE_CHANGE_TRUE) kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES); - kcsan_report(ptr, size, type, value_change, smp_processor_id(), + kcsan_report(ptr, size, type, value_change, raw_smp_processor_id(), KCSAN_REPORT_RACE_SIGNAL); } else if (value_change == KCSAN_VALUE_CHANGE_TRUE) { /* Inferring a race, since the value should not have changed. */ @@ -518,13 +503,14 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type) if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert) kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_TRUE, - smp_processor_id(), + raw_smp_processor_id(), KCSAN_REPORT_RACE_UNKNOWN_ORIGIN); } kcsan_counter_dec(KCSAN_COUNTER_USED_WATCHPOINTS); out_unlock: - raw_local_irq_restore(irq_flags); + if (!kcsan_interrupt_watcher) + raw_local_irq_restore(irq_flags); out: user_access_restore(ua_flags); } diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan index f0b791143c6ab..081ed2e1bf7b1 100644 --- a/lib/Kconfig.kcsan +++ b/lib/Kconfig.kcsan @@ -88,6 +88,17 @@ config KCSAN_SKIP_WATCH_RANDOMIZE KCSAN_WATCH_SKIP. If false, the chosen value is always KCSAN_WATCH_SKIP. +config KCSAN_INTERRUPT_WATCHER + bool "Interruptible watchers" + help + If enabled, a task that set up a watchpoint may be interrupted while + delayed. This option will allow KCSAN to detect races between + interrupted tasks and other threads of execution on the same CPU. + + Currently disabled by default, because not all safe per-CPU access + primitives and patterns may be accounted for, and therefore could + result in false positives. + config KCSAN_REPORT_ONCE_IN_MS int "Duration in milliseconds, in which any given race is only reported once" default 3000 -- GitLab From 2402d0eae589a31ee7b1774cb220d84d0f5605b4 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Sat, 22 Feb 2020 00:10:27 +0100 Subject: [PATCH 0047/1971] kcsan: Add option for verbose reporting Adds CONFIG_KCSAN_VERBOSE to optionally enable more verbose reports. Currently information about the reporting task's held locks and IRQ trace events are shown, if they are enabled. Signed-off-by: Marco Elver Suggested-by: Qian Cai Signed-off-by: Paul E. 
McKenney --- kernel/kcsan/core.c | 4 +- kernel/kcsan/kcsan.h | 3 ++ kernel/kcsan/report.c | 103 +++++++++++++++++++++++++++++++++++++++++- lib/Kconfig.kcsan | 13 ++++++ 4 files changed, 120 insertions(+), 3 deletions(-) diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c index e7387fec66795..065615df88eaa 100644 --- a/kernel/kcsan/core.c +++ b/kernel/kcsan/core.c @@ -18,8 +18,8 @@ #include "kcsan.h" static bool kcsan_early_enable = IS_ENABLED(CONFIG_KCSAN_EARLY_ENABLE); -static unsigned int kcsan_udelay_task = CONFIG_KCSAN_UDELAY_TASK; -static unsigned int kcsan_udelay_interrupt = CONFIG_KCSAN_UDELAY_INTERRUPT; +unsigned int kcsan_udelay_task = CONFIG_KCSAN_UDELAY_TASK; +unsigned int kcsan_udelay_interrupt = CONFIG_KCSAN_UDELAY_INTERRUPT; static long kcsan_skip_watch = CONFIG_KCSAN_SKIP_WATCH; static bool kcsan_interrupt_watcher = IS_ENABLED(CONFIG_KCSAN_INTERRUPT_WATCHER); diff --git a/kernel/kcsan/kcsan.h b/kernel/kcsan/kcsan.h index 892de5120c1b6..e282f8b5749e9 100644 --- a/kernel/kcsan/kcsan.h +++ b/kernel/kcsan/kcsan.h @@ -13,6 +13,9 @@ /* The number of adjacent watchpoints to check. */ #define KCSAN_CHECK_ADJACENT 1 +extern unsigned int kcsan_udelay_task; +extern unsigned int kcsan_udelay_interrupt; + /* * Globally enable and disable KCSAN. */ diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c index 11c791b886f3c..18f9d3bc93a59 100644 --- a/kernel/kcsan/report.c +++ b/kernel/kcsan/report.c @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 +#include +#include #include #include #include @@ -31,7 +33,26 @@ static struct { int cpu_id; unsigned long stack_entries[NUM_STACK_ENTRIES]; int num_stack_entries; -} other_info = { .ptr = NULL }; + + /* + * Optionally pass @current. Typically we do not need to pass @current + * via @other_info since just @task_pid is sufficient. Passing @current + * has additional overhead. + * + * To safely pass @current, we must either use get_task_struct/ + * put_task_struct, or stall the thread that populated @other_info. + * + * We cannot rely on get_task_struct/put_task_struct in case + * release_report() races with a task being released, and would have to + * free it in release_report(). This may result in deadlock if we want + * to use KCSAN on the allocators. + * + * Since we also want to reliably print held locks for + * CONFIG_KCSAN_VERBOSE, the current implementation stalls the thread + * that populated @other_info until it has been consumed. + */ + struct task_struct *task; +} other_info; /* * Information about reported races; used to rate limit reporting. @@ -245,6 +266,16 @@ static int sym_strcmp(void *addr1, void *addr2) return strncmp(buf1, buf2, sizeof(buf1)); } +static void print_verbose_info(struct task_struct *task) +{ + if (!task) + return; + + pr_err("\n"); + debug_show_held_locks(task); + print_irqtrace_events(task); +} + /* * Returns true if a report was generated, false otherwise. */ @@ -319,6 +350,9 @@ static bool print_report(const volatile void *ptr, size_t size, int access_type, other_info.num_stack_entries - other_skipnr, 0); + if (IS_ENABLED(CONFIG_KCSAN_VERBOSE)) + print_verbose_info(other_info.task); + pr_err("\n"); pr_err("%s to 0x%px of %zu bytes by %s on cpu %i:\n", get_access_type(access_type), ptr, size, @@ -340,6 +374,9 @@ static bool print_report(const volatile void *ptr, size_t size, int access_type, stack_trace_print(stack_entries + skipnr, num_stack_entries - skipnr, 0); + if (IS_ENABLED(CONFIG_KCSAN_VERBOSE)) + print_verbose_info(current); + /* Print report footer. 
*/ pr_err("\n"); pr_err("Reported by Kernel Concurrency Sanitizer on:\n"); @@ -357,6 +394,67 @@ static void release_report(unsigned long *flags, enum kcsan_report_type type) spin_unlock_irqrestore(&report_lock, *flags); } +/* + * Sets @other_info.task and awaits consumption of @other_info. + * + * Precondition: report_lock is held. + * Postcondition: report_lock is held. + */ +static void +set_other_info_task_blocking(unsigned long *flags, const volatile void *ptr) +{ + /* + * We may be instrumenting a code-path where current->state is already + * something other than TASK_RUNNING. + */ + const bool is_running = current->state == TASK_RUNNING; + /* + * To avoid deadlock in case we are in an interrupt here and this is a + * race with a task on the same CPU (KCSAN_INTERRUPT_WATCHER), provide a + * timeout to ensure this works in all contexts. + * + * Await approximately the worst case delay of the reporting thread (if + * we are not interrupted). + */ + int timeout = max(kcsan_udelay_task, kcsan_udelay_interrupt); + + other_info.task = current; + do { + if (is_running) { + /* + * Let lockdep know the real task is sleeping, to print + * the held locks (recall we turned lockdep off, so + * locking/unlocking @report_lock won't be recorded). + */ + set_current_state(TASK_UNINTERRUPTIBLE); + } + spin_unlock_irqrestore(&report_lock, *flags); + /* + * We cannot call schedule() since we also cannot reliably + * determine if sleeping here is permitted -- see in_atomic(). + */ + + udelay(1); + spin_lock_irqsave(&report_lock, *flags); + if (timeout-- < 0) { + /* + * Abort. Reset other_info.task to NULL, since it + * appears the other thread is still going to consume + * it. It will result in no verbose info printed for + * this task. + */ + other_info.task = NULL; + break; + } + /* + * If @ptr nor @current matches, then our information has been + * consumed and we may continue. If not, retry. + */ + } while (other_info.ptr == ptr && other_info.task == current); + if (is_running) + set_current_state(TASK_RUNNING); +} + /* * Depending on the report type either sets other_info and returns false, or * acquires the matching other_info and returns true. If other_info is not @@ -388,6 +486,9 @@ retry: other_info.cpu_id = cpu_id; other_info.num_stack_entries = stack_trace_save(other_info.stack_entries, NUM_STACK_ENTRIES, 1); + if (IS_ENABLED(CONFIG_KCSAN_VERBOSE)) + set_other_info_task_blocking(flags, ptr); + spin_unlock_irqrestore(&report_lock, *flags); /* diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan index 081ed2e1bf7b1..0f1447ff8f558 100644 --- a/lib/Kconfig.kcsan +++ b/lib/Kconfig.kcsan @@ -20,6 +20,19 @@ menuconfig KCSAN if KCSAN +config KCSAN_VERBOSE + bool "Show verbose reports with more information about system state" + depends on PROVE_LOCKING + help + If enabled, reports show more information about the system state that + may help better analyze and debug races. This includes held locks and + IRQ trace events. + + While this option should generally be benign, we call into more + external functions on report generation; if a race report is + generated from any one of them, system stability may suffer due to + deadlocks or recursion. If in doubt, say N. + config KCSAN_DEBUG bool "Debugging of KCSAN internals" -- GitLab From 44656d3dc4f0dc20010d054f27397a4a1469fabf Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Tue, 25 Feb 2020 15:32:58 +0100 Subject: [PATCH 0048/1971] kcsan: Add current->state to implicitly atomic accesses Add volatile current->state to list of implicitly atomic accesses. 
This is in preparation to eventually enable KCSAN on kernel/sched (which currently still has KCSAN_SANITIZE := n). Since accesses that match the special check in atomic.h are rare, it makes more sense to move this check to the slow-path, avoiding the additional compare in the fast-path. With the microbenchmark, a speedup of ~6% is measured. Signed-off-by: Marco Elver Signed-off-by: Paul E. McKenney --- kernel/kcsan/atomic.h | 21 +++++++-------------- kernel/kcsan/core.c | 22 +++++++++++++++------- kernel/kcsan/debugfs.c | 27 ++++++++++++++++++--------- 3 files changed, 40 insertions(+), 30 deletions(-) diff --git a/kernel/kcsan/atomic.h b/kernel/kcsan/atomic.h index a9c1930534914..be9e625227f3b 100644 --- a/kernel/kcsan/atomic.h +++ b/kernel/kcsan/atomic.h @@ -4,24 +4,17 @@ #define _KERNEL_KCSAN_ATOMIC_H #include +#include /* - * Helper that returns true if access to @ptr should be considered an atomic - * access, even though it is not explicitly atomic. - * - * List all volatile globals that have been observed in races, to suppress - * data race reports between accesses to these variables. - * - * For now, we assume that volatile accesses of globals are as strong as atomic - * accesses (READ_ONCE, WRITE_ONCE cast to volatile). The situation is still not - * entirely clear, as on some architectures (Alpha) READ_ONCE/WRITE_ONCE do more - * than cast to volatile. Eventually, we hope to be able to remove this - * function. + * Special rules for certain memory where concurrent conflicting accesses are + * common, however, the current convention is to not mark them; returns true if + * access to @ptr should be considered atomic. Called from slow-path. */ -static __always_inline bool kcsan_is_atomic(const volatile void *ptr) +static bool kcsan_is_atomic_special(const volatile void *ptr) { - /* only jiffies for now */ - return ptr == &jiffies; + /* volatile globals that have been observed in data races. */ + return ptr == &jiffies || ptr == ¤t->state; } #endif /* _KERNEL_KCSAN_ATOMIC_H */ diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c index 065615df88eaa..eb30ecdc8c009 100644 --- a/kernel/kcsan/core.c +++ b/kernel/kcsan/core.c @@ -188,12 +188,13 @@ static __always_inline struct kcsan_ctx *get_ctx(void) return in_task() ? ¤t->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx); } +/* Rules for generic atomic accesses. Called from fast-path. */ static __always_inline bool is_atomic(const volatile void *ptr, size_t size, int type) { struct kcsan_ctx *ctx; - if ((type & KCSAN_ACCESS_ATOMIC) != 0) + if (type & KCSAN_ACCESS_ATOMIC) return true; /* @@ -201,16 +202,16 @@ is_atomic(const volatile void *ptr, size_t size, int type) * as atomic. This allows using them also in atomic regions, such as * seqlocks, without implicitly changing their semantics. */ - if ((type & KCSAN_ACCESS_ASSERT) != 0) + if (type & KCSAN_ACCESS_ASSERT) return false; if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC) && - (type & KCSAN_ACCESS_WRITE) != 0 && size <= sizeof(long) && + (type & KCSAN_ACCESS_WRITE) && size <= sizeof(long) && IS_ALIGNED((unsigned long)ptr, size)) return true; /* Assume aligned writes up to word size are atomic. 
*/ ctx = get_ctx(); - if (unlikely(ctx->atomic_next > 0)) { + if (ctx->atomic_next > 0) { /* * Because we do not have separate contexts for nested * interrupts, in case atomic_next is set, we simply assume that @@ -224,10 +225,8 @@ is_atomic(const volatile void *ptr, size_t size, int type) --ctx->atomic_next; /* in task, or outer interrupt */ return true; } - if (unlikely(ctx->atomic_nest_count > 0 || ctx->in_flat_atomic)) - return true; - return kcsan_is_atomic(ptr); + return ctx->atomic_nest_count > 0 || ctx->in_flat_atomic; } static __always_inline bool @@ -367,6 +366,15 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type) if (!kcsan_is_enabled()) goto out; + /* + * Special atomic rules: unlikely to be true, so we check them here in + * the slow-path, and not in the fast-path in is_atomic(). Call after + * kcsan_is_enabled(), as we may access memory that is not yet + * initialized during early boot. + */ + if (!is_assert && kcsan_is_atomic_special(ptr)) + goto out; + if (!check_encodable((unsigned long)ptr, size)) { kcsan_counter_inc(KCSAN_COUNTER_UNENCODABLE_ACCESSES); goto out; diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c index 2ff1961239778..72ee188ebc54a 100644 --- a/kernel/kcsan/debugfs.c +++ b/kernel/kcsan/debugfs.c @@ -74,25 +74,34 @@ void kcsan_counter_dec(enum kcsan_counter_id id) */ static noinline void microbenchmark(unsigned long iters) { + const struct kcsan_ctx ctx_save = current->kcsan_ctx; + const bool was_enabled = READ_ONCE(kcsan_enabled); cycles_t cycles; + /* We may have been called from an atomic region; reset context. */ + memset(¤t->kcsan_ctx, 0, sizeof(current->kcsan_ctx)); + /* + * Disable to benchmark fast-path for all accesses, and (expected + * negligible) call into slow-path, but never set up watchpoints. + */ + WRITE_ONCE(kcsan_enabled, false); + pr_info("KCSAN: %s begin | iters: %lu\n", __func__, iters); cycles = get_cycles(); while (iters--) { - /* - * We can run this benchmark from multiple tasks; this address - * calculation increases likelyhood of some accesses - * overlapping. Make the access type an atomic read, to never - * set up watchpoints and test the fast-path only. - */ - unsigned long addr = - iters % (CONFIG_KCSAN_NUM_WATCHPOINTS * PAGE_SIZE); - __kcsan_check_access((void *)addr, sizeof(long), KCSAN_ACCESS_ATOMIC); + unsigned long addr = iters & ((PAGE_SIZE << 8) - 1); + int type = !(iters & 0x7f) ? KCSAN_ACCESS_ATOMIC : + (!(iters & 0xf) ? KCSAN_ACCESS_WRITE : 0); + __kcsan_check_access((void *)addr, sizeof(long), type); } cycles = get_cycles() - cycles; pr_info("KCSAN: %s end | cycles: %llu\n", __func__, cycles); + + WRITE_ONCE(kcsan_enabled, was_enabled); + /* restore context */ + current->kcsan_ctx = ctx_save; } /* -- GitLab From e7b34100500733f7e052ce3dee94e6338b86e6bc Mon Sep 17 00:00:00 2001 From: Qiujun Huang Date: Thu, 5 Mar 2020 15:21:07 +0100 Subject: [PATCH 0049/1971] kcsan: Fix a typo in a comment s/slots slots/slots/ Signed-off-by: Qiujun Huang Reviewed-by: Nick Desaulniers [elver: commit message] Signed-off-by: Marco Elver Signed-off-by: Paul E. 
McKenney --- kernel/kcsan/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c index eb30ecdc8c009..ee8200835b607 100644 --- a/kernel/kcsan/core.c +++ b/kernel/kcsan/core.c @@ -45,7 +45,7 @@ static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = { }; /* - * Helper macros to index into adjacent slots slots, starting from address slot + * Helper macros to index into adjacent slots, starting from address slot * itself, followed by the right and left slots. * * The purpose is 2-fold: -- GitLab From e7325b774cc72edc2cffc4a1ce40f4dbf1bc0930 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Thu, 5 Mar 2020 15:21:08 +0100 Subject: [PATCH 0050/1971] kcsan: Update Documentation/dev-tools/kcsan.rst Extend and improve based on recent changes, and summarize important bits that have been missing. Tested with "make htmldocs". Signed-off-by: Marco Elver Cc: Qian Cai Signed-off-by: Paul E. McKenney --- Documentation/dev-tools/kcsan.rst | 227 +++++++++++++++++++----------- 1 file changed, 144 insertions(+), 83 deletions(-) diff --git a/Documentation/dev-tools/kcsan.rst b/Documentation/dev-tools/kcsan.rst index 65a0be513b7de..52a5d6fb9701f 100644 --- a/Documentation/dev-tools/kcsan.rst +++ b/Documentation/dev-tools/kcsan.rst @@ -1,27 +1,22 @@ The Kernel Concurrency Sanitizer (KCSAN) ======================================== -Overview --------- - -*Kernel Concurrency Sanitizer (KCSAN)* is a dynamic data race detector for -kernel space. KCSAN is a sampling watchpoint-based data race detector. Key -priorities in KCSAN's design are lack of false positives, scalability, and -simplicity. More details can be found in `Implementation Details`_. - -KCSAN uses compile-time instrumentation to instrument memory accesses. KCSAN is -supported in both GCC and Clang. With GCC it requires version 7.3.0 or later. -With Clang it requires version 7.0.0 or later. +The Kernel Concurrency Sanitizer (KCSAN) is a dynamic race detector, which +relies on compile-time instrumentation, and uses a watchpoint-based sampling +approach to detect races. KCSAN's primary purpose is to detect `data races`_. Usage ----- -To enable KCSAN configure kernel with:: +KCSAN is supported in both GCC and Clang. With GCC it requires version 7.3.0 or +later. With Clang it requires version 7.0.0 or later. + +To enable KCSAN configure the kernel with:: CONFIG_KCSAN = y KCSAN provides several other configuration options to customize behaviour (see -their respective help text for more info). +the respective help text in ``lib/Kconfig.kcsan`` for more info). Error reports ~~~~~~~~~~~~~ @@ -96,7 +91,8 @@ The other less common type of data race report looks like this:: This report is generated where it was not possible to determine the other racing thread, but a race was inferred due to the data value of the watched memory location having changed. These can occur either due to missing -instrumentation or e.g. DMA accesses. +instrumentation or e.g. DMA accesses. These reports will only be generated if +``CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN=y`` (selected by default). Selective analysis ~~~~~~~~~~~~~~~~~~ @@ -110,9 +106,26 @@ the below options are available: behaviour when encountering a data race is deemed safe. * Disabling data race detection for entire functions can be accomplished by - using the function attribute ``__no_kcsan`` (or ``__no_kcsan_or_inline`` for - ``__always_inline`` functions). 
To dynamically control for which functions - data races are reported, see the `debugfs`_ blacklist/whitelist feature. + using the function attribute ``__no_kcsan``:: + + __no_kcsan + void foo(void) { + ... + + To dynamically limit for which functions to generate reports, see the + `DebugFS interface`_ blacklist/whitelist feature. + + For ``__always_inline`` functions, replace ``__always_inline`` with + ``__no_kcsan_or_inline`` (which implies ``__always_inline``):: + + static __no_kcsan_or_inline void foo(void) { + ... + + Note: Older compiler versions (GCC < 9) also do not always honor the + ``__no_kcsan`` attribute on regular ``inline`` functions. If false positives + with these compilers cannot be tolerated, for small functions where + ``__always_inline`` would be appropriate, ``__no_kcsan_or_inline`` should be + preferred instead. * To disable data race detection for a particular compilation unit, add to the ``Makefile``:: @@ -124,13 +137,29 @@ the below options are available: KCSAN_SANITIZE := n -debugfs -~~~~~~~ +Furthermore, it is possible to tell KCSAN to show or hide entire classes of +data races, depending on preferences. These can be changed via the following +Kconfig options: + +* ``CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY``: If enabled and a conflicting write + is observed via a watchpoint, but the data value of the memory location was + observed to remain unchanged, do not report the data race. + +* ``CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC``: Assume that plain aligned writes + up to word size are atomic by default. Assumes that such writes are not + subject to unsafe compiler optimizations resulting in data races. The option + causes KCSAN to not report data races due to conflicts where the only plain + accesses are aligned writes up to word size. + +DebugFS interface +~~~~~~~~~~~~~~~~~ + +The file ``/sys/kernel/debug/kcsan`` provides the following interface: -* The file ``/sys/kernel/debug/kcsan`` can be read to get stats. +* Reading ``/sys/kernel/debug/kcsan`` returns various runtime statistics. -* KCSAN can be turned on or off by writing ``on`` or ``off`` to - ``/sys/kernel/debug/kcsan``. +* Writing ``on`` or ``off`` to ``/sys/kernel/debug/kcsan`` allows turning KCSAN + on or off, respectively. * Writing ``!some_func_name`` to ``/sys/kernel/debug/kcsan`` adds ``some_func_name`` to the report filter list, which (by default) blacklists @@ -142,91 +171,120 @@ debugfs can be used to silence frequently occurring data races; the whitelist feature can help with reproduction and testing of fixes. +Tuning performance +~~~~~~~~~~~~~~~~~~ + +Core parameters that affect KCSAN's overall performance and bug detection +ability are exposed as kernel command-line arguments whose defaults can also be +changed via the corresponding Kconfig options. + +* ``kcsan.skip_watch`` (``CONFIG_KCSAN_SKIP_WATCH``): Number of per-CPU memory + operations to skip, before another watchpoint is set up. Setting up + watchpoints more frequently will result in the likelihood of races to be + observed to increase. This parameter has the most significant impact on + overall system performance and race detection ability. + +* ``kcsan.udelay_task`` (``CONFIG_KCSAN_UDELAY_TASK``): For tasks, the + microsecond delay to stall execution after a watchpoint has been set up. + Larger values result in the window in which we may observe a race to + increase. + +* ``kcsan.udelay_interrupt`` (``CONFIG_KCSAN_UDELAY_INTERRUPT``): For + interrupts, the microsecond delay to stall execution after a watchpoint has + been set up. 
Interrupts have tighter latency requirements, and their delay + should generally be smaller than the one chosen for tasks. + +They may be tweaked at runtime via ``/sys/module/kcsan/parameters/``. + Data Races ---------- -Informally, two operations *conflict* if they access the same memory location, -and at least one of them is a write operation. In an execution, two memory -operations from different threads form a **data race** if they *conflict*, at -least one of them is a *plain access* (non-atomic), and they are *unordered* in -the "happens-before" order according to the `LKMM -<../../tools/memory-model/Documentation/explanation.txt>`_. +In an execution, two memory accesses form a *data race* if they *conflict*, +they happen concurrently in different threads, and at least one of them is a +*plain access*; they *conflict* if both access the same memory location, and at +least one is a write. For a more thorough discussion and definition, see `"Plain +Accesses and Data Races" in the LKMM`_. + +.. _"Plain Accesses and Data Races" in the LKMM: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/memory-model/Documentation/explanation.txt#n1922 -Relationship with the Linux Kernel Memory Model (LKMM) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Relationship with the Linux-Kernel Memory Consistency Model (LKMM) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The LKMM defines the propagation and ordering rules of various memory operations, which gives developers the ability to reason about concurrent code. Ultimately this allows to determine the possible executions of concurrent code, and if that code is free from data races. -KCSAN is aware of *atomic* accesses (``READ_ONCE``, ``WRITE_ONCE``, -``atomic_*``, etc.), but is oblivious of any ordering guarantees. In other -words, KCSAN assumes that as long as a plain access is not observed to race -with another conflicting access, memory operations are correctly ordered. +KCSAN is aware of *marked atomic operations* (``READ_ONCE``, ``WRITE_ONCE``, +``atomic_*``, etc.), but is oblivious of any ordering guarantees and simply +assumes that memory barriers are placed correctly. In other words, KCSAN +assumes that as long as a plain access is not observed to race with another +conflicting access, memory operations are correctly ordered. This means that KCSAN will not report *potential* data races due to missing -memory ordering. If, however, missing memory ordering (that is observable with -a particular compiler and architecture) leads to an observable data race (e.g. -entering a critical section erroneously), KCSAN would report the resulting -data race. - -Race conditions vs. data races -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Race conditions are logic bugs, where unexpected interleaving of racing -concurrent operations result in an erroneous state. - -Data races on the other hand are defined at the *memory model/language level*. -Many data races are also harmful race conditions, which a tool like KCSAN -reports! However, not all data races are race conditions and vice-versa. -KCSAN's intent is to report data races according to the LKMM. A data race -detector can only work at the memory model/language level. - -Deeper analysis, to find high-level race conditions only, requires conveying -the intended kernel logic to a tool. This requires (1) the developer writing a -specification or model of their code, and then (2) the tool verifying that the -implementation matches. 
This has been done for small bits of code using model -checkers and other formal methods, but does not scale to the level of what can -be covered with a dynamic analysis based data race detector such as KCSAN. - -For reasons outlined in this `article `_, -data races can be much more subtle, but can cause no less harm than high-level -race conditions. +memory ordering. Developers should therefore carefully consider the required +memory ordering requirements that remain unchecked. If, however, missing +memory ordering (that is observable with a particular compiler and +architecture) leads to an observable data race (e.g. entering a critical +section erroneously), KCSAN would report the resulting data race. + +Race Detection Beyond Data Races +-------------------------------- + +For code with complex concurrency design, race-condition bugs may not always +manifest as data races. Race conditions occur if concurrently executing +operations result in unexpected system behaviour. On the other hand, data races +are defined at the C-language level. The following macros can be used to check +properties of concurrent code where bugs would not manifest as data races. + +.. kernel-doc:: include/linux/kcsan-checks.h + :functions: ASSERT_EXCLUSIVE_WRITER ASSERT_EXCLUSIVE_ACCESS + ASSERT_EXCLUSIVE_BITS Implementation Details ---------------------- -The general approach is inspired by `DataCollider +KCSAN relies on observing that two accesses happen concurrently. Crucially, we +want to (a) increase the chances of observing races (especially for races that +manifest rarely), and (b) be able to actually observe them. We can accomplish +(a) by injecting various delays, and (b) by using address watchpoints (or +breakpoints). + +If we deliberately stall a memory access, while we have a watchpoint for its +address set up, and then observe the watchpoint to fire, two accesses to the +same address just raced. Using hardware watchpoints, this is the approach taken +in `DataCollider `_. Unlike DataCollider, KCSAN does not use hardware watchpoints, but instead -relies on compiler instrumentation. Watchpoints are implemented using an -efficient encoding that stores access type, size, and address in a long; the -benefits of using "soft watchpoints" are portability and greater flexibility in -limiting which accesses trigger a watchpoint. +relies on compiler instrumentation and "soft watchpoints". -More specifically, KCSAN requires instrumenting plain (unmarked, non-atomic) -memory operations; for each instrumented plain access: +In KCSAN, watchpoints are implemented using an efficient encoding that stores +access type, size, and address in a long; the benefits of using "soft +watchpoints" are portability and greater flexibility. KCSAN then relies on the +compiler instrumenting plain accesses. For each instrumented plain access: 1. Check if a matching watchpoint exists; if yes, and at least one access is a write, then we encountered a racing access. 2. Periodically, if no matching watchpoint exists, set up a watchpoint and - stall for a small delay. + stall for a small randomized delay. 3. Also check the data value before the delay, and re-check the data value after delay; if the values mismatch, we infer a race of unknown origin. -To detect data races between plain and atomic memory operations, KCSAN also -annotates atomic accesses, but only to check if a watchpoint exists -(``kcsan_check_atomic_*``); i.e. KCSAN never sets up a watchpoint on atomic -accesses. 
+To detect data races between plain and marked accesses, KCSAN also annotates +marked accesses, but only to check if a watchpoint exists; i.e. KCSAN never +sets up a watchpoint on marked accesses. By never setting up watchpoints for +marked operations, if all accesses to a variable that is accessed concurrently +are properly marked, KCSAN will never trigger a watchpoint and therefore never +report the accesses. Key Properties ~~~~~~~~~~~~~~ -1. **Memory Overhead:** The current implementation uses a small array of longs - to encode watchpoint information, which is negligible. +1. **Memory Overhead:** The overall memory overhead is only a few MiB + depending on configuration. The current implementation uses a small array of + longs to encode watchpoint information, which is negligible. 2. **Performance Overhead:** KCSAN's runtime aims to be minimal, using an efficient watchpoint encoding that does not require acquiring any shared @@ -253,14 +311,17 @@ Key Properties Alternatives Considered ----------------------- -An alternative data race detection approach for the kernel can be found in +An alternative data race detection approach for the kernel can be found in the `Kernel Thread Sanitizer (KTSAN) `_. KTSAN is a happens-before data race detector, which explicitly establishes the happens-before order between memory operations, which can then be used to -determine data races as defined in `Data Races`_. To build a correct -happens-before relation, KTSAN must be aware of all ordering rules of the LKMM -and synchronization primitives. Unfortunately, any omission leads to false -positives, which is especially important in the context of the kernel which -includes numerous custom synchronization mechanisms. Furthermore, KTSAN's -implementation requires metadata for each memory location (shadow memory); -currently, for each page, KTSAN requires 4 pages of shadow memory. +determine data races as defined in `Data Races`_. + +To build a correct happens-before relation, KTSAN must be aware of all ordering +rules of the LKMM and synchronization primitives. Unfortunately, any omission +leads to large numbers of false positives, which is especially detrimental in +the context of the kernel which includes numerous custom synchronization +mechanisms. To track the happens-before relation, KTSAN's implementation +requires metadata for each memory location (shadow memory), which for each page +corresponds to 4 pages of shadow memory, and can translate into overhead of +tens of GiB on a large system. -- GitLab From 1443b8c9e712ef8914a2cab9ae7ce133229ed96c Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Thu, 5 Mar 2020 15:21:09 +0100 Subject: [PATCH 0051/1971] kcsan: Update API documentation in kcsan-checks.h Update the API documentation for ASSERT_EXCLUSIVE_* macros and make them generate readable documentation for the code examples. All @variable short summaries were missing ':', which was updated for the whole file. Tested with "make htmldocs". Signed-off-by: Marco Elver Signed-off-by: Paul E. 
McKenney --- include/linux/kcsan-checks.h | 98 ++++++++++++++++++++++-------------- 1 file changed, 61 insertions(+), 37 deletions(-) diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h index 8f9f6e2191dc9..3cd8bb03eb417 100644 --- a/include/linux/kcsan-checks.h +++ b/include/linux/kcsan-checks.h @@ -26,9 +26,9 @@ /** * __kcsan_check_access - check generic access for races * - * @ptr address of access - * @size size of access - * @type access type modifier + * @ptr: address of access + * @size: size of access + * @type: access type modifier */ void __kcsan_check_access(const volatile void *ptr, size_t size, int type); @@ -64,7 +64,7 @@ void kcsan_flat_atomic_end(void); * Force treating the next n memory accesses for the current context as atomic * operations. * - * @n number of following memory accesses to treat as atomic. + * @n: number of following memory accesses to treat as atomic. */ void kcsan_atomic_next(int n); @@ -74,7 +74,7 @@ void kcsan_atomic_next(int n); * Set the access mask for all accesses for the current context if non-zero. * Only value changes to bits set in the mask will be reported. * - * @mask bitmask + * @mask: bitmask */ void kcsan_set_access_mask(unsigned long mask); @@ -106,16 +106,16 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size, /** * __kcsan_check_read - check regular read access for races * - * @ptr address of access - * @size size of access + * @ptr: address of access + * @size: size of access */ #define __kcsan_check_read(ptr, size) __kcsan_check_access(ptr, size, 0) /** * __kcsan_check_write - check regular write access for races * - * @ptr address of access - * @size size of access + * @ptr: address of access + * @size: size of access */ #define __kcsan_check_write(ptr, size) \ __kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE) @@ -123,16 +123,16 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size, /** * kcsan_check_read - check regular read access for races * - * @ptr address of access - * @size size of access + * @ptr: address of access + * @size: size of access */ #define kcsan_check_read(ptr, size) kcsan_check_access(ptr, size, 0) /** * kcsan_check_write - check regular write access for races * - * @ptr address of access - * @size size of access + * @ptr: address of access + * @size: size of access */ #define kcsan_check_write(ptr, size) \ kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE) @@ -158,14 +158,26 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size, * allowed. This assertion can be used to specify properties of concurrent code, * where violation cannot be detected as a normal data race. * - * For example, if a per-CPU variable is only meant to be written by a single - * CPU, but may be read from other CPUs; in this case, reads and writes must be - * marked properly, however, if an off-CPU WRITE_ONCE() races with the owning - * CPU's WRITE_ONCE(), would not constitute a data race but could be a harmful - * race condition. Using this macro allows specifying this property in the code - * and catch such bugs. + * For example, if we only have a single writer, but multiple concurrent + * readers, to avoid data races, all these accesses must be marked; even + * concurrent marked writes racing with the single writer are bugs. + * Unfortunately, due to being marked, they are no longer data races. For cases + * like these, we can use the macro as follows: * - * @var variable to assert on + * .. 
code-block:: c + * + * void writer(void) { + * spin_lock(&update_foo_lock); + * ASSERT_EXCLUSIVE_WRITER(shared_foo); + * WRITE_ONCE(shared_foo, ...); + * spin_unlock(&update_foo_lock); + * } + * void reader(void) { + * // update_foo_lock does not need to be held! + * ... = READ_ONCE(shared_foo); + * } + * + * @var: variable to assert on */ #define ASSERT_EXCLUSIVE_WRITER(var) \ __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT) @@ -177,16 +189,22 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size, * writers). This assertion can be used to specify properties of concurrent * code, where violation cannot be detected as a normal data race. * - * For example, in a reference-counting algorithm where exclusive access is - * expected after the refcount reaches 0. We can check that this property - * actually holds as follows: + * For example, where exclusive access is expected after determining no other + * users of an object are left, but the object is not actually freed. We can + * check that this property actually holds as follows: + * + * .. code-block:: c * * if (refcount_dec_and_test(&obj->refcnt)) { * ASSERT_EXCLUSIVE_ACCESS(*obj); - * safely_dispose_of(obj); + * do_some_cleanup(obj); + * release_for_reuse(obj); * } * - * @var variable to assert on + * Note: For cases where the object is freed, `KASAN `_ is a better + * fit to detect use-after-free bugs. + * + * @var: variable to assert on */ #define ASSERT_EXCLUSIVE_ACCESS(var) \ __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT) @@ -200,7 +218,7 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size, * concurrent readers are permitted. This assertion captures more detailed * bit-level properties, compared to the other (word granularity) assertions. * Only the bits set in @mask are checked for concurrent modifications, while - * ignoring the remaining bits, i.e. concurrent writes (or reads) to ~@mask bits + * ignoring the remaining bits, i.e. concurrent writes (or reads) to ~mask bits * are ignored. * * Use this for variables, where some bits must not be modified concurrently, @@ -210,17 +228,21 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size, * but other bits may still be modified concurrently. A reader may wish to * assert that this is true as follows: * + * .. code-block:: c + * * ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK); * foo = (READ_ONCE(flags) & READ_ONLY_MASK) >> READ_ONLY_SHIFT; * - * Note: The access that immediately follows ASSERT_EXCLUSIVE_BITS() is - * assumed to access the masked bits only, and KCSAN optimistically assumes it - * is therefore safe, even in the presence of data races, and marking it with - * READ_ONCE() is optional from KCSAN's point-of-view. We caution, however, - * that it may still be advisable to do so, since we cannot reason about all - * compiler optimizations when it comes to bit manipulations (on the reader - * and writer side). If you are sure nothing can go wrong, we can write the - * above simply as: + * Note: The access that immediately follows ASSERT_EXCLUSIVE_BITS() is assumed + * to access the masked bits only, and KCSAN optimistically assumes it is + * therefore safe, even in the presence of data races, and marking it with + * READ_ONCE() is optional from KCSAN's point-of-view. 
We caution, however, that + * it may still be advisable to do so, since we cannot reason about all compiler + * optimizations when it comes to bit manipulations (on the reader and writer + * side). If you are sure nothing can go wrong, we can write the above simply + * as: + * + * .. code-block:: c * * ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK); * foo = (flags & READ_ONLY_MASK) >> READ_ONLY_SHIFT; @@ -230,15 +252,17 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size, * be modified concurrently. Writers, where other bits may change concurrently, * could use the assertion as follows: * + * .. code-block:: c + * * spin_lock(&foo_lock); * ASSERT_EXCLUSIVE_BITS(flags, FOO_MASK); - * old_flags = READ_ONCE(flags); + * old_flags = flags; * new_flags = (old_flags & ~FOO_MASK) | (new_foo << FOO_SHIFT); * if (cmpxchg(&flags, old_flags, new_flags) != old_flags) { ... } * spin_unlock(&foo_lock); * - * @var variable to assert on - * @mask only check for modifications to bits set in @mask + * @var: variable to assert on + * @mask: only check for modifications to bits set in @mask */ #define ASSERT_EXCLUSIVE_BITS(var, mask) \ do { \ -- GitLab From 87c3d579c8ed0eaea6b1567d529a8daa85a2bc6c Mon Sep 17 00:00:00 2001 From: Enric Balletbo i Serra Date: Mon, 13 Apr 2020 16:10:51 +0200 Subject: [PATCH 0052/1971] power: supply: bq24257_charger: Replace depends on REGMAP_I2C with select regmap is a library function that gets selected by drivers that need it. No driver modules should depend on it. Depending on REGMAP_I2C makes this driver only build if another driver already selected REGMAP_I2C, as the symbol can't be selected through the menu kernel configuration. Fixes: 2219a935963e ("power_supply: Add TI BQ24257 charger driver") Signed-off-by: Enric Balletbo i Serra Signed-off-by: Sebastian Reichel --- drivers/power/supply/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig index f3424fdce341a..d37ec0d03237f 100644 --- a/drivers/power/supply/Kconfig +++ b/drivers/power/supply/Kconfig @@ -577,7 +577,7 @@ config CHARGER_BQ24257 tristate "TI BQ24250/24251/24257 battery charger driver" depends on I2C depends on GPIOLIB || COMPILE_TEST - depends on REGMAP_I2C + select REGMAP_I2C help Say Y to enable support for the TI BQ24250, BQ24251, and BQ24257 battery chargers. -- GitLab From d5b2b22558f1a265ea8e88e5a54fc61ae7da3355 Mon Sep 17 00:00:00 2001 From: Waibel Georg Date: Wed, 18 Mar 2020 10:33:24 +0000 Subject: [PATCH 0053/1971] clk: imx: imx6ul: change flexcan clock to support CiA bitrates Setting a CAN bitrate of 800kbit/s fails with a bitrate error of 1.3% if the flexcan module is clocked at 30MHz (CAN_CLK_ROOT). This patch changes the clock frequency from 30MHz to 40MHz which allows to support all bitrates recommended by CiA. The patch sets CAN_CLK_SEL to 80MHz by changing its clock parent from CLK_PLL3_60M to CLK_PLL3_80M. The post-divider CAN_CLK_PODF is set to /2 by default which makes 40MHz CAN_CLK_ROOT from its parent CAN_CLK_SEL. Background: CAN in Automation document 102 (CiA102) recommends the CAN bitrates 10, 20, 50, 125, 250, 500, 800 and 1000kbit/s. 
With the flexcan serial clock at 30MHz (original value), setting some common bitrates ("ip link set canX type can bitrate ") gives the following results:

requested value / actually set value
   5000: bitrate    5000 sample-point 0.708
  10000: bitrate   10000 sample-point 0.866
  20000: bitrate   20000 sample-point 0.866
  40000: bitrate   40000 sample-point 0.866
  50000: bitrate   50000 sample-point 0.866
  80000: bitrate   80000 sample-point 0.866
 100000: bitrate  100000 sample-point 0.866
 125000: bitrate  125000 sample-point 0.875
 250000: bitrate  250000 sample-point 0.866
 400000: bitrate  400000 sample-point 0.866
 500000: bitrate  500000 sample-point 0.866
 666666: bitrate  666666 sample-point 0.800
 800000: bitrate  789473 sample-point 0.789 !!!bitrate error 1.3%
1000000: bitrate 1000000 sample-point 0.733

With the flexcan serial clock at 40MHz (new value) we get this:

   5000: no longer possible
  10000: bitrate   10000 sample-point 0.875
  20000: bitrate   20000 sample-point 0.875
  40000: bitrate   40000 sample-point 0.850
  50000: bitrate   50000 sample-point 0.875
  80000: bitrate   80000 sample-point 0.850
 100000: bitrate  100000 sample-point 0.875
 125000: bitrate  125000 sample-point 0.875
 250000: bitrate  250000 sample-point 0.875
 400000: bitrate  400000 sample-point 0.850
 500000: bitrate  500000 sample-point 0.875
 666666: bitrate  666666 sample-point 0.800
 800000: bitrate  800000 sample-point 0.800
1000000: bitrate 1000000 sample-point 0.750

A drawback of the modification is that 5kbit/s is no longer supported. Setting the flexcan serial clock to 60MHz or 80MHz would produce similar results, but would lose even more bitrates at the lower end. Changing the flexcan serial clock to 40MHz might apply to other SoCs using the flexcan module as well (e.g. imx6q/d/s..). But since I don't have such hardware to test, I did not add this to the patch.
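As a rough illustration of where the 1.3% error comes from (this reasoning is not part of the original submission; it only assumes the usual CAN bit-timing model in which each bit is an integer number of time quanta of the serial clock):

  30000000 / 800000 = 37.5 quanta per bit -> not an integer; the closest
                      usable setting of 38 quanta yields 30000000 / 38 =
                      789473 bit/s, i.e. the ~1.3% error listed above
  40000000 / 800000 = 50 quanta per bit   -> exact, so 800000 bit/s

The loss of 5kbit/s is presumably the flip side of the same arithmetic: the slowest achievable bitrate scales with the serial clock, because the maximum prescaler and quanta-per-bit product is fixed.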
Signed-off-by: Georg Waibel Signed-off-by: Shawn Guo --- drivers/clk/imx/clk-imx6ul.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c index dafc8806b03e8..5dbb6a9377324 100644 --- a/drivers/clk/imx/clk-imx6ul.c +++ b/drivers/clk/imx/clk-imx6ul.c @@ -503,7 +503,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node) clk_prepare_enable(hws[IMX6UL_CLK_USBPHY2_GATE]->clk); } - clk_set_parent(hws[IMX6UL_CLK_CAN_SEL]->clk, hws[IMX6UL_CLK_PLL3_60M]->clk); + clk_set_parent(hws[IMX6UL_CLK_CAN_SEL]->clk, hws[IMX6UL_CLK_PLL3_80M]->clk); if (clk_on_imx6ul()) clk_set_parent(hws[IMX6UL_CLK_SIM_PRE_SEL]->clk, hws[IMX6UL_CLK_PLL3_USB_OTG]->clk); else if (clk_on_imx6ull()) -- GitLab From f05ec5073fdea165d13624322d6fc4d295bccc65 Mon Sep 17 00:00:00 2001 From: Jason Yan Date: Thu, 9 Apr 2020 16:54:01 +0800 Subject: [PATCH 0054/1971] power: supply: max14656: remove set but not used 'ret' MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix the following gcc warning: drivers/power/supply/max14656_charger_detector.c:142:6: warning: variable ‘ret’ set but not used [-Wunused-but-set-variable] int ret = 0; ^~~ Reported-by: Hulk Robot Signed-off-by: Jason Yan Signed-off-by: Sebastian Reichel --- drivers/power/supply/max14656_charger_detector.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/power/supply/max14656_charger_detector.c b/drivers/power/supply/max14656_charger_detector.c index 3bbb8b4c8ae77..137f9fafce8c7 100644 --- a/drivers/power/supply/max14656_charger_detector.c +++ b/drivers/power/supply/max14656_charger_detector.c @@ -139,10 +139,9 @@ static void max14656_irq_worker(struct work_struct *work) u8 buf[REG_TOTAL_NUM]; u8 chg_type; - int ret = 0; - ret = max14656_read_block_reg(chip->client, MAX14656_DEVICE_ID, - REG_TOTAL_NUM, buf); + max14656_read_block_reg(chip->client, MAX14656_DEVICE_ID, + REG_TOTAL_NUM, buf); if ((buf[MAX14656_STATUS_1] & STATUS1_VB_VALID_MASK) && (buf[MAX14656_STATUS_1] & STATUS1_CHG_TYPE_MASK)) { -- GitLab From 4ac54b88b6c191e400a4e5d09da3a693307365eb Mon Sep 17 00:00:00 2001 From: Rafael Gandolfi Date: Mon, 6 Apr 2020 16:05:38 +0200 Subject: [PATCH 0055/1971] power: supply: axp288_fuel_gauge: Add the Meegopad T02 to the blacklist. The Meegopad T02 is a PC in stick format and doesn't have a battery, it is reported with a random and constant battery charge but as discharging to userspace. Add it to the blacklist to avoid the bogus battery status reporting. 
Signed-off-by: Rafael Gandolfi Reviewed-by: Hans de Goede Signed-off-by: Sebastian Reichel --- drivers/power/supply/axp288_fuel_gauge.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c index f40fa0e63b6e5..148eb8105803a 100644 --- a/drivers/power/supply/axp288_fuel_gauge.c +++ b/drivers/power/supply/axp288_fuel_gauge.c @@ -717,6 +717,12 @@ static const struct dmi_system_id axp288_fuel_gauge_blacklist[] = { DMI_MATCH(DMI_PRODUCT_NAME, "STK1A32SC"), }, }, + { + /* Meegopad T02 */ + .matches = { + DMI_MATCH(DMI_PRODUCT_NAME, "MEEGOPAD T02"), + }, + }, { /* Meegopad T08 */ .matches = { -- GitLab From 164eaf6b435c31d98fdabe0151e979fa0b4280a0 Mon Sep 17 00:00:00 2001 From: Tang Bin Date: Mon, 6 Apr 2020 13:07:57 +0800 Subject: [PATCH 0056/1971] power: supply: 88pm860x_battery: remove redundant dev_err message In pm860x_battery_probe(), when getting the IRQ fails, platform_get_irq() already logs a dev_err message, so remove the redundant messages here. Signed-off-by: Tang Bin Signed-off-by: Sebastian Reichel --- drivers/power/supply/88pm860x_battery.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/drivers/power/supply/88pm860x_battery.c b/drivers/power/supply/88pm860x_battery.c index 5ca047b3f58fb..1308f3a185f3d 100644 --- a/drivers/power/supply/88pm860x_battery.c +++ b/drivers/power/supply/88pm860x_battery.c @@ -919,16 +919,12 @@ static int pm860x_battery_probe(struct platform_device *pdev) return -ENOMEM; info->irq_cc = platform_get_irq(pdev, 0); - if (info->irq_cc <= 0) { - dev_err(&pdev->dev, "No IRQ resource!\n"); + if (info->irq_cc <= 0) return -EINVAL; - } info->irq_batt = platform_get_irq(pdev, 1); - if (info->irq_batt <= 0) { - dev_err(&pdev->dev, "No IRQ resource!\n"); + if (info->irq_batt <= 0) return -EINVAL; - } info->chip = chip; info->i2c = -- GitLab From 135c0872d86948046d11d7083e36c930cc43ac93 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Wed, 18 Mar 2020 18:38:44 +0100 Subject: [PATCH 0057/1971] kcsan: Introduce report access_info and other_info Improve readability by introducing access_info and other_info structs, and, in preparation for the following commit in this series, replace the single instance of other_info with an array of size 1. No functional change intended. Signed-off-by: Marco Elver Signed-off-by: Paul E. McKenney --- kernel/kcsan/core.c | 6 +- kernel/kcsan/kcsan.h | 2 +- kernel/kcsan/report.c | 147 +++++++++++++++++++++--------------------- 3 files changed, 77 insertions(+), 78 deletions(-) diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c index ee8200835b607..f1c38620e3cf8 100644 --- a/kernel/kcsan/core.c +++ b/kernel/kcsan/core.c @@ -321,7 +321,7 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr, flags = user_access_save(); if (consumed) { - kcsan_report(ptr, size, type, true, raw_smp_processor_id(), + kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_MAYBE, KCSAN_REPORT_CONSUMED_WATCHPOINT); } else { /* @@ -500,8 +500,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type) if (is_assert && value_change == KCSAN_VALUE_CHANGE_TRUE) kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES); - kcsan_report(ptr, size, type, value_change, raw_smp_processor_id(), - KCSAN_REPORT_RACE_SIGNAL); + kcsan_report(ptr, size, type, value_change, KCSAN_REPORT_RACE_SIGNAL); } else if (value_change == KCSAN_VALUE_CHANGE_TRUE) { /* Inferring a race, since the value should not have changed.
*/ @@ -511,7 +510,6 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type) if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert) kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_TRUE, - raw_smp_processor_id(), KCSAN_REPORT_RACE_UNKNOWN_ORIGIN); } diff --git a/kernel/kcsan/kcsan.h b/kernel/kcsan/kcsan.h index e282f8b5749e9..6630dfe32f31f 100644 --- a/kernel/kcsan/kcsan.h +++ b/kernel/kcsan/kcsan.h @@ -135,7 +135,7 @@ enum kcsan_report_type { * Print a race report from thread that encountered the race. */ extern void kcsan_report(const volatile void *ptr, size_t size, int access_type, - enum kcsan_value_change value_change, int cpu_id, + enum kcsan_value_change value_change, enum kcsan_report_type type); #endif /* _KERNEL_KCSAN_KCSAN_H */ diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c index 18f9d3bc93a59..de234d1c1b3d9 100644 --- a/kernel/kcsan/report.c +++ b/kernel/kcsan/report.c @@ -19,18 +19,23 @@ */ #define NUM_STACK_ENTRIES 64 +/* Common access info. */ +struct access_info { + const volatile void *ptr; + size_t size; + int access_type; + int task_pid; + int cpu_id; +}; + /* * Other thread info: communicated from other racing thread to thread that set * up the watchpoint, which then prints the complete report atomically. Only * need one struct, as all threads should to be serialized regardless to print * the reports, with reporting being in the slow-path. */ -static struct { - const volatile void *ptr; - size_t size; - int access_type; - int task_pid; - int cpu_id; +struct other_info { + struct access_info ai; unsigned long stack_entries[NUM_STACK_ENTRIES]; int num_stack_entries; @@ -52,7 +57,9 @@ static struct { * that populated @other_info until it has been consumed. */ struct task_struct *task; -} other_info; +}; + +static struct other_info other_infos[1]; /* * Information about reported races; used to rate limit reporting. @@ -238,7 +245,7 @@ static const char *get_thread_desc(int task_id) } /* Helper to skip KCSAN-related functions in stack-trace. */ -static int get_stack_skipnr(unsigned long stack_entries[], int num_entries) +static int get_stack_skipnr(const unsigned long stack_entries[], int num_entries) { char buf[64]; int skip = 0; @@ -279,9 +286,10 @@ static void print_verbose_info(struct task_struct *task) /* * Returns true if a report was generated, false otherwise. 
*/ -static bool print_report(const volatile void *ptr, size_t size, int access_type, - enum kcsan_value_change value_change, int cpu_id, - enum kcsan_report_type type) +static bool print_report(enum kcsan_value_change value_change, + enum kcsan_report_type type, + const struct access_info *ai, + const struct other_info *other_info) { unsigned long stack_entries[NUM_STACK_ENTRIES] = { 0 }; int num_stack_entries = stack_trace_save(stack_entries, NUM_STACK_ENTRIES, 1); @@ -297,9 +305,9 @@ static bool print_report(const volatile void *ptr, size_t size, int access_type, return false; if (type == KCSAN_REPORT_RACE_SIGNAL) { - other_skipnr = get_stack_skipnr(other_info.stack_entries, - other_info.num_stack_entries); - other_frame = other_info.stack_entries[other_skipnr]; + other_skipnr = get_stack_skipnr(other_info->stack_entries, + other_info->num_stack_entries); + other_frame = other_info->stack_entries[other_skipnr]; /* @value_change is only known for the other thread */ if (skip_report(value_change, other_frame)) @@ -321,13 +329,13 @@ static bool print_report(const volatile void *ptr, size_t size, int access_type, */ cmp = sym_strcmp((void *)other_frame, (void *)this_frame); pr_err("BUG: KCSAN: %s in %ps / %ps\n", - get_bug_type(access_type | other_info.access_type), + get_bug_type(ai->access_type | other_info->ai.access_type), (void *)(cmp < 0 ? other_frame : this_frame), (void *)(cmp < 0 ? this_frame : other_frame)); } break; case KCSAN_REPORT_RACE_UNKNOWN_ORIGIN: - pr_err("BUG: KCSAN: %s in %pS\n", get_bug_type(access_type), + pr_err("BUG: KCSAN: %s in %pS\n", get_bug_type(ai->access_type), (void *)this_frame); break; @@ -341,30 +349,28 @@ static bool print_report(const volatile void *ptr, size_t size, int access_type, switch (type) { case KCSAN_REPORT_RACE_SIGNAL: pr_err("%s to 0x%px of %zu bytes by %s on cpu %i:\n", - get_access_type(other_info.access_type), other_info.ptr, - other_info.size, get_thread_desc(other_info.task_pid), - other_info.cpu_id); + get_access_type(other_info->ai.access_type), other_info->ai.ptr, + other_info->ai.size, get_thread_desc(other_info->ai.task_pid), + other_info->ai.cpu_id); /* Print the other thread's stack trace. */ - stack_trace_print(other_info.stack_entries + other_skipnr, - other_info.num_stack_entries - other_skipnr, + stack_trace_print(other_info->stack_entries + other_skipnr, + other_info->num_stack_entries - other_skipnr, 0); if (IS_ENABLED(CONFIG_KCSAN_VERBOSE)) - print_verbose_info(other_info.task); + print_verbose_info(other_info->task); pr_err("\n"); pr_err("%s to 0x%px of %zu bytes by %s on cpu %i:\n", - get_access_type(access_type), ptr, size, - get_thread_desc(in_task() ? task_pid_nr(current) : -1), - cpu_id); + get_access_type(ai->access_type), ai->ptr, ai->size, + get_thread_desc(ai->task_pid), ai->cpu_id); break; case KCSAN_REPORT_RACE_UNKNOWN_ORIGIN: pr_err("race at unknown origin, with %s to 0x%px of %zu bytes by %s on cpu %i:\n", - get_access_type(access_type), ptr, size, - get_thread_desc(in_task() ? 
task_pid_nr(current) : -1), - cpu_id); + get_access_type(ai->access_type), ai->ptr, ai->size, + get_thread_desc(ai->task_pid), ai->cpu_id); break; default: @@ -386,22 +392,23 @@ static bool print_report(const volatile void *ptr, size_t size, int access_type, return true; } -static void release_report(unsigned long *flags, enum kcsan_report_type type) +static void release_report(unsigned long *flags, struct other_info *other_info) { - if (type == KCSAN_REPORT_RACE_SIGNAL) - other_info.ptr = NULL; /* mark for reuse */ + if (other_info) + other_info->ai.ptr = NULL; /* Mark for reuse. */ spin_unlock_irqrestore(&report_lock, *flags); } /* - * Sets @other_info.task and awaits consumption of @other_info. + * Sets @other_info->task and awaits consumption of @other_info. * * Precondition: report_lock is held. * Postcondition: report_lock is held. */ -static void -set_other_info_task_blocking(unsigned long *flags, const volatile void *ptr) +static void set_other_info_task_blocking(unsigned long *flags, + const struct access_info *ai, + struct other_info *other_info) { /* * We may be instrumenting a code-path where current->state is already @@ -418,7 +425,7 @@ set_other_info_task_blocking(unsigned long *flags, const volatile void *ptr) */ int timeout = max(kcsan_udelay_task, kcsan_udelay_interrupt); - other_info.task = current; + other_info->task = current; do { if (is_running) { /* @@ -438,19 +445,19 @@ set_other_info_task_blocking(unsigned long *flags, const volatile void *ptr) spin_lock_irqsave(&report_lock, *flags); if (timeout-- < 0) { /* - * Abort. Reset other_info.task to NULL, since it + * Abort. Reset @other_info->task to NULL, since it * appears the other thread is still going to consume * it. It will result in no verbose info printed for * this task. */ - other_info.task = NULL; + other_info->task = NULL; break; } /* * If @ptr nor @current matches, then our information has been * consumed and we may continue. If not, retry. */ - } while (other_info.ptr == ptr && other_info.task == current); + } while (other_info->ai.ptr == ai->ptr && other_info->task == current); if (is_running) set_current_state(TASK_RUNNING); } @@ -460,9 +467,8 @@ set_other_info_task_blocking(unsigned long *flags, const volatile void *ptr) * acquires the matching other_info and returns true. If other_info is not * required for the report type, simply acquires report_lock and returns true. */ -static bool prepare_report(unsigned long *flags, const volatile void *ptr, - size_t size, int access_type, int cpu_id, - enum kcsan_report_type type) +static bool prepare_report(unsigned long *flags, enum kcsan_report_type type, + const struct access_info *ai, struct other_info *other_info) { if (type != KCSAN_REPORT_CONSUMED_WATCHPOINT && type != KCSAN_REPORT_RACE_SIGNAL) { @@ -476,18 +482,14 @@ retry: switch (type) { case KCSAN_REPORT_CONSUMED_WATCHPOINT: - if (other_info.ptr != NULL) + if (other_info->ai.ptr) break; /* still in use, retry */ - other_info.ptr = ptr; - other_info.size = size; - other_info.access_type = access_type; - other_info.task_pid = in_task() ? 
task_pid_nr(current) : -1; - other_info.cpu_id = cpu_id; - other_info.num_stack_entries = stack_trace_save(other_info.stack_entries, NUM_STACK_ENTRIES, 1); + other_info->ai = *ai; + other_info->num_stack_entries = stack_trace_save(other_info->stack_entries, NUM_STACK_ENTRIES, 1); if (IS_ENABLED(CONFIG_KCSAN_VERBOSE)) - set_other_info_task_blocking(flags, ptr); + set_other_info_task_blocking(flags, ai, other_info); spin_unlock_irqrestore(&report_lock, *flags); @@ -498,37 +500,31 @@ retry: return false; case KCSAN_REPORT_RACE_SIGNAL: - if (other_info.ptr == NULL) + if (!other_info->ai.ptr) break; /* no data available yet, retry */ /* * First check if this is the other_info we are expecting, i.e. * matches based on how watchpoint was encoded. */ - if (!matching_access((unsigned long)other_info.ptr & - WATCHPOINT_ADDR_MASK, - other_info.size, - (unsigned long)ptr & WATCHPOINT_ADDR_MASK, - size)) + if (!matching_access((unsigned long)other_info->ai.ptr & WATCHPOINT_ADDR_MASK, other_info->ai.size, + (unsigned long)ai->ptr & WATCHPOINT_ADDR_MASK, ai->size)) break; /* mismatching watchpoint, retry */ - if (!matching_access((unsigned long)other_info.ptr, - other_info.size, (unsigned long)ptr, - size)) { + if (!matching_access((unsigned long)other_info->ai.ptr, other_info->ai.size, + (unsigned long)ai->ptr, ai->size)) { /* * If the actual accesses to not match, this was a false * positive due to watchpoint encoding. */ - kcsan_counter_inc( - KCSAN_COUNTER_ENCODING_FALSE_POSITIVES); + kcsan_counter_inc(KCSAN_COUNTER_ENCODING_FALSE_POSITIVES); /* discard this other_info */ - release_report(flags, KCSAN_REPORT_RACE_SIGNAL); + release_report(flags, other_info); return false; } - access_type |= other_info.access_type; - if ((access_type & KCSAN_ACCESS_WRITE) == 0) { + if (!((ai->access_type | other_info->ai.access_type) & KCSAN_ACCESS_WRITE)) { /* * While the address matches, this is not the other_info * from the thread that consumed our watchpoint, since @@ -561,15 +557,11 @@ retry: * data, and at this point the likelihood that we * re-report the same race again is high. */ - release_report(flags, KCSAN_REPORT_RACE_SIGNAL); + release_report(flags, other_info); return false; } - /* - * Matching & usable access in other_info: keep other_info_lock - * locked, as this thread consumes it to print the full report; - * unlocked in release_report. - */ + /* Matching access in other_info. */ return true; default: @@ -582,10 +574,19 @@ retry: } void kcsan_report(const volatile void *ptr, size_t size, int access_type, - enum kcsan_value_change value_change, int cpu_id, + enum kcsan_value_change value_change, enum kcsan_report_type type) { unsigned long flags = 0; + const struct access_info ai = { + .ptr = ptr, + .size = size, + .access_type = access_type, + .task_pid = in_task() ? task_pid_nr(current) : -1, + .cpu_id = raw_smp_processor_id() + }; + struct other_info *other_info = type == KCSAN_REPORT_RACE_UNKNOWN_ORIGIN + ? NULL : &other_infos[0]; /* * With TRACE_IRQFLAGS, lockdep's IRQ trace state becomes corrupted if @@ -596,19 +597,19 @@ void kcsan_report(const volatile void *ptr, size_t size, int access_type, lockdep_off(); kcsan_disable_current(); - if (prepare_report(&flags, ptr, size, access_type, cpu_id, type)) { + if (prepare_report(&flags, type, &ai, other_info)) { /* * Never report if value_change is FALSE, only if we it is * either TRUE or MAYBE. In case of MAYBE, further filtering may * be done once we know the full stack trace in print_report(). 
*/ bool reported = value_change != KCSAN_VALUE_CHANGE_FALSE && - print_report(ptr, size, access_type, value_change, cpu_id, type); + print_report(value_change, type, &ai, other_info); if (reported && panic_on_warn) panic("panic_on_warn set ...\n"); - release_report(&flags, type); + release_report(&flags, other_info); } kcsan_enable_current(); -- GitLab From 6119418f94ca5314392d258d27eb0cb58bb1774e Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Wed, 18 Mar 2020 18:38:45 +0100 Subject: [PATCH 0058/1971] kcsan: Avoid blocking producers in prepare_report() To avoid deadlock in case watchers can be interrupted, we need to ensure that producers of the struct other_info can never be blocked by an unrelated consumer. (Likely to occur with KCSAN_INTERRUPT_WATCHER.) There are several cases that can lead to this scenario, for example: 1. A watchpoint A was set up by task T1, but interrupted by interrupt I1. Some other thread (task or interrupt) finds watchpoint A consumes it, and sets other_info. Then I1 also finds some unrelated watchpoint B, consumes it, but is blocked because other_info is in use. T1 cannot consume other_info because I1 never returns -> deadlock. 2. A watchpoint A was set up by task T1, but interrupted by interrupt I1, which also sets up a watchpoint B. Some other thread finds watchpoint A, and consumes it and sets up other_info with its information. Similarly some other thread finds watchpoint B and consumes it, but is then blocked because other_info is in use. When I1 continues it sees its watchpoint was consumed, and that it must wait for other_info, which currently contains information to be consumed by T1. However, T1 cannot unblock other_info because I1 never returns -> deadlock. To avoid this, we need to ensure that producers of struct other_info always have a usable other_info entry. This is obviously not the case with only a single instance of struct other_info, as concurrent producers must wait for the entry to be released by some consumer (which may be locked up as illustrated above). While it would be nice if producers could simply call kmalloc() and append their instance of struct other_info to a list, we are very limited in this code path: since KCSAN can instrument the allocators themselves, calling kmalloc() could lead to deadlock or corrupted allocator state. Since producers of the struct other_info will always succeed at try_consume_watchpoint(), preceding the call into kcsan_report(), we know that the particular watchpoint slot cannot simply be reused or consumed by another potential other_info producer. If we move removal of a watchpoint after reporting (by the consumer of struct other_info), we can see a consumed watchpoint as a held lock on elements of other_info, if we create a one-to-one mapping of a watchpoint to an other_info element. Therefore, the simplest solution is to create an array of struct other_info that is as large as the watchpoints array in core.c, and pass the watchpoint index to kcsan_report() for producers and consumers, and change watchpoints to be removed after reporting is done. With a default config on a 64-bit system, the array other_infos consumes ~37KiB. For most systems today this is not a problem. On smaller memory constrained systems, the config value CONFIG_KCSAN_NUM_WATCHPOINTS can be reduced appropriately. Overall, this change is a simplification of the prepare_report() code, and makes some of the checks (such as checking if at least one access is a write) redundant. 
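To make the one-to-one mapping concrete, the calling convention introduced by the diff below boils down to the following (sketch only; the real code is in the hunks that follow):

  /*
   * The producer/consumer index is derived from the consumed watchpoint
   * slot, which stays held until reporting has finished:
   */
  kcsan_report(ptr, size, type, value_change, KCSAN_REPORT_RACE_SIGNAL,
               watchpoint - watchpoints);
  /*
   * ... and kcsan_report() resolves it to &other_infos[watchpoint_idx], so
   * reports on different watchpoints never contend for the same other_info.
   */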
Tested: $ tools/testing/selftests/rcutorture/bin/kvm.sh \ --cpus 12 --duration 10 --kconfig "CONFIG_DEBUG_INFO=y \ CONFIG_KCSAN=y CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=n \ CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY=n \ CONFIG_KCSAN_REPORT_ONCE_IN_MS=100000 CONFIG_KCSAN_VERBOSE=y \ CONFIG_KCSAN_INTERRUPT_WATCHER=y CONFIG_PROVE_LOCKING=y" \ --configs TREE03 => No longer hangs and runs to completion as expected. Reported-by: Paul E. McKenney Signed-off-by: Marco Elver Signed-off-by: Paul E. McKenney --- kernel/kcsan/core.c | 31 ++++-- kernel/kcsan/kcsan.h | 3 +- kernel/kcsan/report.c | 212 ++++++++++++++++++++---------------------- 3 files changed, 124 insertions(+), 122 deletions(-) diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c index f1c38620e3cf8..4d8ea0fca5f11 100644 --- a/kernel/kcsan/core.c +++ b/kernel/kcsan/core.c @@ -69,7 +69,6 @@ static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = { * slot=9: [10, 11, 9] * slot=63: [64, 65, 63] */ -#define NUM_SLOTS (1 + 2*KCSAN_CHECK_ADJACENT) #define SLOT_IDX(slot, i) (slot + ((i + KCSAN_CHECK_ADJACENT) % NUM_SLOTS)) /* @@ -171,12 +170,16 @@ try_consume_watchpoint(atomic_long_t *watchpoint, long encoded_watchpoint) return atomic_long_try_cmpxchg_relaxed(watchpoint, &encoded_watchpoint, CONSUMED_WATCHPOINT); } -/* - * Return true if watchpoint was not touched, false if consumed. - */ -static inline bool remove_watchpoint(atomic_long_t *watchpoint) +/* Return true if watchpoint was not touched, false if already consumed. */ +static inline bool consume_watchpoint(atomic_long_t *watchpoint) { - return atomic_long_xchg_relaxed(watchpoint, INVALID_WATCHPOINT) != CONSUMED_WATCHPOINT; + return atomic_long_xchg_relaxed(watchpoint, CONSUMED_WATCHPOINT) != CONSUMED_WATCHPOINT; +} + +/* Remove the watchpoint -- its slot may be reused after. */ +static inline void remove_watchpoint(atomic_long_t *watchpoint) +{ + atomic_long_set(watchpoint, INVALID_WATCHPOINT); } static __always_inline struct kcsan_ctx *get_ctx(void) @@ -322,7 +325,8 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr, if (consumed) { kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_MAYBE, - KCSAN_REPORT_CONSUMED_WATCHPOINT); + KCSAN_REPORT_CONSUMED_WATCHPOINT, + watchpoint - watchpoints); } else { /* * The other thread may not print any diagnostics, as it has @@ -470,7 +474,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type) value_change = KCSAN_VALUE_CHANGE_TRUE; /* Check if this access raced with another. */ - if (!remove_watchpoint(watchpoint)) { + if (!consume_watchpoint(watchpoint)) { /* * Depending on the access type, map a value_change of MAYBE to * TRUE (always report) or FALSE (never report). @@ -500,7 +504,8 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type) if (is_assert && value_change == KCSAN_VALUE_CHANGE_TRUE) kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES); - kcsan_report(ptr, size, type, value_change, KCSAN_REPORT_RACE_SIGNAL); + kcsan_report(ptr, size, type, value_change, KCSAN_REPORT_RACE_SIGNAL, + watchpoint - watchpoints); } else if (value_change == KCSAN_VALUE_CHANGE_TRUE) { /* Inferring a race, since the value should not have changed. 
*/ @@ -510,9 +515,15 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type) if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert) kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_TRUE, - KCSAN_REPORT_RACE_UNKNOWN_ORIGIN); + KCSAN_REPORT_RACE_UNKNOWN_ORIGIN, + watchpoint - watchpoints); } + /* + * Remove watchpoint; must be after reporting, since the slot may be + * reused after this point. + */ + remove_watchpoint(watchpoint); kcsan_counter_dec(KCSAN_COUNTER_USED_WATCHPOINTS); out_unlock: if (!kcsan_interrupt_watcher) diff --git a/kernel/kcsan/kcsan.h b/kernel/kcsan/kcsan.h index 6630dfe32f31f..763d6d08d94bb 100644 --- a/kernel/kcsan/kcsan.h +++ b/kernel/kcsan/kcsan.h @@ -12,6 +12,7 @@ /* The number of adjacent watchpoints to check. */ #define KCSAN_CHECK_ADJACENT 1 +#define NUM_SLOTS (1 + 2*KCSAN_CHECK_ADJACENT) extern unsigned int kcsan_udelay_task; extern unsigned int kcsan_udelay_interrupt; @@ -136,6 +137,6 @@ enum kcsan_report_type { */ extern void kcsan_report(const volatile void *ptr, size_t size, int access_type, enum kcsan_value_change value_change, - enum kcsan_report_type type); + enum kcsan_report_type type, int watchpoint_idx); #endif /* _KERNEL_KCSAN_KCSAN_H */ diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c index de234d1c1b3d9..ae0a383238eae 100644 --- a/kernel/kcsan/report.c +++ b/kernel/kcsan/report.c @@ -30,9 +30,7 @@ struct access_info { /* * Other thread info: communicated from other racing thread to thread that set - * up the watchpoint, which then prints the complete report atomically. Only - * need one struct, as all threads should to be serialized regardless to print - * the reports, with reporting being in the slow-path. + * up the watchpoint, which then prints the complete report atomically. */ struct other_info { struct access_info ai; @@ -59,7 +57,11 @@ struct other_info { struct task_struct *task; }; -static struct other_info other_infos[1]; +/* + * To never block any producers of struct other_info, we need as many elements + * as we have watchpoints (upper bound on concurrent races to report). + */ +static struct other_info other_infos[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1]; /* * Information about reported races; used to rate limit reporting. @@ -96,10 +98,11 @@ struct report_time { static struct report_time report_times[REPORT_TIMES_SIZE]; /* - * This spinlock protects reporting and other_info, since other_info is usually - * required when reporting. + * Spinlock serializing report generation, and access to @other_infos. Although + * it could make sense to have a finer-grained locking story for @other_infos, + * report generation needs to be serialized either way, so not much is gained. */ -static DEFINE_SPINLOCK(report_lock); +static DEFINE_RAW_SPINLOCK(report_lock); /* * Checks if the race identified by thread frames frame1 and frame2 has @@ -395,9 +398,13 @@ static bool print_report(enum kcsan_value_change value_change, static void release_report(unsigned long *flags, struct other_info *other_info) { if (other_info) - other_info->ai.ptr = NULL; /* Mark for reuse. */ + /* + * Use size to denote valid/invalid, since KCSAN entirely + * ignores 0-sized accesses. 
+ */ + other_info->ai.size = 0; - spin_unlock_irqrestore(&report_lock, *flags); + raw_spin_unlock_irqrestore(&report_lock, *flags); } /* @@ -435,14 +442,14 @@ static void set_other_info_task_blocking(unsigned long *flags, */ set_current_state(TASK_UNINTERRUPTIBLE); } - spin_unlock_irqrestore(&report_lock, *flags); + raw_spin_unlock_irqrestore(&report_lock, *flags); /* * We cannot call schedule() since we also cannot reliably * determine if sleeping here is permitted -- see in_atomic(). */ udelay(1); - spin_lock_irqsave(&report_lock, *flags); + raw_spin_lock_irqsave(&report_lock, *flags); if (timeout-- < 0) { /* * Abort. Reset @other_info->task to NULL, since it @@ -454,128 +461,107 @@ static void set_other_info_task_blocking(unsigned long *flags, break; } /* - * If @ptr nor @current matches, then our information has been - * consumed and we may continue. If not, retry. + * If invalid, or @ptr nor @current matches, then @other_info + * has been consumed and we may continue. If not, retry. */ - } while (other_info->ai.ptr == ai->ptr && other_info->task == current); + } while (other_info->ai.size && other_info->ai.ptr == ai->ptr && + other_info->task == current); if (is_running) set_current_state(TASK_RUNNING); } -/* - * Depending on the report type either sets other_info and returns false, or - * acquires the matching other_info and returns true. If other_info is not - * required for the report type, simply acquires report_lock and returns true. - */ -static bool prepare_report(unsigned long *flags, enum kcsan_report_type type, - const struct access_info *ai, struct other_info *other_info) +/* Populate @other_info; requires that the provided @other_info not in use. */ +static void prepare_report_producer(unsigned long *flags, + const struct access_info *ai, + struct other_info *other_info) { - if (type != KCSAN_REPORT_CONSUMED_WATCHPOINT && - type != KCSAN_REPORT_RACE_SIGNAL) { - /* other_info not required; just acquire report_lock */ - spin_lock_irqsave(&report_lock, *flags); - return true; - } + raw_spin_lock_irqsave(&report_lock, *flags); -retry: - spin_lock_irqsave(&report_lock, *flags); + /* + * The same @other_infos entry cannot be used concurrently, because + * there is a one-to-one mapping to watchpoint slots (@watchpoints in + * core.c), and a watchpoint is only released for reuse after reporting + * is done by the consumer of @other_info. Therefore, it is impossible + * for another concurrent prepare_report_producer() to set the same + * @other_info, and are guaranteed exclusivity for the @other_infos + * entry pointed to by @other_info. + * + * To check this property holds, size should never be non-zero here, + * because every consumer of struct other_info resets size to 0 in + * release_report(). + */ + WARN_ON(other_info->ai.size); - switch (type) { - case KCSAN_REPORT_CONSUMED_WATCHPOINT: - if (other_info->ai.ptr) - break; /* still in use, retry */ + other_info->ai = *ai; + other_info->num_stack_entries = stack_trace_save(other_info->stack_entries, NUM_STACK_ENTRIES, 2); - other_info->ai = *ai; - other_info->num_stack_entries = stack_trace_save(other_info->stack_entries, NUM_STACK_ENTRIES, 1); + if (IS_ENABLED(CONFIG_KCSAN_VERBOSE)) + set_other_info_task_blocking(flags, ai, other_info); - if (IS_ENABLED(CONFIG_KCSAN_VERBOSE)) - set_other_info_task_blocking(flags, ai, other_info); + raw_spin_unlock_irqrestore(&report_lock, *flags); +} - spin_unlock_irqrestore(&report_lock, *flags); +/* Awaits producer to fill @other_info and then returns. 
*/ +static bool prepare_report_consumer(unsigned long *flags, + const struct access_info *ai, + struct other_info *other_info) +{ - /* - * The other thread will print the summary; other_info may now - * be consumed. - */ - return false; + raw_spin_lock_irqsave(&report_lock, *flags); + while (!other_info->ai.size) { /* Await valid @other_info. */ + raw_spin_unlock_irqrestore(&report_lock, *flags); + cpu_relax(); + raw_spin_lock_irqsave(&report_lock, *flags); + } - case KCSAN_REPORT_RACE_SIGNAL: - if (!other_info->ai.ptr) - break; /* no data available yet, retry */ + /* Should always have a matching access based on watchpoint encoding. */ + if (WARN_ON(!matching_access((unsigned long)other_info->ai.ptr & WATCHPOINT_ADDR_MASK, other_info->ai.size, + (unsigned long)ai->ptr & WATCHPOINT_ADDR_MASK, ai->size))) + goto discard; + if (!matching_access((unsigned long)other_info->ai.ptr, other_info->ai.size, + (unsigned long)ai->ptr, ai->size)) { /* - * First check if this is the other_info we are expecting, i.e. - * matches based on how watchpoint was encoded. + * If the actual accesses to not match, this was a false + * positive due to watchpoint encoding. */ - if (!matching_access((unsigned long)other_info->ai.ptr & WATCHPOINT_ADDR_MASK, other_info->ai.size, - (unsigned long)ai->ptr & WATCHPOINT_ADDR_MASK, ai->size)) - break; /* mismatching watchpoint, retry */ - - if (!matching_access((unsigned long)other_info->ai.ptr, other_info->ai.size, - (unsigned long)ai->ptr, ai->size)) { - /* - * If the actual accesses to not match, this was a false - * positive due to watchpoint encoding. - */ - kcsan_counter_inc(KCSAN_COUNTER_ENCODING_FALSE_POSITIVES); - - /* discard this other_info */ - release_report(flags, other_info); - return false; - } + kcsan_counter_inc(KCSAN_COUNTER_ENCODING_FALSE_POSITIVES); + goto discard; + } - if (!((ai->access_type | other_info->ai.access_type) & KCSAN_ACCESS_WRITE)) { - /* - * While the address matches, this is not the other_info - * from the thread that consumed our watchpoint, since - * neither this nor the access in other_info is a write. - * It is invalid to continue with the report, since we - * only have information about reads. - * - * This can happen due to concurrent races on the same - * address, with at least 4 threads. To avoid locking up - * other_info and all other threads, we have to consume - * it regardless. - * - * A concrete case to illustrate why we might lock up if - * we do not consume other_info: - * - * We have 4 threads, all accessing the same address - * (or matching address ranges). Assume the following - * watcher and watchpoint consumer pairs: - * write1-read1, read2-write2. The first to populate - * other_info is write2, however, write1 consumes it, - * resulting in a report of write1-write2. This report - * is valid, however, now read1 populates other_info; - * read2-read1 is an invalid conflict, yet, no other - * conflicting access is left. Therefore, we must - * consume read1's other_info. - * - * Since this case is assumed to be rare, it is - * reasonable to omit this report: one of the other - * reports includes information about the same shared - * data, and at this point the likelihood that we - * re-report the same race again is high. - */ - release_report(flags, other_info); - return false; - } + return true; - /* Matching access in other_info. 
*/ - return true; +discard: + release_report(flags, other_info); + return false; +} +/* + * Depending on the report type either sets @other_info and returns false, or + * awaits @other_info and returns true. If @other_info is not required for the + * report type, simply acquires @report_lock and returns true. + */ +static noinline bool prepare_report(unsigned long *flags, + enum kcsan_report_type type, + const struct access_info *ai, + struct other_info *other_info) +{ + switch (type) { + case KCSAN_REPORT_CONSUMED_WATCHPOINT: + prepare_report_producer(flags, ai, other_info); + return false; + case KCSAN_REPORT_RACE_SIGNAL: + return prepare_report_consumer(flags, ai, other_info); default: - BUG(); + /* @other_info not required; just acquire @report_lock. */ + raw_spin_lock_irqsave(&report_lock, *flags); + return true; } - - spin_unlock_irqrestore(&report_lock, *flags); - - goto retry; } void kcsan_report(const volatile void *ptr, size_t size, int access_type, enum kcsan_value_change value_change, - enum kcsan_report_type type) + enum kcsan_report_type type, int watchpoint_idx) { unsigned long flags = 0; const struct access_info ai = { @@ -586,7 +572,11 @@ void kcsan_report(const volatile void *ptr, size_t size, int access_type, .cpu_id = raw_smp_processor_id() }; struct other_info *other_info = type == KCSAN_REPORT_RACE_UNKNOWN_ORIGIN - ? NULL : &other_infos[0]; + ? NULL : &other_infos[watchpoint_idx]; + + kcsan_disable_current(); + if (WARN_ON(watchpoint_idx < 0 || watchpoint_idx >= ARRAY_SIZE(other_infos))) + goto out; /* * With TRACE_IRQFLAGS, lockdep's IRQ trace state becomes corrupted if @@ -596,7 +586,6 @@ void kcsan_report(const volatile void *ptr, size_t size, int access_type, */ lockdep_off(); - kcsan_disable_current(); if (prepare_report(&flags, type, &ai, other_info)) { /* * Never report if value_change is FALSE, only if we it is @@ -611,7 +600,8 @@ void kcsan_report(const volatile void *ptr, size_t size, int access_type, release_report(&flags, other_info); } - kcsan_enable_current(); lockdep_on(); +out: + kcsan_enable_current(); } -- GitLab From 757a4cefde76697af2b2c284c8a320912b77e7e6 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Wed, 25 Mar 2020 17:41:56 +0100 Subject: [PATCH 0059/1971] kcsan: Add support for scoped accesses This adds support for scoped accesses, where the memory range is checked for the duration of the scope. The feature is implemented by inserting the relevant access information into a list of scoped accesses for the current execution context, which are then checked (until removed) on every call (through instrumentation) into the KCSAN runtime. An alternative, more complex, implementation could set up a watchpoint for the scoped access, and keep the watchpoint set up. This, however, would require first exposing a handle to the watchpoint, as well as dealing with cases such as accesses by the same thread while the watchpoint is still set up (and several more cases). It is also doubtful if this would provide any benefit, since the majority of delay where the watchpoint is set up is likely due to the injected delays by KCSAN. Therefore, the implementation in this patch is simpler and avoids hurting KCSAN's main use-case (normal data race detection); it also implicitly increases scoped-access race-detection-ability due to increased probability of setting up watchpoints by repeatedly calling __kcsan_check_access() throughout the scope of the access. The implementation required adding an additional conditional branch to the fast-path. 
However, the microbenchmark showed a *speedup* of ~5% on the fast-path. This appears to be due to subtly improved codegen by GCC from moving get_ctx() and associated load of preempt_count earlier. Suggested-by: Boqun Feng Suggested-by: Paul E. McKenney Signed-off-by: Marco Elver Signed-off-by: Paul E. McKenney --- include/linux/kcsan-checks.h | 57 +++++++++++++++++++++++++ include/linux/kcsan.h | 3 ++ init/init_task.c | 1 + kernel/kcsan/core.c | 83 ++++++++++++++++++++++++++++++++---- kernel/kcsan/report.c | 33 +++++++++----- 5 files changed, 158 insertions(+), 19 deletions(-) diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h index 3cd8bb03eb417..b24253d3a4427 100644 --- a/include/linux/kcsan-checks.h +++ b/include/linux/kcsan-checks.h @@ -3,6 +3,8 @@ #ifndef _LINUX_KCSAN_CHECKS_H #define _LINUX_KCSAN_CHECKS_H +/* Note: Only include what is already included by compiler.h. */ +#include #include /* @@ -12,10 +14,12 @@ * WRITE : write access; * ATOMIC: access is atomic; * ASSERT: access is not a regular access, but an assertion; + * SCOPED: access is a scoped access; */ #define KCSAN_ACCESS_WRITE 0x1 #define KCSAN_ACCESS_ATOMIC 0x2 #define KCSAN_ACCESS_ASSERT 0x4 +#define KCSAN_ACCESS_SCOPED 0x8 /* * __kcsan_*: Always calls into the runtime when KCSAN is enabled. This may be used @@ -78,6 +82,52 @@ void kcsan_atomic_next(int n); */ void kcsan_set_access_mask(unsigned long mask); +/* Scoped access information. */ +struct kcsan_scoped_access { + struct list_head list; + const volatile void *ptr; + size_t size; + int type; +}; +/* + * Automatically call kcsan_end_scoped_access() when kcsan_scoped_access goes + * out of scope; relies on attribute "cleanup", which is supported by all + * compilers that support KCSAN. + */ +#define __kcsan_cleanup_scoped \ + __maybe_unused __attribute__((__cleanup__(kcsan_end_scoped_access))) + +/** + * kcsan_begin_scoped_access - begin scoped access + * + * Begin scoped access and initialize @sa, which will cause KCSAN to + * continuously check the memory range in the current thread until + * kcsan_end_scoped_access() is called for @sa. + * + * Scoped accesses are implemented by appending @sa to an internal list for the + * current execution context, and then checked on every call into the KCSAN + * runtime. + * + * @ptr: address of access + * @size: size of access + * @type: access type modifier + * @sa: struct kcsan_scoped_access to use for the scope of the access + */ +struct kcsan_scoped_access * +kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type, + struct kcsan_scoped_access *sa); + +/** + * kcsan_end_scoped_access - end scoped access + * + * End a scoped access, which will stop KCSAN checking the memory range. + * Requires that kcsan_begin_scoped_access() was previously called once for @sa. 
+ * + * @sa: a previously initialized struct kcsan_scoped_access + */ +void kcsan_end_scoped_access(struct kcsan_scoped_access *sa); + + #else /* CONFIG_KCSAN */ static inline void __kcsan_check_access(const volatile void *ptr, size_t size, @@ -90,6 +140,13 @@ static inline void kcsan_flat_atomic_end(void) { } static inline void kcsan_atomic_next(int n) { } static inline void kcsan_set_access_mask(unsigned long mask) { } +struct kcsan_scoped_access { }; +#define __kcsan_cleanup_scoped __maybe_unused +static inline struct kcsan_scoped_access * +kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type, + struct kcsan_scoped_access *sa) { return sa; } +static inline void kcsan_end_scoped_access(struct kcsan_scoped_access *sa) { } + #endif /* CONFIG_KCSAN */ /* diff --git a/include/linux/kcsan.h b/include/linux/kcsan.h index 3b84606e1e675..17ae59e4b685c 100644 --- a/include/linux/kcsan.h +++ b/include/linux/kcsan.h @@ -40,6 +40,9 @@ struct kcsan_ctx { * Access mask for all accesses if non-zero. */ unsigned long access_mask; + + /* List of scoped accesses. */ + struct list_head scoped_accesses; }; /** diff --git a/init/init_task.c b/init/init_task.c index 096191d177d5c..198943851cafa 100644 --- a/init/init_task.c +++ b/init/init_task.c @@ -168,6 +168,7 @@ struct task_struct init_task .atomic_nest_count = 0, .in_flat_atomic = false, .access_mask = 0, + .scoped_accesses = {LIST_POISON1, NULL}, }, #endif #ifdef CONFIG_TRACE_IRQFLAGS diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c index 4d8ea0fca5f11..a572aae61b98d 100644 --- a/kernel/kcsan/core.c +++ b/kernel/kcsan/core.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -42,6 +43,7 @@ static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = { .atomic_nest_count = 0, .in_flat_atomic = false, .access_mask = 0, + .scoped_accesses = {LIST_POISON1, NULL}, }; /* @@ -191,12 +193,23 @@ static __always_inline struct kcsan_ctx *get_ctx(void) return in_task() ? ¤t->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx); } +/* Check scoped accesses; never inline because this is a slow-path! */ +static noinline void kcsan_check_scoped_accesses(void) +{ + struct kcsan_ctx *ctx = get_ctx(); + struct list_head *prev_save = ctx->scoped_accesses.prev; + struct kcsan_scoped_access *scoped_access; + + ctx->scoped_accesses.prev = NULL; /* Avoid recursion. */ + list_for_each_entry(scoped_access, &ctx->scoped_accesses, list) + __kcsan_check_access(scoped_access->ptr, scoped_access->size, scoped_access->type); + ctx->scoped_accesses.prev = prev_save; +} + /* Rules for generic atomic accesses. Called from fast-path. */ static __always_inline bool -is_atomic(const volatile void *ptr, size_t size, int type) +is_atomic(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx) { - struct kcsan_ctx *ctx; - if (type & KCSAN_ACCESS_ATOMIC) return true; @@ -213,7 +226,6 @@ is_atomic(const volatile void *ptr, size_t size, int type) IS_ALIGNED((unsigned long)ptr, size)) return true; /* Assume aligned writes up to word size are atomic. */ - ctx = get_ctx(); if (ctx->atomic_next > 0) { /* * Because we do not have separate contexts for nested @@ -233,7 +245,7 @@ is_atomic(const volatile void *ptr, size_t size, int type) } static __always_inline bool -should_watch(const volatile void *ptr, size_t size, int type) +should_watch(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx) { /* * Never set up watchpoints when memory operations are atomic. 
@@ -242,7 +254,7 @@ should_watch(const volatile void *ptr, size_t size, int type) * should not count towards skipped instructions, and (2) to actually * decrement kcsan_atomic_next for consecutive instruction stream. */ - if (is_atomic(ptr, size, type)) + if (is_atomic(ptr, size, type, ctx)) return false; if (this_cpu_dec_return(kcsan_skip) >= 0) @@ -563,8 +575,14 @@ static __always_inline void check_access(const volatile void *ptr, size_t size, if (unlikely(watchpoint != NULL)) kcsan_found_watchpoint(ptr, size, type, watchpoint, encoded_watchpoint); - else if (unlikely(should_watch(ptr, size, type))) - kcsan_setup_watchpoint(ptr, size, type); + else { + struct kcsan_ctx *ctx = get_ctx(); /* Call only once in fast-path. */ + + if (unlikely(should_watch(ptr, size, type, ctx))) + kcsan_setup_watchpoint(ptr, size, type); + else if (unlikely(ctx->scoped_accesses.prev)) + kcsan_check_scoped_accesses(); + } } /* === Public interface ===================================================== */ @@ -660,6 +678,55 @@ void kcsan_set_access_mask(unsigned long mask) } EXPORT_SYMBOL(kcsan_set_access_mask); +struct kcsan_scoped_access * +kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type, + struct kcsan_scoped_access *sa) +{ + struct kcsan_ctx *ctx = get_ctx(); + + __kcsan_check_access(ptr, size, type); + + ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */ + + INIT_LIST_HEAD(&sa->list); + sa->ptr = ptr; + sa->size = size; + sa->type = type; + + if (!ctx->scoped_accesses.prev) /* Lazy initialize list head. */ + INIT_LIST_HEAD(&ctx->scoped_accesses); + list_add(&sa->list, &ctx->scoped_accesses); + + ctx->disable_count--; + return sa; +} +EXPORT_SYMBOL(kcsan_begin_scoped_access); + +void kcsan_end_scoped_access(struct kcsan_scoped_access *sa) +{ + struct kcsan_ctx *ctx = get_ctx(); + + if (WARN(!ctx->scoped_accesses.prev, "Unbalanced %s()?", __func__)) + return; + + ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */ + + list_del(&sa->list); + if (list_empty(&ctx->scoped_accesses)) + /* + * Ensure we do not enter kcsan_check_scoped_accesses() + * slow-path if unnecessary, and avoids requiring list_empty() + * in the fast-path (to avoid a READ_ONCE() and potential + * uaccess warning). 
+ */ + ctx->scoped_accesses.prev = NULL; + + ctx->disable_count--; + + __kcsan_check_access(sa->ptr, sa->size, sa->type); +} +EXPORT_SYMBOL(kcsan_end_scoped_access); + void __kcsan_check_access(const volatile void *ptr, size_t size, int type) { check_access(ptr, size, type); diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c index ae0a383238eae..ddc18f1224a40 100644 --- a/kernel/kcsan/report.c +++ b/kernel/kcsan/report.c @@ -205,6 +205,20 @@ skip_report(enum kcsan_value_change value_change, unsigned long top_frame) static const char *get_access_type(int type) { + if (type & KCSAN_ACCESS_ASSERT) { + if (type & KCSAN_ACCESS_SCOPED) { + if (type & KCSAN_ACCESS_WRITE) + return "assert no accesses (scoped)"; + else + return "assert no writes (scoped)"; + } else { + if (type & KCSAN_ACCESS_WRITE) + return "assert no accesses"; + else + return "assert no writes"; + } + } + switch (type) { case 0: return "read"; @@ -214,17 +228,14 @@ static const char *get_access_type(int type) return "write"; case KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC: return "write (marked)"; - - /* - * ASSERT variants: - */ - case KCSAN_ACCESS_ASSERT: - case KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_ATOMIC: - return "assert no writes"; - case KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE: - case KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC: - return "assert no accesses"; - + case KCSAN_ACCESS_SCOPED: + return "read (scoped)"; + case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_ATOMIC: + return "read (marked, scoped)"; + case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE: + return "write (scoped)"; + case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC: + return "write (marked, scoped)"; default: BUG(); } -- GitLab From 9967683ce5d4ce21829bbc807e006ee33cc68725 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Wed, 25 Mar 2020 17:41:57 +0100 Subject: [PATCH 0060/1971] objtool, kcsan: Add explicit check functions to uaccess whitelist Add explicitly invoked KCSAN check functions to objtool's uaccess whitelist. This is needed in order to permit calling into kcsan_check_scoped_accesses() from the fast-path, which in turn calls __kcsan_check_access(). __kcsan_check_access() is the generic variant of the already whitelisted specializations __tsan_{read,write}N. Signed-off-by: Marco Elver Signed-off-by: Paul E. McKenney --- tools/objtool/check.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/objtool/check.c b/tools/objtool/check.c index b6da413bcbd61..b6a573d56f2ef 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -468,8 +468,10 @@ static const char *uaccess_safe_builtin[] = { "__asan_report_store8_noabort", "__asan_report_store16_noabort", /* KCSAN */ + "__kcsan_check_access", "kcsan_found_watchpoint", "kcsan_setup_watchpoint", + "kcsan_check_scoped_accesses", /* KCSAN/TSAN */ "__tsan_func_entry", "__tsan_func_exit", -- GitLab From d8949ef1d9f1062848cd068cf369a57ce33dae6f Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Wed, 25 Mar 2020 17:41:58 +0100 Subject: [PATCH 0061/1971] kcsan: Introduce scoped ASSERT_EXCLUSIVE macros Introduce ASSERT_EXCLUSIVE_*_SCOPED(), which provide an intuitive interface to use the scoped-access feature, without having to explicitly mark the start and end of the desired scope. Basing duration of the checks on scope avoids accidental misuse and resulting false positives, which may be hard to debug. See added comments for usage. The macros are implemented using __attribute__((__cleanup__(func))), which is supported by all compilers that currently support KCSAN. 
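For readers unfamiliar with the cleanup attribute, here is a minimal user-space sketch (illustrative only, not taken from this patch) of how it gives scope-bound behaviour:

  #include <stdio.h>

  static void end_scope(int *var)
  {
          /* Invoked automatically when 'guard' goes out of scope. */
          printf("leaving scope, guard=%d\n", *var);
  }

  int main(void)
  {
          int guard __attribute__((__cleanup__(end_scope))) = 42;

          printf("inside scope\n");
          return 0;       /* end_scope(&guard) runs on scope exit. */
  }

The ASSERT_EXCLUSIVE_*_SCOPED() macros below rely on the same mechanism to call kcsan_end_scoped_access() automatically at the end of the enclosing scope.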
Suggested-by: Boqun Feng Suggested-by: Paul E. McKenney Signed-off-by: Marco Elver Signed-off-by: Paul E. McKenney --- Documentation/dev-tools/kcsan.rst | 3 +- include/linux/kcsan-checks.h | 73 ++++++++++++++++++++++++++++++- kernel/kcsan/debugfs.c | 16 ++++++- 3 files changed, 89 insertions(+), 3 deletions(-) diff --git a/Documentation/dev-tools/kcsan.rst b/Documentation/dev-tools/kcsan.rst index 52a5d6fb9701f..f4b5766f12cc8 100644 --- a/Documentation/dev-tools/kcsan.rst +++ b/Documentation/dev-tools/kcsan.rst @@ -238,7 +238,8 @@ are defined at the C-language level. The following macros can be used to check properties of concurrent code where bugs would not manifest as data races. .. kernel-doc:: include/linux/kcsan-checks.h - :functions: ASSERT_EXCLUSIVE_WRITER ASSERT_EXCLUSIVE_ACCESS + :functions: ASSERT_EXCLUSIVE_WRITER ASSERT_EXCLUSIVE_WRITER_SCOPED + ASSERT_EXCLUSIVE_ACCESS ASSERT_EXCLUSIVE_ACCESS_SCOPED ASSERT_EXCLUSIVE_BITS Implementation Details diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h index b24253d3a4427..101df7f46d892 100644 --- a/include/linux/kcsan-checks.h +++ b/include/linux/kcsan-checks.h @@ -234,11 +234,63 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size, * ... = READ_ONCE(shared_foo); * } * + * Note: ASSERT_EXCLUSIVE_WRITER_SCOPED(), if applicable, performs more thorough + * checking if a clear scope where no concurrent writes are expected exists. + * * @var: variable to assert on */ #define ASSERT_EXCLUSIVE_WRITER(var) \ __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT) +/* + * Helper macros for implementation of for ASSERT_EXCLUSIVE_*_SCOPED(). @id is + * expected to be unique for the scope in which instances of kcsan_scoped_access + * are declared. + */ +#define __kcsan_scoped_name(c, suffix) __kcsan_scoped_##c##suffix +#define __ASSERT_EXCLUSIVE_SCOPED(var, type, id) \ + struct kcsan_scoped_access __kcsan_scoped_name(id, _) \ + __kcsan_cleanup_scoped; \ + struct kcsan_scoped_access *__kcsan_scoped_name(id, _dummy_p) \ + __maybe_unused = kcsan_begin_scoped_access( \ + &(var), sizeof(var), KCSAN_ACCESS_SCOPED | (type), \ + &__kcsan_scoped_name(id, _)) + +/** + * ASSERT_EXCLUSIVE_WRITER_SCOPED - assert no concurrent writes to @var in scope + * + * Scoped variant of ASSERT_EXCLUSIVE_WRITER(). + * + * Assert that there are no concurrent writes to @var for the duration of the + * scope in which it is introduced. This provides a better way to fully cover + * the enclosing scope, compared to multiple ASSERT_EXCLUSIVE_WRITER(), and + * increases the likelihood for KCSAN to detect racing accesses. + * + * For example, it allows finding race-condition bugs that only occur due to + * state changes within the scope itself: + * + * .. code-block:: c + * + * void writer(void) { + * spin_lock(&update_foo_lock); + * { + * ASSERT_EXCLUSIVE_WRITER_SCOPED(shared_foo); + * WRITE_ONCE(shared_foo, 42); + * ... + * // shared_foo should still be 42 here! + * } + * spin_unlock(&update_foo_lock); + * } + * void buggy(void) { + * if (READ_ONCE(shared_foo) == 42) + * WRITE_ONCE(shared_foo, 1); // bug! 
+ * } + * + * @var: variable to assert on + */ +#define ASSERT_EXCLUSIVE_WRITER_SCOPED(var) \ + __ASSERT_EXCLUSIVE_SCOPED(var, KCSAN_ACCESS_ASSERT, __COUNTER__) + /** * ASSERT_EXCLUSIVE_ACCESS - assert no concurrent accesses to @var * @@ -258,6 +310,9 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size, * release_for_reuse(obj); * } * + * Note: ASSERT_EXCLUSIVE_ACCESS_SCOPED(), if applicable, performs more thorough + * checking if a clear scope where no concurrent accesses are expected exists. + * * Note: For cases where the object is freed, `KASAN `_ is a better * fit to detect use-after-free bugs. * @@ -266,10 +321,26 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size, #define ASSERT_EXCLUSIVE_ACCESS(var) \ __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT) +/** + * ASSERT_EXCLUSIVE_ACCESS_SCOPED - assert no concurrent accesses to @var in scope + * + * Scoped variant of ASSERT_EXCLUSIVE_ACCESS(). + * + * Assert that there are no concurrent accesses to @var (no readers nor writers) + * for the entire duration of the scope in which it is introduced. This provides + * a better way to fully cover the enclosing scope, compared to multiple + * ASSERT_EXCLUSIVE_ACCESS(), and increases the likelihood for KCSAN to detect + * racing accesses. + * + * @var: variable to assert on + */ +#define ASSERT_EXCLUSIVE_ACCESS_SCOPED(var) \ + __ASSERT_EXCLUSIVE_SCOPED(var, KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT, __COUNTER__) + /** * ASSERT_EXCLUSIVE_BITS - assert no concurrent writes to subset of bits in @var * - * Bit-granular variant of ASSERT_EXCLUSIVE_WRITER(var). + * Bit-granular variant of ASSERT_EXCLUSIVE_WRITER(). * * Assert that there are no concurrent writes to a subset of bits in @var; * concurrent readers are permitted. This assertion captures more detailed diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c index 72ee188ebc54a..1a08664a7fabb 100644 --- a/kernel/kcsan/debugfs.c +++ b/kernel/kcsan/debugfs.c @@ -110,6 +110,7 @@ static noinline void microbenchmark(unsigned long iters) */ static long test_dummy; static long test_flags; +static long test_scoped; static noinline void test_thread(unsigned long iters) { const long CHANGE_BITS = 0xff00ff00ff00ff00L; @@ -120,7 +121,8 @@ static noinline void test_thread(unsigned long iters) memset(¤t->kcsan_ctx, 0, sizeof(current->kcsan_ctx)); pr_info("KCSAN: %s begin | iters: %lu\n", __func__, iters); - pr_info("test_dummy@%px, test_flags@%px\n", &test_dummy, &test_flags); + pr_info("test_dummy@%px, test_flags@%px, test_scoped@%px,\n", + &test_dummy, &test_flags, &test_scoped); cycles = get_cycles(); while (iters--) { @@ -141,6 +143,18 @@ static noinline void test_thread(unsigned long iters) test_flags ^= CHANGE_BITS; /* generate value-change */ __kcsan_check_write(&test_flags, sizeof(test_flags)); + + BUG_ON(current->kcsan_ctx.scoped_accesses.prev); + { + /* Should generate reports anywhere in this block. */ + ASSERT_EXCLUSIVE_WRITER_SCOPED(test_scoped); + ASSERT_EXCLUSIVE_ACCESS_SCOPED(test_scoped); + BUG_ON(!current->kcsan_ctx.scoped_accesses.prev); + /* Unrelated accesses. 
*/ + __kcsan_check_access(&cycles, sizeof(cycles), 0); + __kcsan_check_access(&cycles, sizeof(cycles), KCSAN_ACCESS_ATOMIC); + } + BUG_ON(current->kcsan_ctx.scoped_accesses.prev); } cycles = get_cycles() - cycles; -- GitLab From 01b4ff58f72dbee926077d9afa0650f6e685e866 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Tue, 31 Mar 2020 21:32:32 +0200 Subject: [PATCH 0062/1971] kcsan: Move kcsan_{disable,enable}_current() to kcsan-checks.h Both affect access checks, and should therefore be in kcsan-checks.h. This is in preparation to use these in compiler.h. Acked-by: Will Deacon Signed-off-by: Marco Elver Signed-off-by: Paul E. McKenney --- include/linux/kcsan-checks.h | 16 ++++++++++++++++ include/linux/kcsan.h | 16 ---------------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h index 101df7f46d892..ef95ddc491825 100644 --- a/include/linux/kcsan-checks.h +++ b/include/linux/kcsan-checks.h @@ -36,6 +36,20 @@ */ void __kcsan_check_access(const volatile void *ptr, size_t size, int type); +/** + * kcsan_disable_current - disable KCSAN for the current context + * + * Supports nesting. + */ +void kcsan_disable_current(void); + +/** + * kcsan_enable_current - re-enable KCSAN for the current context + * + * Supports nesting. + */ +void kcsan_enable_current(void); + /** * kcsan_nestable_atomic_begin - begin nestable atomic region * @@ -133,6 +147,8 @@ void kcsan_end_scoped_access(struct kcsan_scoped_access *sa); static inline void __kcsan_check_access(const volatile void *ptr, size_t size, int type) { } +static inline void kcsan_disable_current(void) { } +static inline void kcsan_enable_current(void) { } static inline void kcsan_nestable_atomic_begin(void) { } static inline void kcsan_nestable_atomic_end(void) { } static inline void kcsan_flat_atomic_begin(void) { } diff --git a/include/linux/kcsan.h b/include/linux/kcsan.h index 17ae59e4b685c..53340d8789f97 100644 --- a/include/linux/kcsan.h +++ b/include/linux/kcsan.h @@ -50,25 +50,9 @@ struct kcsan_ctx { */ void kcsan_init(void); -/** - * kcsan_disable_current - disable KCSAN for the current context - * - * Supports nesting. - */ -void kcsan_disable_current(void); - -/** - * kcsan_enable_current - re-enable KCSAN for the current context - * - * Supports nesting. - */ -void kcsan_enable_current(void); - #else /* CONFIG_KCSAN */ static inline void kcsan_init(void) { } -static inline void kcsan_disable_current(void) { } -static inline void kcsan_enable_current(void) { } #endif /* CONFIG_KCSAN */ -- GitLab From d071e91361bbfef524ae8abf7e560fb294d0ad64 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Tue, 31 Mar 2020 21:32:33 +0200 Subject: [PATCH 0063/1971] kcsan: Change data_race() to no longer require marking racing accesses Thus far, accesses marked with data_race() would still require the racing access to be marked in some way (be it with READ_ONCE(), WRITE_ONCE(), or data_race() itself), as otherwise KCSAN would still report a data race. This requirement, however, seems to be unintuitive, and some valid use-cases demand *not* marking other accesses, as it might hide more serious bugs (e.g. diagnostic reads). Therefore, this commit changes data_race() to no longer require marking racing accesses (although it's still recommended if possible). 
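[Editor's note, not part of the patch: a hedged sketch of the kind of diagnostic read this change enables; the variable and function names are invented, only data_race() and pr_info() are real kernel interfaces.]

#include <linux/compiler.h>
#include <linux/printk.h>

static unsigned long stats_counter;	/* invented example variable */

static void update_path(void)
{
	stats_counter++;	/* plain, intentionally unmarked write */
}

static void diagnostic_path(void)
{
	/*
	 * The read is wrapped in data_race(); with this change KCSAN no
	 * longer reports the race with the unmarked writer above, whereas
	 * previously the writer also had to be marked.
	 */
	pr_info("stats: %lu\n", data_race(stats_counter));
}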
The alternative would have been introducing another variant of data_race(), however, since usage of data_race() already needs to be carefully reasoned about, distinguishing between these cases likely adds more complexity in the wrong place. Link: https://lkml.kernel.org/r/20200331131002.GA30975@willie-the-truck Cc: Paul E. McKenney Cc: Will Deacon Cc: Qian Cai Acked-by: Will Deacon Signed-off-by: Marco Elver Signed-off-by: Paul E. McKenney --- include/linux/compiler.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/linux/compiler.h b/include/linux/compiler.h index f504edebd5d71..1729bd17e9b78 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -326,9 +326,9 @@ unsigned long read_word_at_a_time(const void *addr) #define data_race(expr) \ ({ \ typeof(({ expr; })) __val; \ - kcsan_nestable_atomic_begin(); \ + kcsan_disable_current(); \ __val = ({ expr; }); \ - kcsan_nestable_atomic_end(); \ + kcsan_enable_current(); \ __val; \ }) #else -- GitLab From f770ed10a9ee65529f3ec8d90eb374bbd8b7c238 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Fri, 10 Apr 2020 18:44:17 +0200 Subject: [PATCH 0064/1971] kcsan: Fix function matching in report Pass string length as returned by scnprintf() to strnstr(), since strnstr() searches exactly len bytes in haystack, even if it contains a NUL-terminator before haystack+len. Signed-off-by: Marco Elver Signed-off-by: Paul E. McKenney --- kernel/kcsan/report.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c index ddc18f1224a40..cf41d63dd0cdf 100644 --- a/kernel/kcsan/report.c +++ b/kernel/kcsan/report.c @@ -192,11 +192,11 @@ skip_report(enum kcsan_value_change value_change, unsigned long top_frame) * maintainers. */ char buf[64]; + int len = scnprintf(buf, sizeof(buf), "%ps", (void *)top_frame); - snprintf(buf, sizeof(buf), "%ps", (void *)top_frame); - if (!strnstr(buf, "rcu_", sizeof(buf)) && - !strnstr(buf, "_rcu", sizeof(buf)) && - !strnstr(buf, "_srcu", sizeof(buf))) + if (!strnstr(buf, "rcu_", len) && + !strnstr(buf, "_rcu", len) && + !strnstr(buf, "_srcu", len)) return true; } @@ -262,15 +262,15 @@ static const char *get_thread_desc(int task_id) static int get_stack_skipnr(const unsigned long stack_entries[], int num_entries) { char buf[64]; + int len; int skip = 0; for (; skip < num_entries; ++skip) { - snprintf(buf, sizeof(buf), "%ps", (void *)stack_entries[skip]); - if (!strnstr(buf, "csan_", sizeof(buf)) && - !strnstr(buf, "tsan_", sizeof(buf)) && - !strnstr(buf, "_once_size", sizeof(buf))) { + len = scnprintf(buf, sizeof(buf), "%ps", (void *)stack_entries[skip]); + if (!strnstr(buf, "csan_", len) && + !strnstr(buf, "tsan_", len) && + !strnstr(buf, "_once_size", len)) break; - } } return skip; } -- GitLab From cdb9b07d8c78be63d72aba9a2686ff161ddd2099 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Fri, 10 Apr 2020 18:44:18 +0200 Subject: [PATCH 0065/1971] kcsan: Make reporting aware of KCSAN tests Reporting hides KCSAN runtime functions in the stack trace, with filtering done based on function names. Currently this included all functions (or modules) that would match "kcsan_". Make the filter aware of KCSAN tests, which contain "kcsan_test", and are no longer skipped in the report. This is in preparation for adding a KCSAN test module. Signed-off-by: Marco Elver Signed-off-by: Paul E. 
McKenney --- kernel/kcsan/report.c | 30 +++++++++++++++++++++++------- 1 file changed, 23 insertions(+), 7 deletions(-) diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c index cf41d63dd0cdf..ac5f8345bae90 100644 --- a/kernel/kcsan/report.c +++ b/kernel/kcsan/report.c @@ -262,16 +262,32 @@ static const char *get_thread_desc(int task_id) static int get_stack_skipnr(const unsigned long stack_entries[], int num_entries) { char buf[64]; - int len; - int skip = 0; + char *cur; + int len, skip; - for (; skip < num_entries; ++skip) { + for (skip = 0; skip < num_entries; ++skip) { len = scnprintf(buf, sizeof(buf), "%ps", (void *)stack_entries[skip]); - if (!strnstr(buf, "csan_", len) && - !strnstr(buf, "tsan_", len) && - !strnstr(buf, "_once_size", len)) - break; + + /* Never show tsan_* or {read,write}_once_size. */ + if (strnstr(buf, "tsan_", len) || + strnstr(buf, "_once_size", len)) + continue; + + cur = strnstr(buf, "kcsan_", len); + if (cur) { + cur += sizeof("kcsan_") - 1; + if (strncmp(cur, "test", sizeof("test") - 1)) + continue; /* KCSAN runtime function. */ + /* KCSAN related test. */ + } + + /* + * No match for runtime functions -- @skip entries to skip to + * get to first frame of interest. + */ + break; } + return skip; } -- GitLab From ee25d9742dabed3fd18158b518f846abeb70f319 Mon Sep 17 00:00:00 2001 From: Rikard Falkeborn Date: Tue, 17 Mar 2020 22:13:32 +0100 Subject: [PATCH 0066/1971] clk: sunxi: Fix incorrect usage of round_down() round_down() can only round to powers of 2. If round_down() is asked to round to something that is not a power of 2, incorrect results are produced. The incorrect results can be both too large and too small. Instead, use rounddown() which can round to any number. Fixes: 6a721db180a2 ("clk: sunxi: Add A31 clocks support") Signed-off-by: Rikard Falkeborn Signed-off-by: Maxime Ripard --- drivers/clk/sunxi/clk-sunxi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c index 27201fd26e442..e1aa1fbac48a0 100644 --- a/drivers/clk/sunxi/clk-sunxi.c +++ b/drivers/clk/sunxi/clk-sunxi.c @@ -90,7 +90,7 @@ static void sun6i_a31_get_pll1_factors(struct factors_request *req) * Round down the frequency to the closest multiple of either * 6 or 16 */ - u32 round_freq_6 = round_down(freq_mhz, 6); + u32 round_freq_6 = rounddown(freq_mhz, 6); u32 round_freq_16 = round_down(freq_mhz, 16); if (round_freq_6 > round_freq_16) -- GitLab From 91a577e77fdf55bdc6cd863581658761908c1686 Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Thu, 26 Mar 2020 14:30:08 +0900 Subject: [PATCH 0067/1971] dt-bindings: clock: renesas: rcar-usb2-clock-sel: Add r8a77961 support This patch adds support for r8a77961 (R-Car M3-W+). To avoid confusion between R-Car M3-W (R8A77960) and R-Car M3-W+ (R8A77961), this patch also updates the comment of "renesas,r8a7796-rcar-usb2-clock-sel". 
Signed-off-by: Yoshihiro Shimoda Link: https://lore.kernel.org/r/1585200608-30103-1-git-send-email-yoshihiro.shimoda.uh@renesas.com Signed-off-by: Geert Uytterhoeven --- .../devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt b/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt index 4bf6f53bd95ea..da92f5748deee 100644 --- a/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt +++ b/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt @@ -27,7 +27,9 @@ Required properties: - compatible: "renesas,r8a7795-rcar-usb2-clock-sel" if the device is a part of an R8A7795 SoC. "renesas,r8a7796-rcar-usb2-clock-sel" if the device if a part of - an R8A7796 SoC. + an R8A77960 SoC. + "renesas,r8a77961-rcar-usb2-clock-sel" if the device if a part of + an R8A77961 SoC. "renesas,rcar-gen3-usb2-clock-sel" for a generic R-Car Gen3 compatible device. -- GitLab From cdfdeb4a381d97d441ac421a86a2e8158346ad51 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Mon, 13 Apr 2020 06:17:09 +0200 Subject: [PATCH 0068/1971] clk: renesas: r9a06g032: Fix some typo in comments This file seems to be for R9A06G032 only. So replace reference to R9A09G032 by R9A06G032 to avoid confusion. AFAIK, R9A09G032 does'nt exist. Signed-off-by: Christophe JAILLET Link: https://lore.kernel.org/r/20200413041709.3630-1-christophe.jaillet@wanadoo.fr Signed-off-by: Geert Uytterhoeven --- drivers/clk/renesas/r9a06g032-clocks.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c index 1907ee195a08c..d900f6bf53d0b 100644 --- a/drivers/clk/renesas/r9a06g032-clocks.c +++ b/drivers/clk/renesas/r9a06g032-clocks.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * R9A09G032 clock driver + * R9A06G032 clock driver * * Copyright (C) 2018 Renesas Electronics Europe Limited * @@ -338,8 +338,8 @@ clk_rdesc_get(struct r9a06g032_priv *clocks, } /* - * This implements the R9A09G032 clock gate 'driver'. We cannot use the system's - * clock gate framework as the gates on the R9A09G032 have a special enabling + * This implements the R9A06G032 clock gate 'driver'. We cannot use the system's + * clock gate framework as the gates on the R9A06G032 have a special enabling * sequence, therefore we use this little proxy. */ struct r9a06g032_clk_gate { -- GitLab From d2bc4dd91da6095a769fdc9bc519d3be7ad5f97a Mon Sep 17 00:00:00 2001 From: Anson Huang Date: Thu, 26 Mar 2020 11:13:31 +0800 Subject: [PATCH 0069/1971] thermal: imx_sc_thermal: Add hwmon support Expose i.MX SC thermal sensors as HWMON devices. 
Signed-off-by: Anson Huang Reviewed-by: Amit Kucheria Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/1585192411-25593-1-git-send-email-Anson.Huang@nxp.com --- drivers/thermal/imx_sc_thermal.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/thermal/imx_sc_thermal.c b/drivers/thermal/imx_sc_thermal.c index a8723b1eb8b04..b2b68c9437382 100644 --- a/drivers/thermal/imx_sc_thermal.c +++ b/drivers/thermal/imx_sc_thermal.c @@ -14,6 +14,7 @@ #include #include "thermal_core.h" +#include "thermal_hwmon.h" #define IMX_SC_MISC_FUNC_GET_TEMP 13 @@ -115,6 +116,9 @@ static int imx_sc_thermal_probe(struct platform_device *pdev) ret = PTR_ERR(sensor->tzd); break; } + + if (devm_thermal_add_hwmon_sysfs(sensor->tzd)) + dev_warn(&pdev->dev, "failed to add hwmon sysfs attributes\n"); } of_node_put(sensor_np); -- GitLab From bceb5646a15dad49ceb29ec16b8acc10075d0627 Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Tue, 31 Mar 2020 18:54:48 +0200 Subject: [PATCH 0070/1971] thermal: core: Make thermal_zone_set_trips private The function thermal_zone_set_trips() is used by the thermal core code in order to update the next trip points, there are no other users. Move the function definition in the thermal_core.h, remove the EXPORT_SYMBOL_GPL and document the function. Signed-off-by: Daniel Lezcano Reviewed-by: Lukasz Luba Reviewed-by: Amit Kucheria Link: https://lore.kernel.org/r/20200331165449.30355-1-daniel.lezcano@linaro.org --- drivers/thermal/thermal_core.h | 3 +++ drivers/thermal/thermal_helpers.c | 13 ++++++++++++- include/linux/thermal.h | 3 --- 3 files changed, 15 insertions(+), 4 deletions(-) diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h index a9bf00e91d649..37cd4e2bead21 100644 --- a/drivers/thermal/thermal_core.h +++ b/drivers/thermal/thermal_core.h @@ -69,6 +69,9 @@ void thermal_zone_device_unbind_exception(struct thermal_zone_device *, int thermal_zone_device_set_policy(struct thermal_zone_device *, char *); int thermal_build_list_of_policies(char *buf); +/* Helpers */ +void thermal_zone_set_trips(struct thermal_zone_device *tz); + /* sysfs I/F */ int thermal_zone_create_device_groups(struct thermal_zone_device *, int); void thermal_zone_destroy_device_groups(struct thermal_zone_device *); diff --git a/drivers/thermal/thermal_helpers.c b/drivers/thermal/thermal_helpers.c index 2ba756af76b7f..59eaf2d0fdb32 100644 --- a/drivers/thermal/thermal_helpers.c +++ b/drivers/thermal/thermal_helpers.c @@ -113,6 +113,18 @@ exit: } EXPORT_SYMBOL_GPL(thermal_zone_get_temp); +/** + * thermal_zone_set_trips - Computes the next trip points for the driver + * @tz: a pointer to a thermal zone device structure + * + * The function computes the next temperature boundaries by browsing + * the trip points. The result is the closer low and high trip points + * to the current temperature. These values are passed to the backend + * driver to let it set its own notification mechanism (usually an + * interrupt). 
+ * + * It does not return a value + */ void thermal_zone_set_trips(struct thermal_zone_device *tz) { int low = -INT_MAX; @@ -161,7 +173,6 @@ void thermal_zone_set_trips(struct thermal_zone_device *tz) exit: mutex_unlock(&tz->lock); } -EXPORT_SYMBOL_GPL(thermal_zone_set_trips); void thermal_cdev_update(struct thermal_cooling_device *cdev) { diff --git a/include/linux/thermal.h b/include/linux/thermal.h index c91b1e344d56d..448841ab0dca4 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -439,7 +439,6 @@ int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int, struct thermal_cooling_device *); void thermal_zone_device_update(struct thermal_zone_device *, enum thermal_notify_event); -void thermal_zone_set_trips(struct thermal_zone_device *); struct thermal_cooling_device *thermal_cooling_device_register(const char *, void *, const struct thermal_cooling_device_ops *); @@ -497,8 +496,6 @@ static inline int thermal_zone_unbind_cooling_device( static inline void thermal_zone_device_update(struct thermal_zone_device *tz, enum thermal_notify_event event) { } -static inline void thermal_zone_set_trips(struct thermal_zone_device *tz) -{ } static inline struct thermal_cooling_device * thermal_cooling_device_register(char *type, void *devdata, const struct thermal_cooling_device_ops *ops) -- GitLab From 44fc73223eebaf4a0c970072819d34ff1d92ce7b Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Tue, 31 Mar 2020 18:54:49 +0200 Subject: [PATCH 0071/1971] thermal: core: Remove pointless debug traces The last temperature and the current temperature are show via a dev_debug. The line before, those temperature are also traced. It is pointless to duplicate the traces for the temperatures, remove the dev_dbg traces. Signed-off-by: Daniel Lezcano Reviewed-by: Lukasz Luba Reviewed-by: Amit Kucheria Link: https://lore.kernel.org/r/20200331165449.30355-2-daniel.lezcano@linaro.org --- drivers/thermal/thermal_core.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 9a321dc548c82..c065509309799 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -447,12 +447,6 @@ static void update_temperature(struct thermal_zone_device *tz) mutex_unlock(&tz->lock); trace_thermal_temperature(tz); - if (tz->last_temperature == THERMAL_TEMP_INVALID) - dev_dbg(&tz->device, "last_temperature N/A, current_temperature=%d\n", - tz->temperature); - else - dev_dbg(&tz->device, "last_temperature=%d, current_temperature=%d\n", - tz->last_temperature, tz->temperature); } static void thermal_zone_device_init(struct thermal_zone_device *tz) -- GitLab From 04fa9c804b0ed50a3c6bae1d4b60a2494c81a23f Mon Sep 17 00:00:00 2001 From: Matthias Kaehlcke Date: Wed, 18 Mar 2020 11:45:46 +0000 Subject: [PATCH 0072/1971] thermal: devfreq_cooling: Use PM QoS to set frequency limits Now that devfreq supports limiting the frequency range of a device through PM QoS make use of it instead of disabling OPPs that should not be used. The switch from disabling OPPs to PM QoS introduces a subtle behavioral change in case of conflicting requests (min > max): PM QoS gives precedence to the MIN_FREQUENCY request, while higher OPPs disabled with dev_pm_opp_disable() would override MIN_FREQUENCY. 
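[Editor's note, not part of the patch: a hedged sketch of the PM QoS request lifecycle the patch switches to. The device, function and variable names are placeholders; the dev_pm_qos_* calls and constants are the ones used in the diff below, with values in kHz.]

#include <linux/device.h>
#include <linux/pm_qos.h>

static struct dev_pm_qos_request example_req;	/* invented name */

static int example_cap_frequency(struct device *dev, s32 max_khz)
{
	int err;

	/* Register an (initially unlimited) max-frequency constraint. */
	err = dev_pm_qos_add_request(dev, &example_req,
				     DEV_PM_QOS_MAX_FREQUENCY,
				     PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
	if (err < 0)
		return err;

	/* Lower the cap; PM QoS aggregates this with other requests. */
	err = dev_pm_qos_update_request(&example_req, max_khz);
	return err < 0 ? err : 0;
}

static void example_uncap_frequency(void)
{
	dev_pm_qos_remove_request(&example_req);
}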
Signed-off-by: Matthias Kaehlcke Reviewed-by: Lukasz Luba Reviewed-by: Chanwoo Choi Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/20200318114548.19916-4-lukasz.luba@arm.com --- drivers/thermal/devfreq_cooling.c | 70 ++++++++++--------------------- 1 file changed, 23 insertions(+), 47 deletions(-) diff --git a/drivers/thermal/devfreq_cooling.c b/drivers/thermal/devfreq_cooling.c index a87d4fa031c87..f7f32e98331b1 100644 --- a/drivers/thermal/devfreq_cooling.c +++ b/drivers/thermal/devfreq_cooling.c @@ -24,11 +24,13 @@ #include #include #include +#include #include #include -#define SCALE_ERROR_MITIGATION 100 +#define HZ_PER_KHZ 1000 +#define SCALE_ERROR_MITIGATION 100 static DEFINE_IDA(devfreq_ida); @@ -54,6 +56,8 @@ static DEFINE_IDA(devfreq_ida); * The 'res_util' range is from 100 to (power_table[state] * 100) * for the corresponding 'state'. * @capped_state: index to cooling state with in dynamic power budget + * @req_max_freq: PM QoS request for limiting the maximum frequency + * of the devfreq device. */ struct devfreq_cooling_device { int id; @@ -66,49 +70,9 @@ struct devfreq_cooling_device { struct devfreq_cooling_power *power_ops; u32 res_util; int capped_state; + struct dev_pm_qos_request req_max_freq; }; -/** - * partition_enable_opps() - disable all opps above a given state - * @dfc: Pointer to devfreq we are operating on - * @cdev_state: cooling device state we're setting - * - * Go through the OPPs of the device, enabling all OPPs until - * @cdev_state and disabling those frequencies above it. - */ -static int partition_enable_opps(struct devfreq_cooling_device *dfc, - unsigned long cdev_state) -{ - int i; - struct device *dev = dfc->devfreq->dev.parent; - - for (i = 0; i < dfc->freq_table_size; i++) { - struct dev_pm_opp *opp; - int ret = 0; - unsigned int freq = dfc->freq_table[i]; - bool want_enable = i >= cdev_state ? 
true : false; - - opp = dev_pm_opp_find_freq_exact(dev, freq, !want_enable); - - if (PTR_ERR(opp) == -ERANGE) - continue; - else if (IS_ERR(opp)) - return PTR_ERR(opp); - - dev_pm_opp_put(opp); - - if (want_enable) - ret = dev_pm_opp_enable(dev, freq); - else - ret = dev_pm_opp_disable(dev, freq); - - if (ret) - return ret; - } - - return 0; -} - static int devfreq_cooling_get_max_state(struct thermal_cooling_device *cdev, unsigned long *state) { @@ -135,7 +99,7 @@ static int devfreq_cooling_set_cur_state(struct thermal_cooling_device *cdev, struct devfreq_cooling_device *dfc = cdev->devdata; struct devfreq *df = dfc->devfreq; struct device *dev = df->dev.parent; - int ret; + unsigned long freq; if (state == dfc->cooling_state) return 0; @@ -145,9 +109,10 @@ static int devfreq_cooling_set_cur_state(struct thermal_cooling_device *cdev, if (state >= dfc->freq_table_size) return -EINVAL; - ret = partition_enable_opps(dfc, state); - if (ret) - return ret; + freq = dfc->freq_table[state]; + + dev_pm_qos_update_request(&dfc->req_max_freq, + DIV_ROUND_UP(freq, HZ_PER_KHZ)); dfc->cooling_state = state; @@ -530,9 +495,15 @@ of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df, if (err) goto free_dfc; - err = ida_simple_get(&devfreq_ida, 0, 0, GFP_KERNEL); + err = dev_pm_qos_add_request(df->dev.parent, &dfc->req_max_freq, + DEV_PM_QOS_MAX_FREQUENCY, + PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE); if (err < 0) goto free_tables; + + err = ida_simple_get(&devfreq_ida, 0, 0, GFP_KERNEL); + if (err < 0) + goto remove_qos_req; dfc->id = err; snprintf(dev_name, sizeof(dev_name), "thermal-devfreq-%d", dfc->id); @@ -553,6 +524,10 @@ of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df, release_ida: ida_simple_remove(&devfreq_ida, dfc->id); + +remove_qos_req: + dev_pm_qos_remove_request(&dfc->req_max_freq); + free_tables: kfree(dfc->power_table); kfree(dfc->freq_table); @@ -601,6 +576,7 @@ void devfreq_cooling_unregister(struct thermal_cooling_device *cdev) thermal_cooling_device_unregister(dfc->cdev); ida_simple_remove(&devfreq_ida, dfc->id); + dev_pm_qos_remove_request(&dfc->req_max_freq); kfree(dfc->power_table); kfree(dfc->freq_table); -- GitLab From 8097db407a08f33236b6fa6ba8b62d669321c720 Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Thu, 2 Apr 2020 16:27:39 +0200 Subject: [PATCH 0073/1971] thermal: Move default governor config option to the internal header The default governor set at compilation time is a thermal internal business, no need to export to the global thermal header. Move the config options to the internal header. 
Signed-off-by: Daniel Lezcano Reviewed-by: Amit Kucheria Acked-by: Zhang Rui Link: https://lore.kernel.org/r/20200402142747.8307-1-daniel.lezcano@linaro.org --- drivers/thermal/thermal_core.h | 11 +++++++++++ include/linux/thermal.h | 11 ----------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h index 37cd4e2bead21..8283055085568 100644 --- a/drivers/thermal/thermal_core.h +++ b/drivers/thermal/thermal_core.h @@ -12,6 +12,17 @@ #include #include +/* Default Thermal Governor */ +#if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE) +#define DEFAULT_THERMAL_GOVERNOR "step_wise" +#elif defined(CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE) +#define DEFAULT_THERMAL_GOVERNOR "fair_share" +#elif defined(CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE) +#define DEFAULT_THERMAL_GOVERNOR "user_space" +#elif defined(CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR) +#define DEFAULT_THERMAL_GOVERNOR "power_allocator" +#endif + /* Initial state of a cooling device during binding */ #define THERMAL_NO_TARGET -1UL diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 448841ab0dca4..71cff87dcb465 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -32,17 +32,6 @@ /* use value, which < 0K, to indicate an invalid/uninitialized temperature */ #define THERMAL_TEMP_INVALID -274000 -/* Default Thermal Governor */ -#if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE) -#define DEFAULT_THERMAL_GOVERNOR "step_wise" -#elif defined(CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE) -#define DEFAULT_THERMAL_GOVERNOR "fair_share" -#elif defined(CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE) -#define DEFAULT_THERMAL_GOVERNOR "user_space" -#elif defined(CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR) -#define DEFAULT_THERMAL_GOVERNOR "power_allocator" -#endif - struct thermal_zone_device; struct thermal_cooling_device; struct thermal_instance; -- GitLab From c68df440b07f88210c5839d4507b5cbfa35e3df9 Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Thu, 2 Apr 2020 16:27:40 +0200 Subject: [PATCH 0074/1971] thermal: Move struct thermal_attr to the private header The structure belongs to the thermal core internals but it is exported in the include/linux/thermal.h For better self-encapsulation and less impact for the compilation if a change is made on it. Move the structure in the thermal core internal header file. 
Signed-off-by: Daniel Lezcano Reviewed-by: Amit Kucheria Acked-by: Zhang Rui Link: https://lore.kernel.org/r/20200402142747.8307-2-daniel.lezcano@linaro.org --- drivers/thermal/thermal_core.h | 5 +++++ include/linux/thermal.h | 6 +----- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h index 8283055085568..5d08ad60d9df8 100644 --- a/drivers/thermal/thermal_core.h +++ b/drivers/thermal/thermal_core.h @@ -41,6 +41,11 @@ extern struct thermal_governor *__governor_thermal_table_end[]; __governor < __governor_thermal_table_end; \ __governor++) +struct thermal_attr { + struct device_attribute attr; + char name[THERMAL_NAME_LENGTH]; +}; + /* * This structure is used to describe the behavior of * a certain cooling device on a certain trip point diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 71cff87dcb465..5aa80fb2fb617 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -35,6 +35,7 @@ struct thermal_zone_device; struct thermal_cooling_device; struct thermal_instance; +struct thermal_attr; enum thermal_device_mode { THERMAL_DEVICE_DISABLED = 0, @@ -119,11 +120,6 @@ struct thermal_cooling_device { struct list_head node; }; -struct thermal_attr { - struct device_attribute attr; - char name[THERMAL_NAME_LENGTH]; -}; - /** * struct thermal_zone_device - structure for a thermal zone * @id: unique id number for each thermal zone -- GitLab From 33a88af10944edc7fd390000cd6bc9bbde918bc3 Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Thu, 2 Apr 2020 16:27:41 +0200 Subject: [PATCH 0075/1971] thermal: Move internal IPA functions The exported IPA functions are used by the IPA. It is pointless to declare the functions in the thermal.h file. For better self-encapsulation and less impact for the compilation if a change is made on it. Move the code in the thermal core internal header file. As the users depends on THERMAL then it is pointless to have the stub, remove them. Take also the opportunity to fix checkpatch warnings/errors when moving the code around. 
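[Editor's note, not part of the patch: a hedged fragment showing how thermal-core internal code might use the helpers after the move; the calling function and variable names are illustrative, the helpers are the ones declared in the diff below, and thermal_core.h is assumed to be included.]

#include <linux/errno.h>
#include "thermal_core.h"

static int example_allocate(struct thermal_zone_device *tz,
			    struct thermal_cooling_device *cdev)
{
	u32 max_power;

	/* Only cooling devices implementing the power ops participate. */
	if (!cdev_is_power_actor(cdev))
		return -EINVAL;

	return power_actor_get_max_power(cdev, tz, &max_power);
}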
Signed-off-by: Daniel Lezcano Reviewed-by: Amit Kucheria Acked-by: Zhang Rui Link: https://lore.kernel.org/r/20200402142747.8307-3-daniel.lezcano@linaro.org --- drivers/thermal/thermal_core.h | 13 +++++++++++++ include/linux/thermal.h | 24 ------------------------ 2 files changed, 13 insertions(+), 24 deletions(-) diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h index 5d08ad60d9df8..f99551ce98383 100644 --- a/drivers/thermal/thermal_core.h +++ b/drivers/thermal/thermal_core.h @@ -46,6 +46,19 @@ struct thermal_attr { char name[THERMAL_NAME_LENGTH]; }; +static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev) +{ + return cdev->ops->get_requested_power && cdev->ops->state2power && + cdev->ops->power2state; +} + +int power_actor_get_max_power(struct thermal_cooling_device *cdev, + struct thermal_zone_device *tz, u32 *max_power); +int power_actor_get_min_power(struct thermal_cooling_device *cdev, + struct thermal_zone_device *tz, u32 *min_power); +int power_actor_set_power(struct thermal_cooling_device *cdev, + struct thermal_instance *ti, u32 power); + /* * This structure is used to describe the behavior of * a certain cooling device on a certain trip point diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 5aa80fb2fb617..e0279f7b43f45 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -399,18 +399,6 @@ void devm_thermal_zone_of_sensor_unregister(struct device *dev, #endif #if IS_ENABLED(CONFIG_THERMAL) -static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev) -{ - return cdev->ops->get_requested_power && cdev->ops->state2power && - cdev->ops->power2state; -} - -int power_actor_get_max_power(struct thermal_cooling_device *, - struct thermal_zone_device *tz, u32 *max_power); -int power_actor_get_min_power(struct thermal_cooling_device *, - struct thermal_zone_device *tz, u32 *min_power); -int power_actor_set_power(struct thermal_cooling_device *, - struct thermal_instance *, u32); struct thermal_zone_device *thermal_zone_device_register(const char *, int, int, void *, struct thermal_zone_device_ops *, struct thermal_zone_params *, int, int); @@ -447,18 +435,6 @@ struct thermal_instance *get_thermal_instance(struct thermal_zone_device *, void thermal_cdev_update(struct thermal_cooling_device *); void thermal_notify_framework(struct thermal_zone_device *, int); #else -static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev) -{ return false; } -static inline int power_actor_get_max_power(struct thermal_cooling_device *cdev, - struct thermal_zone_device *tz, u32 *max_power) -{ return 0; } -static inline int power_actor_get_min_power(struct thermal_cooling_device *cdev, - struct thermal_zone_device *tz, - u32 *min_power) -{ return -ENODEV; } -static inline int power_actor_set_power(struct thermal_cooling_device *cdev, - struct thermal_instance *tz, u32 power) -{ return 0; } static inline struct thermal_zone_device *thermal_zone_device_register( const char *type, int trips, int mask, void *devdata, struct thermal_zone_device_ops *ops, -- GitLab From 2e7700dc336dde93cdc9394d10ccd79593fff214 Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Thu, 2 Apr 2020 16:27:42 +0200 Subject: [PATCH 0076/1971] thermal: Move trip point structure definition to private header The struct thermal_trip is only used by the thermal internals, it is pointless to export the definition in the global header. Move the structure to the thermal_core.h internal header. 
Signed-off-by: Daniel Lezcano Reviewed-by: Amit Kucheria Acked-by: Zhang Rui Link: https://lore.kernel.org/r/20200402142747.8307-4-daniel.lezcano@linaro.org --- drivers/thermal/thermal_core.h | 13 +++++++++++++ include/linux/thermal.h | 15 --------------- 2 files changed, 13 insertions(+), 15 deletions(-) diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h index f99551ce98383..d37de708c28a2 100644 --- a/drivers/thermal/thermal_core.h +++ b/drivers/thermal/thermal_core.h @@ -58,6 +58,19 @@ int power_actor_get_min_power(struct thermal_cooling_device *cdev, struct thermal_zone_device *tz, u32 *min_power); int power_actor_set_power(struct thermal_cooling_device *cdev, struct thermal_instance *ti, u32 power); +/** + * struct thermal_trip - representation of a point in temperature domain + * @np: pointer to struct device_node that this trip point was created from + * @temperature: temperature value in miliCelsius + * @hysteresis: relative hysteresis in miliCelsius + * @type: trip point type + */ +struct thermal_trip { + struct device_node *np; + int temperature; + int hysteresis; + enum thermal_trip_type type; +}; /* * This structure is used to describe the behavior of diff --git a/include/linux/thermal.h b/include/linux/thermal.h index e0279f7b43f45..7adbfe092281a 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -332,21 +332,6 @@ struct thermal_zone_of_device_ops { int (*set_trip_temp)(void *, int, int); }; -/** - * struct thermal_trip - representation of a point in temperature domain - * @np: pointer to struct device_node that this trip point was created from - * @temperature: temperature value in miliCelsius - * @hysteresis: relative hysteresis in miliCelsius - * @type: trip point type - */ - -struct thermal_trip { - struct device_node *np; - int temperature; - int hysteresis; - enum thermal_trip_type type; -}; - /* Function declarations */ #ifdef CONFIG_THERMAL_OF int thermal_zone_of_get_sensor_id(struct device_node *tz_np, -- GitLab From f0129c231772a85a726b233756cdc58ea42a9d84 Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Thu, 2 Apr 2020 16:27:43 +0200 Subject: [PATCH 0077/1971] thermal: Move get_tz_trend to the internal header The function is not used any place other than the thermal directory. It does not make sense to export its definition in the global header as there is no use of it. Move the definition to the internal header and allow better self-encapsulation. Take the opportunity to add the parameter names to make checkpatch happy and remove the pointless stubs. 
Signed-off-by: Daniel Lezcano Reviewed-by: Amit Kucheria Acked-by: Zhang Rui Link: https://lore.kernel.org/r/20200402142747.8307-5-daniel.lezcano@linaro.org --- drivers/thermal/thermal_core.h | 2 ++ include/linux/thermal.h | 4 +--- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h index d37de708c28a2..5fb2bd9c7034c 100644 --- a/drivers/thermal/thermal_core.h +++ b/drivers/thermal/thermal_core.h @@ -72,6 +72,8 @@ struct thermal_trip { enum thermal_trip_type type; }; +int get_tz_trend(struct thermal_zone_device *tz, int trip); + /* * This structure is used to describe the behavior of * a certain cooling device on a certain trip point diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 7adbfe092281a..8006ba5de8551 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -414,7 +414,6 @@ int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp); int thermal_zone_get_slope(struct thermal_zone_device *tz); int thermal_zone_get_offset(struct thermal_zone_device *tz); -int get_tz_trend(struct thermal_zone_device *, int); struct thermal_instance *get_thermal_instance(struct thermal_zone_device *, struct thermal_cooling_device *, int); void thermal_cdev_update(struct thermal_cooling_device *); @@ -473,8 +472,7 @@ static inline int thermal_zone_get_slope( static inline int thermal_zone_get_offset( struct thermal_zone_device *tz) { return -ENODEV; } -static inline int get_tz_trend(struct thermal_zone_device *tz, int trip) -{ return -ENODEV; } + static inline struct thermal_instance * get_thermal_instance(struct thermal_zone_device *tz, struct thermal_cooling_device *cdev, int trip) -- GitLab From 06f1041f5023c00a54f63c269b997c61d1b3b739 Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Thu, 2 Apr 2020 16:27:44 +0200 Subject: [PATCH 0078/1971] thermal: Move get_thermal_instance to the internal header The function is not used any place other than the thermal directory. It does not make sense to export its definition in the global header as there is no use of it. Move the definition to the internal header and allow better self-encapsulation. Take the opportunity to add the parameter names to make checkpatch happy and remove the pointless stubs. 
Signed-off-by: Daniel Lezcano Reviewed-by: Amit Kucheria Acked-by: Zhang Rui Link: https://lore.kernel.org/r/20200402142747.8307-6-daniel.lezcano@linaro.org --- drivers/thermal/thermal_core.h | 5 +++++ include/linux/thermal.h | 6 ------ 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h index 5fb2bd9c7034c..c95689586e19a 100644 --- a/drivers/thermal/thermal_core.h +++ b/drivers/thermal/thermal_core.h @@ -74,6 +74,11 @@ struct thermal_trip { int get_tz_trend(struct thermal_zone_device *tz, int trip); +struct thermal_instance * +get_thermal_instance(struct thermal_zone_device *tz, + struct thermal_cooling_device *cdev, + int trip); + /* * This structure is used to describe the behavior of * a certain cooling device on a certain trip point diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 8006ba5de8551..47e745c5dfca7 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -414,8 +414,6 @@ int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp); int thermal_zone_get_slope(struct thermal_zone_device *tz); int thermal_zone_get_offset(struct thermal_zone_device *tz); -struct thermal_instance *get_thermal_instance(struct thermal_zone_device *, - struct thermal_cooling_device *, int); void thermal_cdev_update(struct thermal_cooling_device *); void thermal_notify_framework(struct thermal_zone_device *, int); #else @@ -473,10 +471,6 @@ static inline int thermal_zone_get_offset( struct thermal_zone_device *tz) { return -ENODEV; } -static inline struct thermal_instance * -get_thermal_instance(struct thermal_zone_device *tz, - struct thermal_cooling_device *cdev, int trip) -{ return ERR_PTR(-ENODEV); } static inline void thermal_cdev_update(struct thermal_cooling_device *cdev) { } static inline void thermal_notify_framework(struct thermal_zone_device *tz, -- GitLab From 60518260cab21e749704baa5246ff13f7559fa91 Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Thu, 2 Apr 2020 16:27:45 +0200 Subject: [PATCH 0079/1971] thermal: Change IS_ENABLED to IFDEF in the header file The thermal framework can not be compiled as a module. The IS_ENABLED macro is useless here and can be replaced by an ifdef. Signed-off-by: Daniel Lezcano Reviewed-by: Amit Kucheria Acked-by: Zhang Rui Link: https://lore.kernel.org/r/20200402142747.8307-7-daniel.lezcano@linaro.org --- include/linux/thermal.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 47e745c5dfca7..12df9ff0182d9 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -383,7 +383,7 @@ void devm_thermal_zone_of_sensor_unregister(struct device *dev, #endif -#if IS_ENABLED(CONFIG_THERMAL) +#ifdef CONFIG_THERMAL struct thermal_zone_device *thermal_zone_device_register(const char *, int, int, void *, struct thermal_zone_device_ops *, struct thermal_zone_params *, int, int); -- GitLab From 708418500644c248d6f266e4df0bf43ce53bf746 Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Thu, 2 Apr 2020 16:27:46 +0200 Subject: [PATCH 0080/1971] thermal: Remove stubs for thermal_zone_[un]bind_cooling_device All callers of the functions depends on THERMAL, it is pointless to define stubs. Remove them. 
Signed-off-by: Daniel Lezcano Reviewed-by: Amit Kucheria Acked-by: Zhang Rui Link: https://lore.kernel.org/r/20200402142747.8307-8-daniel.lezcano@linaro.org --- include/linux/thermal.h | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 12df9ff0182d9..7b3dbfe15b59f 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -426,16 +426,6 @@ static inline struct thermal_zone_device *thermal_zone_device_register( static inline void thermal_zone_device_unregister( struct thermal_zone_device *tz) { } -static inline int thermal_zone_bind_cooling_device( - struct thermal_zone_device *tz, int trip, - struct thermal_cooling_device *cdev, - unsigned long upper, unsigned long lower, - unsigned int weight) -{ return -ENODEV; } -static inline int thermal_zone_unbind_cooling_device( - struct thermal_zone_device *tz, int trip, - struct thermal_cooling_device *cdev) -{ return -ENODEV; } static inline void thermal_zone_device_update(struct thermal_zone_device *tz, enum thermal_notify_event event) { } -- GitLab From 0145f67866b71e8c1da3c1d9412623db7ba8a0c8 Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Thu, 2 Apr 2020 16:27:47 +0200 Subject: [PATCH 0081/1971] thermal: Remove thermal_zone_device_update() stub All users of the function depends on THERMAL, no stub is needed. Remove it. Signed-off-by: Daniel Lezcano Reviewed-by: Amit Kucheria Acked-by: Zhang Rui Link: https://lore.kernel.org/r/20200402142747.8307-9-daniel.lezcano@linaro.org --- include/linux/thermal.h | 3 --- 1 file changed, 3 deletions(-) diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 7b3dbfe15b59f..216185bb3014a 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -426,9 +426,6 @@ static inline struct thermal_zone_device *thermal_zone_device_register( static inline void thermal_zone_device_unregister( struct thermal_zone_device *tz) { } -static inline void thermal_zone_device_update(struct thermal_zone_device *tz, - enum thermal_notify_event event) -{ } static inline struct thermal_cooling_device * thermal_cooling_device_register(char *type, void *devdata, const struct thermal_cooling_device_ops *ops) -- GitLab From 8cb775bb005c568857ba7909a1c5a297ed4c33ee Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sun, 5 Apr 2020 18:35:16 +0200 Subject: [PATCH 0082/1971] thermal: Delete an error message in four functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The function “platform_get_irq” can log an error already. Thus omit redundant messages for the exception handling in the calling functions. This issue was detected by using the Coccinelle software. 
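[Editor's note, not part of the patch: the resulting idiom, sketched with a made-up probe function. Since platform_get_irq() already logs an error on failure, the caller just propagates the negative error code without printing its own message.]

#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* no extra dev_err() needed */

	/* ... request the interrupt, set up the device, etc. ... */
	return 0;
}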
Signed-off-by: Markus Elfring Reviewed-by: Amit Kucheria Reviewed-by: Keerthy Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/05f49ae7-5cc7-d6a0-fc3d-abaf2a0b373c@web.de --- drivers/thermal/rockchip_thermal.c | 4 +--- drivers/thermal/st/st_thermal_memmap.c | 4 +--- drivers/thermal/st/stm_thermal.c | 4 +--- drivers/thermal/ti-soc-thermal/ti-bandgap.c | 5 ++--- 4 files changed, 5 insertions(+), 12 deletions(-) diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c index 7c1a8bccdcba6..15a71ecc916c1 100644 --- a/drivers/thermal/rockchip_thermal.c +++ b/drivers/thermal/rockchip_thermal.c @@ -1241,10 +1241,8 @@ static int rockchip_thermal_probe(struct platform_device *pdev) return -ENXIO; irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(&pdev->dev, "no irq resource?\n"); + if (irq < 0) return -EINVAL; - } thermal = devm_kzalloc(&pdev->dev, sizeof(struct rockchip_thermal_data), GFP_KERNEL); diff --git a/drivers/thermal/st/st_thermal_memmap.c b/drivers/thermal/st/st_thermal_memmap.c index a824b78dabf8f..a0114452d11f2 100644 --- a/drivers/thermal/st/st_thermal_memmap.c +++ b/drivers/thermal/st/st_thermal_memmap.c @@ -94,10 +94,8 @@ static int st_mmap_register_enable_irq(struct st_thermal_sensor *sensor) int ret; sensor->irq = platform_get_irq(pdev, 0); - if (sensor->irq < 0) { - dev_err(dev, "failed to register IRQ\n"); + if (sensor->irq < 0) return sensor->irq; - } ret = devm_request_threaded_irq(dev, sensor->irq, NULL, st_mmap_thermal_trip_handler, diff --git a/drivers/thermal/st/stm_thermal.c b/drivers/thermal/st/stm_thermal.c index 9314e3df6a427..331e2b768df54 100644 --- a/drivers/thermal/st/stm_thermal.c +++ b/drivers/thermal/st/stm_thermal.c @@ -385,10 +385,8 @@ static int stm_register_irq(struct stm_thermal_sensor *sensor) int ret; sensor->irq = platform_get_irq(pdev, 0); - if (sensor->irq < 0) { - dev_err(dev, "%s: Unable to find IRQ\n", __func__); + if (sensor->irq < 0) return sensor->irq; - } ret = devm_request_threaded_irq(dev, sensor->irq, NULL, diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c index 263b0420fbe45..ab19ceff6e2a7 100644 --- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c +++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c @@ -772,10 +772,9 @@ static int ti_bandgap_talert_init(struct ti_bandgap *bgp, int ret; bgp->irq = platform_get_irq(pdev, 0); - if (bgp->irq < 0) { - dev_err(&pdev->dev, "get_irq failed\n"); + if (bgp->irq < 0) return bgp->irq; - } + ret = request_threaded_irq(bgp->irq, NULL, ti_bandgap_talert_irq_handler, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, -- GitLab From 3dc748754d68b5893307c3c68acfc67f76110ecf Mon Sep 17 00:00:00 2001 From: Keerthy Date: Tue, 7 Apr 2020 11:21:13 +0530 Subject: [PATCH 0083/1971] dt-bindings: thermal: k3: Add VTM bindings documentation Add VTM bindings documentation. In the Voltage Thermal Management Module(VTM), K3 AM654 supplies a voltage reference and a temperature sensor feature that are gathered in the band gap voltage and temperature sensor (VBGAPTS) module. The band gap provides current and voltage reference for its internal circuits and other analog IP blocks. The analog-to-digital converter (ADC) produces an output value that is proportional to the silicon temperature. 
Signed-off-by: Keerthy Reviewed-by: Rob Herring Reviewed-by: Amit Kucheria Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/20200407055116.16082-2-j-keerthy@ti.com --- .../bindings/thermal/ti,am654-thermal.yaml | 56 +++++++++++++++++++ 1 file changed, 56 insertions(+) create mode 100644 Documentation/devicetree/bindings/thermal/ti,am654-thermal.yaml diff --git a/Documentation/devicetree/bindings/thermal/ti,am654-thermal.yaml b/Documentation/devicetree/bindings/thermal/ti,am654-thermal.yaml new file mode 100644 index 0000000000000..25b9209c2e5d9 --- /dev/null +++ b/Documentation/devicetree/bindings/thermal/ti,am654-thermal.yaml @@ -0,0 +1,56 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/thermal/ti,am654-thermal.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Texas Instruments AM654 VTM (DTS) binding + +maintainers: + - Keerthy + +properties: + compatible: + const: ti,am654-vtm + + reg: + maxItems: 1 + + power-domains: + maxItems: 1 + + "#thermal-sensor-cells": + const: 1 + +required: + - compatible + - reg + - power-domains + - "#thermal-sensor-cells" + +additionalProperties: false + +examples: + - | + #include + vtm: thermal@42050000 { + compatible = "ti,am654-vtm"; + reg = <0x0 0x42050000 0x0 0x25c>; + power-domains = <&k3_pds 80 TI_SCI_PD_EXCLUSIVE>; + #thermal-sensor-cells = <1>; + }; + + mpu0_thermal: mpu0_thermal { + polling-delay-passive = <250>; /* milliseconds */ + polling-delay = <500>; /* milliseconds */ + thermal-sensors = <&vtm0 0>; + + trips { + mpu0_crit: mpu0_crit { + temperature = <125000>; /* milliCelsius */ + hysteresis = <2000>; /* milliCelsius */ + type = "critical"; + }; + }; + }; +... -- GitLab From 48b2bce8c7db92601145e1204fa7048b1f74e442 Mon Sep 17 00:00:00 2001 From: Keerthy Date: Tue, 7 Apr 2020 11:21:14 +0530 Subject: [PATCH 0084/1971] thermal: k3: Add support for bandgap sensors Add VTM thermal support. In the Voltage Thermal Management Module(VTM), K3 AM654 supplies a voltage reference and a temperature sensor feature that are gathered in the band gap voltage and temperature sensor (VBGAPTS) module. The band gap provides current and voltage reference for its internal circuits and other analog IP blocks. The analog-to-digital converter (ADC) produces an output value that is proportional to the silicon temperature. Currently reading temperatures only is supported. There are no active/passive cooling agent supported. Signed-off-by: Keerthy Reviewed-by: Amit Kucheria Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/20200407055116.16082-3-j-keerthy@ti.com --- drivers/thermal/Kconfig | 10 ++ drivers/thermal/Makefile | 1 + drivers/thermal/k3_bandgap.c | 264 +++++++++++++++++++++++++++++++++++ 3 files changed, 275 insertions(+) create mode 100644 drivers/thermal/k3_bandgap.c diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index 91af271e9bb02..e53314ea9e25e 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig @@ -273,6 +273,16 @@ config IMX8MM_THERMAL cpufreq is used as the cooling device to throttle CPUs when the passive trip is crossed. +config K3_THERMAL + tristate "Texas Instruments K3 thermal support" + depends on ARCH_K3 || COMPILE_TEST + help + If you say yes here you get thermal support for the Texas Instruments + K3 SoC family. The current chip supported is: + - AM654 + + This includes temperature reading functionality. 
+ config MAX77620_THERMAL tristate "Temperature sensor driver for Maxim MAX77620 PMIC" depends on MFD_MAX77620 diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile index 8c8ed7b799159..86c506410cc01 100644 --- a/drivers/thermal/Makefile +++ b/drivers/thermal/Makefile @@ -28,6 +28,7 @@ thermal_sys-$(CONFIG_CLOCK_THERMAL) += clock_cooling.o # devfreq cooling thermal_sys-$(CONFIG_DEVFREQ_THERMAL) += devfreq_cooling.o +obj-$(CONFIG_K3_THERMAL) += k3_bandgap.o # platform thermal drivers obj-y += broadcom/ obj-$(CONFIG_THERMAL_MMIO) += thermal_mmio.o diff --git a/drivers/thermal/k3_bandgap.c b/drivers/thermal/k3_bandgap.c new file mode 100644 index 0000000000000..35f41e8a0b759 --- /dev/null +++ b/drivers/thermal/k3_bandgap.c @@ -0,0 +1,264 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * TI Bandgap temperature sensor driver for K3 SoC Family + * + * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define K3_VTM_DEVINFO_PWR0_OFFSET 0x4 +#define K3_VTM_DEVINFO_PWR0_TEMPSENS_CT_MASK 0xf0 +#define K3_VTM_TMPSENS0_CTRL_OFFSET 0x80 +#define K3_VTM_REGS_PER_TS 0x10 +#define K3_VTM_TS_STAT_DTEMP_MASK 0x3ff +#define K3_VTM_TMPSENS_CTRL_CBIASSEL BIT(0) +#define K3_VTM_TMPSENS_CTRL_SOC BIT(5) +#define K3_VTM_TMPSENS_CTRL_CLRZ BIT(6) +#define K3_VTM_TMPSENS_CTRL_CLKON_REQ BIT(7) + +#define K3_VTM_ADC_BEGIN_VAL 540 +#define K3_VTM_ADC_END_VAL 944 + +static const int k3_adc_to_temp[] = { + -40000, -40000, -40000, -40000, -39800, -39400, -39000, -38600, -38200, + -37800, -37400, -37000, -36600, -36200, -35800, -35300, -34700, -34200, + -33800, -33400, -33000, -32600, -32200, -31800, -31400, -31000, -30600, + -30200, -29800, -29400, -29000, -28600, -28200, -27700, -27100, -26600, + -26200, -25800, -25400, -25000, -24600, -24200, -23800, -23400, -23000, + -22600, -22200, -21800, -21400, -21000, -20500, -19900, -19400, -19000, + -18600, -18200, -17800, -17400, -17000, -16600, -16200, -15800, -15400, + -15000, -14600, -14200, -13800, -13400, -13000, -12500, -11900, -11400, + -11000, -10600, -10200, -9800, -9400, -9000, -8600, -8200, -7800, -7400, + -7000, -6600, -6200, -5800, -5400, -5000, -4500, -3900, -3400, -3000, + -2600, -2200, -1800, -1400, -1000, -600, -200, 200, 600, 1000, 1400, + 1800, 2200, 2600, 3000, 3400, 3900, 4500, 5000, 5400, 5800, 6200, 6600, + 7000, 7400, 7800, 8200, 8600, 9000, 9400, 9800, 10200, 10600, 11000, + 11400, 11800, 12200, 12700, 13300, 13800, 14200, 14600, 15000, 15400, + 15800, 16200, 16600, 17000, 17400, 17800, 18200, 18600, 19000, 19400, + 19800, 20200, 20600, 21000, 21400, 21900, 22500, 23000, 23400, 23800, + 24200, 24600, 25000, 25400, 25800, 26200, 26600, 27000, 27400, 27800, + 28200, 28600, 29000, 29400, 29800, 30200, 30600, 31000, 31400, 31900, + 32500, 33000, 33400, 33800, 34200, 34600, 35000, 35400, 35800, 36200, + 36600, 37000, 37400, 37800, 38200, 38600, 39000, 39400, 39800, 40200, + 40600, 41000, 41400, 41800, 42200, 42600, 43100, 43700, 44200, 44600, + 45000, 45400, 45800, 46200, 46600, 47000, 47400, 47800, 48200, 48600, + 49000, 49400, 49800, 50200, 50600, 51000, 51400, 51800, 52200, 52600, + 53000, 53400, 53800, 54200, 54600, 55000, 55400, 55900, 56500, 57000, + 57400, 57800, 58200, 58600, 59000, 59400, 59800, 60200, 60600, 61000, + 61400, 61800, 62200, 62600, 63000, 63400, 63800, 64200, 64600, 65000, + 65400, 65800, 66200, 66600, 67000, 67400, 67800, 68200, 68600, 69000, + 69400, 69800, 70200, 70600, 71000, 71500, 72100, 
72600, 73000, 73400, + 73800, 74200, 74600, 75000, 75400, 75800, 76200, 76600, 77000, 77400, + 77800, 78200, 78600, 79000, 79400, 79800, 80200, 80600, 81000, 81400, + 81800, 82200, 82600, 83000, 83400, 83800, 84200, 84600, 85000, 85400, + 85800, 86200, 86600, 87000, 87400, 87800, 88200, 88600, 89000, 89400, + 89800, 90200, 90600, 91000, 91400, 91800, 92200, 92600, 93000, 93400, + 93800, 94200, 94600, 95000, 95400, 95800, 96200, 96600, 97000, 97500, + 98100, 98600, 99000, 99400, 99800, 100200, 100600, 101000, 101400, + 101800, 102200, 102600, 103000, 103400, 103800, 104200, 104600, 105000, + 105400, 105800, 106200, 106600, 107000, 107400, 107800, 108200, 108600, + 109000, 109400, 109800, 110200, 110600, 111000, 111400, 111800, 112200, + 112600, 113000, 113400, 113800, 114200, 114600, 115000, 115400, 115800, + 116200, 116600, 117000, 117400, 117800, 118200, 118600, 119000, 119400, + 119800, 120200, 120600, 121000, 121400, 121800, 122200, 122600, 123000, + 123400, 123800, 124200, 124600, 124900, 125000, +}; + +struct k3_bandgap { + void __iomem *base; + const struct k3_bandgap_data *conf; +}; + +/* common data structures */ +struct k3_thermal_data { + struct thermal_zone_device *tzd; + struct k3_bandgap *bgp; + int sensor_id; + u32 ctrl_offset; + u32 stat_offset; +}; + +static unsigned int vtm_get_best_value(unsigned int s0, unsigned int s1, + unsigned int s2) +{ + int d01 = abs(s0 - s1); + int d02 = abs(s0 - s2); + int d12 = abs(s1 - s2); + + if (d01 <= d02 && d01 <= d12) + return (s0 + s1) / 2; + + if (d02 <= d01 && d02 <= d12) + return (s0 + s2) / 2; + + return (s1 + s2) / 2; +} + +static int k3_bgp_read_temp(struct k3_thermal_data *devdata, + int *temp) +{ + struct k3_bandgap *bgp; + unsigned int dtemp, s0, s1, s2; + + bgp = devdata->bgp; + + /* + * Errata is applicable for am654 pg 1.0 silicon. There + * is a variation of the order for 8-10 degree centigrade. + * Work around that by getting the average of two closest + * readings out of three readings everytime we want to + * report temperatures. + * + * Errata workaround. 
+ */ + s0 = readl(bgp->base + devdata->stat_offset) & + K3_VTM_TS_STAT_DTEMP_MASK; + s1 = readl(bgp->base + devdata->stat_offset) & + K3_VTM_TS_STAT_DTEMP_MASK; + s2 = readl(bgp->base + devdata->stat_offset) & + K3_VTM_TS_STAT_DTEMP_MASK; + dtemp = vtm_get_best_value(s0, s1, s2); + + if (dtemp < K3_VTM_ADC_BEGIN_VAL || dtemp > K3_VTM_ADC_END_VAL) + return -EINVAL; + + *temp = k3_adc_to_temp[dtemp - K3_VTM_ADC_BEGIN_VAL]; + + return 0; +} + +static int k3_thermal_get_temp(void *devdata, int *temp) +{ + struct k3_thermal_data *data = devdata; + int ret = 0; + + ret = k3_bgp_read_temp(data, temp); + if (ret) + return ret; + + return ret; +} + +static const struct thermal_zone_of_device_ops k3_of_thermal_ops = { + .get_temp = k3_thermal_get_temp, +}; + +static const struct of_device_id of_k3_bandgap_match[]; + +static int k3_bandgap_probe(struct platform_device *pdev) +{ + int ret = 0, cnt, val, id; + struct resource *res; + struct device *dev = &pdev->dev; + struct k3_bandgap *bgp; + struct k3_thermal_data *data; + + if (ARRAY_SIZE(k3_adc_to_temp) != (K3_VTM_ADC_END_VAL + 1 - + K3_VTM_ADC_BEGIN_VAL)) + return -EINVAL; + + bgp = devm_kzalloc(&pdev->dev, sizeof(*bgp), GFP_KERNEL); + if (!bgp) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + bgp->base = devm_ioremap_resource(dev, res); + if (IS_ERR(bgp->base)) + return PTR_ERR(bgp->base); + + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + pm_runtime_put_noidle(dev); + pm_runtime_disable(dev); + return ret; + } + + /* Get the sensor count in the VTM */ + val = readl(bgp->base + K3_VTM_DEVINFO_PWR0_OFFSET); + cnt = val & K3_VTM_DEVINFO_PWR0_TEMPSENS_CT_MASK; + cnt >>= __ffs(K3_VTM_DEVINFO_PWR0_TEMPSENS_CT_MASK); + + data = devm_kcalloc(dev, cnt, sizeof(*data), GFP_KERNEL); + if (!data) { + ret = -ENOMEM; + goto err_alloc; + } + + /* Register the thermal sensors */ + for (id = 0; id < cnt; id++) { + data[id].sensor_id = id; + data[id].bgp = bgp; + data[id].ctrl_offset = K3_VTM_TMPSENS0_CTRL_OFFSET + + id * K3_VTM_REGS_PER_TS; + data[id].stat_offset = data[id].ctrl_offset + 0x8; + + val = readl(data[id].bgp->base + data[id].ctrl_offset); + val |= (K3_VTM_TMPSENS_CTRL_SOC | + K3_VTM_TMPSENS_CTRL_CLRZ | + K3_VTM_TMPSENS_CTRL_CLKON_REQ); + val &= ~K3_VTM_TMPSENS_CTRL_CBIASSEL; + writel(val, data[id].bgp->base + data[id].ctrl_offset); + + data[id].tzd = + devm_thermal_zone_of_sensor_register(dev, id, + &data[id], + &k3_of_thermal_ops); + if (IS_ERR(data[id].tzd)) { + dev_err(dev, "thermal zone device is NULL\n"); + ret = PTR_ERR(data[id].tzd); + goto err_alloc; + } + } + + platform_set_drvdata(pdev, bgp); + + return 0; + +err_alloc: + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); + + return ret; +} + +static int k3_bandgap_remove(struct platform_device *pdev) +{ + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + return 0; +} + +static const struct of_device_id of_k3_bandgap_match[] = { + { + .compatible = "ti,am654-vtm", + }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, of_k3_bandgap_match); + +static struct platform_driver k3_bandgap_sensor_driver = { + .probe = k3_bandgap_probe, + .remove = k3_bandgap_remove, + .driver = { + .name = "k3-soc-thermal", + .of_match_table = of_k3_bandgap_match, + }, +}; + +module_platform_driver(k3_bandgap_sensor_driver); + +MODULE_DESCRIPTION("K3 bandgap temperature sensor driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("J Keerthy "); -- GitLab From 778fb6b729080f5ec91dfdb392845304e0e57b3c Mon Sep 17 00:00:00 2001 From: 
Martin Blumenstingl Date: Tue, 31 Mar 2020 01:45:34 +0200 Subject: [PATCH 0085/1971] clk: meson8b: export the HDMI system clock Export the HDMI system clock (used by the HDMI transmitter) so it can be used in the dt-bindings. Signed-off-by: Martin Blumenstingl Signed-off-by: Jerome Brunet Acked-by: Rob Herring Link: https://lore.kernel.org/r/20200330234535.3327513-2-martin.blumenstingl@googlemail.com --- drivers/clk/meson/meson8b.h | 1 - include/dt-bindings/clock/meson8b-clkc.h | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/clk/meson/meson8b.h b/drivers/clk/meson/meson8b.h index c889fbeec30f0..94ce3ef0c1d5b 100644 --- a/drivers/clk/meson/meson8b.h +++ b/drivers/clk/meson/meson8b.h @@ -146,7 +146,6 @@ #define CLKID_CTS_VDAC0 171 #define CLKID_HDMI_SYS_SEL 172 #define CLKID_HDMI_SYS_DIV 173 -#define CLKID_HDMI_SYS 174 #define CLKID_MALI_0_SEL 175 #define CLKID_MALI_0_DIV 176 #define CLKID_MALI_0 177 diff --git a/include/dt-bindings/clock/meson8b-clkc.h b/include/dt-bindings/clock/meson8b-clkc.h index 68862aaf977ed..4c5965ae1df4e 100644 --- a/include/dt-bindings/clock/meson8b-clkc.h +++ b/include/dt-bindings/clock/meson8b-clkc.h @@ -107,6 +107,7 @@ #define CLKID_PERIPH 126 #define CLKID_AXI 128 #define CLKID_L2_DRAM 130 +#define CLKID_HDMI_SYS 174 #define CLKID_VPU 190 #define CLKID_VDEC_1 196 #define CLKID_VDEC_HCODEC 199 -- GitLab From f0e0884acf8a5cbd8862834bbd5b7a1db284966b Mon Sep 17 00:00:00 2001 From: Martin Blumenstingl Date: Tue, 31 Mar 2020 01:45:35 +0200 Subject: [PATCH 0086/1971] clk: meson: meson8b: make the hdmi_sys clock tree mutable The HDMI TX controller requires the hdmi_sys clock to be enabled. Allow changing the whole clock tree now that we know that one of our drivers requires this. Signed-off-by: Martin Blumenstingl Signed-off-by: Jerome Brunet Link: https://lore.kernel.org/r/20200330234535.3327513-3-martin.blumenstingl@googlemail.com --- drivers/clk/meson/meson8b.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c index 34a70c4b48991..7c55c695cbaed 100644 --- a/drivers/clk/meson/meson8b.c +++ b/drivers/clk/meson/meson8b.c @@ -1725,7 +1725,7 @@ static struct clk_regmap meson8b_hdmi_sys_sel = { }, .hw.init = &(struct clk_init_data){ .name = "hdmi_sys_sel", - .ops = &clk_regmap_mux_ro_ops, + .ops = &clk_regmap_mux_ops, /* FIXME: all other parents are unknown */ .parent_data = &(const struct clk_parent_data) { .fw_name = "xtal", @@ -1745,7 +1745,7 @@ static struct clk_regmap meson8b_hdmi_sys_div = { }, .hw.init = &(struct clk_init_data){ .name = "hdmi_sys_div", - .ops = &clk_regmap_divider_ro_ops, + .ops = &clk_regmap_divider_ops, .parent_hws = (const struct clk_hw *[]) { &meson8b_hdmi_sys_sel.hw }, @@ -1761,7 +1761,7 @@ static struct clk_regmap meson8b_hdmi_sys = { }, .hw.init = &(struct clk_init_data) { .name = "hdmi_sys", - .ops = &clk_regmap_gate_ro_ops, + .ops = &clk_regmap_gate_ops, .parent_hws = (const struct clk_hw *[]) { &meson8b_hdmi_sys_div.hw }, -- GitLab From ada018b15ccecbdb95df46db7121516edb906bf6 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 14 Feb 2020 18:32:43 +0100 Subject: [PATCH 0087/1971] x86/mce/amd: Do proper cleanup on error paths Drop kobject reference counts properly on error in the banks and blocks allocation functions. [ bp: Write commit message. 
] Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov Link: https://lkml.kernel.org/r/20200403161943.1458-2-bp@alien8.de --- arch/x86/kernel/cpu/mce/amd.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c index 52de616a80655..477cf773cf1cb 100644 --- a/arch/x86/kernel/cpu/mce/amd.c +++ b/arch/x86/kernel/cpu/mce/amd.c @@ -1267,13 +1267,12 @@ recurse: if (b) kobject_uevent(&b->kobj, KOBJ_ADD); - return err; + return 0; out_free: if (b) { - kobject_put(&b->kobj); list_del(&b->miscj); - kfree(b); + kobject_put(&b->kobj); } return err; } @@ -1339,6 +1338,7 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank) goto out; } + /* Associate the bank with the per-CPU MCE device */ b->kobj = kobject_create_and_add(name, &dev->kobj); if (!b->kobj) { err = -EINVAL; @@ -1357,16 +1357,17 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank) err = allocate_threshold_blocks(cpu, b, bank, 0, msr_ops.misc(bank)); if (err) - goto out_free; + goto out_kobj; per_cpu(threshold_banks, cpu)[bank] = b; return 0; - out_free: +out_kobj: + kobject_put(b->kobj); +out_free: kfree(b); - - out: +out: return err; } -- GitLab From 260dab4478f60d02a77692c803ea1ff33261843e Mon Sep 17 00:00:00 2001 From: Peng Fan Date: Mon, 16 Mar 2020 16:32:33 +0800 Subject: [PATCH 0088/1971] clk: imx7ulp: make it easy to change ARM core clk ARM clk could only source from divcore or hsrun_divcore. Follow what we already used on i.MX7D and i.MX8M SoCs, use imx_clk_hw_cpu API. When ARM core is running normaly, whether divcore or hwrun_divcore will finally source from SPLL_PFD0. However SPLL_PFD0 is marked with CLK_SET_GATE, so we need to disable SPLL_PFD0, when configure the rate. So add CORE and HSRUN_CORE virtual clk to make it easy to configure the clk using imx_clk_hw_cpu API. Since CORE and HSRUN_CORE already marked with CLK_IS_CRITICAL, no need to set ARM as CLK_IS_CRITICAL. And when set the rate of ARM clk, prograting it the parent with CLK_SET_RATE_PARENT will finally set the SPLL_PFD0 clk. 
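As a minimal sketch (not part of this patch), this is how a consumer such as a cpufreq driver could retarget the ARM core clock once the "arm" mux carries CLK_SET_RATE_PARENT; the "arm" lookup name and the helper below are assumptions for illustration only, with error handling trimmed:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_set_arm_rate(struct device *dev, unsigned long rate)
{
        struct clk *arm = devm_clk_get(dev, "arm");

        if (IS_ERR(arm))
                return PTR_ERR(arm);

        /*
         * With CLK_SET_RATE_PARENT on the "arm" mux and the virtual
         * "core"/"hsrun_core" clocks in place, the request propagates
         * up to SPLL_PFD0, which can be gated and reprogrammed while
         * the core temporarily runs from FIRC.
         */
        return clk_set_rate(arm, rate);
}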
Signed-off-by: Peng Fan Reviewed-by: Abel Vesa Signed-off-by: Shawn Guo --- drivers/clk/imx/clk-imx7ulp.c | 6 ++++-- include/dt-bindings/clock/imx7ulp-clock.h | 5 ++++- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/clk/imx/clk-imx7ulp.c b/drivers/clk/imx/clk-imx7ulp.c index 3710aa0dee9bf..634c0b6636b0e 100644 --- a/drivers/clk/imx/clk-imx7ulp.c +++ b/drivers/clk/imx/clk-imx7ulp.c @@ -29,7 +29,7 @@ static const char * const ddr_sels[] = { "apll_pfd_sel", "dummy", "dummy", "dum static const char * const nic_sels[] = { "firc", "ddr_clk", }; static const char * const periph_plat_sels[] = { "dummy", "nic1_bus_clk", "nic1_clk", "ddr_clk", "apll_pfd2", "apll_pfd1", "apll_pfd0", "upll", }; static const char * const periph_bus_sels[] = { "dummy", "sosc_bus_clk", "dummy", "firc_bus_clk", "rosc", "nic1_bus_clk", "nic1_clk", "spll_bus_clk", }; -static const char * const arm_sels[] = { "divcore", "dummy", "dummy", "hsrun_divcore", }; +static const char * const arm_sels[] = { "core", "dummy", "dummy", "hsrun_core", }; /* used by sosc/sirc/firc/ddr/spll/apll dividers */ static const struct clk_div_table ulp_div_table[] = { @@ -121,7 +121,9 @@ static void __init imx7ulp_clk_scg1_init(struct device_node *np) hws[IMX7ULP_CLK_DDR_SEL] = imx_clk_hw_mux_flags("ddr_sel", base + 0x30, 24, 2, ddr_sels, ARRAY_SIZE(ddr_sels), CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE); hws[IMX7ULP_CLK_CORE_DIV] = imx_clk_hw_divider_flags("divcore", "scs_sel", base + 0x14, 16, 4, CLK_SET_RATE_PARENT); + hws[IMX7ULP_CLK_CORE] = imx_clk_hw_cpu("core", "divcore", hws[IMX7ULP_CLK_CORE_DIV]->clk, hws[IMX7ULP_CLK_SYS_SEL]->clk, hws[IMX7ULP_CLK_SPLL_SEL]->clk, hws[IMX7ULP_CLK_FIRC]->clk); hws[IMX7ULP_CLK_HSRUN_CORE_DIV] = imx_clk_hw_divider_flags("hsrun_divcore", "hsrun_scs_sel", base + 0x1c, 16, 4, CLK_SET_RATE_PARENT); + hws[IMX7ULP_CLK_HSRUN_CORE] = imx_clk_hw_cpu("hsrun_core", "hsrun_divcore", hws[IMX7ULP_CLK_HSRUN_CORE_DIV]->clk, hws[IMX7ULP_CLK_HSRUN_SYS_SEL]->clk, hws[IMX7ULP_CLK_SPLL_SEL]->clk, hws[IMX7ULP_CLK_FIRC]->clk); hws[IMX7ULP_CLK_DDR_DIV] = imx_clk_hw_divider_gate("ddr_clk", "ddr_sel", CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, base + 0x30, 0, 3, 0, ulp_div_table, &imx_ccm_lock); @@ -270,7 +272,7 @@ static void __init imx7ulp_clk_smc1_init(struct device_node *np) base = of_iomap(np, 0); WARN_ON(!base); - hws[IMX7ULP_CLK_ARM] = imx_clk_hw_mux_flags("arm", base + 0x10, 8, 2, arm_sels, ARRAY_SIZE(arm_sels), CLK_IS_CRITICAL); + hws[IMX7ULP_CLK_ARM] = imx_clk_hw_mux_flags("arm", base + 0x10, 8, 2, arm_sels, ARRAY_SIZE(arm_sels), CLK_SET_RATE_PARENT); imx_check_clk_hws(hws, clk_data->num); diff --git a/include/dt-bindings/clock/imx7ulp-clock.h b/include/dt-bindings/clock/imx7ulp-clock.h index 38145bdcd9752..b58370d146e2e 100644 --- a/include/dt-bindings/clock/imx7ulp-clock.h +++ b/include/dt-bindings/clock/imx7ulp-clock.h @@ -58,7 +58,10 @@ #define IMX7ULP_CLK_HSRUN_SYS_SEL 44 #define IMX7ULP_CLK_HSRUN_CORE_DIV 45 -#define IMX7ULP_CLK_SCG1_END 46 +#define IMX7ULP_CLK_CORE 46 +#define IMX7ULP_CLK_HSRUN_CORE 47 + +#define IMX7ULP_CLK_SCG1_END 48 /* PCC2 */ #define IMX7ULP_CLK_DMA1 0 -- GitLab From b1fe0d0937dd7a7dd6e1bbd7212e9c4b5d53a026 Mon Sep 17 00:00:00 2001 From: Peng Fan Date: Fri, 13 Mar 2020 11:27:15 +0800 Subject: [PATCH 0089/1971] clk: imx: drop the dependency on ARM64 for i.MX8M Moving to support aarch32 mode on aarch64 hardware, need to drop the dependency on ARM64 to make the driver could be selected for ARM32. 
Signed-off-by: Peng Fan Acked-by: Arnd Bergmann Acked-by: Stephen Boyd Signed-off-by: Shawn Guo --- drivers/clk/imx/Kconfig | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/clk/imx/Kconfig b/drivers/clk/imx/Kconfig index 01eadee88d661..db0253fa3d648 100644 --- a/drivers/clk/imx/Kconfig +++ b/drivers/clk/imx/Kconfig @@ -10,25 +10,25 @@ config MXC_CLK_SCU config CLK_IMX8MM bool "IMX8MM CCM Clock Driver" - depends on ARCH_MXC && ARM64 + depends on ARCH_MXC help Build the driver for i.MX8MM CCM Clock Driver config CLK_IMX8MN bool "IMX8MN CCM Clock Driver" - depends on ARCH_MXC && ARM64 + depends on ARCH_MXC help Build the driver for i.MX8MN CCM Clock Driver config CLK_IMX8MP bool "IMX8MP CCM Clock Driver" - depends on ARCH_MXC && ARM64 + depends on ARCH_MXC help Build the driver for i.MX8MP CCM Clock Driver config CLK_IMX8MQ bool "IMX8MQ CCM Clock Driver" - depends on ARCH_MXC && ARM64 + depends on ARCH_MXC help Build the driver for i.MX8MQ CCM Clock Driver -- GitLab From c9bf318f77b3a78483e656e609d005c52aadc86d Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 12 Feb 2020 00:34:01 +0100 Subject: [PATCH 0090/1971] x86/mce/amd: Init thresholding machinery only on relevant vendors ... and not unconditionally. [ bp: Add a new vendor_flags bit for that. ] Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov Link: https://lkml.kernel.org/r/20200403161943.1458-3-bp@alien8.de --- arch/x86/kernel/cpu/mce/amd.c | 12 ++++++++++-- arch/x86/kernel/cpu/mce/core.c | 1 + arch/x86/kernel/cpu/mce/internal.h | 9 ++++++--- 3 files changed, 17 insertions(+), 5 deletions(-) diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c index 477cf773cf1cb..c3b3326ad4ac0 100644 --- a/arch/x86/kernel/cpu/mce/amd.c +++ b/arch/x86/kernel/cpu/mce/amd.c @@ -1442,15 +1442,20 @@ free_out: int mce_threshold_remove_device(unsigned int cpu) { + struct threshold_bank **bp = this_cpu_read(threshold_banks); unsigned int bank; + if (!bp) + return 0; + for (bank = 0; bank < per_cpu(mce_num_banks, cpu); ++bank) { if (!(per_cpu(bank_map, cpu) & (1 << bank))) continue; threshold_remove_bank(cpu, bank); } - kfree(per_cpu(threshold_banks, cpu)); - per_cpu(threshold_banks, cpu) = NULL; + /* Clear the pointer before freeing the memory */ + this_cpu_write(threshold_banks, NULL); + kfree(bp); return 0; } @@ -1461,6 +1466,9 @@ int mce_threshold_create_device(unsigned int cpu) struct threshold_bank **bp; int err = 0; + if (!mce_flags.amd_threshold) + return 0; + bp = per_cpu(threshold_banks, cpu); if (bp) return 0; diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index 54165f3569e88..43ca91e14a77c 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -1756,6 +1756,7 @@ static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c) mce_flags.overflow_recov = !!cpu_has(c, X86_FEATURE_OVERFLOW_RECOV); mce_flags.succor = !!cpu_has(c, X86_FEATURE_SUCCOR); mce_flags.smca = !!cpu_has(c, X86_FEATURE_SMCA); + mce_flags.amd_threshold = 1; if (mce_flags.smca) { msr_ops.ctl = smca_ctl_reg; diff --git a/arch/x86/kernel/cpu/mce/internal.h b/arch/x86/kernel/cpu/mce/internal.h index 3b008172ad73d..74a01829c4f4a 100644 --- a/arch/x86/kernel/cpu/mce/internal.h +++ b/arch/x86/kernel/cpu/mce/internal.h @@ -148,7 +148,7 @@ struct mce_vendor_flags { * Recovery. It indicates support for data poisoning in HW and deferred * error interrupts. 
*/ - succor : 1, + succor : 1, /* * (AMD) SMCA: This bit indicates support for Scalable MCA which expands @@ -156,9 +156,12 @@ struct mce_vendor_flags { * banks. Also, to accommodate the new banks and registers, the MCA * register space is moved to a new MSR range. */ - smca : 1, + smca : 1, - __reserved_0 : 61; + /* AMD-style error thresholding banks present. */ + amd_threshold : 1, + + __reserved_0 : 60; }; extern struct mce_vendor_flags mce_flags; -- GitLab From cca9cc05fe98f3eb0cfb58ec6739cfc9d0b4ccbf Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 12 Mar 2020 20:05:43 +0100 Subject: [PATCH 0091/1971] x86/mce/amd: Protect a not-fully initialized bank from the thresholding interrupt Make sure the thresholding bank descriptor is fully initialized when the thresholding interrupt fires after a hotplug event. [ bp: Write commit message and document long-forgotten bank_map. ] Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov Link: https://lkml.kernel.org/r/20200403161943.1458-4-bp@alien8.de --- arch/x86/kernel/cpu/mce/amd.c | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c index c3b3326ad4ac0..5639421577586 100644 --- a/arch/x86/kernel/cpu/mce/amd.c +++ b/arch/x86/kernel/cpu/mce/amd.c @@ -192,7 +192,12 @@ EXPORT_SYMBOL_GPL(smca_banks); static char buf_mcatype[MAX_MCATYPE_NAME_LEN]; static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks); -static DEFINE_PER_CPU(unsigned int, bank_map); /* see which banks are on */ + +/* + * A list of the banks enabled on each logical CPU. Controls which respective + * descriptors to initialize later in mce_threshold_create_device(). + */ +static DEFINE_PER_CPU(unsigned int, bank_map); /* Map of banks that have more than MCA_MISC0 available. */ static DEFINE_PER_CPU(u32, smca_misc_banks_map); @@ -1016,13 +1021,22 @@ static void log_and_reset_block(struct threshold_block *block) static void amd_threshold_interrupt(void) { struct threshold_block *first_block = NULL, *block = NULL, *tmp = NULL; + struct threshold_bank **bp = this_cpu_read(threshold_banks); unsigned int bank, cpu = smp_processor_id(); + /* + * Validate that the threshold bank has been initialized already. The + * handler is installed at boot time, but on a hotplug event the + * interrupt might fire before the data has been initialized. + */ + if (!bp) + return; + for (bank = 0; bank < this_cpu_read(mce_num_banks); ++bank) { if (!(per_cpu(bank_map, cpu) & (1 << bank))) continue; - first_block = per_cpu(threshold_banks, cpu)[bank]->blocks; + first_block = bp[bank]->blocks; if (!first_block) continue; @@ -1247,6 +1261,7 @@ static int allocate_threshold_blocks(unsigned int cpu, struct threshold_bank *tb INIT_LIST_HEAD(&b->miscj); + /* This is safe as @tb is not visible yet */ if (tb->blocks) list_add(&b->miscj, &tb->blocks->miscj); else -- GitLab From 6e7a41c63abcfee28734c4c8872dae8d642329b6 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 30 Mar 2020 16:21:54 +0200 Subject: [PATCH 0092/1971] x86/mce/amd: Sanitize thresholding device creation hotplug path Drop the stupid threshold_init_device() initcall iterating over all online CPUs in favor of properly setting up everything on the CPU hotplug path, when each CPU's callback is invoked. [ bp: Write commit message. 
] Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov Link: https://lkml.kernel.org/r/20200403161943.1458-5-bp@alien8.de --- arch/x86/kernel/cpu/mce/amd.c | 57 ++++++++++------------------------ arch/x86/kernel/cpu/mce/core.c | 11 +++++++ 2 files changed, 27 insertions(+), 41 deletions(-) diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c index 5639421577586..d3c416b6052a1 100644 --- a/arch/x86/kernel/cpu/mce/amd.c +++ b/arch/x86/kernel/cpu/mce/amd.c @@ -1474,12 +1474,22 @@ int mce_threshold_remove_device(unsigned int cpu) return 0; } -/* create dir/files for all valid threshold banks */ +/** + * mce_threshold_create_device - Create the per-CPU MCE threshold device + * @cpu: The plugged in CPU + * + * Create directories and files for all valid threshold banks. + * + * This is invoked from the CPU hotplug callback which was installed in + * mcheck_init_device(). The invocation happens in context of the hotplug + * thread running on @cpu. The callback is invoked on all CPUs which are + * online when the callback is installed or during a real hotplug event. + */ int mce_threshold_create_device(unsigned int cpu) { unsigned int bank; struct threshold_bank **bp; - int err = 0; + int err; if (!mce_flags.amd_threshold) return 0; @@ -1500,49 +1510,14 @@ int mce_threshold_create_device(unsigned int cpu) continue; err = threshold_create_bank(cpu, bank); if (err) - goto err; - } - return err; -err: - mce_threshold_remove_device(cpu); - return err; -} - -static __init int threshold_init_device(void) -{ - unsigned lcpu = 0; - - /* to hit CPUs online before the notifier is up */ - for_each_online_cpu(lcpu) { - int err = mce_threshold_create_device(lcpu); - - if (err) - return err; + goto out_err; } if (thresholding_irq_en) mce_threshold_vector = amd_threshold_interrupt; return 0; +out_err: + mce_threshold_remove_device(cpu); + return err; } -/* - * there are 3 funcs which need to be _initcalled in a logic sequence: - * 1. xen_late_init_mcelog - * 2. mcheck_init_device - * 3. threshold_init_device - * - * xen_late_init_mcelog must register xen_mce_chrdev_device before - * native mce_chrdev_device registration if running under xen platform; - * - * mcheck_init_device should be inited before threshold_init_device to - * initialize mce_device, otherwise a NULL ptr dereference will cause panic. - * - * so we use following _initcalls - * 1. device_initcall(xen_late_init_mcelog); - * 2. device_initcall_sync(mcheck_init_device); - * 3. late_initcall(threshold_init_device); - * - * when running under xen, the initcall order is 1,2,3; - * on baremetal, we skip 1 and we do only 2 and 3. - */ -late_initcall(threshold_init_device); diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index 43ca91e14a77c..a6009efdfe2b3 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -2481,6 +2481,13 @@ static __init void mce_init_banks(void) } } +/* + * When running on XEN, this initcall is ordered against the XEN mcelog + * initcall: + * + * device_initcall(xen_late_init_mcelog); + * device_initcall_sync(mcheck_init_device); + */ static __init int mcheck_init_device(void) { int err; @@ -2512,6 +2519,10 @@ static __init int mcheck_init_device(void) if (err) goto err_out_mem; + /* + * Invokes mce_cpu_online() on all CPUs which are online when + * the state is installed. 
+ */ err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/mce:online", mce_cpu_online, mce_cpu_pre_down); if (err < 0) -- GitLab From 6458de97fc15530b54477c4e2b70af653e8ac3d9 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 30 Mar 2020 20:30:45 +0200 Subject: [PATCH 0093/1971] x86/mce/amd: Straighten CPU hotplug path mce_threshold_create_device() hotplug callback runs on the plugged in CPU so: - use this_cpu_read() which is faster - pass in struct threshold_bank **bp to threshold_create_bank() and instead of doing per-CPU accesses - Use rdmsr_safe() instead of rdmsr_safe_on_cpu() which avoids an IPI. No functional changes. Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov Link: https://lkml.kernel.org/r/20200403161943.1458-6-bp@alien8.de --- arch/x86/kernel/cpu/mce/amd.c | 32 +++++++++++++++----------------- 1 file changed, 15 insertions(+), 17 deletions(-) diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c index d3c416b6052a1..a33d9a1caf369 100644 --- a/arch/x86/kernel/cpu/mce/amd.c +++ b/arch/x86/kernel/cpu/mce/amd.c @@ -1223,10 +1223,10 @@ static int allocate_threshold_blocks(unsigned int cpu, struct threshold_bank *tb u32 low, high; int err; - if ((bank >= per_cpu(mce_num_banks, cpu)) || (block >= NR_BLOCKS)) + if ((bank >= this_cpu_read(mce_num_banks)) || (block >= NR_BLOCKS)) return 0; - if (rdmsr_safe_on_cpu(cpu, address, &low, &high)) + if (rdmsr_safe(address, &low, &high)) return 0; if (!(high & MASK_VALID_HI)) { @@ -1316,9 +1316,10 @@ static int __threshold_add_blocks(struct threshold_bank *b) return err; } -static int threshold_create_bank(unsigned int cpu, unsigned int bank) +static int threshold_create_bank(struct threshold_bank **bp, unsigned int cpu, + unsigned int bank) { - struct device *dev = per_cpu(mce_device, cpu); + struct device *dev = this_cpu_read(mce_device); struct amd_northbridge *nb = NULL; struct threshold_bank *b = NULL; const char *name = get_name(bank, NULL); @@ -1338,7 +1339,7 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank) if (err) goto out; - per_cpu(threshold_banks, cpu)[bank] = b; + bp[bank] = b; refcount_inc(&b->cpus); err = __threshold_add_blocks(b); @@ -1374,8 +1375,7 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank) if (err) goto out_kobj; - per_cpu(threshold_banks, cpu)[bank] = b; - + bp[bank] = b; return 0; out_kobj: @@ -1487,35 +1487,33 @@ int mce_threshold_remove_device(unsigned int cpu) */ int mce_threshold_create_device(unsigned int cpu) { - unsigned int bank; + unsigned int numbanks, bank; struct threshold_bank **bp; int err; if (!mce_flags.amd_threshold) return 0; - bp = per_cpu(threshold_banks, cpu); + bp = this_cpu_read(threshold_banks); if (bp) return 0; - bp = kcalloc(per_cpu(mce_num_banks, cpu), sizeof(struct threshold_bank *), - GFP_KERNEL); + numbanks = this_cpu_read(mce_num_banks); + bp = kcalloc(numbanks, sizeof(*bp), GFP_KERNEL); if (!bp) return -ENOMEM; - per_cpu(threshold_banks, cpu) = bp; - - for (bank = 0; bank < per_cpu(mce_num_banks, cpu); ++bank) { - if (!(per_cpu(bank_map, cpu) & (1 << bank))) + for (bank = 0; bank < numbanks; ++bank) { + if (!(this_cpu_read(bank_map) & (1 << bank))) continue; - err = threshold_create_bank(cpu, bank); + err = threshold_create_bank(bp, cpu, bank); if (err) goto out_err; } + this_cpu_write(threshold_banks, bp); if (thresholding_irq_en) mce_threshold_vector = amd_threshold_interrupt; - return 0; out_err: mce_threshold_remove_device(cpu); -- GitLab From f26d2580a7ddc84aa9e51e47fdbb5ad63dbee5a7 Mon Sep 17 
00:00:00 2001 From: Thomas Gleixner Date: Tue, 31 Mar 2020 10:53:18 +0200 Subject: [PATCH 0094/1971] x86/mce/amd: Cleanup threshold device remove path Pass in the bank pointer directly to the cleaning up functions, obviating the need for per-CPU accesses. Make the clean up path interrupt-safe by cleaning the bank pointer first so that the rest of the teardown happens safe from the thresholding interrupt. No functional changes. [ bp: Write commit message and reverse bank->shared test to save an indentation level in threshold_remove_bank(). ] Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov Link: https://lkml.kernel.org/r/20200403161943.1458-7-bp@alien8.de --- arch/x86/include/asm/amd_nb.h | 1 + arch/x86/kernel/cpu/mce/amd.c | 79 ++++++++++++++++------------------- 2 files changed, 38 insertions(+), 42 deletions(-) diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h index c7df20e78b095..455066a06f607 100644 --- a/arch/x86/include/asm/amd_nb.h +++ b/arch/x86/include/asm/amd_nb.h @@ -57,6 +57,7 @@ struct threshold_bank { /* initialized to the number of CPUs on the node sharing this bank */ refcount_t cpus; + unsigned int shared; }; struct amd_northbridge { diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c index a33d9a1caf369..16e7aea86ab11 100644 --- a/arch/x86/kernel/cpu/mce/amd.c +++ b/arch/x86/kernel/cpu/mce/amd.c @@ -1362,6 +1362,7 @@ static int threshold_create_bank(struct threshold_bank **bp, unsigned int cpu, } if (is_shared_bank(bank)) { + b->shared = 1; refcount_set(&b->cpus, 1); /* nb is already initialized, see above */ @@ -1391,21 +1392,16 @@ static void threshold_block_release(struct kobject *kobj) kfree(to_block(kobj)); } -static void deallocate_threshold_block(unsigned int cpu, unsigned int bank) +static void deallocate_threshold_blocks(struct threshold_bank *bank) { - struct threshold_block *pos = NULL; - struct threshold_block *tmp = NULL; - struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank]; - - if (!head) - return; + struct threshold_block *pos, *tmp; - list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) { + list_for_each_entry_safe(pos, tmp, &bank->blocks->miscj, miscj) { list_del(&pos->miscj); kobject_put(&pos->kobj); } - kobject_put(&head->blocks->kobj); + kobject_put(&bank->blocks->kobj); } static void __threshold_remove_blocks(struct threshold_bank *b) @@ -1419,57 +1415,56 @@ static void __threshold_remove_blocks(struct threshold_bank *b) kobject_del(&pos->kobj); } -static void threshold_remove_bank(unsigned int cpu, int bank) +static void threshold_remove_bank(struct threshold_bank *bank) { struct amd_northbridge *nb; - struct threshold_bank *b; - b = per_cpu(threshold_banks, cpu)[bank]; - if (!b) - return; + if (!bank->blocks) + goto out_free; - if (!b->blocks) - goto free_out; + if (!bank->shared) + goto out_dealloc; - if (is_shared_bank(bank)) { - if (!refcount_dec_and_test(&b->cpus)) { - __threshold_remove_blocks(b); - per_cpu(threshold_banks, cpu)[bank] = NULL; - return; - } else { - /* - * the last CPU on this node using the shared bank is - * going away, remove that bank now. - */ - nb = node_to_amd_nb(amd_get_nb_id(cpu)); - nb->bank4 = NULL; - } + if (!refcount_dec_and_test(&bank->cpus)) { + __threshold_remove_blocks(bank); + return; + } else { + /* + * The last CPU on this node using the shared bank is going + * away, remove that bank now. 
+ */ + nb = node_to_amd_nb(amd_get_nb_id(smp_processor_id())); + nb->bank4 = NULL; } - deallocate_threshold_block(cpu, bank); +out_dealloc: + deallocate_threshold_blocks(bank); -free_out: - kobject_del(b->kobj); - kobject_put(b->kobj); - kfree(b); - per_cpu(threshold_banks, cpu)[bank] = NULL; +out_free: + kobject_put(bank->kobj); + kfree(bank); } int mce_threshold_remove_device(unsigned int cpu) { struct threshold_bank **bp = this_cpu_read(threshold_banks); - unsigned int bank; + unsigned int bank, numbanks = this_cpu_read(mce_num_banks); if (!bp) return 0; - for (bank = 0; bank < per_cpu(mce_num_banks, cpu); ++bank) { - if (!(per_cpu(bank_map, cpu) & (1 << bank))) - continue; - threshold_remove_bank(cpu, bank); - } - /* Clear the pointer before freeing the memory */ + /* + * Clear the pointer before cleaning up, so that the interrupt won't + * touch anything of this. + */ this_cpu_write(threshold_banks, NULL); + + for (bank = 0; bank < numbanks; bank++) { + if (bp[bank]) { + threshold_remove_bank(bp[bank]); + bp[bank] = NULL; + } + } kfree(bp); return 0; } -- GitLab From a037f3ca0ea0a660e3f961431095a88674b8f3c4 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 31 Mar 2020 13:16:44 +0200 Subject: [PATCH 0095/1971] x86/mce/amd: Make threshold bank setting hotplug robust Handle the cases when the CPU goes offline before the bank setting/reading happens. [ bp: Write commit message. ] Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov Link: https://lkml.kernel.org/r/20200403161943.1458-8-bp@alien8.de --- arch/x86/kernel/cpu/mce/amd.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c index 16e7aea86ab11..15c87b87b901a 100644 --- a/arch/x86/kernel/cpu/mce/amd.c +++ b/arch/x86/kernel/cpu/mce/amd.c @@ -386,6 +386,10 @@ static void threshold_restart_bank(void *_tr) struct thresh_restart *tr = _tr; u32 hi, lo; + /* sysfs write might race against an offline operation */ + if (this_cpu_read(threshold_banks)) + return; + rdmsr(tr->b->address, lo, hi); if (tr->b->threshold_limit < (hi & THRESHOLD_MAX)) @@ -1085,7 +1089,8 @@ store_interrupt_enable(struct threshold_block *b, const char *buf, size_t size) memset(&tr, 0, sizeof(tr)); tr.b = b; - smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1); + if (smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1)) + return -ENODEV; return size; } @@ -1109,7 +1114,8 @@ store_threshold_limit(struct threshold_block *b, const char *buf, size_t size) b->threshold_limit = new; tr.b = b; - smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1); + if (smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1)) + return -ENODEV; return size; } @@ -1118,7 +1124,9 @@ static ssize_t show_error_count(struct threshold_block *b, char *buf) { u32 lo, hi; - rdmsr_on_cpu(b->cpu, b->address, &lo, &hi); + /* CPU might be offline by now */ + if (rdmsr_on_cpu(b->cpu, b->address, &lo, &hi)) + return -ENODEV; return sprintf(buf, "%u\n", ((hi & THRESHOLD_MAX) - (THRESHOLD_MAX - b->threshold_limit))); -- GitLab From 3e0fdec858d82c829774f271e88b5ceb17051551 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Tue, 7 Apr 2020 09:55:10 +0200 Subject: [PATCH 0096/1971] x86/mce/amd, edac: Remove report_gart_errors ... because no one should be interested in spurious MCEs anyway. Make the filtering unconditional and move it to amd_filter_mce(). 
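As a self-contained sketch (not part of this patch), the signature that the now-unconditional filter keys on is bank 4 with extended error code 0x5, which the XEC() helper being moved into <asm/mce.h> extracts from MCA_STATUS bits 16 and up; the status value below is fabricated purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define XEC(x, mask)    (((x) >> 16) & (mask))

int main(void)
{
        uint64_t status = (uint64_t)0x5 << 16;   /* fabricated MCA_STATUS value */
        int bank = 4;                            /* NB GART TLB bank */

        if (bank == 4 && XEC(status, 0x1f) == 0x5)
                printf("spurious GART TLB walk error - filtered\n");

        return 0;
}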
Signed-off-by: Borislav Petkov Tested-by: Tony Luck Link: https://lkml.kernel.org/r/20200407163414.18058-2-bp@alien8.de --- arch/x86/include/asm/mce.h | 3 ++- arch/x86/kernel/cpu/mce/amd.c | 9 +++++++-- drivers/edac/amd64_edac.c | 8 -------- drivers/edac/mce_amd.c | 24 ------------------------ drivers/edac/mce_amd.h | 2 -- 5 files changed, 9 insertions(+), 37 deletions(-) diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index f9cea081c05b0..83b6ddafa032c 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -127,6 +127,8 @@ #define MSR_AMD64_SMCA_MCx_DEADDR(x) (MSR_AMD64_SMCA_MC0_DEADDR + 0x10*(x)) #define MSR_AMD64_SMCA_MCx_MISCy(x, y) ((MSR_AMD64_SMCA_MC0_MISC1 + y) + (0x10*(x))) +#define XEC(x, mask) (((x) >> 16) & mask) + /* * This structure contains all data related to the MCE log. Also * carries a signature to make it easier to find from external @@ -347,5 +349,4 @@ umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr) { return #endif static inline void mce_hygon_feature_init(struct cpuinfo_x86 *c) { return mce_amd_feature_init(c); } - #endif /* _ASM_X86_MCE_H */ diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c index 15c87b87b901a..ea3cf714b7ad4 100644 --- a/arch/x86/kernel/cpu/mce/amd.c +++ b/arch/x86/kernel/cpu/mce/amd.c @@ -577,14 +577,19 @@ bool amd_filter_mce(struct mce *m) { enum smca_bank_types bank_type = smca_get_bank_type(m->bank); struct cpuinfo_x86 *c = &boot_cpu_data; - u8 xec = (m->status >> 16) & 0x3F; /* See Family 17h Models 10h-2Fh Erratum #1114. */ if (c->x86 == 0x17 && c->x86_model >= 0x10 && c->x86_model <= 0x2F && - bank_type == SMCA_IF && xec == 10) + bank_type == SMCA_IF && XEC(m->status, 0x3f) == 10) return true; + /* NB GART TLB error reporting is disabled by default. */ + if (c->x86 < 0x17) { + if (m->bank == 4 && XEC(m->status, 0x1f) == 0x5) + return true; + } + return false; } diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index f91f3bc1e0b28..6bdc5bb8c8bc9 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -4,9 +4,6 @@ static struct edac_pci_ctl_info *pci_ctl; -static int report_gart_errors; -module_param(report_gart_errors, int, 0644); - /* * Set by command line parameter. If BIOS has enabled the ECC, this override is * cleared to prevent re-enabling the hardware by this driver. @@ -3681,9 +3678,6 @@ static int __init amd64_edac_init(void) } /* register stuff with EDAC MCE */ - if (report_gart_errors) - amd_report_gart_errors(true); - if (boot_cpu_data.x86 >= 0x17) amd_register_ecc_decoder(decode_umc_error); else @@ -3718,8 +3712,6 @@ static void __exit amd64_edac_exit(void) edac_pci_release_generic_ctl(pci_ctl); /* unregister from EDAC MCE */ - amd_report_gart_errors(false); - if (boot_cpu_data.x86 >= 0x17) amd_unregister_ecc_decoder(decode_umc_error); else diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c index 8874b7722b2f2..e58644d9c92b7 100644 --- a/drivers/edac/mce_amd.c +++ b/drivers/edac/mce_amd.c @@ -10,15 +10,8 @@ static struct amd_decoder_ops fam_ops; static u8 xec_mask = 0xf; -static bool report_gart_errors; static void (*decode_dram_ecc)(int node_id, struct mce *m); -void amd_report_gart_errors(bool v) -{ - report_gart_errors = v; -} -EXPORT_SYMBOL_GPL(amd_report_gart_errors); - void amd_register_ecc_decoder(void (*f)(int, struct mce *)) { decode_dram_ecc = f; @@ -1030,20 +1023,6 @@ static inline void amd_decode_err_code(u16 ec) pr_cont("\n"); } -/* - * Filter out unwanted MCE signatures here. 
- */ -static bool ignore_mce(struct mce *m) -{ - /* - * NB GART TLB error reporting is disabled by default. - */ - if (m->bank == 4 && XEC(m->status, 0x1f) == 0x5 && !report_gart_errors) - return true; - - return false; -} - static const char *decode_error_status(struct mce *m) { if (m->status & MCI_STATUS_UC) { @@ -1067,9 +1046,6 @@ amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data) unsigned int fam = x86_family(m->cpuid); int ecc; - if (ignore_mce(m)) - return NOTIFY_STOP; - pr_emerg(HW_ERR "%s\n", decode_error_status(m)); pr_emerg(HW_ERR "CPU:%d (%x:%x:%x) MC%d_STATUS[%s|%s|%s|%s|%s", diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h index 4e9c5e596c6c2..4811b18d9606b 100644 --- a/drivers/edac/mce_amd.h +++ b/drivers/edac/mce_amd.h @@ -7,7 +7,6 @@ #include #define EC(x) ((x) & 0xffff) -#define XEC(x, mask) (((x) >> 16) & mask) #define LOW_SYNDROME(x) (((x) >> 15) & 0xff) #define HIGH_SYNDROME(x) (((x) >> 24) & 0xff) @@ -77,7 +76,6 @@ struct amd_decoder_ops { bool (*mc2_mce)(u16, u8); }; -void amd_report_gart_errors(bool); void amd_register_ecc_decoder(void (*f)(int, struct mce *)); void amd_unregister_ecc_decoder(void (*f)(int, struct mce *)); -- GitLab From c9c6d216ed28be6e2c91e3651af169eca284813a Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Fri, 14 Feb 2020 14:27:14 -0800 Subject: [PATCH 0097/1971] x86/mce: Rename "first" function as "early" It isn't going to be first on the notifier chain when the CEC is moved to be a normal user of the notifier chain. Fix the enum for the MCE_PRIO symbols to list them in reverse order so that the compiler can give them numbers from low to high priority. Add an entry for MCE_PRIO_CEC as the highest priority. [ bp: Use passive voice, add comments. ] Signed-off-by: Tony Luck Signed-off-by: Borislav Petkov Tested-by: Tony Luck Link: https://lkml.kernel.org/r/20200214222720.13168-2-tony.luck@intel.com --- arch/x86/include/asm/mce.h | 16 +++++++++------- arch/x86/kernel/cpu/mce/core.c | 10 +++++----- 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index 83b6ddafa032c..689ac6e9c65f0 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -144,14 +144,16 @@ struct mce_log_buffer { struct mce entry[]; }; +/* Highest last */ enum mce_notifier_prios { - MCE_PRIO_FIRST = INT_MAX, - MCE_PRIO_UC = INT_MAX - 1, - MCE_PRIO_EXTLOG = INT_MAX - 2, - MCE_PRIO_NFIT = INT_MAX - 3, - MCE_PRIO_EDAC = INT_MAX - 4, - MCE_PRIO_MCELOG = 1, - MCE_PRIO_LOWEST = 0, + MCE_PRIO_LOWEST, + MCE_PRIO_MCELOG, + MCE_PRIO_EDAC, + MCE_PRIO_NFIT, + MCE_PRIO_EXTLOG, + MCE_PRIO_UC, + MCE_PRIO_EARLY, + MCE_PRIO_CEC }; struct notifier_block; diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index a6009efdfe2b3..43b1519ad4e5b 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -559,7 +559,7 @@ static bool cec_add_mce(struct mce *m) return false; } -static int mce_first_notifier(struct notifier_block *nb, unsigned long val, +static int mce_early_notifier(struct notifier_block *nb, unsigned long val, void *data) { struct mce *m = (struct mce *)data; @@ -580,9 +580,9 @@ static int mce_first_notifier(struct notifier_block *nb, unsigned long val, return NOTIFY_DONE; } -static struct notifier_block first_nb = { - .notifier_call = mce_first_notifier, - .priority = MCE_PRIO_FIRST, +static struct notifier_block early_nb = { + .notifier_call = mce_early_notifier, + .priority = MCE_PRIO_EARLY, }; static int 
uc_decode_notifier(struct notifier_block *nb, unsigned long val, @@ -2041,7 +2041,7 @@ __setup("mce", mcheck_enable); int __init mcheck_init(void) { mcheck_intel_therm_init(); - mce_register_decode_chain(&first_nb); + mce_register_decode_chain(&early_nb); mce_register_decode_chain(&mce_uc_nb); mce_register_decode_chain(&mce_default_nb); mcheck_vendor_init_severity(); -- GitLab From db539cb928c0abe3b6c96ab16ed19b90c5dd110d Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sat, 11 Apr 2020 17:35:28 +0200 Subject: [PATCH 0098/1971] pwm: Add missing '\n' in log messages MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Message logged by 'dev_xxx()' or 'pr_xxx()' should end with a '\n'. Fixes: 3ad1f3a33286 ("pwm: Implement some checks for lowlevel drivers") Signed-off-by: Christophe JAILLET Acked-by: Uwe Kleine-König Signed-off-by: Thierry Reding --- drivers/pwm/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index 9973c442b4555..bca04965bfe65 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -537,7 +537,7 @@ static void pwm_apply_state_debug(struct pwm_device *pwm, if (!state->enabled && s2.enabled && s2.duty_cycle > 0) dev_warn(chip->dev, - "requested disabled, but yielded enabled with duty > 0"); + "requested disabled, but yielded enabled with duty > 0\n"); /* reapply the state that the driver reported being configured. */ err = chip->ops->apply(chip, pwm, &s1); -- GitLab From 9554bfe403bdfc084823df8695a01f28c680af61 Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Fri, 14 Feb 2020 14:27:15 -0800 Subject: [PATCH 0099/1971] x86/mce: Convert the CEC to use the MCE notifier The CEC code has its claws in a couple of routines in mce/core.c. Convert it to just register itself on the normal MCE notifier chain. [ bp: Make cec_add_elem() and cec_init() static. ] Signed-off-by: Tony Luck Signed-off-by: Borislav Petkov Tested-by: Tony Luck Link: https://lkml.kernel.org/r/20200214222720.13168-3-tony.luck@intel.com --- arch/x86/kernel/cpu/mce/core.c | 19 ------------------- drivers/ras/cec.c | 30 ++++++++++++++++++++++++++++-- include/linux/ras.h | 5 ----- 3 files changed, 28 insertions(+), 26 deletions(-) diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index 43b1519ad4e5b..b033b35896300 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -544,21 +544,6 @@ bool mce_is_correctable(struct mce *m) } EXPORT_SYMBOL_GPL(mce_is_correctable); -static bool cec_add_mce(struct mce *m) -{ - if (!m) - return false; - - /* We eat only correctable DRAM errors with usable addresses. 
*/ - if (mce_is_memory_error(m) && - mce_is_correctable(m) && - mce_usable_address(m)) - if (!cec_add_elem(m->addr >> PAGE_SHIFT)) - return true; - - return false; -} - static int mce_early_notifier(struct notifier_block *nb, unsigned long val, void *data) { @@ -567,9 +552,6 @@ static int mce_early_notifier(struct notifier_block *nb, unsigned long val, if (!m) return NOTIFY_DONE; - if (cec_add_mce(m)) - return NOTIFY_STOP; - /* Emit the trace record: */ trace_mce_record(m); @@ -2612,7 +2594,6 @@ static int __init mcheck_late_init(void) static_branch_inc(&mcsafe_key); mcheck_debugfs_init(); - cec_init(); /* * Flush out everything that has been logged during early boot, now that diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c index c09cf55e2d204..6b42040bf956e 100644 --- a/drivers/ras/cec.c +++ b/drivers/ras/cec.c @@ -309,7 +309,7 @@ static bool sanity_check(struct ce_array *ca) return ret; } -int cec_add_elem(u64 pfn) +static int cec_add_elem(u64 pfn) { struct ce_array *ca = &ce_arr; unsigned int to = 0; @@ -527,7 +527,30 @@ err: return 1; } -void __init cec_init(void) +static int cec_notifier(struct notifier_block *nb, unsigned long val, + void *data) +{ + struct mce *m = (struct mce *)data; + + if (!m) + return NOTIFY_DONE; + + /* We eat only correctable DRAM errors with usable addresses. */ + if (mce_is_memory_error(m) && + mce_is_correctable(m) && + mce_usable_address(m)) + if (!cec_add_elem(m->addr >> PAGE_SHIFT)) + return NOTIFY_STOP; + + return NOTIFY_DONE; +} + +static struct notifier_block cec_nb = { + .notifier_call = cec_notifier, + .priority = MCE_PRIO_CEC, +}; + +static void __init cec_init(void) { if (ce_arr.disabled) return; @@ -546,8 +569,11 @@ void __init cec_init(void) INIT_DELAYED_WORK(&cec_work, cec_work_fn); schedule_delayed_work(&cec_work, CEC_DECAY_DEFAULT_INTERVAL); + mce_register_decode_chain(&cec_nb); + pr_info("Correctable Errors collector initialized.\n"); } +late_initcall(cec_init); int __init parse_cec_param(char *str) { diff --git a/include/linux/ras.h b/include/linux/ras.h index 7c3debb47c87a..1f4048bf2674d 100644 --- a/include/linux/ras.h +++ b/include/linux/ras.h @@ -17,12 +17,7 @@ static inline int ras_add_daemon_trace(void) { return 0; } #endif #ifdef CONFIG_RAS_CEC -void __init cec_init(void); int __init parse_cec_param(char *str); -int cec_add_elem(u64 pfn); -#else -static inline void __init cec_init(void) { } -static inline int cec_add_elem(u64 pfn) { return -ENODEV; } #endif #ifdef CONFIG_RAS -- GitLab From 07b053ff1c6ee7c972e7e3d0915d05f1b1280f14 Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Fri, 10 Apr 2020 18:14:17 +0900 Subject: [PATCH 0100/1971] dt-bindings: pwm: rcar: add r8a77961 support Add support for r8a77961 (R-Car M3-W+). 
Signed-off-by: Yoshihiro Shimoda Reviewed-by: Geert Uytterhoeven Signed-off-by: Thierry Reding --- Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.yaml b/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.yaml index 945c14e1be35e..620a2ff40fc76 100644 --- a/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.yaml +++ b/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.yaml @@ -27,6 +27,7 @@ properties: - renesas,pwm-r8a7794 # R-Car E2 - renesas,pwm-r8a7795 # R-Car H3 - renesas,pwm-r8a7796 # R-Car M3-W + - renesas,pwm-r8a77961 # R-Car M3-W+ - renesas,pwm-r8a77965 # R-Car M3-N - renesas,pwm-r8a77970 # R-Car V3M - renesas,pwm-r8a77980 # R-Car V3H -- GitLab From 1de08dccd383482a3e88845d3554094d338f5ff9 Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Fri, 14 Feb 2020 14:27:16 -0800 Subject: [PATCH 0101/1971] x86/mce: Add a struct mce.kflags field There can be many different subsystems register on the mce handler chain. Add a new bitmask field and define values so that handlers can indicate whether they took any action to log or otherwise handle an error. The default handler at the end of the chain can use this information to decide whether to print to the console log. Boris suggested a generic name and leaving plenty of spare bits for possible future use. [ bp: Move flag bits to the internal mce.h header and use BIT_ULL(). ] Signed-off-by: Tony Luck Signed-off-by: Borislav Petkov Tested-by: Tony Luck Link: https://lkml.kernel.org/r/20200214222720.13168-4-tony.luck@intel.com --- arch/x86/include/asm/mce.h | 8 ++++++++ arch/x86/include/uapi/asm/mce.h | 1 + 2 files changed, 9 insertions(+) diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index 689ac6e9c65f0..5f04a24f30eaa 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -129,6 +129,14 @@ #define XEC(x, mask) (((x) >> 16) & mask) +/* mce.kflags flag bits for logging etc. */ +#define MCE_HANDLED_CEC BIT_ULL(0) +#define MCE_HANDLED_UC BIT_ULL(1) +#define MCE_HANDLED_EXTLOG BIT_ULL(2) +#define MCE_HANDLED_NFIT BIT_ULL(3) +#define MCE_HANDLED_EDAC BIT_ULL(4) +#define MCE_HANDLED_MCELOG BIT_ULL(5) + /* * This structure contains all data related to the MCE log. Also * carries a signature to make it easier to find from external diff --git a/arch/x86/include/uapi/asm/mce.h b/arch/x86/include/uapi/asm/mce.h index 955c2a2e1cf9d..5b59d80f1d4e1 100644 --- a/arch/x86/include/uapi/asm/mce.h +++ b/arch/x86/include/uapi/asm/mce.h @@ -35,6 +35,7 @@ struct mce { __u64 ipid; /* MCA_IPID MSR: only valid on SMCA systems */ __u64 ppin; /* Protected Processor Inventory Number */ __u32 microcode; /* Microcode revision */ + __u64 kflags; /* Internal kernel use. See below */ }; #define MCE_GET_RECORD_LEN _IOR('M', 1, int) -- GitLab From 23ba710a0864108910c7531dc4c73ef65eca5568 Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Fri, 14 Feb 2020 14:27:17 -0800 Subject: [PATCH 0102/1971] x86/mce: Fix all mce notifiers to update the mce->kflags bitmask If the handler took any action to log or deal with the error, set a bit in mce->kflags so that the default handler on the end of the machine check chain can see what has been done. Get rid of NOTIFY_STOP returns. Make the EDAC and dev-mcelog handlers skip over errors already processed by CEC. 
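As an editor's sketch (not code from the patch), a decode-chain handler follows this rough shape after the change; the function name is a placeholder and MCE_HANDLED_EDAC stands in for whichever flag a given handler owns:

#include <linux/notifier.h>
#include <asm/mce.h>

static int example_decode_notifier(struct notifier_block *nb,
                                   unsigned long val, void *data)
{
        struct mce *m = data;

        if (!m)
                return NOTIFY_DONE;

        /* Skip records the CEC already consumed. */
        if (m->kflags & MCE_HANDLED_CEC)
                return NOTIFY_DONE;

        /* ... decode/log the error here ... */

        /*
         * Mark the record as handled instead of returning NOTIFY_STOP,
         * so later handlers still run and the default printer can see
         * that it was logged.
         */
        m->kflags |= MCE_HANDLED_EDAC;
        return NOTIFY_OK;
}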
Signed-off-by: Tony Luck Signed-off-by: Borislav Petkov Tested-by: Tony Luck Link: https://lkml.kernel.org/r/20200214222720.13168-5-tony.luck@intel.com --- arch/x86/kernel/cpu/mce/core.c | 4 +++- arch/x86/kernel/cpu/mce/dev-mcelog.c | 5 +++++ drivers/acpi/acpi_extlog.c | 5 +++-- drivers/acpi/nfit/mce.c | 1 + drivers/edac/i7core_edac.c | 5 +++-- drivers/edac/mce_amd.c | 6 +++++- drivers/edac/pnd2_edac.c | 5 +++-- drivers/edac/sb_edac.c | 5 ++++- drivers/edac/skx_common.c | 4 ++++ drivers/ras/cec.c | 9 ++++++--- 10 files changed, 37 insertions(+), 12 deletions(-) diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index b033b35896300..5666a48a4bc9a 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -581,8 +581,10 @@ static int uc_decode_notifier(struct notifier_block *nb, unsigned long val, return NOTIFY_DONE; pfn = mce->addr >> PAGE_SHIFT; - if (!memory_failure(pfn, 0)) + if (!memory_failure(pfn, 0)) { set_mce_nospec(pfn); + mce->kflags |= MCE_HANDLED_UC; + } return NOTIFY_OK; } diff --git a/arch/x86/kernel/cpu/mce/dev-mcelog.c b/arch/x86/kernel/cpu/mce/dev-mcelog.c index d089567a9ce82..c033e7ea9e3c3 100644 --- a/arch/x86/kernel/cpu/mce/dev-mcelog.c +++ b/arch/x86/kernel/cpu/mce/dev-mcelog.c @@ -39,6 +39,9 @@ static int dev_mce_log(struct notifier_block *nb, unsigned long val, struct mce *mce = (struct mce *)data; unsigned int entry; + if (mce->kflags & MCE_HANDLED_CEC) + return NOTIFY_DONE; + mutex_lock(&mce_chrdev_read_mutex); entry = mcelog->next; @@ -56,6 +59,7 @@ static int dev_mce_log(struct notifier_block *nb, unsigned long val, memcpy(mcelog->entry + entry, mce, sizeof(struct mce)); mcelog->entry[entry].finished = 1; + mcelog->entry[entry].kflags = 0; /* wake processes polling /dev/mcelog */ wake_up_interruptible(&mce_chrdev_wait); @@ -63,6 +67,7 @@ static int dev_mce_log(struct notifier_block *nb, unsigned long val, unlock: mutex_unlock(&mce_chrdev_read_mutex); + mce->kflags |= MCE_HANDLED_MCELOG; return NOTIFY_OK; } diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c index 8596a106a933c..9cc3c1f92db56 100644 --- a/drivers/acpi/acpi_extlog.c +++ b/drivers/acpi/acpi_extlog.c @@ -146,7 +146,7 @@ static int extlog_print(struct notifier_block *nb, unsigned long val, static u32 err_seq; estatus = extlog_elog_entry_check(cpu, bank); - if (estatus == NULL) + if (estatus == NULL || (mce->kflags & MCE_HANDLED_CEC)) return NOTIFY_DONE; memcpy(elog_buf, (void *)estatus, ELOG_ENTRY_LEN); @@ -176,7 +176,8 @@ static int extlog_print(struct notifier_block *nb, unsigned long val, } out: - return NOTIFY_STOP; + mce->kflags |= MCE_HANDLED_EXTLOG; + return NOTIFY_OK; } static bool __init extlog_get_l1addr(void) diff --git a/drivers/acpi/nfit/mce.c b/drivers/acpi/nfit/mce.c index f0ae48515b48f..ee8d9973f60b8 100644 --- a/drivers/acpi/nfit/mce.c +++ b/drivers/acpi/nfit/mce.c @@ -76,6 +76,7 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val, */ acpi_nfit_ars_rescan(acpi_desc, 0); } + mce->kflags |= MCE_HANDLED_NFIT; break; } diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c index b3135b208f9a0..5860ca41185cf 100644 --- a/drivers/edac/i7core_edac.c +++ b/drivers/edac/i7core_edac.c @@ -1815,7 +1815,7 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val, struct mem_ctl_info *mci; i7_dev = get_i7core_dev(mce->socketid); - if (!i7_dev) + if (!i7_dev || (mce->kflags & MCE_HANDLED_CEC)) return NOTIFY_DONE; mci = i7_dev->mci; @@ -1834,7 +1834,8 @@ static int 
i7core_mce_check_error(struct notifier_block *nb, unsigned long val, i7core_check_error(mci, mce); /* Advise mcelog that the errors were handled */ - return NOTIFY_STOP; + mce->kflags |= MCE_HANDLED_EDAC; + return NOTIFY_OK; } static struct notifier_block i7_mce_dec = { diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c index e58644d9c92b7..2b5401db56ad9 100644 --- a/drivers/edac/mce_amd.c +++ b/drivers/edac/mce_amd.c @@ -1046,6 +1046,9 @@ amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data) unsigned int fam = x86_family(m->cpuid); int ecc; + if (m->kflags & MCE_HANDLED_CEC) + return NOTIFY_DONE; + pr_emerg(HW_ERR "%s\n", decode_error_status(m)); pr_emerg(HW_ERR "CPU:%d (%x:%x:%x) MC%d_STATUS[%s|%s|%s|%s|%s", @@ -1146,7 +1149,8 @@ amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data) err_code: amd_decode_err_code(m->status & 0xffff); - return NOTIFY_STOP; + m->kflags |= MCE_HANDLED_EDAC; + return NOTIFY_OK; } static struct notifier_block amd_mce_dec_nb = { diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c index bc47328eb4858..1929a5dc8f94d 100644 --- a/drivers/edac/pnd2_edac.c +++ b/drivers/edac/pnd2_edac.c @@ -1400,7 +1400,7 @@ static int pnd2_mce_check_error(struct notifier_block *nb, unsigned long val, vo return NOTIFY_DONE; mci = pnd2_mci; - if (!mci) + if (!mci || (mce->kflags & MCE_HANDLED_CEC)) return NOTIFY_DONE; /* @@ -1429,7 +1429,8 @@ static int pnd2_mce_check_error(struct notifier_block *nb, unsigned long val, vo pnd2_mce_output_error(mci, mce, &daddr); /* Advice mcelog that the error were handled */ - return NOTIFY_STOP; + mce->kflags |= MCE_HANDLED_EDAC; + return NOTIFY_OK; } static struct notifier_block pnd2_mce_dec = { diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index 7d51c82be62ba..f790f7d086880 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c @@ -3136,6 +3136,8 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val, if (edac_get_report_status() == EDAC_REPORTING_DISABLED) return NOTIFY_DONE; + if (mce->kflags & MCE_HANDLED_CEC) + return NOTIFY_DONE; /* * Just let mcelog handle it if the error is @@ -3183,7 +3185,8 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val, sbridge_mce_output_error(mci, mce); /* Advice mcelog that the error were handled */ - return NOTIFY_STOP; + mce->kflags |= MCE_HANDLED_EDAC; + return NOTIFY_OK; } static struct notifier_block sbridge_mce_dec = { diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c index 99bbaf629b8d9..6f08a12f6b110 100644 --- a/drivers/edac/skx_common.c +++ b/drivers/edac/skx_common.c @@ -577,6 +577,9 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val, if (edac_get_report_status() == EDAC_REPORTING_DISABLED) return NOTIFY_DONE; + if (mce->kflags & MCE_HANDLED_CEC) + return NOTIFY_DONE; + /* ignore unless this is memory related with an address */ if ((mce->status & 0xefff) >> 7 != 1 || !(mce->status & MCI_STATUS_ADDRV)) return NOTIFY_DONE; @@ -616,6 +619,7 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val, skx_mce_output_error(mci, mce, &res); + mce->kflags |= MCE_HANDLED_EDAC; return NOTIFY_DONE; } diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c index 6b42040bf956e..569d9ad2c5942 100644 --- a/drivers/ras/cec.c +++ b/drivers/ras/cec.c @@ -538,9 +538,12 @@ static int cec_notifier(struct notifier_block *nb, unsigned long val, /* We eat only correctable DRAM errors with usable addresses. 
*/ if (mce_is_memory_error(m) && mce_is_correctable(m) && - mce_usable_address(m)) - if (!cec_add_elem(m->addr >> PAGE_SHIFT)) - return NOTIFY_STOP; + mce_usable_address(m)) { + if (!cec_add_elem(m->addr >> PAGE_SHIFT)) { + m->kflags |= MCE_HANDLED_CEC; + return NOTIFY_OK; + } + } return NOTIFY_DONE; } -- GitLab From 925946cfa715a5a71639528f82b98e58f14dd4cb Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Fri, 14 Feb 2020 14:27:18 -0800 Subject: [PATCH 0103/1971] x86/mce: Change default MCE logger to check mce->kflags Instead of keeping count of how many handlers are registered on the MCE notifier chain and printing if below some magic value, look at mce->kflags to see if anyone claims to have handled/logged this error. [ bp: Do not print ->kflags in __print_mce(). ] Signed-off-by: Tony Luck Signed-off-by: Borislav Petkov Tested-by: Tony Luck Link: https://lkml.kernel.org/r/20200214222720.13168-6-tony.luck@intel.com --- arch/x86/kernel/cpu/mce/core.c | 19 +++---------------- 1 file changed, 3 insertions(+), 16 deletions(-) diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index 5666a48a4bc9a..fc879b6669d5f 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -158,29 +158,17 @@ void mce_log(struct mce *m) } EXPORT_SYMBOL_GPL(mce_log); -/* - * We run the default notifier if we have only the UC, the first and the - * default notifier registered. I.e., the mandatory NUM_DEFAULT_NOTIFIERS - * notifiers registered on the chain. - */ -#define NUM_DEFAULT_NOTIFIERS 3 -static atomic_t num_notifiers; - void mce_register_decode_chain(struct notifier_block *nb) { if (WARN_ON(nb->priority > MCE_PRIO_MCELOG && nb->priority < MCE_PRIO_EDAC)) return; - atomic_inc(&num_notifiers); - blocking_notifier_chain_register(&x86_mce_decoder_chain, nb); } EXPORT_SYMBOL_GPL(mce_register_decode_chain); void mce_unregister_decode_chain(struct notifier_block *nb) { - atomic_dec(&num_notifiers); - blocking_notifier_chain_unregister(&x86_mce_decoder_chain, nb); } EXPORT_SYMBOL_GPL(mce_unregister_decode_chain); @@ -263,6 +251,7 @@ static void __print_mce(struct mce *m) } pr_cont("\n"); + /* * Note this output is parsed by external tools and old fields * should not be changed. @@ -602,10 +591,8 @@ static int mce_default_notifier(struct notifier_block *nb, unsigned long val, if (!m) return NOTIFY_DONE; - if (atomic_read(&num_notifiers) > NUM_DEFAULT_NOTIFIERS) - return NOTIFY_DONE; - - __print_mce(m); + if (!m->kflags) + __print_mce(m); return NOTIFY_DONE; } -- GitLab From 43505646941bee217b91d064756975aa1ab6ee3b Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Fri, 14 Feb 2020 14:27:19 -0800 Subject: [PATCH 0104/1971] x86/mce: Add mce=print_all option Sometimes, when logs are getting lost, it's nice to just have everything dumped to the serial console. 
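To illustrate how the new mce->kflags bookkeeping and this print_all override fit together, here is a minimal user-space sketch of the decision the default notifier ends up making. It is illustrative only: the struct, the decoder and the surrounding code below merely mirror the series and are not the kernel implementation.

  /* Sketch: model of the kflags-based "somebody already handled this
   * record" scheme.  MCE_HANDLED_* values mirror the series; everything
   * else is invented for the example. */
  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  #define MCE_HANDLED_EDAC   (1ULL << 4)
  #define MCE_HANDLED_MCELOG (1ULL << 5)

  struct fake_mce { uint64_t kflags; };

  static bool print_all;  /* models mca_cfg.print_all (mce=print_all) */

  /* A consumer on the decode chain claims the record instead of
   * returning NOTIFY_STOP to hide it from everybody else. */
  static void edac_like_decoder(struct fake_mce *m)
  {
          m->kflags |= MCE_HANDLED_EDAC;
  }

  /* The default notifier prints only records nobody claimed, unless
   * print_all is set. */
  static void default_notifier(struct fake_mce *m)
  {
          if (print_all || !m->kflags)
                  printf("printing MCE record\n");
          else
                  printf("suppressed, kflags=%#llx\n",
                         (unsigned long long)m->kflags);
  }

  int main(void)
  {
          struct fake_mce m = { 0 };

          default_notifier(&m);           /* unclaimed -> printed */
          edac_like_decoder(&m);
          default_notifier(&m);           /* claimed -> suppressed */
          print_all = true;
          default_notifier(&m);           /* print_all -> printed anyway */
          return 0;
  }

With print_all set (via mce=print_all on the kernel command line or the new sysfs attribute added below), records are printed even when another notifier has already claimed them.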
Signed-off-by: Tony Luck Signed-off-by: Borislav Petkov Tested-by: Tony Luck Link: https://lkml.kernel.org/r/20200214222720.13168-7-tony.luck@intel.com --- arch/x86/kernel/cpu/mce/core.c | 7 ++++++- arch/x86/kernel/cpu/mce/internal.h | 1 + 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index fc879b6669d5f..4efe6c1288878 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -591,7 +591,7 @@ static int mce_default_notifier(struct notifier_block *nb, unsigned long val, if (!m) return NOTIFY_DONE; - if (!m->kflags) + if (mca_cfg.print_all || !m->kflags) __print_mce(m); return NOTIFY_DONE; @@ -1962,6 +1962,7 @@ void mce_disable_bank(int bank) * mce=no_cmci Disables CMCI * mce=no_lmce Disables LMCE * mce=dont_log_ce Clears corrected events silently, no log created for CEs. + * mce=print_all Print all machine check logs to console * mce=ignore_ce Disables polling and CMCI, corrected events are not cleared. * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above) * monarchtimeout is how long to wait for other CPUs on machine @@ -1990,6 +1991,8 @@ static int __init mcheck_enable(char *str) cfg->lmce_disabled = 1; else if (!strcmp(str, "dont_log_ce")) cfg->dont_log_ce = true; + else if (!strcmp(str, "print_all")) + cfg->print_all = true; else if (!strcmp(str, "ignore_ce")) cfg->ignore_ce = true; else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog")) @@ -2256,6 +2259,7 @@ static ssize_t store_int_with_restart(struct device *s, static DEVICE_INT_ATTR(tolerant, 0644, mca_cfg.tolerant); static DEVICE_INT_ATTR(monarch_timeout, 0644, mca_cfg.monarch_timeout); static DEVICE_BOOL_ATTR(dont_log_ce, 0644, mca_cfg.dont_log_ce); +static DEVICE_BOOL_ATTR(print_all, 0644, mca_cfg.print_all); static struct dev_ext_attribute dev_attr_check_interval = { __ATTR(check_interval, 0644, device_show_int, store_int_with_restart), @@ -2280,6 +2284,7 @@ static struct device_attribute *mce_device_attrs[] = { #endif &dev_attr_monarch_timeout.attr, &dev_attr_dont_log_ce.attr, + &dev_attr_print_all.attr, &dev_attr_ignore_ce.attr, &dev_attr_cmci_disabled.attr, NULL diff --git a/arch/x86/kernel/cpu/mce/internal.h b/arch/x86/kernel/cpu/mce/internal.h index 74a01829c4f4a..55f5c7b755f23 100644 --- a/arch/x86/kernel/cpu/mce/internal.h +++ b/arch/x86/kernel/cpu/mce/internal.h @@ -119,6 +119,7 @@ struct mca_config { bool dont_log_ce; bool cmci_disabled; bool ignore_ce; + bool print_all; __u64 lmce_disabled : 1, disabled : 1, -- GitLab From 7fc0b9b995f222646ece8d5bca528060c098ee88 Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Fri, 14 Feb 2020 14:27:20 -0800 Subject: [PATCH 0105/1971] EDAC: Drop the EDAC report status checks When acpi_extlog was added, we were worried that the same error would be reported more than once by different subsystems. But in the ensuing years I've seen complaints that people could not find an error log (because this mechanism suppressed the log they were looking for). Rip it all out. People are smart enough to notice the same address from different reporting mechanisms. 
Signed-off-by: Tony Luck Signed-off-by: Borislav Petkov Tested-by: Tony Luck Link: https://lkml.kernel.org/r/20200214222720.13168-8-tony.luck@intel.com --- drivers/acpi/acpi_extlog.c | 14 --------- drivers/edac/edac_mc.c | 61 -------------------------------------- drivers/edac/pnd2_edac.c | 3 -- drivers/edac/sb_edac.c | 4 --- drivers/edac/skx_common.c | 3 -- include/linux/edac.h | 8 ----- 6 files changed, 93 deletions(-) diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c index 9cc3c1f92db56..f138e12b7b823 100644 --- a/drivers/acpi/acpi_extlog.c +++ b/drivers/acpi/acpi_extlog.c @@ -42,8 +42,6 @@ struct extlog_l1_head { u8 rev1[12]; }; -static int old_edac_report_status; - static u8 extlog_dsm_uuid[] __initdata = "663E35AF-CC10-41A4-88EA-5470AF055295"; /* L1 table related physical address */ @@ -229,11 +227,6 @@ static int __init extlog_init(void) if (!(cap & MCG_ELOG_P) || !extlog_get_l1addr()) return -ENODEV; - if (edac_get_report_status() == EDAC_REPORTING_FORCE) { - pr_warn("Not loading eMCA, error reporting force-enabled through EDAC.\n"); - return -EPERM; - } - rc = -EINVAL; /* get L1 header to fetch necessary information */ l1_hdr_size = sizeof(struct extlog_l1_head); @@ -281,12 +274,6 @@ static int __init extlog_init(void) if (elog_buf == NULL) goto err_release_elog; - /* - * eMCA event report method has higher priority than EDAC method, - * unless EDAC event report method is mandatory. - */ - old_edac_report_status = edac_get_report_status(); - edac_set_report_status(EDAC_REPORTING_DISABLED); mce_register_decode_chain(&extlog_mce_dec); /* enable OS to be involved to take over management from BIOS */ ((struct extlog_l1_head *)extlog_l1_addr)->flags |= FLAG_OS_OPTIN; @@ -308,7 +295,6 @@ err: static void __exit extlog_exit(void) { - edac_set_report_status(old_edac_report_status); mce_unregister_decode_chain(&extlog_mce_dec); ((struct extlog_l1_head *)extlog_l1_addr)->flags &= ~FLAG_OS_OPTIN; if (extlog_l1_addr) diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index 75ede27bdf6aa..5813e931f2f00 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c @@ -43,8 +43,6 @@ int edac_op_state = EDAC_OPSTATE_INVAL; EXPORT_SYMBOL_GPL(edac_op_state); -static int edac_report = EDAC_REPORTING_ENABLED; - /* lock to memory controller's control array */ static DEFINE_MUTEX(mem_ctls_mutex); static LIST_HEAD(mc_devices); @@ -60,65 +58,6 @@ static struct mem_ctl_info *error_desc_to_mci(struct edac_raw_error_desc *e) return container_of(e, struct mem_ctl_info, error_desc); } -int edac_get_report_status(void) -{ - return edac_report; -} -EXPORT_SYMBOL_GPL(edac_get_report_status); - -void edac_set_report_status(int new) -{ - if (new == EDAC_REPORTING_ENABLED || - new == EDAC_REPORTING_DISABLED || - new == EDAC_REPORTING_FORCE) - edac_report = new; -} -EXPORT_SYMBOL_GPL(edac_set_report_status); - -static int edac_report_set(const char *str, const struct kernel_param *kp) -{ - if (!str) - return -EINVAL; - - if (!strncmp(str, "on", 2)) - edac_report = EDAC_REPORTING_ENABLED; - else if (!strncmp(str, "off", 3)) - edac_report = EDAC_REPORTING_DISABLED; - else if (!strncmp(str, "force", 5)) - edac_report = EDAC_REPORTING_FORCE; - - return 0; -} - -static int edac_report_get(char *buffer, const struct kernel_param *kp) -{ - int ret = 0; - - switch (edac_report) { - case EDAC_REPORTING_ENABLED: - ret = sprintf(buffer, "on"); - break; - case EDAC_REPORTING_DISABLED: - ret = sprintf(buffer, "off"); - break; - case EDAC_REPORTING_FORCE: - ret = sprintf(buffer, "force"); - break; 
- default: - ret = -EINVAL; - break; - } - - return ret; -} - -static const struct kernel_param_ops edac_report_ops = { - .set = edac_report_set, - .get = edac_report_get, -}; - -module_param_cb(edac_report, &edac_report_ops, &edac_report, 0644); - unsigned int edac_dimm_info_location(struct dimm_info *dimm, char *buf, unsigned int len) { diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c index 1929a5dc8f94d..c1f2e6deb021a 100644 --- a/drivers/edac/pnd2_edac.c +++ b/drivers/edac/pnd2_edac.c @@ -1396,9 +1396,6 @@ static int pnd2_mce_check_error(struct notifier_block *nb, unsigned long val, vo struct dram_addr daddr; char *type; - if (edac_get_report_status() == EDAC_REPORTING_DISABLED) - return NOTIFY_DONE; - mci = pnd2_mci; if (!mci || (mce->kflags & MCE_HANDLED_CEC)) return NOTIFY_DONE; diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index f790f7d086880..d414698ca3242 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c @@ -3134,8 +3134,6 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val, struct mem_ctl_info *mci; char *type; - if (edac_get_report_status() == EDAC_REPORTING_DISABLED) - return NOTIFY_DONE; if (mce->kflags & MCE_HANDLED_CEC) return NOTIFY_DONE; @@ -3526,8 +3524,6 @@ static int __init sbridge_init(void) if (rc >= 0) { mce_register_decode_chain(&sbridge_mce_dec); - if (edac_get_report_status() == EDAC_REPORTING_DISABLED) - sbridge_printk(KERN_WARNING, "Loading driver, error reporting disabled.\n"); return 0; } diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c index 6f08a12f6b110..423d33aef54f0 100644 --- a/drivers/edac/skx_common.c +++ b/drivers/edac/skx_common.c @@ -574,9 +574,6 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val, struct mem_ctl_info *mci; char *type; - if (edac_get_report_status() == EDAC_REPORTING_DISABLED) - return NOTIFY_DONE; - if (mce->kflags & MCE_HANDLED_CEC) return NOTIFY_DONE; diff --git a/include/linux/edac.h b/include/linux/edac.h index 0f20b986b0abd..6eb7d55d7c3d2 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h @@ -31,14 +31,6 @@ struct device; extern int edac_op_state; struct bus_type *edac_get_sysfs_subsys(void); -int edac_get_report_status(void); -void edac_set_report_status(int new); - -enum { - EDAC_REPORTING_ENABLED, - EDAC_REPORTING_DISABLED, - EDAC_REPORTING_FORCE -}; static inline void opstate_init(void) { -- GitLab From 1df73b2131e3b33d518609769636b41ce00212de Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Tue, 7 Apr 2020 13:49:58 +0200 Subject: [PATCH 0106/1971] x86/mce: Fixup exception only for the correct MCEs The severity grading code returns IN_KERNEL_RECOV error context for errors which have happened in kernel space but from which the kernel can recover. Whether the recovery can happen is determined by the exception table entry having as handler ex_handler_fault() and which has been declared at build time using _ASM_EXTABLE_FAULT(). IN_KERNEL_RECOV is used in mce_severity_intel() to lookup the corresponding error severity in the severities table. However, the mapping back from error severity to whether the error is IN_KERNEL_RECOV is ambiguous and in the very paranoid case - which might not be possible right now - but be better safe than sorry later, an exception fixup could be attempted for another MCE whose address is in the exception table and has the proper severity. Which would be unfortunate, to say the least. 
Therefore, mark such MCEs explicitly as MCE_IN_KERNEL_RECOV so that the recovery attempt is done only for them. Document the whole handling, while at it, as it is not trivial. Reported-by: Thomas Gleixner Signed-off-by: Borislav Petkov Tested-by: Tony Luck Link: https://lkml.kernel.org/r/20200407163414.18058-10-bp@alien8.de --- arch/x86/include/asm/mce.h | 1 + arch/x86/kernel/cpu/mce/core.c | 15 +++++++++++++-- arch/x86/kernel/cpu/mce/severity.c | 6 +++++- 3 files changed, 19 insertions(+), 3 deletions(-) diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index 5f04a24f30eaa..c598aaab071b2 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -136,6 +136,7 @@ #define MCE_HANDLED_NFIT BIT_ULL(3) #define MCE_HANDLED_EDAC BIT_ULL(4) #define MCE_HANDLED_MCELOG BIT_ULL(5) +#define MCE_IN_KERNEL_RECOV BIT_ULL(6) /* * This structure contains all data related to the MCE log. Also diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index 4efe6c1288878..02e1f165f1483 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -1331,8 +1331,19 @@ void notrace do_machine_check(struct pt_regs *regs, long error_code) local_irq_disable(); ist_end_non_atomic(); } else { - if (!fixup_exception(regs, X86_TRAP_MC, error_code, 0)) - mce_panic("Failed kernel mode recovery", &m, msg); + /* + * Handle an MCE which has happened in kernel space but from + * which the kernel can recover: ex_has_fault_handler() has + * already verified that the rIP at which the error happened is + * a rIP from which the kernel can recover (by jumping to + * recovery code specified in _ASM_EXTABLE_FAULT()) and the + * corresponding exception handler which would do that is the + * proper one. + */ + if (m.kflags & MCE_IN_KERNEL_RECOV) { + if (!fixup_exception(regs, X86_TRAP_MC, error_code, 0)) + mce_panic("Failed kernel mode recovery", &m, msg); + } } out_ist: diff --git a/arch/x86/kernel/cpu/mce/severity.c b/arch/x86/kernel/cpu/mce/severity.c index 87bcdc6dc2f0c..e1da619add192 100644 --- a/arch/x86/kernel/cpu/mce/severity.c +++ b/arch/x86/kernel/cpu/mce/severity.c @@ -213,8 +213,12 @@ static int error_context(struct mce *m) { if ((m->cs & 3) == 3) return IN_USER; - if (mc_recoverable(m->mcgstatus) && ex_has_fault_handler(m->ip)) + + if (mc_recoverable(m->mcgstatus) && ex_has_fault_handler(m->ip)) { + m->kflags |= MCE_IN_KERNEL_RECOV; return IN_KERNEL_RECOV; + } + return IN_KERNEL; } -- GitLab From 6f0841a8197b20d01de9d41cb1eeed09b7b31f9b Mon Sep 17 00:00:00 2001 From: Jeff LaBundy Date: Sun, 12 Apr 2020 21:15:16 -0500 Subject: [PATCH 0107/1971] pwm: Add support for Azoteq IQS620A PWM generator MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch adds support for the Azoteq IQS620A, capable of generating a 1-kHz PWM output with duty cycle between ~0.4% and 100% (inclusive). Signed-off-by: Jeff LaBundy Reviewed-by: Uwe Kleine-König Signed-off-by: Thierry Reding --- drivers/pwm/Kconfig | 10 ++ drivers/pwm/Makefile | 1 + drivers/pwm/pwm-iqs620a.c | 270 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 281 insertions(+) create mode 100644 drivers/pwm/pwm-iqs620a.c diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig index eebbc917ac979..c13d146cdde53 100644 --- a/drivers/pwm/Kconfig +++ b/drivers/pwm/Kconfig @@ -232,6 +232,16 @@ config PWM_IMX_TPM To compile this driver as a module, choose M here: the module will be called pwm-imx-tpm. 
+config PWM_IQS620A + tristate "Azoteq IQS620A PWM support" + depends on MFD_IQS62X || COMPILE_TEST + help + Generic PWM framework driver for the Azoteq IQS620A multi-function + sensor. + + To compile this driver as a module, choose M here: the module will + be called pwm-iqs620a. + config PWM_JZ4740 tristate "Ingenic JZ47xx PWM support" depends on MACH_INGENIC diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile index 9a475073dafcb..a59c710e98c76 100644 --- a/drivers/pwm/Makefile +++ b/drivers/pwm/Makefile @@ -20,6 +20,7 @@ obj-$(CONFIG_PWM_IMG) += pwm-img.o obj-$(CONFIG_PWM_IMX1) += pwm-imx1.o obj-$(CONFIG_PWM_IMX27) += pwm-imx27.o obj-$(CONFIG_PWM_IMX_TPM) += pwm-imx-tpm.o +obj-$(CONFIG_PWM_IQS620A) += pwm-iqs620a.o obj-$(CONFIG_PWM_JZ4740) += pwm-jz4740.o obj-$(CONFIG_PWM_LP3943) += pwm-lp3943.o obj-$(CONFIG_PWM_LPC18XX_SCT) += pwm-lpc18xx-sct.o diff --git a/drivers/pwm/pwm-iqs620a.c b/drivers/pwm/pwm-iqs620a.c new file mode 100644 index 0000000000000..674f0e238ba01 --- /dev/null +++ b/drivers/pwm/pwm-iqs620a.c @@ -0,0 +1,270 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Azoteq IQS620A PWM Generator + * + * Copyright (C) 2019 Jeff LaBundy + * + * Limitations: + * - The period is fixed to 1 ms and is generated continuously despite changes + * to the duty cycle or enable/disable state. + * - Changes to the duty cycle or enable/disable state take effect immediately + * and may result in a glitch during the period in which the change is made. + * - The device cannot generate a 0% duty cycle. For duty cycles below 1 / 256 + * ms, the output is disabled and relies upon an external pull-down resistor + * to hold the GPIO3/LTX pin low. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define IQS620_PWR_SETTINGS 0xD2 +#define IQS620_PWR_SETTINGS_PWM_OUT BIT(7) + +#define IQS620_PWM_DUTY_CYCLE 0xD8 + +#define IQS620_PWM_PERIOD_NS 1000000 + +struct iqs620_pwm_private { + struct iqs62x_core *iqs62x; + struct pwm_chip chip; + struct notifier_block notifier; + struct mutex lock; + bool out_en; + u8 duty_val; +}; + +static int iqs620_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, + const struct pwm_state *state) +{ + struct iqs620_pwm_private *iqs620_pwm; + struct iqs62x_core *iqs62x; + int duty_scale, ret; + + if (state->polarity != PWM_POLARITY_NORMAL) + return -ENOTSUPP; + + if (state->period < IQS620_PWM_PERIOD_NS) + return -EINVAL; + + iqs620_pwm = container_of(chip, struct iqs620_pwm_private, chip); + iqs62x = iqs620_pwm->iqs62x; + + /* + * The duty cycle generated by the device is calculated as follows: + * + * duty_cycle = (IQS620_PWM_DUTY_CYCLE + 1) / 256 * 1 ms + * + * ...where IQS620_PWM_DUTY_CYCLE is a register value between 0 and 255 + * (inclusive). Therefore the lowest duty cycle the device can generate + * while the output is enabled is 1 / 256 ms. + * + * For lower duty cycles (e.g. 0), the PWM output is simply disabled to + * allow an external pull-down resistor to hold the GPIO3/LTX pin low. 
+ */ + duty_scale = state->duty_cycle * 256 / IQS620_PWM_PERIOD_NS; + + mutex_lock(&iqs620_pwm->lock); + + if (!state->enabled || !duty_scale) { + ret = regmap_update_bits(iqs62x->regmap, IQS620_PWR_SETTINGS, + IQS620_PWR_SETTINGS_PWM_OUT, 0); + if (ret) + goto err_mutex; + } + + if (duty_scale) { + u8 duty_val = min(duty_scale - 1, 0xFF); + + ret = regmap_write(iqs62x->regmap, IQS620_PWM_DUTY_CYCLE, + duty_val); + if (ret) + goto err_mutex; + + iqs620_pwm->duty_val = duty_val; + } + + if (state->enabled && duty_scale) { + ret = regmap_update_bits(iqs62x->regmap, IQS620_PWR_SETTINGS, + IQS620_PWR_SETTINGS_PWM_OUT, 0xFF); + if (ret) + goto err_mutex; + } + + iqs620_pwm->out_en = state->enabled; + +err_mutex: + mutex_unlock(&iqs620_pwm->lock); + + return ret; +} + +static void iqs620_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_state *state) +{ + struct iqs620_pwm_private *iqs620_pwm; + + iqs620_pwm = container_of(chip, struct iqs620_pwm_private, chip); + + mutex_lock(&iqs620_pwm->lock); + + /* + * Since the device cannot generate a 0% duty cycle, requests to do so + * cause subsequent calls to iqs620_pwm_get_state to report the output + * as disabled with duty cycle equal to that which was in use prior to + * the request. This is not ideal, but is the best compromise based on + * the capabilities of the device. + */ + state->enabled = iqs620_pwm->out_en; + state->duty_cycle = DIV_ROUND_UP((iqs620_pwm->duty_val + 1) * + IQS620_PWM_PERIOD_NS, 256); + + mutex_unlock(&iqs620_pwm->lock); + + state->period = IQS620_PWM_PERIOD_NS; +} + +static int iqs620_pwm_notifier(struct notifier_block *notifier, + unsigned long event_flags, void *context) +{ + struct iqs620_pwm_private *iqs620_pwm; + struct iqs62x_core *iqs62x; + int ret; + + if (!(event_flags & BIT(IQS62X_EVENT_SYS_RESET))) + return NOTIFY_DONE; + + iqs620_pwm = container_of(notifier, struct iqs620_pwm_private, + notifier); + iqs62x = iqs620_pwm->iqs62x; + + mutex_lock(&iqs620_pwm->lock); + + /* + * The parent MFD driver already prints an error message in the event + * of a device reset, so nothing else is printed here unless there is + * an additional failure. + */ + ret = regmap_write(iqs62x->regmap, IQS620_PWM_DUTY_CYCLE, + iqs620_pwm->duty_val); + if (ret) + goto err_mutex; + + ret = regmap_update_bits(iqs62x->regmap, IQS620_PWR_SETTINGS, + IQS620_PWR_SETTINGS_PWM_OUT, + iqs620_pwm->out_en ? 
0xFF : 0); + +err_mutex: + mutex_unlock(&iqs620_pwm->lock); + + if (ret) { + dev_err(iqs620_pwm->chip.dev, + "Failed to re-initialize device: %d\n", ret); + return NOTIFY_BAD; + } + + return NOTIFY_OK; +} + +static const struct pwm_ops iqs620_pwm_ops = { + .apply = iqs620_pwm_apply, + .get_state = iqs620_pwm_get_state, + .owner = THIS_MODULE, +}; + +static void iqs620_pwm_notifier_unregister(void *context) +{ + struct iqs620_pwm_private *iqs620_pwm = context; + int ret; + + ret = blocking_notifier_chain_unregister(&iqs620_pwm->iqs62x->nh, + &iqs620_pwm->notifier); + if (ret) + dev_err(iqs620_pwm->chip.dev, + "Failed to unregister notifier: %d\n", ret); +} + +static int iqs620_pwm_probe(struct platform_device *pdev) +{ + struct iqs62x_core *iqs62x = dev_get_drvdata(pdev->dev.parent); + struct iqs620_pwm_private *iqs620_pwm; + unsigned int val; + int ret; + + iqs620_pwm = devm_kzalloc(&pdev->dev, sizeof(*iqs620_pwm), GFP_KERNEL); + if (!iqs620_pwm) + return -ENOMEM; + + platform_set_drvdata(pdev, iqs620_pwm); + iqs620_pwm->iqs62x = iqs62x; + + ret = regmap_read(iqs62x->regmap, IQS620_PWR_SETTINGS, &val); + if (ret) + return ret; + iqs620_pwm->out_en = val & IQS620_PWR_SETTINGS_PWM_OUT; + + ret = regmap_read(iqs62x->regmap, IQS620_PWM_DUTY_CYCLE, &val); + if (ret) + return ret; + iqs620_pwm->duty_val = val; + + iqs620_pwm->chip.dev = &pdev->dev; + iqs620_pwm->chip.ops = &iqs620_pwm_ops; + iqs620_pwm->chip.base = -1; + iqs620_pwm->chip.npwm = 1; + + mutex_init(&iqs620_pwm->lock); + + iqs620_pwm->notifier.notifier_call = iqs620_pwm_notifier; + ret = blocking_notifier_chain_register(&iqs620_pwm->iqs62x->nh, + &iqs620_pwm->notifier); + if (ret) { + dev_err(&pdev->dev, "Failed to register notifier: %d\n", ret); + return ret; + } + + ret = devm_add_action_or_reset(&pdev->dev, + iqs620_pwm_notifier_unregister, + iqs620_pwm); + if (ret) + return ret; + + ret = pwmchip_add(&iqs620_pwm->chip); + if (ret) + dev_err(&pdev->dev, "Failed to add device: %d\n", ret); + + return ret; +} + +static int iqs620_pwm_remove(struct platform_device *pdev) +{ + struct iqs620_pwm_private *iqs620_pwm = platform_get_drvdata(pdev); + int ret; + + ret = pwmchip_remove(&iqs620_pwm->chip); + if (ret) + dev_err(&pdev->dev, "Failed to remove device: %d\n", ret); + + return ret; +} + +static struct platform_driver iqs620_pwm_platform_driver = { + .driver = { + .name = "iqs620a-pwm", + }, + .probe = iqs620_pwm_probe, + .remove = iqs620_pwm_remove, +}; +module_platform_driver(iqs620_pwm_platform_driver); + +MODULE_AUTHOR("Jeff LaBundy "); +MODULE_DESCRIPTION("Azoteq IQS620A PWM Generator"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:iqs620a-pwm"); -- GitLab From 79799562bf087b30d9dd0fddf5bed2d3b038be08 Mon Sep 17 00:00:00 2001 From: Andrzej Pietrasiewicz Date: Tue, 14 Apr 2020 20:00:57 +0200 Subject: [PATCH 0108/1971] thermal: int3400_thermal: Statically initialize .get_mode()/.set_mode() ops int3400_thermal_ops is used inside int3400_thermal_probe() only after the assignments, which can just as well be made statically at struct's initizer. 
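The pattern being applied here is plain designated initialization of an ops structure instead of filling in its members at probe time. A small stand-alone sketch of the same idea, with all names invented for illustration:

  #include <stdio.h>

  struct demo_ops {
          int (*get_mode)(void);
          int (*set_mode)(int mode);
  };

  static int demo_get_mode(void) { return 1; }
  static int demo_set_mode(int mode) { printf("mode=%d\n", mode); return 0; }

  /* All members are known at build time, so nothing needs to be patched
   * in during probe; the structure could even be made const. */
  static const struct demo_ops demo_ops = {
          .get_mode = demo_get_mode,
          .set_mode = demo_set_mode,
  };

  int main(void)
  {
          return demo_ops.set_mode(demo_ops.get_mode());
  }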
Signed-off-by: Andrzej Pietrasiewicz Reviewed-by: Bartlomiej Zolnierkiewicz Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/20200414180105.20042-2-andrzej.p@collabora.com --- drivers/thermal/intel/int340x_thermal/int3400_thermal.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c index ceef89c956bd4..e802922a13cfc 100644 --- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c @@ -271,6 +271,8 @@ static int int3400_thermal_set_mode(struct thermal_zone_device *thermal, static struct thermal_zone_device_ops int3400_thermal_ops = { .get_temp = int3400_thermal_get_temp, + .get_mode = int3400_thermal_get_mode, + .set_mode = int3400_thermal_set_mode, }; static struct thermal_zone_params int3400_thermal_params = { @@ -309,9 +311,6 @@ static int int3400_thermal_probe(struct platform_device *pdev) platform_set_drvdata(pdev, priv); - int3400_thermal_ops.get_mode = int3400_thermal_get_mode; - int3400_thermal_ops.set_mode = int3400_thermal_set_mode; - priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0, priv, &int3400_thermal_ops, &int3400_thermal_params, 0, 0); -- GitLab From e0442d76213981ab48e8ea0874bb6c47e3af5a36 Mon Sep 17 00:00:00 2001 From: Dejin Zheng Date: Thu, 9 Apr 2020 21:52:24 +0800 Subject: [PATCH 0109/1971] i2c: busses: convert to devm_platform_ioremap_resource use devm_platform_ioremap_resource() to simplify code, it contains platform_get_resource and devm_ioremap_resource. Reviewed-by: Barry Song Acked-by: Bartosz Golaszewski Acked-by: Robert Richter Acked-by: Thor Thayer Acked-by: Manivannan Sadhasivam Reviewed-by: Bjorn Andersson Signed-off-by: Dejin Zheng Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-altera.c | 4 +--- drivers/i2c/busses/i2c-axxia.c | 4 +--- drivers/i2c/busses/i2c-bcm-kona.c | 4 +--- drivers/i2c/busses/i2c-davinci.c | 4 +--- drivers/i2c/busses/i2c-digicolor.c | 4 +--- drivers/i2c/busses/i2c-emev2.c | 4 +--- drivers/i2c/busses/i2c-exynos5.c | 4 +--- drivers/i2c/busses/i2c-hix5hd2.c | 4 +--- drivers/i2c/busses/i2c-img-scb.c | 4 +--- drivers/i2c/busses/i2c-jz4780.c | 4 +--- drivers/i2c/busses/i2c-lpc2k.c | 4 +--- drivers/i2c/busses/i2c-meson.c | 4 +--- drivers/i2c/busses/i2c-mv64xxx.c | 4 +--- drivers/i2c/busses/i2c-octeon-platdrv.c | 4 +--- drivers/i2c/busses/i2c-owl.c | 4 +--- drivers/i2c/busses/i2c-qup.c | 4 +--- drivers/i2c/busses/i2c-rk3x.c | 4 +--- drivers/i2c/busses/i2c-sirf.c | 4 +--- drivers/i2c/busses/i2c-stu300.c | 4 +--- drivers/i2c/busses/i2c-sun6i-p2wi.c | 4 +--- drivers/i2c/busses/i2c-synquacer.c | 4 +--- drivers/i2c/busses/i2c-xlp9xx.c | 4 +--- drivers/i2c/busses/i2c-xlr.c | 4 +--- drivers/i2c/busses/i2c-zx2967.c | 4 +--- 24 files changed, 24 insertions(+), 72 deletions(-) diff --git a/drivers/i2c/busses/i2c-altera.c b/drivers/i2c/busses/i2c-altera.c index 20ef63820c776..083699d74f9ac 100644 --- a/drivers/i2c/busses/i2c-altera.c +++ b/drivers/i2c/busses/i2c-altera.c @@ -382,7 +382,6 @@ static const struct i2c_algorithm altr_i2c_algo = { static int altr_i2c_probe(struct platform_device *pdev) { struct altr_i2c_dev *idev = NULL; - struct resource *res; int irq, ret; u32 val; @@ -390,8 +389,7 @@ static int altr_i2c_probe(struct platform_device *pdev) if (!idev) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - idev->base = devm_ioremap_resource(&pdev->dev, res); + idev->base = 
devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(idev->base)) return PTR_ERR(idev->base); diff --git a/drivers/i2c/busses/i2c-axxia.c b/drivers/i2c/busses/i2c-axxia.c index be3681d08a8da..d45e2290c3f72 100644 --- a/drivers/i2c/busses/i2c-axxia.c +++ b/drivers/i2c/busses/i2c-axxia.c @@ -734,7 +734,6 @@ static int axxia_i2c_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct axxia_i2c_dev *idev = NULL; - struct resource *res; void __iomem *base; int ret = 0; @@ -742,8 +741,7 @@ static int axxia_i2c_probe(struct platform_device *pdev) if (!idev) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - base = devm_ioremap_resource(&pdev->dev, res); + base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) return PTR_ERR(base); diff --git a/drivers/i2c/busses/i2c-bcm-kona.c b/drivers/i2c/busses/i2c-bcm-kona.c index 572aebbb254e7..3e63f706f66b2 100644 --- a/drivers/i2c/busses/i2c-bcm-kona.c +++ b/drivers/i2c/busses/i2c-bcm-kona.c @@ -750,7 +750,6 @@ static int bcm_kona_i2c_probe(struct platform_device *pdev) int rc = 0; struct bcm_kona_i2c_dev *dev; struct i2c_adapter *adap; - struct resource *iomem; /* Allocate memory for private data structure */ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); @@ -762,8 +761,7 @@ static int bcm_kona_i2c_probe(struct platform_device *pdev) init_completion(&dev->done); /* Map hardware registers */ - iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - dev->base = devm_ioremap_resource(dev->device, iomem); + dev->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(dev->base)) return -ENOMEM; diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c index e3ceb256a3809..232a7679b69b8 100644 --- a/drivers/i2c/busses/i2c-davinci.c +++ b/drivers/i2c/busses/i2c-davinci.c @@ -761,7 +761,6 @@ static int davinci_i2c_probe(struct platform_device *pdev) { struct davinci_i2c_dev *dev; struct i2c_adapter *adap; - struct resource *mem; struct i2c_bus_recovery_info *rinfo; int r, irq; @@ -814,8 +813,7 @@ static int davinci_i2c_probe(struct platform_device *pdev) if (IS_ERR(dev->clk)) return PTR_ERR(dev->clk); - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - dev->base = devm_ioremap_resource(&pdev->dev, mem); + dev->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(dev->base)) { return PTR_ERR(dev->base); } diff --git a/drivers/i2c/busses/i2c-digicolor.c b/drivers/i2c/busses/i2c-digicolor.c index 056a5c4f0833c..332f004374796 100644 --- a/drivers/i2c/busses/i2c-digicolor.c +++ b/drivers/i2c/busses/i2c-digicolor.c @@ -290,7 +290,6 @@ static int dc_i2c_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct dc_i2c *i2c; - struct resource *r; int ret = 0, irq; i2c = devm_kzalloc(&pdev->dev, sizeof(struct dc_i2c), GFP_KERNEL); @@ -311,8 +310,7 @@ static int dc_i2c_probe(struct platform_device *pdev) if (IS_ERR(i2c->clk)) return PTR_ERR(i2c->clk); - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - i2c->regs = devm_ioremap_resource(&pdev->dev, r); + i2c->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(i2c->regs)) return PTR_ERR(i2c->regs); diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c index 959d4912ec0d5..1a319352e51b4 100644 --- a/drivers/i2c/busses/i2c-emev2.c +++ b/drivers/i2c/busses/i2c-emev2.c @@ -361,15 +361,13 @@ static const struct i2c_algorithm em_i2c_algo = { static int em_i2c_probe(struct platform_device *pdev) { struct em_i2c_device *priv; - struct resource *r; int 
ret; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->base = devm_ioremap_resource(&pdev->dev, r); + priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c index 527030953ba13..ba7033c9e157b 100644 --- a/drivers/i2c/busses/i2c-exynos5.c +++ b/drivers/i2c/busses/i2c-exynos5.c @@ -736,7 +736,6 @@ static int exynos5_i2c_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct exynos5_i2c *i2c; - struct resource *mem; int ret; i2c = devm_kzalloc(&pdev->dev, sizeof(struct exynos5_i2c), GFP_KERNEL); @@ -762,8 +761,7 @@ static int exynos5_i2c_probe(struct platform_device *pdev) if (ret) return ret; - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - i2c->regs = devm_ioremap_resource(&pdev->dev, mem); + i2c->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(i2c->regs)) { ret = PTR_ERR(i2c->regs); goto err_clk; diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c index 6610304b6dc61..ab15b1ec2ab3c 100644 --- a/drivers/i2c/busses/i2c-hix5hd2.c +++ b/drivers/i2c/busses/i2c-hix5hd2.c @@ -388,7 +388,6 @@ static int hix5hd2_i2c_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct hix5hd2_i2c_priv *priv; - struct resource *mem; unsigned int freq; int irq, ret; @@ -409,8 +408,7 @@ static int hix5hd2_i2c_probe(struct platform_device *pdev) } } - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->regs = devm_ioremap_resource(&pdev->dev, mem); + priv->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->regs)) return PTR_ERR(priv->regs); diff --git a/drivers/i2c/busses/i2c-img-scb.c b/drivers/i2c/busses/i2c-img-scb.c index 422097a31c95a..c937ea79300c1 100644 --- a/drivers/i2c/busses/i2c-img-scb.c +++ b/drivers/i2c/busses/i2c-img-scb.c @@ -1330,7 +1330,6 @@ static int img_i2c_probe(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; struct img_i2c *i2c; - struct resource *res; int irq, ret; u32 val; @@ -1338,8 +1337,7 @@ static int img_i2c_probe(struct platform_device *pdev) if (!i2c) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - i2c->base = devm_ioremap_resource(&pdev->dev, res); + i2c->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(i2c->base)) return PTR_ERR(i2c->base); diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c index b426fc9569387..ba831df6661ef 100644 --- a/drivers/i2c/busses/i2c-jz4780.c +++ b/drivers/i2c/busses/i2c-jz4780.c @@ -763,7 +763,6 @@ static int jz4780_i2c_probe(struct platform_device *pdev) int ret = 0; unsigned int clk_freq = 0; unsigned short tmp; - struct resource *r; struct jz4780_i2c *i2c; i2c = devm_kzalloc(&pdev->dev, sizeof(struct jz4780_i2c), GFP_KERNEL); @@ -787,8 +786,7 @@ static int jz4780_i2c_probe(struct platform_device *pdev) init_completion(&i2c->trans_waitq); spin_lock_init(&i2c->lock); - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - i2c->iomem = devm_ioremap_resource(&pdev->dev, r); + i2c->iomem = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(i2c->iomem)) return PTR_ERR(i2c->iomem); diff --git a/drivers/i2c/busses/i2c-lpc2k.c b/drivers/i2c/busses/i2c-lpc2k.c index 13b0c12e2dbaf..197cb64b2a42d 100644 --- a/drivers/i2c/busses/i2c-lpc2k.c +++ b/drivers/i2c/busses/i2c-lpc2k.c @@ -346,7 +346,6 @@ static 
const struct i2c_algorithm i2c_lpc2k_algorithm = { static int i2c_lpc2k_probe(struct platform_device *pdev) { struct lpc2k_i2c *i2c; - struct resource *res; u32 bus_clk_rate; u32 scl_high; u32 clkrate; @@ -356,8 +355,7 @@ static int i2c_lpc2k_probe(struct platform_device *pdev) if (!i2c) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - i2c->base = devm_ioremap_resource(&pdev->dev, res); + i2c->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(i2c->base)) return PTR_ERR(i2c->base); diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c index 06b3bed78421f..b3bd869281d36 100644 --- a/drivers/i2c/busses/i2c-meson.c +++ b/drivers/i2c/busses/i2c-meson.c @@ -397,7 +397,6 @@ static int meson_i2c_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct meson_i2c *i2c; - struct resource *mem; struct i2c_timings timings; int irq, ret = 0; @@ -422,8 +421,7 @@ static int meson_i2c_probe(struct platform_device *pdev) return PTR_ERR(i2c->clk); } - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - i2c->regs = devm_ioremap_resource(&pdev->dev, mem); + i2c->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(i2c->regs)) return PTR_ERR(i2c->regs); diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c index 9b8f1d8552eaf..829b8c98ae51b 100644 --- a/drivers/i2c/busses/i2c-mv64xxx.c +++ b/drivers/i2c/busses/i2c-mv64xxx.c @@ -877,7 +877,6 @@ mv64xxx_i2c_probe(struct platform_device *pd) { struct mv64xxx_i2c_data *drv_data; struct mv64xxx_i2c_pdata *pdata = dev_get_platdata(&pd->dev); - struct resource *r; int rc; if ((!pdata && !pd->dev.of_node)) @@ -888,8 +887,7 @@ mv64xxx_i2c_probe(struct platform_device *pd) if (!drv_data) return -ENOMEM; - r = platform_get_resource(pd, IORESOURCE_MEM, 0); - drv_data->reg_base = devm_ioremap_resource(&pd->dev, r); + drv_data->reg_base = devm_platform_ioremap_resource(pd, 0); if (IS_ERR(drv_data->reg_base)) return PTR_ERR(drv_data->reg_base); diff --git a/drivers/i2c/busses/i2c-octeon-platdrv.c b/drivers/i2c/busses/i2c-octeon-platdrv.c index 64bda83e65ac1..0c227963c8d69 100644 --- a/drivers/i2c/busses/i2c-octeon-platdrv.c +++ b/drivers/i2c/busses/i2c-octeon-platdrv.c @@ -136,7 +136,6 @@ static int octeon_i2c_probe(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; int irq, result = 0, hlc_irq = 0; - struct resource *res_mem; struct octeon_i2c *i2c; bool cn78xx_style; @@ -167,8 +166,7 @@ static int octeon_i2c_probe(struct platform_device *pdev) i2c->roff.twsi_int = 0x10; i2c->roff.sw_twsi_ext = 0x18; - res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - i2c->twsi_base = devm_ioremap_resource(&pdev->dev, res_mem); + i2c->twsi_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(i2c->twsi_base)) { result = PTR_ERR(i2c->twsi_base); goto out; diff --git a/drivers/i2c/busses/i2c-owl.c b/drivers/i2c/busses/i2c-owl.c index 3ab8be62c581b..fba6efc1dca85 100644 --- a/drivers/i2c/busses/i2c-owl.c +++ b/drivers/i2c/busses/i2c-owl.c @@ -396,15 +396,13 @@ static int owl_i2c_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct owl_i2c_dev *i2c_dev; - struct resource *res; int ret, irq; i2c_dev = devm_kzalloc(dev, sizeof(*i2c_dev), GFP_KERNEL); if (!i2c_dev) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - i2c_dev->base = devm_ioremap_resource(dev, res); + i2c_dev->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(i2c_dev->base)) return PTR_ERR(i2c_dev->base); diff 
--git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c index 748872a9b0fc7..155dcde70fc97 100644 --- a/drivers/i2c/busses/i2c-qup.c +++ b/drivers/i2c/busses/i2c-qup.c @@ -1660,7 +1660,6 @@ static int qup_i2c_probe(struct platform_device *pdev) static const int blk_sizes[] = {4, 16, 32}; struct qup_i2c_dev *qup; unsigned long one_bit_t; - struct resource *res; u32 io_mode, hw_ver, size; int ret, fs_div, hs_div; u32 src_clk_freq = DEFAULT_SRC_CLK; @@ -1757,8 +1756,7 @@ nodma: return -EINVAL; } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - qup->base = devm_ioremap_resource(qup->dev, res); + qup->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(qup->base)) return PTR_ERR(qup->base); diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c index 73272d4296bba..fbe41c8cc126a 100644 --- a/drivers/i2c/busses/i2c-rk3x.c +++ b/drivers/i2c/busses/i2c-rk3x.c @@ -1193,7 +1193,6 @@ static int rk3x_i2c_probe(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node; const struct of_device_id *match; struct rk3x_i2c *i2c; - struct resource *mem; int ret = 0; int bus_nr; u32 value; @@ -1223,8 +1222,7 @@ static int rk3x_i2c_probe(struct platform_device *pdev) spin_lock_init(&i2c->lock); init_waitqueue_head(&i2c->wait); - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - i2c->regs = devm_ioremap_resource(&pdev->dev, mem); + i2c->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(i2c->regs)) return PTR_ERR(i2c->regs); diff --git a/drivers/i2c/busses/i2c-sirf.c b/drivers/i2c/busses/i2c-sirf.c index a459e00c6851f..d7f72ec331e80 100644 --- a/drivers/i2c/busses/i2c-sirf.c +++ b/drivers/i2c/busses/i2c-sirf.c @@ -271,7 +271,6 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev) { struct sirfsoc_i2c *siic; struct i2c_adapter *adap; - struct resource *mem_res; struct clk *clk; int bitrate; int ctrl_speed; @@ -309,8 +308,7 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev) adap = &siic->adapter; adap->class = I2C_CLASS_DEPRECATED; - mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - siic->base = devm_ioremap_resource(&pdev->dev, mem_res); + siic->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(siic->base)) { err = PTR_ERR(siic->base); goto out; diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c index ba6b60caa45e3..64d739baf4808 100644 --- a/drivers/i2c/busses/i2c-stu300.c +++ b/drivers/i2c/busses/i2c-stu300.c @@ -860,7 +860,6 @@ static int stu300_probe(struct platform_device *pdev) { struct stu300_dev *dev; struct i2c_adapter *adap; - struct resource *res; int bus_nr; int ret = 0; @@ -876,8 +875,7 @@ static int stu300_probe(struct platform_device *pdev) } dev->pdev = pdev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - dev->virtbase = devm_ioremap_resource(&pdev->dev, res); + dev->virtbase = devm_platform_ioremap_resource(pdev, 0); dev_dbg(&pdev->dev, "initialize bus device I2C%d on virtual " "base %p\n", bus_nr, dev->virtbase); if (IS_ERR(dev->virtbase)) diff --git a/drivers/i2c/busses/i2c-sun6i-p2wi.c b/drivers/i2c/busses/i2c-sun6i-p2wi.c index e5293f0b33184..17c27f79029ba 100644 --- a/drivers/i2c/busses/i2c-sun6i-p2wi.c +++ b/drivers/i2c/busses/i2c-sun6i-p2wi.c @@ -187,7 +187,6 @@ static int p2wi_probe(struct platform_device *pdev) struct device_node *childnp; unsigned long parent_clk_freq; u32 clk_freq = I2C_MAX_STANDARD_MODE_FREQ; - struct resource *r; struct p2wi *p2wi; u32 slave_addr; int clk_div; @@ -231,8 +230,7 @@ static int p2wi_probe(struct 
platform_device *pdev) p2wi->slave_addr = slave_addr; } - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - p2wi->regs = devm_ioremap_resource(dev, r); + p2wi->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(p2wi->regs)) return PTR_ERR(p2wi->regs); diff --git a/drivers/i2c/busses/i2c-synquacer.c b/drivers/i2c/busses/i2c-synquacer.c index 9099d0a67ace2..8cc91a8da5a0f 100644 --- a/drivers/i2c/busses/i2c-synquacer.c +++ b/drivers/i2c/busses/i2c-synquacer.c @@ -536,7 +536,6 @@ static const struct i2c_adapter synquacer_i2c_ops = { static int synquacer_i2c_probe(struct platform_device *pdev) { struct synquacer_i2c *i2c; - struct resource *r; u32 bus_speed; int ret; @@ -574,8 +573,7 @@ static int synquacer_i2c_probe(struct platform_device *pdev) return -EINVAL; } - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - i2c->base = devm_ioremap_resource(&pdev->dev, r); + i2c->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(i2c->base)) return PTR_ERR(i2c->base); diff --git a/drivers/i2c/busses/i2c-xlp9xx.c b/drivers/i2c/busses/i2c-xlp9xx.c index 391c878a7cdc9..286faa9c855ea 100644 --- a/drivers/i2c/busses/i2c-xlp9xx.c +++ b/drivers/i2c/busses/i2c-xlp9xx.c @@ -506,15 +506,13 @@ static int xlp9xx_i2c_smbus_setup(struct xlp9xx_i2c_dev *priv, static int xlp9xx_i2c_probe(struct platform_device *pdev) { struct xlp9xx_i2c_dev *priv; - struct resource *res; int err = 0; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->base = devm_ioremap_resource(&pdev->dev, res); + priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); diff --git a/drivers/i2c/busses/i2c-xlr.c b/drivers/i2c/busses/i2c-xlr.c index 282f161a8b082..126d1393e548b 100644 --- a/drivers/i2c/busses/i2c-xlr.c +++ b/drivers/i2c/busses/i2c-xlr.c @@ -362,7 +362,6 @@ static int xlr_i2c_probe(struct platform_device *pdev) { const struct of_device_id *match; struct xlr_i2c_private *priv; - struct resource *res; struct clk *clk; unsigned long clk_rate; unsigned long clk_div; @@ -380,8 +379,7 @@ static int xlr_i2c_probe(struct platform_device *pdev) else priv->cfg = &xlr_i2c_config_default; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->iobase = devm_ioremap_resource(&pdev->dev, res); + priv->iobase = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->iobase)) return PTR_ERR(priv->iobase); diff --git a/drivers/i2c/busses/i2c-zx2967.c b/drivers/i2c/busses/i2c-zx2967.c index 5f3318559b8d5..8db9519695a6e 100644 --- a/drivers/i2c/busses/i2c-zx2967.c +++ b/drivers/i2c/busses/i2c-zx2967.c @@ -502,7 +502,6 @@ static int zx2967_i2c_probe(struct platform_device *pdev) { struct zx2967_i2c *i2c; void __iomem *reg_base; - struct resource *res; struct clk *clk; int ret; @@ -510,8 +509,7 @@ static int zx2967_i2c_probe(struct platform_device *pdev) if (!i2c) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - reg_base = devm_ioremap_resource(&pdev->dev, res); + reg_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(reg_base)) return PTR_ERR(reg_base); -- GitLab From c02fb2b8067a4b940884fc58289adad0e3866d10 Mon Sep 17 00:00:00 2001 From: Dejin Zheng Date: Tue, 14 Apr 2020 21:48:27 +0800 Subject: [PATCH 0110/1971] i2c: busses: convert to devm_platform_get_and_ioremap_resource use devm_platform_get_and_ioremap_resource() to simplify code, which contains platform_get_resource() and devm_ioremap_resource(), it also get the resource for use by the 
following code. Signed-off-by: Dejin Zheng Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-cadence.c | 3 +-- drivers/i2c/busses/i2c-pca-platform.c | 3 +-- drivers/i2c/busses/i2c-rcar.c | 4 +--- drivers/i2c/busses/i2c-stm32f7.c | 3 +-- 4 files changed, 4 insertions(+), 9 deletions(-) diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c index 89d58f7d2a253..803c802611eb5 100644 --- a/drivers/i2c/busses/i2c-cadence.c +++ b/drivers/i2c/busses/i2c-cadence.c @@ -906,8 +906,7 @@ static int cdns_i2c_probe(struct platform_device *pdev) id->quirks = data->quirks; } - r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - id->membase = devm_ioremap_resource(&pdev->dev, r_mem); + id->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &r_mem); if (IS_ERR(id->membase)) return PTR_ERR(id->membase); diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c index 635dd697ac0bb..546426a470cc0 100644 --- a/drivers/i2c/busses/i2c-pca-platform.c +++ b/drivers/i2c/busses/i2c-pca-platform.c @@ -149,8 +149,7 @@ static int i2c_pca_pf_probe(struct platform_device *pdev) if (!i2c) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - i2c->reg_base = devm_ioremap_resource(&pdev->dev, res); + i2c->reg_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(i2c->reg_base)) return PTR_ERR(i2c->reg_base); diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index 3b5397aa4ca60..a45c4bf1ec014 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c @@ -938,9 +938,7 @@ static int rcar_i2c_probe(struct platform_device *pdev) return PTR_ERR(priv->clk); } - priv->res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - - priv->io = devm_ioremap_resource(dev, priv->res); + priv->io = devm_platform_get_and_ioremap_resource(pdev, 0, &priv->res); if (IS_ERR(priv->io)) return PTR_ERR(priv->io); diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c index 330ffed011e02..cf48f8df4423a 100644 --- a/drivers/i2c/busses/i2c-stm32f7.c +++ b/drivers/i2c/busses/i2c-stm32f7.c @@ -1940,8 +1940,7 @@ static int stm32f7_i2c_probe(struct platform_device *pdev) if (!i2c_dev) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - i2c_dev->base = devm_ioremap_resource(&pdev->dev, res); + i2c_dev->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(i2c_dev->base)) return PTR_ERR(i2c_dev->base); phy_addr = (dma_addr_t)res->start; -- GitLab From f27237c174fd9653033330e4e532cd9d153ce824 Mon Sep 17 00:00:00 2001 From: Adam Honse Date: Fri, 10 Apr 2020 15:48:44 -0500 Subject: [PATCH 0111/1971] i2c: piix4: Detect secondary SMBus controller on AMD AM4 chipsets The AMD X370 and other AM4 chipsets (A/B/X 3/4/5 parts) and Threadripper equivalents have a secondary SMBus controller at I/O port address 0x0B20. This bus is used by several manufacturers to control motherboard RGB lighting via embedded controllers. I have been using this bus in my OpenRGB project to control the Aura RGB on many motherboards and ASRock also uses this bus for their Polychrome RGB controller. I am not aware of any CZ-compatible platforms which do not have the second SMBus channel. All of AMD's AM4- and Threadripper- series chipsets that OpenRGB users have tested appear to have this secondary bus. I also noticed this secondary bus is present on older AMD platforms including my FM1 home server. 
Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=202587 Signed-off-by: Adam Honse Reviewed-by: Jean Delvare Reviewed-by: Sebastian Reichel Tested-by: Sebastian Reichel Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-piix4.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index 30ded6422e7b2..69740a4ff1db2 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c @@ -977,7 +977,8 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) } if (dev->vendor == PCI_VENDOR_ID_AMD && - dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS) { + (dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS || + dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS)) { retval = piix4_setup_sb800(dev, id, 1); } -- GitLab From def00b32f02729eb55088f83b0f6e076cdbb3bc3 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Wed, 1 Apr 2020 21:30:18 +0200 Subject: [PATCH 0112/1971] i2c: refactor parsing of timings When I wanted to print the chosen values to debug output, I concluded that a helper function to parse one timing would be helpful. Signed-off-by: Wolfram Sang Signed-off-by: Wolfram Sang --- drivers/i2c/i2c-core-base.c | 69 ++++++++++++++++--------------------- 1 file changed, 30 insertions(+), 39 deletions(-) diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index 5cc0b0ec5570b..a7c21d4cbd901 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -1588,6 +1588,18 @@ void i2c_del_adapter(struct i2c_adapter *adap) } EXPORT_SYMBOL(i2c_del_adapter); +static void i2c_parse_timing(struct device *dev, char *prop_name, u32 *cur_val_p, + u32 def_val, bool use_def) +{ + int ret; + + ret = device_property_read_u32(dev, prop_name, cur_val_p); + if (ret && use_def) + *cur_val_p = def_val; + + dev_dbg(dev, "%s: %u\n", prop_name, *cur_val_p); +} + /** * i2c_parse_fw_timings - get I2C related timing parameters from firmware * @dev: The device to scan for I2C timing properties @@ -1606,49 +1618,28 @@ EXPORT_SYMBOL(i2c_del_adapter); */ void i2c_parse_fw_timings(struct device *dev, struct i2c_timings *t, bool use_defaults) { - int ret; - - ret = device_property_read_u32(dev, "clock-frequency", &t->bus_freq_hz); - if (ret && use_defaults) - t->bus_freq_hz = I2C_MAX_STANDARD_MODE_FREQ; - - ret = device_property_read_u32(dev, "i2c-scl-rising-time-ns", &t->scl_rise_ns); - if (ret && use_defaults) { - if (t->bus_freq_hz <= I2C_MAX_STANDARD_MODE_FREQ) - t->scl_rise_ns = 1000; - else if (t->bus_freq_hz <= I2C_MAX_FAST_MODE_FREQ) - t->scl_rise_ns = 300; - else - t->scl_rise_ns = 120; - } - - ret = device_property_read_u32(dev, "i2c-scl-falling-time-ns", &t->scl_fall_ns); - if (ret && use_defaults) { - if (t->bus_freq_hz <= I2C_MAX_FAST_MODE_FREQ) - t->scl_fall_ns = 300; - else - t->scl_fall_ns = 120; - } - - ret = device_property_read_u32(dev, "i2c-scl-internal-delay-ns", &t->scl_int_delay_ns); - if (ret && use_defaults) - t->scl_int_delay_ns = 0; + bool u = use_defaults; + u32 d; - ret = device_property_read_u32(dev, "i2c-sda-falling-time-ns", &t->sda_fall_ns); - if (ret && use_defaults) - t->sda_fall_ns = t->scl_fall_ns; + i2c_parse_timing(dev, "clock-frequency", &t->bus_freq_hz, + I2C_MAX_STANDARD_MODE_FREQ, u); - ret = device_property_read_u32(dev, "i2c-sda-hold-time-ns", &t->sda_hold_ns); - if (ret && use_defaults) - t->sda_hold_ns = 0; + d = t->bus_freq_hz <= I2C_MAX_STANDARD_MODE_FREQ ? 1000 : + t->bus_freq_hz <= I2C_MAX_FAST_MODE_FREQ ? 
300 : 120; + i2c_parse_timing(dev, "i2c-scl-rising-time-ns", &t->scl_rise_ns, d, u); - ret = device_property_read_u32(dev, "i2c-digital-filter-width-ns", &t->digital_filter_width_ns); - if (ret && use_defaults) - t->digital_filter_width_ns = 0; + d = t->bus_freq_hz <= I2C_MAX_FAST_MODE_FREQ ? 300 : 120; + i2c_parse_timing(dev, "i2c-scl-falling-time-ns", &t->scl_fall_ns, d, u); - ret = device_property_read_u32(dev, "i2c-analog-filter-cutoff-frequency", &t->analog_filter_cutoff_freq_hz); - if (ret && use_defaults) - t->analog_filter_cutoff_freq_hz = 0; + i2c_parse_timing(dev, "i2c-scl-internal-delay-ns", + &t->scl_int_delay_ns, 0, u); + i2c_parse_timing(dev, "i2c-sda-falling-time-ns", &t->sda_fall_ns, + t->scl_fall_ns, u); + i2c_parse_timing(dev, "i2c-sda-hold-time-ns", &t->sda_hold_ns, 0, u); + i2c_parse_timing(dev, "i2c-digital-filter-width-ns", + &t->digital_filter_width_ns, 0, u); + i2c_parse_timing(dev, "i2c-analog-filter-cutoff-frequency", + &t->analog_filter_cutoff_freq_hz, 0, u); } EXPORT_SYMBOL_GPL(i2c_parse_fw_timings); -- GitLab From be1b92c133cc91b2fb00b8c5f6b8a922fa8003da Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 24 Mar 2020 14:56:47 +0100 Subject: [PATCH 0113/1971] i2c: i801: Use GPIO_LOOKUP() helper macro i801_add_mux() fills in the GPIO lookup table by manually populating an array of gpiod_lookup structures. Use the existing GPIO_LOOKUP() helper macro instead, to relax a dependency on the gpiod_lookup structure's member names. Signed-off-by: Geert Uytterhoeven Reviewed-by: Jean Delvare Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-i801.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index a9c03f5c34825..4f333889489c8 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -1439,9 +1439,9 @@ static int i801_add_mux(struct i801_priv *priv) return -ENOMEM; lookup->dev_id = "i2c-mux-gpio"; for (i = 0; i < mux_config->n_gpios; i++) { - lookup->table[i].chip_label = mux_config->gpio_chip; - lookup->table[i].chip_hwnum = mux_config->gpios[i]; - lookup->table[i].con_id = "mux"; + lookup->table[i] = (struct gpiod_lookup) + GPIO_LOOKUP(mux_config->gpio_chip, + mux_config->gpios[i], "mux", 0); } gpiod_add_lookup_table(lookup); priv->lookup = lookup; -- GitLab From 79d48da381ca977c60e17bdebd2e6f7c4c390df8 Mon Sep 17 00:00:00 2001 From: Fabrice Gasnier Date: Thu, 19 Mar 2020 19:21:55 +0100 Subject: [PATCH 0114/1971] i2c: i2c-stm32f7: improve nack debug message Add information on slave addr in the nack debug message. 
Signed-off-by: Fabrice Gasnier Signed-off-by: Alain Volmat Reviewed-by: Pierre-Yves MORDRET Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-stm32f7.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c index cf48f8df4423a..8102e33a67539 100644 --- a/drivers/i2c/busses/i2c-stm32f7.c +++ b/drivers/i2c/busses/i2c-stm32f7.c @@ -1462,7 +1462,8 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data) /* NACK received */ if (status & STM32F7_I2C_ISR_NACKF) { - dev_dbg(i2c_dev->dev, "<%s>: Receive NACK\n", __func__); + dev_dbg(i2c_dev->dev, "<%s>: Receive NACK (addr %x)\n", + __func__, f7_msg->addr); writel_relaxed(STM32F7_I2C_ICR_NACKCF, base + STM32F7_I2C_ICR); f7_msg->result = -ENXIO; } -- GitLab From 85f8fcaeed33315e19905af23f2693a81956688a Mon Sep 17 00:00:00 2001 From: Wan Ahmad Zainie Date: Tue, 7 Apr 2020 21:34:38 +0800 Subject: [PATCH 0115/1971] i2c: designware: Calculate SCL timing parameter for Fast Mode Plus Custom parameters for HCNT/LCNT are not available for OF based system. Thus, we will use existing SCL timing parameter calculation functions for Fast Mode Plus. Signed-off-by: Wan Ahmad Zainie Reviewed-by: Andy Shevchenko Acked-by: Jarkko Nikula Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-designware-master.c | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index 3a58eef209360..23038d7272da3 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c @@ -76,14 +76,27 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev) */ if (t->bus_freq_hz == 1000000) { /* - * Check are fast mode plus parameters available and use - * fast mode if not. + * Check are Fast Mode Plus parameters available. Calculate + * SCL timing parameters for Fast Mode Plus if not set. */ if (dev->fp_hcnt && dev->fp_lcnt) { dev->fs_hcnt = dev->fp_hcnt; dev->fs_lcnt = dev->fp_lcnt; - fp_str = " Plus"; + } else { + ic_clk = i2c_dw_clk_rate(dev); + dev->fs_hcnt = + i2c_dw_scl_hcnt(ic_clk, + 260, /* tHIGH = 260 ns */ + sda_falling_time, + 0, /* DW default */ + 0); /* No offset */ + dev->fs_lcnt = + i2c_dw_scl_lcnt(ic_clk, + 500, /* tLOW = 500 ns */ + scl_falling_time, + 0); /* No offset */ } + fp_str = " Plus"; } /* * Calculate SCL timing parameters for fast mode if not set. They are -- GitLab From 35eba185fd1a3c61556a3050edbddc58d953303f Mon Sep 17 00:00:00 2001 From: Wan Ahmad Zainie Date: Tue, 7 Apr 2020 21:34:39 +0800 Subject: [PATCH 0116/1971] i2c: designware: Calculate SCL timing parameter for High Speed Mode Custom parameters for HCNT/LCNT are not available for OF based system. Thus, we will use existing SCL timing parameter calculation functions for High Speed Mode too. The value for the parameters tSYMBOL and tLOW is taken from DesignWare DW_apb_i2c Databook v2.01a, section 3.15.4.6. The calculation should assume higher bus load since it gives slower timing parameter. 
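For a rough feel of the resulting count values, the stand-alone sketch below models only the first-order relationship between the IP input clock and the tHIGH/tLOW targets quoted in these two patches. The real i2c_dw_scl_hcnt()/i2c_dw_scl_lcnt() helpers apply further correction terms for the signal fall times and internal IC constants, and the 100 MHz input clock used here is just an assumption for the example.

  #include <stdio.h>

  /* counts ~= ic_clk[kHz] * t[ns] / 1e6, with rounding */
  static unsigned int approx_cnt(unsigned int ic_clk_khz, unsigned int t_ns)
  {
          return (unsigned int)(((unsigned long long)ic_clk_khz * t_ns
                                 + 500000) / 1000000);
  }

  int main(void)
  {
          unsigned int ic_clk_khz = 100000;       /* assumed 100 MHz clock */

          printf("FM+ hcnt ~ %u (tHIGH = 260 ns)\n", approx_cnt(ic_clk_khz, 260));
          printf("FM+ lcnt ~ %u (tLOW  = 500 ns)\n", approx_cnt(ic_clk_khz, 500));
          printf("HS  hcnt ~ %u (tHIGH = 160 ns)\n", approx_cnt(ic_clk_khz, 160));
          printf("HS  lcnt ~ %u (tLOW  = 320 ns)\n", approx_cnt(ic_clk_khz, 320));
          return 0;
  }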
Signed-off-by: Wan Ahmad Zainie Reviewed-by: Andy Shevchenko Acked-by: Jarkko Nikula Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-designware-master.c | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index 23038d7272da3..b6c17b550d31e 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c @@ -129,10 +129,22 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev) dev->master_cfg |= DW_IC_CON_SPEED_FAST; dev->hs_hcnt = 0; dev->hs_lcnt = 0; - } else if (dev->hs_hcnt && dev->hs_lcnt) { - dev_dbg(dev->dev, "High Speed Mode HCNT:LCNT = %d:%d\n", - dev->hs_hcnt, dev->hs_lcnt); + } else if (!dev->hs_hcnt || !dev->hs_lcnt) { + ic_clk = i2c_dw_clk_rate(dev); + dev->hs_hcnt = + i2c_dw_scl_hcnt(ic_clk, + 160, /* tHIGH = 160 ns */ + sda_falling_time, + 0, /* DW default */ + 0); /* No offset */ + dev->hs_lcnt = + i2c_dw_scl_lcnt(ic_clk, + 320, /* tLOW = 320 ns */ + scl_falling_time, + 0); /* No offset */ } + dev_dbg(dev->dev, "High Speed Mode HCNT:LCNT = %d:%d\n", + dev->hs_hcnt, dev->hs_lcnt); } ret = i2c_dw_set_sda_hold(dev); -- GitLab From 022467444515c15ac8f3f690f3dff5f21d04c627 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Fri, 3 Apr 2020 17:19:50 +0300 Subject: [PATCH 0117/1971] dmaengine: ti: k3-udma: Drop COMPILE_TEST for the drivers for now It is not possible to compile test the UDMA stack right now due to dependencies to T_SCI_PROTOCOL and TI_SCI_INTA_IRQCHIP and their dependencies. Remove the COMPILE_TEST until it is actually possible to compile test the drivers. Reported-by: Linus Torvalds Signed-off-by: Peter Ujfalusi Acked-by: Linus Torvalds Link: https://lore.kernel.org/r/20200403141950.9359-1-peter.ujfalusi@ti.com Signed-off-by: Vinod Koul --- drivers/dma/ti/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/dma/ti/Kconfig b/drivers/dma/ti/Kconfig index f76e06651f804..79618fac119a7 100644 --- a/drivers/dma/ti/Kconfig +++ b/drivers/dma/ti/Kconfig @@ -36,7 +36,7 @@ config DMA_OMAP config TI_K3_UDMA bool "Texas Instruments UDMA support" - depends on ARCH_K3 || COMPILE_TEST + depends on ARCH_K3 depends on TI_SCI_PROTOCOL depends on TI_SCI_INTA_IRQCHIP select DMA_ENGINE @@ -49,7 +49,7 @@ config TI_K3_UDMA config TI_K3_UDMA_GLUE_LAYER bool "Texas Instruments UDMA Glue layer for non DMAengine users" - depends on ARCH_K3 || COMPILE_TEST + depends on ARCH_K3 depends on TI_K3_UDMA help Say y here to support the K3 NAVSS DMA glue interface -- GitLab From a4e688535a0829980d5ef1516c0713777a874c62 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Fri, 27 Mar 2020 16:42:28 +0200 Subject: [PATCH 0118/1971] dmaengine: ti: k3-udma: Disable memcopy via MCU NAVSS on am654 Trace of a test for DMA memcpy domains slipped into the glue layer commit. The memcpy support should be disabled on the MCU UDMAP. 
Fixes: d702419134133 ("dmaengine: ti: k3-udma: Add glue layer for non DMAengine users") Signed-off-by: Peter Ujfalusi Link: https://lore.kernel.org/r/20200327144228.11101-1-peter.ujfalusi@ti.com Signed-off-by: Vinod Koul --- drivers/dma/ti/k3-udma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c index a9c0251adf1a4..fa53234d8f097 100644 --- a/drivers/dma/ti/k3-udma.c +++ b/drivers/dma/ti/k3-udma.c @@ -3187,7 +3187,7 @@ static struct udma_match_data am654_main_data = { static struct udma_match_data am654_mcu_data = { .psil_base = 0x6000, - .enable_memcpy_support = true, /* TEST: DMA domains */ + .enable_memcpy_support = false, .statictr_z_mask = GENMASK(11, 0), .rchan_oes_offset = 0x2000, .tpl_levels = 2, -- GitLab From c2ce6bbcfc9f142b426717bc5aa3f9ba20485a65 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 15 Apr 2020 09:13:12 -0700 Subject: [PATCH 0119/1971] dmaengine: idxd: export hw version through sysfs Some user apps would like to know the hardware version in order to determine the variation of the hardware. Export the hardware version number to userspace via sysfs. Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/158696714008.39484.13401950732606906479.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Vinod Koul --- Documentation/ABI/stable/sysfs-driver-dma-idxd | 6 ++++++ drivers/dma/idxd/sysfs.c | 11 +++++++++++ 2 files changed, 17 insertions(+) diff --git a/Documentation/ABI/stable/sysfs-driver-dma-idxd b/Documentation/ABI/stable/sysfs-driver-dma-idxd index f4be46cc6cb60..b5bebf642db69 100644 --- a/Documentation/ABI/stable/sysfs-driver-dma-idxd +++ b/Documentation/ABI/stable/sysfs-driver-dma-idxd @@ -1,3 +1,9 @@ +What: sys/bus/dsa/devices/dsa/version +Date: Apr 15, 2020 +KernelVersion: 5.8.0 +Contact: dmaengine@vger.kernel.org +Description: The hardware version number. 
+ What: sys/bus/dsa/devices/dsa/cdev_major Date: Oct 25, 2019 KernelVersion: 5.6.0 diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c index 3999827970aba..052dae5d6dddb 100644 --- a/drivers/dma/idxd/sysfs.c +++ b/drivers/dma/idxd/sysfs.c @@ -1092,6 +1092,16 @@ static const struct attribute_group *idxd_wq_attribute_groups[] = { }; /* IDXD device attribs */ +static ssize_t version_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%#x\n", idxd->hw.version); +} +static DEVICE_ATTR_RO(version); + static ssize_t max_work_queues_size_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -1313,6 +1323,7 @@ static ssize_t cdev_major_show(struct device *dev, static DEVICE_ATTR_RO(cdev_major); static struct attribute *idxd_device_attributes[] = { + &dev_attr_version.attr, &dev_attr_max_groups.attr, &dev_attr_max_work_queues.attr, &dev_attr_max_work_queues_size.attr, -- GitLab From 1a351b10b9671fc2fac767c40a1c4373b9bf5092 Mon Sep 17 00:00:00 2001 From: Radu Pirea Date: Mon, 6 Jan 2020 12:43:36 +0200 Subject: [PATCH 0120/1971] i2c: cadence: Added slave support Added support for I2C slave functionality Signed-off-by: Chirag Parekh Signed-off-by: Michal Simek Signed-off-by: Radu Pirea Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-cadence.c | 320 +++++++++++++++++++++++++++++-- 1 file changed, 309 insertions(+), 11 deletions(-) diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c index 803c802611eb5..4b72398af505d 100644 --- a/drivers/i2c/busses/i2c-cadence.c +++ b/drivers/i2c/busses/i2c-cadence.c @@ -23,6 +23,7 @@ #define CDNS_I2C_ISR_OFFSET 0x10 /* IRQ Status Register, RW */ #define CDNS_I2C_XFER_SIZE_OFFSET 0x14 /* Transfer Size Register, RW */ #define CDNS_I2C_TIME_OUT_OFFSET 0x1C /* Time Out Register, RW */ +#define CDNS_I2C_IMR_OFFSET 0x20 /* IRQ Mask Register, RO */ #define CDNS_I2C_IER_OFFSET 0x24 /* IRQ Enable Register, WO */ #define CDNS_I2C_IDR_OFFSET 0x28 /* IRQ Disable Register, WO */ @@ -40,9 +41,17 @@ #define CDNS_I2C_CR_DIVB_SHIFT 8 #define CDNS_I2C_CR_DIVB_MASK (0x3f << CDNS_I2C_CR_DIVB_SHIFT) +#define CDNS_I2C_CR_MASTER_EN_MASK (CDNS_I2C_CR_NEA | \ + CDNS_I2C_CR_ACK_EN | \ + CDNS_I2C_CR_MS) + +#define CDNS_I2C_CR_SLAVE_EN_MASK ~CDNS_I2C_CR_MASTER_EN_MASK + /* Status Register Bit mask definitions */ #define CDNS_I2C_SR_BA BIT(8) +#define CDNS_I2C_SR_TXDV BIT(6) #define CDNS_I2C_SR_RXDV BIT(5) +#define CDNS_I2C_SR_RXRW BIT(3) /* * I2C Address Register Bit mask definitions @@ -91,6 +100,14 @@ CDNS_I2C_IXR_DATA | \ CDNS_I2C_IXR_COMP) +#define CDNS_I2C_IXR_SLAVE_INTR_MASK (CDNS_I2C_IXR_RX_UNF | \ + CDNS_I2C_IXR_TX_OVF | \ + CDNS_I2C_IXR_RX_OVF | \ + CDNS_I2C_IXR_TO | \ + CDNS_I2C_IXR_NACK | \ + CDNS_I2C_IXR_DATA | \ + CDNS_I2C_IXR_COMP) + #define CDNS_I2C_TIMEOUT msecs_to_jiffies(1000) /* timeout for pm runtime autosuspend */ #define CNDS_I2C_PM_TIMEOUT 1000 /* ms */ @@ -114,6 +131,32 @@ #define cdns_i2c_readreg(offset) readl_relaxed(id->membase + offset) #define cdns_i2c_writereg(val, offset) writel_relaxed(val, id->membase + offset) +#if IS_ENABLED(CONFIG_I2C_SLAVE) +/** + * enum cdns_i2c_mode - I2C Controller current operating mode + * + * @CDNS_I2C_MODE_SLAVE: I2C controller operating in slave mode + * @CDNS_I2C_MODE_MASTER: I2C Controller operating in master mode + */ +enum cdns_i2c_mode { + CDNS_I2C_MODE_SLAVE, + CDNS_I2C_MODE_MASTER, +}; + +/** + * enum cdns_i2c_slave_mode - Slave state when I2C 
is operating in slave mode + * + * @CDNS_I2C_SLAVE_STATE_IDLE: I2C slave idle + * @CDNS_I2C_SLAVE_STATE_SEND: I2C slave sending data to master + * @CDNS_I2C_SLAVE_STATE_RECV: I2C slave receiving data from master + */ +enum cdns_i2c_slave_state { + CDNS_I2C_SLAVE_STATE_IDLE, + CDNS_I2C_SLAVE_STATE_SEND, + CDNS_I2C_SLAVE_STATE_RECV, +}; +#endif + /** * struct cdns_i2c - I2C device private data structure * @@ -135,6 +178,10 @@ * @clk: Pointer to struct clk * @clk_rate_change_nb: Notifier block for clock rate changes * @quirks: flag for broken hold bit usage in r1p10 + * @ctrl_reg_diva_divb: value of fields DIV_A and DIV_B from CR register + * @slave: Registered slave instance. + * @dev_mode: I2C operating role(master/slave). + * @slave_state: I2C Slave state(idle/read/write). */ struct cdns_i2c { struct device *dev; @@ -155,6 +202,12 @@ struct cdns_i2c { struct clk *clk; struct notifier_block clk_rate_change_nb; u32 quirks; +#if IS_ENABLED(CONFIG_I2C_SLAVE) + u16 ctrl_reg_diva_divb; + struct i2c_client *slave; + enum cdns_i2c_mode dev_mode; + enum cdns_i2c_slave_state slave_state; +#endif }; struct cdns_platform_data { @@ -183,17 +236,155 @@ static inline bool cdns_is_holdquirk(struct cdns_i2c *id, bool hold_wrkaround) (id->curr_recv_count == CDNS_I2C_FIFO_DEPTH + 1)); } +#if IS_ENABLED(CONFIG_I2C_SLAVE) +static void cdns_i2c_set_mode(enum cdns_i2c_mode mode, struct cdns_i2c *id) +{ + /* Disable all interrupts */ + cdns_i2c_writereg(CDNS_I2C_IXR_ALL_INTR_MASK, CDNS_I2C_IDR_OFFSET); + + /* Clear FIFO and transfer size */ + cdns_i2c_writereg(CDNS_I2C_CR_CLR_FIFO, CDNS_I2C_CR_OFFSET); + + /* Update device mode and state */ + id->dev_mode = mode; + id->slave_state = CDNS_I2C_SLAVE_STATE_IDLE; + + switch (mode) { + case CDNS_I2C_MODE_MASTER: + /* Enable i2c master */ + cdns_i2c_writereg(id->ctrl_reg_diva_divb | + CDNS_I2C_CR_MASTER_EN_MASK, + CDNS_I2C_CR_OFFSET); + /* + * This delay is needed to give the IP some time to switch to + * the master mode. With lower values(like 110 us) i2cdetect + * will not detect any slave and without this delay, the IP will + * trigger a timeout interrupt. 
+ */ + usleep_range(115, 125); + break; + case CDNS_I2C_MODE_SLAVE: + /* Enable i2c slave */ + cdns_i2c_writereg(id->ctrl_reg_diva_divb & + CDNS_I2C_CR_SLAVE_EN_MASK, + CDNS_I2C_CR_OFFSET); + + /* Setting slave address */ + cdns_i2c_writereg(id->slave->addr & CDNS_I2C_ADDR_MASK, + CDNS_I2C_ADDR_OFFSET); + + /* Enable slave send/receive interrupts */ + cdns_i2c_writereg(CDNS_I2C_IXR_SLAVE_INTR_MASK, + CDNS_I2C_IER_OFFSET); + break; + } +} + +static void cdns_i2c_slave_rcv_data(struct cdns_i2c *id) +{ + u8 bytes; + unsigned char data; + + /* Prepare backend for data reception */ + if (id->slave_state == CDNS_I2C_SLAVE_STATE_IDLE) { + id->slave_state = CDNS_I2C_SLAVE_STATE_RECV; + i2c_slave_event(id->slave, I2C_SLAVE_WRITE_REQUESTED, NULL); + } + + /* Fetch number of bytes to receive */ + bytes = cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET); + + /* Read data and send to backend */ + while (bytes--) { + data = cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET); + i2c_slave_event(id->slave, I2C_SLAVE_WRITE_RECEIVED, &data); + } +} + +static void cdns_i2c_slave_send_data(struct cdns_i2c *id) +{ + u8 data; + + /* Prepare backend for data transmission */ + if (id->slave_state == CDNS_I2C_SLAVE_STATE_IDLE) { + id->slave_state = CDNS_I2C_SLAVE_STATE_SEND; + i2c_slave_event(id->slave, I2C_SLAVE_READ_REQUESTED, &data); + } else { + i2c_slave_event(id->slave, I2C_SLAVE_READ_PROCESSED, &data); + } + + /* Send data over bus */ + cdns_i2c_writereg(data, CDNS_I2C_DATA_OFFSET); +} + /** - * cdns_i2c_isr - Interrupt handler for the I2C device - * @irq: irq number for the I2C device - * @ptr: void pointer to cdns_i2c structure + * cdns_i2c_slave_isr - Interrupt handler for the I2C device in slave role + * @ptr: Pointer to I2C device private data + * + * This function handles the data interrupt and transfer complete interrupt of + * the I2C device in slave role. 
+ * + * Return: IRQ_HANDLED always + */ +static irqreturn_t cdns_i2c_slave_isr(void *ptr) +{ + struct cdns_i2c *id = ptr; + unsigned int isr_status, i2c_status; + + /* Fetch the interrupt status */ + isr_status = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET); + cdns_i2c_writereg(isr_status, CDNS_I2C_ISR_OFFSET); + + /* Ignore masked interrupts */ + isr_status &= ~cdns_i2c_readreg(CDNS_I2C_IMR_OFFSET); + + /* Fetch transfer mode (send/receive) */ + i2c_status = cdns_i2c_readreg(CDNS_I2C_SR_OFFSET); + + /* Handle data send/receive */ + if (i2c_status & CDNS_I2C_SR_RXRW) { + /* Send data to master */ + if (isr_status & CDNS_I2C_IXR_DATA) + cdns_i2c_slave_send_data(id); + + if (isr_status & CDNS_I2C_IXR_COMP) { + id->slave_state = CDNS_I2C_SLAVE_STATE_IDLE; + i2c_slave_event(id->slave, I2C_SLAVE_STOP, NULL); + } + } else { + /* Receive data from master */ + if (isr_status & CDNS_I2C_IXR_DATA) + cdns_i2c_slave_rcv_data(id); + + if (isr_status & CDNS_I2C_IXR_COMP) { + cdns_i2c_slave_rcv_data(id); + id->slave_state = CDNS_I2C_SLAVE_STATE_IDLE; + i2c_slave_event(id->slave, I2C_SLAVE_STOP, NULL); + } + } + + /* Master indicated xfer stop or fifo underflow/overflow */ + if (isr_status & (CDNS_I2C_IXR_NACK | CDNS_I2C_IXR_RX_OVF | + CDNS_I2C_IXR_RX_UNF | CDNS_I2C_IXR_TX_OVF)) { + id->slave_state = CDNS_I2C_SLAVE_STATE_IDLE; + i2c_slave_event(id->slave, I2C_SLAVE_STOP, NULL); + cdns_i2c_writereg(CDNS_I2C_CR_CLR_FIFO, CDNS_I2C_CR_OFFSET); + } + + return IRQ_HANDLED; +} +#endif + +/** + * cdns_i2c_master_isr - Interrupt handler for the I2C device in master role + * @ptr: Pointer to I2C device private data * * This function handles the data interrupt, transfer complete interrupt and - * the error interrupts of the I2C device. + * the error interrupts of the I2C device in master role. * * Return: IRQ_HANDLED always */ -static irqreturn_t cdns_i2c_isr(int irq, void *ptr) +static irqreturn_t cdns_i2c_master_isr(void *ptr) { unsigned int isr_status, avail_bytes, updatetx; unsigned int bytes_to_send; @@ -357,6 +548,27 @@ static irqreturn_t cdns_i2c_isr(int irq, void *ptr) return status; } +/** + * cdns_i2c_isr - Interrupt handler for the I2C device + * @irq: irq number for the I2C device + * @ptr: void pointer to cdns_i2c structure + * + * This function passes the control to slave/master based on current role of + * i2c controller. 
+ * + * Return: IRQ_HANDLED always + */ +static irqreturn_t cdns_i2c_isr(int irq, void *ptr) +{ +#if IS_ENABLED(CONFIG_I2C_SLAVE) + struct cdns_i2c *id = ptr; + + if (id->dev_mode == CDNS_I2C_MODE_SLAVE) + return cdns_i2c_slave_isr(ptr); +#endif + return cdns_i2c_master_isr(ptr); +} + /** * cdns_i2c_mrecv - Prepare and start a master receive operation * @id: pointer to the i2c device structure @@ -577,10 +789,28 @@ static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, u32 reg; struct cdns_i2c *id = adap->algo_data; bool hold_quirk; +#if IS_ENABLED(CONFIG_I2C_SLAVE) + bool change_role = false; +#endif ret = pm_runtime_get_sync(id->dev); if (ret < 0) return ret; + +#if IS_ENABLED(CONFIG_I2C_SLAVE) + /* Check i2c operating mode and switch if possible */ + if (id->dev_mode == CDNS_I2C_MODE_SLAVE) { + if (id->slave_state != CDNS_I2C_SLAVE_STATE_IDLE) + return -EAGAIN; + + /* Set mode to master */ + cdns_i2c_set_mode(CDNS_I2C_MODE_MASTER, id); + + /* Mark flag to change role once xfer is completed */ + change_role = true; + } +#endif + /* Check if the bus is free */ if (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) & CDNS_I2C_SR_BA) { ret = -EAGAIN; @@ -639,7 +869,15 @@ static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, } ret = num; + out: + +#if IS_ENABLED(CONFIG_I2C_SLAVE) + /* Switch i2c mode to slave */ + if (change_role) + cdns_i2c_set_mode(CDNS_I2C_MODE_SLAVE, id); +#endif + pm_runtime_mark_last_busy(id->dev); pm_runtime_put_autosuspend(id->dev); return ret; @@ -653,14 +891,67 @@ out: */ static u32 cdns_i2c_func(struct i2c_adapter *adap) { - return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR | - (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK) | - I2C_FUNC_SMBUS_BLOCK_DATA; + u32 func = I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR | + (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK) | + I2C_FUNC_SMBUS_BLOCK_DATA; + +#if IS_ENABLED(CONFIG_I2C_SLAVE) + func |= I2C_FUNC_SLAVE; +#endif + + return func; } +#if IS_ENABLED(CONFIG_I2C_SLAVE) +static int cdns_reg_slave(struct i2c_client *slave) +{ + int ret; + struct cdns_i2c *id = container_of(slave->adapter, struct cdns_i2c, + adap); + + if (id->slave) + return -EBUSY; + + if (slave->flags & I2C_CLIENT_TEN) + return -EAFNOSUPPORT; + + ret = pm_runtime_get_sync(id->dev); + if (ret < 0) + return ret; + + /* Store slave information */ + id->slave = slave; + + /* Enable I2C slave */ + cdns_i2c_set_mode(CDNS_I2C_MODE_SLAVE, id); + + return 0; +} + +static int cdns_unreg_slave(struct i2c_client *slave) +{ + struct cdns_i2c *id = container_of(slave->adapter, struct cdns_i2c, + adap); + + pm_runtime_put(id->dev); + + /* Remove slave information */ + id->slave = NULL; + + /* Enable I2C master */ + cdns_i2c_set_mode(CDNS_I2C_MODE_MASTER, id); + + return 0; +} +#endif + static const struct i2c_algorithm cdns_i2c_algo = { .master_xfer = cdns_i2c_master_xfer, .functionality = cdns_i2c_func, +#if IS_ENABLED(CONFIG_I2C_SLAVE) + .reg_slave = cdns_reg_slave, + .unreg_slave = cdns_unreg_slave, +#endif }; /** @@ -755,7 +1046,10 @@ static int cdns_i2c_setclk(unsigned long clk_in, struct cdns_i2c *id) ctrl_reg |= ((div_a << CDNS_I2C_CR_DIVA_SHIFT) | (div_b << CDNS_I2C_CR_DIVB_SHIFT)); cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); - +#if IS_ENABLED(CONFIG_I2C_SLAVE) + id->ctrl_reg_diva_divb = ctrl_reg & (CDNS_I2C_CR_DIVA_MASK | + CDNS_I2C_CR_DIVB_MASK); +#endif return 0; } @@ -948,8 +1242,12 @@ static int cdns_i2c_probe(struct platform_device *pdev) if (ret || (id->i2c_clk > I2C_MAX_FAST_MODE_FREQ)) id->i2c_clk = I2C_MAX_STANDARD_MODE_FREQ; - 
cdns_i2c_writereg(CDNS_I2C_CR_ACK_EN | CDNS_I2C_CR_NEA | CDNS_I2C_CR_MS, - CDNS_I2C_CR_OFFSET); +#if IS_ENABLED(CONFIG_I2C_SLAVE) + /* Set initial mode to master */ + id->dev_mode = CDNS_I2C_MODE_MASTER; + id->slave_state = CDNS_I2C_SLAVE_STATE_IDLE; +#endif + cdns_i2c_writereg(CDNS_I2C_CR_MASTER_EN_MASK, CDNS_I2C_CR_OFFSET); ret = cdns_i2c_setclk(id->input_clk, id); if (ret) { -- GitLab From 5429ef62bcf360aae06740cbe065be01e5cfb6fc Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 22 Jan 2020 19:38:21 +0000 Subject: [PATCH 0121/1971] compiler/gcc: Raise minimum GCC version for kernel builds to 4.8 It is very rare to see versions of GCC prior to 4.8 being used to build the mainline kernel. These old compilers are also know to have codegen issues which can lead to silent miscompilation: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 Raise the minimum GCC version for kernel build to 4.8 and remove some tautological Kconfig dependencies as a consequence. Cc: Masahiro Yamada Acked-by: Arnd Bergmann Reviewed-by: Nick Desaulniers Signed-off-by: Will Deacon --- Documentation/process/changes.rst | 2 +- arch/arm/crypto/Kconfig | 12 ++++++------ crypto/Kconfig | 1 - include/linux/compiler-gcc.h | 5 ++--- init/Kconfig | 1 - scripts/gcc-plugins/Kconfig | 2 +- 6 files changed, 10 insertions(+), 13 deletions(-) diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst index 91c5ff8e161e3..5cfb54c2aaa6e 100644 --- a/Documentation/process/changes.rst +++ b/Documentation/process/changes.rst @@ -29,7 +29,7 @@ you probably needn't concern yourself with pcmciautils. ====================== =============== ======================================== Program Minimal version Command to check the version ====================== =============== ======================================== -GNU C 4.6 gcc --version +GNU C 4.8 gcc --version GNU make 3.81 make --version binutils 2.23 ld -v flex 2.5.35 flex --version diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig index 2674de6ada1fa..c9bf2df85cb90 100644 --- a/arch/arm/crypto/Kconfig +++ b/arch/arm/crypto/Kconfig @@ -30,7 +30,7 @@ config CRYPTO_SHA1_ARM_NEON config CRYPTO_SHA1_ARM_CE tristate "SHA1 digest algorithm (ARM v8 Crypto Extensions)" - depends on KERNEL_MODE_NEON && (CC_IS_CLANG || GCC_VERSION >= 40800) + depends on KERNEL_MODE_NEON select CRYPTO_SHA1_ARM select CRYPTO_HASH help @@ -39,7 +39,7 @@ config CRYPTO_SHA1_ARM_CE config CRYPTO_SHA2_ARM_CE tristate "SHA-224/256 digest algorithm (ARM v8 Crypto Extensions)" - depends on KERNEL_MODE_NEON && (CC_IS_CLANG || GCC_VERSION >= 40800) + depends on KERNEL_MODE_NEON select CRYPTO_SHA256_ARM select CRYPTO_HASH help @@ -96,7 +96,7 @@ config CRYPTO_AES_ARM_BS config CRYPTO_AES_ARM_CE tristate "Accelerated AES using ARMv8 Crypto Extensions" - depends on KERNEL_MODE_NEON && (CC_IS_CLANG || GCC_VERSION >= 40800) + depends on KERNEL_MODE_NEON select CRYPTO_SKCIPHER select CRYPTO_LIB_AES select CRYPTO_SIMD @@ -106,7 +106,7 @@ config CRYPTO_AES_ARM_CE config CRYPTO_GHASH_ARM_CE tristate "PMULL-accelerated GHASH using NEON/ARMv8 Crypto Extensions" - depends on KERNEL_MODE_NEON && (CC_IS_CLANG || GCC_VERSION >= 40800) + depends on KERNEL_MODE_NEON select CRYPTO_HASH select CRYPTO_CRYPTD select CRYPTO_GF128MUL @@ -118,13 +118,13 @@ config CRYPTO_GHASH_ARM_CE config CRYPTO_CRCT10DIF_ARM_CE tristate "CRCT10DIF digest algorithm using PMULL instructions" - depends on KERNEL_MODE_NEON && (CC_IS_CLANG || GCC_VERSION >= 40800) + depends on KERNEL_MODE_NEON depends on CRC_T10DIF select 
CRYPTO_HASH config CRYPTO_CRC32_ARM_CE tristate "CRC32(C) digest algorithm using CRC and/or PMULL instructions" - depends on KERNEL_MODE_NEON && (CC_IS_CLANG || GCC_VERSION >= 40800) + depends on KERNEL_MODE_NEON depends on CRC32 select CRYPTO_HASH diff --git a/crypto/Kconfig b/crypto/Kconfig index c24a47406f8f5..34a8c5bfd0620 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -316,7 +316,6 @@ config CRYPTO_AEGIS128 config CRYPTO_AEGIS128_SIMD bool "Support SIMD acceleration for AEGIS-128" depends on CRYPTO_AEGIS128 && ((ARM || ARM64) && KERNEL_MODE_NEON) - depends on !ARM || CC_IS_CLANG || GCC_VERSION >= 40800 default y config CRYPTO_AEGIS128_AESNI_SSE2 diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index d7ee4c6bad482..e2f7252732617 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -10,7 +10,8 @@ + __GNUC_MINOR__ * 100 \ + __GNUC_PATCHLEVEL__) -#if GCC_VERSION < 40600 +/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 */ +#if GCC_VERSION < 40800 # error Sorry, your compiler is too old - please upgrade it. #endif @@ -126,9 +127,7 @@ #if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) && !defined(__CHECKER__) #define __HAVE_BUILTIN_BSWAP32__ #define __HAVE_BUILTIN_BSWAP64__ -#if GCC_VERSION >= 40800 #define __HAVE_BUILTIN_BSWAP16__ -#endif #endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP && !__CHECKER__ */ #if GCC_VERSION >= 70000 diff --git a/init/Kconfig b/init/Kconfig index 9e22ee8fbd75e..035d38a4f9add 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1285,7 +1285,6 @@ config LD_DEAD_CODE_DATA_ELIMINATION bool "Dead code and data elimination (EXPERIMENTAL)" depends on HAVE_LD_DEAD_CODE_DATA_ELIMINATION depends on EXPERT - depends on !(FUNCTION_TRACER && CC_IS_GCC && GCC_VERSION < 40800) depends on $(cc-option,-ffunction-sections -fdata-sections) depends on $(ld-option,--gc-sections) help diff --git a/scripts/gcc-plugins/Kconfig b/scripts/gcc-plugins/Kconfig index 013ba3a576691..ce0b99fb58471 100644 --- a/scripts/gcc-plugins/Kconfig +++ b/scripts/gcc-plugins/Kconfig @@ -8,7 +8,7 @@ config HAVE_GCC_PLUGINS menuconfig GCC_PLUGINS bool "GCC plugins" depends on HAVE_GCC_PLUGINS - depends on CC_IS_GCC && GCC_VERSION >= 40800 + depends on CC_IS_GCC depends on $(success,$(srctree)/scripts/gcc-plugin.sh $(CC)) default y help -- GitLab From 514cc55b01eb7f34f9d1d4518f81e42a633c784e Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 17 Dec 2019 14:20:25 +0000 Subject: [PATCH 0122/1971] netfilter: Avoid assigning 'const' pointer to non-const pointer MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit nf_remove_net_hook() uses WRITE_ONCE() to assign a 'const' pointer to a 'non-const' pointer. Cleanups to the implementation of WRITE_ONCE() mean that this will give rise to a compiler warning, just like a plain old assignment would do: | In file included from ./include/linux/export.h:43, | from ./include/linux/linkage.h:7, | from ./include/linux/kernel.h:8, | from net/netfilter/core.c:9: | net/netfilter/core.c: In function ‘nf_remove_net_hook’: | ./include/linux/compiler.h:216:30: warning: assignment discards ‘const’ qualifier from pointer target type [-Wdiscarded-qualifiers] | *(volatile typeof(x) *)&(x) = (val); \ | ^ | net/netfilter/core.c:379:3: note: in expansion of macro ‘WRITE_ONCE’ | WRITE_ONCE(orig_ops[i], &dummy_ops); | ^~~~~~~~~~ Follow the pattern used elsewhere in this file and add a cast to 'void *' to squash the warning. Cc: Pablo Neira Ayuso Cc: Jozsef Kadlecsik Cc: Florian Westphal Cc: "David S. 
Miller" Reviewed-by: Nick Desaulniers Signed-off-by: Will Deacon --- net/netfilter/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/netfilter/core.c b/net/netfilter/core.c index 78f046ec506fe..3ac7c8c1548de 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c @@ -376,7 +376,7 @@ static bool nf_remove_net_hook(struct nf_hook_entries *old, if (orig_ops[i] != unreg) continue; WRITE_ONCE(old->hooks[i].hook, accept_all); - WRITE_ONCE(orig_ops[i], &dummy_ops); + WRITE_ONCE(orig_ops[i], (void *)&dummy_ops); return true; } -- GitLab From 9a8939490d401fefddf53cd5e4cb3e20a52b98a7 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 14 Apr 2020 23:13:50 +0100 Subject: [PATCH 0123/1971] net: tls: Avoid assigning 'const' pointer to non-const pointer MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit tls_build_proto() uses WRITE_ONCE() to assign a 'const' pointer to a 'non-const' pointer. Cleanups to the implementation of WRITE_ONCE() mean that this will give rise to a compiler warning, just like a plain old assignment would do: | net/tls/tls_main.c: In function ‘tls_build_proto’: | ./include/linux/compiler.h:229:30: warning: assignment discards ‘const’ qualifier from pointer target type [-Wdiscarded-qualifiers] | net/tls/tls_main.c:640:4: note: in expansion of macro ‘smp_store_release’ | 640 | smp_store_release(&saved_tcpv6_prot, prot); | | ^~~~~~~~~~~~~~~~~ Drop the const qualifier from the local 'prot' variable, as it isn't needed. Cc: Boris Pismenny Cc: Aviad Yehezkel Cc: John Fastabend Cc: Daniel Borkmann Signed-off-by: Will Deacon --- net/tls/tls_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index 156efce50dbd1..b33e11c27cfa4 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -629,7 +629,7 @@ struct tls_context *tls_ctx_create(struct sock *sk) static void tls_build_proto(struct sock *sk) { int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4; - const struct proto *prot = READ_ONCE(sk->sk_prot); + struct proto *prot = READ_ONCE(sk->sk_prot); /* Build IPv6 TLS whenever the address of tcpv6 _prot changes */ if (ip_ver == TLSV6 && -- GitLab From 9b4fb5cec031f81ef436bf2cfd9fc265e25f6e45 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 19 Dec 2019 17:40:21 +0000 Subject: [PATCH 0124/1971] fault_inject: Don't rely on "return value" from WRITE_ONCE() It's a bit weird that WRITE_ONCE() evaluates to the value it stores and it's different to smp_store_release(), which can't be used this way. In preparation for preventing this in WRITE_ONCE(), change the fault injection code to use a local variable instead. 
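The pattern being removed and its replacement, in minimal form (the names below are placeholders,
not the actual fault-injection code):

    /* Before: relies on WRITE_ONCE() evaluating to the value it stores. */
    if (!WRITE_ONCE(counter, counter - 1))
            do_fail();

    /* After: compute the new value first, then store it. */
    unsigned int tmp = counter - 1;

    WRITE_ONCE(counter, tmp);
    if (!tmp)
            do_fail();

This is exactly the shape of the fail_nth change in the diff below.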
Cc: Akinobu Mita Signed-off-by: Will Deacon --- lib/fault-inject.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/fault-inject.c b/lib/fault-inject.c index 8186ca84910bc..ce12621b42756 100644 --- a/lib/fault-inject.c +++ b/lib/fault-inject.c @@ -106,7 +106,9 @@ bool should_fail(struct fault_attr *attr, ssize_t size) unsigned int fail_nth = READ_ONCE(current->fail_nth); if (fail_nth) { - if (!WRITE_ONCE(current->fail_nth, fail_nth - 1)) + fail_nth--; + WRITE_ONCE(current->fail_nth, fail_nth); + if (!fail_nth) goto fail; return false; -- GitLab From c6a771d932332568df9f46a3b53507c578e8c8e8 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 14 Apr 2020 22:22:47 +0100 Subject: [PATCH 0125/1971] arm64: csum: Disable KASAN for do_csum() do_csum() over-reads the source buffer and therefore abuses READ_ONCE_NOCHECK() to avoid tripping up KASAN. In preparation for READ_ONCE_NOCHECK() becoming a macro, and therefore losing its '__no_sanitize_address' annotation, just annotate do_csum() explicitly and fall back to normal loads. Cc: Mark Rutland Cc: Robin Murphy Signed-off-by: Will Deacon --- arch/arm64/lib/csum.c | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/arch/arm64/lib/csum.c b/arch/arm64/lib/csum.c index 60eccae2abad2..78b87a64ca0a3 100644 --- a/arch/arm64/lib/csum.c +++ b/arch/arm64/lib/csum.c @@ -14,7 +14,11 @@ static u64 accumulate(u64 sum, u64 data) return tmp + (tmp >> 64); } -unsigned int do_csum(const unsigned char *buff, int len) +/* + * We over-read the buffer and this makes KASAN unhappy. Instead, disable + * instrumentation and call kasan explicitly. + */ +unsigned int __no_sanitize_address do_csum(const unsigned char *buff, int len) { unsigned int offset, shift, sum; const u64 *ptr; @@ -42,7 +46,7 @@ unsigned int do_csum(const unsigned char *buff, int len) * odd/even alignment, and means we can ignore it until the very end. */ shift = offset * 8; - data = READ_ONCE_NOCHECK(*ptr++); + data = *ptr++; #ifdef __LITTLE_ENDIAN data = (data >> shift) << shift; #else @@ -58,10 +62,10 @@ unsigned int do_csum(const unsigned char *buff, int len) while (unlikely(len > 64)) { __uint128_t tmp1, tmp2, tmp3, tmp4; - tmp1 = READ_ONCE_NOCHECK(*(__uint128_t *)ptr); - tmp2 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 2)); - tmp3 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 4)); - tmp4 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 6)); + tmp1 = *(__uint128_t *)ptr; + tmp2 = *(__uint128_t *)(ptr + 2); + tmp3 = *(__uint128_t *)(ptr + 4); + tmp4 = *(__uint128_t *)(ptr + 6); len -= 64; ptr += 8; @@ -85,7 +89,7 @@ unsigned int do_csum(const unsigned char *buff, int len) __uint128_t tmp; sum64 = accumulate(sum64, data); - tmp = READ_ONCE_NOCHECK(*(__uint128_t *)ptr); + tmp = *(__uint128_t *)ptr; len -= 16; ptr += 2; @@ -100,7 +104,7 @@ unsigned int do_csum(const unsigned char *buff, int len) } if (len > 0) { sum64 = accumulate(sum64, data); - data = READ_ONCE_NOCHECK(*ptr); + data = *ptr; len -= 8; } /* -- GitLab From 5c2602e5fcefa5a581ce20521552d7d446a3a9aa Mon Sep 17 00:00:00 2001 From: Martin Blumenstingl Date: Tue, 14 Apr 2020 21:50:30 +0200 Subject: [PATCH 0126/1971] clk: meson: gxbb: Prepare the GPU clock tree to change at runtime The "mali_0" or "mali_1" clock trees should not be updated while the clock is running. Enforce this by setting CLK_SET_RATE_GATE on the "mali_0" and "mali_1" gates. 
This makes the CCF switch to the "mali_1" tree when "mali_0" is currently active and vice versa, which is exactly what the vendor driver does when updating the frequency of the mali clock. Also propagate set_rate requests from the gate to the divider and from the divider to the the mux so the GPU clock frequency can be updated at runtime (which will be required for GPU DVFS). Don't propagate rate changes to the mux parents because we don't want to change the MPLL clocks (these are reserved for audio). Signed-off-by: Martin Blumenstingl Signed-off-by: Jerome Brunet Acked-by: Neil Armstrong Link: https://lore.kernel.org/r/20200414195031.224021-2-martin.blumenstingl@googlemail.com --- drivers/clk/meson/gxbb.c | 40 ++++++++++++++++++++++------------------ 1 file changed, 22 insertions(+), 18 deletions(-) diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c index 5fd6a574f8c3c..0a68af6eec3dd 100644 --- a/drivers/clk/meson/gxbb.c +++ b/drivers/clk/meson/gxbb.c @@ -957,7 +957,9 @@ static struct clk_regmap gxbb_sar_adc_clk = { /* * The MALI IP is clocked by two identical clocks (mali_0 and mali_1) - * muxed by a glitch-free switch. + * muxed by a glitch-free switch. The CCF can manage this glitch-free + * mux because it does top-to-bottom updates the each clock tree and + * switches to the "inactive" one when CLK_SET_RATE_GATE is set. */ static const struct clk_parent_data gxbb_mali_0_1_parent_data[] = { @@ -980,14 +982,15 @@ static struct clk_regmap gxbb_mali_0_sel = { .hw.init = &(struct clk_init_data){ .name = "mali_0_sel", .ops = &clk_regmap_mux_ops, - /* - * bits 10:9 selects from 8 possible parents: - * xtal, gp0_pll, mpll2, mpll1, fclk_div7, - * fclk_div4, fclk_div3, fclk_div5 - */ .parent_data = gxbb_mali_0_1_parent_data, .num_parents = 8, - .flags = CLK_SET_RATE_NO_REPARENT, + /* + * Don't request the parent to change the rate because + * all GPU frequencies can be derived from the fclk_* + * clocks and one special GP0_PLL setting. This is + * important because we need the MPLL clocks for audio. + */ + .flags = 0, }, }; @@ -1004,7 +1007,7 @@ static struct clk_regmap gxbb_mali_0_div = { &gxbb_mali_0_sel.hw }, .num_parents = 1, - .flags = CLK_SET_RATE_NO_REPARENT, + .flags = CLK_SET_RATE_PARENT, }, }; @@ -1020,7 +1023,7 @@ static struct clk_regmap gxbb_mali_0 = { &gxbb_mali_0_div.hw }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, + .flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT, }, }; @@ -1033,14 +1036,15 @@ static struct clk_regmap gxbb_mali_1_sel = { .hw.init = &(struct clk_init_data){ .name = "mali_1_sel", .ops = &clk_regmap_mux_ops, - /* - * bits 10:9 selects from 8 possible parents: - * xtal, gp0_pll, mpll2, mpll1, fclk_div7, - * fclk_div4, fclk_div3, fclk_div5 - */ .parent_data = gxbb_mali_0_1_parent_data, .num_parents = 8, - .flags = CLK_SET_RATE_NO_REPARENT, + /* + * Don't request the parent to change the rate because + * all GPU frequencies can be derived from the fclk_* + * clocks and one special GP0_PLL setting. This is + * important because we need the MPLL clocks for audio. 
+ */ + .flags = 0, }, }; @@ -1057,7 +1061,7 @@ static struct clk_regmap gxbb_mali_1_div = { &gxbb_mali_1_sel.hw }, .num_parents = 1, - .flags = CLK_SET_RATE_NO_REPARENT, + .flags = CLK_SET_RATE_PARENT, }, }; @@ -1073,7 +1077,7 @@ static struct clk_regmap gxbb_mali_1 = { &gxbb_mali_1_div.hw }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, + .flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT, }, }; @@ -1093,7 +1097,7 @@ static struct clk_regmap gxbb_mali = { .ops = &clk_regmap_mux_ops, .parent_hws = gxbb_mali_parent_hws, .num_parents = 2, - .flags = CLK_SET_RATE_NO_REPARENT, + .flags = CLK_SET_RATE_PARENT, }, }; -- GitLab From 6dde0ae30a9a883778154f8377ed50a2c7752211 Mon Sep 17 00:00:00 2001 From: Martin Blumenstingl Date: Tue, 14 Apr 2020 21:50:31 +0200 Subject: [PATCH 0127/1971] clk: meson: g12a: Prepare the GPU clock tree to change at runtime The "mali_0" or "mali_1" clock trees should not be updated while the clock is running. Enforce this by setting CLK_SET_RATE_GATE on the "mali_0" and "mali_1" gates. This makes the CCF switch to the "mali_1" tree when "mali_0" is currently active and vice versa, which is exactly what the vendor driver does when updating the frequency of the mali clock. Also propagate set_rate requests from the gate to the divider and from the divider to the the mux so the GPU clock frequency can be updated at runtime (which will be required for GPU DVFS). Don't propagate rate changes to the mux parents because we don't want to change the MPLL clocks (these are reserved for audio). Signed-off-by: Martin Blumenstingl Signed-off-by: Jerome Brunet Acked-by: Neil Armstrong Link: https://lore.kernel.org/r/20200414195031.224021-3-martin.blumenstingl@googlemail.com --- drivers/clk/meson/g12a.c | 30 ++++++++++++++++++++++-------- 1 file changed, 22 insertions(+), 8 deletions(-) diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c index fad616cac01e0..30c15766ebb16 100644 --- a/drivers/clk/meson/g12a.c +++ b/drivers/clk/meson/g12a.c @@ -3702,7 +3702,9 @@ static struct clk_regmap g12a_hdmi = { /* * The MALI IP is clocked by two identical clocks (mali_0 and mali_1) - * muxed by a glitch-free switch. + * muxed by a glitch-free switch. The CCF can manage this glitch-free + * mux because it does top-to-bottom updates the each clock tree and + * switches to the "inactive" one when CLK_SET_RATE_GATE is set. */ static const struct clk_parent_data g12a_mali_0_1_parent_data[] = { { .fw_name = "xtal", }, @@ -3726,7 +3728,13 @@ static struct clk_regmap g12a_mali_0_sel = { .ops = &clk_regmap_mux_ops, .parent_data = g12a_mali_0_1_parent_data, .num_parents = 8, - .flags = CLK_SET_RATE_NO_REPARENT, + /* + * Don't request the parent to change the rate because + * all GPU frequencies can be derived from the fclk_* + * clocks and one special GP0_PLL setting. This is + * important because we need the MPLL clocks for audio. 
+ */ + .flags = 0, }, }; @@ -3743,7 +3751,7 @@ static struct clk_regmap g12a_mali_0_div = { &g12a_mali_0_sel.hw }, .num_parents = 1, - .flags = CLK_SET_RATE_NO_REPARENT, + .flags = CLK_SET_RATE_PARENT, }, }; @@ -3759,7 +3767,7 @@ static struct clk_regmap g12a_mali_0 = { &g12a_mali_0_div.hw }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, + .flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT, }, }; @@ -3774,7 +3782,13 @@ static struct clk_regmap g12a_mali_1_sel = { .ops = &clk_regmap_mux_ops, .parent_data = g12a_mali_0_1_parent_data, .num_parents = 8, - .flags = CLK_SET_RATE_NO_REPARENT, + /* + * Don't request the parent to change the rate because + * all GPU frequencies can be derived from the fclk_* + * clocks and one special GP0_PLL setting. This is + * important because we need the MPLL clocks for audio. + */ + .flags = 0, }, }; @@ -3791,7 +3805,7 @@ static struct clk_regmap g12a_mali_1_div = { &g12a_mali_1_sel.hw }, .num_parents = 1, - .flags = CLK_SET_RATE_NO_REPARENT, + .flags = CLK_SET_RATE_PARENT, }, }; @@ -3807,7 +3821,7 @@ static struct clk_regmap g12a_mali_1 = { &g12a_mali_1_div.hw }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, + .flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT, }, }; @@ -3827,7 +3841,7 @@ static struct clk_regmap g12a_mali = { .ops = &clk_regmap_mux_ops, .parent_hws = g12a_mali_parent_hws, .num_parents = 2, - .flags = CLK_SET_RATE_NO_REPARENT, + .flags = CLK_SET_RATE_PARENT, }, }; -- GitLab From a5460b5e5fb82656807840d40d3deaecad094044 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 16 Dec 2019 16:51:45 +0000 Subject: [PATCH 0128/1971] READ_ONCE: Simplify implementations of {READ,WRITE}_ONCE() The implementations of {READ,WRITE}_ONCE() suffer from a significant amount of indirection and complexity due to a historic GCC bug: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 which was originally worked around by 230fa253df63 ("kernel: Provide READ_ONCE and ASSIGN_ONCE"). Since GCC 4.8 is fairly vintage at this point and we emit a warning if we detect it during the build, return {READ,WRITE}_ONCE() to their former glory with an implementation that is easier to understand and, crucially, more amenable to optimisation. A side effect of this simplification is that WRITE_ONCE() no longer returns a value, but nobody seems to be relying on that and the new behaviour is aligned with smp_store_release(). 
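In outline, the accessors reduce to a single volatile-qualified access (the full macros are in the
diff below); note that WRITE_ONCE() is now a statement rather than an expression:

    #define __READ_ONCE(x)  (*(volatile typeof(x) *)&(x))

    #define WRITE_ONCE(x, val)                      \
    do {                                            \
            *(volatile typeof(x) *)&(x) = (val);    \
    } while (0)

READ_ONCE() wraps __READ_ONCE() with smp_read_barrier_depends() so the dependency-ordering
guarantee is preserved.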
Suggested-by: Linus Torvalds Cc: Peter Zijlstra Cc: Michael Ellerman Cc: Arnd Bergmann Cc: Christian Borntraeger Signed-off-by: Will Deacon --- include/linux/compiler.h | 118 +++++++++++++-------------------------- 1 file changed, 39 insertions(+), 79 deletions(-) diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 034b0a644efcc..338111a448d0c 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -177,60 +177,6 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, # define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__) #endif -#include - -#define __READ_ONCE_SIZE \ -({ \ - switch (size) { \ - case 1: *(__u8 *)res = *(volatile __u8 *)p; break; \ - case 2: *(__u16 *)res = *(volatile __u16 *)p; break; \ - case 4: *(__u32 *)res = *(volatile __u32 *)p; break; \ - case 8: *(__u64 *)res = *(volatile __u64 *)p; break; \ - default: \ - barrier(); \ - __builtin_memcpy((void *)res, (const void *)p, size); \ - barrier(); \ - } \ -}) - -static __always_inline -void __read_once_size(const volatile void *p, void *res, int size) -{ - __READ_ONCE_SIZE; -} - -#ifdef CONFIG_KASAN -/* - * We can't declare function 'inline' because __no_sanitize_address confilcts - * with inlining. Attempt to inline it may cause a build failure. - * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 - * '__maybe_unused' allows us to avoid defined-but-not-used warnings. - */ -# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused -#else -# define __no_kasan_or_inline __always_inline -#endif - -static __no_kasan_or_inline -void __read_once_size_nocheck(const volatile void *p, void *res, int size) -{ - __READ_ONCE_SIZE; -} - -static __always_inline void __write_once_size(volatile void *p, void *res, int size) -{ - switch (size) { - case 1: *(volatile __u8 *)p = *(__u8 *)res; break; - case 2: *(volatile __u16 *)p = *(__u16 *)res; break; - case 4: *(volatile __u32 *)p = *(__u32 *)res; break; - case 8: *(volatile __u64 *)p = *(__u64 *)res; break; - default: - barrier(); - __builtin_memcpy((void *)p, (const void *)res, size); - barrier(); - } -} - /* * Prevent the compiler from merging or refetching reads or writes. The * compiler is also forbidden from reordering successive instances of @@ -240,11 +186,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s * statements. * * These two macros will also work on aggregate data types like structs or - * unions. If the size of the accessed data type exceeds the word size of - * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will - * fall back to memcpy(). There's at least two memcpy()s: one for the - * __builtin_memcpy() and then one for the macro doing the copy of variable - * - '__u' allocated on the stack. + * unions. 
* * Their two major use cases are: (1) Mediating communication between * process-level code and irq/NMI handlers, all running on the same CPU, @@ -256,23 +198,49 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s #include #include -#define __READ_ONCE(x, check) \ +#define __READ_ONCE(x) (*(volatile typeof(x) *)&(x)) + +#define READ_ONCE(x) \ ({ \ - union { typeof(x) __val; char __c[1]; } __u; \ - if (check) \ - __read_once_size(&(x), __u.__c, sizeof(x)); \ - else \ - __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \ - smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \ - __u.__val; \ + typeof(x) __x = __READ_ONCE(x); \ + smp_read_barrier_depends(); \ + __x; \ }) -#define READ_ONCE(x) __READ_ONCE(x, 1) + +#define WRITE_ONCE(x, val) \ +do { \ + *(volatile typeof(x) *)&(x) = (val); \ +} while (0) + +#ifdef CONFIG_KASAN +/* + * We can't declare function 'inline' because __no_sanitize_address conflicts + * with inlining. Attempt to inline it may cause a build failure. + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 + * '__maybe_unused' allows us to avoid defined-but-not-used warnings. + */ +# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused +#else +# define __no_kasan_or_inline __always_inline +#endif + +static __no_kasan_or_inline +unsigned long __read_once_word_nocheck(const void *addr) +{ + return __READ_ONCE(*(unsigned long *)addr); +} /* - * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need - * to hide memory access from KASAN. + * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need to load a + * word from memory atomically but without telling KASAN. This is usually + * used by unwinding code when walking the stack of a running process. */ -#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0) +#define READ_ONCE_NOCHECK(x) \ +({ \ + unsigned long __x = __read_once_word_nocheck(&(x)); \ + smp_read_barrier_depends(); \ + __x; \ +}) static __no_kasan_or_inline unsigned long read_word_at_a_time(const void *addr) @@ -281,14 +249,6 @@ unsigned long read_word_at_a_time(const void *addr) return *(unsigned long *)addr; } -#define WRITE_ONCE(x, val) \ -({ \ - union { typeof(x) __val; char __c[1]; } __u = \ - { .__val = (__force typeof(x)) (val) }; \ - __write_once_size(&(x), __u.__c, sizeof(x)); \ - __u.__val; \ -}) - #endif /* __KERNEL__ */ /* -- GitLab From 9e343b467c70379e66b8b771d96f03ae23eba351 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 13 Dec 2019 14:47:02 +0000 Subject: [PATCH 0129/1971] READ_ONCE: Enforce atomicity for {READ,WRITE}_ONCE() memory accesses {READ,WRITE}_ONCE() cannot guarantee atomicity for arbitrary data sizes. This can be surprising to callers that might incorrectly be expecting atomicity for accesses to aggregate structures, although there are other callers where tearing is actually permissable (e.g. if they are using something akin to sequence locking to protect the access). Linus sayeth: | We could also look at being stricter for the normal READ/WRITE_ONCE(), | and require that they are | | (a) regular integer types | | (b) fit in an atomic word | | We actually did (b) for a while, until we noticed that we do it on | loff_t's etc and relaxed the rules. But maybe we could have a | "non-atomic" version of READ/WRITE_ONCE() that is used for the | questionable cases? 
The slight snag is that we also have to support 64-bit accesses on 32-bit architectures, as these appear to be widespread and tend to work out ok if either the architecture supports atomic 64-bit accesses (x86, armv7) or if the variable being accesses represents a virtual address and therefore only requires 32-bit atomicity in practice. Take a step in that direction by introducing a variant of 'compiletime_assert_atomic_type()' and use it to check the pointer argument to {READ,WRITE}_ONCE(). Expose __{READ,WRITE}_ONCE() variants which are allowed to tear and convert the one broken caller over to the new macros. Suggested-by: Linus Torvalds Cc: Peter Zijlstra Cc: Michael Ellerman Cc: Arnd Bergmann Signed-off-by: Will Deacon --- drivers/xen/time.c | 2 +- include/linux/compiler.h | 33 ++++++++++++++++++++++++++++++--- 2 files changed, 31 insertions(+), 4 deletions(-) diff --git a/drivers/xen/time.c b/drivers/xen/time.c index 0968859c29d0b..108edbcbc040f 100644 --- a/drivers/xen/time.c +++ b/drivers/xen/time.c @@ -64,7 +64,7 @@ static void xen_get_runstate_snapshot_cpu_delta( do { state_time = get64(&state->state_entry_time); rmb(); /* Hypervisor might update data. */ - *res = READ_ONCE(*state); + *res = __READ_ONCE(*state); rmb(); /* Hypervisor might update data. */ } while (get64(&state->state_entry_time) != state_time || (state_time & XEN_RUNSTATE_UPDATE)); diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 338111a448d0c..50bb2461648f9 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -198,20 +198,37 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, #include #include -#define __READ_ONCE(x) (*(volatile typeof(x) *)&(x)) +/* + * Use __READ_ONCE() instead of READ_ONCE() if you do not require any + * atomicity or dependency ordering guarantees. Note that this may result + * in tears! + */ +#define __READ_ONCE(x) (*(const volatile typeof(x) *)&(x)) -#define READ_ONCE(x) \ +#define __READ_ONCE_SCALAR(x) \ ({ \ typeof(x) __x = __READ_ONCE(x); \ smp_read_barrier_depends(); \ __x; \ }) -#define WRITE_ONCE(x, val) \ +#define READ_ONCE(x) \ +({ \ + compiletime_assert_rwonce_type(x); \ + __READ_ONCE_SCALAR(x); \ +}) + +#define __WRITE_ONCE(x, val) \ do { \ *(volatile typeof(x) *)&(x) = (val); \ } while (0) +#define WRITE_ONCE(x, val) \ +do { \ + compiletime_assert_rwonce_type(x); \ + __WRITE_ONCE(x, val); \ +} while (0) + #ifdef CONFIG_KASAN /* * We can't declare function 'inline' because __no_sanitize_address conflicts @@ -313,6 +330,16 @@ static inline void *offset_to_ptr(const int *off) compiletime_assert(__native_word(t), \ "Need native word sized stores/loads for atomicity.") +/* + * Yes, this permits 64-bit accesses on 32-bit architectures. These will + * actually be atomic in many cases (namely x86), but for others we rely on + * the access being split into 2x32-bit accesses for a 32-bit quantity (e.g. + * a virtual address) and a strong prevailing wind. 
+ */ +#define compiletime_assert_rwonce_type(t) \ + compiletime_assert(__native_word(t) || sizeof(t) == sizeof(long long), \ + "Unsupported access size for {READ,WRITE}_ONCE().") + /* &a[0] degrades to a pointer: a different type from an array */ #define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) -- GitLab From dee081bf8f824cabeb7c7495367d5dad0a444eb1 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 19 Dec 2019 14:22:31 +0000 Subject: [PATCH 0130/1971] READ_ONCE: Drop pointer qualifiers when reading from scalar types Passing a volatile-qualified pointer to READ_ONCE() is an absolute trainwreck for code generation: the use of 'typeof()' to define a temporary variable inside the macro means that the final evaluation in macro scope ends up forcing a read back from the stack. When stack protector is enabled (the default for arm64, at least), this causes the compiler to vomit up all sorts of junk. Unfortunately, dropping pointer qualifiers inside the macro poses quite a challenge, especially since the pointed-to type is permitted to be an aggregate, and this is relied upon by mm/ code accessing things like 'pmd_t'. Based on numerous hacks and discussions on the mailing list, this is the best I've managed to come up with. Introduce '__unqual_scalar_typeof()' which takes an expression and, if the expression is an optionally qualified 8, 16, 32 or 64-bit scalar type, evaluates to the unqualified type. Other input types, including aggregates, remain unchanged. Hopefully READ_ONCE() on volatile aggregate pointers isn't something we do on a fast-path. Cc: Peter Zijlstra Cc: Arnd Bergmann Suggested-by: Linus Torvalds Reported-by: Michael Ellerman Signed-off-by: Will Deacon --- include/linux/compiler.h | 6 +++--- include/linux/compiler_types.h | 21 +++++++++++++++++++++ 2 files changed, 24 insertions(+), 3 deletions(-) diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 50bb2461648f9..c363d8debc43f 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -203,13 +203,13 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, * atomicity or dependency ordering guarantees. Note that this may result * in tears! */ -#define __READ_ONCE(x) (*(const volatile typeof(x) *)&(x)) +#define __READ_ONCE(x) (*(const volatile __unqual_scalar_typeof(x) *)&(x)) #define __READ_ONCE_SCALAR(x) \ ({ \ - typeof(x) __x = __READ_ONCE(x); \ + __unqual_scalar_typeof(x) __x = __READ_ONCE(x); \ smp_read_barrier_depends(); \ - __x; \ + (typeof(x))__x; \ }) #define READ_ONCE(x) \ diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index e970f97a7fcb1..233066c92f6f3 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -210,6 +210,27 @@ struct ftrace_likely_data { /* Are two types/vars the same type (ignoring qualifiers)? */ #define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) +/* + * __unqual_scalar_typeof(x) - Declare an unqualified scalar type, leaving + * non-scalar types unchanged. + * + * We build this out of a couple of helper macros in a vain attempt to + * help you keep your lunch down while reading it. 
+ */ +#define __pick_scalar_type(x, type, otherwise) \ + __builtin_choose_expr(__same_type(x, type), (type)0, otherwise) + +#define __pick_integer_type(x, type, otherwise) \ + __pick_scalar_type(x, unsigned type, \ + __pick_scalar_type(x, signed type, otherwise)) + +#define __unqual_scalar_typeof(x) typeof( \ + __pick_integer_type(x, char, \ + __pick_integer_type(x, short, \ + __pick_integer_type(x, int, \ + __pick_integer_type(x, long, \ + __pick_integer_type(x, long long, x)))))) + /* Is this type a native word size -- useful for atomic operations */ #define __native_word(t) \ (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \ -- GitLab From 549887271a961a79375b2a55bf675515b9107778 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 19 Dec 2019 16:22:56 +0000 Subject: [PATCH 0131/1971] locking/barriers: Use '__unqual_scalar_typeof' for load-acquire macros Passing volatile-qualified pointers to the asm-generic implementations of the load-acquire macros results in a re-load from the stack due to the temporary result variable inheriting the volatile semantics thanks to the use of 'typeof()'. Define these temporary variables using 'unqual_scalar_typeof' to drop the volatile qualifier in the case that they are scalar types. Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Arnd Bergmann Signed-off-by: Will Deacon --- include/asm-generic/barrier.h | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h index 85b28eb80b11f..2eacaf7d62f61 100644 --- a/include/asm-generic/barrier.h +++ b/include/asm-generic/barrier.h @@ -128,10 +128,10 @@ do { \ #ifndef __smp_load_acquire #define __smp_load_acquire(p) \ ({ \ - typeof(*p) ___p1 = READ_ONCE(*p); \ + __unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p); \ compiletime_assert_atomic_type(*p); \ __smp_mb(); \ - ___p1; \ + (typeof(*p))___p1; \ }) #endif @@ -183,10 +183,10 @@ do { \ #ifndef smp_load_acquire #define smp_load_acquire(p) \ ({ \ - typeof(*p) ___p1 = READ_ONCE(*p); \ + __unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p); \ compiletime_assert_atomic_type(*p); \ barrier(); \ - ___p1; \ + (typeof(*p))___p1; \ }) #endif @@ -229,14 +229,14 @@ do { \ #ifndef smp_cond_load_relaxed #define smp_cond_load_relaxed(ptr, cond_expr) ({ \ typeof(ptr) __PTR = (ptr); \ - typeof(*ptr) VAL; \ + __unqual_scalar_typeof(*ptr) VAL; \ for (;;) { \ VAL = READ_ONCE(*__PTR); \ if (cond_expr) \ break; \ cpu_relax(); \ } \ - VAL; \ + (typeof(*ptr))VAL; \ }) #endif @@ -250,10 +250,10 @@ do { \ */ #ifndef smp_cond_load_acquire #define smp_cond_load_acquire(ptr, cond_expr) ({ \ - typeof(*ptr) _val; \ + __unqual_scalar_typeof(*ptr) _val; \ _val = smp_cond_load_relaxed(ptr, cond_expr); \ smp_acquire__after_ctrl_dep(); \ - _val; \ + (typeof(*ptr))_val; \ }) #endif -- GitLab From 10223c5286f7389c022e9e91f12c49918790cf36 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 19 Dec 2019 17:11:43 +0000 Subject: [PATCH 0132/1971] arm64: barrier: Use '__unqual_scalar_typeof' for acquire/release macros Passing volatile-qualified pointers to the arm64 implementations of the load-acquire/store-release macros results in a re-load from the stack and a bunch of associated stack-protector churn due to the temporary result variable inheriting the volatile semantics thanks to the use of 'typeof()'. Define these temporary variables using 'unqual_scalar_typeof' to drop the volatile qualifier in the case that they are scalar types. 
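The difference is easiest to see with a small, hypothetical example ('shared' and 'example' are
illustrative names): with plain typeof() the temporary inherits the volatile qualifier, so later
uses of it are forced back to memory, whereas __unqual_scalar_typeof() yields a plain scalar that
the compiler may keep in a register:

    extern volatile int shared;

    void example(void)
    {
            /* 'a' is volatile int: each later use re-reads its stack slot. */
            typeof(shared) a = shared;

            /* 'b' is plain int: free to live in a register. */
            __unqual_scalar_typeof(shared) b = shared;

            (void)a;
            (void)b;
    }

The load-acquire macros below apply the same idea to their internal temporaries, then cast the
result back to typeof(*p) so the type seen by callers is unchanged.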
Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Arnd Bergmann Acked-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/include/asm/barrier.h | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index 7d9cc5ec4971d..fb4c27506ef41 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -76,8 +76,8 @@ static inline unsigned long array_index_mask_nospec(unsigned long idx, #define __smp_store_release(p, v) \ do { \ typeof(p) __p = (p); \ - union { typeof(*p) __val; char __c[1]; } __u = \ - { .__val = (__force typeof(*p)) (v) }; \ + union { __unqual_scalar_typeof(*p) __val; char __c[1]; } __u = \ + { .__val = (__force __unqual_scalar_typeof(*p)) (v) }; \ compiletime_assert_atomic_type(*p); \ kasan_check_write(__p, sizeof(*p)); \ switch (sizeof(*p)) { \ @@ -110,7 +110,7 @@ do { \ #define __smp_load_acquire(p) \ ({ \ - union { typeof(*p) __val; char __c[1]; } __u; \ + union { __unqual_scalar_typeof(*p) __val; char __c[1]; } __u; \ typeof(p) __p = (p); \ compiletime_assert_atomic_type(*p); \ kasan_check_read(__p, sizeof(*p)); \ @@ -136,33 +136,33 @@ do { \ : "Q" (*__p) : "memory"); \ break; \ } \ - __u.__val; \ + (typeof(*p))__u.__val; \ }) #define smp_cond_load_relaxed(ptr, cond_expr) \ ({ \ typeof(ptr) __PTR = (ptr); \ - typeof(*ptr) VAL; \ + __unqual_scalar_typeof(*ptr) VAL; \ for (;;) { \ VAL = READ_ONCE(*__PTR); \ if (cond_expr) \ break; \ __cmpwait_relaxed(__PTR, VAL); \ } \ - VAL; \ + (typeof(*ptr))VAL; \ }) #define smp_cond_load_acquire(ptr, cond_expr) \ ({ \ typeof(ptr) __PTR = (ptr); \ - typeof(*ptr) VAL; \ + __unqual_scalar_typeof(*ptr) VAL; \ for (;;) { \ VAL = smp_load_acquire(__PTR); \ if (cond_expr) \ break; \ __cmpwait_relaxed(__PTR, VAL); \ } \ - VAL; \ + (typeof(*ptr))VAL; \ }) #include -- GitLab From 10415533a9062c9e5e27c421d9c232f0cae108fd Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 22 Jan 2020 19:36:29 +0000 Subject: [PATCH 0133/1971] gcov: Remove old GCC 3.4 support The kernel requires at least GCC 4.8 in order to build, and so there is no need to cater for the pre-4.7 gcov format. Remove the obsolete code. Acked-by: Peter Oberparleiter Reviewed-by: Nick Desaulniers Signed-off-by: Will Deacon --- kernel/gcov/Kconfig | 24 -- kernel/gcov/Makefile | 3 +- kernel/gcov/gcc_3_4.c | 573 ------------------------------------------ 3 files changed, 1 insertion(+), 599 deletions(-) delete mode 100644 kernel/gcov/gcc_3_4.c diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig index 3941a9c48f833..feaad597b3f4a 100644 --- a/kernel/gcov/Kconfig +++ b/kernel/gcov/Kconfig @@ -51,28 +51,4 @@ config GCOV_PROFILE_ALL larger and run slower. Also be sure to exclude files from profiling which are not linked to the kernel image to prevent linker errors. -choice - prompt "Specify GCOV format" - depends on GCOV_KERNEL - depends on CC_IS_GCC - ---help--- - The gcov format is usually determined by the GCC version, and the - default is chosen according to your GCC version. However, there are - exceptions where format changes are integrated in lower-version GCCs. - In such a case, change this option to adjust the format used in the - kernel accordingly. - -config GCOV_FORMAT_3_4 - bool "GCC 3.4 format" - depends on GCC_VERSION < 40700 - ---help--- - Select this option to use the format defined by GCC 3.4. - -config GCOV_FORMAT_4_7 - bool "GCC 4.7 format" - ---help--- - Select this option to use the format defined by GCC 4.7. 
- -endchoice - endmenu diff --git a/kernel/gcov/Makefile b/kernel/gcov/Makefile index d66a74b0f1004..16f8ecc7d8821 100644 --- a/kernel/gcov/Makefile +++ b/kernel/gcov/Makefile @@ -2,6 +2,5 @@ ccflags-y := -DSRCTREE='"$(srctree)"' -DOBJTREE='"$(objtree)"' obj-y := base.o fs.o -obj-$(CONFIG_GCOV_FORMAT_3_4) += gcc_base.o gcc_3_4.o -obj-$(CONFIG_GCOV_FORMAT_4_7) += gcc_base.o gcc_4_7.o +obj-$(CONFIG_CC_IS_GCC) += gcc_base.o gcc_4_7.o obj-$(CONFIG_CC_IS_CLANG) += clang.o diff --git a/kernel/gcov/gcc_3_4.c b/kernel/gcov/gcc_3_4.c deleted file mode 100644 index acb83558e5df4..0000000000000 --- a/kernel/gcov/gcc_3_4.c +++ /dev/null @@ -1,573 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * This code provides functions to handle gcc's profiling data format - * introduced with gcc 3.4. Future versions of gcc may change the gcov - * format (as happened before), so all format-specific information needs - * to be kept modular and easily exchangeable. - * - * This file is based on gcc-internal definitions. Functions and data - * structures are defined to be compatible with gcc counterparts. - * For a better understanding, refer to gcc source: gcc/gcov-io.h. - * - * Copyright IBM Corp. 2009 - * Author(s): Peter Oberparleiter - * - * Uses gcc-internal data definitions. - */ - -#include -#include -#include -#include -#include -#include "gcov.h" - -#define GCOV_COUNTERS 5 - -static struct gcov_info *gcov_info_head; - -/** - * struct gcov_fn_info - profiling meta data per function - * @ident: object file-unique function identifier - * @checksum: function checksum - * @n_ctrs: number of values per counter type belonging to this function - * - * This data is generated by gcc during compilation and doesn't change - * at run-time. - */ -struct gcov_fn_info { - unsigned int ident; - unsigned int checksum; - unsigned int n_ctrs[]; -}; - -/** - * struct gcov_ctr_info - profiling data per counter type - * @num: number of counter values for this type - * @values: array of counter values for this type - * @merge: merge function for counter values of this type (unused) - * - * This data is generated by gcc during compilation and doesn't change - * at run-time with the exception of the values array. - */ -struct gcov_ctr_info { - unsigned int num; - gcov_type *values; - void (*merge)(gcov_type *, unsigned int); -}; - -/** - * struct gcov_info - profiling data per object file - * @version: gcov version magic indicating the gcc version used for compilation - * @next: list head for a singly-linked list - * @stamp: time stamp - * @filename: name of the associated gcov data file - * @n_functions: number of instrumented functions - * @functions: function data - * @ctr_mask: mask specifying which counter types are active - * @counts: counter data per counter type - * - * This data is generated by gcc during compilation and doesn't change - * at run-time with the exception of the next pointer. 
- */ -struct gcov_info { - unsigned int version; - struct gcov_info *next; - unsigned int stamp; - const char *filename; - unsigned int n_functions; - const struct gcov_fn_info *functions; - unsigned int ctr_mask; - struct gcov_ctr_info counts[]; -}; - -/** - * gcov_info_filename - return info filename - * @info: profiling data set - */ -const char *gcov_info_filename(struct gcov_info *info) -{ - return info->filename; -} - -/** - * gcov_info_version - return info version - * @info: profiling data set - */ -unsigned int gcov_info_version(struct gcov_info *info) -{ - return info->version; -} - -/** - * gcov_info_next - return next profiling data set - * @info: profiling data set - * - * Returns next gcov_info following @info or first gcov_info in the chain if - * @info is %NULL. - */ -struct gcov_info *gcov_info_next(struct gcov_info *info) -{ - if (!info) - return gcov_info_head; - - return info->next; -} - -/** - * gcov_info_link - link/add profiling data set to the list - * @info: profiling data set - */ -void gcov_info_link(struct gcov_info *info) -{ - info->next = gcov_info_head; - gcov_info_head = info; -} - -/** - * gcov_info_unlink - unlink/remove profiling data set from the list - * @prev: previous profiling data set - * @info: profiling data set - */ -void gcov_info_unlink(struct gcov_info *prev, struct gcov_info *info) -{ - if (prev) - prev->next = info->next; - else - gcov_info_head = info->next; -} - -/** - * gcov_info_within_module - check if a profiling data set belongs to a module - * @info: profiling data set - * @mod: module - * - * Returns true if profiling data belongs module, false otherwise. - */ -bool gcov_info_within_module(struct gcov_info *info, struct module *mod) -{ - return within_module((unsigned long)info, mod); -} - -/* Symbolic links to be created for each profiling data file. */ -const struct gcov_link gcov_link[] = { - { OBJ_TREE, "gcno" }, /* Link to .gcno file in $(objtree). */ - { 0, NULL}, -}; - -/* - * Determine whether a counter is active. Based on gcc magic. Doesn't change - * at run-time. - */ -static int counter_active(struct gcov_info *info, unsigned int type) -{ - return (1 << type) & info->ctr_mask; -} - -/* Determine number of active counters. Based on gcc magic. */ -static unsigned int num_counter_active(struct gcov_info *info) -{ - unsigned int i; - unsigned int result = 0; - - for (i = 0; i < GCOV_COUNTERS; i++) { - if (counter_active(info, i)) - result++; - } - return result; -} - -/** - * gcov_info_reset - reset profiling data to zero - * @info: profiling data set - */ -void gcov_info_reset(struct gcov_info *info) -{ - unsigned int active = num_counter_active(info); - unsigned int i; - - for (i = 0; i < active; i++) { - memset(info->counts[i].values, 0, - info->counts[i].num * sizeof(gcov_type)); - } -} - -/** - * gcov_info_is_compatible - check if profiling data can be added - * @info1: first profiling data set - * @info2: second profiling data set - * - * Returns non-zero if profiling data can be added, zero otherwise. - */ -int gcov_info_is_compatible(struct gcov_info *info1, struct gcov_info *info2) -{ - return (info1->stamp == info2->stamp); -} - -/** - * gcov_info_add - add up profiling data - * @dest: profiling data set to which data is added - * @source: profiling data set which is added - * - * Adds profiling counts of @source to @dest. 
- */ -void gcov_info_add(struct gcov_info *dest, struct gcov_info *source) -{ - unsigned int i; - unsigned int j; - - for (i = 0; i < num_counter_active(dest); i++) { - for (j = 0; j < dest->counts[i].num; j++) { - dest->counts[i].values[j] += - source->counts[i].values[j]; - } - } -} - -/* Get size of function info entry. Based on gcc magic. */ -static size_t get_fn_size(struct gcov_info *info) -{ - size_t size; - - size = sizeof(struct gcov_fn_info) + num_counter_active(info) * - sizeof(unsigned int); - if (__alignof__(struct gcov_fn_info) > sizeof(unsigned int)) - size = ALIGN(size, __alignof__(struct gcov_fn_info)); - return size; -} - -/* Get address of function info entry. Based on gcc magic. */ -static struct gcov_fn_info *get_fn_info(struct gcov_info *info, unsigned int fn) -{ - return (struct gcov_fn_info *) - ((char *) info->functions + fn * get_fn_size(info)); -} - -/** - * gcov_info_dup - duplicate profiling data set - * @info: profiling data set to duplicate - * - * Return newly allocated duplicate on success, %NULL on error. - */ -struct gcov_info *gcov_info_dup(struct gcov_info *info) -{ - struct gcov_info *dup; - unsigned int i; - unsigned int active; - - /* Duplicate gcov_info. */ - active = num_counter_active(info); - dup = kzalloc(struct_size(dup, counts, active), GFP_KERNEL); - if (!dup) - return NULL; - dup->version = info->version; - dup->stamp = info->stamp; - dup->n_functions = info->n_functions; - dup->ctr_mask = info->ctr_mask; - /* Duplicate filename. */ - dup->filename = kstrdup(info->filename, GFP_KERNEL); - if (!dup->filename) - goto err_free; - /* Duplicate table of functions. */ - dup->functions = kmemdup(info->functions, info->n_functions * - get_fn_size(info), GFP_KERNEL); - if (!dup->functions) - goto err_free; - /* Duplicate counter arrays. */ - for (i = 0; i < active ; i++) { - struct gcov_ctr_info *ctr = &info->counts[i]; - size_t size = ctr->num * sizeof(gcov_type); - - dup->counts[i].num = ctr->num; - dup->counts[i].merge = ctr->merge; - dup->counts[i].values = vmalloc(size); - if (!dup->counts[i].values) - goto err_free; - memcpy(dup->counts[i].values, ctr->values, size); - } - return dup; - -err_free: - gcov_info_free(dup); - return NULL; -} - -/** - * gcov_info_free - release memory for profiling data set duplicate - * @info: profiling data set duplicate to free - */ -void gcov_info_free(struct gcov_info *info) -{ - unsigned int active = num_counter_active(info); - unsigned int i; - - for (i = 0; i < active ; i++) - vfree(info->counts[i].values); - kfree(info->functions); - kfree(info->filename); - kfree(info); -} - -/** - * struct type_info - iterator helper array - * @ctr_type: counter type - * @offset: index of the first value of the current function for this type - * - * This array is needed to convert the in-memory data format into the in-file - * data format: - * - * In-memory: - * for each counter type - * for each function - * values - * - * In-file: - * for each function - * for each counter type - * values - * - * See gcc source gcc/gcov-io.h for more information on data organization. 
- */ -struct type_info { - int ctr_type; - unsigned int offset; -}; - -/** - * struct gcov_iterator - specifies current file position in logical records - * @info: associated profiling data - * @record: record type - * @function: function number - * @type: counter type - * @count: index into values array - * @num_types: number of counter types - * @type_info: helper array to get values-array offset for current function - */ -struct gcov_iterator { - struct gcov_info *info; - - int record; - unsigned int function; - unsigned int type; - unsigned int count; - - int num_types; - struct type_info type_info[]; -}; - -static struct gcov_fn_info *get_func(struct gcov_iterator *iter) -{ - return get_fn_info(iter->info, iter->function); -} - -static struct type_info *get_type(struct gcov_iterator *iter) -{ - return &iter->type_info[iter->type]; -} - -/** - * gcov_iter_new - allocate and initialize profiling data iterator - * @info: profiling data set to be iterated - * - * Return file iterator on success, %NULL otherwise. - */ -struct gcov_iterator *gcov_iter_new(struct gcov_info *info) -{ - struct gcov_iterator *iter; - - iter = kzalloc(struct_size(iter, type_info, num_counter_active(info)), - GFP_KERNEL); - if (iter) - iter->info = info; - - return iter; -} - -/** - * gcov_iter_free - release memory for iterator - * @iter: file iterator to free - */ -void gcov_iter_free(struct gcov_iterator *iter) -{ - kfree(iter); -} - -/** - * gcov_iter_get_info - return profiling data set for given file iterator - * @iter: file iterator - */ -struct gcov_info *gcov_iter_get_info(struct gcov_iterator *iter) -{ - return iter->info; -} - -/** - * gcov_iter_start - reset file iterator to starting position - * @iter: file iterator - */ -void gcov_iter_start(struct gcov_iterator *iter) -{ - int i; - - iter->record = 0; - iter->function = 0; - iter->type = 0; - iter->count = 0; - iter->num_types = 0; - for (i = 0; i < GCOV_COUNTERS; i++) { - if (counter_active(iter->info, i)) { - iter->type_info[iter->num_types].ctr_type = i; - iter->type_info[iter->num_types++].offset = 0; - } - } -} - -/* Mapping of logical record number to actual file content. */ -#define RECORD_FILE_MAGIC 0 -#define RECORD_GCOV_VERSION 1 -#define RECORD_TIME_STAMP 2 -#define RECORD_FUNCTION_TAG 3 -#define RECORD_FUNCTON_TAG_LEN 4 -#define RECORD_FUNCTION_IDENT 5 -#define RECORD_FUNCTION_CHECK 6 -#define RECORD_COUNT_TAG 7 -#define RECORD_COUNT_LEN 8 -#define RECORD_COUNT 9 - -/** - * gcov_iter_next - advance file iterator to next logical record - * @iter: file iterator - * - * Return zero if new position is valid, non-zero if iterator has reached end. 
- */ -int gcov_iter_next(struct gcov_iterator *iter) -{ - switch (iter->record) { - case RECORD_FILE_MAGIC: - case RECORD_GCOV_VERSION: - case RECORD_FUNCTION_TAG: - case RECORD_FUNCTON_TAG_LEN: - case RECORD_FUNCTION_IDENT: - case RECORD_COUNT_TAG: - /* Advance to next record */ - iter->record++; - break; - case RECORD_COUNT: - /* Advance to next count */ - iter->count++; - /* fall through */ - case RECORD_COUNT_LEN: - if (iter->count < get_func(iter)->n_ctrs[iter->type]) { - iter->record = 9; - break; - } - /* Advance to next counter type */ - get_type(iter)->offset += iter->count; - iter->count = 0; - iter->type++; - /* fall through */ - case RECORD_FUNCTION_CHECK: - if (iter->type < iter->num_types) { - iter->record = 7; - break; - } - /* Advance to next function */ - iter->type = 0; - iter->function++; - /* fall through */ - case RECORD_TIME_STAMP: - if (iter->function < iter->info->n_functions) - iter->record = 3; - else - iter->record = -1; - break; - } - /* Check for EOF. */ - if (iter->record == -1) - return -EINVAL; - else - return 0; -} - -/** - * seq_write_gcov_u32 - write 32 bit number in gcov format to seq_file - * @seq: seq_file handle - * @v: value to be stored - * - * Number format defined by gcc: numbers are recorded in the 32 bit - * unsigned binary form of the endianness of the machine generating the - * file. - */ -static int seq_write_gcov_u32(struct seq_file *seq, u32 v) -{ - return seq_write(seq, &v, sizeof(v)); -} - -/** - * seq_write_gcov_u64 - write 64 bit number in gcov format to seq_file - * @seq: seq_file handle - * @v: value to be stored - * - * Number format defined by gcc: numbers are recorded in the 32 bit - * unsigned binary form of the endianness of the machine generating the - * file. 64 bit numbers are stored as two 32 bit numbers, the low part - * first. - */ -static int seq_write_gcov_u64(struct seq_file *seq, u64 v) -{ - u32 data[2]; - - data[0] = (v & 0xffffffffUL); - data[1] = (v >> 32); - return seq_write(seq, data, sizeof(data)); -} - -/** - * gcov_iter_write - write data for current pos to seq_file - * @iter: file iterator - * @seq: seq_file handle - * - * Return zero on success, non-zero otherwise. - */ -int gcov_iter_write(struct gcov_iterator *iter, struct seq_file *seq) -{ - int rc = -EINVAL; - - switch (iter->record) { - case RECORD_FILE_MAGIC: - rc = seq_write_gcov_u32(seq, GCOV_DATA_MAGIC); - break; - case RECORD_GCOV_VERSION: - rc = seq_write_gcov_u32(seq, iter->info->version); - break; - case RECORD_TIME_STAMP: - rc = seq_write_gcov_u32(seq, iter->info->stamp); - break; - case RECORD_FUNCTION_TAG: - rc = seq_write_gcov_u32(seq, GCOV_TAG_FUNCTION); - break; - case RECORD_FUNCTON_TAG_LEN: - rc = seq_write_gcov_u32(seq, 2); - break; - case RECORD_FUNCTION_IDENT: - rc = seq_write_gcov_u32(seq, get_func(iter)->ident); - break; - case RECORD_FUNCTION_CHECK: - rc = seq_write_gcov_u32(seq, get_func(iter)->checksum); - break; - case RECORD_COUNT_TAG: - rc = seq_write_gcov_u32(seq, - GCOV_TAG_FOR_COUNTER(get_type(iter)->ctr_type)); - break; - case RECORD_COUNT_LEN: - rc = seq_write_gcov_u32(seq, - get_func(iter)->n_ctrs[iter->type] * 2); - break; - case RECORD_COUNT: - rc = seq_write_gcov_u64(seq, - iter->info->counts[iter->type]. 
- values[iter->count + get_type(iter)->offset]); - break; - } - return rc; -} -- GitLab From da9953b729c12ece6d35fd15d236457eee679228 Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Thu, 2 Apr 2020 09:32:35 -0700 Subject: [PATCH 0134/1971] f2fs: introduce sysfs/data_io_flag to attach REQ_META/FUA This patch introduces a way to attach REQ_META/FUA explicitly to all the data writes given temperature. -> attach REQ_FUA to Hot Data writes -> attach REQ_FUA to Hot|Warm Data writes -> attach REQ_FUA to Hot|Warm|Cold Data writes -> attach REQ_FUA to Hot|Warm|Cold Data writes as well as REQ_META to Hot Data writes Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- Documentation/ABI/testing/sysfs-fs-f2fs | 9 +++++++++ fs/f2fs/data.c | 23 +++++++++++++++++++++++ fs/f2fs/f2fs.h | 3 +++ fs/f2fs/sysfs.c | 3 ++- 4 files changed, 37 insertions(+), 1 deletion(-) diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs index bd8a0d19abe67..c8620ea7022a7 100644 --- a/Documentation/ABI/testing/sysfs-fs-f2fs +++ b/Documentation/ABI/testing/sysfs-fs-f2fs @@ -323,3 +323,12 @@ What: /sys/fs/f2fs//mounted_time_sec Date: February 2020 Contact: "Jaegeuk Kim" Description: Show the mounted time in secs of this partition. + +What: /sys/fs/f2fs//data_io_flag +Date: April 2020 +Contact: "Jaegeuk Kim" +Description: Give a way to attach REQ_META|FUA to data writes + given temperature-based bits. Now the bits indicate: + * REQ_META | REQ_FUA | + * 5 | 4 | 3 | 2 | 1 | 0 | + * Cold | Warm | Hot | Cold | Warm | Hot | diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index cdf2f626bea7a..accd28728642a 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -513,6 +513,28 @@ void f2fs_submit_bio(struct f2fs_sb_info *sbi, __submit_bio(sbi, bio, type); } +static void __attach_data_io_flag(struct f2fs_io_info *fio) +{ + struct f2fs_sb_info *sbi = fio->sbi; + unsigned int temp_mask = (1 << NR_TEMP_TYPE) - 1; + unsigned int fua_flag = sbi->data_io_flag & temp_mask; + unsigned int meta_flag = (sbi->data_io_flag >> NR_TEMP_TYPE) & + temp_mask; + /* + * data io flag bits per temp: + * REQ_META | REQ_FUA | + * 5 | 4 | 3 | 2 | 1 | 0 | + * Cold | Warm | Hot | Cold | Warm | Hot | + */ + if (fio->type != DATA) + return; + + if ((1 << fio->temp) & meta_flag) + fio->op_flags |= REQ_META; + if ((1 << fio->temp) & fua_flag) + fio->op_flags |= REQ_FUA; +} + static void __submit_merged_bio(struct f2fs_bio_info *io) { struct f2fs_io_info *fio = &io->fio; @@ -520,6 +542,7 @@ static void __submit_merged_bio(struct f2fs_bio_info *io) if (!io->bio) return; + __attach_data_io_flag(fio); bio_set_op_attrs(io->bio, fio->op, fio->op_flags); if (is_read_io(fio->op)) diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index ba470d5687fe0..c2788738aa0d4 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -1507,6 +1507,9 @@ struct f2fs_sb_info { unsigned long long write_iostat[NR_IO_TYPE]; bool iostat_enable; + /* to attach REQ_META|REQ_FUA flags */ + unsigned int data_io_flag; + /* For sysfs suppport */ struct kobject s_kobj; struct completion s_kobj_unregister; diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c index e3bbbef9b4f09..aeebfb5024a22 100644 --- a/fs/f2fs/sysfs.c +++ b/fs/f2fs/sysfs.c @@ -372,7 +372,6 @@ out: return count; } - if (!strcmp(a->attr.name, "iostat_enable")) { sbi->iostat_enable = !!t; if (!sbi->iostat_enable) @@ -543,6 +542,7 @@ F2FS_RW_ATTR(F2FS_SBI, f2fs_super_block, extension_list, extension_list); F2FS_RW_ATTR(FAULT_INFO_RATE, f2fs_fault_info, inject_rate, inject_rate); F2FS_RW_ATTR(FAULT_INFO_TYPE, 
f2fs_fault_info, inject_type, inject_type); #endif +F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, data_io_flag, data_io_flag); F2FS_GENERAL_RO_ATTR(dirty_segments); F2FS_GENERAL_RO_ATTR(free_segments); F2FS_GENERAL_RO_ATTR(lifetime_write_kbytes); @@ -622,6 +622,7 @@ static struct attribute *f2fs_attrs[] = { ATTR_LIST(inject_rate), ATTR_LIST(inject_type), #endif + ATTR_LIST(data_io_flag), ATTR_LIST(dirty_segments), ATTR_LIST(free_segments), ATTR_LIST(unusable), -- GitLab From f82cdff1aa7f6dcf6625c7219342a6e5c977098d Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Wed, 15 Apr 2020 12:58:26 -0700 Subject: [PATCH 0135/1971] x86/mce: Drop bogus comment about mce.kflags The bit definitions for kflags are for internal use only. A late edit moved them from uapi/asm/mce.h to the internal x86 , but the comment saying "See below" was accidentally left here. Delete "See below". Just labelling this field as internal kernel use is sufficient. Fixes: 1de08dccd383 ("x86/mce: Add a struct mce.kflags field") Signed-off-by: Tony Luck Signed-off-by: Borislav Petkov Link: https://lkml.kernel.org/r/20200415195826.GA13681@agluck-desk2.amr.corp.intel.com --- arch/x86/include/uapi/asm/mce.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/include/uapi/asm/mce.h b/arch/x86/include/uapi/asm/mce.h index 5b59d80f1d4e1..db9adc081c5af 100644 --- a/arch/x86/include/uapi/asm/mce.h +++ b/arch/x86/include/uapi/asm/mce.h @@ -35,7 +35,7 @@ struct mce { __u64 ipid; /* MCA_IPID MSR: only valid on SMCA systems */ __u64 ppin; /* Protected Processor Inventory Number */ __u32 microcode; /* Microcode revision */ - __u64 kflags; /* Internal kernel use. See below */ + __u64 kflags; /* Internal kernel use */ }; #define MCE_GET_RECORD_LEN _IOR('M', 1, int) -- GitLab From fc6f5d0a4983e2ba9451cc1377a8da5a82cdc52d Mon Sep 17 00:00:00 2001 From: Alan Mikhak Date: Wed, 15 Apr 2020 10:27:09 -0700 Subject: [PATCH 0136/1971] dmaengine: dw-edma: Decouple dw-edma-core.c from struct pci_dev Decouple dw-edma-core.c from struct pci_dev as a step toward integration of dw-edma with pci-epf-test so the latter can initiate dma operations locally from the endpoint side. A barrier to such integration is the dependency of dw_edma_probe() and other functions in dw-edma-core.c on struct pci_dev. The Synopsys DesignWare dw-edma driver was designed to run on host side of PCIe link to initiate DMA operations remotely using eDMA channels of PCIe controller on the endpoint side. This can be inferred from seeing that dw-edma uses struct pci_dev and accesses hardware registers of dma channels across the bus using BAR0 and BAR2. The ops field of struct dw_edma in dw-edma-core.h is currenty undefined: const struct dw_edma_core_ops *ops; However, the kernel builds without failure even when dw-edma driver is enabled. Instead of removing the currently undefined and usued ops field, define struct dw_edma_core_ops and use the ops field to decouple dw-edma-core.c from struct pci_dev. 
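For readers unfamiliar with the pattern, the indirection amounts to the
following minimal user-space sketch (illustrative only, not the driver code;
the "PCIe" provider below is a stand-in for pci_irq_vector(), and the tiny
struct device is a placeholder):

#include <stdio.h>

struct device {
    int base_irq;
};

struct dw_edma_core_ops {
    int (*irq_vector)(struct device *dev, unsigned int nr);
};

/* Bus-specific glue: the only place that knows how vectors are mapped. */
static int pcie_irq_vector(struct device *dev, unsigned int nr)
{
    return dev->base_irq + (int)nr;
}

static const struct dw_edma_core_ops pcie_ops = {
    .irq_vector = pcie_irq_vector,
};

/* Core code: sees only the ops table, never a struct pci_dev. */
static int core_request_vector(const struct dw_edma_core_ops *ops,
                               struct device *dev, unsigned int nr)
{
    return ops->irq_vector(dev, nr);
}

int main(void)
{
    struct device dev = { .base_irq = 32 };

    printf("vector 0 = %d\n", core_request_vector(&pcie_ops, &dev, 0));
    return 0;
}

The point of the ops table is that an endpoint-side user could then plug in
its own vector lookup without the core changing.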
Signed-off-by: Alan Mikhak Acked-by: Gustavo Pimentel Link: https://lore.kernel.org/r/1586971629-30196-1-git-send-email-alan.mikhak@sifive.com Signed-off-by: Vinod Koul --- drivers/dma/dw-edma/dw-edma-core.c | 29 ++++++++++++++++++++--------- drivers/dma/dw-edma/dw-edma-core.h | 4 ++++ drivers/dma/dw-edma/dw-edma-pcie.c | 10 ++++++++++ 3 files changed, 34 insertions(+), 9 deletions(-) diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c index ff392c01bad1f..db401eb113221 100644 --- a/drivers/dma/dw-edma/dw-edma-core.c +++ b/drivers/dma/dw-edma/dw-edma-core.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include "dw-edma-core.h" #include "dw-edma-v0-core.h" @@ -781,7 +781,7 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip, if (dw->nr_irqs == 1) { /* Common IRQ shared among all channels */ - err = request_irq(pci_irq_vector(to_pci_dev(dev), 0), + err = request_irq(dw->ops->irq_vector(dev, 0), dw_edma_interrupt_common, IRQF_SHARED, dw->name, &dw->irq[0]); if (err) { @@ -789,7 +789,7 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip, return err; } - get_cached_msi_msg(pci_irq_vector(to_pci_dev(dev), 0), + get_cached_msi_msg(dw->ops->irq_vector(dev, 0), &dw->irq[0].msi); } else { /* Distribute IRQs equally among all channels */ @@ -804,7 +804,7 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip, dw_edma_add_irq_mask(&rd_mask, *rd_alloc, dw->rd_ch_cnt); for (i = 0; i < (*wr_alloc + *rd_alloc); i++) { - err = request_irq(pci_irq_vector(to_pci_dev(dev), i), + err = request_irq(dw->ops->irq_vector(dev, i), i < *wr_alloc ? dw_edma_interrupt_write : dw_edma_interrupt_read, @@ -815,7 +815,7 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip, return err; } - get_cached_msi_msg(pci_irq_vector(to_pci_dev(dev), i), + get_cached_msi_msg(dw->ops->irq_vector(dev, i), &dw->irq[i].msi); } @@ -827,12 +827,23 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip, int dw_edma_probe(struct dw_edma_chip *chip) { - struct device *dev = chip->dev; - struct dw_edma *dw = chip->dw; + struct device *dev; + struct dw_edma *dw; u32 wr_alloc = 0; u32 rd_alloc = 0; int i, err; + if (!chip) + return -EINVAL; + + dev = chip->dev; + if (!dev) + return -EINVAL; + + dw = chip->dw; + if (!dw || !dw->irq || !dw->ops || !dw->ops->irq_vector) + return -EINVAL; + raw_spin_lock_init(&dw->lock); /* Find out how many write channels are supported by hardware */ @@ -884,7 +895,7 @@ int dw_edma_probe(struct dw_edma_chip *chip) err_irq_free: for (i = (dw->nr_irqs - 1); i >= 0; i--) - free_irq(pci_irq_vector(to_pci_dev(dev), i), &dw->irq[i]); + free_irq(dw->ops->irq_vector(dev, i), &dw->irq[i]); dw->nr_irqs = 0; @@ -904,7 +915,7 @@ int dw_edma_remove(struct dw_edma_chip *chip) /* Free irqs */ for (i = (dw->nr_irqs - 1); i >= 0; i--) - free_irq(pci_irq_vector(to_pci_dev(dev), i), &dw->irq[i]); + free_irq(dw->ops->irq_vector(dev, i), &dw->irq[i]); /* Power management */ pm_runtime_disable(dev); diff --git a/drivers/dma/dw-edma/dw-edma-core.h b/drivers/dma/dw-edma/dw-edma-core.h index 4e5f9f6e901ba..31fc50d317928 100644 --- a/drivers/dma/dw-edma/dw-edma-core.h +++ b/drivers/dma/dw-edma/dw-edma-core.h @@ -103,6 +103,10 @@ struct dw_edma_irq { struct dw_edma *dw; }; +struct dw_edma_core_ops { + int (*irq_vector)(struct device *dev, unsigned int nr); +}; + struct dw_edma { char name[20]; diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c index dc85f55e1bb8c..1eafc602e17ea 100644 --- a/drivers/dma/dw-edma/dw-edma-pcie.c 
+++ b/drivers/dma/dw-edma/dw-edma-pcie.c @@ -54,6 +54,15 @@ static const struct dw_edma_pcie_data snps_edda_data = { .irqs = 1, }; +static int dw_edma_pcie_irq_vector(struct device *dev, unsigned int nr) +{ + return pci_irq_vector(to_pci_dev(dev), nr); +} + +static const struct dw_edma_core_ops dw_edma_pcie_core_ops = { + .irq_vector = dw_edma_pcie_irq_vector, +}; + static int dw_edma_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *pid) { @@ -151,6 +160,7 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev, dw->version = pdata->version; dw->mode = pdata->mode; dw->nr_irqs = nr_irqs; + dw->ops = &dw_edma_pcie_core_ops; /* Debug info */ pci_dbg(pdev, "Version:\t%u\n", dw->version); -- GitLab From cde9a96ee24f796941bd187e0ded93a242ce5340 Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Fri, 17 Apr 2020 17:07:08 +0900 Subject: [PATCH 0137/1971] dt-bindings: dma: renesas,rcar-dmac: convert bindings to json-schema Convert Renesas R-Car and RZ/G DMA Controller bindings documentation to json-schema. Signed-off-by: Yoshihiro Shimoda Reviewed-by: Geert Uytterhoeven Link: https://lore.kernel.org/r/1587110829-26609-2-git-send-email-yoshihiro.shimoda.uh@renesas.com Signed-off-by: Vinod Koul --- .../bindings/dma/renesas,rcar-dmac.txt | 117 -------------- .../bindings/dma/renesas,rcar-dmac.yaml | 150 ++++++++++++++++++ 2 files changed, 150 insertions(+), 117 deletions(-) delete mode 100644 Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt create mode 100644 Documentation/devicetree/bindings/dma/renesas,rcar-dmac.yaml diff --git a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt deleted file mode 100644 index b7f81c63be8bd..0000000000000 --- a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt +++ /dev/null @@ -1,117 +0,0 @@ -* Renesas R-Car (RZ/G) DMA Controller Device Tree bindings - -Renesas R-Car (Gen 2/3) and RZ/G SoCs have multiple multi-channel DMA -controller instances named DMAC capable of serving multiple clients. Channels -can be dedicated to specific clients or shared between a large number of -clients. - -Each DMA client is connected to one dedicated port of the DMAC, identified by -an 8-bit port number called the MID/RID. A DMA controller can thus serve up to -256 clients in total. When the number of hardware channels is lower than the -number of clients to be served, channels must be shared between multiple DMA -clients. The association of DMA clients to DMAC channels is fully dynamic and -not described in these device tree bindings. - -Required Properties: - -- compatible: "renesas,dmac-", "renesas,rcar-dmac" as fallback. 
- Examples with soctypes are: - - "renesas,dmac-r8a7743" (RZ/G1M) - - "renesas,dmac-r8a7744" (RZ/G1N) - - "renesas,dmac-r8a7745" (RZ/G1E) - - "renesas,dmac-r8a77470" (RZ/G1C) - - "renesas,dmac-r8a774a1" (RZ/G2M) - - "renesas,dmac-r8a774b1" (RZ/G2N) - - "renesas,dmac-r8a774c0" (RZ/G2E) - - "renesas,dmac-r8a7790" (R-Car H2) - - "renesas,dmac-r8a7791" (R-Car M2-W) - - "renesas,dmac-r8a7792" (R-Car V2H) - - "renesas,dmac-r8a7793" (R-Car M2-N) - - "renesas,dmac-r8a7794" (R-Car E2) - - "renesas,dmac-r8a7795" (R-Car H3) - - "renesas,dmac-r8a7796" (R-Car M3-W) - - "renesas,dmac-r8a77961" (R-Car M3-W+) - - "renesas,dmac-r8a77965" (R-Car M3-N) - - "renesas,dmac-r8a77970" (R-Car V3M) - - "renesas,dmac-r8a77980" (R-Car V3H) - - "renesas,dmac-r8a77990" (R-Car E3) - - "renesas,dmac-r8a77995" (R-Car D3) - -- reg: base address and length of the registers block for the DMAC - -- interrupts: interrupt specifiers for the DMAC, one for each entry in - interrupt-names. -- interrupt-names: one entry for the error interrupt, named "error", plus one - entry per channel, named "ch%u", where %u is the channel number ranging from - zero to the number of channels minus one. - -- clock-names: "fck" for the functional clock -- clocks: a list of phandle + clock-specifier pairs, one for each entry - in clock-names. -- clock-names: must contain "fck" for the functional clock. - -- #dma-cells: must be <1>, the cell specifies the MID/RID of the DMAC port - connected to the DMA client -- dma-channels: number of DMA channels - -Example: R8A7790 (R-Car H2) SYS-DMACs - - dmac0: dma-controller@e6700000 { - compatible = "renesas,dmac-r8a7790", "renesas,rcar-dmac"; - reg = <0 0xe6700000 0 0x20000>; - interrupts = <0 197 IRQ_TYPE_LEVEL_HIGH - 0 200 IRQ_TYPE_LEVEL_HIGH - 0 201 IRQ_TYPE_LEVEL_HIGH - 0 202 IRQ_TYPE_LEVEL_HIGH - 0 203 IRQ_TYPE_LEVEL_HIGH - 0 204 IRQ_TYPE_LEVEL_HIGH - 0 205 IRQ_TYPE_LEVEL_HIGH - 0 206 IRQ_TYPE_LEVEL_HIGH - 0 207 IRQ_TYPE_LEVEL_HIGH - 0 208 IRQ_TYPE_LEVEL_HIGH - 0 209 IRQ_TYPE_LEVEL_HIGH - 0 210 IRQ_TYPE_LEVEL_HIGH - 0 211 IRQ_TYPE_LEVEL_HIGH - 0 212 IRQ_TYPE_LEVEL_HIGH - 0 213 IRQ_TYPE_LEVEL_HIGH - 0 214 IRQ_TYPE_LEVEL_HIGH>; - interrupt-names = "error", - "ch0", "ch1", "ch2", "ch3", - "ch4", "ch5", "ch6", "ch7", - "ch8", "ch9", "ch10", "ch11", - "ch12", "ch13", "ch14"; - clocks = <&mstp2_clks R8A7790_CLK_SYS_DMAC0>; - clock-names = "fck"; - #dma-cells = <1>; - dma-channels = <15>; - }; - - dmac1: dma-controller@e6720000 { - compatible = "renesas,dmac-r8a7790", "renesas,rcar-dmac"; - reg = <0 0xe6720000 0 0x20000>; - interrupts = <0 220 IRQ_TYPE_LEVEL_HIGH - 0 216 IRQ_TYPE_LEVEL_HIGH - 0 217 IRQ_TYPE_LEVEL_HIGH - 0 218 IRQ_TYPE_LEVEL_HIGH - 0 219 IRQ_TYPE_LEVEL_HIGH - 0 308 IRQ_TYPE_LEVEL_HIGH - 0 309 IRQ_TYPE_LEVEL_HIGH - 0 310 IRQ_TYPE_LEVEL_HIGH - 0 311 IRQ_TYPE_LEVEL_HIGH - 0 312 IRQ_TYPE_LEVEL_HIGH - 0 313 IRQ_TYPE_LEVEL_HIGH - 0 314 IRQ_TYPE_LEVEL_HIGH - 0 315 IRQ_TYPE_LEVEL_HIGH - 0 316 IRQ_TYPE_LEVEL_HIGH - 0 317 IRQ_TYPE_LEVEL_HIGH - 0 318 IRQ_TYPE_LEVEL_HIGH>; - interrupt-names = "error", - "ch0", "ch1", "ch2", "ch3", - "ch4", "ch5", "ch6", "ch7", - "ch8", "ch9", "ch10", "ch11", - "ch12", "ch13", "ch14"; - clocks = <&mstp2_clks R8A7790_CLK_SYS_DMAC1>; - clock-names = "fck"; - #dma-cells = <1>; - dma-channels = <15>; - }; diff --git a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.yaml b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.yaml new file mode 100644 index 0000000000000..b842dfd96a897 --- /dev/null +++ b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.yaml @@ 
-0,0 +1,150 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/dma/renesas,rcar-dmac.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Renesas R-Car and RZ/G DMA Controller + +maintainers: + - Yoshihiro Shimoda + +allOf: + - $ref: "dma-controller.yaml#" + +properties: + compatible: + items: + - enum: + - renesas,dmac-r8a7743 # RZ/G1M + - renesas,dmac-r8a7744 # RZ/G1N + - renesas,dmac-r8a7745 # RZ/G1E + - renesas,dmac-r8a77470 # RZ/G1C + - renesas,dmac-r8a774a1 # RZ/G2M + - renesas,dmac-r8a774b1 # RZ/G2N + - renesas,dmac-r8a774c0 # RZ/G2E + - renesas,dmac-r8a7790 # R-Car H2 + - renesas,dmac-r8a7791 # R-Car M2-W + - renesas,dmac-r8a7792 # R-Car V2H + - renesas,dmac-r8a7793 # R-Car M2-N + - renesas,dmac-r8a7794 # R-Car E2 + - renesas,dmac-r8a7795 # R-Car H3 + - renesas,dmac-r8a7796 # R-Car M3-W + - renesas,dmac-r8a77961 # R-Car M3-W+ + - renesas,dmac-r8a77965 # R-Car M3-N + - renesas,dmac-r8a77970 # R-Car V3M + - renesas,dmac-r8a77980 # R-Car V3H + - renesas,dmac-r8a77990 # R-Car E3 + - renesas,dmac-r8a77995 # R-Car D3 + - const: renesas,rcar-dmac + + reg: + maxItems: 1 + + interrupts: + minItems: 9 + maxItems: 17 + + interrupt-names: + minItems: 9 + maxItems: 17 + items: + - const: error + - pattern: "^ch([0-9]|1[0-5])$" + - pattern: "^ch([0-9]|1[0-5])$" + - pattern: "^ch([0-9]|1[0-5])$" + - pattern: "^ch([0-9]|1[0-5])$" + - pattern: "^ch([0-9]|1[0-5])$" + - pattern: "^ch([0-9]|1[0-5])$" + - pattern: "^ch([0-9]|1[0-5])$" + - pattern: "^ch([0-9]|1[0-5])$" + - pattern: "^ch([0-9]|1[0-5])$" + - pattern: "^ch([0-9]|1[0-5])$" + - pattern: "^ch([0-9]|1[0-5])$" + - pattern: "^ch([0-9]|1[0-5])$" + - pattern: "^ch([0-9]|1[0-5])$" + - pattern: "^ch([0-9]|1[0-5])$" + - pattern: "^ch([0-9]|1[0-5])$" + - pattern: "^ch([0-9]|1[0-5])$" + + clocks: + maxItems: 1 + + clock-names: + maxItems: 1 + items: + - const: fck + + '#dma-cells': + const: 1 + description: + The cell specifies the MID/RID of the DMAC port connected to + the DMA client. + + dma-channels: + minimum: 8 + maximum: 16 + + dma-channel-mask: true + + iommus: + minItems: 8 + maxItems: 16 + + power-domains: + maxItems: 1 + + resets: + maxItems: 1 + +required: + - compatible + - reg + - interrupts + - interrupt-names + - clocks + - clock-names + - '#dma-cells' + - dma-channels + - power-domains + - resets + +additionalProperties: false + +examples: + - | + #include + #include + #include + + dmac0: dma-controller@e6700000 { + compatible = "renesas,dmac-r8a7790", "renesas,rcar-dmac"; + reg = <0xe6700000 0x20000>; + interrupts = , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + interrupt-names = "error", + "ch0", "ch1", "ch2", "ch3", + "ch4", "ch5", "ch6", "ch7", + "ch8", "ch9", "ch10", "ch11", + "ch12", "ch13", "ch14"; + clocks = <&cpg CPG_MOD 219>; + clock-names = "fck"; + power-domains = <&sysc R8A7790_PD_ALWAYS_ON>; + resets = <&cpg 219>; + #dma-cells = <1>; + dma-channels = <15>; + }; -- GitLab From b3cb14310eb4b0355291c89a3807336be576f0b3 Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Fri, 17 Apr 2020 17:07:09 +0900 Subject: [PATCH 0138/1971] dt-bindings: dma: renesas,usb-dmac: convert bindings to json-schema Convert Renesas R-Car USB-DMA Controller bindings documentation to json-schema. 
Signed-off-by: Yoshihiro Shimoda Reviewed-by: Geert Uytterhoeven Link: https://lore.kernel.org/r/1587110829-26609-3-git-send-email-yoshihiro.shimoda.uh@renesas.com Signed-off-by: Vinod Koul --- .../bindings/dma/renesas,usb-dmac.txt | 55 ---------- .../bindings/dma/renesas,usb-dmac.yaml | 102 ++++++++++++++++++ 2 files changed, 102 insertions(+), 55 deletions(-) delete mode 100644 Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt create mode 100644 Documentation/devicetree/bindings/dma/renesas,usb-dmac.yaml diff --git a/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt deleted file mode 100644 index e8f6c42e80f2d..0000000000000 --- a/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt +++ /dev/null @@ -1,55 +0,0 @@ -* Renesas USB DMA Controller Device Tree bindings - -Required Properties: --compatible: "renesas,-usb-dmac", "renesas,usb-dmac" as fallback. - Examples with soctypes are: - - "renesas,r8a7743-usb-dmac" (RZ/G1M) - - "renesas,r8a7744-usb-dmac" (RZ/G1N) - - "renesas,r8a7745-usb-dmac" (RZ/G1E) - - "renesas,r8a77470-usb-dmac" (RZ/G1C) - - "renesas,r8a774a1-usb-dmac" (RZ/G2M) - - "renesas,r8a774b1-usb-dmac" (RZ/G2N) - - "renesas,r8a774c0-usb-dmac" (RZ/G2E) - - "renesas,r8a7790-usb-dmac" (R-Car H2) - - "renesas,r8a7791-usb-dmac" (R-Car M2-W) - - "renesas,r8a7793-usb-dmac" (R-Car M2-N) - - "renesas,r8a7794-usb-dmac" (R-Car E2) - - "renesas,r8a7795-usb-dmac" (R-Car H3) - - "renesas,r8a7796-usb-dmac" (R-Car M3-W) - - "renesas,r8a77961-usb-dmac" (R-Car M3-W+) - - "renesas,r8a77965-usb-dmac" (R-Car M3-N) - - "renesas,r8a77990-usb-dmac" (R-Car E3) - - "renesas,r8a77995-usb-dmac" (R-Car D3) -- reg: base address and length of the registers block for the DMAC -- interrupts: interrupt specifiers for the DMAC, one for each entry in - interrupt-names. -- interrupt-names: one entry per channel, named "ch%u", where %u is the - channel number ranging from zero to the number of channels minus one. -- clocks: a list of phandle + clock-specifier pairs. -- #dma-cells: must be <1>, the cell specifies the channel number of the DMAC - port connected to the DMA client. 
-- dma-channels: number of DMA channels - -Example: R8A7790 (R-Car H2) USB-DMACs - - usb_dmac0: dma-controller@e65a0000 { - compatible = "renesas,r8a7790-usb-dmac", "renesas,usb-dmac"; - reg = <0 0xe65a0000 0 0x100>; - interrupts = <0 109 IRQ_TYPE_LEVEL_HIGH - 0 109 IRQ_TYPE_LEVEL_HIGH>; - interrupt-names = "ch0", "ch1"; - clocks = <&mstp3_clks R8A7790_CLK_USBDMAC0>; - #dma-cells = <1>; - dma-channels = <2>; - }; - - usb_dmac1: dma-controller@e65b0000 { - compatible = "renesas,usb-dmac"; - reg = <0 0xe65b0000 0 0x100>; - interrupts = <0 110 IRQ_TYPE_LEVEL_HIGH - 0 110 IRQ_TYPE_LEVEL_HIGH>; - interrupt-names = "ch0", "ch1"; - clocks = <&mstp3_clks R8A7790_CLK_USBDMAC1>; - #dma-cells = <1>; - dma-channels = <2>; - }; diff --git a/Documentation/devicetree/bindings/dma/renesas,usb-dmac.yaml b/Documentation/devicetree/bindings/dma/renesas,usb-dmac.yaml new file mode 100644 index 0000000000000..9ca6d8ddf2320 --- /dev/null +++ b/Documentation/devicetree/bindings/dma/renesas,usb-dmac.yaml @@ -0,0 +1,102 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/dma/renesas,usb-dmac.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Renesas USB DMA Controller + +maintainers: + - Yoshihiro Shimoda + +allOf: + - $ref: "dma-controller.yaml#" + +properties: + compatible: + items: + - enum: + - renesas,r8a7743-usb-dmac # RZ/G1M + - renesas,r8a7744-usb-dmac # RZ/G1N + - renesas,r8a7745-usb-dmac # RZ/G1E + - renesas,r8a77470-usb-dmac # RZ/G1C + - renesas,r8a774a1-usb-dmac # RZ/G2M + - renesas,r8a774b1-usb-dmac # RZ/G2N + - renesas,r8a774c0-usb-dmac # RZ/G2E + - renesas,r8a7790-usb-dmac # R-Car H2 + - renesas,r8a7791-usb-dmac # R-Car M2-W + - renesas,r8a7793-usb-dmac # R-Car M2-N + - renesas,r8a7794-usb-dmac # R-Car E2 + - renesas,r8a7795-usb-dmac # R-Car H3 + - renesas,r8a7796-usb-dmac # R-Car M3-W + - renesas,r8a77961-usb-dmac # R-Car M3-W+ + - renesas,r8a77965-usb-dmac # R-Car M3-N + - renesas,r8a77990-usb-dmac # R-Car E3 + - renesas,r8a77995-usb-dmac # R-Car D3 + - const: renesas,usb-dmac + + reg: + maxItems: 1 + + interrupts: + minItems: 2 + maxItems: 2 + + interrupt-names: + items: + - pattern: ch0 + - pattern: ch1 + + clocks: + maxItems: 1 + + '#dma-cells': + const: 1 + description: + The cell specifies the channel number of the DMAC port connected to + the DMA client. + + dma-channels: + const: 2 + + iommus: + minItems: 2 + maxItems: 2 + + power-domains: + maxItems: 1 + + resets: + maxItems: 1 + +required: + - compatible + - reg + - interrupts + - interrupt-names + - clocks + - '#dma-cells' + - dma-channels + - power-domains + - resets + +additionalProperties: false + +examples: + - | + #include + #include + #include + + usb_dmac0: dma-controller@e65a0000 { + compatible = "renesas,r8a7790-usb-dmac", "renesas,usb-dmac"; + reg = <0xe65a0000 0x100>; + interrupts = , + ; + interrupt-names = "ch0", "ch1"; + clocks = <&cpg CPG_MOD 330>; + power-domains = <&sysc R8A7790_PD_ALWAYS_ON>; + resets = <&cpg 330>; + #dma-cells = <1>; + dma-channels = <2>; + }; -- GitLab From 2fea2906b5cbeffe49911e4735451af62fc121b4 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 16 Apr 2020 12:30:54 +0200 Subject: [PATCH 0139/1971] dmaengine: Fix misspelling of "Analog Devices" According to https://www.analog.com/, the company name is spelled "Analog Devices". 
Signed-off-by: Geert Uytterhoeven Reviewed-by: Alexandru Ardelean Link: https://lore.kernel.org/r/20200416103058.15269-3-geert+renesas@glider.be [vkoul: make subsystem name dmaengine] Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 0924836443152..c35c0e03b40f0 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -106,7 +106,7 @@ config AXI_DMAC select REGMAP_MMIO help Enable support for the Analog Devices AXI-DMAC peripheral. This DMA - controller is often used in Analog Device's reference designs for FPGA + controller is often used in Analog Devices' reference designs for FPGA platforms. config BCM_SBA_RAID -- GitLab From bd2bf302eef21aafa6da2cf829b87a9e33150658 Mon Sep 17 00:00:00 2001 From: Leonid Ravich Date: Thu, 16 Apr 2020 20:06:21 +0300 Subject: [PATCH 0140/1971] dmaengine: ioat: fixing chunk sizing macros dependency changing macros which assumption is chunk size of 2M, which can be other size prepare for changing allocation chunk size. Acked-by: Dave Jiang Signed-off-by: Leonid Ravich Link: https://lore.kernel.org/r/20200416170628.16196-1-leonid.ravich@dell.com Signed-off-by: Vinod Koul --- drivers/dma/ioat/dma.c | 14 ++++++++------ drivers/dma/ioat/dma.h | 10 ++++++---- drivers/dma/ioat/init.c | 2 +- 3 files changed, 15 insertions(+), 11 deletions(-) diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 18c011e57592e..1e0e6c1d55336 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -332,8 +332,8 @@ ioat_alloc_ring_ent(struct dma_chan *chan, int idx, gfp_t flags) u8 *pos; off_t offs; - chunk = idx / IOAT_DESCS_PER_2M; - idx &= (IOAT_DESCS_PER_2M - 1); + chunk = idx / IOAT_DESCS_PER_CHUNK; + idx &= (IOAT_DESCS_PER_CHUNK - 1); offs = idx * IOAT_DESC_SZ; pos = (u8 *)ioat_chan->descs[chunk].virt + offs; phys = ioat_chan->descs[chunk].hw + offs; @@ -370,7 +370,8 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags) if (!ring) return NULL; - ioat_chan->desc_chunks = chunks = (total_descs * IOAT_DESC_SZ) / SZ_2M; + chunks = (total_descs * IOAT_DESC_SZ) / IOAT_CHUNK_SIZE; + ioat_chan->desc_chunks = chunks; for (i = 0; i < chunks; i++) { struct ioat_descs *descs = &ioat_chan->descs[i]; @@ -382,8 +383,9 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags) for (idx = 0; idx < i; idx++) { descs = &ioat_chan->descs[idx]; - dma_free_coherent(to_dev(ioat_chan), SZ_2M, - descs->virt, descs->hw); + dma_free_coherent(to_dev(ioat_chan), + IOAT_CHUNK_SIZE, + descs->virt, descs->hw); descs->virt = NULL; descs->hw = 0; } @@ -404,7 +406,7 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags) for (idx = 0; idx < ioat_chan->desc_chunks; idx++) { dma_free_coherent(to_dev(ioat_chan), - SZ_2M, + IOAT_CHUNK_SIZE, ioat_chan->descs[idx].virt, ioat_chan->descs[idx].hw); ioat_chan->descs[idx].virt = NULL; diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index b8e8e0b9693c7..5216c6b7c99e0 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -81,6 +81,11 @@ struct ioatdma_device { u32 msixpba; }; +#define IOAT_MAX_ORDER 16 +#define IOAT_MAX_DESCS (1 << IOAT_MAX_ORDER) +#define IOAT_CHUNK_SIZE (SZ_2M) +#define IOAT_DESCS_PER_CHUNK (IOAT_CHUNK_SIZE / IOAT_DESC_SZ) + struct ioat_descs { void *virt; dma_addr_t hw; @@ -128,7 +133,7 @@ struct ioatdma_chan { u16 produce; struct ioat_ring_ent **ring; spinlock_t prep_lock; - struct ioat_descs descs[2]; + struct ioat_descs descs[IOAT_MAX_DESCS / IOAT_DESCS_PER_CHUNK]; 
int desc_chunks; int intr_coalesce; int prev_intr_coalesce; @@ -301,9 +306,6 @@ static inline bool is_ioat_bug(unsigned long err) return !!err; } -#define IOAT_MAX_ORDER 16 -#define IOAT_MAX_DESCS 65536 -#define IOAT_DESCS_PER_2M 32768 static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan) { diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c index 60e9afbb896c3..58d13564f88b4 100644 --- a/drivers/dma/ioat/init.c +++ b/drivers/dma/ioat/init.c @@ -651,7 +651,7 @@ static void ioat_free_chan_resources(struct dma_chan *c) } for (i = 0; i < ioat_chan->desc_chunks; i++) { - dma_free_coherent(to_dev(ioat_chan), SZ_2M, + dma_free_coherent(to_dev(ioat_chan), IOAT_CHUNK_SIZE, ioat_chan->descs[i].virt, ioat_chan->descs[i].hw); ioat_chan->descs[i].virt = NULL; -- GitLab From a02254f8a676f5fdb98ebeb6cc420a71e652574a Mon Sep 17 00:00:00 2001 From: Leonid Ravich Date: Thu, 16 Apr 2020 20:06:22 +0300 Subject: [PATCH 0141/1971] dmaengine: ioat: Decreasing allocation chunk size 2M->512K requreing kmalloc of 2M high chance to fail in fragmented memory. IOAT ring requires 64k * 64B memory which will be achived by 512k * 8 allocation instead of 2M * 2. Acked-by: Dave Jiang Signed-off-by: Leonid Ravich Link: https://lore.kernel.org/r/20200416170628.16196-2-leonid.ravich@dell.com Signed-off-by: Vinod Koul --- drivers/dma/ioat/dma.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 5216c6b7c99e0..e6b622e1ba92e 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -83,7 +83,7 @@ struct ioatdma_device { #define IOAT_MAX_ORDER 16 #define IOAT_MAX_DESCS (1 << IOAT_MAX_ORDER) -#define IOAT_CHUNK_SIZE (SZ_2M) +#define IOAT_CHUNK_SIZE (SZ_512K) #define IOAT_DESCS_PER_CHUNK (IOAT_CHUNK_SIZE / IOAT_DESC_SZ) struct ioat_descs { -- GitLab From 2bc4bea33848e4c5f441fcec4a94497082c1cc9f Mon Sep 17 00:00:00 2001 From: Daeho Jeong Date: Mon, 30 Mar 2020 03:30:59 +0000 Subject: [PATCH 0142/1971] f2fs: add tracepoint for f2fs iostat Added a tracepoint to see iostat of f2fs. Default period of that is 3 second. This tracepoint can be used to be monitoring I/O statistics periodically. Signed-off-by: Daeho Jeong Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- Documentation/ABI/testing/sysfs-fs-f2fs | 6 +++ fs/f2fs/f2fs.h | 16 +++++++- fs/f2fs/super.c | 1 + fs/f2fs/sysfs.c | 39 +++++++++++++++++++ include/trace/events/f2fs.h | 52 +++++++++++++++++++++++++ 5 files changed, 113 insertions(+), 1 deletion(-) diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs index c8620ea7022a7..427f5b45c67f1 100644 --- a/Documentation/ABI/testing/sysfs-fs-f2fs +++ b/Documentation/ABI/testing/sysfs-fs-f2fs @@ -332,3 +332,9 @@ Description: Give a way to attach REQ_META|FUA to data writes * REQ_META | REQ_FUA | * 5 | 4 | 3 | 2 | 1 | 0 | * Cold | Warm | Hot | Cold | Warm | Hot | + +What: /sys/fs/f2fs//iostat_period_ms +Date: April 2020 +Contact: "Daeho Jeong" +Description: Give a way to change iostat_period time. 3secs by default. + The new iostat trace gives stats gap given the period. 
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index c2788738aa0d4..6cedbfb2067c5 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -1505,7 +1505,10 @@ struct f2fs_sb_info { /* For app/fs IO statistics */ spinlock_t iostat_lock; unsigned long long write_iostat[NR_IO_TYPE]; + unsigned long long prev_write_iostat[NR_IO_TYPE]; bool iostat_enable; + unsigned long iostat_next_period; + unsigned int iostat_period_ms; /* to attach REQ_META|REQ_FUA flags */ unsigned int data_io_flag; @@ -2999,16 +3002,25 @@ static inline int get_inline_xattr_addrs(struct inode *inode) sizeof((f2fs_inode)->field)) \ <= (F2FS_OLD_ATTRIBUTE_SIZE + (extra_isize))) \ +#define DEFAULT_IOSTAT_PERIOD_MS 3000 +#define MIN_IOSTAT_PERIOD_MS 100 +/* maximum period of iostat tracing is 1 day */ +#define MAX_IOSTAT_PERIOD_MS 8640000 + static inline void f2fs_reset_iostat(struct f2fs_sb_info *sbi) { int i; spin_lock(&sbi->iostat_lock); - for (i = 0; i < NR_IO_TYPE; i++) + for (i = 0; i < NR_IO_TYPE; i++) { sbi->write_iostat[i] = 0; + sbi->prev_write_iostat[i] = 0; + } spin_unlock(&sbi->iostat_lock); } +extern void f2fs_record_iostat(struct f2fs_sb_info *sbi); + static inline void f2fs_update_iostat(struct f2fs_sb_info *sbi, enum iostat_type type, unsigned long long io_bytes) { @@ -3022,6 +3034,8 @@ static inline void f2fs_update_iostat(struct f2fs_sb_info *sbi, sbi->write_iostat[APP_WRITE_IO] - sbi->write_iostat[APP_DIRECT_IO]; spin_unlock(&sbi->iostat_lock); + + f2fs_record_iostat(sbi); } #define __is_large_section(sbi) ((sbi)->segs_per_sec > 1) diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index f2dfc21c6abb0..438296e17183d 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -3424,6 +3424,7 @@ try_onemore: /* init iostat info */ spin_lock_init(&sbi->iostat_lock); sbi->iostat_enable = false; + sbi->iostat_period_ms = DEFAULT_IOSTAT_PERIOD_MS; for (i = 0; i < NR_PAGE_TYPE; i++) { int n = (i == META) ? 
1: NR_TEMP_TYPE; diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c index aeebfb5024a22..d05cb68c26374 100644 --- a/fs/f2fs/sysfs.c +++ b/fs/f2fs/sysfs.c @@ -15,6 +15,7 @@ #include "f2fs.h" #include "segment.h" #include "gc.h" +#include static struct proc_dir_entry *f2fs_proc_root; @@ -379,6 +380,15 @@ out: return count; } + if (!strcmp(a->attr.name, "iostat_period_ms")) { + if (t < MIN_IOSTAT_PERIOD_MS || t > MAX_IOSTAT_PERIOD_MS) + return -EINVAL; + spin_lock(&sbi->iostat_lock); + sbi->iostat_period_ms = (unsigned int)t; + spin_unlock(&sbi->iostat_lock); + return count; + } + *ui = (unsigned int)t; return count; @@ -535,6 +545,7 @@ F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_idle_interval, interval_time[GC_TIME]); F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, umount_discard_timeout, interval_time[UMOUNT_DISCARD_TIMEOUT]); F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, iostat_enable, iostat_enable); +F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, iostat_period_ms, iostat_period_ms); F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, readdir_ra, readdir_ra); F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_pin_file_thresh, gc_pin_file_threshold); F2FS_RW_ATTR(F2FS_SBI, f2fs_super_block, extension_list, extension_list); @@ -615,6 +626,7 @@ static struct attribute *f2fs_attrs[] = { ATTR_LIST(gc_idle_interval), ATTR_LIST(umount_discard_timeout), ATTR_LIST(iostat_enable), + ATTR_LIST(iostat_period_ms), ATTR_LIST(readdir_ra), ATTR_LIST(gc_pin_file_thresh), ATTR_LIST(extension_list), @@ -751,6 +763,33 @@ static int __maybe_unused segment_bits_seq_show(struct seq_file *seq, return 0; } +void f2fs_record_iostat(struct f2fs_sb_info *sbi) +{ + unsigned long long iostat_diff[NR_IO_TYPE]; + int i; + + if (time_is_after_jiffies(sbi->iostat_next_period)) + return; + + /* Need double check under the lock */ + spin_lock(&sbi->iostat_lock); + if (time_is_after_jiffies(sbi->iostat_next_period)) { + spin_unlock(&sbi->iostat_lock); + return; + } + sbi->iostat_next_period = jiffies + + msecs_to_jiffies(sbi->iostat_period_ms); + + for (i = 0; i < NR_IO_TYPE; i++) { + iostat_diff[i] = sbi->write_iostat[i] - + sbi->prev_write_iostat[i]; + sbi->prev_write_iostat[i] = sbi->write_iostat[i]; + } + spin_unlock(&sbi->iostat_lock); + + trace_f2fs_iostat(sbi, iostat_diff); +} + static int __maybe_unused iostat_info_seq_show(struct seq_file *seq, void *offset) { diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index d97adfc327f03..e78c8696e2adc 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -1812,6 +1812,58 @@ DEFINE_EVENT(f2fs_zip_end, f2fs_decompress_pages_end, TP_ARGS(inode, cluster_idx, compressed_size, ret) ); +TRACE_EVENT(f2fs_iostat, + + TP_PROTO(struct f2fs_sb_info *sbi, unsigned long long *iostat), + + TP_ARGS(sbi, iostat), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(unsigned long long, app_dio) + __field(unsigned long long, app_bio) + __field(unsigned long long, app_wio) + __field(unsigned long long, app_mio) + __field(unsigned long long, fs_dio) + __field(unsigned long long, fs_nio) + __field(unsigned long long, fs_mio) + __field(unsigned long long, fs_gc_dio) + __field(unsigned long long, fs_gc_nio) + __field(unsigned long long, fs_cp_dio) + __field(unsigned long long, fs_cp_nio) + __field(unsigned long long, fs_cp_mio) + __field(unsigned long long, fs_discard) + ), + + TP_fast_assign( + __entry->dev = sbi->sb->s_dev; + __entry->app_dio = iostat[APP_DIRECT_IO]; + __entry->app_bio = iostat[APP_BUFFERED_IO]; + __entry->app_wio = iostat[APP_WRITE_IO]; + __entry->app_mio = iostat[APP_MAPPED_IO]; + 
__entry->fs_dio = iostat[FS_DATA_IO]; + __entry->fs_nio = iostat[FS_NODE_IO]; + __entry->fs_mio = iostat[FS_META_IO]; + __entry->fs_gc_dio = iostat[FS_GC_DATA_IO]; + __entry->fs_gc_nio = iostat[FS_GC_NODE_IO]; + __entry->fs_cp_dio = iostat[FS_CP_DATA_IO]; + __entry->fs_cp_nio = iostat[FS_CP_NODE_IO]; + __entry->fs_cp_mio = iostat[FS_CP_META_IO]; + __entry->fs_discard = iostat[FS_DISCARD]; + ), + + TP_printk("dev = (%d,%d), " + "app [write=%llu (direct=%llu, buffered=%llu), mapped=%llu], " + "fs [data=%llu, node=%llu, meta=%llu, discard=%llu], " + "gc [data=%llu, node=%llu], " + "cp [data=%llu, node=%llu, meta=%llu]", + show_dev(__entry->dev), __entry->app_wio, __entry->app_dio, + __entry->app_bio, __entry->app_mio, __entry->fs_dio, + __entry->fs_nio, __entry->fs_mio, __entry->fs_discard, + __entry->fs_gc_dio, __entry->fs_gc_nio, __entry->fs_cp_dio, + __entry->fs_cp_nio, __entry->fs_cp_mio) +); + #endif /* _TRACE_F2FS_H */ /* This part must be outside protection */ -- GitLab From 141af6ba5216d4d49de683582b600d5c9b51792c Mon Sep 17 00:00:00 2001 From: Sahitya Tummala Date: Wed, 15 Apr 2020 14:37:53 +0530 Subject: [PATCH 0143/1971] f2fs: fix long latency due to discard during umount F2FS already has a default timeout of 5 secs for discards that can be issued during umount, but it can take more than the 5 sec timeout if the underlying UFS device queue is already full and there are no more available free tags to be used. Fix this by submitting a small batch of discard requests so that it won't cause the device queue to be full at any time and thus doesn't incur its wait time in the umount context. Signed-off-by: Sahitya Tummala Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/segment.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index b7a9421472a79..90c75822fef62 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -1101,7 +1101,6 @@ static void __init_discard_policy(struct f2fs_sb_info *sbi, } else if (discard_type == DPOLICY_FSTRIM) { dpolicy->io_aware = false; } else if (discard_type == DPOLICY_UMOUNT) { - dpolicy->max_requests = UINT_MAX; dpolicy->io_aware = false; /* we need to issue all to keep CP_TRIMMED_FLAG */ dpolicy->granularity = 1; @@ -1463,6 +1462,8 @@ next: return issued; } +static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi, + struct discard_policy *dpolicy); static int __issue_discard_cmd(struct f2fs_sb_info *sbi, struct discard_policy *dpolicy) @@ -1471,12 +1472,14 @@ static int __issue_discard_cmd(struct f2fs_sb_info *sbi, struct list_head *pend_list; struct discard_cmd *dc, *tmp; struct blk_plug plug; - int i, issued = 0; + int i, issued; bool io_interrupted = false; if (dpolicy->timeout) f2fs_update_time(sbi, UMOUNT_DISCARD_TIMEOUT); +retry: + issued = 0; for (i = MAX_PLIST_NUM - 1; i >= 0; i--) { if (dpolicy->timeout && f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT)) @@ -1523,6 +1526,11 @@ next: break; } + if (dpolicy->type == DPOLICY_UMOUNT && issued) { + __wait_all_discard_cmd(sbi, dpolicy); + goto retry; + } + if (!issued && io_interrupted) issued = -1; -- GitLab From 3fa6a8c5b55d063c6a759e0b354f9d7fc09ffbc0 Mon Sep 17 00:00:00 2001 From: Sahitya Tummala Date: Wed, 15 Apr 2020 09:35:54 +0530 Subject: [PATCH 0144/1971] f2fs: report the discard cmd errors properly In case a discard_cmd is split into several bios, the dc->error must not be overwritten once an error is reported by a bio. Also, move it under dc->lock. 
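The rule being enforced is easier to see in isolation; a toy user-space model
(not the f2fs code, with a pthread mutex standing in for dc->lock; compile
with -pthread) looks like this:

#include <pthread.h>
#include <stdio.h>

struct cmd {
    pthread_mutex_t lock;
    int error;    /* first error reported by any bio; 0 if none */
    int bio_ref;  /* outstanding split bios */
};

static void bio_endio(struct cmd *dc, int status)
{
    pthread_mutex_lock(&dc->lock);
    if (!dc->error)           /* keep only the first reported error */
        dc->error = status;
    dc->bio_ref--;
    pthread_mutex_unlock(&dc->lock);
}

int main(void)
{
    struct cmd dc = { .lock = PTHREAD_MUTEX_INITIALIZER,
                      .error = 0, .bio_ref = 3 };

    bio_endio(&dc, 0);    /* bio 1 completes fine               */
    bio_endio(&dc, -5);   /* bio 2 fails: error is recorded     */
    bio_endio(&dc, -22);  /* bio 3 fails too: first error kept  */

    printf("error=%d outstanding=%d\n", dc.error, dc.bio_ref);
    return 0;
}

Without the check, the last completing bio decides dc->error, so a late
success could even hide an earlier failure.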
Signed-off-by: Sahitya Tummala Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/segment.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 90c75822fef62..728ff6e316acd 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -1029,9 +1029,9 @@ static void f2fs_submit_discard_endio(struct bio *bio) struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private; unsigned long flags; - dc->error = blk_status_to_errno(bio->bi_status); - spin_lock_irqsave(&dc->lock, flags); + if (!dc->error) + dc->error = blk_status_to_errno(bio->bi_status); dc->bio_ref--; if (!dc->bio_ref && dc->state == D_SUBMIT) { dc->state = D_DONE; -- GitLab From ce4c638cdd52b302247434daed4c127f258d9860 Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Fri, 10 Apr 2020 18:07:20 +0800 Subject: [PATCH 0145/1971] f2fs: fix to handle error path of f2fs_ra_meta_pages() In f2fs_ra_meta_pages(), if f2fs_submit_page_bio() failed, we need to unlock page, fix it. Signed-off-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/checkpoint.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 852890b72d6ac..6be357c8e0020 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -220,6 +220,7 @@ int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, .is_por = (type == META_POR), }; struct blk_plug plug; + int err; if (unlikely(type == META_POR)) fio.op_flags &= ~REQ_META; @@ -263,8 +264,8 @@ int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, } fio.page = page; - f2fs_submit_page_bio(&fio); - f2fs_put_page(page, 0); + err = f2fs_submit_page_bio(&fio); + f2fs_put_page(page, err ? 1 : 0); } out: blk_finish_plug(&plug); -- GitLab From df423399757531c20d495bf6b8b83a8dcca3565c Mon Sep 17 00:00:00 2001 From: Sahitya Tummala Date: Thu, 16 Apr 2020 11:47:41 +0530 Subject: [PATCH 0146/1971] f2fs: Fix the accounting of dcc->undiscard_blks When a discard_cmd needs to be split due to dpolicy->max_requests, then for the remaining length it will be either merged into another cmd or a new discard_cmd will be created. In this case, there is double accounting of dcc->undiscard_blks for the remaining len, due to which it shows incorrect value in stats. Signed-off-by: Sahitya Tummala Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/segment.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 728ff6e316acd..1c48ec866b8cd 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -1214,8 +1214,10 @@ submit: len = total_len; } - if (!err && len) + if (!err && len) { + dcc->undiscard_blks -= len; __update_discard_tree_range(sbi, bdev, lstart, start, len); + } return err; } -- GitLab From 8b83ac81f4283ae3bd05c9a7e15dca721014dd03 Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Thu, 16 Apr 2020 18:16:56 +0800 Subject: [PATCH 0147/1971] f2fs: support read iostat Adds to support accounting read IOs from userspace/kernel. 
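The scheme mirrors the existing write-side counters: each read path bumps a
per-type byte counter, and the buffered figure is derived as total minus
direct. A toy user-space sketch of that derivation (illustrative only, not
the f2fs code):

#include <stdio.h>

enum { APP_DIRECT_READ, APP_BUFFERED_READ, APP_READ, NR_TYPES };

static unsigned long long iostat[NR_TYPES];

static void update_iostat(int type, unsigned long long bytes)
{
    iostat[type] += bytes;
    /* buffered reads are whatever is left after subtracting direct I/O */
    if (type == APP_READ || type == APP_DIRECT_READ)
        iostat[APP_BUFFERED_READ] =
            iostat[APP_READ] - iostat[APP_DIRECT_READ];
}

int main(void)
{
    update_iostat(APP_READ, 8192);        /* an 8K buffered read completes  */
    update_iostat(APP_READ, 4096);        /* a 4K O_DIRECT read is a read.. */
    update_iostat(APP_DIRECT_READ, 4096); /* ..and is also counted direct   */

    printf("total=%llu direct=%llu buffered=%llu\n",
           iostat[APP_READ], iostat[APP_DIRECT_READ],
           iostat[APP_BUFFERED_READ]);
    return 0;
}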
Signed-off-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/checkpoint.c | 5 ++++ fs/f2fs/data.c | 6 +++++ fs/f2fs/f2fs.h | 37 +++++++++++++++++++------- fs/f2fs/file.c | 12 ++++++++- fs/f2fs/gc.c | 6 +++++ fs/f2fs/node.c | 8 +++++- fs/f2fs/sysfs.c | 52 +++++++++++++++++++++++++------------ include/trace/events/f2fs.h | 23 ++++++++++++++-- 8 files changed, 118 insertions(+), 31 deletions(-) diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 6be357c8e0020..5ba649e17c72b 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -86,6 +86,8 @@ repeat: return ERR_PTR(err); } + f2fs_update_iostat(sbi, FS_META_READ_IO, F2FS_BLKSIZE); + lock_page(page); if (unlikely(page->mapping != mapping)) { f2fs_put_page(page, 1); @@ -266,6 +268,9 @@ int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, fio.page = page; err = f2fs_submit_page_bio(&fio); f2fs_put_page(page, err ? 1 : 0); + + if (!err) + f2fs_update_iostat(sbi, FS_META_READ_IO, F2FS_BLKSIZE); } out: blk_finish_plug(&plug); diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index accd28728642a..199877cb57fe2 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -1033,6 +1033,7 @@ static int f2fs_submit_page_read(struct inode *inode, struct page *page, } ClearPageError(page); inc_page_count(sbi, F2FS_RD_DATA); + f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE); __submit_bio(sbi, bio, DATA); return 0; } @@ -2038,6 +2039,7 @@ submit_and_realloc: goto submit_and_realloc; inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA); + f2fs_update_iostat(F2FS_I_SB(inode), FS_DATA_READ_IO, F2FS_BLKSIZE); ClearPageError(page); *last_block_in_bio = block_nr; goto out; @@ -2173,6 +2175,7 @@ submit_and_realloc: goto submit_and_realloc; inc_page_count(sbi, F2FS_RD_DATA); + f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE); ClearPageError(page); *last_block_in_bio = blkaddr; } @@ -3526,6 +3529,9 @@ static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) } else if (err < 0) { f2fs_write_failed(mapping, offset + count); } + } else { + if (err > 0) + f2fs_update_iostat(sbi, APP_DIRECT_READ_IO, err); } out: diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 6cedbfb2067c5..3b9603266a2aa 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -1088,8 +1088,9 @@ enum cp_reason_type { }; enum iostat_type { - APP_DIRECT_IO, /* app direct IOs */ - APP_BUFFERED_IO, /* app buffered IOs */ + /* WRITE IO */ + APP_DIRECT_IO, /* app direct write IOs */ + APP_BUFFERED_IO, /* app buffered write IOs */ APP_WRITE_IO, /* app write IOs */ APP_MAPPED_IO, /* app mapped IOs */ FS_DATA_IO, /* data IOs from kworker/fsync/reclaimer */ @@ -1100,6 +1101,17 @@ enum iostat_type { FS_CP_DATA_IO, /* data IOs from checkpoint */ FS_CP_NODE_IO, /* node IOs from checkpoint */ FS_CP_META_IO, /* meta IOs from checkpoint */ + + /* READ IO */ + APP_DIRECT_READ_IO, /* app direct read IOs */ + APP_BUFFERED_READ_IO, /* app buffered read IOs */ + APP_READ_IO, /* app read IOs */ + APP_MAPPED_READ_IO, /* app mapped read IOs */ + FS_DATA_READ_IO, /* data read IOs */ + FS_NODE_READ_IO, /* node read IOs */ + FS_META_READ_IO, /* meta read IOs */ + + /* other */ FS_DISCARD, /* discard */ NR_IO_TYPE, }; @@ -1504,8 +1516,8 @@ struct f2fs_sb_info { /* For app/fs IO statistics */ spinlock_t iostat_lock; - unsigned long long write_iostat[NR_IO_TYPE]; - unsigned long long prev_write_iostat[NR_IO_TYPE]; + unsigned long long rw_iostat[NR_IO_TYPE]; + unsigned long long prev_rw_iostat[NR_IO_TYPE]; bool iostat_enable; unsigned long iostat_next_period; unsigned int 
iostat_period_ms; @@ -3013,8 +3025,8 @@ static inline void f2fs_reset_iostat(struct f2fs_sb_info *sbi) spin_lock(&sbi->iostat_lock); for (i = 0; i < NR_IO_TYPE; i++) { - sbi->write_iostat[i] = 0; - sbi->prev_write_iostat[i] = 0; + sbi->rw_iostat[i] = 0; + sbi->prev_rw_iostat[i] = 0; } spin_unlock(&sbi->iostat_lock); } @@ -3027,12 +3039,17 @@ static inline void f2fs_update_iostat(struct f2fs_sb_info *sbi, if (!sbi->iostat_enable) return; spin_lock(&sbi->iostat_lock); - sbi->write_iostat[type] += io_bytes; + sbi->rw_iostat[type] += io_bytes; if (type == APP_WRITE_IO || type == APP_DIRECT_IO) - sbi->write_iostat[APP_BUFFERED_IO] = - sbi->write_iostat[APP_WRITE_IO] - - sbi->write_iostat[APP_DIRECT_IO]; + sbi->rw_iostat[APP_BUFFERED_IO] = + sbi->rw_iostat[APP_WRITE_IO] - + sbi->rw_iostat[APP_DIRECT_IO]; + + if (type == APP_READ_IO || type == APP_DIRECT_READ_IO) + sbi->rw_iostat[APP_BUFFERED_READ_IO] = + sbi->rw_iostat[APP_READ_IO] - + sbi->rw_iostat[APP_DIRECT_READ_IO]; spin_unlock(&sbi->iostat_lock); f2fs_record_iostat(sbi); diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 6ab8f621a3c5a..a0a4413d6083b 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -40,6 +40,10 @@ static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf) ret = filemap_fault(vmf); up_read(&F2FS_I(inode)->i_mmap_sem); + if (!ret) + f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO, + F2FS_BLKSIZE); + trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret); return ret; @@ -3510,11 +3514,17 @@ static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) { struct file *file = iocb->ki_filp; struct inode *inode = file_inode(file); + int ret; if (!f2fs_is_compress_backend_ready(inode)) return -EOPNOTSUPP; - return generic_file_read_iter(iocb, iter); + ret = generic_file_read_iter(iocb, iter); + + if (ret > 0) + f2fs_update_iostat(F2FS_I_SB(inode), APP_READ_IO, ret); + + return ret; } static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index 26248c8936db0..28a8c79c8bdc3 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -737,6 +737,9 @@ got_it: goto put_encrypted_page; f2fs_put_page(fio.encrypted_page, 0); f2fs_put_page(page, 1); + + f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE); + return 0; put_encrypted_page: f2fs_put_page(fio.encrypted_page, 1); @@ -840,6 +843,9 @@ static int move_data_block(struct inode *inode, block_t bidx, f2fs_put_page(mpage, 1); goto up_out; } + + f2fs_update_iostat(fio.sbi, FS_DATA_READ_IO, F2FS_BLKSIZE); + lock_page(mpage); if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) || !PageUptodate(mpage))) { diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index ecbd6bd14a494..4da0d8713df5c 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -1300,7 +1300,13 @@ static int read_node_page(struct page *page, int op_flags) } fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr; - return f2fs_submit_page_bio(&fio); + + err = f2fs_submit_page_bio(&fio); + + if (!err) + f2fs_update_iostat(sbi, FS_NODE_READ_IO, F2FS_BLKSIZE); + + return err; } /* diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c index d05cb68c26374..eaf8088548f08 100644 --- a/fs/f2fs/sysfs.c +++ b/fs/f2fs/sysfs.c @@ -781,9 +781,9 @@ void f2fs_record_iostat(struct f2fs_sb_info *sbi) msecs_to_jiffies(sbi->iostat_period_ms); for (i = 0; i < NR_IO_TYPE; i++) { - iostat_diff[i] = sbi->write_iostat[i] - - sbi->prev_write_iostat[i]; - sbi->prev_write_iostat[i] = sbi->write_iostat[i]; + iostat_diff[i] = sbi->rw_iostat[i] - + sbi->prev_rw_iostat[i]; + 
sbi->prev_rw_iostat[i] = sbi->rw_iostat[i]; } spin_unlock(&sbi->iostat_lock); @@ -802,33 +802,51 @@ static int __maybe_unused iostat_info_seq_show(struct seq_file *seq, seq_printf(seq, "time: %-16llu\n", now); - /* print app IOs */ + /* print app write IOs */ seq_printf(seq, "app buffered: %-16llu\n", - sbi->write_iostat[APP_BUFFERED_IO]); + sbi->rw_iostat[APP_BUFFERED_IO]); seq_printf(seq, "app direct: %-16llu\n", - sbi->write_iostat[APP_DIRECT_IO]); + sbi->rw_iostat[APP_DIRECT_IO]); seq_printf(seq, "app mapped: %-16llu\n", - sbi->write_iostat[APP_MAPPED_IO]); + sbi->rw_iostat[APP_MAPPED_IO]); - /* print fs IOs */ + /* print fs write IOs */ seq_printf(seq, "fs data: %-16llu\n", - sbi->write_iostat[FS_DATA_IO]); + sbi->rw_iostat[FS_DATA_IO]); seq_printf(seq, "fs node: %-16llu\n", - sbi->write_iostat[FS_NODE_IO]); + sbi->rw_iostat[FS_NODE_IO]); seq_printf(seq, "fs meta: %-16llu\n", - sbi->write_iostat[FS_META_IO]); + sbi->rw_iostat[FS_META_IO]); seq_printf(seq, "fs gc data: %-16llu\n", - sbi->write_iostat[FS_GC_DATA_IO]); + sbi->rw_iostat[FS_GC_DATA_IO]); seq_printf(seq, "fs gc node: %-16llu\n", - sbi->write_iostat[FS_GC_NODE_IO]); + sbi->rw_iostat[FS_GC_NODE_IO]); seq_printf(seq, "fs cp data: %-16llu\n", - sbi->write_iostat[FS_CP_DATA_IO]); + sbi->rw_iostat[FS_CP_DATA_IO]); seq_printf(seq, "fs cp node: %-16llu\n", - sbi->write_iostat[FS_CP_NODE_IO]); + sbi->rw_iostat[FS_CP_NODE_IO]); seq_printf(seq, "fs cp meta: %-16llu\n", - sbi->write_iostat[FS_CP_META_IO]); + sbi->rw_iostat[FS_CP_META_IO]); + + /* print app read IOs */ + seq_printf(seq, "app buffered: %-16llu\n", + sbi->rw_iostat[APP_BUFFERED_READ_IO]); + seq_printf(seq, "app direct: %-16llu\n", + sbi->rw_iostat[APP_DIRECT_READ_IO]); + seq_printf(seq, "app mapped: %-16llu\n", + sbi->rw_iostat[APP_MAPPED_READ_IO]); + + /* print fs read IOs */ + seq_printf(seq, "fs data: %-16llu\n", + sbi->rw_iostat[FS_DATA_READ_IO]); + seq_printf(seq, "fs node: %-16llu\n", + sbi->rw_iostat[FS_NODE_READ_IO]); + seq_printf(seq, "fs meta: %-16llu\n", + sbi->rw_iostat[FS_META_READ_IO]); + + /* print other IOs */ seq_printf(seq, "fs discard: %-16llu\n", - sbi->write_iostat[FS_DISCARD]); + sbi->rw_iostat[FS_DISCARD]); return 0; } diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index e78c8696e2adc..417a486f5c8a9 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -1832,6 +1832,13 @@ TRACE_EVENT(f2fs_iostat, __field(unsigned long long, fs_cp_dio) __field(unsigned long long, fs_cp_nio) __field(unsigned long long, fs_cp_mio) + __field(unsigned long long, app_drio) + __field(unsigned long long, app_brio) + __field(unsigned long long, app_rio) + __field(unsigned long long, app_mrio) + __field(unsigned long long, fs_drio) + __field(unsigned long long, fs_nrio) + __field(unsigned long long, fs_mrio) __field(unsigned long long, fs_discard) ), @@ -1849,6 +1856,13 @@ TRACE_EVENT(f2fs_iostat, __entry->fs_cp_dio = iostat[FS_CP_DATA_IO]; __entry->fs_cp_nio = iostat[FS_CP_NODE_IO]; __entry->fs_cp_mio = iostat[FS_CP_META_IO]; + __entry->app_drio = iostat[APP_DIRECT_READ_IO]; + __entry->app_brio = iostat[APP_BUFFERED_READ_IO]; + __entry->app_rio = iostat[APP_READ_IO]; + __entry->app_mrio = iostat[APP_MAPPED_READ_IO]; + __entry->fs_drio = iostat[FS_DATA_READ_IO]; + __entry->fs_nrio = iostat[FS_NODE_READ_IO]; + __entry->fs_mrio = iostat[FS_META_READ_IO]; __entry->fs_discard = iostat[FS_DISCARD]; ), @@ -1856,12 +1870,17 @@ TRACE_EVENT(f2fs_iostat, "app [write=%llu (direct=%llu, buffered=%llu), mapped=%llu], " "fs [data=%llu, 
node=%llu, meta=%llu, discard=%llu], " "gc [data=%llu, node=%llu], " - "cp [data=%llu, node=%llu, meta=%llu]", + "cp [data=%llu, node=%llu, meta=%llu], " + "app [read=%llu (direct=%llu, buffered=%llu), mapped=%llu], " + "fs [data=%llu, node=%llu, meta=%llu]", show_dev(__entry->dev), __entry->app_wio, __entry->app_dio, __entry->app_bio, __entry->app_mio, __entry->fs_dio, __entry->fs_nio, __entry->fs_mio, __entry->fs_discard, __entry->fs_gc_dio, __entry->fs_gc_nio, __entry->fs_cp_dio, - __entry->fs_cp_nio, __entry->fs_cp_mio) + __entry->fs_cp_nio, __entry->fs_cp_mio, + __entry->app_rio, __entry->app_drio, __entry->app_brio, + __entry->app_mrio, __entry->fs_drio, __entry->fs_nrio, + __entry->fs_mrio) ); #endif /* _TRACE_F2FS_H */ -- GitLab From b7ed0496d94143fd5029297f31dbaab90313c570 Mon Sep 17 00:00:00 2001 From: Todor Tomov Date: Tue, 7 Apr 2020 10:33:01 +0200 Subject: [PATCH 0148/1971] dt-bindings: i2c: Add binding for Qualcomm CCI I2C controller Add DT binding document for Qualcomm Camera Control Interface (CCI) I2C controller. Signed-off-by: Todor Tomov Signed-off-by: Vinod Koul Reviewed-by: Rob Herring Reviewed-by: Robert Foss Signed-off-by: Wolfram Sang --- .../devicetree/bindings/i2c/i2c-qcom-cci.txt | 92 +++++++++++++++++++ 1 file changed, 92 insertions(+) create mode 100644 Documentation/devicetree/bindings/i2c/i2c-qcom-cci.txt diff --git a/Documentation/devicetree/bindings/i2c/i2c-qcom-cci.txt b/Documentation/devicetree/bindings/i2c/i2c-qcom-cci.txt new file mode 100644 index 0000000000000..c6668b7c66e62 --- /dev/null +++ b/Documentation/devicetree/bindings/i2c/i2c-qcom-cci.txt @@ -0,0 +1,92 @@ +Qualcomm Camera Control Interface (CCI) I2C controller + +PROPERTIES: + +- compatible: + Usage: required + Value type: + Definition: must be one of: + "qcom,msm8916-cci" + "qcom,msm8996-cci" + "qcom,sdm845-cci" + +- reg + Usage: required + Value type: + Definition: base address CCI I2C controller and length of memory + mapped region. + +- interrupts: + Usage: required + Value type: + Definition: specifies the CCI I2C interrupt. The format of the + specifier is defined by the binding document describing + the node's interrupt parent. + +- clocks: + Usage: required + Value type: + Definition: a list of phandle, should contain an entry for each + entries in clock-names. + +- clock-names + Usage: required + Value type: + Definition: a list of clock names, must include "cci" clock. + +- power-domains + Usage: required for "qcom,msm8996-cci" + Value type: + Definition: + +SUBNODES: + +The CCI provides I2C masters for one (msm8916) or two i2c busses (msm8996 and +sdm845), described as subdevices named "i2c-bus@0" and "i2c-bus@1". + +PROPERTIES: + +- reg: + Usage: required + Value type: + Definition: Index of the CCI bus/master + +- clock-frequency: + Usage: optional + Value type: + Definition: Desired I2C bus clock frequency in Hz, defaults to 100 + kHz if omitted. 
+ +Example: + + cci@a0c000 { + compatible = "qcom,msm8996-cci"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0xa0c000 0x1000>; + interrupts = ; + clocks = <&mmcc MMSS_MMAGIC_AHB_CLK>, + <&mmcc CAMSS_TOP_AHB_CLK>, + <&mmcc CAMSS_CCI_AHB_CLK>, + <&mmcc CAMSS_CCI_CLK>, + <&mmcc CAMSS_AHB_CLK>; + clock-names = "mmss_mmagic_ahb", + "camss_top_ahb", + "cci_ahb", + "cci", + "camss_ahb"; + + i2c-bus@0 { + reg = <0>; + clock-frequency = <400000>; + #address-cells = <1>; + #size-cells = <0>; + }; + + i2c-bus@1 { + reg = <1>; + clock-frequency = <400000>; + #address-cells = <1>; + #size-cells = <0>; + }; + }; -- GitLab From e517526195de400158e05a08764d1fb61d579105 Mon Sep 17 00:00:00 2001 From: Loic Poulain Date: Tue, 7 Apr 2020 10:33:00 +0200 Subject: [PATCH 0149/1971] i2c: Add Qualcomm CCI I2C driver This commit adds I2C bus support for the Camera Control Interface (CCI) I2C controller found on the Qualcomm SoC processors. This I2C controller supports two masters and they are registered to the core. CCI versions supported in the driver are msm8916, msm8996 and sdm845. This is a rework of the patch posted by Vinod: https://patchwork.kernel.org/patch/10569961/ With additional fixes + most of the comments addressed. Signed-off-by: Todor Tomov Signed-off-by: Vinod Koul Signed-off-by: Loic Poulain Tested-by: Robert Foss Reviewed-by: Bjorn Andersson [wsa: removed err msg after platform_get_irq] Signed-off-by: Wolfram Sang --- drivers/i2c/busses/Kconfig | 10 + drivers/i2c/busses/Makefile | 1 + drivers/i2c/busses/i2c-qcom-cci.c | 791 ++++++++++++++++++++++++++++++ 3 files changed, 802 insertions(+) create mode 100644 drivers/i2c/busses/i2c-qcom-cci.c diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 2ddca08f8a76f..70a561bff74a6 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -885,6 +885,16 @@ config I2C_PXA_SLAVE is necessary for systems where the PXA may be a target on the I2C bus. +config I2C_QCOM_CCI + tristate "Qualcomm Camera Control Interface" + depends on ARCH_QCOM || COMPILE_TEST + help + If you say yes to this option, support will be included for the + built-in camera control interface on the Qualcomm SoCs. + + This driver can also be built as a module. If so, the module + will be called i2c-qcom-cci. + config I2C_QCOM_GENI tristate "Qualcomm Technologies Inc.'s GENI based I2C controller" depends on ARCH_QCOM || COMPILE_TEST diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index 25d60889713cd..c852d27370377 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -91,6 +91,7 @@ obj-$(CONFIG_I2C_PNX) += i2c-pnx.o obj-$(CONFIG_I2C_PUV3) += i2c-puv3.o obj-$(CONFIG_I2C_PXA) += i2c-pxa.o obj-$(CONFIG_I2C_PXA_PCI) += i2c-pxa-pci.o +obj-$(CONFIG_I2C_QCOM_CCI) += i2c-qcom-cci.o obj-$(CONFIG_I2C_QCOM_GENI) += i2c-qcom-geni.o obj-$(CONFIG_I2C_QUP) += i2c-qup.o obj-$(CONFIG_I2C_RIIC) += i2c-riic.o diff --git a/drivers/i2c/busses/i2c-qcom-cci.c b/drivers/i2c/busses/i2c-qcom-cci.c new file mode 100644 index 0000000000000..f13735beca584 --- /dev/null +++ b/drivers/i2c/busses/i2c-qcom-cci.c @@ -0,0 +1,791 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. +// Copyright (c) 2017-20 Linaro Limited. 
+ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CCI_HW_VERSION 0x0 +#define CCI_RESET_CMD 0x004 +#define CCI_RESET_CMD_MASK 0x0f73f3f7 +#define CCI_RESET_CMD_M0_MASK 0x000003f1 +#define CCI_RESET_CMD_M1_MASK 0x0003f001 +#define CCI_QUEUE_START 0x008 +#define CCI_HALT_REQ 0x034 +#define CCI_HALT_REQ_I2C_M0_Q0Q1 BIT(0) +#define CCI_HALT_REQ_I2C_M1_Q0Q1 BIT(1) + +#define CCI_I2C_Mm_SCL_CTL(m) (0x100 + 0x100 * (m)) +#define CCI_I2C_Mm_SDA_CTL_0(m) (0x104 + 0x100 * (m)) +#define CCI_I2C_Mm_SDA_CTL_1(m) (0x108 + 0x100 * (m)) +#define CCI_I2C_Mm_SDA_CTL_2(m) (0x10c + 0x100 * (m)) +#define CCI_I2C_Mm_MISC_CTL(m) (0x110 + 0x100 * (m)) + +#define CCI_I2C_Mm_READ_DATA(m) (0x118 + 0x100 * (m)) +#define CCI_I2C_Mm_READ_BUF_LEVEL(m) (0x11c + 0x100 * (m)) +#define CCI_I2C_Mm_Qn_EXEC_WORD_CNT(m, n) (0x300 + 0x200 * (m) + 0x100 * (n)) +#define CCI_I2C_Mm_Qn_CUR_WORD_CNT(m, n) (0x304 + 0x200 * (m) + 0x100 * (n)) +#define CCI_I2C_Mm_Qn_CUR_CMD(m, n) (0x308 + 0x200 * (m) + 0x100 * (n)) +#define CCI_I2C_Mm_Qn_REPORT_STATUS(m, n) (0x30c + 0x200 * (m) + 0x100 * (n)) +#define CCI_I2C_Mm_Qn_LOAD_DATA(m, n) (0x310 + 0x200 * (m) + 0x100 * (n)) + +#define CCI_IRQ_GLOBAL_CLEAR_CMD 0xc00 +#define CCI_IRQ_MASK_0 0xc04 +#define CCI_IRQ_MASK_0_I2C_M0_RD_DONE BIT(0) +#define CCI_IRQ_MASK_0_I2C_M0_Q0_REPORT BIT(4) +#define CCI_IRQ_MASK_0_I2C_M0_Q1_REPORT BIT(8) +#define CCI_IRQ_MASK_0_I2C_M1_RD_DONE BIT(12) +#define CCI_IRQ_MASK_0_I2C_M1_Q0_REPORT BIT(16) +#define CCI_IRQ_MASK_0_I2C_M1_Q1_REPORT BIT(20) +#define CCI_IRQ_MASK_0_RST_DONE_ACK BIT(24) +#define CCI_IRQ_MASK_0_I2C_M0_Q0Q1_HALT_ACK BIT(25) +#define CCI_IRQ_MASK_0_I2C_M1_Q0Q1_HALT_ACK BIT(26) +#define CCI_IRQ_MASK_0_I2C_M0_ERROR 0x18000ee6 +#define CCI_IRQ_MASK_0_I2C_M1_ERROR 0x60ee6000 +#define CCI_IRQ_CLEAR_0 0xc08 +#define CCI_IRQ_STATUS_0 0xc0c +#define CCI_IRQ_STATUS_0_I2C_M0_RD_DONE BIT(0) +#define CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT BIT(4) +#define CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT BIT(8) +#define CCI_IRQ_STATUS_0_I2C_M1_RD_DONE BIT(12) +#define CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT BIT(16) +#define CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT BIT(20) +#define CCI_IRQ_STATUS_0_RST_DONE_ACK BIT(24) +#define CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK BIT(25) +#define CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK BIT(26) +#define CCI_IRQ_STATUS_0_I2C_M0_Q0_NACK_ERR BIT(27) +#define CCI_IRQ_STATUS_0_I2C_M0_Q1_NACK_ERR BIT(28) +#define CCI_IRQ_STATUS_0_I2C_M1_Q0_NACK_ERR BIT(29) +#define CCI_IRQ_STATUS_0_I2C_M1_Q1_NACK_ERR BIT(30) +#define CCI_IRQ_STATUS_0_I2C_M0_ERROR 0x18000ee6 +#define CCI_IRQ_STATUS_0_I2C_M1_ERROR 0x60ee6000 + +#define CCI_TIMEOUT (msecs_to_jiffies(100)) +#define NUM_MASTERS 2 +#define NUM_QUEUES 2 + +/* Max number of resources + 1 for a NULL terminator */ +#define CCI_RES_MAX 6 + +#define CCI_I2C_SET_PARAM 1 +#define CCI_I2C_REPORT 8 +#define CCI_I2C_WRITE 9 +#define CCI_I2C_READ 10 + +#define CCI_I2C_REPORT_IRQ_EN BIT(8) + +enum { + I2C_MODE_STANDARD, + I2C_MODE_FAST, + I2C_MODE_FAST_PLUS, +}; + +enum cci_i2c_queue_t { + QUEUE_0, + QUEUE_1 +}; + +struct hw_params { + u16 thigh; /* HIGH period of the SCL clock in clock ticks */ + u16 tlow; /* LOW period of the SCL clock */ + u16 tsu_sto; /* set-up time for STOP condition */ + u16 tsu_sta; /* set-up time for a repeated START condition */ + u16 thd_dat; /* data hold time */ + u16 thd_sta; /* hold time (repeated) START condition */ + u16 tbuf; /* bus free time between a STOP and START condition */ + u8 scl_stretch_en; + u16 trdhld; + u16 tsp; /* pulse width of spikes 
suppressed by the input filter */ +}; + +struct cci; + +struct cci_master { + struct i2c_adapter adap; + u16 master; + u8 mode; + int status; + struct completion irq_complete; + struct cci *cci; +}; + +struct cci_data { + unsigned int num_masters; + struct i2c_adapter_quirks quirks; + u16 queue_size[NUM_QUEUES]; + unsigned long cci_clk_rate; + struct hw_params params[3]; +}; + +struct cci { + struct device *dev; + void __iomem *base; + unsigned int irq; + const struct cci_data *data; + struct clk_bulk_data *clocks; + int nclocks; + struct cci_master master[NUM_MASTERS]; +}; + +static irqreturn_t cci_isr(int irq, void *dev) +{ + struct cci *cci = dev; + u32 val, reset = 0; + int ret = IRQ_NONE; + + val = readl(cci->base + CCI_IRQ_STATUS_0); + writel(val, cci->base + CCI_IRQ_CLEAR_0); + writel(0x1, cci->base + CCI_IRQ_GLOBAL_CLEAR_CMD); + + if (val & CCI_IRQ_STATUS_0_RST_DONE_ACK) { + complete(&cci->master[0].irq_complete); + if (cci->master[1].master) + complete(&cci->master[1].irq_complete); + ret = IRQ_HANDLED; + } + + if (val & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE || + val & CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT || + val & CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT) { + cci->master[0].status = 0; + complete(&cci->master[0].irq_complete); + ret = IRQ_HANDLED; + } + + if (val & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE || + val & CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT || + val & CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT) { + cci->master[1].status = 0; + complete(&cci->master[1].irq_complete); + ret = IRQ_HANDLED; + } + + if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK)) { + reset = CCI_RESET_CMD_M0_MASK; + ret = IRQ_HANDLED; + } + + if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK)) { + reset = CCI_RESET_CMD_M1_MASK; + ret = IRQ_HANDLED; + } + + if (unlikely(reset)) + writel(reset, cci->base + CCI_RESET_CMD); + + if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M0_ERROR)) { + if (val & CCI_IRQ_STATUS_0_I2C_M0_Q0_NACK_ERR || + val & CCI_IRQ_STATUS_0_I2C_M0_Q1_NACK_ERR) + cci->master[0].status = -ENXIO; + else + cci->master[0].status = -EIO; + + writel(CCI_HALT_REQ_I2C_M0_Q0Q1, cci->base + CCI_HALT_REQ); + ret = IRQ_HANDLED; + } + + if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M1_ERROR)) { + if (val & CCI_IRQ_STATUS_0_I2C_M1_Q0_NACK_ERR || + val & CCI_IRQ_STATUS_0_I2C_M1_Q1_NACK_ERR) + cci->master[0].status = -ENXIO; + else + cci->master[0].status = -EIO; + + writel(CCI_HALT_REQ_I2C_M1_Q0Q1, cci->base + CCI_HALT_REQ); + ret = IRQ_HANDLED; + } + + return ret; +} + +static int cci_halt(struct cci *cci, u8 master_num) +{ + struct cci_master *master; + u32 val; + + if (master_num >= cci->data->num_masters) { + dev_err(cci->dev, "Unsupported master idx (%u)\n", master_num); + return -EINVAL; + } + + val = BIT(master_num); + master = &cci->master[master_num]; + + reinit_completion(&master->irq_complete); + writel(val, cci->base + CCI_HALT_REQ); + + if (!wait_for_completion_timeout(&master->irq_complete, CCI_TIMEOUT)) { + dev_err(cci->dev, "CCI halt timeout\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static int cci_reset(struct cci *cci) +{ + /* + * we reset the whole controller, here and for implicity use + * master[0].xxx for waiting on it. 
+ */ + reinit_completion(&cci->master[0].irq_complete); + writel(CCI_RESET_CMD_MASK, cci->base + CCI_RESET_CMD); + + if (!wait_for_completion_timeout(&cci->master[0].irq_complete, + CCI_TIMEOUT)) { + dev_err(cci->dev, "CCI reset timeout\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static int cci_init(struct cci *cci) +{ + u32 val = CCI_IRQ_MASK_0_I2C_M0_RD_DONE | + CCI_IRQ_MASK_0_I2C_M0_Q0_REPORT | + CCI_IRQ_MASK_0_I2C_M0_Q1_REPORT | + CCI_IRQ_MASK_0_I2C_M1_RD_DONE | + CCI_IRQ_MASK_0_I2C_M1_Q0_REPORT | + CCI_IRQ_MASK_0_I2C_M1_Q1_REPORT | + CCI_IRQ_MASK_0_RST_DONE_ACK | + CCI_IRQ_MASK_0_I2C_M0_Q0Q1_HALT_ACK | + CCI_IRQ_MASK_0_I2C_M1_Q0Q1_HALT_ACK | + CCI_IRQ_MASK_0_I2C_M0_ERROR | + CCI_IRQ_MASK_0_I2C_M1_ERROR; + int i; + + writel(val, cci->base + CCI_IRQ_MASK_0); + + for (i = 0; i < cci->data->num_masters; i++) { + int mode = cci->master[i].mode; + const struct hw_params *hw; + + if (!cci->master[i].cci) + continue; + + hw = &cci->data->params[mode]; + + val = hw->thigh << 16 | hw->tlow; + writel(val, cci->base + CCI_I2C_Mm_SCL_CTL(i)); + + val = hw->tsu_sto << 16 | hw->tsu_sta; + writel(val, cci->base + CCI_I2C_Mm_SDA_CTL_0(i)); + + val = hw->thd_dat << 16 | hw->thd_sta; + writel(val, cci->base + CCI_I2C_Mm_SDA_CTL_1(i)); + + val = hw->tbuf; + writel(val, cci->base + CCI_I2C_Mm_SDA_CTL_2(i)); + + val = hw->scl_stretch_en << 8 | hw->trdhld << 4 | hw->tsp; + writel(val, cci->base + CCI_I2C_Mm_MISC_CTL(i)); + } + + return 0; +} + +static int cci_run_queue(struct cci *cci, u8 master, u8 queue) +{ + u32 val; + + val = readl(cci->base + CCI_I2C_Mm_Qn_CUR_WORD_CNT(master, queue)); + writel(val, cci->base + CCI_I2C_Mm_Qn_EXEC_WORD_CNT(master, queue)); + + reinit_completion(&cci->master[master].irq_complete); + val = BIT(master * 2 + queue); + writel(val, cci->base + CCI_QUEUE_START); + + if (!wait_for_completion_timeout(&cci->master[master].irq_complete, + CCI_TIMEOUT)) { + dev_err(cci->dev, "master %d queue %d timeout\n", + master, queue); + cci_reset(cci); + cci_init(cci); + return -ETIMEDOUT; + } + + return cci->master[master].status; +} + +static int cci_validate_queue(struct cci *cci, u8 master, u8 queue) +{ + u32 val; + + val = readl(cci->base + CCI_I2C_Mm_Qn_CUR_WORD_CNT(master, queue)); + if (val == cci->data->queue_size[queue]) + return -EINVAL; + + if (!val) + return 0; + + val = CCI_I2C_REPORT | CCI_I2C_REPORT_IRQ_EN; + writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue)); + + return cci_run_queue(cci, master, queue); +} + +static int cci_i2c_read(struct cci *cci, u16 master, + u16 addr, u8 *buf, u16 len) +{ + u32 val, words_read, words_exp; + u8 queue = QUEUE_1; + int i, index = 0, ret; + bool first = true; + + /* + * Call validate queue to make sure queue is empty before starting. + * This is to avoid overflow / underflow of queue. 
+ */ + ret = cci_validate_queue(cci, master, queue); + if (ret < 0) + return ret; + + val = CCI_I2C_SET_PARAM | (addr & 0x7f) << 4; + writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue)); + + val = CCI_I2C_READ | len << 4; + writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue)); + + ret = cci_run_queue(cci, master, queue); + if (ret < 0) + return ret; + + words_read = readl(cci->base + CCI_I2C_Mm_READ_BUF_LEVEL(master)); + words_exp = len / 4 + 1; + if (words_read != words_exp) { + dev_err(cci->dev, "words read = %d, words expected = %d\n", + words_read, words_exp); + return -EIO; + } + + do { + val = readl(cci->base + CCI_I2C_Mm_READ_DATA(master)); + + for (i = 0; i < 4 && index < len; i++) { + if (first) { + /* The LS byte of this register represents the + * first byte read from the slave during a read + * access. + */ + first = false; + continue; + } + buf[index++] = (val >> (i * 8)) & 0xff; + } + } while (--words_read); + + return 0; +} + +static int cci_i2c_write(struct cci *cci, u16 master, + u16 addr, u8 *buf, u16 len) +{ + u8 queue = QUEUE_0; + u8 load[12] = { 0 }; + int i = 0, j, ret; + u32 val; + + /* + * Call validate queue to make sure queue is empty before starting. + * This is to avoid overflow / underflow of queue. + */ + ret = cci_validate_queue(cci, master, queue); + if (ret < 0) + return ret; + + val = CCI_I2C_SET_PARAM | (addr & 0x7f) << 4; + writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue)); + + load[i++] = CCI_I2C_WRITE | len << 4; + + for (j = 0; j < len; j++) + load[i++] = buf[j]; + + for (j = 0; j < i; j += 4) { + val = load[j]; + val |= load[j + 1] << 8; + val |= load[j + 2] << 16; + val |= load[j + 3] << 24; + writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue)); + } + + val = CCI_I2C_REPORT | CCI_I2C_REPORT_IRQ_EN; + writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue)); + + return cci_run_queue(cci, master, queue); +} + +static int cci_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) +{ + struct cci_master *cci_master = i2c_get_adapdata(adap); + struct cci *cci = cci_master->cci; + int i, ret; + + ret = pm_runtime_get_sync(cci->dev); + if (ret < 0) + goto err; + + for (i = 0; i < num; i++) { + if (msgs[i].flags & I2C_M_RD) + ret = cci_i2c_read(cci, cci_master->master, + msgs[i].addr, msgs[i].buf, + msgs[i].len); + else + ret = cci_i2c_write(cci, cci_master->master, + msgs[i].addr, msgs[i].buf, + msgs[i].len); + + if (ret < 0) + break; + } + + if (!ret) + ret = num; + +err: + pm_runtime_mark_last_busy(cci->dev); + pm_runtime_put_autosuspend(cci->dev); + + return ret; +} + +static u32 cci_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static const struct i2c_algorithm cci_algo = { + .master_xfer = cci_xfer, + .functionality = cci_func, +}; + +static int cci_enable_clocks(struct cci *cci) +{ + return clk_bulk_prepare_enable(cci->nclocks, cci->clocks); +} + +static void cci_disable_clocks(struct cci *cci) +{ + clk_bulk_disable_unprepare(cci->nclocks, cci->clocks); +} + +static int __maybe_unused cci_suspend_runtime(struct device *dev) +{ + struct cci *cci = dev_get_drvdata(dev); + + cci_disable_clocks(cci); + return 0; +} + +static int __maybe_unused cci_resume_runtime(struct device *dev) +{ + struct cci *cci = dev_get_drvdata(dev); + int ret; + + ret = cci_enable_clocks(cci); + if (ret) + return ret; + + cci_init(cci); + return 0; +} + +static int __maybe_unused cci_suspend(struct device *dev) +{ + if (!pm_runtime_suspended(dev)) + return 
cci_suspend_runtime(dev); + + return 0; +} + +static int __maybe_unused cci_resume(struct device *dev) +{ + cci_resume_runtime(dev); + pm_runtime_mark_last_busy(dev); + pm_request_autosuspend(dev); + + return 0; +} + +static const struct dev_pm_ops qcom_cci_pm = { + SET_SYSTEM_SLEEP_PM_OPS(cci_suspend, cci_resume) + SET_RUNTIME_PM_OPS(cci_suspend_runtime, cci_resume_runtime, NULL) +}; + +static int cci_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + unsigned long cci_clk_rate = 0; + struct device_node *child; + struct resource *r; + struct cci *cci; + int ret, i; + u32 val; + + cci = devm_kzalloc(dev, sizeof(*cci), GFP_KERNEL); + if (!cci) + return -ENOMEM; + + cci->dev = dev; + platform_set_drvdata(pdev, cci); + cci->data = device_get_match_data(dev); + if (!cci->data) + return -ENOENT; + + for_each_available_child_of_node(dev->of_node, child) { + u32 idx; + + ret = of_property_read_u32(child, "reg", &idx); + if (ret) { + dev_err(dev, "%pOF invalid 'reg' property", child); + continue; + } + + if (idx >= cci->data->num_masters) { + dev_err(dev, "%pOF invalid 'reg' value: %u (max is %u)", + child, idx, cci->data->num_masters - 1); + continue; + } + + cci->master[idx].adap.quirks = &cci->data->quirks; + cci->master[idx].adap.algo = &cci_algo; + cci->master[idx].adap.dev.parent = dev; + cci->master[idx].adap.dev.of_node = child; + cci->master[idx].master = idx; + cci->master[idx].cci = cci; + + i2c_set_adapdata(&cci->master[idx].adap, &cci->master[idx]); + snprintf(cci->master[idx].adap.name, + sizeof(cci->master[idx].adap.name), "Qualcomm-CCI"); + + cci->master[idx].mode = I2C_MODE_STANDARD; + ret = of_property_read_u32(child, "clock-frequency", &val); + if (!ret) { + if (val == 400000) + cci->master[idx].mode = I2C_MODE_FAST; + else if (val == 1000000) + cci->master[idx].mode = I2C_MODE_FAST_PLUS; + } + + init_completion(&cci->master[idx].irq_complete); + } + + /* Memory */ + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + cci->base = devm_ioremap_resource(dev, r); + if (IS_ERR(cci->base)) + return PTR_ERR(cci->base); + + /* Clocks */ + + ret = devm_clk_bulk_get_all(dev, &cci->clocks); + if (ret < 1) { + dev_err(dev, "failed to get clocks %d\n", ret); + return ret; + } + cci->nclocks = ret; + + /* Retrieve CCI clock rate */ + for (i = 0; i < cci->nclocks; i++) { + if (!strcmp(cci->clocks[i].id, "cci")) { + cci_clk_rate = clk_get_rate(cci->clocks[i].clk); + break; + } + } + + if (cci_clk_rate != cci->data->cci_clk_rate) { + /* cci clock set by the bootloader or via assigned clock rate + * in DT. 
+ */ + dev_warn(dev, "Found %lu cci clk rate while %lu was expected\n", + cci_clk_rate, cci->data->cci_clk_rate); + } + + ret = cci_enable_clocks(cci); + if (ret < 0) + return ret; + + /* Interrupt */ + + ret = platform_get_irq(pdev, 0); + if (ret < 0) + goto disable_clocks; + cci->irq = ret; + + ret = devm_request_irq(dev, cci->irq, cci_isr, 0, dev_name(dev), cci); + if (ret < 0) { + dev_err(dev, "request_irq failed, ret: %d\n", ret); + goto disable_clocks; + } + + val = readl(cci->base + CCI_HW_VERSION); + dev_dbg(dev, "CCI HW version = 0x%08x", val); + + ret = cci_reset(cci); + if (ret < 0) + goto error; + + ret = cci_init(cci); + if (ret < 0) + goto error; + + for (i = 0; i < cci->data->num_masters; i++) { + if (!cci->master[i].cci) + continue; + + ret = i2c_add_adapter(&cci->master[i].adap); + if (ret < 0) + goto error_i2c; + } + + pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC); + pm_runtime_use_autosuspend(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + + return 0; + +error_i2c: + for (; i >= 0; i--) { + if (cci->master[i].cci) + i2c_del_adapter(&cci->master[i].adap); + } +error: + disable_irq(cci->irq); +disable_clocks: + cci_disable_clocks(cci); + + return ret; +} + +static int cci_remove(struct platform_device *pdev) +{ + struct cci *cci = platform_get_drvdata(pdev); + int i; + + for (i = 0; i < cci->data->num_masters; i++) { + if (cci->master[i].cci) + i2c_del_adapter(&cci->master[i].adap); + cci_halt(cci, i); + } + + disable_irq(cci->irq); + pm_runtime_disable(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); + + return 0; +} + +static const struct cci_data cci_v1_data = { + .num_masters = 1, + .queue_size = { 64, 16 }, + .quirks = { + .max_write_len = 10, + .max_read_len = 12, + }, + .cci_clk_rate = 19200000, + .params[I2C_MODE_STANDARD] = { + .thigh = 78, + .tlow = 114, + .tsu_sto = 28, + .tsu_sta = 28, + .thd_dat = 10, + .thd_sta = 77, + .tbuf = 118, + .scl_stretch_en = 0, + .trdhld = 6, + .tsp = 1 + }, + .params[I2C_MODE_FAST] = { + .thigh = 20, + .tlow = 28, + .tsu_sto = 21, + .tsu_sta = 21, + .thd_dat = 13, + .thd_sta = 18, + .tbuf = 32, + .scl_stretch_en = 0, + .trdhld = 6, + .tsp = 3 + }, +}; + +static const struct cci_data cci_v2_data = { + .num_masters = 2, + .queue_size = { 64, 16 }, + .quirks = { + .max_write_len = 11, + .max_read_len = 12, + }, + .cci_clk_rate = 37500000, + .params[I2C_MODE_STANDARD] = { + .thigh = 201, + .tlow = 174, + .tsu_sto = 204, + .tsu_sta = 231, + .thd_dat = 22, + .thd_sta = 162, + .tbuf = 227, + .scl_stretch_en = 0, + .trdhld = 6, + .tsp = 3 + }, + .params[I2C_MODE_FAST] = { + .thigh = 38, + .tlow = 56, + .tsu_sto = 40, + .tsu_sta = 40, + .thd_dat = 22, + .thd_sta = 35, + .tbuf = 62, + .scl_stretch_en = 0, + .trdhld = 6, + .tsp = 3 + }, + .params[I2C_MODE_FAST_PLUS] = { + .thigh = 16, + .tlow = 22, + .tsu_sto = 17, + .tsu_sta = 18, + .thd_dat = 16, + .thd_sta = 15, + .tbuf = 24, + .scl_stretch_en = 0, + .trdhld = 3, + .tsp = 3 + }, +}; + +static const struct of_device_id cci_dt_match[] = { + { .compatible = "qcom,msm8916-cci", .data = &cci_v1_data}, + { .compatible = "qcom,msm8996-cci", .data = &cci_v2_data}, + { .compatible = "qcom,sdm845-cci", .data = &cci_v2_data}, + {} +}; +MODULE_DEVICE_TABLE(of, cci_dt_match); + +static struct platform_driver qcom_cci_driver = { + .probe = cci_probe, + .remove = cci_remove, + .driver = { + .name = "i2c-qcom-cci", + .of_match_table = cci_dt_match, + .pm = &qcom_cci_pm, + }, +}; + +module_platform_driver(qcom_cci_driver); + +MODULE_DESCRIPTION("Qualcomm Camera Control Interface 
driver"); +MODULE_AUTHOR("Todor Tomov "); +MODULE_AUTHOR("Loic Poulain "); +MODULE_LICENSE("GPL v2"); -- GitLab From e42688ed5cf5936fb55c78cc365dbe0944af7c63 Mon Sep 17 00:00:00 2001 From: Dejin Zheng Date: Thu, 16 Apr 2020 23:23:45 +0800 Subject: [PATCH 0150/1971] i2c: busses: remove duplicate dev_err() it will print an error message by itself when platform_get_irq() goes wrong. so don't need dev_err() in here again. Reviewed-by: Andy Shevchenko Suggested-by: Markus Elfring Signed-off-by: Dejin Zheng Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-altera.c | 4 +--- drivers/i2c/busses/i2c-axxia.c | 4 +--- drivers/i2c/busses/i2c-bcm-kona.c | 3 +-- drivers/i2c/busses/i2c-cht-wc.c | 6 ++---- drivers/i2c/busses/i2c-img-scb.c | 4 +--- drivers/i2c/busses/i2c-imx-lpi2c.c | 4 +--- drivers/i2c/busses/i2c-lpc2k.c | 4 +--- drivers/i2c/busses/i2c-meson.c | 4 +--- drivers/i2c/busses/i2c-omap.c | 4 +--- drivers/i2c/busses/i2c-owl.c | 4 +--- drivers/i2c/busses/i2c-pnx.c | 1 - drivers/i2c/busses/i2c-pxa.c | 4 +--- drivers/i2c/busses/i2c-qup.c | 4 +--- drivers/i2c/busses/i2c-rk3x.c | 4 +--- drivers/i2c/busses/i2c-sprd.c | 4 +--- drivers/i2c/busses/i2c-sun6i-p2wi.c | 4 +--- drivers/i2c/busses/i2c-synquacer.c | 4 +--- drivers/i2c/busses/i2c-uniphier-f.c | 4 +--- drivers/i2c/busses/i2c-uniphier.c | 4 +--- drivers/i2c/busses/i2c-xlp9xx.c | 4 +--- 20 files changed, 20 insertions(+), 58 deletions(-) diff --git a/drivers/i2c/busses/i2c-altera.c b/drivers/i2c/busses/i2c-altera.c index 083699d74f9ac..027faabe3d043 100644 --- a/drivers/i2c/busses/i2c-altera.c +++ b/drivers/i2c/busses/i2c-altera.c @@ -394,10 +394,8 @@ static int altr_i2c_probe(struct platform_device *pdev) return PTR_ERR(idev->base); irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(&pdev->dev, "missing interrupt resource\n"); + if (irq < 0) return irq; - } idev->i2c_clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(idev->i2c_clk)) { diff --git a/drivers/i2c/busses/i2c-axxia.c b/drivers/i2c/busses/i2c-axxia.c index d45e2290c3f72..5294b73beca85 100644 --- a/drivers/i2c/busses/i2c-axxia.c +++ b/drivers/i2c/busses/i2c-axxia.c @@ -746,10 +746,8 @@ static int axxia_i2c_probe(struct platform_device *pdev) return PTR_ERR(base); idev->irq = platform_get_irq(pdev, 0); - if (idev->irq < 0) { - dev_err(&pdev->dev, "missing interrupt resource\n"); + if (idev->irq < 0) return idev->irq; - } idev->i2c_clk = devm_clk_get(&pdev->dev, "i2c"); if (IS_ERR(idev->i2c_clk)) { diff --git a/drivers/i2c/busses/i2c-bcm-kona.c b/drivers/i2c/busses/i2c-bcm-kona.c index 3e63f706f66b2..ed5e1275ae46f 100644 --- a/drivers/i2c/busses/i2c-bcm-kona.c +++ b/drivers/i2c/busses/i2c-bcm-kona.c @@ -821,8 +821,7 @@ static int bcm_kona_i2c_probe(struct platform_device *pdev) /* Get the interrupt number */ dev->irq = platform_get_irq(pdev, 0); if (dev->irq < 0) { - dev_err(dev->device, "no irq resource\n"); - rc = -ENODEV; + rc = dev->irq; goto probe_disable_clk; } diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c index 35e55feda763e..f80d79e973cd2 100644 --- a/drivers/i2c/busses/i2c-cht-wc.c +++ b/drivers/i2c/busses/i2c-cht-wc.c @@ -314,10 +314,8 @@ static int cht_wc_i2c_adap_i2c_probe(struct platform_device *pdev) int ret, reg, irq; irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(&pdev->dev, "Error missing irq resource\n"); - return -EINVAL; - } + if (irq < 0) + return irq; adap = devm_kzalloc(&pdev->dev, sizeof(*adap), GFP_KERNEL); if (!adap) diff --git a/drivers/i2c/busses/i2c-img-scb.c b/drivers/i2c/busses/i2c-img-scb.c index 
c937ea79300c1..98a89301ed2a6 100644 --- a/drivers/i2c/busses/i2c-img-scb.c +++ b/drivers/i2c/busses/i2c-img-scb.c @@ -1342,10 +1342,8 @@ static int img_i2c_probe(struct platform_device *pdev) return PTR_ERR(i2c->base); irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(&pdev->dev, "can't get irq number\n"); + if (irq < 0) return irq; - } i2c->sys_clk = devm_clk_get(&pdev->dev, "sys"); if (IS_ERR(i2c->sys_clk)) { diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c index 94743ba581fe5..9db6ccded5e9e 100644 --- a/drivers/i2c/busses/i2c-imx-lpi2c.c +++ b/drivers/i2c/busses/i2c-imx-lpi2c.c @@ -551,10 +551,8 @@ static int lpi2c_imx_probe(struct platform_device *pdev) return PTR_ERR(lpi2c_imx->base); irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(&pdev->dev, "can't get irq number\n"); + if (irq < 0) return irq; - } lpi2c_imx->adapter.owner = THIS_MODULE; lpi2c_imx->adapter.algo = &lpi2c_imx_algo; diff --git a/drivers/i2c/busses/i2c-lpc2k.c b/drivers/i2c/busses/i2c-lpc2k.c index 197cb64b2a42d..4e30c5267142c 100644 --- a/drivers/i2c/busses/i2c-lpc2k.c +++ b/drivers/i2c/busses/i2c-lpc2k.c @@ -360,10 +360,8 @@ static int i2c_lpc2k_probe(struct platform_device *pdev) return PTR_ERR(i2c->base); i2c->irq = platform_get_irq(pdev, 0); - if (i2c->irq < 0) { - dev_err(&pdev->dev, "can't get interrupt resource\n"); + if (i2c->irq < 0) return i2c->irq; - } init_waitqueue_head(&i2c->wait); diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c index b3bd869281d36..c5dec572fc48e 100644 --- a/drivers/i2c/busses/i2c-meson.c +++ b/drivers/i2c/busses/i2c-meson.c @@ -426,10 +426,8 @@ static int meson_i2c_probe(struct platform_device *pdev) return PTR_ERR(i2c->regs); irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(&pdev->dev, "can't find IRQ\n"); + if (irq < 0) return irq; - } ret = devm_request_irq(&pdev->dev, irq, meson_i2c_irq, 0, NULL, i2c); if (ret < 0) { diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index 71b4637c86b79..175c590b93b76 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -1365,10 +1365,8 @@ omap_i2c_probe(struct platform_device *pdev) u16 minor, major; irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(&pdev->dev, "no irq resource?\n"); + if (irq < 0) return irq; - } omap = devm_kzalloc(&pdev->dev, sizeof(struct omap_i2c_dev), GFP_KERNEL); if (!omap) diff --git a/drivers/i2c/busses/i2c-owl.c b/drivers/i2c/busses/i2c-owl.c index fba6efc1dca85..672f1f239bd6f 100644 --- a/drivers/i2c/busses/i2c-owl.c +++ b/drivers/i2c/busses/i2c-owl.c @@ -407,10 +407,8 @@ static int owl_i2c_probe(struct platform_device *pdev) return PTR_ERR(i2c_dev->base); irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "failed to get IRQ number\n"); + if (irq < 0) return irq; - } if (of_property_read_u32(dev->of_node, "clock-frequency", &i2c_dev->bus_freq)) diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c index 686c06f316253..5d7207c10f1dc 100644 --- a/drivers/i2c/busses/i2c-pnx.c +++ b/drivers/i2c/busses/i2c-pnx.c @@ -720,7 +720,6 @@ static int i2c_pnx_probe(struct platform_device *pdev) alg_data->irq = platform_get_irq(pdev, 0); if (alg_data->irq < 0) { - dev_err(&pdev->dev, "Failed to get IRQ from platform resource\n"); ret = alg_data->irq; goto out_clock; } diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c index 466e4f681d7a4..37246cbbe14c4 100644 --- a/drivers/i2c/busses/i2c-pxa.c +++ 
b/drivers/i2c/busses/i2c-pxa.c @@ -1267,10 +1267,8 @@ static int i2c_pxa_probe(struct platform_device *dev) return PTR_ERR(i2c->reg_base); irq = platform_get_irq(dev, 0); - if (irq < 0) { - dev_err(&dev->dev, "no irq resource: %d\n", irq); + if (irq < 0) return irq; - } /* Default adapter num to device id; i2c_pxa_probe_dt can override. */ i2c->adap.nr = dev->id; diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c index 155dcde70fc97..0e3525fe613f8 100644 --- a/drivers/i2c/busses/i2c-qup.c +++ b/drivers/i2c/busses/i2c-qup.c @@ -1761,10 +1761,8 @@ nodma: return PTR_ERR(qup->base); qup->irq = platform_get_irq(pdev, 0); - if (qup->irq < 0) { - dev_err(qup->dev, "No IRQ defined\n"); + if (qup->irq < 0) return qup->irq; - } if (has_acpi_companion(qup->dev)) { ret = device_property_read_u32(qup->dev, diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c index fbe41c8cc126a..bc698240c4aaa 100644 --- a/drivers/i2c/busses/i2c-rk3x.c +++ b/drivers/i2c/busses/i2c-rk3x.c @@ -1260,10 +1260,8 @@ static int rk3x_i2c_probe(struct platform_device *pdev) /* IRQ setup */ irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(&pdev->dev, "cannot find rk3x IRQ\n"); + if (irq < 0) return irq; - } ret = devm_request_irq(&pdev->dev, irq, rk3x_i2c_irq, 0, dev_name(&pdev->dev), i2c); diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c index 123a42bfe3b1b..19cda6742423d 100644 --- a/drivers/i2c/busses/i2c-sprd.c +++ b/drivers/i2c/busses/i2c-sprd.c @@ -492,10 +492,8 @@ static int sprd_i2c_probe(struct platform_device *pdev) return PTR_ERR(i2c_dev->base); i2c_dev->irq = platform_get_irq(pdev, 0); - if (i2c_dev->irq < 0) { - dev_err(&pdev->dev, "failed to get irq resource\n"); + if (i2c_dev->irq < 0) return i2c_dev->irq; - } i2c_set_adapdata(&i2c_dev->adap, i2c_dev); init_completion(&i2c_dev->complete); diff --git a/drivers/i2c/busses/i2c-sun6i-p2wi.c b/drivers/i2c/busses/i2c-sun6i-p2wi.c index 17c27f79029ba..2f6f6468214dd 100644 --- a/drivers/i2c/busses/i2c-sun6i-p2wi.c +++ b/drivers/i2c/busses/i2c-sun6i-p2wi.c @@ -236,10 +236,8 @@ static int p2wi_probe(struct platform_device *pdev) strlcpy(p2wi->adapter.name, pdev->name, sizeof(p2wi->adapter.name)); irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "failed to retrieve irq: %d\n", irq); + if (irq < 0) return irq; - } p2wi->clk = devm_clk_get(dev, NULL); if (IS_ERR(p2wi->clk)) { diff --git a/drivers/i2c/busses/i2c-synquacer.c b/drivers/i2c/busses/i2c-synquacer.c index 8cc91a8da5a0f..c9a3dba6a75d3 100644 --- a/drivers/i2c/busses/i2c-synquacer.c +++ b/drivers/i2c/busses/i2c-synquacer.c @@ -578,10 +578,8 @@ static int synquacer_i2c_probe(struct platform_device *pdev) return PTR_ERR(i2c->base); i2c->irq = platform_get_irq(pdev, 0); - if (i2c->irq < 0) { - dev_err(&pdev->dev, "no IRQ resource found\n"); + if (i2c->irq < 0) return -ENODEV; - } ret = devm_request_irq(&pdev->dev, i2c->irq, synquacer_i2c_isr, 0, dev_name(&pdev->dev), i2c); diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c index 2b258d54d68cf..cb4666c54a233 100644 --- a/drivers/i2c/busses/i2c-uniphier-f.c +++ b/drivers/i2c/busses/i2c-uniphier-f.c @@ -529,10 +529,8 @@ static int uniphier_fi2c_probe(struct platform_device *pdev) return PTR_ERR(priv->membase); irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "failed to get IRQ number\n"); + if (irq < 0) return irq; - } if (of_property_read_u32(dev->of_node, "clock-frequency", &bus_speed)) bus_speed = I2C_MAX_STANDARD_MODE_FREQ; 
diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c index 668b1fa2b0ef4..ee00a44bf4c71 100644 --- a/drivers/i2c/busses/i2c-uniphier.c +++ b/drivers/i2c/busses/i2c-uniphier.c @@ -324,10 +324,8 @@ static int uniphier_i2c_probe(struct platform_device *pdev) return PTR_ERR(priv->membase); irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "failed to get IRQ number\n"); + if (irq < 0) return irq; - } if (of_property_read_u32(dev->of_node, "clock-frequency", &bus_speed)) bus_speed = I2C_MAX_STANDARD_MODE_FREQ; diff --git a/drivers/i2c/busses/i2c-xlp9xx.c b/drivers/i2c/busses/i2c-xlp9xx.c index 286faa9c855ea..f2241cedf5d3f 100644 --- a/drivers/i2c/busses/i2c-xlp9xx.c +++ b/drivers/i2c/busses/i2c-xlp9xx.c @@ -517,10 +517,8 @@ static int xlp9xx_i2c_probe(struct platform_device *pdev) return PTR_ERR(priv->base); priv->irq = platform_get_irq(pdev, 0); - if (priv->irq <= 0) { - dev_err(&pdev->dev, "invalid irq!\n"); + if (priv->irq <= 0) return priv->irq; - } /* SMBAlert irq */ priv->alert_data.irq = platform_get_irq(pdev, 1); if (priv->alert_data.irq <= 0) -- GitLab From c154703bc8dd2231ae81aafef5589b795b2b7e09 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sat, 18 Apr 2020 21:18:07 -0700 Subject: [PATCH 0151/1971] Input: tca6416-keypad - fix a typo in MODULE_DESCRIPTION This should be 'tca6416', not 'tca6146' Signed-off-by: Christophe JAILLET Link: https://lore.kernel.org/r/20200413152329.4435-1-christophe.jaillet@wanadoo.fr Signed-off-by: Dmitry Torokhov --- drivers/input/keyboard/tca6416-keypad.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/input/keyboard/tca6416-keypad.c b/drivers/input/keyboard/tca6416-keypad.c index 2a14769de6370..d2fe258774c60 100644 --- a/drivers/input/keyboard/tca6416-keypad.c +++ b/drivers/input/keyboard/tca6416-keypad.c @@ -374,5 +374,5 @@ static void __exit tca6416_keypad_exit(void) module_exit(tca6416_keypad_exit); MODULE_AUTHOR("Sriramakrishnan "); -MODULE_DESCRIPTION("Keypad driver over tca6146 IO expander"); +MODULE_DESCRIPTION("Keypad driver over tca6416 IO expander"); MODULE_LICENSE("GPL"); -- GitLab From ec4ba6c35b1bd748de50044e36040136f378b9b6 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sat, 18 Apr 2020 21:27:25 -0700 Subject: [PATCH 0152/1971] Input: spear-keyboard - fix a typo in a module name in Kconfig A 'y' is missing in spear-keyboard. Signed-off-by: Christophe JAILLET Link: https://lore.kernel.org/r/20200412095711.9107-1-christophe.jaillet@wanadoo.fr Signed-off-by: Dmitry Torokhov --- drivers/input/keyboard/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index 4706ff09f0e8a..f6c356e579c5c 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig @@ -691,7 +691,7 @@ config KEYBOARD_SPEAR Say Y here if you want to use the SPEAR keyboard. To compile this driver as a module, choose M here: the - module will be called spear-keboard. + module will be called spear-keyboard. config KEYBOARD_TC3589X tristate "TC3589X Keypad support" -- GitLab From 81b4d1d22ca0d0162360d3536b0eb3f6d5bfcf88 Mon Sep 17 00:00:00 2001 From: Kenny Levinsen Date: Sat, 18 Apr 2020 21:26:50 -0700 Subject: [PATCH 0153/1971] Input: evdev - use keyed wakeups Some processes, such as systemd, are only polling for EPOLLERR|EPOLLHUP. As evdev uses unkeyed wakeups, such a poll receives many spurious wakeups from uninteresting events. 
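For illustration only (a minimal userspace sketch; the device path is an assumption for the example), such a poller registers no read interest at all and still has EPOLLERR/EPOLLHUP reported implicitly:

#include <fcntl.h>
#include <stdio.h>
#include <sys/epoll.h>
#include <unistd.h>

int main(void)
{
    int fd = open("/dev/input/event0", O_RDONLY | O_NONBLOCK);
    int ep = epoll_create1(0);
    /* Interested only in error/hang-up, never in input events. */
    struct epoll_event ev = { .events = EPOLLERR | EPOLLHUP }, out;

    if (fd < 0 || ep < 0)
        return 1;
    epoll_ctl(ep, EPOLL_CTL_ADD, fd, &ev);

    /* With unkeyed wakeups this sleeper is woken for every input event and
     * immediately goes back to sleep; keyed wakeups avoid those wakeups. */
    epoll_wait(ep, &out, 1, -1);

    close(ep);
    close(fd);
    return 0;
}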
Use keyed wakeups to allow the wakeup target to more efficiently discard these uninteresting events. Signed-off-by: Kenny Levinsen Link: https://lore.kernel.org/r/20200410233557.3892-1-kl@kl.wtf Signed-off-by: Dmitry Torokhov --- drivers/input/evdev.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c index d7dd6fcf2db05..f54d3d31f61de 100644 --- a/drivers/input/evdev.c +++ b/drivers/input/evdev.c @@ -282,7 +282,8 @@ static void evdev_pass_values(struct evdev_client *client, spin_unlock(&client->buffer_lock); if (wakeup) - wake_up_interruptible(&evdev->wait); + wake_up_interruptible_poll(&evdev->wait, + EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM); } /* @@ -443,7 +444,7 @@ static void evdev_hangup(struct evdev *evdev) kill_fasync(&client->fasync, SIGIO, POLL_HUP); spin_unlock(&evdev->client_lock); - wake_up_interruptible(&evdev->wait); + wake_up_interruptible_poll(&evdev->wait, EPOLLHUP | EPOLLERR); } static int evdev_release(struct inode *inode, struct file *file) @@ -958,7 +959,7 @@ static int evdev_revoke(struct evdev *evdev, struct evdev_client *client, client->revoked = true; evdev_ungrab(evdev, client); input_flush_device(&evdev->handle, file); - wake_up_interruptible(&evdev->wait); + wake_up_interruptible_poll(&evdev->wait, EPOLLHUP | EPOLLERR); return 0; } -- GitLab From 4ea8391e3556ad08ff0ea8fb282f9a550b8a3333 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Sat, 18 Apr 2020 13:40:09 -0700 Subject: [PATCH 0154/1971] Input: delete unused GP2AP002A00F driver There is now an IIO driver for GP2AP002A00F and GP2AP002S00F in drivers/iio/light/gp2ap002.c. Delete this driver, it is unused in the kernel tree and new users can make use of the IIO driver. Signed-off-by: Linus Walleij Link: https://lore.kernel.org/r/20200417203059.8151-1-linus.walleij@linaro.org Signed-off-by: Dmitry Torokhov --- drivers/input/misc/Kconfig | 11 -- drivers/input/misc/Makefile | 1 - drivers/input/misc/gp2ap002a00f.c | 281 ----------------------------- include/linux/input/gp2ap002a00f.h | 23 --- 4 files changed, 316 deletions(-) delete mode 100644 drivers/input/misc/gp2ap002a00f.c delete mode 100644 include/linux/input/gp2ap002a00f.h diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index 7e2e658d551c1..293e55fb7a4e4 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig @@ -265,17 +265,6 @@ config INPUT_APANEL To compile this driver as a module, choose M here: the module will be called apanel. -config INPUT_GP2A - tristate "Sharp GP2AP002A00F I2C Proximity/Opto sensor driver" - depends on I2C - depends on GPIOLIB || COMPILE_TEST - help - Say Y here if you have a Sharp GP2AP002A00F proximity/als combo-chip - hooked to an I2C bus. - - To compile this driver as a module, choose M here: the - module will be called gp2ap002a00f. 
- config INPUT_GPIO_BEEPER tristate "Generic GPIO Beeper support" depends on GPIOLIB || COMPILE_TEST diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile index 8fd187f314bdf..1db1815ce8032 100644 --- a/drivers/input/misc/Makefile +++ b/drivers/input/misc/Makefile @@ -33,7 +33,6 @@ obj-$(CONFIG_INPUT_E3X0_BUTTON) += e3x0-button.o obj-$(CONFIG_INPUT_DRV260X_HAPTICS) += drv260x.o obj-$(CONFIG_INPUT_DRV2665_HAPTICS) += drv2665.o obj-$(CONFIG_INPUT_DRV2667_HAPTICS) += drv2667.o -obj-$(CONFIG_INPUT_GP2A) += gp2ap002a00f.o obj-$(CONFIG_INPUT_GPIO_BEEPER) += gpio-beeper.o obj-$(CONFIG_INPUT_GPIO_DECODER) += gpio_decoder.o obj-$(CONFIG_INPUT_GPIO_VIBRA) += gpio-vibra.o diff --git a/drivers/input/misc/gp2ap002a00f.c b/drivers/input/misc/gp2ap002a00f.c deleted file mode 100644 index 90abda8eea67a..0000000000000 --- a/drivers/input/misc/gp2ap002a00f.c +++ /dev/null @@ -1,281 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2011 Sony Ericsson Mobile Communications Inc. - * - * Author: Courtney Cavin - * Prepared for up-stream by: Oskar Andero - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -struct gp2a_data { - struct input_dev *input; - const struct gp2a_platform_data *pdata; - struct i2c_client *i2c_client; -}; - -enum gp2a_addr { - GP2A_ADDR_PROX = 0x0, - GP2A_ADDR_GAIN = 0x1, - GP2A_ADDR_HYS = 0x2, - GP2A_ADDR_CYCLE = 0x3, - GP2A_ADDR_OPMOD = 0x4, - GP2A_ADDR_CON = 0x6 -}; - -enum gp2a_controls { - /* Software Shutdown control: 0 = shutdown, 1 = normal operation */ - GP2A_CTRL_SSD = 0x01 -}; - -static int gp2a_report(struct gp2a_data *dt) -{ - int vo = gpio_get_value(dt->pdata->vout_gpio); - - input_report_switch(dt->input, SW_FRONT_PROXIMITY, !vo); - input_sync(dt->input); - - return 0; -} - -static irqreturn_t gp2a_irq(int irq, void *handle) -{ - struct gp2a_data *dt = handle; - - gp2a_report(dt); - - return IRQ_HANDLED; -} - -static int gp2a_enable(struct gp2a_data *dt) -{ - return i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_OPMOD, - GP2A_CTRL_SSD); -} - -static int gp2a_disable(struct gp2a_data *dt) -{ - return i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_OPMOD, - 0x00); -} - -static int gp2a_device_open(struct input_dev *dev) -{ - struct gp2a_data *dt = input_get_drvdata(dev); - int error; - - error = gp2a_enable(dt); - if (error < 0) { - dev_err(&dt->i2c_client->dev, - "unable to activate, err %d\n", error); - return error; - } - - gp2a_report(dt); - - return 0; -} - -static void gp2a_device_close(struct input_dev *dev) -{ - struct gp2a_data *dt = input_get_drvdata(dev); - int error; - - error = gp2a_disable(dt); - if (error < 0) - dev_err(&dt->i2c_client->dev, - "unable to deactivate, err %d\n", error); -} - -static int gp2a_initialize(struct gp2a_data *dt) -{ - int error; - - error = i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_GAIN, - 0x08); - if (error < 0) - return error; - - error = i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_HYS, - 0xc2); - if (error < 0) - return error; - - error = i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_CYCLE, - 0x04); - if (error < 0) - return error; - - error = gp2a_disable(dt); - - return error; -} - -static int gp2a_probe(struct i2c_client *client, - const struct i2c_device_id *id) -{ - const struct gp2a_platform_data *pdata = dev_get_platdata(&client->dev); - struct gp2a_data *dt; - int error; - - if (!pdata) - return -EINVAL; - - if (pdata->hw_setup) { - error = pdata->hw_setup(client); - if (error < 0) - return error; - } - - 
error = gpio_request_one(pdata->vout_gpio, GPIOF_IN, GP2A_I2C_NAME); - if (error) - goto err_hw_shutdown; - - dt = kzalloc(sizeof(struct gp2a_data), GFP_KERNEL); - if (!dt) { - error = -ENOMEM; - goto err_free_gpio; - } - - dt->pdata = pdata; - dt->i2c_client = client; - - error = gp2a_initialize(dt); - if (error < 0) - goto err_free_mem; - - dt->input = input_allocate_device(); - if (!dt->input) { - error = -ENOMEM; - goto err_free_mem; - } - - input_set_drvdata(dt->input, dt); - - dt->input->open = gp2a_device_open; - dt->input->close = gp2a_device_close; - dt->input->name = GP2A_I2C_NAME; - dt->input->id.bustype = BUS_I2C; - dt->input->dev.parent = &client->dev; - - input_set_capability(dt->input, EV_SW, SW_FRONT_PROXIMITY); - - error = request_threaded_irq(client->irq, NULL, gp2a_irq, - IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | - IRQF_ONESHOT, - GP2A_I2C_NAME, dt); - if (error) { - dev_err(&client->dev, "irq request failed\n"); - goto err_free_input_dev; - } - - error = input_register_device(dt->input); - if (error) { - dev_err(&client->dev, "device registration failed\n"); - goto err_free_irq; - } - - device_init_wakeup(&client->dev, pdata->wakeup); - i2c_set_clientdata(client, dt); - - return 0; - -err_free_irq: - free_irq(client->irq, dt); -err_free_input_dev: - input_free_device(dt->input); -err_free_mem: - kfree(dt); -err_free_gpio: - gpio_free(pdata->vout_gpio); -err_hw_shutdown: - if (pdata->hw_shutdown) - pdata->hw_shutdown(client); - return error; -} - -static int gp2a_remove(struct i2c_client *client) -{ - struct gp2a_data *dt = i2c_get_clientdata(client); - const struct gp2a_platform_data *pdata = dt->pdata; - - free_irq(client->irq, dt); - - input_unregister_device(dt->input); - kfree(dt); - - gpio_free(pdata->vout_gpio); - - if (pdata->hw_shutdown) - pdata->hw_shutdown(client); - - return 0; -} - -static int __maybe_unused gp2a_suspend(struct device *dev) -{ - struct i2c_client *client = to_i2c_client(dev); - struct gp2a_data *dt = i2c_get_clientdata(client); - int retval = 0; - - if (device_may_wakeup(&client->dev)) { - enable_irq_wake(client->irq); - } else { - mutex_lock(&dt->input->mutex); - if (dt->input->users) - retval = gp2a_disable(dt); - mutex_unlock(&dt->input->mutex); - } - - return retval; -} - -static int __maybe_unused gp2a_resume(struct device *dev) -{ - struct i2c_client *client = to_i2c_client(dev); - struct gp2a_data *dt = i2c_get_clientdata(client); - int retval = 0; - - if (device_may_wakeup(&client->dev)) { - disable_irq_wake(client->irq); - } else { - mutex_lock(&dt->input->mutex); - if (dt->input->users) - retval = gp2a_enable(dt); - mutex_unlock(&dt->input->mutex); - } - - return retval; -} - -static SIMPLE_DEV_PM_OPS(gp2a_pm, gp2a_suspend, gp2a_resume); - -static const struct i2c_device_id gp2a_i2c_id[] = { - { GP2A_I2C_NAME, 0 }, - { } -}; -MODULE_DEVICE_TABLE(i2c, gp2a_i2c_id); - -static struct i2c_driver gp2a_i2c_driver = { - .driver = { - .name = GP2A_I2C_NAME, - .pm = &gp2a_pm, - }, - .probe = gp2a_probe, - .remove = gp2a_remove, - .id_table = gp2a_i2c_id, -}; - -module_i2c_driver(gp2a_i2c_driver); - -MODULE_AUTHOR("Courtney Cavin "); -MODULE_DESCRIPTION("Sharp GP2AP002A00F I2C Proximity/Opto sensor driver"); -MODULE_LICENSE("GPL v2"); diff --git a/include/linux/input/gp2ap002a00f.h b/include/linux/input/gp2ap002a00f.h deleted file mode 100644 index 3614a13a82978..0000000000000 --- a/include/linux/input/gp2ap002a00f.h +++ /dev/null @@ -1,23 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _GP2AP002A00F_H_ -#define _GP2AP002A00F_H_ 
- -#include - -#define GP2A_I2C_NAME "gp2ap002a00f" - -/** - * struct gp2a_platform_data - Sharp gp2ap002a00f proximity platform data - * @vout_gpio: The gpio connected to the object detected pin (VOUT) - * @wakeup: Set to true if the proximity can wake the device from suspend - * @hw_setup: Callback for setting up hardware such as gpios and vregs - * @hw_shutdown: Callback for properly shutting down hardware - */ -struct gp2a_platform_data { - int vout_gpio; - bool wakeup; - int (*hw_setup)(struct i2c_client *client); - int (*hw_shutdown)(struct i2c_client *client); -}; - -#endif -- GitLab From fdba377f962e50a2542e4d5709968c8d79f4815f Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Fri, 17 Apr 2020 16:11:16 +0200 Subject: [PATCH 0155/1971] MAINTAINERS: Add DT Bindings for Renesas Clock Generators The "RENESAS CLOCK DRIVERS" section lacks the related DT bindings. Add them. Signed-off-by: Geert Uytterhoeven Reviewed-by: Stephen Boyd Link: https://lore.kernel.org/r/20200417141116.23019-1-geert+renesas@glider.be --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) diff --git a/MAINTAINERS b/MAINTAINERS index e64e5db314976..1c4f7c60e9ecc 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -14334,6 +14334,7 @@ M: Geert Uytterhoeven L: linux-renesas-soc@vger.kernel.org S: Supported T: git git://git.kernel.org/pub/scm/linux/kernel/git/geert/renesas-drivers.git clk-renesas +F: Documentation/devicetree/bindings/clock/renesas,* F: drivers/clk/renesas/ RENESAS EMEV2 I2C DRIVER -- GitLab From bb15aded5144d36b2a050e1dad415876c0b94234 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Tue, 7 Apr 2020 23:56:43 +0300 Subject: [PATCH 0156/1971] mtd: spi-nor: move #define SPINOR_OP_WRDI The write disable (WRDI) opcode is not really specific to the SST flashes (anymore?) -- move the #define to the main opcode group, just before WREN. Signed-off-by: Sergei Shtylyov Signed-off-by: Tudor Ambarus --- include/linux/mtd/spi-nor.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index 1e2af0ec1f03e..1cc8ed5d59edb 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -20,6 +20,7 @@ */ /* Flash opcodes. */ +#define SPINOR_OP_WRDI 0x04 /* Write disable */ #define SPINOR_OP_WREN 0x06 /* Write enable */ #define SPINOR_OP_RDSR 0x05 /* Read status register */ #define SPINOR_OP_WRSR 0x01 /* Write status register 1 byte */ @@ -80,7 +81,6 @@ /* Used for SST flashes only. */ #define SPINOR_OP_BP 0x02 /* Byte program */ -#define SPINOR_OP_WRDI 0x04 /* Write disable */ #define SPINOR_OP_AAI_WP 0xad /* Auto address increment word program */ /* Used for S3AN flashes only */ -- GitLab From 93920f61c2ad7edb01e63323832585796af75fc9 Mon Sep 17 00:00:00 2001 From: Mark Gross Date: Thu, 16 Apr 2020 17:32:42 +0200 Subject: [PATCH 0157/1971] x86/cpu: Add 'table' argument to cpu_matches() To make cpu_matches() reusable for other matching tables, have it take a pointer to a x86_cpu_id table as an argument. [ bp: Flip arguments order. 
] Signed-off-by: Mark Gross Signed-off-by: Borislav Petkov Signed-off-by: Thomas Gleixner Reviewed-by: Josh Poimboeuf --- arch/x86/kernel/cpu/common.c | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index bed0cb83fe245..1131ae032bf28 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1075,9 +1075,9 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { {} }; -static bool __init cpu_matches(unsigned long which) +static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long which) { - const struct x86_cpu_id *m = x86_match_cpu(cpu_vuln_whitelist); + const struct x86_cpu_id *m = x86_match_cpu(table); return m && !!(m->driver_data & which); } @@ -1097,31 +1097,34 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) u64 ia32_cap = x86_read_arch_cap_msr(); /* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */ - if (!cpu_matches(NO_ITLB_MULTIHIT) && !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO)) + if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) && + !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO)) setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT); - if (cpu_matches(NO_SPECULATION)) + if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION)) return; setup_force_cpu_bug(X86_BUG_SPECTRE_V1); - if (!cpu_matches(NO_SPECTRE_V2)) + if (!cpu_matches(cpu_vuln_whitelist, NO_SPECTRE_V2)) setup_force_cpu_bug(X86_BUG_SPECTRE_V2); - if (!cpu_matches(NO_SSB) && !(ia32_cap & ARCH_CAP_SSB_NO) && + if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) && + !(ia32_cap & ARCH_CAP_SSB_NO) && !cpu_has(c, X86_FEATURE_AMD_SSB_NO)) setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS); if (ia32_cap & ARCH_CAP_IBRS_ALL) setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED); - if (!cpu_matches(NO_MDS) && !(ia32_cap & ARCH_CAP_MDS_NO)) { + if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) && + !(ia32_cap & ARCH_CAP_MDS_NO)) { setup_force_cpu_bug(X86_BUG_MDS); - if (cpu_matches(MSBDS_ONLY)) + if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY)) setup_force_cpu_bug(X86_BUG_MSBDS_ONLY); } - if (!cpu_matches(NO_SWAPGS)) + if (!cpu_matches(cpu_vuln_whitelist, NO_SWAPGS)) setup_force_cpu_bug(X86_BUG_SWAPGS); /* @@ -1139,7 +1142,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) (ia32_cap & ARCH_CAP_TSX_CTRL_MSR))) setup_force_cpu_bug(X86_BUG_TAA); - if (cpu_matches(NO_MELTDOWN)) + if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN)) return; /* Rogue Data Cache Load? No! */ @@ -1148,7 +1151,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN); - if (cpu_matches(NO_L1TF)) + if (cpu_matches(cpu_vuln_whitelist, NO_L1TF)) return; setup_force_cpu_bug(X86_BUG_L1TF); -- GitLab From 7e5b3c267d256822407a22fdce6afdf9cd13f9fb Mon Sep 17 00:00:00 2001 From: Mark Gross Date: Thu, 16 Apr 2020 17:54:04 +0200 Subject: [PATCH 0158/1971] x86/speculation: Add Special Register Buffer Data Sampling (SRBDS) mitigation SRBDS is an MDS-like speculative side channel that can leak bits from the random number generator (RNG) across cores and threads. New microcode serializes the processor access during the execution of RDRAND and RDSEED. This ensures that the shared buffer is overwritten before it is released for reuse. While it is present on all affected CPU models, the microcode mitigation is not needed on models that enumerate ARCH_CAPABILITIES[MDS_NO] in the cases where TSX is not supported or has been disabled with TSX_CTRL. 
The mitigation is activated by default on affected processors and it increases latency for RDRAND and RDSEED instructions. Among other effects this will reduce throughput from /dev/urandom. * Enable administrator to configure the mitigation off when desired using either mitigations=off or srbds=off. * Export vulnerability status via sysfs * Rename file-scoped macros to apply for non-whitelist table initializations. [ bp: Massage, - s/VULNBL_INTEL_STEPPING/VULNBL_INTEL_STEPPINGS/g, - do not read arch cap MSR a second time in tsx_fused_off() - just pass it in, - flip check in cpu_set_bug_bits() to save an indentation level, - reflow comments. jpoimboe: s/Mitigated/Mitigation/ in user-visible strings tglx: Dropped the fused off magic for now ] Signed-off-by: Mark Gross Signed-off-by: Borislav Petkov Signed-off-by: Thomas Gleixner Reviewed-by: Tony Luck Reviewed-by: Pawan Gupta Reviewed-by: Josh Poimboeuf Tested-by: Neelima Krishnan --- .../ABI/testing/sysfs-devices-system-cpu | 1 + .../admin-guide/kernel-parameters.txt | 20 ++++ arch/x86/include/asm/cpufeatures.h | 2 + arch/x86/include/asm/msr-index.h | 4 + arch/x86/kernel/cpu/bugs.c | 106 ++++++++++++++++++ arch/x86/kernel/cpu/common.c | 31 +++++ arch/x86/kernel/cpu/cpu.h | 1 + drivers/base/cpu.c | 8 ++ 8 files changed, 173 insertions(+) diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu index 2e0e3b45d02a7..b39531a3c5bc7 100644 --- a/Documentation/ABI/testing/sysfs-devices-system-cpu +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu @@ -492,6 +492,7 @@ What: /sys/devices/system/cpu/vulnerabilities /sys/devices/system/cpu/vulnerabilities/spec_store_bypass /sys/devices/system/cpu/vulnerabilities/l1tf /sys/devices/system/cpu/vulnerabilities/mds + /sys/devices/system/cpu/vulnerabilities/srbds /sys/devices/system/cpu/vulnerabilities/tsx_async_abort /sys/devices/system/cpu/vulnerabilities/itlb_multihit Date: January 2018 diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index f2a93c8679e88..f720463bd918e 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -4757,6 +4757,26 @@ the kernel will oops in either "warn" or "fatal" mode. + srbds= [X86,INTEL] + Control the Special Register Buffer Data Sampling + (SRBDS) mitigation. + + Certain CPUs are vulnerable to an MDS-like + exploit which can leak bits from the random + number generator. + + By default, this issue is mitigated by + microcode. However, the microcode fix can cause + the RDRAND and RDSEED instructions to become + much slower. Among other effects, this will + result in reduced throughput from /dev/urandom. 
+ + The microcode mitigation can be disabled with + the following option: + + off: Disable mitigation and remove + performance impact to RDRAND and RDSEED + srcutree.counter_wrap_check [KNL] Specifies how frequently to check for grace-period sequence counter wrap for the diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index db189945e9b0c..02dabc9e77b05 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -362,6 +362,7 @@ #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */ #define X86_FEATURE_FSRM (18*32+ 4) /* Fast Short Rep Mov */ #define X86_FEATURE_AVX512_VP2INTERSECT (18*32+ 8) /* AVX-512 Intersect for D/Q */ +#define X86_FEATURE_SRBDS_CTRL (18*32+ 9) /* "" SRBDS mitigation MSR available */ #define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */ #define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */ #define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */ @@ -407,5 +408,6 @@ #define X86_BUG_SWAPGS X86_BUG(21) /* CPU is affected by speculation through SWAPGS */ #define X86_BUG_TAA X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */ #define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */ +#define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */ #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 12c9684d59ba6..3efde600a674c 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -128,6 +128,10 @@ #define TSX_CTRL_RTM_DISABLE BIT(0) /* Disable RTM feature */ #define TSX_CTRL_CPUID_CLEAR BIT(1) /* Disable TSX enumeration */ +/* SRBDS support */ +#define MSR_IA32_MCU_OPT_CTRL 0x00000123 +#define RNGDS_MITG_DIS BIT(0) + #define MSR_IA32_SYSENTER_CS 0x00000174 #define MSR_IA32_SYSENTER_ESP 0x00000175 #define MSR_IA32_SYSENTER_EIP 0x00000176 diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index ed54b3b21c396..56978cb061493 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -41,6 +41,7 @@ static void __init l1tf_select_mitigation(void); static void __init mds_select_mitigation(void); static void __init mds_print_mitigation(void); static void __init taa_select_mitigation(void); +static void __init srbds_select_mitigation(void); /* The base value of the SPEC_CTRL MSR that always has to be preserved. 
*/ u64 x86_spec_ctrl_base; @@ -108,6 +109,7 @@ void __init check_bugs(void) l1tf_select_mitigation(); mds_select_mitigation(); taa_select_mitigation(); + srbds_select_mitigation(); /* * As MDS and TAA mitigations are inter-related, print MDS @@ -397,6 +399,97 @@ static int __init tsx_async_abort_parse_cmdline(char *str) } early_param("tsx_async_abort", tsx_async_abort_parse_cmdline); +#undef pr_fmt +#define pr_fmt(fmt) "SRBDS: " fmt + +enum srbds_mitigations { + SRBDS_MITIGATION_OFF, + SRBDS_MITIGATION_UCODE_NEEDED, + SRBDS_MITIGATION_FULL, + SRBDS_MITIGATION_TSX_OFF, + SRBDS_MITIGATION_HYPERVISOR, +}; + +static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL; + +static const char * const srbds_strings[] = { + [SRBDS_MITIGATION_OFF] = "Vulnerable", + [SRBDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", + [SRBDS_MITIGATION_FULL] = "Mitigation: Microcode", + [SRBDS_MITIGATION_TSX_OFF] = "Mitigation: TSX disabled", + [SRBDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status", +}; + +static bool srbds_off; + +void update_srbds_msr(void) +{ + u64 mcu_ctrl; + + if (!boot_cpu_has_bug(X86_BUG_SRBDS)) + return; + + if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) + return; + + if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED) + return; + + rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); + + switch (srbds_mitigation) { + case SRBDS_MITIGATION_OFF: + case SRBDS_MITIGATION_TSX_OFF: + mcu_ctrl |= RNGDS_MITG_DIS; + break; + case SRBDS_MITIGATION_FULL: + mcu_ctrl &= ~RNGDS_MITG_DIS; + break; + default: + break; + } + + wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); +} + +static void __init srbds_select_mitigation(void) +{ + u64 ia32_cap; + + if (!boot_cpu_has_bug(X86_BUG_SRBDS)) + return; + + /* + * Check to see if this is one of the MDS_NO systems supporting + * TSX that are only exposed to SRBDS when TSX is enabled. 
+ */ + ia32_cap = x86_read_arch_cap_msr(); + if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM)) + srbds_mitigation = SRBDS_MITIGATION_TSX_OFF; + else if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) + srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR; + else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL)) + srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED; + else if (cpu_mitigations_off() || srbds_off) + srbds_mitigation = SRBDS_MITIGATION_OFF; + + update_srbds_msr(); + pr_info("%s\n", srbds_strings[srbds_mitigation]); +} + +static int __init srbds_parse_cmdline(char *str) +{ + if (!str) + return -EINVAL; + + if (!boot_cpu_has_bug(X86_BUG_SRBDS)) + return 0; + + srbds_off = !strcmp(str, "off"); + return 0; +} +early_param("srbds", srbds_parse_cmdline); + #undef pr_fmt #define pr_fmt(fmt) "Spectre V1 : " fmt @@ -1528,6 +1621,11 @@ static char *ibpb_state(void) return ""; } +static ssize_t srbds_show_state(char *buf) +{ + return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]); +} + static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, char *buf, unsigned int bug) { @@ -1572,6 +1670,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr case X86_BUG_ITLB_MULTIHIT: return itlb_multihit_show_state(buf); + case X86_BUG_SRBDS: + return srbds_show_state(buf); + default: break; } @@ -1618,4 +1719,9 @@ ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr { return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT); } + +ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf) +{ + return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS); +} #endif diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 1131ae032bf28..8293ee5149758 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1075,6 +1075,27 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { {} }; +#define VULNBL_INTEL_STEPPINGS(model, steppings, issues) \ + X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6, \ + INTEL_FAM6_##model, steppings, \ + X86_FEATURE_ANY, issues) + +#define SRBDS BIT(0) + +static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { + VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(HASWELL, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(HASWELL_L, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(HASWELL_G, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(BROADWELL_G, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(BROADWELL, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPINGS(0x0, 0xC), SRBDS), + VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPINGS(0x0, 0xD), SRBDS), + {} +}; + static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long which) { const struct x86_cpu_id *m = x86_match_cpu(table); @@ -1142,6 +1163,15 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) (ia32_cap & ARCH_CAP_TSX_CTRL_MSR))) setup_force_cpu_bug(X86_BUG_TAA); + /* + * SRBDS affects CPUs which support RDRAND or RDSEED and are listed + * in the vulnerability blacklist. 
+ */ + if ((cpu_has(c, X86_FEATURE_RDRAND) || + cpu_has(c, X86_FEATURE_RDSEED)) && + cpu_matches(cpu_vuln_blacklist, SRBDS)) + setup_force_cpu_bug(X86_BUG_SRBDS); + if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN)) return; @@ -1594,6 +1624,7 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c) mtrr_ap_init(); validate_apic_and_package_id(c); x86_spec_ctrl_setup_ap(); + update_srbds_msr(); } static __init int setup_noclflush(char *arg) diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h index 37fdefd14f287..fb538fccd24c0 100644 --- a/arch/x86/kernel/cpu/cpu.h +++ b/arch/x86/kernel/cpu/cpu.h @@ -77,6 +77,7 @@ extern void detect_ht(struct cpuinfo_x86 *c); unsigned int aperfmperf_get_khz(int cpu); extern void x86_spec_ctrl_setup_ap(void); +extern void update_srbds_msr(void); extern u64 x86_read_arch_cap_msr(void); diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 9a1c00fbbaefc..d2136ab9b14a0 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -562,6 +562,12 @@ ssize_t __weak cpu_show_itlb_multihit(struct device *dev, return sprintf(buf, "Not affected\n"); } +ssize_t __weak cpu_show_srbds(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "Not affected\n"); +} + static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL); @@ -570,6 +576,7 @@ static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL); static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL); static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL); static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL); +static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL); static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_meltdown.attr, @@ -580,6 +587,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_mds.attr, &dev_attr_tsx_async_abort.attr, &dev_attr_itlb_multihit.attr, + &dev_attr_srbds.attr, NULL }; -- GitLab From 7222a1b5b87417f22265c92deea76a6aecd0fb0f Mon Sep 17 00:00:00 2001 From: Mark Gross Date: Thu, 16 Apr 2020 18:21:51 +0200 Subject: [PATCH 0159/1971] x86/speculation: Add SRBDS vulnerability and mitigation documentation Add documentation for the SRBDS vulnerability and its mitigation. [ bp: Massage. jpoimboe: sysfs table strings. ] Signed-off-by: Mark Gross Signed-off-by: Borislav Petkov Reviewed-by: Tony Luck Reviewed-by: Josh Poimboeuf --- Documentation/admin-guide/hw-vuln/index.rst | 1 + .../special-register-buffer-data-sampling.rst | 148 ++++++++++++++++++ 2 files changed, 149 insertions(+) create mode 100644 Documentation/admin-guide/hw-vuln/special-register-buffer-data-sampling.rst diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst index 0795e3c2643f2..ca4dbdd9016d5 100644 --- a/Documentation/admin-guide/hw-vuln/index.rst +++ b/Documentation/admin-guide/hw-vuln/index.rst @@ -14,3 +14,4 @@ are configurable at compile, boot or run time. mds tsx_async_abort multihit.rst + special-register-buffer-data-sampling.rst diff --git a/Documentation/admin-guide/hw-vuln/special-register-buffer-data-sampling.rst b/Documentation/admin-guide/hw-vuln/special-register-buffer-data-sampling.rst new file mode 100644 index 0000000000000..6a473da80b62a --- /dev/null +++ b/Documentation/admin-guide/hw-vuln/special-register-buffer-data-sampling.rst @@ -0,0 +1,148 @@ +.. 
SPDX-License-Identifier: GPL-2.0 + +SRBDS - Special Register Buffer Data Sampling +============================================= + +SRBDS is a hardware vulnerability that allows MDS :doc:`mds` techniques to +infer values returned from special register accesses. Special register +accesses are accesses to off core registers. According to Intel's evaluation, +the special register reads that have a security expectation of privacy are +RDRAND, RDSEED and SGX EGETKEY. + +When RDRAND, RDSEED and EGETKEY instructions are used, the data is moved +to the core through the special register mechanism that is susceptible +to MDS attacks. + +Affected processors +-------------------- +Core models (desktop, mobile, Xeon-E3) that implement RDRAND and/or RDSEED may +be affected. + +A processor is affected by SRBDS if its Family_Model and stepping is +in the following list, with the exception of the listed processors +exporting MDS_NO while Intel TSX is available yet not enabled. The +latter class of processors are only affected when Intel TSX is enabled +by software using TSX_CTRL_MSR otherwise they are not affected. + + ============= ============ ======== + common name Family_Model Stepping + ============= ============ ======== + Haswell 06_3CH All + Haswell_L 06_45H All + Haswell_G 06_46H All + + Broadwell_G 06_47H All + Broadwell 06_3DH All + + Skylake_L 06_4EH All + Skylake 06_5EH All + + Kabylake_L 06_8EH <=0xC + + Kabylake 06_9EH <=0xD + ============= ============ ======== + +Related CVEs +------------ + +The following CVE entry is related to this SRBDS issue: + + ============== ===== ===================================== + CVE-2020-0543 SRBDS Special Register Buffer Data Sampling + ============== ===== ===================================== + +Attack scenarios +---------------- +An unprivileged user can extract values returned from RDRAND and RDSEED +executed on another core or sibling thread using MDS techniques. + + +Mitigation mechanism +------------------- +Intel will release microcode updates that modify the RDRAND, RDSEED, and +EGETKEY instructions to overwrite secret special register data in the shared +staging buffer before the secret data can be accessed by another logical +processor. + +During execution of the RDRAND, RDSEED, or EGETKEY instructions, off-core +accesses from other logical processors will be delayed until the special +register read is complete and the secret data in the shared staging buffer is +overwritten. + +This has three effects on performance: + +#. RDRAND, RDSEED, or EGETKEY instructions have higher latency. + +#. Executing RDRAND at the same time on multiple logical processors will be + serialized, resulting in an overall reduction in the maximum RDRAND + bandwidth. + +#. Executing RDRAND, RDSEED or EGETKEY will delay memory accesses from other + logical processors that miss their core caches, with an impact similar to + legacy locked cache-line-split accesses. + +The microcode updates provide an opt-out mechanism (RNGDS_MITG_DIS) to disable +the mitigation for RDRAND and RDSEED instructions executed outside of Intel +Software Guard Extensions (Intel SGX) enclaves. On logical processors that +disable the mitigation using this opt-out mechanism, RDRAND and RDSEED do not +take longer to execute and do not impact performance of sibling logical +processors memory accesses. The opt-out mechanism does not affect Intel SGX +enclaves (including execution of RDRAND or RDSEED inside an enclave, as well +as EGETKEY execution). 
+ +IA32_MCU_OPT_CTRL MSR Definition +-------------------------------- +Along with the mitigation for this issue, Intel added a new thread-scope +IA32_MCU_OPT_CTRL MSR, (address 0x123). The presence of this MSR and +RNGDS_MITG_DIS (bit 0) is enumerated by CPUID.(EAX=07H,ECX=0).EDX[SRBDS_CTRL = +9]==1. This MSR is introduced through the microcode update. + +Setting IA32_MCU_OPT_CTRL[0] (RNGDS_MITG_DIS) to 1 for a logical processor +disables the mitigation for RDRAND and RDSEED executed outside of an Intel SGX +enclave on that logical processor. Opting out of the mitigation for a +particular logical processor does not affect the RDRAND and RDSEED mitigations +for other logical processors. + +Note that inside of an Intel SGX enclave, the mitigation is applied regardless +of the value of RNGDS_MITG_DS. + +Mitigation control on the kernel command line +--------------------------------------------- +The kernel command line allows control over the SRBDS mitigation at boot time +with the option "srbds=". The option for this is: + + ============= ============================================================= + off This option disables SRBDS mitigation for RDRAND and RDSEED on + affected platforms. + ============= ============================================================= + +SRBDS System Information +----------------------- +The Linux kernel provides vulnerability status information through sysfs. For +SRBDS this can be accessed by the following sysfs file: +/sys/devices/system/cpu/vulnerabilities/srbds + +The possible values contained in this file are: + + ============================== ============================================= + Not affected Processor not vulnerable + Vulnerable Processor vulnerable and mitigation disabled + Vulnerable: No microcode Processor vulnerable and microcode is missing + mitigation + Mitigation: Microcode Processor is vulnerable and mitigation is in + effect. + Mitigation: TSX disabled Processor is only vulnerable when TSX is + enabled while this system was booted with TSX + disabled. + Unknown: Dependent on + hypervisor status Running on virtual guest processor that is + affected but with no way to know if host + processor is mitigated or vulnerable. + ============================== ============================================= + +SRBDS Default mitigation +------------------------ +This new microcode serializes processor access during execution of RDRAND, +RDSEED ensures that the shared buffer is overwritten before it is released for +reuse. Use the "srbds=off" kernel command line to disable the mitigation for +RDRAND and RDSEED. -- GitLab From 954fd81ce83b7077b8e7b7b0fbf3ebf19d4eaff9 Mon Sep 17 00:00:00 2001 From: Takahiro Kuwano Date: Mon, 20 Apr 2020 14:44:41 +0300 Subject: [PATCH 0160/1971] mtd: spi-nor: spansion: Enable dual and quad read for s25fl256s0 The s25fl256s0 supports dual and quad read like s25fl256s1. Enable it by adding SPI_NOR_DUAL_READ and SPI_NOR_QUAD_READ flags to the flash_info entry. Tested with the device and confirmed that is working. 
Signed-off-by: Takahiro Kuwano Signed-off-by: Tudor Ambarus --- drivers/mtd/spi-nor/spansion.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c index 6756202ace4b1..88183eba8ac11 100644 --- a/drivers/mtd/spi-nor/spansion.c +++ b/drivers/mtd/spi-nor/spansion.c @@ -22,7 +22,9 @@ static const struct flash_info spansion_parts[] = { { "s25fl128s1", INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) }, - { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, USE_CLSR) }, + { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, + SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + USE_CLSR) }, { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) }, -- GitLab From 23aadcb9a8d59cd12fbf2cbe9be8479429bf0dae Mon Sep 17 00:00:00 2001 From: Anson Huang Date: Wed, 18 Mar 2020 09:39:25 +0800 Subject: [PATCH 0161/1971] clk: imx: clk-sscg-pll: Remove unnecessary blank lines Remove many unnecessary blank lines for cleanup. Signed-off-by: Anson Huang Reviewed-by: Abel Vesa Reviewed-by: Stephen Boyd Signed-off-by: Shawn Guo --- drivers/clk/imx/clk-sscg-pll.c | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/drivers/clk/imx/clk-sscg-pll.c b/drivers/clk/imx/clk-sscg-pll.c index d4a2be16d1329..773d8a545cdfe 100644 --- a/drivers/clk/imx/clk-sscg-pll.c +++ b/drivers/clk/imx/clk-sscg-pll.c @@ -72,7 +72,6 @@ struct clk_sscg_pll_setup { int divr2, divf2; int divq; int bypass; - uint64_t vco1; uint64_t vco2; uint64_t fout; @@ -86,11 +85,8 @@ struct clk_sscg_pll_setup { struct clk_sscg_pll { struct clk_hw hw; const struct clk_ops ops; - void __iomem *base; - struct clk_sscg_pll_setup setup; - u8 parent; u8 bypass1; u8 bypass2; @@ -194,7 +190,6 @@ static int clk_sscg_pll2_find_setup(struct clk_sscg_pll_setup *setup, struct clk_sscg_pll_setup *temp_setup, uint64_t ref) { - int ret; if (ref < PLL_STAGE1_MIN_FREQ || ref > PLL_STAGE1_MAX_FREQ) @@ -253,7 +248,6 @@ static int clk_sscg_pll1_find_setup(struct clk_sscg_pll_setup *setup, struct clk_sscg_pll_setup *temp_setup, uint64_t ref) { - int ret; if (ref < PLL_REF_MIN_FREQ || ref > PLL_REF_MAX_FREQ) @@ -280,7 +274,6 @@ static int clk_sscg_pll_find_setup(struct clk_sscg_pll_setup *setup, temp_setup.fout_request = rate; switch (try_bypass) { - case PLL_BYPASS2: if (prate == rate) { setup->bypass = PLL_BYPASS2; @@ -288,11 +281,9 @@ static int clk_sscg_pll_find_setup(struct clk_sscg_pll_setup *setup, ret = 0; } break; - case PLL_BYPASS1: ret = clk_sscg_pll2_find_setup(setup, &temp_setup, prate); break; - case PLL_BYPASS_NONE: ret = clk_sscg_pll1_find_setup(setup, &temp_setup, prate); break; @@ -301,7 +292,6 @@ static int clk_sscg_pll_find_setup(struct clk_sscg_pll_setup *setup, return ret; } - static int clk_sscg_pll_is_prepared(struct clk_hw *hw) { struct clk_sscg_pll *pll = to_clk_sscg_pll(hw); -- GitLab From 9558b51ab739920aaf3c400d2df29e5ca4f19ec5 Mon Sep 17 00:00:00 2001 From: Anson Huang Date: Fri, 20 Mar 2020 07:44:03 +0800 Subject: [PATCH 0162/1971] clk: imx: clk-pllv3: Use readl_relaxed_poll_timeout() for PLL lock wait Use readl_relaxed_poll_timeout() for PLL lock wait which can simplify the code a lot. 
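For readers unfamiliar with the helper, readl_relaxed_poll_timeout() (from
<linux/iopoll.h>) wraps the whole read/check/sleep/timeout loop in a single
call. The fragment below is only an illustrative sketch with a made-up
register base and lock bit, not code from this driver:

	#include <linux/iopoll.h>
	#include <linux/bits.h>

	/*
	 * Illustrative only: poll a hypothetical status register until its
	 * lock bit (assumed here to be BIT(31)) is set, sleeping 500us
	 * between reads and giving up after 10000us.  The helper returns 0
	 * on success or -ETIMEDOUT, so no open-coded jiffies loop is needed.
	 */
	static int example_wait_lock(void __iomem *base)
	{
		u32 val;

		return readl_relaxed_poll_timeout(base, val, val & BIT(31),
						  500, 10000);
	}
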
Signed-off-by: Anson Huang Reviewed-by: Abel Vesa Reviewed-by: Stephen Boyd Signed-off-by: Shawn Guo --- drivers/clk/imx/clk-pllv3.c | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/drivers/clk/imx/clk-pllv3.c b/drivers/clk/imx/clk-pllv3.c index df91a8244fb4d..a7db93030e025 100644 --- a/drivers/clk/imx/clk-pllv3.c +++ b/drivers/clk/imx/clk-pllv3.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -25,6 +26,8 @@ #define IMX7_ENET_PLL_POWER (0x1 << 5) #define IMX7_DDR_PLL_POWER (0x1 << 20) +#define PLL_LOCK_TIMEOUT 10000 + /** * struct clk_pllv3 - IMX PLL clock version 3 * @clk_hw: clock source @@ -53,23 +56,14 @@ struct clk_pllv3 { static int clk_pllv3_wait_lock(struct clk_pllv3 *pll) { - unsigned long timeout = jiffies + msecs_to_jiffies(10); u32 val = readl_relaxed(pll->base) & pll->power_bit; /* No need to wait for lock when pll is not powered up */ if ((pll->powerup_set && !val) || (!pll->powerup_set && val)) return 0; - /* Wait for PLL to lock */ - do { - if (readl_relaxed(pll->base) & BM_PLL_LOCK) - break; - if (time_after(jiffies, timeout)) - break; - usleep_range(50, 500); - } while (1); - - return readl_relaxed(pll->base) & BM_PLL_LOCK ? 0 : -ETIMEDOUT; + return readl_relaxed_poll_timeout(pll->base, val, val & BM_PLL_LOCK, + 500, PLL_LOCK_TIMEOUT); } static int clk_pllv3_prepare(struct clk_hw *hw) -- GitLab From bb737bbe48bea9854455cb61ea1dc06e92ce586c Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Mon, 20 Apr 2020 17:01:34 +0200 Subject: [PATCH 0163/1971] virtiofs: schedule blocking async replies in separate worker In virtiofs (unlike in regular fuse) processing of async replies is serialized. This can result in a deadlock in rare corner cases when there's a circular dependency between the completion of two or more async replies. Such a deadlock can be reproduced with xfstests:generic/503 if TEST_DIR == SCRATCH_MNT (which is a misconfiguration): - Process A is waiting for page lock in worker thread context and blocked (virtio_fs_requests_done_work()). - Process B is holding page lock and waiting for pending writes to finish (fuse_wait_on_page_writeback()). - Write requests are waiting in virtqueue and can't complete because worker thread is blocked on page lock (process A). Fix this by creating a unique work_struct for each async reply that can block (O_DIRECT read). 
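The fix follows the common "one work item per blocking completion" pattern.
A simplified, self-contained sketch of that pattern (with hypothetical names,
not the actual virtiofs structures used in the patch below) looks like this:

	#include <linux/kernel.h>
	#include <linux/workqueue.h>
	#include <linux/slab.h>

	/* Hypothetical per-reply context; the real patch embeds the fuse
	 * request and virtqueue pointers instead of a void pointer. */
	struct blocking_reply_work {
		struct work_struct work;
		void *req;
	};

	static void blocking_reply_fn(struct work_struct *work)
	{
		struct blocking_reply_work *w =
			container_of(work, struct blocking_reply_work, work);

		/* Completing w->req may sleep here without stalling the
		 * completion path that queued this work item. */
		kfree(w);
	}

	static void defer_blocking_reply(void *req)
	{
		struct blocking_reply_work *w;

		w = kzalloc(sizeof(*w), GFP_NOFS | __GFP_NOFAIL);
		w->req = req;
		INIT_WORK(&w->work, blocking_reply_fn);
		schedule_work(&w->work);
	}
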
Fixes: a62a8ef9d97d ("virtio-fs: add virtiofs filesystem") Signed-off-by: Vivek Goyal Signed-off-by: Miklos Szeredi --- fs/fuse/file.c | 1 + fs/fuse/fuse_i.h | 1 + fs/fuse/virtio_fs.c | 106 +++++++++++++++++++++++++++++--------------- 3 files changed, 73 insertions(+), 35 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 9d67b830fb7a2..d400b71b98d55 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -712,6 +712,7 @@ static ssize_t fuse_async_req_send(struct fuse_conn *fc, spin_unlock(&io->lock); ia->ap.args.end = fuse_aio_complete_req; + ia->ap.args.may_block = io->should_dirty; err = fuse_simple_background(fc, &ia->ap.args, GFP_KERNEL); if (err) fuse_aio_complete_req(fc, &ia->ap.args, err); diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index ca344bf714045..d7cde216fc871 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -249,6 +249,7 @@ struct fuse_args { bool out_argvar:1; bool page_zeroing:1; bool page_replace:1; + bool may_block:1; struct fuse_in_arg in_args[3]; struct fuse_arg out_args[2]; void (*end)(struct fuse_conn *fc, struct fuse_args *args, int error); diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c index bade747689033..0c6ef5d3c6ab8 100644 --- a/fs/fuse/virtio_fs.c +++ b/fs/fuse/virtio_fs.c @@ -60,6 +60,12 @@ struct virtio_fs_forget { struct virtio_fs_forget_req req; }; +struct virtio_fs_req_work { + struct fuse_req *req; + struct virtio_fs_vq *fsvq; + struct work_struct done_work; +}; + static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq, struct fuse_req *req, bool in_flight); @@ -485,19 +491,67 @@ static void copy_args_from_argbuf(struct fuse_args *args, struct fuse_req *req) } /* Work function for request completion */ +static void virtio_fs_request_complete(struct fuse_req *req, + struct virtio_fs_vq *fsvq) +{ + struct fuse_pqueue *fpq = &fsvq->fud->pq; + struct fuse_conn *fc = fsvq->fud->fc; + struct fuse_args *args; + struct fuse_args_pages *ap; + unsigned int len, i, thislen; + struct page *page; + + /* + * TODO verify that server properly follows FUSE protocol + * (oh.uniq, oh.len) + */ + args = req->args; + copy_args_from_argbuf(args, req); + + if (args->out_pages && args->page_zeroing) { + len = args->out_args[args->out_numargs - 1].size; + ap = container_of(args, typeof(*ap), args); + for (i = 0; i < ap->num_pages; i++) { + thislen = ap->descs[i].length; + if (len < thislen) { + WARN_ON(ap->descs[i].offset); + page = ap->pages[i]; + zero_user_segment(page, len, thislen); + len = 0; + } else { + len -= thislen; + } + } + } + + spin_lock(&fpq->lock); + clear_bit(FR_SENT, &req->flags); + spin_unlock(&fpq->lock); + + fuse_request_end(fc, req); + spin_lock(&fsvq->lock); + dec_in_flight_req(fsvq); + spin_unlock(&fsvq->lock); +} + +static void virtio_fs_complete_req_work(struct work_struct *work) +{ + struct virtio_fs_req_work *w = + container_of(work, typeof(*w), done_work); + + virtio_fs_request_complete(w->req, w->fsvq); + kfree(w); +} + static void virtio_fs_requests_done_work(struct work_struct *work) { struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq, done_work); struct fuse_pqueue *fpq = &fsvq->fud->pq; - struct fuse_conn *fc = fsvq->fud->fc; struct virtqueue *vq = fsvq->vq; struct fuse_req *req; - struct fuse_args_pages *ap; struct fuse_req *next; - struct fuse_args *args; - unsigned int len, i, thislen; - struct page *page; + unsigned int len; LIST_HEAD(reqs); /* Collect completed requests off the virtqueue */ @@ -515,38 +569,20 @@ static void virtio_fs_requests_done_work(struct work_struct *work) /* End 
requests */ list_for_each_entry_safe(req, next, &reqs, list) { - /* - * TODO verify that server properly follows FUSE protocol - * (oh.uniq, oh.len) - */ - args = req->args; - copy_args_from_argbuf(args, req); - - if (args->out_pages && args->page_zeroing) { - len = args->out_args[args->out_numargs - 1].size; - ap = container_of(args, typeof(*ap), args); - for (i = 0; i < ap->num_pages; i++) { - thislen = ap->descs[i].length; - if (len < thislen) { - WARN_ON(ap->descs[i].offset); - page = ap->pages[i]; - zero_user_segment(page, len, thislen); - len = 0; - } else { - len -= thislen; - } - } - } - - spin_lock(&fpq->lock); - clear_bit(FR_SENT, &req->flags); list_del_init(&req->list); - spin_unlock(&fpq->lock); - fuse_request_end(fc, req); - spin_lock(&fsvq->lock); - dec_in_flight_req(fsvq); - spin_unlock(&fsvq->lock); + /* blocking async request completes in a worker context */ + if (req->args->may_block) { + struct virtio_fs_req_work *w; + + w = kzalloc(sizeof(*w), GFP_NOFS | __GFP_NOFAIL); + INIT_WORK(&w->done_work, virtio_fs_complete_req_work); + w->fsvq = fsvq; + w->req = req; + schedule_work(&w->done_work); + } else { + virtio_fs_request_complete(req, fsvq); + } } } -- GitLab From a5d8422cc9598b11054faf34772d9ce68ae204fd Mon Sep 17 00:00:00 2001 From: Masayoshi Mizuma Date: Tue, 17 Dec 2019 15:55:20 -0500 Subject: [PATCH 0164/1971] virtiofs: Add mount option and atime behavior to the doc Add a section to show the mount option and a subsection to show the atime behavior. Signed-off-by: Masayoshi Mizuma Signed-off-by: Miklos Szeredi --- Documentation/filesystems/virtiofs.rst | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/Documentation/filesystems/virtiofs.rst b/Documentation/filesystems/virtiofs.rst index e06e4951cb395..fd4d2484e9497 100644 --- a/Documentation/filesystems/virtiofs.rst +++ b/Documentation/filesystems/virtiofs.rst @@ -39,6 +39,20 @@ Mount file system with tag ``myfs`` on ``/mnt``: Please see https://virtio-fs.gitlab.io/ for details on how to configure QEMU and the virtiofsd daemon. +Mount options +------------- + +virtiofs supports general VFS mount options, for example, remount, +ro, rw, context, etc. It also supports FUSE mount options. + +atime behavior +^^^^^^^^^^^^^^ + +The atime-related mount options, for example, noatime, strictatime, +are ignored. The atime behavior for virtiofs is the same as the +underlying filesystem of the directory that has been exported +on the host. + Internals ========= Since the virtio-fs device uses the FUSE protocol for file system requests, the -- GitLab From 0e9fb6f17ad5b386b75451328975a07d7d953c6d Mon Sep 17 00:00:00 2001 From: Vasily Averin Date: Mon, 19 Aug 2019 09:53:50 +0300 Subject: [PATCH 0165/1971] fuse: BUG_ON correction in fuse_dev_splice_write() commit 963545357202 ("fuse: reduce allocation size for splice_write") changed size of bufs array, so BUG_ON which checks the index of the array shold also be fixed. 
[SzM: turn BUG_ON into WARN_ON] Fixes: 963545357202 ("fuse: reduce allocation size for splice_write") Signed-off-by: Vasily Averin Signed-off-by: Miklos Szeredi --- fs/fuse/dev.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 97eec7522bf20..5c155437a455d 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -1977,8 +1977,9 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe, struct pipe_buffer *ibuf; struct pipe_buffer *obuf; - BUG_ON(nbuf >= pipe->ring_size); - BUG_ON(tail == head); + if (WARN_ON(nbuf >= count || tail == head)) + goto out_free; + ibuf = &pipe->bufs[tail & mask]; obuf = &bufs[nbuf]; -- GitLab From 75d892588e959573b321895003462d88cae2cff3 Mon Sep 17 00:00:00 2001 From: Kirill Tkhai Date: Fri, 28 Feb 2020 15:15:24 +0300 Subject: [PATCH 0166/1971] fuse: Update stale comment in queue_interrupt() Fixes: 04ec5af0776e "fuse: export fuse_end_request()" Signed-off-by: Kirill Tkhai Signed-off-by: Miklos Szeredi --- fs/fuse/dev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 5c155437a455d..6bda7d498f1ae 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -342,7 +342,7 @@ static int queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req) list_add_tail(&req->intr_entry, &fiq->interrupts); /* * Pairs with smp_mb() implied by test_and_set_bit() - * from request_end(). + * from fuse_request_end(). */ smp_mb(); if (test_bit(FR_FINISHED, &req->flags)) { -- GitLab From 150ccc181588b018891dc973f47905d574677b21 Mon Sep 17 00:00:00 2001 From: Xiang Chen Date: Fri, 17 Apr 2020 14:07:57 +0800 Subject: [PATCH 0167/1971] mtd: spi-nor: Enable locking for n25q128a11 As 4bit block protection patchset for some micron models are merged, n25q128a11 also uses 4 bit Block Protection scheme, so enable locking for it. Tested it on n25q128a11, the locking functions work well. Signed-off-by: Xiang Chen Reviewed-by: Jungseung Lee Tested-by: Shreyas Joshi Signed-off-by: Tudor Ambarus --- drivers/mtd/spi-nor/micron-st.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/spi-nor/micron-st.c b/drivers/mtd/spi-nor/micron-st.c index 6c034b9718e2c..02c0b53f60971 100644 --- a/drivers/mtd/spi-nor/micron-st.c +++ b/drivers/mtd/spi-nor/micron-st.c @@ -29,7 +29,9 @@ static const struct flash_info st_parts[] = { { "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) }, { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, - SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) }, + SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | + SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6) }, { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) }, { "mt25ql256a", INFO6(0x20ba19, 0x104400, 64 * 1024, 512, -- GitLab From f47ab3c2f5338828a67e89d5f688d2cef9605245 Mon Sep 17 00:00:00 2001 From: Bryan O'Donoghue Date: Sun, 29 Mar 2020 13:41:16 +0100 Subject: [PATCH 0168/1971] clk: qcom: msm8916: Fix the address location of pll->config_reg During the process of debugging a processor derived from the msm8916 which we found the new processor was not starting one of its PLLs. After tracing the addresses and writes that downstream was doing and comparing to upstream it became obvious that we were writing to a different register location than downstream when trying to configure the PLL. This error is also present in upstream msm8916. 
As an example clk-pll.c::clk_pll_recalc_rate wants to write to pll->config_reg updating the bit-field POST_DIV_RATIO. That bit-field is defined in PLL_USER_CTL not in PLL_CONFIG_CTL. Taking the BIMC PLL as an example lm80-p0436-13_c_qc_snapdragon_410_processor_hrd.pdf 0x01823010 GCC_BIMC_PLL_USER_CTL 0x01823014 GCC_BIMC_PLL_CONFIG_CTL This pattern is repeated for gpll0, gpll1, gpll2 and bimc_pll. This error is likely not apparent since the bootloader will already have initialized these PLLs. This patch corrects the location of config_reg from PLL_CONFIG_CTL to PLL_USER_CTL for all relevant PLLs on msm8916. Fixes commit 3966fab8b6ab ("clk: qcom: Add MSM8916 Global Clock Controller support") Cc: Georgi Djakov Cc: Andy Gross Cc: Bjorn Andersson Cc: Michael Turquette Cc: Stephen Boyd Signed-off-by: Bryan O'Donoghue Link: https://lkml.kernel.org/r/20200329124116.4185447-1-bryan.odonoghue@linaro.org Signed-off-by: Stephen Boyd --- drivers/clk/qcom/gcc-msm8916.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c index 4e329a7baf2ba..17e4a5a2a9fde 100644 --- a/drivers/clk/qcom/gcc-msm8916.c +++ b/drivers/clk/qcom/gcc-msm8916.c @@ -260,7 +260,7 @@ static struct clk_pll gpll0 = { .l_reg = 0x21004, .m_reg = 0x21008, .n_reg = 0x2100c, - .config_reg = 0x21014, + .config_reg = 0x21010, .mode_reg = 0x21000, .status_reg = 0x2101c, .status_bit = 17, @@ -287,7 +287,7 @@ static struct clk_pll gpll1 = { .l_reg = 0x20004, .m_reg = 0x20008, .n_reg = 0x2000c, - .config_reg = 0x20014, + .config_reg = 0x20010, .mode_reg = 0x20000, .status_reg = 0x2001c, .status_bit = 17, @@ -314,7 +314,7 @@ static struct clk_pll gpll2 = { .l_reg = 0x4a004, .m_reg = 0x4a008, .n_reg = 0x4a00c, - .config_reg = 0x4a014, + .config_reg = 0x4a010, .mode_reg = 0x4a000, .status_reg = 0x4a01c, .status_bit = 17, @@ -341,7 +341,7 @@ static struct clk_pll bimc_pll = { .l_reg = 0x23004, .m_reg = 0x23008, .n_reg = 0x2300c, - .config_reg = 0x23014, + .config_reg = 0x23010, .mode_reg = 0x23000, .status_reg = 0x2301c, .status_bit = 17, -- GitLab From f37b451f45a2fb0704b5d152563c921639f98eb8 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 22 Apr 2020 16:05:39 +0300 Subject: [PATCH 0169/1971] libnvdimm: Replace guid_copy() with import_guid() where it makes sense There is a specific API to treat raw data as GUID, i.e. import_guid(). Use it instead of guid_copy() with explicit casting. Signed-off-by: Andy Shevchenko Reviewed-by: Ira Weiny Link: https://lore.kernel.org/r/20200422130539.45636-1-andriy.shevchenko@linux.intel.com Signed-off-by: Dan Williams --- drivers/acpi/nfit/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index fa4500f9cfd13..7c138a4edc03e 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -2293,7 +2293,7 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc, nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL); if (!nd_set) return -ENOMEM; - guid_copy(&nd_set->type_guid, (guid_t *) spa->range_guid); + import_guid(&nd_set->type_guid, spa->range_guid); info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL); if (!info) -- GitLab From 0d8173f297dfedf1c11c7b6f9b1ec512c06d59a7 Mon Sep 17 00:00:00 2001 From: Lubomir Rintel Date: Sun, 19 Apr 2020 18:49:07 +0200 Subject: [PATCH 0170/1971] dmaengine: mmp_tdma: Drop "mmp_tdma: from error messages Drop a redundant "mmp_tdma:" from some error messages. 
The dev_err() appends mostly the same thing for us: [ 120.756530] mmp-tdma d42a0800.adma: mmp_tdma: unknown burst size. Signed-off-by: Lubomir Rintel Link: https://lore.kernel.org/r/20200419164912.670973-3-lkundrak@v3.sk Signed-off-by: Vinod Koul --- drivers/dma/mmp_tdma.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index 10117f271b12b..fa00665efd9d8 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c @@ -235,7 +235,7 @@ static int mmp_tdma_config_chan(struct dma_chan *chan) tdcr |= TDCR_BURSTSZ_128B; break; default: - dev_err(tdmac->dev, "mmp_tdma: unknown burst size.\n"); + dev_err(tdmac->dev, "unknown burst size.\n"); return -EINVAL; } @@ -250,7 +250,7 @@ static int mmp_tdma_config_chan(struct dma_chan *chan) tdcr |= TDCR_SSZ_32_BITS; break; default: - dev_err(tdmac->dev, "mmp_tdma: unknown bus size.\n"); + dev_err(tdmac->dev, "unknown bus size.\n"); return -EINVAL; } } else if (tdmac->type == PXA910_SQU) { @@ -276,7 +276,7 @@ static int mmp_tdma_config_chan(struct dma_chan *chan) tdcr |= TDCR_BURSTSZ_SQU_32B; break; default: - dev_err(tdmac->dev, "mmp_tdma: unknown burst size.\n"); + dev_err(tdmac->dev, "unknown burst size.\n"); return -EINVAL; } } -- GitLab From 4719d4b71562182dcb86401898b0ee205ea28ee1 Mon Sep 17 00:00:00 2001 From: Lubomir Rintel Date: Sun, 19 Apr 2020 18:49:10 +0200 Subject: [PATCH 0171/1971] dmaengine: mmp_tdma: Log an error if channel is in wrong state Let's log an error if the channel can't be prepared because it is in an unexpected state. Signed-off-by: Lubomir Rintel Link: https://lore.kernel.org/r/20200419164912.670973-6-lkundrak@v3.sk Signed-off-by: Vinod Koul --- drivers/dma/mmp_tdma.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index fa00665efd9d8..1597f6ebf3357 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c @@ -427,8 +427,10 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic( int num_periods = buf_len / period_len; int i = 0, buf = 0; - if (tdmac->status != DMA_COMPLETE) + if (tdmac->status != DMA_COMPLETE) { + dev_err(tdmac->dev, "controller busy"); return NULL; + } if (period_len > TDMA_MAX_XFER_BYTES) { dev_err(tdmac->dev, -- GitLab From baed6b34ceea87bc6e88efd84a92c44559f2dc6c Mon Sep 17 00:00:00 2001 From: Lubomir Rintel Date: Sun, 19 Apr 2020 18:49:11 +0200 Subject: [PATCH 0172/1971] dmaengine: mmp_tdma: Fill in slave capabilities This makes dma_get_slave_caps() work with the device so that it could actually be used with soc-generic-dmaengine-pcm. 
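For context, a client (such as the generic PCM layer mentioned above)
consumes these fields through dma_get_slave_caps(). A minimal consumer-side
sketch, not taken from this patch, is shown below:

	#include <linux/dmaengine.h>

	/*
	 * Illustrative check that a channel supports device-to-memory
	 * transfers; returns false if the controller did not report its
	 * capabilities.
	 */
	static bool example_supports_dev_to_mem(struct dma_chan *chan)
	{
		struct dma_slave_caps caps;

		if (dma_get_slave_caps(chan, &caps))
			return false;

		return caps.directions & BIT(DMA_DEV_TO_MEM);
	}
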
Signed-off-by: Lubomir Rintel Link: https://lore.kernel.org/r/20200419164912.670973-7-lkundrak@v3.sk Signed-off-by: Vinod Koul --- drivers/dma/mmp_tdma.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index 1597f6ebf3357..2e318558c6446 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c @@ -703,6 +703,17 @@ static int mmp_tdma_probe(struct platform_device *pdev) tdev->device.device_terminate_all = mmp_tdma_terminate_all; tdev->device.copy_align = DMAENGINE_ALIGN_8_BYTES; + tdev->device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + if (type == MMP_AUD_TDMA) { + tdev->device.max_burst = SZ_128; + tdev->device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + tdev->device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + } else if (type == PXA910_SQU) { + tdev->device.max_burst = SZ_32; + } + tdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; + tdev->device.descriptor_reuse = true; + dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); platform_set_drvdata(pdev, tdev); -- GitLab From a6e26648e6e2b1d4024e7f827bb63dcc9880bb36 Mon Sep 17 00:00:00 2001 From: Lubomir Rintel Date: Sun, 19 Apr 2020 18:49:12 +0200 Subject: [PATCH 0173/1971] dmaengine: mmp_tdma: Remove the MMP_SRAM dependency A generic SRAM will driver for Device Tree enabled platforms will do as well. The non-DT drivers that use mmp_tdma to transfer audio samples to and from the audio SRAM should depend on MMP_SRAM themselves. Signed-off-by: Lubomir Rintel Link: https://lore.kernel.org/r/20200419164912.670973-8-lkundrak@v3.sk Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index c35c0e03b40f0..037ada9cc0563 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -394,12 +394,10 @@ config MMP_TDMA bool "MMP Two-Channel DMA support" depends on ARCH_MMP || COMPILE_TEST select DMA_ENGINE - select MMP_SRAM if ARCH_MMP select GENERIC_ALLOCATOR help Support the MMP Two-Channel DMA engine. This engine used for MMP Audio DMA and pxa910 SQU. - It needs sram driver under mach-mmp. config MOXART_DMA tristate "MOXART DMA support" -- GitLab From 5a87c506ed7690091327be752a858634b88f6034 Mon Sep 17 00:00:00 2001 From: Leonid Ravich Date: Thu, 23 Apr 2020 00:09:16 +0300 Subject: [PATCH 0174/1971] dmaengine: ioat: removing duplicate code from timeout handler moving duplicate code from timeout error handling to common function. 
Acked-by: Dave Jiang Signed-off-by: Leonid Ravich Link: https://lore.kernel.org/r/1587589761-32690-1-git-send-email-leonid.ravich@dell.com Signed-off-by: Vinod Koul --- drivers/dma/ioat/dma.c | 45 ++++++++++++++++++------------------------ 1 file changed, 19 insertions(+), 26 deletions(-) diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 1e0e6c1d55336..da59b2848252b 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -869,6 +869,23 @@ static void check_active(struct ioatdma_chan *ioat_chan) mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); } +static void ioat_reboot_chan(struct ioatdma_chan *ioat_chan) +{ + spin_lock_bh(&ioat_chan->prep_lock); + set_bit(IOAT_CHAN_DOWN, &ioat_chan->state); + spin_unlock_bh(&ioat_chan->prep_lock); + + ioat_abort_descs(ioat_chan); + dev_warn(to_dev(ioat_chan), "Reset channel...\n"); + ioat_reset_hw(ioat_chan); + dev_warn(to_dev(ioat_chan), "Restart channel...\n"); + ioat_restart_channel(ioat_chan); + + spin_lock_bh(&ioat_chan->prep_lock); + clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state); + spin_unlock_bh(&ioat_chan->prep_lock); +} + void ioat_timer_event(struct timer_list *t) { struct ioatdma_chan *ioat_chan = from_timer(ioat_chan, t, timer); @@ -891,19 +908,7 @@ void ioat_timer_event(struct timer_list *t) if (test_bit(IOAT_RUN, &ioat_chan->state)) { spin_lock_bh(&ioat_chan->cleanup_lock); - spin_lock_bh(&ioat_chan->prep_lock); - set_bit(IOAT_CHAN_DOWN, &ioat_chan->state); - spin_unlock_bh(&ioat_chan->prep_lock); - - ioat_abort_descs(ioat_chan); - dev_warn(to_dev(ioat_chan), "Reset channel...\n"); - ioat_reset_hw(ioat_chan); - dev_warn(to_dev(ioat_chan), "Restart channel...\n"); - ioat_restart_channel(ioat_chan); - - spin_lock_bh(&ioat_chan->prep_lock); - clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state); - spin_unlock_bh(&ioat_chan->prep_lock); + ioat_reboot_chan(ioat_chan); spin_unlock_bh(&ioat_chan->cleanup_lock); } @@ -939,19 +944,7 @@ void ioat_timer_event(struct timer_list *t) dev_dbg(to_dev(ioat_chan), "Active descriptors: %d\n", ioat_ring_active(ioat_chan)); - spin_lock_bh(&ioat_chan->prep_lock); - set_bit(IOAT_CHAN_DOWN, &ioat_chan->state); - spin_unlock_bh(&ioat_chan->prep_lock); - - ioat_abort_descs(ioat_chan); - dev_warn(to_dev(ioat_chan), "Resetting channel...\n"); - ioat_reset_hw(ioat_chan); - dev_warn(to_dev(ioat_chan), "Restarting channel...\n"); - ioat_restart_channel(ioat_chan); - - spin_lock_bh(&ioat_chan->prep_lock); - clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state); - spin_unlock_bh(&ioat_chan->prep_lock); + ioat_reboot_chan(ioat_chan); spin_unlock_bh(&ioat_chan->cleanup_lock); return; } else -- GitLab From 2baedcb6a637ba6a5d626a58cf37aecd6db16ef0 Mon Sep 17 00:00:00 2001 From: Leonid Ravich Date: Thu, 23 Apr 2020 00:09:17 +0300 Subject: [PATCH 0175/1971] dmaengine: ioat: remove unnesesery double complition timer modification. 
removing unnecessary mod_timer from timeout handler incase of ioat_cleanup_preamble() is true for cleaner code Acked-by: Dave Jiang Signed-off-by: Leonid Ravich Link: https://lore.kernel.org/r/1587589761-32690-2-git-send-email-leonid.ravich@dell.com Signed-off-by: Vinod Koul --- drivers/dma/ioat/dma.c | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index da59b2848252b..55a8cf181816a 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -922,17 +922,23 @@ void ioat_timer_event(struct timer_list *t) spin_lock_bh(&ioat_chan->prep_lock); check_active(ioat_chan); spin_unlock_bh(&ioat_chan->prep_lock); - spin_unlock_bh(&ioat_chan->cleanup_lock); - return; + goto unlock_out; + } + + /* handle the missed cleanup case */ + if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) { + /* timer restarted in ioat_cleanup_preamble + * and IOAT_COMPLETION_ACK cleared + */ + __cleanup(ioat_chan, phys_complete); + goto unlock_out; } /* if we haven't made progress and we have already * acknowledged a pending completion once, then be more * forceful with a restart */ - if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) - __cleanup(ioat_chan, phys_complete); - else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) { + if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) { u32 chanerr; chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); @@ -945,12 +951,13 @@ void ioat_timer_event(struct timer_list *t) ioat_ring_active(ioat_chan)); ioat_reboot_chan(ioat_chan); - spin_unlock_bh(&ioat_chan->cleanup_lock); - return; - } else - set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state); + goto unlock_out; + } + + set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state); mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); +unlock_out: spin_unlock_bh(&ioat_chan->cleanup_lock); } -- GitLab From db474931df3eb54edbf761b8c98ba517bdf24463 Mon Sep 17 00:00:00 2001 From: Leonid Ravich Date: Thu, 23 Apr 2020 00:09:18 +0300 Subject: [PATCH 0176/1971] dmaengine: ioat: adding missed issue_pending to timeout handler completion timeout might trigger unnesesery DMA engine hw reboot in case of missed issue_pending() . Acked-by: Dave Jiang Signed-off-by: Leonid Ravich Link: https://lore.kernel.org/r/1587589761-32690-3-git-send-email-leonid.ravich@dell.com Signed-off-by: Vinod Koul --- drivers/dma/ioat/dma.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 55a8cf181816a..8ad0ad861c861 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -955,6 +955,15 @@ void ioat_timer_event(struct timer_list *t) goto unlock_out; } + /* handle missed issue pending case */ + if (ioat_ring_pending(ioat_chan)) { + dev_warn(to_dev(ioat_chan), + "Completion timeout with pending descriptors\n"); + spin_lock_bh(&ioat_chan->prep_lock); + __ioat_issue_pending(ioat_chan); + spin_unlock_bh(&ioat_chan->prep_lock); + } + set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state); mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); unlock_out: -- GitLab From 435cbab95e3966cd8310addd9e9b758dce0e8b84 Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Thu, 9 Apr 2020 10:25:21 -0700 Subject: [PATCH 0177/1971] f2fs: fix quota_sync failure due to f2fs_lock_op f2fs_quota_sync() uses f2fs_lock_op() before flushing dirty pages, but f2fs_write_data_page() returns EAGAIN. Likewise dentry blocks, we can just bypass getting the lock, since quota blocks are also maintained by checkpoint. 
Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/compress.c | 8 +++++--- fs/f2fs/data.c | 4 ++-- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c index df7b2d15eacde..26b071afe48a3 100644 --- a/fs/f2fs/compress.c +++ b/fs/f2fs/compress.c @@ -985,7 +985,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc, loff_t psize; int i, err; - if (!f2fs_trylock_op(sbi)) + if (!IS_NOQUOTA(inode) && !f2fs_trylock_op(sbi)) return -EAGAIN; set_new_dnode(&dn, cc->inode, NULL, NULL, 0); @@ -1092,7 +1092,8 @@ unlock_continue: set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN); f2fs_put_dnode(&dn); - f2fs_unlock_op(sbi); + if (!IS_NOQUOTA(inode)) + f2fs_unlock_op(sbi); spin_lock(&fi->i_size_lock); if (fi->last_disk_size < psize) @@ -1118,7 +1119,8 @@ out_put_cic: out_put_dnode: f2fs_put_dnode(&dn); out_unlock_op: - f2fs_unlock_op(sbi); + if (!IS_NOQUOTA(inode)) + f2fs_unlock_op(sbi); return -EAGAIN; } diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 199877cb57fe2..a5505aba2a1f1 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -2659,8 +2659,8 @@ write: f2fs_available_free_memory(sbi, BASE_CHECK)))) goto redirty_out; - /* Dentry blocks are controlled by checkpoint */ - if (S_ISDIR(inode->i_mode)) { + /* Dentry/quota blocks are controlled by checkpoint */ + if (S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) { fio.need_lock = LOCK_DONE; err = f2fs_do_write_data_page(&fio); goto done; -- GitLab From 86d1ee00c86dd8472e18d76c6895e1e94a34d919 Mon Sep 17 00:00:00 2001 From: Stephan Gerhold Date: Sat, 25 Apr 2020 13:06:09 -0700 Subject: [PATCH 0178/1971] dt-bindings: mms114: document melfas,mms345l binding The mms114 driver now supports MMS345L; document the melfas,mms345l binding that is used for it. Signed-off-by: Stephan Gerhold Reviewed-by: Andi Shyti Acked-by: Rob Herring Link: https://lore.kernel.org/r/20200423102431.2715-2-stephan@gerhold.net Signed-off-by: Dmitry Torokhov --- Documentation/devicetree/bindings/input/touchscreen/mms114.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Documentation/devicetree/bindings/input/touchscreen/mms114.txt b/Documentation/devicetree/bindings/input/touchscreen/mms114.txt index 2cd954051d299..707234cfd7e6f 100644 --- a/Documentation/devicetree/bindings/input/touchscreen/mms114.txt +++ b/Documentation/devicetree/bindings/input/touchscreen/mms114.txt @@ -1,9 +1,10 @@ -* MELFAS MMS114/MMS152 touchscreen controller +* MELFAS MMS114/MMS152/MMS345L touchscreen controller Required properties: - compatible: should be one of: - "melfas,mms114" - "melfas,mms152" + - "melfas,mms345l" - reg: I2C address of the chip - interrupts: interrupt to which the chip is connected - touchscreen-size-x: See [1] -- GitLab From 7842087b0196d674ed877d768de8f2a34d7fdc53 Mon Sep 17 00:00:00 2001 From: Stephan Gerhold Date: Sat, 25 Apr 2020 13:06:31 -0700 Subject: [PATCH 0179/1971] Input: mms114 - add extra compatible for mms345l MMS345L is another first generation touch screen from Melfas, which uses mostly the same registers as MMS152. However, there is some garbage printed during initialization. Apparently MMS345L does not have the MMS152_COMPAT_GROUP register that is read+printed during initialization. TSP FW Rev: bootloader 0x6 / core 0x26 / config 0x26, Compat group: \x06 On earlier kernel versions the compat group was actually printed as an ASCII control character, seems like it gets escaped now. But we probably shouldn't print something from a random register. 
Add a separate "melfas,mms345l" compatible that avoids reading from the MMS152_COMPAT_GROUP register. This might also help in case there is some other device-specific quirk in the future. Signed-off-by: Stephan Gerhold Reviewed-by: Andi Shyti Link: https://lore.kernel.org/r/20200423102431.2715-1-stephan@gerhold.net Signed-off-by: Dmitry Torokhov --- drivers/input/touchscreen/mms114.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c index 69c6d559eeb00..b3dda1722bae1 100644 --- a/drivers/input/touchscreen/mms114.c +++ b/drivers/input/touchscreen/mms114.c @@ -54,6 +54,7 @@ enum mms_type { TYPE_MMS114 = 114, TYPE_MMS152 = 152, + TYPE_MMS345L = 345, }; struct mms114_data { @@ -250,6 +251,15 @@ static int mms114_get_version(struct mms114_data *data) int error; switch (data->type) { + case TYPE_MMS345L: + error = __mms114_read_reg(data, MMS152_FW_REV, 3, buf); + if (error) + return error; + + dev_info(dev, "TSP FW Rev: bootloader 0x%x / core 0x%x / config 0x%x\n", + buf[0], buf[1], buf[2]); + break; + case TYPE_MMS152: error = __mms114_read_reg(data, MMS152_FW_REV, 3, buf); if (error) @@ -287,8 +297,8 @@ static int mms114_setup_regs(struct mms114_data *data) if (error < 0) return error; - /* MMS152 has no configuration or power on registers */ - if (data->type == TYPE_MMS152) + /* Only MMS114 has configuration and power on registers */ + if (data->type != TYPE_MMS114) return 0; error = mms114_set_active(data, true); @@ -599,6 +609,9 @@ static const struct of_device_id mms114_dt_match[] = { }, { .compatible = "melfas,mms152", .data = (void *)TYPE_MMS152, + }, { + .compatible = "melfas,mms345l", + .data = (void *)TYPE_MMS345L, }, { } }; -- GitLab From e804f0a78e9e43f0105e1bc3e264894b3898e970 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Ard=C3=B6?= Date: Fri, 24 Apr 2020 13:30:36 +0200 Subject: [PATCH 0180/1971] i2c: slave-eeprom: Make it possible to pre-load eeprom data MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If the slave eeprom has a "firmware-name" in devicetree, then pre-load the data in the eeprom with this file. Otherwise we init the eeprom with 0xFF. 
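A minimal sketch of the pre-load flow, assuming the EEPROM buffer is already allocated; eeprom_preload() is a made-up name for illustration, while device_property_read_string(), request_firmware_into_buf() and release_firmware() are the kernel APIs the change below relies on:

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/property.h>
#include <linux/string.h>

static int eeprom_preload(struct device *dev, u8 *buf, size_t size)
{
	const struct firmware *fw;
	const char *name;
	int ret;

	if (device_property_read_string(dev, "firmware-name", &name)) {
		/* no property: present an erased EEPROM, all bits set */
		memset(buf, 0xff, size);
		return 0;
	}

	ret = request_firmware_into_buf(&fw, name, dev, buf, size);
	if (ret)
		return ret;

	/* the image already sits in buf; only the firmware handle is dropped */
	release_firmware(fw);
	return 0;
}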
Signed-off-by: Patrick Williams Signed-off-by: Björn Ardö [wsa: some cosmetic changes] Signed-off-by: Wolfram Sang --- drivers/i2c/i2c-slave-eeprom.c | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/drivers/i2c/i2c-slave-eeprom.c b/drivers/i2c/i2c-slave-eeprom.c index cb415b10642f7..f868dfc362a6c 100644 --- a/drivers/i2c/i2c-slave-eeprom.c +++ b/drivers/i2c/i2c-slave-eeprom.c @@ -18,6 +18,7 @@ */ #include +#include #include #include #include @@ -120,6 +121,26 @@ static ssize_t i2c_slave_eeprom_bin_write(struct file *filp, struct kobject *kob return count; } +static int i2c_slave_init_eeprom_data(struct eeprom_data *eeprom, struct i2c_client *client, + unsigned int size) +{ + const struct firmware *fw; + const char *eeprom_data; + int ret = device_property_read_string(&client->dev, "firmware-name", &eeprom_data); + + if (!ret) { + ret = request_firmware_into_buf(&fw, eeprom_data, &client->dev, + eeprom->buffer, size); + if (ret) + return ret; + release_firmware(fw); + } else { + /* An empty eeprom typically has all bits set to 1 */ + memset(eeprom->buffer, 0xff, size); + } + return 0; +} + static int i2c_slave_eeprom_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct eeprom_data *eeprom; @@ -138,6 +159,10 @@ static int i2c_slave_eeprom_probe(struct i2c_client *client, const struct i2c_de spin_lock_init(&eeprom->buffer_lock); i2c_set_clientdata(client, eeprom); + ret = i2c_slave_init_eeprom_data(eeprom, client, size); + if (ret) + return ret; + sysfs_bin_attr_init(&eeprom->bin); eeprom->bin.attr.name = "slave-eeprom"; eeprom->bin.attr.mode = S_IRUSR | S_IWUSR; -- GitLab From ec43f74343796460c916cdc8ab350235c8b88638 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Fri, 24 Apr 2020 17:33:42 +0200 Subject: [PATCH 0181/1971] i2c: brcmstb: Allow to compile it on BCM2835 The BCM2711, supported by ARCH_BCM2835, also has a controller by the brcmstb driver so let's allow it to be compiled on that platform. Acked-by: Florian Fainelli Signed-off-by: Maxime Ripard Signed-off-by: Wolfram Sang --- drivers/i2c/busses/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 70a561bff74a6..2f6e39b41e6c0 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -475,8 +475,8 @@ config I2C_BCM_KONA config I2C_BRCMSTB tristate "BRCM Settop/DSL I2C controller" - depends on ARCH_BRCMSTB || BMIPS_GENERIC || ARCH_BCM_63XX || \ - COMPILE_TEST + depends on ARCH_BCM2835 || ARCH_BRCMSTB || BMIPS_GENERIC || \ + ARCH_BCM_63XX || COMPILE_TEST default y help If you say yes to this option, support will be included for the -- GitLab From e07a89775c71a246c99b9cc3fbe5f932b72765ed Mon Sep 17 00:00:00 2001 From: Etienne Carriere Date: Mon, 20 Apr 2020 17:17:06 +0200 Subject: [PATCH 0182/1971] i2c: stm32: don't print an error on probe deferral Do not print an error trace when deferring probe for some resource. Fix as well the error message in case of tx dma_request_chan failure. 
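The idiom in generic form, as a sketch rather than the exact hunks below (the clock and the probe function are placeholders): a resource getter returns -EPROBE_DEFER when its provider is not ready yet, and since that case is retried later it is the only one that should stay silent.

#include <linux/clk.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk = devm_clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk)) {
		int ret = PTR_ERR(clk);

		/* deferral is retried later, only real failures are logged */
		if (ret != -EPROBE_DEFER)
			dev_err(&pdev->dev, "failed to get clock: %d\n", ret);
		return ret;
	}
	return 0;
}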
Signed-off-by: Etienne Carriere Signed-off-by: Alain Volmat Reviewed-by: Pierre-Yves MORDRET Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-stm32.c | 10 +++++++--- drivers/i2c/busses/i2c-stm32f4.c | 4 +++- drivers/i2c/busses/i2c-stm32f7.c | 7 +++++-- 3 files changed, 15 insertions(+), 6 deletions(-) diff --git a/drivers/i2c/busses/i2c-stm32.c b/drivers/i2c/busses/i2c-stm32.c index 1da347e6a3586..3f69a3bb61197 100644 --- a/drivers/i2c/busses/i2c-stm32.c +++ b/drivers/i2c/busses/i2c-stm32.c @@ -25,8 +25,9 @@ struct stm32_i2c_dma *stm32_i2c_dma_request(struct device *dev, /* Request and configure I2C TX dma channel */ dma->chan_tx = dma_request_chan(dev, "tx"); if (IS_ERR(dma->chan_tx)) { - dev_dbg(dev, "can't request DMA tx channel\n"); ret = PTR_ERR(dma->chan_tx); + if (ret != -EPROBE_DEFER) + dev_err(dev, "can't request DMA tx channel\n"); goto fail_al; } @@ -44,8 +45,10 @@ struct stm32_i2c_dma *stm32_i2c_dma_request(struct device *dev, /* Request and configure I2C RX dma channel */ dma->chan_rx = dma_request_chan(dev, "rx"); if (IS_ERR(dma->chan_rx)) { - dev_err(dev, "can't request DMA rx channel\n"); ret = PTR_ERR(dma->chan_rx); + if (ret != -EPROBE_DEFER) + dev_err(dev, "can't request DMA rx channel\n"); + goto fail_tx; } @@ -73,7 +76,8 @@ fail_tx: dma_release_channel(dma->chan_tx); fail_al: devm_kfree(dev, dma); - dev_info(dev, "can't use DMA\n"); + if (ret != -EPROBE_DEFER) + dev_info(dev, "can't use DMA\n"); return ERR_PTR(ret); } diff --git a/drivers/i2c/busses/i2c-stm32f4.c b/drivers/i2c/busses/i2c-stm32f4.c index d6a69dfcac3f6..48e2692843690 100644 --- a/drivers/i2c/busses/i2c-stm32f4.c +++ b/drivers/i2c/busses/i2c-stm32f4.c @@ -797,8 +797,10 @@ static int stm32f4_i2c_probe(struct platform_device *pdev) rst = devm_reset_control_get_exclusive(&pdev->dev, NULL); if (IS_ERR(rst)) { - dev_err(&pdev->dev, "Error: Missing controller reset\n"); ret = PTR_ERR(rst); + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, "Error: Missing reset ctrl\n"); + goto clk_free; } reset_control_assert(rst); diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c index 8102e33a67539..59a0dcd6a475f 100644 --- a/drivers/i2c/busses/i2c-stm32f7.c +++ b/drivers/i2c/busses/i2c-stm32f7.c @@ -1967,7 +1967,8 @@ static int stm32f7_i2c_probe(struct platform_device *pdev) i2c_dev->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(i2c_dev->clk)) { - dev_err(&pdev->dev, "Error: Missing controller clock\n"); + if (PTR_ERR(i2c_dev->clk) != -EPROBE_DEFER) + dev_err(&pdev->dev, "Failed to get controller clock\n"); return PTR_ERR(i2c_dev->clk); } @@ -1979,8 +1980,10 @@ static int stm32f7_i2c_probe(struct platform_device *pdev) rst = devm_reset_control_get(&pdev->dev, NULL); if (IS_ERR(rst)) { - dev_err(&pdev->dev, "Error: Missing controller reset\n"); ret = PTR_ERR(rst); + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, "Error: Missing reset ctrl\n"); + goto clk_free; } reset_control_assert(rst); -- GitLab From dd16163ea24b986136e0d11b9ebd861cef9d308d Mon Sep 17 00:00:00 2001 From: Alain Volmat Date: Mon, 20 Apr 2020 16:57:56 +0200 Subject: [PATCH 0183/1971] dt-bindings: i2c: i2c-stm32f7: allow clock-frequency range For STM32F7, STM32H7 and STM32MP1 SoCs, if timing parameters match, the bus clock frequency can be from 1Hz to 1MHz. 
Signed-off-by: Alain Volmat Reviewed-by: Rob Herring Signed-off-by: Wolfram Sang --- Documentation/devicetree/bindings/i2c/st,stm32-i2c.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Documentation/devicetree/bindings/i2c/st,stm32-i2c.yaml b/Documentation/devicetree/bindings/i2c/st,stm32-i2c.yaml index 900ec1ab6a478..b50a2f420b362 100644 --- a/Documentation/devicetree/bindings/i2c/st,stm32-i2c.yaml +++ b/Documentation/devicetree/bindings/i2c/st,stm32-i2c.yaml @@ -80,11 +80,11 @@ properties: clock-frequency: description: Desired I2C bus clock frequency in Hz. If not specified, the default 100 kHz frequency will be used. - For STM32F7, STM32H7 and STM32MP1 SoCs, Standard-mode, - Fast-mode and Fast-mode Plus are supported, possible - values are 100000, 400000 and 1000000. + For STM32F7, STM32H7 and STM32MP1 SoCs, if timing parameters + match, the bus clock frequency can be from 1Hz to 1MHz. default: 100000 - enum: [100000, 400000, 1000000] + minimum: 1 + maximum: 1000000 required: - compatible -- GitLab From 09cc9a3bce914d363a0f481b6603f2943fde9227 Mon Sep 17 00:00:00 2001 From: Alain Volmat Date: Mon, 20 Apr 2020 16:57:57 +0200 Subject: [PATCH 0184/1971] i2c: stm32f7: allows for any bus frequency Do not limitate to the 3 (100KHz, 400KHz, 1MHz) bus frequency but instead allows any frequency (if it matches timing requirements). Depending on the requested frequency, use the spec data from either Standard, Fast or Fast Plus mode. Hardcoding of min/max bus frequencies is removed and is instead computed. The driver do not use anymore speed identifier but instead handle directly the frequency and figure out the spec data (necessary for the computation of the timing register) based on the frequency. Signed-off-by: Alain Volmat Reviewed-by: Pierre-Yves MORDRET Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-stm32f7.c | 125 ++++++++++++++++--------------- 1 file changed, 64 insertions(+), 61 deletions(-) diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c index 59a0dcd6a475f..9c9e10ea91991 100644 --- a/drivers/i2c/busses/i2c-stm32f7.c +++ b/drivers/i2c/busses/i2c-stm32f7.c @@ -189,8 +189,6 @@ struct stm32f7_i2c_regs { /** * struct stm32f7_i2c_spec - private i2c specification timing * @rate: I2C bus speed (Hz) - * @rate_min: 80% of I2C bus speed (Hz) - * @rate_max: 100% of I2C bus speed (Hz) * @fall_max: Max fall time of both SDA and SCL signals (ns) * @rise_max: Max rise time of both SDA and SCL signals (ns) * @hddat_min: Min data hold time (ns) @@ -201,8 +199,6 @@ struct stm32f7_i2c_regs { */ struct stm32f7_i2c_spec { u32 rate; - u32 rate_min; - u32 rate_max; u32 fall_max; u32 rise_max; u32 hddat_min; @@ -214,7 +210,6 @@ struct stm32f7_i2c_spec { /** * struct stm32f7_i2c_setup - private I2C timing setup parameters - * @speed: I2C speed mode (standard, Fast Plus) * @speed_freq: I2C speed frequency (Hz) * @clock_src: I2C clock source frequency (Hz) * @rise_time: Rise time (ns) @@ -224,7 +219,6 @@ struct stm32f7_i2c_spec { * @fmp_clr_offset: Fast Mode Plus clear register offset from set register */ struct stm32f7_i2c_setup { - enum stm32_i2c_speed speed; u32 speed_freq; u32 clock_src; u32 rise_time; @@ -287,7 +281,7 @@ struct stm32f7_i2c_msg { * @base: virtual memory area * @complete: completion of I2C message * @clk: hw i2c clock - * @speed: I2C clock frequency of the controller. 
Standard, Fast or Fast+ + * @bus_rate: I2C clock frequency of the controller * @msg: Pointer to data to be written * @msg_num: number of I2C messages to be executed * @msg_id: message identifiant @@ -314,7 +308,7 @@ struct stm32f7_i2c_dev { void __iomem *base; struct completion complete; struct clk *clk; - int speed; + unsigned int bus_rate; struct i2c_msg *msg; unsigned int msg_num; unsigned int msg_id; @@ -342,11 +336,9 @@ struct stm32f7_i2c_dev { * Table10. Characteristics of the SDA and SCL bus lines for Standard, Fast, * and Fast-mode Plus I2C-bus devices */ -static struct stm32f7_i2c_spec i2c_specs[] = { - [STM32_I2C_SPEED_STANDARD] = { +static struct stm32f7_i2c_spec stm32f7_i2c_specs[] = { + { .rate = I2C_MAX_STANDARD_MODE_FREQ, - .rate_min = I2C_MAX_STANDARD_MODE_FREQ * 8 / 10, /* 80% */ - .rate_max = I2C_MAX_STANDARD_MODE_FREQ, .fall_max = 300, .rise_max = 1000, .hddat_min = 0, @@ -355,10 +347,8 @@ static struct stm32f7_i2c_spec i2c_specs[] = { .l_min = 4700, .h_min = 4000, }, - [STM32_I2C_SPEED_FAST] = { + { .rate = I2C_MAX_FAST_MODE_FREQ, - .rate_min = I2C_MAX_FAST_MODE_FREQ * 8 / 10, /* 80% */ - .rate_max = I2C_MAX_FAST_MODE_FREQ, .fall_max = 300, .rise_max = 300, .hddat_min = 0, @@ -367,10 +357,8 @@ static struct stm32f7_i2c_spec i2c_specs[] = { .l_min = 1300, .h_min = 600, }, - [STM32_I2C_SPEED_FAST_PLUS] = { + { .rate = I2C_MAX_FAST_MODE_PLUS_FREQ, - .rate_min = I2C_MAX_FAST_MODE_PLUS_FREQ * 8 / 10, /* 80% */ - .rate_max = I2C_MAX_FAST_MODE_PLUS_FREQ, .fall_max = 100, .rise_max = 120, .hddat_min = 0, @@ -411,10 +399,23 @@ static void stm32f7_i2c_disable_irq(struct stm32f7_i2c_dev *i2c_dev, u32 mask) stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1, mask); } +static struct stm32f7_i2c_spec *stm32f7_get_specs(u32 rate) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(stm32f7_i2c_specs); i++) + if (rate <= stm32f7_i2c_specs[i].rate) + return &stm32f7_i2c_specs[i]; + + return ERR_PTR(-EINVAL); +} + +#define RATE_MIN(rate) ((rate) * 8 / 10) static int stm32f7_i2c_compute_timing(struct stm32f7_i2c_dev *i2c_dev, struct stm32f7_i2c_setup *setup, struct stm32f7_i2c_timings *output) { + struct stm32f7_i2c_spec *specs; u32 p_prev = STM32F7_PRESC_MAX; u32 i2cclk = DIV_ROUND_CLOSEST(NSEC_PER_SEC, setup->clock_src); @@ -432,18 +433,19 @@ static int stm32f7_i2c_compute_timing(struct stm32f7_i2c_dev *i2c_dev, u16 p, l, a, h; int ret = 0; - if (setup->speed >= STM32_I2C_SPEED_END) { - dev_err(i2c_dev->dev, "speed out of bound {%d/%d}\n", - setup->speed, STM32_I2C_SPEED_END - 1); + specs = stm32f7_get_specs(setup->speed_freq); + if (specs == ERR_PTR(-EINVAL)) { + dev_err(i2c_dev->dev, "speed out of bound {%d}\n", + setup->speed_freq); return -EINVAL; } - if ((setup->rise_time > i2c_specs[setup->speed].rise_max) || - (setup->fall_time > i2c_specs[setup->speed].fall_max)) { + if ((setup->rise_time > specs->rise_max) || + (setup->fall_time > specs->fall_max)) { dev_err(i2c_dev->dev, "timings out of bound Rise{%d>%d}/Fall{%d>%d}\n", - setup->rise_time, i2c_specs[setup->speed].rise_max, - setup->fall_time, i2c_specs[setup->speed].fall_max); + setup->rise_time, specs->rise_max, + setup->fall_time, specs->fall_max); return -EINVAL; } @@ -454,12 +456,6 @@ static int stm32f7_i2c_compute_timing(struct stm32f7_i2c_dev *i2c_dev, return -EINVAL; } - if (setup->speed_freq > i2c_specs[setup->speed].rate) { - dev_err(i2c_dev->dev, "ERROR: Freq {%d/%d}\n", - setup->speed_freq, i2c_specs[setup->speed].rate); - return -EINVAL; - } - /* Analog and Digital Filters */ af_delay_min = (setup->analog_filter ? 
@@ -469,13 +465,13 @@ static int stm32f7_i2c_compute_timing(struct stm32f7_i2c_dev *i2c_dev, STM32F7_I2C_ANALOG_FILTER_DELAY_MAX : 0); dnf_delay = setup->dnf * i2cclk; - sdadel_min = i2c_specs[setup->speed].hddat_min + setup->fall_time - + sdadel_min = specs->hddat_min + setup->fall_time - af_delay_min - (setup->dnf + 3) * i2cclk; - sdadel_max = i2c_specs[setup->speed].vddat_max - setup->rise_time - + sdadel_max = specs->vddat_max - setup->rise_time - af_delay_max - (setup->dnf + 4) * i2cclk; - scldel_min = setup->rise_time + i2c_specs[setup->speed].sudat_min; + scldel_min = setup->rise_time + specs->sudat_min; if (sdadel_min < 0) sdadel_min = 0; @@ -530,8 +526,8 @@ static int stm32f7_i2c_compute_timing(struct stm32f7_i2c_dev *i2c_dev, tsync = af_delay_min + dnf_delay + (2 * i2cclk); s = NULL; - clk_max = NSEC_PER_SEC / i2c_specs[setup->speed].rate_min; - clk_min = NSEC_PER_SEC / i2c_specs[setup->speed].rate_max; + clk_max = NSEC_PER_SEC / RATE_MIN(setup->speed_freq); + clk_min = NSEC_PER_SEC / setup->speed_freq; /* * Among Prescaler possibilities discovered above figures out SCL Low @@ -549,7 +545,7 @@ static int stm32f7_i2c_compute_timing(struct stm32f7_i2c_dev *i2c_dev, for (l = 0; l < STM32F7_SCLL_MAX; l++) { u32 tscl_l = (l + 1) * prescaler + tsync; - if ((tscl_l < i2c_specs[setup->speed].l_min) || + if ((tscl_l < specs->l_min) || (i2cclk >= ((tscl_l - af_delay_min - dnf_delay) / 4))) { continue; @@ -561,7 +557,7 @@ static int stm32f7_i2c_compute_timing(struct stm32f7_i2c_dev *i2c_dev, setup->rise_time + setup->fall_time; if ((tscl >= clk_min) && (tscl <= clk_max) && - (tscl_h >= i2c_specs[setup->speed].h_min) && + (tscl_h >= specs->h_min) && (i2cclk < tscl_h)) { int clk_error = tscl - i2cbus; @@ -607,6 +603,17 @@ exit: return ret; } +static u32 stm32f7_get_lower_rate(u32 rate) +{ + int i = ARRAY_SIZE(stm32f7_i2c_specs); + + while (i--) + if (stm32f7_i2c_specs[i].rate < rate) + break; + + return stm32f7_i2c_specs[i].rate; +} + static int stm32f7_i2c_setup_timing(struct stm32f7_i2c_dev *i2c_dev, struct stm32f7_i2c_setup *setup) { @@ -619,18 +626,15 @@ static int stm32f7_i2c_setup_timing(struct stm32f7_i2c_dev *i2c_dev, i2c_parse_fw_timings(i2c_dev->dev, t, false); - if (t->bus_freq_hz >= I2C_MAX_FAST_MODE_PLUS_FREQ) - i2c_dev->speed = STM32_I2C_SPEED_FAST_PLUS; - else if (t->bus_freq_hz >= I2C_MAX_FAST_MODE_FREQ) - i2c_dev->speed = STM32_I2C_SPEED_FAST; - else - i2c_dev->speed = STM32_I2C_SPEED_STANDARD; + if (t->bus_freq_hz > I2C_MAX_FAST_MODE_PLUS_FREQ) { + dev_err(i2c_dev->dev, "Invalid bus speed (%i>%i)\n", + t->bus_freq_hz, I2C_MAX_FAST_MODE_PLUS_FREQ); + return -EINVAL; + } + setup->speed_freq = t->bus_freq_hz; i2c_dev->setup.rise_time = t->scl_rise_ns; i2c_dev->setup.fall_time = t->scl_fall_ns; - - setup->speed = i2c_dev->speed; - setup->speed_freq = i2c_specs[setup->speed].rate; setup->clock_src = clk_get_rate(i2c_dev->clk); if (!setup->clock_src) { @@ -644,17 +648,13 @@ static int stm32f7_i2c_setup_timing(struct stm32f7_i2c_dev *i2c_dev, if (ret) { dev_err(i2c_dev->dev, "failed to compute I2C timings.\n"); - if (i2c_dev->speed > STM32_I2C_SPEED_STANDARD) { - i2c_dev->speed--; - setup->speed = i2c_dev->speed; - setup->speed_freq = - i2c_specs[setup->speed].rate; - dev_warn(i2c_dev->dev, - "downgrade I2C Speed Freq to (%i)\n", - i2c_specs[setup->speed].rate); - } else { + if (setup->speed_freq <= I2C_MAX_STANDARD_MODE_FREQ) break; - } + setup->speed_freq = + stm32f7_get_lower_rate(setup->speed_freq); + dev_warn(i2c_dev->dev, + "downgrade I2C Speed Freq to (%i)\n", + 
setup->speed_freq); } } while (ret); @@ -663,13 +663,15 @@ static int stm32f7_i2c_setup_timing(struct stm32f7_i2c_dev *i2c_dev, return ret; } - dev_dbg(i2c_dev->dev, "I2C Speed(%i), Freq(%i), Clk Source(%i)\n", - setup->speed, setup->speed_freq, setup->clock_src); + dev_dbg(i2c_dev->dev, "I2C Speed(%i), Clk Source(%i)\n", + setup->speed_freq, setup->clock_src); dev_dbg(i2c_dev->dev, "I2C Rise(%i) and Fall(%i) Time\n", setup->rise_time, setup->fall_time); dev_dbg(i2c_dev->dev, "I2C Analog Filter(%s), DNF(%i)\n", (setup->analog_filter ? "On" : "Off"), setup->dnf); + i2c_dev->bus_rate = setup->speed_freq; + return 0; } @@ -1867,7 +1869,7 @@ static int stm32f7_i2c_write_fm_plus_bits(struct stm32f7_i2c_dev *i2c_dev, { int ret; - if (i2c_dev->speed != STM32_I2C_SPEED_FAST_PLUS || + if (i2c_dev->bus_rate <= I2C_MAX_FAST_MODE_FREQ || IS_ERR_OR_NULL(i2c_dev->regmap)) /* Optional */ return 0; @@ -2023,7 +2025,8 @@ static int stm32f7_i2c_probe(struct platform_device *pdev) if (ret) goto clk_free; - if (i2c_dev->speed == STM32_I2C_SPEED_FAST_PLUS) { + /* Setup Fast mode plus if necessary */ + if (i2c_dev->bus_rate > I2C_MAX_FAST_MODE_FREQ) { ret = stm32f7_i2c_setup_fm_plus_bits(pdev, i2c_dev); if (ret) goto clk_free; -- GitLab From 88fb09c409255a3deb75f5d133dd77e70a1075c9 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Wed, 15 Apr 2020 12:51:00 +0200 Subject: [PATCH 0185/1971] i2c: regroup documentation of bindings Some bindings are for the bus master, some are for the slaves. Regroup them and give them separate headings to make it clear. Also, remove references to "generic names" which is for nodes and not for compatibles. Signed-off-by: Wolfram Sang Reviewed-by: Rob Herring Signed-off-by: Wolfram Sang --- Documentation/devicetree/bindings/i2c/i2c.txt | 66 +++++++++++-------- 1 file changed, 39 insertions(+), 27 deletions(-) diff --git a/Documentation/devicetree/bindings/i2c/i2c.txt b/Documentation/devicetree/bindings/i2c/i2c.txt index 9a53df4243c64..819436b48faef 100644 --- a/Documentation/devicetree/bindings/i2c/i2c.txt +++ b/Documentation/devicetree/bindings/i2c/i2c.txt @@ -2,32 +2,26 @@ Generic device tree bindings for I2C busses =========================================== This document describes generic bindings which can be used to describe I2C -busses in a device tree. +busses and their child devices in a device tree. -Required properties -------------------- +Required properties (per bus) +----------------------------- - #address-cells - should be <1>. Read more about addresses below. - #size-cells - should be <0>. -- compatible - name of I2C bus controller following generic names - recommended practice. +- compatible - name of I2C bus controller For other required properties e.g. to describe register sets, clocks, etc. check the binding documentation of the specific driver. The cells properties above define that an address of children of an I2C bus -are described by a single value. This is usually a 7 bit address. However, -flags can be attached to the address. I2C_TEN_BIT_ADDRESS is used to mark a 10 -bit address. It is needed to avoid the ambiguity between e.g. a 7 bit address -of 0x50 and a 10 bit address of 0x050 which, in theory, can be on the same bus. -Another flag is I2C_OWN_SLAVE_ADDRESS to mark addresses on which we listen to -be devices ourselves. +are described by a single value. -Optional properties -------------------- +Optional properties (per bus) +----------------------------- These properties may not be supported by all drivers. 
However, if a driver -wants to support one of the below features, it should adapt the bindings below. +wants to support one of the below features, it should adapt these bindings. - clock-frequency frequency of bus clock in Hz. @@ -73,31 +67,49 @@ wants to support one of the below features, it should adapt the bindings below. i2c bus clock frequency (clock-frequency). Specified in Hz. -- interrupts - interrupts used by the device. - -- interrupt-names - "irq", "wakeup" and "smbus_alert" names are recognized by I2C core, - other names are left to individual drivers. - -- host-notify - device uses SMBus host notify protocol instead of interrupt line. - - multi-master states that there is another master active on this bus. The OS can use this information to adapt power management to keep the arbitration awake all the time, for example. -- wakeup-source - device can be used as a wakeup source. +Required properties (per child device) +-------------------------------------- + +- compatible + name of I2C slave device - reg - I2C slave addresses + One or many I2C slave addresses. These are usually a 7 bit addresses. + However, flags can be attached to an address. I2C_TEN_BIT_ADDRESS is + used to mark a 10 bit address. It is needed to avoid the ambiguity + between e.g. a 7 bit address of 0x50 and a 10 bit address of 0x050 + which, in theory, can be on the same bus. + Another flag is I2C_OWN_SLAVE_ADDRESS to mark addresses on which we + listen to be devices ourselves. + +Optional properties (per child device) +-------------------------------------- + +These properties may not be supported by all drivers. However, if a driver +wants to support one of the below features, it should adapt these bindings. + +- host-notify + device uses SMBus host notify protocol instead of interrupt line. + +- interrupts + interrupts used by the device. + +- interrupt-names + "irq", "wakeup" and "smbus_alert" names are recognized by I2C core, + other names are left to individual drivers. - reg-names Names of map programmable addresses. It can contain any map needing another address than default one. +- wakeup-source + device can be used as a wakeup source. + Binding may contain optional "interrupts" property, describing interrupts used by the device. I2C core will assign "irq" interrupt (or the very first interrupt if not using interrupt names) as primary interrupt for the slave. -- GitLab From aef6f2e7cec184fac0ef50958b759e9605ed8128 Mon Sep 17 00:00:00 2001 From: Dave Stevenson Date: Sat, 25 Apr 2020 23:38:07 +0200 Subject: [PATCH 0186/1971] i2c: brcmstb: Fix handling of optional interrupt If there is no interrupt defined then an error is logged due to the use of platform_get_irq. The driver handles not having the interrupt by falling back to polling, therefore make the appropriate call when claiming it. 
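In condensed form, the idiom adopted below (isr stands in for the driver's existing handler): platform_get_irq_optional() does not log when no interrupt is described, and a negative value is then taken as a request for polling.

	dev->irq = platform_get_irq_optional(pdev, 0);
	if (dev->irq >= 0) {
		rc = devm_request_irq(&pdev->dev, dev->irq, isr, IRQF_SHARED,
				      pdev->name, dev);
		if (rc) {
			dev_dbg(dev->device, "falling back to polling mode");
			dev->irq = -1;
		}
	}
	/* dev->irq < 0 from here on means: no usable interrupt, poll instead */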
Signed-off-by: Dave Stevenson Signed-off-by: Stefan Wahren Acked-by: Florian Fainelli Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-brcmstb.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c index 169a2836922d2..d4e0a0f6732ae 100644 --- a/drivers/i2c/busses/i2c-brcmstb.c +++ b/drivers/i2c/busses/i2c-brcmstb.c @@ -647,20 +647,22 @@ static int brcmstb_i2c_probe(struct platform_device *pdev) int_name = NULL; /* Get the interrupt number */ - dev->irq = platform_get_irq(pdev, 0); + dev->irq = platform_get_irq_optional(pdev, 0); /* disable the bsc interrupt line */ brcmstb_i2c_enable_disable_irq(dev, INT_DISABLE); /* register the ISR handler */ - rc = devm_request_irq(&pdev->dev, dev->irq, brcmstb_i2c_isr, - IRQF_SHARED, - int_name ? int_name : pdev->name, - dev); - - if (rc) { - dev_dbg(dev->device, "falling back to polling mode"); - dev->irq = -1; + if (dev->irq >= 0) { + rc = devm_request_irq(&pdev->dev, dev->irq, brcmstb_i2c_isr, + IRQF_SHARED, + int_name ? int_name : pdev->name, + dev); + + if (rc) { + dev_dbg(dev->device, "falling back to polling mode"); + dev->irq = -1; + } } if (of_property_read_u32(dev->device->of_node, -- GitLab From 1b9e68533299253f5c7d87ccc4cd39ed0dccb37d Mon Sep 17 00:00:00 2001 From: Jason Yan Date: Mon, 20 Apr 2020 12:28:16 +0800 Subject: [PATCH 0187/1971] i2c: qup: remove unneeded conversion to bool The '>' expression itself is bool, no need to convert it to bool again. Signed-off-by: Jason Yan Reviewed-by: Bjorn Andersson Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-qup.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c index 0e3525fe613f8..fbc04b60cfd1c 100644 --- a/drivers/i2c/busses/i2c-qup.c +++ b/drivers/i2c/busses/i2c-qup.c @@ -956,10 +956,8 @@ static void qup_i2c_conf_v1(struct qup_i2c_dev *qup) u32 qup_config = I2C_MINI_CORE | I2C_N_VAL; u32 io_mode = QUP_REPACK_EN; - blk->is_tx_blk_mode = - blk->total_tx_len > qup->out_fifo_sz ? true : false; - blk->is_rx_blk_mode = - blk->total_rx_len > qup->in_fifo_sz ? true : false; + blk->is_tx_blk_mode = blk->total_tx_len > qup->out_fifo_sz; + blk->is_rx_blk_mode = blk->total_rx_len > qup->in_fifo_sz; if (blk->is_tx_blk_mode) { io_mode |= QUP_OUTPUT_BLK_MODE; @@ -1528,9 +1526,9 @@ qup_i2c_determine_mode_v2(struct qup_i2c_dev *qup, qup->use_dma = true; } else { qup->blk.is_tx_blk_mode = max_tx_len > qup->out_fifo_sz - - QUP_MAX_TAGS_LEN ? true : false; + QUP_MAX_TAGS_LEN; qup->blk.is_rx_blk_mode = max_rx_len > qup->in_fifo_sz - - READ_RX_TAGS_LEN ? true : false; + READ_RX_TAGS_LEN; } return 0; -- GitLab From 6b98bf01d2af40141cc7d831a1d2064f6f25e45e Mon Sep 17 00:00:00 2001 From: Aishwarya R Date: Wed, 8 Apr 2020 15:33:53 +0530 Subject: [PATCH 0188/1971] i2c: powermac: Simplify reading the "reg" and "i2c-address" property Use of_property_read_u32 to read the "reg" and "i2c-address" property instead of using of_get_property to check the return values. 
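A side-by-side sketch of the simplification (node being the child device node): of_property_read_u32() bundles the presence check, the length check and the big-endian conversion that the open-coded variant had to do by hand.

#include <linux/of.h>

	u32 addr;
	int len;
	const __be32 *prop;

	/* before: manual length check and endian conversion */
	prop = of_get_property(node, "reg", &len);
	if (prop && len >= sizeof(u32))
		addr = (be32_to_cpup(prop) & 0xff) >> 1;

	/* after: a single call that returns 0 on success */
	if (!of_property_read_u32(node, "reg", &addr))
		addr = (addr & 0xff) >> 1;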
Signed-off-by: Aishwarya R Tested-by: Erhard Furtner Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-powermac.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c index d565714c1f13a..81506c2dab657 100644 --- a/drivers/i2c/busses/i2c-powermac.c +++ b/drivers/i2c/busses/i2c-powermac.c @@ -207,18 +207,18 @@ static u32 i2c_powermac_get_addr(struct i2c_adapter *adap, struct pmac_i2c_bus *bus, struct device_node *node) { - const __be32 *prop; - int len; + u32 prop; + int ret; /* First check for valid "reg" */ - prop = of_get_property(node, "reg", &len); - if (prop && (len >= sizeof(int))) - return (be32_to_cpup(prop) & 0xff) >> 1; + ret = of_property_read_u32(node, "reg", &prop); + if (ret == 0) + return (prop & 0xff) >> 1; /* Then check old-style "i2c-address" */ - prop = of_get_property(node, "i2c-address", &len); - if (prop && (len >= sizeof(int))) - return (be32_to_cpup(prop) & 0xff) >> 1; + ret = of_property_read_u32(node, "i2c-address", &prop); + if (ret == 0) + return (prop & 0xff) >> 1; /* Now handle some devices with missing "reg" properties */ if (of_node_name_eq(node, "cereal")) -- GitLab From e14d796d8339405d0daff98cdaf1465706a90ffb Mon Sep 17 00:00:00 2001 From: Rayagonda Kokatanur Date: Sun, 22 Mar 2020 23:53:22 +0530 Subject: [PATCH 0189/1971] i2c: iproc: add support for SMBUS quick cmd Add support for SMBUS quick command. SMBUS quick command passes single bit of information to the slave (target) device. Can be used to turn slave device on or off. By default i2c_detect tool uses the smbus quick cmd to try and detect devices. Without this support it will not detect some slaves. Signed-off-by: Rayagonda Kokatanur Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-bcm-iproc.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c index 44be0926b5663..2d11f4d3f5c15 100644 --- a/drivers/i2c/busses/i2c-bcm-iproc.c +++ b/drivers/i2c/busses/i2c-bcm-iproc.c @@ -79,6 +79,7 @@ #define M_CMD_STATUS_RX_FIFO_FULL 0x6 #define M_CMD_PROTOCOL_SHIFT 9 #define M_CMD_PROTOCOL_MASK 0xf +#define M_CMD_PROTOCOL_QUICK 0x0 #define M_CMD_PROTOCOL_BLK_WR 0x7 #define M_CMD_PROTOCOL_BLK_RD 0x8 #define M_CMD_PROTOCOL_PROCESS 0xa @@ -765,7 +766,11 @@ static int bcm_iproc_i2c_xfer_internal(struct bcm_iproc_i2c_dev *iproc_i2c, * number of bytes to read */ val = BIT(M_CMD_START_BUSY_SHIFT); - if (msg->flags & I2C_M_RD) { + + if (msg->len == 0) { + /* SMBUS QUICK Command (Read/Write) */ + val |= (M_CMD_PROTOCOL_QUICK << M_CMD_PROTOCOL_SHIFT); + } else if (msg->flags & I2C_M_RD) { u32 protocol; iproc_i2c->rx_bytes = 0; @@ -827,8 +832,7 @@ static uint32_t bcm_iproc_i2c_functionality(struct i2c_adapter *adap) { u32 val; - /* We do not support the SMBUS Quick command */ - val = I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK); + val = I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; if (adap->algo->reg_slave) val |= I2C_FUNC_SLAVE; -- GitLab From 84c0eb212c88176142a0aaf1f896d9bddd8bcb7a Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Thu, 26 Mar 2020 22:09:52 +0100 Subject: [PATCH 0190/1971] platform/mellanox: mlxreg-hotplug: convert to use i2c_new_client_device() Move away from the deprecated API and return the shiny new ERRPTR where useful. 
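The difference between the two calls, reduced to a sketch (adapter and info stand for the hotplug device's adapter and board info):

#include <linux/i2c.h>

	/* deprecated: NULL on failure, the reason is lost */
	client = i2c_new_device(adapter, info);
	if (!client)
		return -EFAULT;

	/* preferred: an ERR_PTR that carries the real error code */
	client = i2c_new_client_device(adapter, info);
	if (IS_ERR(client))
		return PTR_ERR(client);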
Signed-off-by: Wolfram Sang Acked-by: Andy Shevchenko Signed-off-by: Wolfram Sang --- drivers/platform/mellanox/mlxreg-hotplug.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/drivers/platform/mellanox/mlxreg-hotplug.c b/drivers/platform/mellanox/mlxreg-hotplug.c index 77be37a1fbcf5..ed48917af1626 100644 --- a/drivers/platform/mellanox/mlxreg-hotplug.c +++ b/drivers/platform/mellanox/mlxreg-hotplug.c @@ -101,6 +101,7 @@ static int mlxreg_hotplug_device_create(struct mlxreg_hotplug_priv_data *priv, struct mlxreg_core_data *data) { struct mlxreg_core_hotplug_platform_data *pdata; + struct i2c_client *client; /* Notify user by sending hwmon uevent. */ kobject_uevent(&priv->hwmon->kobj, KOBJ_CHANGE); @@ -121,18 +122,20 @@ static int mlxreg_hotplug_device_create(struct mlxreg_hotplug_priv_data *priv, return -EFAULT; } - data->hpdev.client = i2c_new_device(data->hpdev.adapter, - data->hpdev.brdinfo); - if (!data->hpdev.client) { + client = i2c_new_client_device(data->hpdev.adapter, + data->hpdev.brdinfo); + if (IS_ERR(client)) { dev_err(priv->dev, "Failed to create client %s at bus %d at addr 0x%02x\n", data->hpdev.brdinfo->type, data->hpdev.nr + pdata->shift_nr, data->hpdev.brdinfo->addr); i2c_put_adapter(data->hpdev.adapter); data->hpdev.adapter = NULL; - return -EFAULT; + return PTR_ERR(client); } + data->hpdev.client = client; + return 0; } -- GitLab From 7edfe3df2a9f469d8e80e4e1877a92377b5722ae Mon Sep 17 00:00:00 2001 From: Alan Mikhak Date: Wed, 22 Apr 2020 18:58:21 -0700 Subject: [PATCH 0191/1971] dmaengine: dw-edma: Check MSI descriptor before copying Modify dw_edma_irq_request() to check if a struct msi_desc entry exists before copying the contents of its struct msi_msg pointer. Without this sanity check, __get_cached_msi_msg() crashes when invoked by dw_edma_irq_request() running on a Linux-based PCIe endpoint device. MSI interrupt are not received by PCIe endpoint devices. If irq_get_msi_desc() returns null, then there is no cached struct msi_msg to be copied. 
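A minimal sketch of the guard: on a Linux-based endpoint the vector handed back by dw->ops->irq_vector() need not be MSI-backed, irq_get_msi_desc() then returns NULL, and the unconditional dereference of that descriptor inside __get_cached_msi_msg() is what crashed.

	irq = dw->ops->irq_vector(dev, 0);
	if (irq_get_msi_desc(irq))	/* only valid for MSI-backed vectors */
		get_cached_msi_msg(irq, &dw->irq[0].msi);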
Reported-by: kbuild test robot Signed-off-by: Alan Mikhak Acked-by: Gustavo Pimentel Link: https://lore.kernel.org/r/1587607101-31914-1-git-send-email-alan.mikhak@sifive.com Signed-off-by: Vinod Koul --- drivers/dma/dw-edma/dw-edma-core.c | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c index db401eb113221..306ab50462bee 100644 --- a/drivers/dma/dw-edma/dw-edma-core.c +++ b/drivers/dma/dw-edma/dw-edma-core.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -773,6 +774,7 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip, u32 rd_mask = 1; int i, err = 0; u32 ch_cnt; + int irq; ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt; @@ -781,16 +783,16 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip, if (dw->nr_irqs == 1) { /* Common IRQ shared among all channels */ - err = request_irq(dw->ops->irq_vector(dev, 0), - dw_edma_interrupt_common, + irq = dw->ops->irq_vector(dev, 0); + err = request_irq(irq, dw_edma_interrupt_common, IRQF_SHARED, dw->name, &dw->irq[0]); if (err) { dw->nr_irqs = 0; return err; } - get_cached_msi_msg(dw->ops->irq_vector(dev, 0), - &dw->irq[0].msi); + if (irq_get_msi_desc(irq)) + get_cached_msi_msg(irq, &dw->irq[0].msi); } else { /* Distribute IRQs equally among all channels */ int tmp = dw->nr_irqs; @@ -804,7 +806,8 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip, dw_edma_add_irq_mask(&rd_mask, *rd_alloc, dw->rd_ch_cnt); for (i = 0; i < (*wr_alloc + *rd_alloc); i++) { - err = request_irq(dw->ops->irq_vector(dev, i), + irq = dw->ops->irq_vector(dev, i); + err = request_irq(irq, i < *wr_alloc ? dw_edma_interrupt_write : dw_edma_interrupt_read, @@ -815,8 +818,8 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip, return err; } - get_cached_msi_msg(dw->ops->irq_vector(dev, i), - &dw->irq[i].msi); + if (irq_get_msi_desc(irq)) + get_cached_msi_msg(irq, &dw->irq[i].msi); } dw->nr_irqs = i; -- GitLab From c0fca736098cbd5dce27640e625b20c67d5f99f1 Mon Sep 17 00:00:00 2001 From: Lubomir Rintel Date: Fri, 24 Apr 2020 23:50:20 +0200 Subject: [PATCH 0192/1971] dmaengine: mmp_tdma: Validate the transfer direction We only support DMA_DEV_TO_MEM and DMA_MEM_TO_DEV. Let's not do undefined things with other values and reject them. Signed-off-by: Lubomir Rintel Link: https://lore.kernel.org/r/20200424215020.105281-1-lkundrak@v3.sk Signed-off-by: Vinod Koul --- drivers/dma/mmp_tdma.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index 2e318558c6446..42f8b2fb7684e 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c @@ -427,6 +427,11 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic( int num_periods = buf_len / period_len; int i = 0, buf = 0; + if (!is_slave_direction(direction)) { + dev_err(tdmac->dev, "unsupported transfer direction\n"); + return NULL; + } + if (tdmac->status != DMA_COMPLETE) { dev_err(tdmac->dev, "controller busy"); return NULL; -- GitLab From 920c5974f0d3aff91b5751e7568b54752fb5d6da Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Mon, 27 Apr 2020 13:10:43 +0200 Subject: [PATCH 0193/1971] dmaengine: qcom_hidma: Simplify error handling path in hidma_probe There is no need to call 'hidma_debug_uninit()' in the error handling path. 'hidma_debug_init()' has not been called yet. 
Signed-off-by: Christophe JAILLET Link: https://lore.kernel.org/r/20200427111043.70218-1-christophe.jaillet@wanadoo.fr Signed-off-by: Vinod Koul --- drivers/dma/qcom/hidma.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c index 411f91fde7345..87490e125bc3c 100644 --- a/drivers/dma/qcom/hidma.c +++ b/drivers/dma/qcom/hidma.c @@ -897,7 +897,6 @@ uninit: if (msi) hidma_free_msis(dmadev); - hidma_debug_uninit(dmadev); hidma_ll_uninit(dmadev->lldev); dmafree: if (dmadev) -- GitLab From 86e673f7c974f7385d2bdfa91360ea4b800a85f8 Mon Sep 17 00:00:00 2001 From: Amelie Delaunay Date: Wed, 22 Apr 2020 12:29:03 +0200 Subject: [PATCH 0194/1971] dt-bindings: dma: add direct mode support through device tree in stm32-dma Direct mode or FIFO mode is computed by stm32-dma driver. Add a way for the user to force direct mode, by setting bit 2 in the bitfield value specifying DMA features in the device tree. Signed-off-by: Amelie Delaunay Link: https://lore.kernel.org/r/20200422102904.1448-2-amelie.delaunay@st.com Signed-off-by: Vinod Koul --- Documentation/devicetree/bindings/dma/st,stm32-dma.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/Documentation/devicetree/bindings/dma/st,stm32-dma.yaml b/Documentation/devicetree/bindings/dma/st,stm32-dma.yaml index 0c0ac11ad55fb..71987878e4ae1 100644 --- a/Documentation/devicetree/bindings/dma/st,stm32-dma.yaml +++ b/Documentation/devicetree/bindings/dma/st,stm32-dma.yaml @@ -36,6 +36,11 @@ description: | 0x1: 1/2 full FIFO 0x2: 3/4 full FIFO 0x3: full FIFO + -bit 2: DMA direct mode + 0x0: FIFO mode with threshold selectable with bit 0-1 + 0x1: Direct mode: each DMA request immediately initiates a transfer + from/to the memory, FIFO is bypassed. + maintainers: - Amelie Delaunay -- GitLab From 955b17665d2c221521c554ae6aa22c2486de1a27 Mon Sep 17 00:00:00 2001 From: Amelie Delaunay Date: Wed, 22 Apr 2020 12:29:04 +0200 Subject: [PATCH 0195/1971] dmaengine: stm32-dma: direct mode support through device tree Direct mode or FIFO mode is computed by stm32-dma driver. Add a way for the user to force direct mode, by setting bit 2 in the bitfield value specifying DMA features in the device tree. 
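For illustration, a sketch of how the extended bitfield can be decoded on the driver side, assuming the features value arrives as the fourth cell of the client's "dmas" specifier as described in the binding; the macro names here are illustrative, the hunks below define the driver's own STM32_DMA_DIRECT_MODE_MASK/_GET():

#include <linux/bits.h>

#define DMA_FEAT_FIFO_THRESHOLD	GENMASK(1, 0)	/* bits 0-1: FIFO threshold */
#define DMA_FEAT_DIRECT_MODE	BIT(2)		/* bit 2: bypass the FIFO */

	u32 features = dma_spec->args[3];	/* 4th cell of the "dmas" entry */
	u32 threshold = features & DMA_FEAT_FIFO_THRESHOLD;
	bool direct_mode = features & DMA_FEAT_DIRECT_MODE;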
Signed-off-by: Amelie Delaunay Link: https://lore.kernel.org/r/20200422102904.1448-3-amelie.delaunay@st.com Signed-off-by: Vinod Koul --- drivers/dma/stm32-dma.c | 41 +++++++++++++++++++++++++++++++---------- 1 file changed, 31 insertions(+), 10 deletions(-) diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c index 0ddbaa4b4f0b8..96ad1b3d24c68 100644 --- a/drivers/dma/stm32-dma.c +++ b/drivers/dma/stm32-dma.c @@ -117,6 +117,7 @@ #define STM32_DMA_FIFO_THRESHOLD_HALFFULL 0x01 #define STM32_DMA_FIFO_THRESHOLD_3QUARTERSFULL 0x02 #define STM32_DMA_FIFO_THRESHOLD_FULL 0x03 +#define STM32_DMA_FIFO_THRESHOLD_NONE 0x04 #define STM32_DMA_MAX_DATA_ITEMS 0xffff /* @@ -136,6 +137,9 @@ /* DMA Features */ #define STM32_DMA_THRESHOLD_FTR_MASK GENMASK(1, 0) #define STM32_DMA_THRESHOLD_FTR_GET(n) ((n) & STM32_DMA_THRESHOLD_FTR_MASK) +#define STM32_DMA_DIRECT_MODE_MASK BIT(2) +#define STM32_DMA_DIRECT_MODE_GET(n) (((n) & STM32_DMA_DIRECT_MODE_MASK) \ + >> 2) enum stm32_dma_width { STM32_DMA_BYTE, @@ -281,6 +285,9 @@ static bool stm32_dma_fifo_threshold_is_allowed(u32 burst, u32 threshold, { u32 remaining; + if (threshold == STM32_DMA_FIFO_THRESHOLD_NONE) + return false; + if (width != DMA_SLAVE_BUSWIDTH_UNDEFINED) { if (burst != 0) { /* @@ -302,6 +309,10 @@ static bool stm32_dma_fifo_threshold_is_allowed(u32 burst, u32 threshold, static bool stm32_dma_is_burst_possible(u32 buf_len, u32 threshold) { + /* If FIFO direct mode, burst is not possible */ + if (threshold == STM32_DMA_FIFO_THRESHOLD_NONE) + return false; + /* * Buffer or period length has to be aligned on FIFO depth. * Otherwise bytes may be stuck within FIFO at buffer or period @@ -657,6 +668,12 @@ static irqreturn_t stm32_dma_chan_irq(int irq, void *devid) dev_dbg(chan2dev(chan), "FIFO over/underrun\n"); } } + if (status & STM32_DMA_DMEI) { + stm32_dma_irq_clear(chan, STM32_DMA_DMEI); + status &= ~STM32_DMA_DMEI; + if (sfcr & STM32_DMA_SCR_DMEIE) + dev_dbg(chan2dev(chan), "Direct mode overrun\n"); + } if (status) { stm32_dma_irq_clear(chan, status); dev_err(chan2dev(chan), "DMA error: status=0x%08x\n", status); @@ -692,13 +709,13 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan, int src_bus_width, dst_bus_width; int src_burst_size, dst_burst_size; u32 src_maxburst, dst_maxburst, src_best_burst, dst_best_burst; - u32 dma_scr, threshold; + u32 dma_scr, fifoth; src_addr_width = chan->dma_sconfig.src_addr_width; dst_addr_width = chan->dma_sconfig.dst_addr_width; src_maxburst = chan->dma_sconfig.src_maxburst; dst_maxburst = chan->dma_sconfig.dst_maxburst; - threshold = chan->threshold; + fifoth = chan->threshold; switch (direction) { case DMA_MEM_TO_DEV: @@ -710,7 +727,7 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan, /* Set device burst size */ dst_best_burst = stm32_dma_get_best_burst(buf_len, dst_maxburst, - threshold, + fifoth, dst_addr_width); dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst); @@ -718,7 +735,7 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan, return dst_burst_size; /* Set memory data size */ - src_addr_width = stm32_dma_get_max_width(buf_len, threshold); + src_addr_width = stm32_dma_get_max_width(buf_len, fifoth); chan->mem_width = src_addr_width; src_bus_width = stm32_dma_get_width(chan, src_addr_width); if (src_bus_width < 0) @@ -728,7 +745,7 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan, src_maxburst = STM32_DMA_MAX_BURST; src_best_burst = stm32_dma_get_best_burst(buf_len, src_maxburst, - threshold, + fifoth, src_addr_width); 
src_burst_size = stm32_dma_get_burst(chan, src_best_burst); if (src_burst_size < 0) @@ -742,7 +759,8 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan, /* Set FIFO threshold */ chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK; - chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(threshold); + if (fifoth != STM32_DMA_FIFO_THRESHOLD_NONE) + chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(fifoth); /* Set peripheral address */ chan->chan_reg.dma_spar = chan->dma_sconfig.dst_addr; @@ -758,7 +776,7 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan, /* Set device burst size */ src_best_burst = stm32_dma_get_best_burst(buf_len, src_maxburst, - threshold, + fifoth, src_addr_width); chan->mem_burst = src_best_burst; src_burst_size = stm32_dma_get_burst(chan, src_best_burst); @@ -766,7 +784,7 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan, return src_burst_size; /* Set memory data size */ - dst_addr_width = stm32_dma_get_max_width(buf_len, threshold); + dst_addr_width = stm32_dma_get_max_width(buf_len, fifoth); chan->mem_width = dst_addr_width; dst_bus_width = stm32_dma_get_width(chan, dst_addr_width); if (dst_bus_width < 0) @@ -776,7 +794,7 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan, dst_maxburst = STM32_DMA_MAX_BURST; dst_best_burst = stm32_dma_get_best_burst(buf_len, dst_maxburst, - threshold, + fifoth, dst_addr_width); chan->mem_burst = dst_best_burst; dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst); @@ -791,7 +809,8 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan, /* Set FIFO threshold */ chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK; - chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(threshold); + if (fifoth != STM32_DMA_FIFO_THRESHOLD_NONE) + chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(fifoth); /* Set peripheral address */ chan->chan_reg.dma_spar = chan->dma_sconfig.src_addr; @@ -1216,6 +1235,8 @@ static void stm32_dma_set_config(struct stm32_dma_chan *chan, chan->chan_reg.dma_scr |= STM32_DMA_SCR_TEIE | STM32_DMA_SCR_TCIE; chan->threshold = STM32_DMA_THRESHOLD_FTR_GET(cfg->features); + if (STM32_DMA_DIRECT_MODE_GET(cfg->features)) + chan->threshold = STM32_DMA_FIFO_THRESHOLD_NONE; } static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec, -- GitLab From 85f78cec8494ec595ac73181bd5d8fc6d797f592 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Fri, 24 Apr 2020 19:11:44 +0300 Subject: [PATCH 0196/1971] Revert "dmaengine: dmatest: timeout value of -1 should specify infinite wait" This reverts commit ed04b7c57c3383ed4573f1d1d1dbdc1108ba0bed. While it gives a good description what happens, the approach seems too confusing. Let's fix it in the following patch. 
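The source of the confusion: the parameter is declared unsigned, so it can never actually be set to -1, and documenting 0xFFFFFFFF only works around that. Once the type is signed, as the next change makes it, something along the lines of this sketch (not the dmatest code itself) becomes straightforward:

#include <linux/jiffies.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>

static int timeout = 3000;	/* msec; a negative value selects an unbounded wait */
module_param(timeout, int, 0644);

static unsigned long test_timeout_jiffies(void)
{
	return timeout < 0 ? MAX_SCHEDULE_TIMEOUT : msecs_to_jiffies(timeout);
}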
Cc: Gary Hook Signed-off-by: Andy Shevchenko Link: https://lore.kernel.org/r/20200424161147.16895-3-andriy.shevchenko@linux.intel.com Signed-off-by: Vinod Koul --- drivers/dma/dmatest.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index a2cadfa2e6d78..d02089850ec18 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -62,7 +62,7 @@ MODULE_PARM_DESC(pq_sources, static int timeout = 3000; module_param(timeout, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), " - "Pass 0xFFFFFFFF (4294967295) for maximum timeout"); + "Pass -1 for infinite timeout"); static bool noverify; module_param(noverify, bool, S_IRUGO | S_IWUSR); @@ -98,7 +98,7 @@ MODULE_PARM_DESC(transfer_size, "Optional custom transfer size in bytes (default * @iterations: iterations before stopping test * @xor_sources: number of xor source buffers * @pq_sources: number of p+q source buffers - * @timeout: transfer timeout in msec, 0 - 0xFFFFFFFF (4294967295) + * @timeout: transfer timeout in msec, -1 for infinite timeout */ struct dmatest_params { unsigned int buf_size; @@ -109,7 +109,7 @@ struct dmatest_params { unsigned int iterations; unsigned int xor_sources; unsigned int pq_sources; - unsigned int timeout; + int timeout; bool noverify; bool norandom; int alignment; -- GitLab From 35c5fc028511c743db5313f0ef8cac35141706e9 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Fri, 24 Apr 2020 19:11:45 +0300 Subject: [PATCH 0197/1971] dmaengine: dmatest: Allow negative timeout value to specify infinite wait The dmatest module parameter 'timeout' is documented as accepting a -1 to mean "infinite timeout". However, an infinite timeout is not advised, nor possible since the module parameter is an unsigned int, which won't accept a negative value. Change the parameter type to be signed integer. Cc: Gary Hook Signed-off-by: Andy Shevchenko Link: https://lore.kernel.org/r/20200424161147.16895-4-andriy.shevchenko@linux.intel.com Signed-off-by: Vinod Koul --- drivers/dma/dmatest.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index d02089850ec18..f869036614280 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -60,7 +60,7 @@ MODULE_PARM_DESC(pq_sources, "Number of p+q source buffers (default: 3)"); static int timeout = 3000; -module_param(timeout, uint, S_IRUGO | S_IWUSR); +module_param(timeout, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), " "Pass -1 for infinite timeout"); -- GitLab From 7f2b722668e5334594a50e7055e785687a184644 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Fri, 24 Apr 2020 19:11:46 +0300 Subject: [PATCH 0198/1971] dmaengine: dmatest: Describe members of struct dmatest_params Kernel documentation validator complains that not all members of struct dmatest_params are being described. Describe them all. 
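For context, the convention being satisfied: every struct member needs a matching "@name:" line in the kernel-doc comment, otherwise scripts/kernel-doc (and the documentation build) warns about the undescribed member. An abridged example:

/**
 * struct dmatest_params - test parameters (abridged)
 * @buf_size: size of the memcpy test buffer
 * @timeout:  transfer timeout in msec, -1 for infinite timeout
 * @polled:   use polling for completion instead of interrupts
 */
struct dmatest_params {
	unsigned int buf_size;
	int timeout;
	bool polled;
};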
Signed-off-by: Andy Shevchenko Link: https://lore.kernel.org/r/20200424161147.16895-5-andriy.shevchenko@linux.intel.com Signed-off-by: Vinod Koul --- drivers/dma/dmatest.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index f869036614280..0f04e603d03eb 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -72,10 +72,6 @@ static bool norandom; module_param(norandom, bool, 0644); MODULE_PARM_DESC(norandom, "Disable random offset setup (default: random)"); -static bool polled; -module_param(polled, bool, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(polled, "Use polling for completion instead of interrupts"); - static bool verbose; module_param(verbose, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)"); @@ -88,6 +84,10 @@ static unsigned int transfer_size; module_param(transfer_size, uint, 0644); MODULE_PARM_DESC(transfer_size, "Optional custom transfer size in bytes (default: not used (0))"); +static bool polled; +module_param(polled, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(polled, "Use polling for completion instead of interrupts"); + /** * struct dmatest_params - test parameters. * @buf_size: size of the memcpy test buffer @@ -99,6 +99,11 @@ MODULE_PARM_DESC(transfer_size, "Optional custom transfer size in bytes (default * @xor_sources: number of xor source buffers * @pq_sources: number of p+q source buffers * @timeout: transfer timeout in msec, -1 for infinite timeout + * @noverify: disable data verification + * @norandom: disable random offset setup + * @alignment: custom data address alignment taken as 2^alignment + * @transfer_size: custom transfer size in bytes + * @polled: use polling for completion instead of interrupts */ struct dmatest_params { unsigned int buf_size; -- GitLab From 5332f8b1d9dd17c258c9461f46aa148a8c850149 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Fri, 24 Apr 2020 19:11:47 +0300 Subject: [PATCH 0199/1971] dmaengine: dmatest: Describe members of struct dmatest_info Kernel documentation validator complains that not all members of struct dmatest_info are being described. Describe them all. Signed-off-by: Andy Shevchenko Link: https://lore.kernel.org/r/20200424161147.16895-6-andriy.shevchenko@linux.intel.com Signed-off-by: Vinod Koul --- drivers/dma/dmatest.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 0f04e603d03eb..31235dc8c9048 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -125,7 +125,10 @@ struct dmatest_params { /** * struct dmatest_info - test information. * @params: test parameters + * @channels: channels under test + * @nr_channels: number of channels under test * @lock: access protection to the fields of this structure + * @did_init: module has been initialized completely */ static struct dmatest_info { /* Test parameters */ -- GitLab From eba9c444d34c9f10cbb463329c2c8e14f2adff25 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 13 Apr 2020 11:03:05 +0200 Subject: [PATCH 0200/1971] Improve KCSAN documentation a bit This commit simplifies and clarifies the highest level KCSAN Kconfig help text. Signed-off-by: Ingo Molnar Signed-off-by: Paul E. 
McKenney --- lib/Kconfig.kcsan | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan index 0f1447ff8f558..689b6b81f2726 100644 --- a/lib/Kconfig.kcsan +++ b/lib/Kconfig.kcsan @@ -4,17 +4,18 @@ config HAVE_ARCH_KCSAN bool menuconfig KCSAN - bool "KCSAN: dynamic race detector" + bool "KCSAN: dynamic data race detector" depends on HAVE_ARCH_KCSAN && DEBUG_KERNEL && !KASAN select STACKTRACE help - The Kernel Concurrency Sanitizer (KCSAN) is a dynamic race detector, - which relies on compile-time instrumentation, and uses a - watchpoint-based sampling approach to detect races. + The Kernel Concurrency Sanitizer (KCSAN) is a dynamic + data-race detector that relies on compile-time instrumentation. + KCSAN uses a watchpoint-based sampling approach to detect races. - KCSAN's primary purpose is to detect data races. KCSAN can also be - used to check properties, with the help of provided assertions, of - concurrent code where bugs do not manifest as data races. + While KCSAN's primary purpose is to detect data races, it + also provides assertions to check data access constraints. + These assertions can expose bugs that do not manifest as + data races. See for more details. -- GitLab From 52785b6ae8eded7ac99d65c92d989b702e5b4376 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Fri, 17 Apr 2020 02:58:37 +0000 Subject: [PATCH 0201/1971] kcsan: Use GFP_ATOMIC under spin lock A spin lock is held in insert_report_filterlist(), so the krealloc() should use GFP_ATOMIC. This commit therefore makes this change. Reviewed-by: Marco Elver Signed-off-by: Wei Yongjun Signed-off-by: Paul E. McKenney --- kernel/kcsan/debugfs.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c index 1a08664a7fabb..023e49c58d55e 100644 --- a/kernel/kcsan/debugfs.c +++ b/kernel/kcsan/debugfs.c @@ -230,7 +230,7 @@ static ssize_t insert_report_filterlist(const char *func) /* initial allocation */ report_filterlist.addrs = kmalloc_array(report_filterlist.size, - sizeof(unsigned long), GFP_KERNEL); + sizeof(unsigned long), GFP_ATOMIC); if (report_filterlist.addrs == NULL) { ret = -ENOMEM; goto out; @@ -240,7 +240,7 @@ static ssize_t insert_report_filterlist(const char *func) size_t new_size = report_filterlist.size * 2; unsigned long *new_addrs = krealloc(report_filterlist.addrs, - new_size * sizeof(unsigned long), GFP_KERNEL); + new_size * sizeof(unsigned long), GFP_ATOMIC); if (new_addrs == NULL) { /* leave filterlist itself untouched */ -- GitLab From 3798cc4d106e91382bfe016caa2edada27c2bb3f Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Mon, 27 Apr 2020 20:46:13 +0200 Subject: [PATCH 0202/1971] x86/speculation: Add Ivy Bridge to affected list Make the docs match the code. Signed-off-by: Josh Poimboeuf Signed-off-by: Thomas Gleixner --- .../hw-vuln/special-register-buffer-data-sampling.rst | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/Documentation/admin-guide/hw-vuln/special-register-buffer-data-sampling.rst b/Documentation/admin-guide/hw-vuln/special-register-buffer-data-sampling.rst index 6a473da80b62a..47b1b3afac994 100644 --- a/Documentation/admin-guide/hw-vuln/special-register-buffer-data-sampling.rst +++ b/Documentation/admin-guide/hw-vuln/special-register-buffer-data-sampling.rst @@ -27,6 +27,8 @@ by software using TSX_CTRL_MSR otherwise they are not affected. 
============= ============ ======== common name Family_Model Stepping ============= ============ ======== + IvyBridge 06_3AH All + Haswell 06_3CH All Haswell_L 06_45H All Haswell_G 06_46H All @@ -37,9 +39,8 @@ by software using TSX_CTRL_MSR otherwise they are not affected. Skylake_L 06_4EH All Skylake 06_5EH All - Kabylake_L 06_8EH <=0xC - - Kabylake 06_9EH <=0xD + Kabylake_L 06_8EH <= 0xC + Kabylake 06_9EH <= 0xD ============= ============ ======== Related CVEs -- GitLab From 6052abf8ff783a14dcd2416be2c029ebeba9ad98 Mon Sep 17 00:00:00 2001 From: Rajat Jain Date: Mon, 27 Apr 2020 17:50:45 -0700 Subject: [PATCH 0203/1971] Input: i8042 - attach fwnode to serio i8042 kbd device Attach the firmware node to the serio i8042 kbd device so that device properties can be passed from the firmware. Signed-off-by: Rajat Jain Link: https://lore.kernel.org/r/20200427210259.91330-1-rajatja@google.com Signed-off-by: Dmitry Torokhov --- drivers/input/serio/i8042-x86ia64io.h | 1 + drivers/input/serio/i8042.c | 3 +++ 2 files changed, 4 insertions(+) diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 08e919dbeb5d1..d0c39426ca2c8 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -938,6 +938,7 @@ static int i8042_pnp_kbd_probe(struct pnp_dev *dev, const struct pnp_device_id * } i8042_pnp_id_to_string(dev->id, i8042_kbd_firmware_id, sizeof(i8042_kbd_firmware_id)); + i8042_kbd_fwnode = dev_fwnode(&dev->dev); /* Keyboard ports are always supposed to be wakeup-enabled */ device_set_wakeup_enable(&dev->dev, true); diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index 20ff2bed3917a..0dddf273afd94 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c @@ -21,6 +21,7 @@ #include #include #include +#include #include @@ -124,6 +125,7 @@ MODULE_PARM_DESC(unmask_kbd_data, "Unconditional enable (may reveal sensitive da static bool i8042_bypass_aux_irq_test; static char i8042_kbd_firmware_id[128]; static char i8042_aux_firmware_id[128]; +static struct fwnode_handle *i8042_kbd_fwnode; #include "i8042.h" @@ -1335,6 +1337,7 @@ static int __init i8042_create_kbd_port(void) strlcpy(serio->phys, I8042_KBD_PHYS_DESC, sizeof(serio->phys)); strlcpy(serio->firmware_id, i8042_kbd_firmware_id, sizeof(serio->firmware_id)); + set_primary_fwnode(&serio->dev, i8042_kbd_fwnode); port->serio = serio; port->irq = I8042_KBD_IRQ; -- GitLab From 8f7b057abe283a4505747ea8275751479442de53 Mon Sep 17 00:00:00 2001 From: Rajat Jain Date: Mon, 27 Apr 2020 17:51:15 -0700 Subject: [PATCH 0204/1971] Input: atkbd - expose function row physical map to userspace Certain keyboards have their top-row keys intended for actions such as "Browser back", "Browser Refresh", "Fullscreen" etc as their primary mode, thus they will send scan codes for those actions. Further, they don't have a dedicated "Fn" key so don't have the capability to generate function key codes (e.g. F1, F2 etc..). However in this case, if userspace still wants to "synthesize" those function keys using the top row action keys, it needs to know the physical position of the top row keys. (Essentially a mapping between usage codes and a physical location in the top row). This patch enhances the atkbd driver to receive such a mapping from the firmware / device tree, and expose it to userspace in the form of a function-row-physmap attribute. 
The attribute would be a space separated ordered list of physical codes, for the keys in the function row, in left-to-right order. The attribute will only be present if the kernel knows about such mapping, otherwise the attribute shall not be visible. Signed-off-by: Rajat Jain Link: https://lore.kernel.org/r/20200427210259.91330-2-rajatja@google.com Signed-off-by: Dmitry Torokhov --- drivers/input/keyboard/atkbd.c | 56 ++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c index 7e3eae54c1926..358e91f8888ff 100644 --- a/drivers/input/keyboard/atkbd.c +++ b/drivers/input/keyboard/atkbd.c @@ -24,6 +24,7 @@ #include #include #include +#include #define DRIVER_DESC "AT and PS/2 keyboard driver" @@ -63,6 +64,8 @@ static bool atkbd_terminal; module_param_named(terminal, atkbd_terminal, bool, 0); MODULE_PARM_DESC(terminal, "Enable break codes on an IBM Terminal keyboard connected via AT/PS2"); +#define MAX_FUNCTION_ROW_KEYS 24 + /* * Scancode to keycode tables. These are just the default setting, and * are loadable via a userland utility. @@ -230,6 +233,9 @@ struct atkbd { /* Serializes reconnect(), attr->set() and event work */ struct mutex mutex; + + u32 function_row_physmap[MAX_FUNCTION_ROW_KEYS]; + int num_function_row_keys; }; /* @@ -283,6 +289,7 @@ static struct device_attribute atkbd_attr_##_name = \ __ATTR(_name, S_IRUGO, atkbd_do_show_##_name, NULL); ATKBD_DEFINE_RO_ATTR(err_count); +ATKBD_DEFINE_RO_ATTR(function_row_physmap); static struct attribute *atkbd_attributes[] = { &atkbd_attr_extra.attr, @@ -292,11 +299,42 @@ static struct attribute *atkbd_attributes[] = { &atkbd_attr_softrepeat.attr, &atkbd_attr_softraw.attr, &atkbd_attr_err_count.attr, + &atkbd_attr_function_row_physmap.attr, NULL }; +static ssize_t atkbd_show_function_row_physmap(struct atkbd *atkbd, char *buf) +{ + ssize_t size = 0; + int i; + + if (!atkbd->num_function_row_keys) + return 0; + + for (i = 0; i < atkbd->num_function_row_keys; i++) + size += scnprintf(buf + size, PAGE_SIZE - size, "%02X ", + atkbd->function_row_physmap[i]); + size += scnprintf(buf + size, PAGE_SIZE - size, "\n"); + return size; +} + +static umode_t atkbd_attr_is_visible(struct kobject *kobj, + struct attribute *attr, int i) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct serio *serio = to_serio_port(dev); + struct atkbd *atkbd = serio_get_drvdata(serio); + + if (attr == &atkbd_attr_function_row_physmap.attr && + !atkbd->num_function_row_keys) + return 0; + + return attr->mode; +} + static struct attribute_group atkbd_attribute_group = { .attrs = atkbd_attributes, + .is_visible = atkbd_attr_is_visible, }; static const unsigned int xl_table[] = { @@ -1121,6 +1159,22 @@ static void atkbd_set_device_attrs(struct atkbd *atkbd) } } +static void atkbd_parse_fwnode_data(struct serio *serio) +{ + struct atkbd *atkbd = serio_get_drvdata(serio); + struct device *dev = &serio->dev; + int n; + + /* Parse "function-row-physmap" property */ + n = device_property_count_u32(dev, "function-row-physmap"); + if (n > 0 && n <= MAX_FUNCTION_ROW_KEYS && + !device_property_read_u32_array(dev, "function-row-physmap", + atkbd->function_row_physmap, n)) { + atkbd->num_function_row_keys = n; + dev_dbg(dev, "FW reported %d function-row key locations\n", n); + } +} + /* * atkbd_connect() is called when the serio module finds an interface * that isn't handled yet by an appropriate device driver. 
We check if @@ -1184,6 +1238,8 @@ static int atkbd_connect(struct serio *serio, struct serio_driver *drv) atkbd->id = 0xab00; } + atkbd_parse_fwnode_data(serio); + atkbd_set_keycode_table(atkbd); atkbd_set_device_attrs(atkbd); -- GitLab From 9d17ad2369dcc9beda0785cf5960222dbdd6c9df Mon Sep 17 00:00:00 2001 From: Rajat Jain Date: Mon, 27 Apr 2020 17:53:46 -0700 Subject: [PATCH 0205/1971] Input: atkbd - receive and use physcode->keycode mapping from FW Allow the firmware to specify the mapping between the scan code and the linux keycode. This takes the form of a "linux,keymap" property which is an array of u32 values, each value specifying mapping for a key. Signed-off-by: Rajat Jain Link: https://lore.kernel.org/r/20200427210259.91330-3-rajatja@google.com Signed-off-by: Dmitry Torokhov --- drivers/input/keyboard/atkbd.c | 41 +++++++++++++++++++++++++++++++++- 1 file changed, 40 insertions(+), 1 deletion(-) diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c index 358e91f8888ff..6ec28265771df 100644 --- a/drivers/input/keyboard/atkbd.c +++ b/drivers/input/keyboard/atkbd.c @@ -66,6 +66,9 @@ MODULE_PARM_DESC(terminal, "Enable break codes on an IBM Terminal keyboard conne #define MAX_FUNCTION_ROW_KEYS 24 +#define SCANCODE(keymap) ((keymap >> 16) & 0xFFFF) +#define KEYCODE(keymap) (keymap & 0xFFFF) + /* * Scancode to keycode tables. These are just the default setting, and * are loadable via a userland utility. @@ -1032,6 +1035,39 @@ static unsigned int atkbd_oqo_01plus_scancode_fixup(struct atkbd *atkbd, return code; } +static int atkbd_get_keymap_from_fwnode(struct atkbd *atkbd) +{ + struct device *dev = &atkbd->ps2dev.serio->dev; + int i, n; + u32 *ptr; + u16 scancode, keycode; + + /* Parse "linux,keymap" property */ + n = device_property_count_u32(dev, "linux,keymap"); + if (n <= 0 || n > ATKBD_KEYMAP_SIZE) + return -ENXIO; + + ptr = kcalloc(n, sizeof(u32), GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + if (device_property_read_u32_array(dev, "linux,keymap", ptr, n)) { + dev_err(dev, "problem parsing FW keymap property\n"); + kfree(ptr); + return -EINVAL; + } + + memset(atkbd->keycode, 0, sizeof(atkbd->keycode)); + for (i = 0; i < n; i++) { + scancode = SCANCODE(ptr[i]); + keycode = KEYCODE(ptr[i]); + atkbd->keycode[scancode] = keycode; + } + + kfree(ptr); + return 0; +} + /* * atkbd_set_keycode_table() initializes keyboard's keycode table * according to the selected scancode set @@ -1039,13 +1075,16 @@ static unsigned int atkbd_oqo_01plus_scancode_fixup(struct atkbd *atkbd, static void atkbd_set_keycode_table(struct atkbd *atkbd) { + struct device *dev = &atkbd->ps2dev.serio->dev; unsigned int scancode; int i, j; memset(atkbd->keycode, 0, sizeof(atkbd->keycode)); bitmap_zero(atkbd->force_release_mask, ATKBD_KEYMAP_SIZE); - if (atkbd->translated) { + if (!atkbd_get_keymap_from_fwnode(atkbd)) { + dev_dbg(dev, "Using FW keymap\n"); + } else if (atkbd->translated) { for (i = 0; i < 128; i++) { scancode = atkbd_unxlate_table[i]; atkbd->keycode[i] = atkbd_set2_keycode[scancode]; -- GitLab From 40a571bc408bf10ac8be0eadf838483b9cf90141 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Fri, 3 Apr 2020 23:41:27 +0300 Subject: [PATCH 0206/1971] mtd: spi-nor: fix kernel-doc for 'struct spi_nor' When introducing 'struct spi_nor', a number of issues was added in its kernel-doc comment: - double article in the heading kernel-doc comment; - "point" instead of "pointer" for the 'mtd' and 'dev' fields; - "a" articles instead of "an" for the 'dev' field; - acronyms in the lower case 
for the 'dev' field; - missing "pointer to" for the 'priv' field. Fix all of those at once... Fixes: 6e602ef73334 ("mtd: spi-nor: add the basic data structures") Signed-off-by: Sergei Shtylyov Signed-off-by: Tudor Ambarus --- include/linux/mtd/spi-nor.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index 1cc8ed5d59edb..d3fcf7654040e 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -327,10 +327,10 @@ struct spi_nor_manufacturer; struct spi_nor_flash_parameter; /** - * struct spi_nor - Structure for defining a the SPI NOR layer - * @mtd: point to a mtd_info structure + * struct spi_nor - Structure for defining the SPI NOR layer + * @mtd: pointer to an mtd_info structure * @lock: the lock for the read/write/erase/lock/unlock operations - * @dev: point to a spi device, or a spi nor controller device. + * @dev: pointer to an SPI device or an SPI NOR controller device * @spimem: point to the spi mem device * @bouncebuf: bounce buffer used when the buffer passed by the MTD * layer is not DMA-able @@ -354,7 +354,7 @@ struct spi_nor_flash_parameter; * settings that can be overwritten by the spi_nor_fixups * hooks, or dynamically when parsing the SFDP tables. * @dirmap: pointers to struct spi_mem_dirmap_desc for reads/writes. - * @priv: the private data + * @priv: pointer to the private data */ struct spi_nor { struct mtd_info mtd; -- GitLab From ba0aa311b0eb9d0a66a1b6b6fdeaa3b7584b798a Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Fri, 3 Apr 2020 23:44:16 +0300 Subject: [PATCH 0207/1971] mtd: spi-nor: fix kernel-doc for spi_nor::mtd When embedding 'struct mtd_info' within 'struct spi_nor', the kernel-doc comment was forgotten. Fix it by dropping the "pointer to" part from the comment. Fixes: 1976367173a4 ("mtd: spi-nor: embed struct mtd_info within struct spi_nor") Signed-off-by: Sergei Shtylyov Signed-off-by: Tudor Ambarus --- include/linux/mtd/spi-nor.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index d3fcf7654040e..20077881c955d 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -328,7 +328,7 @@ struct spi_nor_flash_parameter; /** * struct spi_nor - Structure for defining the SPI NOR layer - * @mtd: pointer to an mtd_info structure + * @mtd: an mtd_info structure * @lock: the lock for the read/write/erase/lock/unlock operations * @dev: pointer to an SPI device or an SPI NOR controller device * @spimem: point to the spi mem device -- GitLab From ba053dd3b4d841e17e910d0b91e22d9ea813fa39 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Fri, 3 Apr 2020 23:45:49 +0300 Subject: [PATCH 0208/1971] mtd: spi-nor: fix kernel-doc for spi_nor::reg_proto When adding the '{read|write|reg}_proto' fields to 'struct spi_nor', a colon was missed in the comment for the spi_nor::reg_proto' -- add it. 
Fixes: cfc5604c488c ("mtd: spi-nor: introduce SPI 1-2-2 and SPI 1-4-4 protocols") Signed-off-by: Sergei Shtylyov Signed-off-by: Tudor Ambarus --- include/linux/mtd/spi-nor.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index 20077881c955d..1b2143d921ac0 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -347,7 +347,7 @@ struct spi_nor_flash_parameter; * @flags: flag options for the current SPI-NOR (SNOR_F_*) * @read_proto: the SPI protocol for read operations * @write_proto: the SPI protocol for write operations - * @reg_proto the SPI protocol for read_reg/write_reg/erase operations + * @reg_proto: the SPI protocol for read_reg/write_reg/erase operations * @controller_ops: SPI NOR controller driver specific operations. * @params: [FLASH-SPECIFIC] SPI-NOR flash parameters and settings. * The structure includes legacy flash parameters and -- GitLab From 80cb801144265866ce29fabe5ea7ac8588aa673c Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Fri, 3 Apr 2020 23:49:48 +0300 Subject: [PATCH 0209/1971] mtd: spi-nor: fix kernel-doc for spi_nor::info When adding the 'info' field to 'struct spi_nor', some acronyms were in lower case and some in upper case and the JEDEC acronym mistyped -- fix these issues. Fixes: 46dde01f6bab ("mtd: spi-nor: add spi_nor_init() function") Signed-off-by: Sergei Shtylyov Signed-off-by: Tudor Ambarus --- include/linux/mtd/spi-nor.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index 1b2143d921ac0..3333103c519b5 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -335,7 +335,7 @@ struct spi_nor_flash_parameter; * @bouncebuf: bounce buffer used when the buffer passed by the MTD * layer is not DMA-able * @bouncebuf_size: size of the bounce buffer - * @info: spi-nor part JDEC MFR id and other info + * @info: SPI NOR part JEDEC MFR ID and other info * @manufacturer: spi-nor manufacturer * @page_size: the page size of the SPI NOR * @addr_width: number of address bytes -- GitLab From 1f241ad2a093b889122bd6bfdce57551d21bba5b Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Fri, 3 Apr 2020 23:50:57 +0300 Subject: [PATCH 0210/1971] mtd: spi-nor: fix kernel-doc for spi_nor::spimem When adding the 'spimem' field to 'struct spi_nor', a grammar mistake ("point" instead of "pointer") was made -- fix it and convert the SPI acronym to uppercase and fully spell out "memory", while at it... 
Fixes: b35b9a10362 ("mtd: spi-nor: Move m25p80 code in spi-nor.c") Signed-off-by: Sergei Shtylyov Signed-off-by: Tudor Ambarus --- include/linux/mtd/spi-nor.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index 3333103c519b5..bebff2729c189 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -331,7 +331,7 @@ struct spi_nor_flash_parameter; * @mtd: an mtd_info structure * @lock: the lock for the read/write/erase/lock/unlock operations * @dev: pointer to an SPI device or an SPI NOR controller device - * @spimem: point to the spi mem device + * @spimem: pointer to the SPI memory device * @bouncebuf: bounce buffer used when the buffer passed by the MTD * layer is not DMA-able * @bouncebuf_size: size of the bounce buffer -- GitLab From d207b0b355e7c970889bcd04b93f42fab5c1a553 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Tue, 21 Apr 2020 22:38:42 +0300 Subject: [PATCH 0211/1971] mtd: spi-nor: core: fix kernel-doc typo for spi_nor_manufacturer_init_params() When spi_nor_manufacturer_init_params() was added, the kernel-doc for it contained a typo: 'struct spi-nor' instead of 'struct spi_nor' -- fix it. Fixes: ce0b6f3f3c43 ("mtd: spi-nor: Add default_init() hook to tweak flash parameters") Signed-off-by: Sergei Shtylyov Signed-off-by: Tudor Ambarus --- drivers/mtd/spi-nor/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c index cc68ea84318ed..f6f3491fcd761 100644 --- a/drivers/mtd/spi-nor/core.c +++ b/drivers/mtd/spi-nor/core.c @@ -2675,7 +2675,7 @@ static int spi_nor_setup(struct spi_nor *nor, /** * spi_nor_manufacturer_init_params() - Initialize the flash's parameters and * settings based on MFR register and ->default_init() hook. - * @nor: pointer to a 'struct spi-nor'. + * @nor: pointer to a 'struct spi_nor'. */ static void spi_nor_manufacturer_init_params(struct spi_nor *nor) { -- GitLab From f4363e64c204411c8f446921e0ac06d18806652c Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Tue, 21 Apr 2020 22:40:05 +0300 Subject: [PATCH 0212/1971] mtd: spi-nor: core: fix kernel-doc typo for spi_nor_[{info|sfdp}_]init_params() When spi_nor_info_init_params(), spi_nor_sfdp_init_params(), and spi_nor_init_params() were added, the kernel-doc for them contained a typo: 'struct spi-nor' instead of 'struct spi_nor' -- fix them. Fixes: 1c1d8d98e1c7 ("mtd: spi-nor: Split spi_nor_init_params()") Signed-off-by: Sergei Shtylyov Signed-off-by: Tudor Ambarus --- drivers/mtd/spi-nor/core.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c index f6f3491fcd761..757ac0e21554d 100644 --- a/drivers/mtd/spi-nor/core.c +++ b/drivers/mtd/spi-nor/core.c @@ -2690,7 +2690,7 @@ static void spi_nor_manufacturer_init_params(struct spi_nor *nor) /** * spi_nor_sfdp_init_params() - Initialize the flash's parameters and settings * based on JESD216 SFDP standard. - * @nor: pointer to a 'struct spi-nor'. + * @nor: pointer to a 'struct spi_nor'. * * The method has a roll-back mechanism: in case the SFDP parsing fails, the * legacy flash parameters and settings will be restored. @@ -2712,7 +2712,7 @@ static void spi_nor_sfdp_init_params(struct spi_nor *nor) /** * spi_nor_info_init_params() - Initialize the flash's parameters and settings * based on nor->info data. - * @nor: pointer to a 'struct spi-nor'. + * @nor: pointer to a 'struct spi_nor'. 
*/ static void spi_nor_info_init_params(struct spi_nor *nor) { @@ -2841,7 +2841,7 @@ static void spi_nor_late_init_params(struct spi_nor *nor) /** * spi_nor_init_params() - Initialize the flash's parameters and settings. - * @nor: pointer to a 'struct spi-nor'. + * @nor: pointer to a 'struct spi_nor'. * * The flash parameters and settings are initialized based on a sequence of * calls that are ordered by priority: -- GitLab From 04b8edad262eec0d153005973dfbdd83423c0dcb Mon Sep 17 00:00:00 2001 From: Mason Yang Date: Thu, 23 Apr 2020 16:38:42 +0800 Subject: [PATCH 0213/1971] mtd: spi-nor: macronix: Add support for mx25l51245g mx25l51245g is a mass production for new design and replace mx66l51235l(phase out). Validated by read, erase, read back, write and read back on Xilinx Zynq PicoZed FPGA board which included Macronix SPI Host (driver/spi/spi-mxic.c). Signed-off-by: Mason Yang Signed-off-by: Tudor Ambarus --- drivers/mtd/spi-nor/macronix.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c index ab0f963d630c1..c864ac811a782 100644 --- a/drivers/mtd/spi-nor/macronix.c +++ b/drivers/mtd/spi-nor/macronix.c @@ -67,6 +67,9 @@ static const struct flash_info macronix_parts[] = { SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) }, + { "mx25l51245g", INFO(0xc2201a, 0, 64 * 1024, 1024, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_4B_OPCODES) }, { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) }, -- GitLab From 9f09e37d154469eb989918940fba64d8b1f8c42c Mon Sep 17 00:00:00 2001 From: Mason Yang Date: Thu, 23 Apr 2020 16:38:43 +0800 Subject: [PATCH 0214/1971] mtd: spi-nor: macronix: Add support for mx25u51245g mx25u51245g is a mass production for new design and replace mx66u51235f(phase out). Validated by read, erase, read back, write and read back on Xilinx Zynq PicoZed FPGA board which included Macronix SPI Host (driver/spi/spi-mxic.c). Signed-off-by: Mason Yang Signed-off-by: Tudor Ambarus --- drivers/mtd/spi-nor/macronix.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c index c864ac811a782..96735d83c77c3 100644 --- a/drivers/mtd/spi-nor/macronix.c +++ b/drivers/mtd/spi-nor/macronix.c @@ -63,6 +63,9 @@ static const struct flash_info macronix_parts[] = { .fixups = &mx25l25635_fixups }, { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) }, + { "mx25u51245g", INFO(0xc2253a, 0, 64 * 1024, 1024, + SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) }, { "mx25v8035f", INFO(0xc22314, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, -- GitLab From 6424962816ded55a01299c1458a2d21fadc779cc Mon Sep 17 00:00:00 2001 From: Lad Prabhakar Date: Thu, 23 Apr 2020 22:40:46 +0100 Subject: [PATCH 0215/1971] dt-bindings: clock: renesas: cpg-mssr: Document r8a7742 binding Add binding documentation for the RZ/G1H (R8A7742) Clock Pulse Generator driver. 
Signed-off-by: Lad Prabhakar Reviewed-by: Marian-Cristian Rotariu Link: https://lore.kernel.org/r/1587678050-23468-7-git-send-email-prabhakar.mahadev-lad.rj@bp.renesas.com Signed-off-by: Geert Uytterhoeven --- Documentation/devicetree/bindings/clock/renesas,cpg-mssr.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.yaml b/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.yaml index 9cd102e5fed54..c745bd60719ab 100644 --- a/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.yaml +++ b/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.yaml @@ -25,6 +25,7 @@ properties: compatible: enum: - renesas,r7s9210-cpg-mssr # RZ/A2 + - renesas,r8a7742-cpg-mssr # RZ/G1H - renesas,r8a7743-cpg-mssr # RZ/G1M - renesas,r8a7744-cpg-mssr # RZ/G1N - renesas,r8a7745-cpg-mssr # RZ/G1E -- GitLab From 770ae40cd6d23eb331572b8913f51dc715c9d460 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Niklas=20S=C3=B6derlund?= Date: Sun, 16 Feb 2020 14:02:52 +0100 Subject: [PATCH 0216/1971] MAINTAINERS: Add entry for Renesas R-Car thermal drivers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add an entry to make myself a maintainer of the Renesas R-Car thermal drivers. Signed-off-by: Niklas Söderlund Acked-by: Yoshihiro Shimoda Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/20200216130252.125100-1-niklas.soderlund+renesas@ragnatech.se --- MAINTAINERS | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index e64e5db314976..728c6cf55c35f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -14366,6 +14366,15 @@ F: Documentation/devicetree/bindings/i2c/renesas,iic.txt F: drivers/i2c/busses/i2c-rcar.c F: drivers/i2c/busses/i2c-sh_mobile.c +RENESAS R-CAR THERMAL DRIVERS +M: Niklas Söderlund +L: linux-renesas-soc@vger.kernel.org +S: Supported +F: Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.txt +F: Documentation/devicetree/bindings/thermal/rcar-thermal.txt +F: drivers/thermal/rcar_gen3_thermal.c +F: drivers/thermal/rcar_thermal.c + RENESAS RIIC DRIVER M: Chris Brandt S: Supported -- GitLab From fc5be29f9b2cd9fc7243bf3c26ed042f2868fa8b Mon Sep 17 00:00:00 2001 From: Kejia Hu Date: Tue, 28 Apr 2020 08:04:37 +0100 Subject: [PATCH 0217/1971] power: reset: qcom-pon: reg write mask depends on pon generation Instead of hardcode the mask, it should be depends on which generation of pon it was. 
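For illustration only, a hypothetical sketch of the resulting masks; the reason_shift values of 2 (gen1) and 1 (gen2) used here are assumptions for the example, not taken from this patch:

/* Hypothetical values, shown only to illustrate the generation-dependent mask. */
u8 gen1_mask = GENMASK(7, 2);	/* 0xfc, same as the old hard-coded mask */
u8 gen2_mask = GENMASK(7, 1);	/* 0xfe, so bit 1 of the shifted magic is written too */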
Signed-off-by: Kejia Hu Fixes: fce5430f6a86 ("reset: qcom-pon: Add support for gen2 pon") Signed-off-by: Sebastian Reichel --- drivers/power/reset/qcom-pon.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/power/reset/qcom-pon.c b/drivers/power/reset/qcom-pon.c index 22a743a0bf28c..4a688741a88a4 100644 --- a/drivers/power/reset/qcom-pon.c +++ b/drivers/power/reset/qcom-pon.c @@ -34,7 +34,8 @@ static int pm8916_reboot_mode_write(struct reboot_mode_driver *reboot, ret = regmap_update_bits(pon->regmap, pon->baseaddr + PON_SOFT_RB_SPARE, - 0xfc, magic << pon->reason_shift); + GENMASK(7, pon->reason_shift), + magic << pon->reason_shift); if (ret < 0) dev_err(pon->dev, "update reboot mode bits failed\n"); -- GitLab From ddd86c9534e186b4f043b7b828f984b90f9ff40a Mon Sep 17 00:00:00 2001 From: Jason Yan Date: Sun, 26 Apr 2020 17:42:50 +0800 Subject: [PATCH 0218/1971] power: supply: ab8500_fg: remove comparison to bool Fix the following coccicheck warning: drivers/power/supply/ab8500_fg.c:2402:5-24: WARNING: Comparison to bool Signed-off-by: Jason Yan Reviewed-by: Linus Walleij Signed-off-by: Sebastian Reichel --- drivers/power/supply/ab8500_fg.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c index b96f90a82ecfc..751c4f6c74871 100644 --- a/drivers/power/supply/ab8500_fg.c +++ b/drivers/power/supply/ab8500_fg.c @@ -2399,7 +2399,7 @@ static void ab8500_fg_reinit_work(struct work_struct *work) struct ab8500_fg *di = container_of(work, struct ab8500_fg, fg_reinit_work.work); - if (di->flags.calibrate == false) { + if (!di->flags.calibrate) { dev_dbg(di->dev, "Resetting FG state machine to init.\n"); ab8500_fg_clear_cap_samples(di); ab8500_fg_calc_cap_discharge_voltage(di, true); -- GitLab From f3912a5d56838f78ef2999368e4c7b132262125a Mon Sep 17 00:00:00 2001 From: Yuanjiang Yu Date: Mon, 20 Apr 2020 11:42:04 +0800 Subject: [PATCH 0219/1971] power: supply: sc27xx: Set 'no_thermal' flag for SC27xx fuel gauge There is no thermal zone should be created for the SC27XX FGU power supply, thus set the 'no_thermal' flag as true. Signed-off-by: Yuanjiang Yu Signed-off-by: Baolin Wang Signed-off-by: Sebastian Reichel --- drivers/power/supply/sc27xx_fuel_gauge.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/power/supply/sc27xx_fuel_gauge.c b/drivers/power/supply/sc27xx_fuel_gauge.c index a7c8a8453db13..9dcd55f1d9f3b 100644 --- a/drivers/power/supply/sc27xx_fuel_gauge.c +++ b/drivers/power/supply/sc27xx_fuel_gauge.c @@ -705,6 +705,7 @@ static const struct power_supply_desc sc27xx_fgu_desc = { .set_property = sc27xx_fgu_set_property, .external_power_changed = sc27xx_fgu_external_power_changed, .property_is_writeable = sc27xx_fgu_property_is_writeable, + .no_thermal = true, }; static void sc27xx_fgu_adjust_cap(struct sc27xx_fgu_data *data, int cap) -- GitLab From 8720b255f5280fdf89decfa9ff31720aff1c4e49 Mon Sep 17 00:00:00 2001 From: Yuanjiang Yu Date: Mon, 20 Apr 2020 11:42:05 +0800 Subject: [PATCH 0220/1971] power: supply: sc27xx: Allow to change the battery full capacity The battery full capacity can be affected by the temperature or the servicing time or other factors, so some platforms will track the real battery full capacity in charger manager service. Thus we should allow to change the battery full capacity by setting the 'POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN' property as writeable. 
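As a minimal, hypothetical sketch of how an in-kernel charger-manager-style client could push a re-learned full capacity once the property is writeable ('psy' and 'dev' are assumed handles, not part of this patch; the driver divides the value by 1000):

union power_supply_propval val = { .intval = 3500000 };
int ret = power_supply_set_property(psy, POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN, &val);
if (ret)
	dev_warn(dev, "failed to update battery full capacity: %d\n", ret);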
Signed-off-by: Yuanjiang Yu Signed-off-by: Baolin Wang Signed-off-by: Sebastian Reichel --- drivers/power/supply/sc27xx_fuel_gauge.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/power/supply/sc27xx_fuel_gauge.c b/drivers/power/supply/sc27xx_fuel_gauge.c index 9dcd55f1d9f3b..5970d4a780160 100644 --- a/drivers/power/supply/sc27xx_fuel_gauge.c +++ b/drivers/power/supply/sc27xx_fuel_gauge.c @@ -656,6 +656,11 @@ static int sc27xx_fgu_set_property(struct power_supply *psy, ret = 0; break; + case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN: + data->total_cap = val->intval / 1000; + ret = 0; + break; + default: ret = -EINVAL; } @@ -676,7 +681,8 @@ static int sc27xx_fgu_property_is_writeable(struct power_supply *psy, enum power_supply_property psp) { return psp == POWER_SUPPLY_PROP_CAPACITY || - psp == POWER_SUPPLY_PROP_CALIBRATE; + psp == POWER_SUPPLY_PROP_CALIBRATE || + psp == POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN; } static enum power_supply_property sc27xx_fgu_props[] = { -- GitLab From e5431c340852ad14c54f449451f884b161aae31f Mon Sep 17 00:00:00 2001 From: Yuanjiang Yu Date: Mon, 20 Apr 2020 11:42:06 +0800 Subject: [PATCH 0221/1971] power: supply: sc27xx: Add CURRENT_NOW/VOLTAGE_NOW properties support Add new properties to get present current and voltage of the fuel gauge. Signed-off-by: Yuanjiang Yu Signed-off-by: Baolin Wang Signed-off-by: Sebastian Reichel --- drivers/power/supply/sc27xx_fuel_gauge.c | 60 +++++++++++++++++++++++- 1 file changed, 58 insertions(+), 2 deletions(-) diff --git a/drivers/power/supply/sc27xx_fuel_gauge.c b/drivers/power/supply/sc27xx_fuel_gauge.c index 5970d4a780160..82d0c64ef269a 100644 --- a/drivers/power/supply/sc27xx_fuel_gauge.c +++ b/drivers/power/supply/sc27xx_fuel_gauge.c @@ -42,6 +42,8 @@ #define SC27XX_FGU_USER_AREA_SET 0xa0 #define SC27XX_FGU_USER_AREA_CLEAR 0xa4 #define SC27XX_FGU_USER_AREA_STATUS 0xa8 +#define SC27XX_FGU_VOLTAGE_BUF 0xd0 +#define SC27XX_FGU_CURRENT_BUF 0xf0 #define SC27XX_WRITE_SELCLB_EN BIT(0) #define SC27XX_FGU_CLBCNT_MASK GENMASK(15, 0) @@ -376,6 +378,44 @@ static int sc27xx_fgu_get_clbcnt(struct sc27xx_fgu_data *data, int *clb_cnt) return 0; } +static int sc27xx_fgu_get_vol_now(struct sc27xx_fgu_data *data, int *val) +{ + int ret; + u32 vol; + + ret = regmap_read(data->regmap, data->base + SC27XX_FGU_VOLTAGE_BUF, + &vol); + if (ret) + return ret; + + /* + * It is ADC values reading from registers which need to convert to + * corresponding voltage values. + */ + *val = sc27xx_fgu_adc_to_voltage(data, vol); + + return 0; +} + +static int sc27xx_fgu_get_cur_now(struct sc27xx_fgu_data *data, int *val) +{ + int ret; + u32 cur; + + ret = regmap_read(data->regmap, data->base + SC27XX_FGU_CURRENT_BUF, + &cur); + if (ret) + return ret; + + /* + * It is ADC values reading from registers which need to convert to + * corresponding current values. 
+ */ + *val = sc27xx_fgu_adc_to_current(data, cur - SC27XX_FGU_CUR_BASIC_ADC); + + return 0; +} + static int sc27xx_fgu_get_capacity(struct sc27xx_fgu_data *data, int *cap) { int ret, cur_clbcnt, delta_clbcnt, delta_cap, temp; @@ -577,7 +617,7 @@ static int sc27xx_fgu_get_property(struct power_supply *psy, val->intval = value; break; - case POWER_SUPPLY_PROP_VOLTAGE_NOW: + case POWER_SUPPLY_PROP_VOLTAGE_AVG: ret = sc27xx_fgu_get_vbat_vol(data, &value); if (ret) goto error; @@ -601,7 +641,6 @@ static int sc27xx_fgu_get_property(struct power_supply *psy, val->intval = value; break; - case POWER_SUPPLY_PROP_CURRENT_NOW: case POWER_SUPPLY_PROP_CURRENT_AVG: ret = sc27xx_fgu_get_current(data, &value); if (ret) @@ -625,6 +664,22 @@ static int sc27xx_fgu_get_property(struct power_supply *psy, break; + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + ret = sc27xx_fgu_get_vol_now(data, &value); + if (ret) + goto error; + + val->intval = value * 1000; + break; + + case POWER_SUPPLY_PROP_CURRENT_NOW: + ret = sc27xx_fgu_get_cur_now(data, &value); + if (ret) + goto error; + + val->intval = value * 1000; + break; + default: ret = -EINVAL; break; @@ -694,6 +749,7 @@ static enum power_supply_property sc27xx_fgu_props[] = { POWER_SUPPLY_PROP_CAPACITY, POWER_SUPPLY_PROP_VOLTAGE_NOW, POWER_SUPPLY_PROP_VOLTAGE_OCV, + POWER_SUPPLY_PROP_VOLTAGE_AVG, POWER_SUPPLY_PROP_CURRENT_NOW, POWER_SUPPLY_PROP_CURRENT_AVG, POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE, -- GitLab From 7c1c5e38fd31e89391211c0b3e5000666c7d6126 Mon Sep 17 00:00:00 2001 From: Yuanjiang Yu Date: Mon, 20 Apr 2020 11:42:07 +0800 Subject: [PATCH 0222/1971] power: supply: sc27xx: Add boot voltage support Add new property to allow to get the voltage measured during boot time. Signed-off-by: Yuanjiang Yu Signed-off-by: Baolin Wang Signed-off-by: Sebastian Reichel --- drivers/power/supply/sc27xx_fuel_gauge.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/power/supply/sc27xx_fuel_gauge.c b/drivers/power/supply/sc27xx_fuel_gauge.c index 82d0c64ef269a..be42e814ea34b 100644 --- a/drivers/power/supply/sc27xx_fuel_gauge.c +++ b/drivers/power/supply/sc27xx_fuel_gauge.c @@ -84,6 +84,7 @@ * @init_clbcnt: the initial coulomb counter * @max_volt: the maximum constant input voltage in millivolt * @min_volt: the minimum drained battery voltage in microvolt + * @boot_volt: the voltage measured during boot in microvolt * @table_len: the capacity table length * @resist_table_len: the resistance table length * @cur_1000ma_adc: ADC value corresponding to 1000 mA @@ -109,6 +110,7 @@ struct sc27xx_fgu_data { int init_clbcnt; int max_volt; int min_volt; + int boot_volt; int table_len; int resist_table_len; int cur_1000ma_adc; @@ -321,6 +323,7 @@ static int sc27xx_fgu_get_boot_capacity(struct sc27xx_fgu_data *data, int *cap) volt = sc27xx_fgu_adc_to_voltage(data, volt); ocv = volt * 1000 - oci * data->internal_resist; + data->boot_volt = ocv; /* * Parse the capacity table to look up the correct capacity percent @@ -680,6 +683,10 @@ static int sc27xx_fgu_get_property(struct power_supply *psy, val->intval = value * 1000; break; + case POWER_SUPPLY_PROP_VOLTAGE_BOOT: + val->intval = data->boot_volt; + break; + default: ret = -EINVAL; break; @@ -750,6 +757,7 @@ static enum power_supply_property sc27xx_fgu_props[] = { POWER_SUPPLY_PROP_VOLTAGE_NOW, POWER_SUPPLY_PROP_VOLTAGE_OCV, POWER_SUPPLY_PROP_VOLTAGE_AVG, + POWER_SUPPLY_PROP_VOLTAGE_BOOT, POWER_SUPPLY_PROP_CURRENT_NOW, POWER_SUPPLY_PROP_CURRENT_AVG, POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE, -- GitLab From 
aa86e90794c8a76c38f2897aff6c1eaa3b96bba6 Mon Sep 17 00:00:00 2001 From: Tang Bin Date: Wed, 15 Apr 2020 12:27:27 +0800 Subject: [PATCH 0223/1971] power: supply: axp288_charger: Omit superfluous error message In the axp288_charger_probe(), when get irq failed, the function platform_get_irq() logs an error message, so remove redundant message here. Signed-off-by: Tang Bin Signed-off-by: Shengju Zhang Reviewed-by: Hans de Goede Signed-off-by: Sebastian Reichel --- drivers/power/supply/axp288_charger.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c index cf4c67b2d2359..9d981b76c1e72 100644 --- a/drivers/power/supply/axp288_charger.c +++ b/drivers/power/supply/axp288_charger.c @@ -880,10 +880,9 @@ static int axp288_charger_probe(struct platform_device *pdev) /* Register charger interrupts */ for (i = 0; i < CHRG_INTR_END; i++) { pirq = platform_get_irq(info->pdev, i); - if (pirq < 0) { - dev_err(&pdev->dev, "Failed to get IRQ: %d\n", pirq); + if (pirq < 0) return pirq; - } + info->irq[i] = regmap_irq_get_virq(info->regmap_irqc, pirq); if (info->irq[i] < 0) { dev_warn(&info->pdev->dev, -- GitLab From 1e54afe9fcfe99cf913526cbcb0128e4a91d0621 Mon Sep 17 00:00:00 2001 From: Abel Vesa Date: Wed, 15 Apr 2020 11:02:45 +0300 Subject: [PATCH 0224/1971] clk: imx: gate2: Allow single bit gating clock Audiomix on i.MX8MP registers two gates that share the same enable count but use the same bit to control the gate instead of two bits. By adding the flag IMX_CLK_GATE2_SINGLE_BIT we allow the gate2 to use the generic gate ops for enable, disable and is_enabled. For the disable_unused, nothing happens if this flag is specified. Signed-off-by: Abel Vesa Reviewed-by: Stephen Boyd Signed-off-by: Shawn Guo --- drivers/clk/imx/clk-gate2.c | 31 +++++++++++++++++++++++-------- drivers/clk/imx/clk.h | 13 +++++++++++++ 2 files changed, 36 insertions(+), 8 deletions(-) diff --git a/drivers/clk/imx/clk-gate2.c b/drivers/clk/imx/clk-gate2.c index ce0060e8873eb..b87ab3c3ba1ea 100644 --- a/drivers/clk/imx/clk-gate2.c +++ b/drivers/clk/imx/clk-gate2.c @@ -41,21 +41,26 @@ static int clk_gate2_enable(struct clk_hw *hw) struct clk_gate2 *gate = to_clk_gate2(hw); u32 reg; unsigned long flags; + int ret = 0; spin_lock_irqsave(gate->lock, flags); if (gate->share_count && (*gate->share_count)++ > 0) goto out; - reg = readl(gate->reg); - reg &= ~(3 << gate->bit_idx); - reg |= gate->cgr_val << gate->bit_idx; - writel(reg, gate->reg); + if (gate->flags & IMX_CLK_GATE2_SINGLE_BIT) { + ret = clk_gate_ops.enable(hw); + } else { + reg = readl(gate->reg); + reg &= ~(3 << gate->bit_idx); + reg |= gate->cgr_val << gate->bit_idx; + writel(reg, gate->reg); + } out: spin_unlock_irqrestore(gate->lock, flags); - return 0; + return ret; } static void clk_gate2_disable(struct clk_hw *hw) @@ -73,9 +78,13 @@ static void clk_gate2_disable(struct clk_hw *hw) goto out; } - reg = readl(gate->reg); - reg &= ~(3 << gate->bit_idx); - writel(reg, gate->reg); + if (gate->flags & IMX_CLK_GATE2_SINGLE_BIT) { + clk_gate_ops.disable(hw); + } else { + reg = readl(gate->reg); + reg &= ~(3 << gate->bit_idx); + writel(reg, gate->reg); + } out: spin_unlock_irqrestore(gate->lock, flags); @@ -95,6 +104,9 @@ static int clk_gate2_is_enabled(struct clk_hw *hw) { struct clk_gate2 *gate = to_clk_gate2(hw); + if (gate->flags & IMX_CLK_GATE2_SINGLE_BIT) + return clk_gate_ops.is_enabled(hw); + return clk_gate2_reg_is_enabled(gate->reg, gate->bit_idx); } @@ -104,6 +116,9 @@ 
static void clk_gate2_disable_unused(struct clk_hw *hw) unsigned long flags; u32 reg; + if (gate->flags & IMX_CLK_GATE2_SINGLE_BIT) + return; + spin_lock_irqsave(gate->lock, flags); if (!gate->share_count || *gate->share_count == 0) { diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h index f074dd8ec42e7..01ff1dbfbdef5 100644 --- a/drivers/clk/imx/clk.h +++ b/drivers/clk/imx/clk.h @@ -5,6 +5,8 @@ #include #include +#define IMX_CLK_GATE2_SINGLE_BIT 1 + extern spinlock_t imx_ccm_lock; void imx_check_clocks(struct clk *clks[], unsigned int count); @@ -355,6 +357,17 @@ static inline struct clk_hw *imx_clk_hw_gate2_shared2(const char *name, &imx_ccm_lock, share_count); } +static inline struct clk_hw *imx_dev_clk_hw_gate_shared(struct device *dev, + const char *name, const char *parent, + void __iomem *reg, u8 shift, + unsigned int *share_count) +{ + return clk_hw_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT | + CLK_OPS_PARENT_ENABLE, reg, shift, 0x3, + IMX_CLK_GATE2_SINGLE_BIT, + &imx_ccm_lock, share_count); +} + static inline struct clk *imx_clk_gate2_cgr(const char *name, const char *parent, void __iomem *reg, u8 shift, u8 cgr_val) { -- GitLab From 55a8b3cdff54a0e6faa43a41c8e1eb47e56c6831 Mon Sep 17 00:00:00 2001 From: Abel Vesa Date: Wed, 15 Apr 2020 11:02:46 +0300 Subject: [PATCH 0225/1971] clk: imx: pll14xx: Add the device as argument when registering In order to allow runtime PM, the device needs to be passed on to the register function. Audiomix clock controller, used on i.MX8MP and future platforms, registers a pll14xx and has runtime PM support. Signed-off-by: Abel Vesa Reviewed-by: Stephen Boyd Signed-off-by: Shawn Guo --- drivers/clk/imx/clk-pll14xx.c | 8 ++++---- drivers/clk/imx/clk.h | 13 ++++++++++--- 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c index a83bbbee77d99..f9eb189b93c0d 100644 --- a/drivers/clk/imx/clk-pll14xx.c +++ b/drivers/clk/imx/clk-pll14xx.c @@ -378,9 +378,9 @@ static const struct clk_ops clk_pll1443x_ops = { .set_rate = clk_pll1443x_set_rate, }; -struct clk_hw *imx_clk_hw_pll14xx(const char *name, const char *parent_name, - void __iomem *base, - const struct imx_pll14xx_clk *pll_clk) +struct clk_hw *imx_dev_clk_hw_pll14xx(struct device *dev, const char *name, + const char *parent_name, void __iomem *base, + const struct imx_pll14xx_clk *pll_clk) { struct clk_pll14xx *pll; struct clk_hw *hw; @@ -426,7 +426,7 @@ struct clk_hw *imx_clk_hw_pll14xx(const char *name, const char *parent_name, hw = &pll->hw; - ret = clk_hw_register(NULL, hw); + ret = clk_hw_register(dev, hw); if (ret) { pr_err("%s: failed to register pll %s %d\n", __func__, name, ret); diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h index 01ff1dbfbdef5..fcd9952a27cf0 100644 --- a/drivers/clk/imx/clk.h +++ b/drivers/clk/imx/clk.h @@ -133,9 +133,9 @@ struct clk *imx_clk_pll14xx(const char *name, const char *parent_name, #define imx_clk_pll14xx(name, parent_name, base, pll_clk) \ to_clk(imx_clk_hw_pll14xx(name, parent_name, base, pll_clk)) -struct clk_hw *imx_clk_hw_pll14xx(const char *name, const char *parent_name, - void __iomem *base, - const struct imx_pll14xx_clk *pll_clk); +struct clk_hw *imx_dev_clk_hw_pll14xx(struct device *dev, const char *name, + const char *parent_name, void __iomem *base, + const struct imx_pll14xx_clk *pll_clk); struct clk_hw *imx_clk_hw_pllv1(enum imx_pllv1_type type, const char *name, const char *parent, void __iomem *base); @@ -242,6 +242,13 @@ static inline struct clk 
*to_clk(struct clk_hw *hw) return hw->clk; } +static inline struct clk_hw *imx_clk_hw_pll14xx(const char *name, const char *parent_name, + void __iomem *base, + const struct imx_pll14xx_clk *pll_clk) +{ + return imx_dev_clk_hw_pll14xx(NULL, name, parent_name, base, pll_clk); +} + static inline struct clk_hw *imx_clk_hw_fixed(const char *name, int rate) { return clk_hw_register_fixed_rate(NULL, name, NULL, 0, rate); -- GitLab From 01d5bea4d390eb171af845034a152dee9e997b70 Mon Sep 17 00:00:00 2001 From: Abel Vesa Date: Wed, 15 Apr 2020 11:02:47 +0300 Subject: [PATCH 0226/1971] clk: imx: Add helpers for passing the device as argument All the imx clocks that need to be registered by the audiomix need to pass on the device so that the runtime PM support could work properly. Signed-off-by: Abel Vesa Reviewed-by: Stephen Boyd Reviewed-by: Peng Fan Signed-off-by: Shawn Guo --- drivers/clk/imx/clk.h | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h index fcd9952a27cf0..b91b1b18a4a21 100644 --- a/drivers/clk/imx/clk.h +++ b/drivers/clk/imx/clk.h @@ -319,6 +319,13 @@ static inline struct clk_hw *imx_clk_hw_gate(const char *name, const char *paren shift, 0, &imx_ccm_lock); } +static inline struct clk_hw *imx_dev_clk_hw_gate(struct device *dev, const char *name, + const char *parent, void __iomem *reg, u8 shift) +{ + return clk_hw_register_gate(dev, name, parent, CLK_SET_RATE_PARENT, reg, + shift, 0, &imx_ccm_lock); +} + static inline struct clk_hw *imx_clk_hw_gate_dis(const char *name, const char *parent, void __iomem *reg, u8 shift) { @@ -431,6 +438,15 @@ static inline struct clk_hw *imx_clk_hw_mux(const char *name, void __iomem *reg, width, 0, &imx_ccm_lock); } +static inline struct clk_hw *imx_dev_clk_hw_mux(struct device *dev, + const char *name, void __iomem *reg, u8 shift, + u8 width, const char * const *parents, int num_parents) +{ + return clk_hw_register_mux(dev, name, parents, num_parents, + CLK_SET_RATE_NO_REPARENT | CLK_SET_PARENT_GATE, + reg, shift, width, 0, &imx_ccm_lock); +} + static inline struct clk *imx_clk_mux2(const char *name, void __iomem *reg, u8 shift, u8 width, const char * const *parents, int num_parents) @@ -493,6 +509,19 @@ static inline struct clk_hw *imx_clk_hw_mux_flags(const char *name, reg, shift, width, 0, &imx_ccm_lock); } +static inline struct clk_hw *imx_dev_clk_hw_mux_flags(struct device *dev, + const char *name, + void __iomem *reg, u8 shift, + u8 width, + const char * const *parents, + int num_parents, + unsigned long flags) +{ + return clk_hw_register_mux(dev, name, parents, num_parents, + flags | CLK_SET_RATE_NO_REPARENT, + reg, shift, width, 0, &imx_ccm_lock); +} + struct clk_hw *imx_clk_hw_cpu(const char *name, const char *parent_name, struct clk *div, struct clk *mux, struct clk *pll, struct clk *step); -- GitLab From 849af490b6a674ba0636fbf95ac048b87ad2eb94 Mon Sep 17 00:00:00 2001 From: Abel Vesa Date: Wed, 15 Apr 2020 11:02:48 +0300 Subject: [PATCH 0227/1971] dt-bindings: clocks: imx8mp: Add ids for audiomix clocks Add all the clock ids for the audiomix clocks. 
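A hypothetical sketch of how a later audiomix clock provider might consume one of these ids together with the device-aware gate helper added earlier in this series; the device, parent name, register offset and bit position below are all assumptions:

static void audiomix_example(struct device *dev, void __iomem *base,
			     struct clk_hw_onecell_data *hw_data)
{
	/* Placeholder offset/bit; the real audiomix register layout is not shown here. */
	hw_data->hws[IMX8MP_CLK_AUDIOMIX_SAI1_IPG] =
		imx_dev_clk_hw_gate(dev, "audiomix_sai1_ipg", "audio_ahb_root",
				    base + 0x0, 0);
}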
Signed-off-by: Abel Vesa Acked-by: Rob Herring Reviewed-by: Stephen Boyd Signed-off-by: Shawn Guo --- include/dt-bindings/clock/imx8mp-clock.h | 62 ++++++++++++++++++++++++ 1 file changed, 62 insertions(+) diff --git a/include/dt-bindings/clock/imx8mp-clock.h b/include/dt-bindings/clock/imx8mp-clock.h index 47ab082238b47..305433f9cc074 100644 --- a/include/dt-bindings/clock/imx8mp-clock.h +++ b/include/dt-bindings/clock/imx8mp-clock.h @@ -298,4 +298,66 @@ #define IMX8MP_CLK_END 289 +#define IMX8MP_CLK_AUDIOMIX_SAI1_IPG 0 +#define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK1 1 +#define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK2 2 +#define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK3 3 +#define IMX8MP_CLK_AUDIOMIX_SAI2_IPG 4 +#define IMX8MP_CLK_AUDIOMIX_SAI2_MCLK1 5 +#define IMX8MP_CLK_AUDIOMIX_SAI2_MCLK2 6 +#define IMX8MP_CLK_AUDIOMIX_SAI2_MCLK3 7 +#define IMX8MP_CLK_AUDIOMIX_SAI3_IPG 8 +#define IMX8MP_CLK_AUDIOMIX_SAI3_MCLK1 9 +#define IMX8MP_CLK_AUDIOMIX_SAI3_MCLK2 10 +#define IMX8MP_CLK_AUDIOMIX_SAI3_MCLK3 11 +#define IMX8MP_CLK_AUDIOMIX_SAI5_IPG 12 +#define IMX8MP_CLK_AUDIOMIX_SAI5_MCLK1 13 +#define IMX8MP_CLK_AUDIOMIX_SAI5_MCLK2 14 +#define IMX8MP_CLK_AUDIOMIX_SAI5_MCLK3 15 +#define IMX8MP_CLK_AUDIOMIX_SAI6_IPG 16 +#define IMX8MP_CLK_AUDIOMIX_SAI6_MCLK1 17 +#define IMX8MP_CLK_AUDIOMIX_SAI6_MCLK2 18 +#define IMX8MP_CLK_AUDIOMIX_SAI6_MCLK3 19 +#define IMX8MP_CLK_AUDIOMIX_SAI7_IPG 20 +#define IMX8MP_CLK_AUDIOMIX_SAI7_MCLK1 21 +#define IMX8MP_CLK_AUDIOMIX_SAI7_MCLK2 22 +#define IMX8MP_CLK_AUDIOMIX_SAI7_MCLK3 23 +#define IMX8MP_CLK_AUDIOMIX_ASRC_IPG 24 +#define IMX8MP_CLK_AUDIOMIX_PDM_IPG 25 +#define IMX8MP_CLK_AUDIOMIX_SDMA2_ROOT 26 +#define IMX8MP_CLK_AUDIOMIX_SDMA3_ROOT 27 +#define IMX8MP_CLK_AUDIOMIX_SPBA2_ROOT 28 +#define IMX8MP_CLK_AUDIOMIX_DSP_ROOT 29 +#define IMX8MP_CLK_AUDIOMIX_DSPDBG_ROOT 30 +#define IMX8MP_CLK_AUDIOMIX_EARC_IPG 31 +#define IMX8MP_CLK_AUDIOMIX_OCRAMA_IPG 32 +#define IMX8MP_CLK_AUDIOMIX_AUD2HTX_IPG 33 +#define IMX8MP_CLK_AUDIOMIX_EDMA_ROOT 34 +#define IMX8MP_CLK_AUDIOMIX_AUDPLL_ROOT 35 +#define IMX8MP_CLK_AUDIOMIX_MU2_ROOT 36 +#define IMX8MP_CLK_AUDIOMIX_MU3_ROOT 37 +#define IMX8MP_CLK_AUDIOMIX_EARC_PHY 38 +#define IMX8MP_CLK_AUDIOMIX_PDM_ROOT 39 +#define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK1_SEL 40 +#define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK2_SEL 41 +#define IMX8MP_CLK_AUDIOMIX_SAI2_MCLK1_SEL 42 +#define IMX8MP_CLK_AUDIOMIX_SAI2_MCLK2_SEL 43 +#define IMX8MP_CLK_AUDIOMIX_SAI3_MCLK1_SEL 44 +#define IMX8MP_CLK_AUDIOMIX_SAI3_MCLK2_SEL 45 +#define IMX8MP_CLK_AUDIOMIX_SAI4_MCLK1_SEL 46 +#define IMX8MP_CLK_AUDIOMIX_SAI4_MCLK2_SEL 47 +#define IMX8MP_CLK_AUDIOMIX_SAI5_MCLK1_SEL 48 +#define IMX8MP_CLK_AUDIOMIX_SAI5_MCLK2_SEL 49 +#define IMX8MP_CLK_AUDIOMIX_SAI6_MCLK1_SEL 50 +#define IMX8MP_CLK_AUDIOMIX_SAI6_MCLK2_SEL 51 +#define IMX8MP_CLK_AUDIOMIX_SAI7_MCLK1_SEL 52 +#define IMX8MP_CLK_AUDIOMIX_SAI7_MCLK2_SEL 53 +#define IMX8MP_CLK_AUDIOMIX_PDM_SEL 54 +#define IMX8MP_CLK_AUDIOMIX_SAI_PLL_REF_SEL 55 +#define IMX8MP_CLK_AUDIOMIX_SAI_PLL 56 +#define IMX8MP_CLK_AUDIOMIX_SAI_PLL_BYPASS 57 +#define IMX8MP_CLK_AUDIOMIX_SAI_PLL_OUT 58 + +#define IMX8MP_CLK_AUDIOMIX_END 59 + #endif -- GitLab From f80ff13135cb44a9be96f695d19212ae952ee5f4 Mon Sep 17 00:00:00 2001 From: Jungseung Lee Date: Tue, 21 Apr 2020 15:33:13 +0900 Subject: [PATCH 0228/1971] mtd: spi-nor: micron-st: Enable locking for n25q00 n25q00 uses the 4 bit Block Protection scheme and supports Top/Bottom protection via the BP and TB bits of the Status Register. Enable locking for n25q00. Tested with cirrus controller. 
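For context, a hypothetical user-space sketch of exercising the newly enabled protection through the MTD character device; the device node and region are assumptions, not part of this patch:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/mtd-user.h>

int main(void)
{
	int fd = open("/dev/mtd0", O_RDWR);
	/* Lock, then unlock, the first 64 KiB via the BP/TB bits. */
	struct erase_info_user region = { .start = 0, .length = 64 * 1024 };

	ioctl(fd, MEMLOCK, &region);
	ioctl(fd, MEMUNLOCK, &region);
	close(fd);
	return 0;
}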
Signed-off-by: Jungseung Lee Signed-off-by: Tudor Ambarus --- drivers/mtd/spi-nor/micron-st.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/mtd/spi-nor/micron-st.c b/drivers/mtd/spi-nor/micron-st.c index 02c0b53f60971..3dca5b9af3b64 100644 --- a/drivers/mtd/spi-nor/micron-st.c +++ b/drivers/mtd/spi-nor/micron-st.c @@ -61,6 +61,8 @@ static const struct flash_info st_parts[] = { SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6) }, { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | + SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6 | NO_CHIP_ERASE) }, { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | -- GitLab From 8aadd77cd27172988414408f43bb9e5bef01b14d Mon Sep 17 00:00:00 2001 From: Tudor Ambarus Date: Tue, 21 Apr 2020 06:31:31 +0000 Subject: [PATCH 0229/1971] mtd: spi-nor: Uniformize the return value in spi_nor_*_ready() spi_nor_ready() returns 1 if ready, 0 if not ready and -errno on errors. Do the same in all the spi_nor_*_ready() children. Signed-off-by: Tudor Ambarus Reviewed-by: Vignesh Raghavendra --- drivers/mtd/spi-nor/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c index 757ac0e21554d..2fede06677598 100644 --- a/drivers/mtd/spi-nor/core.c +++ b/drivers/mtd/spi-nor/core.c @@ -640,7 +640,7 @@ static int spi_nor_fsr_ready(struct spi_nor *nor) return -EIO; } - return nor->bouncebuf[0] & FSR_READY; + return !!(nor->bouncebuf[0] & FSR_READY); } /** -- GitLab From b8469159632818064084397a010f34e156a393db Mon Sep 17 00:00:00 2001 From: Tudor Ambarus Date: Tue, 21 Apr 2020 06:31:32 +0000 Subject: [PATCH 0230/1971] mtd: spi-nor: Fix description of the sr_ready() return value The functions return 1 if ready, 0 if not ready, -errno on errors. Signed-off-by: Tudor Ambarus Reviewed-by: Vignesh Raghavendra --- drivers/mtd/spi-nor/core.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c index 2fede06677598..1ab4386a099a9 100644 --- a/drivers/mtd/spi-nor/core.c +++ b/drivers/mtd/spi-nor/core.c @@ -499,7 +499,7 @@ int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr) * the flash is ready for new commands. * @nor: pointer to 'struct spi_nor'. * - * Return: 0 on success, -errno otherwise. + * Return: 1 if ready, 0 if not ready, -errno on errors. */ static int spi_nor_xsr_ready(struct spi_nor *nor) { @@ -542,7 +542,7 @@ static void spi_nor_clear_sr(struct spi_nor *nor) * for new commands. * @nor: pointer to 'struct spi_nor'. * - * Return: 0 on success, -errno otherwise. + * Return: 1 if ready, 0 if not ready, -errno on errors. */ static int spi_nor_sr_ready(struct spi_nor *nor) { @@ -606,7 +606,7 @@ static void spi_nor_clear_fsr(struct spi_nor *nor) * ready for new commands. * @nor: pointer to 'struct spi_nor'. * - * Return: 0 on success, -errno otherwise. + * Return: 1 if ready, 0 if not ready, -errno on errors. */ static int spi_nor_fsr_ready(struct spi_nor *nor) { @@ -647,7 +647,7 @@ static int spi_nor_fsr_ready(struct spi_nor *nor) * spi_nor_ready() - Query the flash to see if it is ready for new commands. * @nor: pointer to 'struct spi_nor'. * - * Return: 0 on success, -errno otherwise. + * Return: 1 if ready, 0 if not ready, -errno on errors. 
*/ static int spi_nor_ready(struct spi_nor *nor) { -- GitLab From da1978ac3d6cf278dedf5edbf350445a0fff2f08 Mon Sep 17 00:00:00 2001 From: Martin Blumenstingl Date: Fri, 17 Apr 2020 20:41:24 +0200 Subject: [PATCH 0231/1971] clk: meson: meson8b: Fix the first parent of vid_pll_in_sel Use hdmi_pll_lvds_out as parent of the vid_pll_in_sel clock. It's not easy to see that the vendor kernel does the same, but it actually does. meson_clk_pll_ops in mainline still cannot fully recalculate all rates from the HDMI PLL registers because some register bits (at the time of writing it's unknown which bits are used for this) double the HDMI PLL output rate (compared to simply considering M, N and FRAC) for some (but not all) PLL settings. Update the vid_pll_in_sel parent so our clock calculation works for simple clock settings like the CVBS output (where no rate doubling is going on). The PLL ops need to be fixed later on for more complex clock settings (all HDMI rates). Fixes: 6cb57c678bb70 ("clk: meson: meson8b: add the read-only video clock trees") Suggested-by: Neil Armstrong Signed-off-by: Martin Blumenstingl Signed-off-by: Jerome Brunet Link: https://lore.kernel.org/r/20200417184127.1319871-2-martin.blumenstingl@googlemail.com --- drivers/clk/meson/meson8b.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c index 7c55c695cbaed..90d284ffc780c 100644 --- a/drivers/clk/meson/meson8b.c +++ b/drivers/clk/meson/meson8b.c @@ -1077,7 +1077,7 @@ static struct clk_regmap meson8b_vid_pll_in_sel = { * Meson8m2: vid2_pll */ .parent_hws = (const struct clk_hw *[]) { - &meson8b_hdmi_pll_dco.hw + &meson8b_hdmi_pll_lvds_out.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, -- GitLab From 0d3051c790ed2ef6bd91b92b07220313f06b95b3 Mon Sep 17 00:00:00 2001 From: Martin Blumenstingl Date: Fri, 17 Apr 2020 20:41:25 +0200 Subject: [PATCH 0232/1971] clk: meson: meson8b: Fix the polarity of the RESET_N lines CLKC_RESET_VID_DIVIDER_CNTL_RESET_N_POST and CLKC_RESET_VID_DIVIDER_CNTL_RESET_N_PRE are active low. This means: - asserting them requires setting the register value to 0 - de-asserting them requires setting the register value to 1 Set the register value accordingly for these two reset lines by setting the inverted the register value compared to all other reset lines. 
Fixes: 189621726bc2f6 ("clk: meson: meson8b: register the built-in reset controller") Signed-off-by: Martin Blumenstingl Signed-off-by: Jerome Brunet Link: https://lore.kernel.org/r/20200417184127.1319871-3-martin.blumenstingl@googlemail.com --- drivers/clk/meson/meson8b.c | 79 ++++++++++++++++++++++++++----------- 1 file changed, 56 insertions(+), 23 deletions(-) diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c index 90d284ffc780c..1dec8d5404a14 100644 --- a/drivers/clk/meson/meson8b.c +++ b/drivers/clk/meson/meson8b.c @@ -3506,54 +3506,87 @@ static struct clk_regmap *const meson8b_clk_regmaps[] = { static const struct meson8b_clk_reset_line { u32 reg; u8 bit_idx; + bool active_low; } meson8b_clk_reset_bits[] = { [CLKC_RESET_L2_CACHE_SOFT_RESET] = { - .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 30 + .reg = HHI_SYS_CPU_CLK_CNTL0, + .bit_idx = 30, + .active_low = false, }, [CLKC_RESET_AXI_64_TO_128_BRIDGE_A5_SOFT_RESET] = { - .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 29 + .reg = HHI_SYS_CPU_CLK_CNTL0, + .bit_idx = 29, + .active_low = false, }, [CLKC_RESET_SCU_SOFT_RESET] = { - .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 28 + .reg = HHI_SYS_CPU_CLK_CNTL0, + .bit_idx = 28, + .active_low = false, }, [CLKC_RESET_CPU3_SOFT_RESET] = { - .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 27 + .reg = HHI_SYS_CPU_CLK_CNTL0, + .bit_idx = 27, + .active_low = false, }, [CLKC_RESET_CPU2_SOFT_RESET] = { - .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 26 + .reg = HHI_SYS_CPU_CLK_CNTL0, + .bit_idx = 26, + .active_low = false, }, [CLKC_RESET_CPU1_SOFT_RESET] = { - .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 25 + .reg = HHI_SYS_CPU_CLK_CNTL0, + .bit_idx = 25, + .active_low = false, }, [CLKC_RESET_CPU0_SOFT_RESET] = { - .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 24 + .reg = HHI_SYS_CPU_CLK_CNTL0, + .bit_idx = 24, + .active_low = false, }, [CLKC_RESET_A5_GLOBAL_RESET] = { - .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 18 + .reg = HHI_SYS_CPU_CLK_CNTL0, + .bit_idx = 18, + .active_low = false, }, [CLKC_RESET_A5_AXI_SOFT_RESET] = { - .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 17 + .reg = HHI_SYS_CPU_CLK_CNTL0, + .bit_idx = 17, + .active_low = false, }, [CLKC_RESET_A5_ABP_SOFT_RESET] = { - .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 16 + .reg = HHI_SYS_CPU_CLK_CNTL0, + .bit_idx = 16, + .active_low = false, }, [CLKC_RESET_AXI_64_TO_128_BRIDGE_MMC_SOFT_RESET] = { - .reg = HHI_SYS_CPU_CLK_CNTL1, .bit_idx = 30 + .reg = HHI_SYS_CPU_CLK_CNTL1, + .bit_idx = 30, + .active_low = false, }, [CLKC_RESET_VID_CLK_CNTL_SOFT_RESET] = { - .reg = HHI_VID_CLK_CNTL, .bit_idx = 15 + .reg = HHI_VID_CLK_CNTL, + .bit_idx = 15, + .active_low = false, }, [CLKC_RESET_VID_DIVIDER_CNTL_SOFT_RESET_POST] = { - .reg = HHI_VID_DIVIDER_CNTL, .bit_idx = 7 + .reg = HHI_VID_DIVIDER_CNTL, + .bit_idx = 7, + .active_low = false, }, [CLKC_RESET_VID_DIVIDER_CNTL_SOFT_RESET_PRE] = { - .reg = HHI_VID_DIVIDER_CNTL, .bit_idx = 3 + .reg = HHI_VID_DIVIDER_CNTL, + .bit_idx = 3, + .active_low = false, }, [CLKC_RESET_VID_DIVIDER_CNTL_RESET_N_POST] = { - .reg = HHI_VID_DIVIDER_CNTL, .bit_idx = 1 + .reg = HHI_VID_DIVIDER_CNTL, + .bit_idx = 1, + .active_low = true, }, [CLKC_RESET_VID_DIVIDER_CNTL_RESET_N_PRE] = { - .reg = HHI_VID_DIVIDER_CNTL, .bit_idx = 0 + .reg = HHI_VID_DIVIDER_CNTL, + .bit_idx = 0, + .active_low = true, }, }; @@ -3562,22 +3595,22 @@ static int meson8b_clk_reset_update(struct reset_controller_dev *rcdev, { struct meson8b_clk_reset *meson8b_clk_reset = container_of(rcdev, struct meson8b_clk_reset, reset); - unsigned long flags; const struct 
meson8b_clk_reset_line *reset; + unsigned int value = 0; + unsigned long flags; if (id >= ARRAY_SIZE(meson8b_clk_reset_bits)) return -EINVAL; reset = &meson8b_clk_reset_bits[id]; + if (assert != reset->active_low) + value = BIT(reset->bit_idx); + spin_lock_irqsave(&meson_clk_lock, flags); - if (assert) - regmap_update_bits(meson8b_clk_reset->regmap, reset->reg, - BIT(reset->bit_idx), BIT(reset->bit_idx)); - else - regmap_update_bits(meson8b_clk_reset->regmap, reset->reg, - BIT(reset->bit_idx), 0); + regmap_update_bits(meson8b_clk_reset->regmap, reset->reg, + BIT(reset->bit_idx), value); spin_unlock_irqrestore(&meson_clk_lock, flags); -- GitLab From 8bb629cfb28f4dad9d47f69249366e50ae5edc25 Mon Sep 17 00:00:00 2001 From: Martin Blumenstingl Date: Fri, 17 Apr 2020 20:41:26 +0200 Subject: [PATCH 0233/1971] clk: meson: meson8b: Fix the vclk_div{1, 2, 4, 6, 12}_en gate bits The DIV{1,2,4,6,12}_EN bits are actually located in HHI_VID_CLK_CNTL register: - HHI_VID_CLK_CNTL[0] = DIV1_EN - HHI_VID_CLK_CNTL[1] = DIV2_EN - HHI_VID_CLK_CNTL[2] = DIV4_EN - HHI_VID_CLK_CNTL[3] = DIV6_EN - HHI_VID_CLK_CNTL[4] = DIV12_EN Update the bits accordingly so we will enable the bits in the correct register once we switch these clocks to be mutable. Fixes: 6cb57c678bb70e ("clk: meson: meson8b: add the read-only video clock trees") Signed-off-by: Martin Blumenstingl Signed-off-by: Jerome Brunet Link: https://lore.kernel.org/r/20200417184127.1319871-4-martin.blumenstingl@googlemail.com --- drivers/clk/meson/meson8b.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c index 1dec8d5404a14..6d1727e62b553 100644 --- a/drivers/clk/meson/meson8b.c +++ b/drivers/clk/meson/meson8b.c @@ -1213,7 +1213,7 @@ static struct clk_regmap meson8b_vclk_in_en = { static struct clk_regmap meson8b_vclk_div1_gate = { .data = &(struct clk_regmap_gate_data){ - .offset = HHI_VID_CLK_DIV, + .offset = HHI_VID_CLK_CNTL, .bit_idx = 0, }, .hw.init = &(struct clk_init_data){ @@ -1243,7 +1243,7 @@ static struct clk_fixed_factor meson8b_vclk_div2_div = { static struct clk_regmap meson8b_vclk_div2_div_gate = { .data = &(struct clk_regmap_gate_data){ - .offset = HHI_VID_CLK_DIV, + .offset = HHI_VID_CLK_CNTL, .bit_idx = 1, }, .hw.init = &(struct clk_init_data){ @@ -1273,7 +1273,7 @@ static struct clk_fixed_factor meson8b_vclk_div4_div = { static struct clk_regmap meson8b_vclk_div4_div_gate = { .data = &(struct clk_regmap_gate_data){ - .offset = HHI_VID_CLK_DIV, + .offset = HHI_VID_CLK_CNTL, .bit_idx = 2, }, .hw.init = &(struct clk_init_data){ @@ -1303,7 +1303,7 @@ static struct clk_fixed_factor meson8b_vclk_div6_div = { static struct clk_regmap meson8b_vclk_div6_div_gate = { .data = &(struct clk_regmap_gate_data){ - .offset = HHI_VID_CLK_DIV, + .offset = HHI_VID_CLK_CNTL, .bit_idx = 3, }, .hw.init = &(struct clk_init_data){ @@ -1333,7 +1333,7 @@ static struct clk_fixed_factor meson8b_vclk_div12_div = { static struct clk_regmap meson8b_vclk_div12_div_gate = { .data = &(struct clk_regmap_gate_data){ - .offset = HHI_VID_CLK_DIV, + .offset = HHI_VID_CLK_CNTL, .bit_idx = 4, }, .hw.init = &(struct clk_init_data){ -- GitLab From 16afd70af5b21b6d73a03b9c36f78b9cf004a0dd Mon Sep 17 00:00:00 2001 From: Martin Blumenstingl Date: Fri, 17 Apr 2020 20:41:27 +0200 Subject: [PATCH 0234/1971] clk: meson: meson8b: Make the CCF use the glitch-free VPU mux The "vpu_0" or "vpu_1" clock trees should not be updated while the clock is running. 
Enforce this by setting CLK_SET_RATE_GATE on the "vpu_0" and "vpu_1" gates. This makes the CCF switch to the "vpu_1" tree when "vpu_0" is currently active and vice versa, which is exactly what the vendor driver does when updating the frequency of the VPU clock. Signed-off-by: Martin Blumenstingl Signed-off-by: Jerome Brunet Link: https://lore.kernel.org/r/20200417184127.1319871-5-martin.blumenstingl@googlemail.com --- drivers/clk/meson/meson8b.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c index 6d1727e62b553..811af1c114562 100644 --- a/drivers/clk/meson/meson8b.c +++ b/drivers/clk/meson/meson8b.c @@ -2063,7 +2063,7 @@ static struct clk_regmap meson8b_vpu_0 = { &meson8b_vpu_0_div.hw }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, + .flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT, }, }; @@ -2134,10 +2134,18 @@ static struct clk_regmap meson8b_vpu_1 = { &meson8b_vpu_1_div.hw }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, + .flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT, }, }; +/* + * The VPU clock has two identical clock trees (vpu_0 and vpu_1) + * muxed by a glitch-free switch on Meson8b and Meson8m2. The CCF can + * actually manage this glitch-free mux because it does top-to-bottom + * updates of each clock tree and switches to the "inactive" one when + * CLK_SET_RATE_GATE is set. + * Meson8 only has vpu_0 and no glitch-free mux. + */ static struct clk_regmap meson8b_vpu = { .data = &(struct clk_regmap_mux_data){ .offset = HHI_VPU_CLK_CNTL, @@ -2152,7 +2160,7 @@ static struct clk_regmap meson8b_vpu = { &meson8b_vpu_1.hw, }, .num_parents = 2, - .flags = CLK_SET_RATE_NO_REPARENT, + .flags = CLK_SET_RATE_PARENT, }, }; -- GitLab From 7440f518dad9d861d76c64956641eeddd3586f75 Mon Sep 17 00:00:00 2001 From: Sudip Mukherjee Date: Fri, 24 Apr 2020 17:19:44 +0100 Subject: [PATCH 0235/1971] thermal/drivers/ti-soc-thermal: Avoid dereferencing ERR_PTR On error the function ti_bandgap_get_sensor_data() returns the error code in ERR_PTR(), but we only checked whether the return value is NULL, so we could end up dereferencing an error code inside an ERR_PTR(). While at it, convert the checks to IS_ERR_OR_NULL().
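A minimal sketch of the intended pattern (illustrative only, relying on the standard IS_ERR_OR_NULL() helper from <linux/err.h>):

        data = ti_bandgap_get_sensor_data(bgp, id);     /* may be NULL or ERR_PTR(-E...) */

        if (IS_ERR_OR_NULL(data))       /* never dereference an ERR_PTR() value */
                data = ti_thermal_build_data(bgp, id);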
Signed-off-by: Sudip Mukherjee Reviewed-by: Amit Kucheria Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/20200424161944.6044-1-sudipm.mukherjee@gmail.com --- drivers/thermal/ti-soc-thermal/ti-thermal-common.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c index d3e959d01606c..85776db4bf346 100644 --- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c +++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c @@ -169,7 +169,7 @@ int ti_thermal_expose_sensor(struct ti_bandgap *bgp, int id, data = ti_bandgap_get_sensor_data(bgp, id); - if (!data || IS_ERR(data)) + if (IS_ERR_OR_NULL(data)) data = ti_thermal_build_data(bgp, id); if (!data) @@ -196,7 +196,7 @@ int ti_thermal_remove_sensor(struct ti_bandgap *bgp, int id) data = ti_bandgap_get_sensor_data(bgp, id); - if (data && data->ti_thermal) { + if (!IS_ERR_OR_NULL(data) && data->ti_thermal) { if (data->our_zone) thermal_zone_device_unregister(data->ti_thermal); } @@ -262,7 +262,7 @@ int ti_thermal_unregister_cpu_cooling(struct ti_bandgap *bgp, int id) data = ti_bandgap_get_sensor_data(bgp, id); - if (data) { + if (!IS_ERR_OR_NULL(data)) { cpufreq_cooling_unregister(data->cool_dev); if (data->policy) cpufreq_cpu_put(data->policy); -- GitLab From 21b01cc879cc3ecef013b3576db2925adfd881aa Mon Sep 17 00:00:00 2001 From: Geordan Neukum Date: Mon, 30 Mar 2020 01:33:55 -0400 Subject: [PATCH 0236/1971] power: supply: max17042_battery: Add support for the TTE_NOW prop The max170{42,47,50,55} family of fuel gauges all provide time-to-empty estimation. As such, let's export this as a property. Signed-off-by: Geordan Neukum Signed-off-by: Sebastian Reichel --- drivers/power/supply/max17042_battery.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c index 69ec4295d55dd..f284547913d6f 100644 --- a/drivers/power/supply/max17042_battery.c +++ b/drivers/power/supply/max17042_battery.c @@ -87,6 +87,7 @@ static enum power_supply_property max17042_battery_props[] = { POWER_SUPPLY_PROP_SCOPE, POWER_SUPPLY_PROP_CURRENT_NOW, POWER_SUPPLY_PROP_CURRENT_AVG, + POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW, }; static int max17042_get_temperature(struct max17042_chip *chip, int *temp) @@ -411,6 +412,13 @@ static int max17042_get_property(struct power_supply *psy, return -EINVAL; } break; + case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW: + ret = regmap_read(map, MAX17042_TTE, &data); + if (ret < 0) + return ret; + + val->intval = data * 5625 / 1000; + break; default: return -EINVAL; } -- GitLab From e8208a71ac0be22afccf5ccc7d5bc6c7a351bc18 Mon Sep 17 00:00:00 2001 From: Lad Prabhakar Date: Mon, 27 Apr 2020 15:41:00 +0100 Subject: [PATCH 0237/1971] clk: renesas: cpg-mssr: Add R8A7742 support Add RZ/G1H (R8A7742) Clock Pulse Generator / Module Standby and Software Reset support, using the CPG/MSSR driver core and the common R-Car Gen2 (and RZ/G) code.
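Note (explanatory only, not part of the patch): the module clock IDs in the DEF_MOD() tables below use the usual CPG/MSSR sparse base-100 encoding, where the ID combines the MSTP register index and bit number, for example:

        /* id = MSTP register index * 100 + bit number */
        DEF_MOD("scif0", 721, R8A7742_CLK_P),   /* SMSTPCR7, bit 21 */
        DEF_MOD("rwdt",  402, R8A7742_CLK_R),   /* SMSTPCR4, bit 2 */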
Signed-off-by: Lad Prabhakar Reviewed-by: Marian-Cristian Rotariu Link: https://lore.kernel.org/r/1587998460-7804-1-git-send-email-prabhakar.mahadev-lad.rj@bp.renesas.com Signed-off-by: Geert Uytterhoeven --- drivers/clk/renesas/Kconfig | 5 + drivers/clk/renesas/Makefile | 1 + drivers/clk/renesas/r8a7742-cpg-mssr.c | 275 +++++++++++++++++++++++++ drivers/clk/renesas/renesas-cpg-mssr.c | 6 + drivers/clk/renesas/renesas-cpg-mssr.h | 1 + 5 files changed, 288 insertions(+) create mode 100644 drivers/clk/renesas/r8a7742-cpg-mssr.c diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig index ac2dd92ce2ef6..149787b0005d7 100644 --- a/drivers/clk/renesas/Kconfig +++ b/drivers/clk/renesas/Kconfig @@ -8,6 +8,7 @@ config CLK_RENESAS select CLK_R7S9210 if ARCH_R7S9210 select CLK_R8A73A4 if ARCH_R8A73A4 select CLK_R8A7740 if ARCH_R8A7740 + select CLK_R8A7742 if ARCH_R8A7742 select CLK_R8A7743 if ARCH_R8A7743 || ARCH_R8A7744 select CLK_R8A7745 if ARCH_R8A7745 select CLK_R8A77470 if ARCH_R8A77470 @@ -55,6 +56,10 @@ config CLK_R8A7740 select CLK_RENESAS_CPG_MSTP select CLK_RENESAS_DIV6 +config CLK_R8A7742 + bool "RZ/G1H clock support" if COMPILE_TEST + select CLK_RCAR_GEN2_CPG + config CLK_R8A7743 bool "RZ/G1M clock support" if COMPILE_TEST select CLK_RCAR_GEN2_CPG diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile index 4a722bc5aac75..a4066f9b34ef3 100644 --- a/drivers/clk/renesas/Makefile +++ b/drivers/clk/renesas/Makefile @@ -5,6 +5,7 @@ obj-$(CONFIG_CLK_RZA1) += clk-rz.o obj-$(CONFIG_CLK_R7S9210) += r7s9210-cpg-mssr.o obj-$(CONFIG_CLK_R8A73A4) += clk-r8a73a4.o obj-$(CONFIG_CLK_R8A7740) += clk-r8a7740.o +obj-$(CONFIG_CLK_R8A7742) += r8a7742-cpg-mssr.o obj-$(CONFIG_CLK_R8A7743) += r8a7743-cpg-mssr.o obj-$(CONFIG_CLK_R8A7745) += r8a7745-cpg-mssr.o obj-$(CONFIG_CLK_R8A77470) += r8a77470-cpg-mssr.o diff --git a/drivers/clk/renesas/r8a7742-cpg-mssr.c b/drivers/clk/renesas/r8a7742-cpg-mssr.c new file mode 100644 index 0000000000000..e919828668a48 --- /dev/null +++ b/drivers/clk/renesas/r8a7742-cpg-mssr.c @@ -0,0 +1,275 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * r8a7742 Clock Pulse Generator / Module Standby and Software Reset + * + * Copyright (C) 2020 Renesas Electronics Corp. 
+ */ + +#include +#include +#include +#include + +#include + +#include "renesas-cpg-mssr.h" +#include "rcar-gen2-cpg.h" + +enum clk_ids { + /* Core Clock Outputs exported to DT */ + LAST_DT_CORE_CLK = R8A7742_CLK_OSC, + + /* External Input Clocks */ + CLK_EXTAL, + CLK_USB_EXTAL, + + /* Internal Core Clocks */ + CLK_MAIN, + CLK_PLL0, + CLK_PLL1, + CLK_PLL3, + CLK_PLL1_DIV2, + + /* Module Clocks */ + MOD_CLK_BASE +}; + +static const struct cpg_core_clk r8a7742_core_clks[] __initconst = { + /* External Clock Inputs */ + DEF_INPUT("extal", CLK_EXTAL), + DEF_INPUT("usb_extal", CLK_USB_EXTAL), + + /* Internal Core Clocks */ + DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN2_MAIN, CLK_EXTAL), + DEF_BASE(".pll0", CLK_PLL0, CLK_TYPE_GEN2_PLL0, CLK_MAIN), + DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN2_PLL1, CLK_MAIN), + DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN2_PLL3, CLK_MAIN), + + DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1), + + /* Core Clock Outputs */ + DEF_BASE("z", R8A7742_CLK_Z, CLK_TYPE_GEN2_Z, CLK_PLL0), + DEF_BASE("lb", R8A7742_CLK_LB, CLK_TYPE_GEN2_LB, CLK_PLL1), + DEF_BASE("sdh", R8A7742_CLK_SDH, CLK_TYPE_GEN2_SDH, CLK_PLL1), + DEF_BASE("sd0", R8A7742_CLK_SD0, CLK_TYPE_GEN2_SD0, CLK_PLL1), + DEF_BASE("sd1", R8A7742_CLK_SD1, CLK_TYPE_GEN2_SD1, CLK_PLL1), + DEF_BASE("qspi", R8A7742_CLK_QSPI, CLK_TYPE_GEN2_QSPI, CLK_PLL1_DIV2), + DEF_BASE("rcan", R8A7742_CLK_RCAN, CLK_TYPE_GEN2_RCAN, CLK_USB_EXTAL), + + DEF_FIXED("z2", R8A7742_CLK_Z2, CLK_PLL1, 2, 1), + DEF_FIXED("zg", R8A7742_CLK_ZG, CLK_PLL1, 3, 1), + DEF_FIXED("zx", R8A7742_CLK_ZX, CLK_PLL1, 3, 1), + DEF_FIXED("zs", R8A7742_CLK_ZS, CLK_PLL1, 6, 1), + DEF_FIXED("hp", R8A7742_CLK_HP, CLK_PLL1, 12, 1), + DEF_FIXED("b", R8A7742_CLK_B, CLK_PLL1, 12, 1), + DEF_FIXED("p", R8A7742_CLK_P, CLK_PLL1, 24, 1), + DEF_FIXED("cl", R8A7742_CLK_CL, CLK_PLL1, 48, 1), + DEF_FIXED("m2", R8A7742_CLK_M2, CLK_PLL1, 8, 1), + DEF_FIXED("zb3", R8A7742_CLK_ZB3, CLK_PLL3, 4, 1), + DEF_FIXED("zb3d2", R8A7742_CLK_ZB3D2, CLK_PLL3, 8, 1), + DEF_FIXED("ddr", R8A7742_CLK_DDR, CLK_PLL3, 8, 1), + DEF_FIXED("mp", R8A7742_CLK_MP, CLK_PLL1_DIV2, 15, 1), + DEF_FIXED("cp", R8A7742_CLK_CP, CLK_EXTAL, 2, 1), + DEF_FIXED("r", R8A7742_CLK_R, CLK_PLL1, 49152, 1), + DEF_FIXED("osc", R8A7742_CLK_OSC, CLK_PLL1, 12288, 1), + + DEF_DIV6P1("sd2", R8A7742_CLK_SD2, CLK_PLL1_DIV2, 0x078), + DEF_DIV6P1("sd3", R8A7742_CLK_SD3, CLK_PLL1_DIV2, 0x26c), + DEF_DIV6P1("mmc0", R8A7742_CLK_MMC0, CLK_PLL1_DIV2, 0x240), + DEF_DIV6P1("mmc1", R8A7742_CLK_MMC1, CLK_PLL1_DIV2, 0x244), +}; + +static const struct mssr_mod_clk r8a7742_mod_clks[] __initconst = { + DEF_MOD("msiof0", 0, R8A7742_CLK_MP), + DEF_MOD("vcp1", 100, R8A7742_CLK_ZS), + DEF_MOD("vcp0", 101, R8A7742_CLK_ZS), + DEF_MOD("vpc1", 102, R8A7742_CLK_ZS), + DEF_MOD("vpc0", 103, R8A7742_CLK_ZS), + DEF_MOD("tmu1", 111, R8A7742_CLK_P), + DEF_MOD("3dg", 112, R8A7742_CLK_ZG), + DEF_MOD("2d-dmac", 115, R8A7742_CLK_ZS), + DEF_MOD("fdp1-2", 117, R8A7742_CLK_ZS), + DEF_MOD("fdp1-1", 118, R8A7742_CLK_ZS), + DEF_MOD("fdp1-0", 119, R8A7742_CLK_ZS), + DEF_MOD("tmu3", 121, R8A7742_CLK_P), + DEF_MOD("tmu2", 122, R8A7742_CLK_P), + DEF_MOD("cmt0", 124, R8A7742_CLK_R), + DEF_MOD("tmu0", 125, R8A7742_CLK_CP), + DEF_MOD("vsp1du1", 127, R8A7742_CLK_ZS), + DEF_MOD("vsp1du0", 128, R8A7742_CLK_ZS), + DEF_MOD("vsp1-sy", 131, R8A7742_CLK_ZS), + DEF_MOD("scifa2", 202, R8A7742_CLK_MP), + DEF_MOD("scifa1", 203, R8A7742_CLK_MP), + DEF_MOD("scifa0", 204, R8A7742_CLK_MP), + DEF_MOD("msiof2", 205, R8A7742_CLK_MP), + DEF_MOD("scifb0", 206, R8A7742_CLK_MP), + DEF_MOD("scifb1", 207, 
R8A7742_CLK_MP), + DEF_MOD("msiof1", 208, R8A7742_CLK_MP), + DEF_MOD("msiof3", 215, R8A7742_CLK_MP), + DEF_MOD("scifb2", 216, R8A7742_CLK_MP), + DEF_MOD("sys-dmac1", 218, R8A7742_CLK_ZS), + DEF_MOD("sys-dmac0", 219, R8A7742_CLK_ZS), + DEF_MOD("iic2", 300, R8A7742_CLK_HP), + DEF_MOD("tpu0", 304, R8A7742_CLK_CP), + DEF_MOD("mmcif1", 305, R8A7742_CLK_MMC1), + DEF_MOD("scif2", 310, R8A7742_CLK_P), + DEF_MOD("sdhi3", 311, R8A7742_CLK_SD3), + DEF_MOD("sdhi2", 312, R8A7742_CLK_SD2), + DEF_MOD("sdhi1", 313, R8A7742_CLK_SD1), + DEF_MOD("sdhi0", 314, R8A7742_CLK_SD0), + DEF_MOD("mmcif0", 315, R8A7742_CLK_MMC0), + DEF_MOD("iic0", 318, R8A7742_CLK_HP), + DEF_MOD("pciec", 319, R8A7742_CLK_MP), + DEF_MOD("iic1", 323, R8A7742_CLK_HP), + DEF_MOD("usb3.0", 328, R8A7742_CLK_MP), + DEF_MOD("cmt1", 329, R8A7742_CLK_R), + DEF_MOD("usbhs-dmac0", 330, R8A7742_CLK_HP), + DEF_MOD("usbhs-dmac1", 331, R8A7742_CLK_HP), + DEF_MOD("rwdt", 402, R8A7742_CLK_R), + DEF_MOD("irqc", 407, R8A7742_CLK_CP), + DEF_MOD("intc-sys", 408, R8A7742_CLK_ZS), + DEF_MOD("audio-dmac1", 501, R8A7742_CLK_HP), + DEF_MOD("audio-dmac0", 502, R8A7742_CLK_HP), + DEF_MOD("thermal", 522, CLK_EXTAL), + DEF_MOD("pwm", 523, R8A7742_CLK_P), + DEF_MOD("usb-ehci", 703, R8A7742_CLK_MP), + DEF_MOD("usbhs", 704, R8A7742_CLK_HP), + DEF_MOD("hscif1", 716, R8A7742_CLK_ZS), + DEF_MOD("hscif0", 717, R8A7742_CLK_ZS), + DEF_MOD("scif1", 720, R8A7742_CLK_P), + DEF_MOD("scif0", 721, R8A7742_CLK_P), + DEF_MOD("du2", 722, R8A7742_CLK_ZX), + DEF_MOD("du1", 723, R8A7742_CLK_ZX), + DEF_MOD("du0", 724, R8A7742_CLK_ZX), + DEF_MOD("lvds1", 725, R8A7742_CLK_ZX), + DEF_MOD("lvds0", 726, R8A7742_CLK_ZX), + DEF_MOD("r-gp2d", 807, R8A7742_CLK_ZX), + DEF_MOD("vin3", 808, R8A7742_CLK_ZG), + DEF_MOD("vin2", 809, R8A7742_CLK_ZG), + DEF_MOD("vin1", 810, R8A7742_CLK_ZG), + DEF_MOD("vin0", 811, R8A7742_CLK_ZG), + DEF_MOD("etheravb", 812, R8A7742_CLK_HP), + DEF_MOD("ether", 813, R8A7742_CLK_P), + DEF_MOD("sata1", 814, R8A7742_CLK_ZS), + DEF_MOD("sata0", 815, R8A7742_CLK_ZS), + DEF_MOD("imr-x2-1", 820, R8A7742_CLK_ZG), + DEF_MOD("imr-x2-0", 821, R8A7742_CLK_HP), + DEF_MOD("imr-lsx2-1", 822, R8A7742_CLK_P), + DEF_MOD("imr-lsx2-0", 823, R8A7742_CLK_ZS), + DEF_MOD("gpio5", 907, R8A7742_CLK_CP), + DEF_MOD("gpio4", 908, R8A7742_CLK_CP), + DEF_MOD("gpio3", 909, R8A7742_CLK_CP), + DEF_MOD("gpio2", 910, R8A7742_CLK_CP), + DEF_MOD("gpio1", 911, R8A7742_CLK_CP), + DEF_MOD("gpio0", 912, R8A7742_CLK_CP), + DEF_MOD("can1", 915, R8A7742_CLK_P), + DEF_MOD("can0", 916, R8A7742_CLK_P), + DEF_MOD("qspi_mod", 917, R8A7742_CLK_QSPI), + DEF_MOD("iicdvfs", 926, R8A7742_CLK_CP), + DEF_MOD("i2c3", 928, R8A7742_CLK_HP), + DEF_MOD("i2c2", 929, R8A7742_CLK_HP), + DEF_MOD("i2c1", 930, R8A7742_CLK_HP), + DEF_MOD("i2c0", 931, R8A7742_CLK_HP), + DEF_MOD("ssi-all", 1005, R8A7742_CLK_P), + DEF_MOD("ssi9", 1006, MOD_CLK_ID(1005)), + DEF_MOD("ssi8", 1007, MOD_CLK_ID(1005)), + DEF_MOD("ssi7", 1008, MOD_CLK_ID(1005)), + DEF_MOD("ssi6", 1009, MOD_CLK_ID(1005)), + DEF_MOD("ssi5", 1010, MOD_CLK_ID(1005)), + DEF_MOD("ssi4", 1011, MOD_CLK_ID(1005)), + DEF_MOD("ssi3", 1012, MOD_CLK_ID(1005)), + DEF_MOD("ssi2", 1013, MOD_CLK_ID(1005)), + DEF_MOD("ssi1", 1014, MOD_CLK_ID(1005)), + DEF_MOD("ssi0", 1015, MOD_CLK_ID(1005)), + DEF_MOD("scu-all", 1017, R8A7742_CLK_P), + DEF_MOD("scu-dvc1", 1018, MOD_CLK_ID(1017)), + DEF_MOD("scu-dvc0", 1019, MOD_CLK_ID(1017)), + DEF_MOD("scu-ctu1-mix1", 1020, MOD_CLK_ID(1017)), + DEF_MOD("scu-ctu0-mix0", 1021, MOD_CLK_ID(1017)), + DEF_MOD("scu-src9", 1022, MOD_CLK_ID(1017)), + DEF_MOD("scu-src8", 1023, 
MOD_CLK_ID(1017)), + DEF_MOD("scu-src7", 1024, MOD_CLK_ID(1017)), + DEF_MOD("scu-src6", 1025, MOD_CLK_ID(1017)), + DEF_MOD("scu-src5", 1026, MOD_CLK_ID(1017)), + DEF_MOD("scu-src4", 1027, MOD_CLK_ID(1017)), + DEF_MOD("scu-src3", 1028, MOD_CLK_ID(1017)), + DEF_MOD("scu-src2", 1029, MOD_CLK_ID(1017)), + DEF_MOD("scu-src1", 1030, MOD_CLK_ID(1017)), + DEF_MOD("scu-src0", 1031, MOD_CLK_ID(1017)), +}; + +static const unsigned int r8a7742_crit_mod_clks[] __initconst = { + MOD_CLK_ID(402), /* RWDT */ + MOD_CLK_ID(408), /* INTC-SYS (GIC) */ +}; + +/* + * CPG Clock Data + */ + +/* + * MD EXTAL PLL0 PLL1 PLL3 + * 14 13 19 (MHz) *1 *1 + *--------------------------------------------------- + * 0 0 0 15 x172/2 x208/2 x106 + * 0 0 1 15 x172/2 x208/2 x88 + * 0 1 0 20 x130/2 x156/2 x80 + * 0 1 1 20 x130/2 x156/2 x66 + * 1 0 0 26 / 2 x200/2 x240/2 x122 + * 1 0 1 26 / 2 x200/2 x240/2 x102 + * 1 1 0 30 / 2 x172/2 x208/2 x106 + * 1 1 1 30 / 2 x172/2 x208/2 x88 + * + * *1 : Table 7.5a indicates VCO output (PLLx = VCO/2) + */ +#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 12) | \ + (((md) & BIT(13)) >> 12) | \ + (((md) & BIT(19)) >> 19)) + +static const struct rcar_gen2_cpg_pll_config cpg_pll_configs[8] __initconst = { + /* EXTAL div PLL1 mult PLL3 mult */ + { 1, 208, 106, }, + { 1, 208, 88, }, + { 1, 156, 80, }, + { 1, 156, 66, }, + { 2, 240, 122, }, + { 2, 240, 102, }, + { 2, 208, 106, }, + { 2, 208, 88, }, +}; + +static int __init r8a7742_cpg_mssr_init(struct device *dev) +{ + const struct rcar_gen2_cpg_pll_config *cpg_pll_config; + u32 cpg_mode; + int error; + + error = rcar_rst_read_mode_pins(&cpg_mode); + if (error) + return error; + + cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)]; + + return rcar_gen2_cpg_init(cpg_pll_config, 2, cpg_mode); +} + +const struct cpg_mssr_info r8a7742_cpg_mssr_info __initconst = { + /* Core Clocks */ + .core_clks = r8a7742_core_clks, + .num_core_clks = ARRAY_SIZE(r8a7742_core_clks), + .last_dt_core_clk = LAST_DT_CORE_CLK, + .num_total_core_clks = MOD_CLK_BASE, + + /* Module Clocks */ + .mod_clks = r8a7742_mod_clks, + .num_mod_clks = ARRAY_SIZE(r8a7742_mod_clks), + .num_hw_mod_clks = 12 * 32, + + /* Critical Module Clocks */ + .crit_mod_clks = r8a7742_crit_mod_clks, + .num_crit_mod_clks = ARRAY_SIZE(r8a7742_crit_mod_clks), + + /* Callbacks */ + .init = r8a7742_cpg_mssr_init, + .cpg_clk_register = rcar_gen2_cpg_clk_register, +}; diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c index a2663fbbd7a51..8f6dff362869b 100644 --- a/drivers/clk/renesas/renesas-cpg-mssr.c +++ b/drivers/clk/renesas/renesas-cpg-mssr.c @@ -673,6 +673,12 @@ static const struct of_device_id cpg_mssr_match[] = { .data = &r7s9210_cpg_mssr_info, }, #endif +#ifdef CONFIG_CLK_R8A7742 + { + .compatible = "renesas,r8a7742-cpg-mssr", + .data = &r8a7742_cpg_mssr_info, + }, +#endif #ifdef CONFIG_CLK_R8A7743 { .compatible = "renesas,r8a7743-cpg-mssr", diff --git a/drivers/clk/renesas/renesas-cpg-mssr.h b/drivers/clk/renesas/renesas-cpg-mssr.h index 3b852ba0ecec7..55a18ef0efafc 100644 --- a/drivers/clk/renesas/renesas-cpg-mssr.h +++ b/drivers/clk/renesas/renesas-cpg-mssr.h @@ -155,6 +155,7 @@ struct cpg_mssr_info { }; extern const struct cpg_mssr_info r7s9210_cpg_mssr_info; +extern const struct cpg_mssr_info r8a7742_cpg_mssr_info; extern const struct cpg_mssr_info r8a7743_cpg_mssr_info; extern const struct cpg_mssr_info r8a7745_cpg_mssr_info; extern const struct cpg_mssr_info r8a77470_cpg_mssr_info; -- GitLab From 
e2f022c10ed3b50ba1d2bb1f037b0e7a84cb1c3e Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Mon, 27 Apr 2020 21:34:46 +0200 Subject: [PATCH 0238/1971] clk: renesas: rcar-gen2: Remove superfluous CLK_RENESAS_DIV6 selects MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit CLK_RENESAS_CPG_MSSR selects CLK_RENESAS_DIV6, and CLK_RCAR_GEN2_CPG selects CLK_RENESAS_CPG_MSSR, so there is no longer a need for the individual R-Car Gen2 clock driver options to select CLK_RENESAS_DIV6. Signed-off-by: Geert Uytterhoeven Reviewed-by: Niklas Söderlund Link: https://lore.kernel.org/r/20200427193446.29738-1-geert+renesas@glider.be --- drivers/clk/renesas/Kconfig | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig index 149787b0005d7..9eb79bf906430 100644 --- a/drivers/clk/renesas/Kconfig +++ b/drivers/clk/renesas/Kconfig @@ -95,12 +95,10 @@ config CLK_R8A7779 config CLK_R8A7790 bool "R-Car H2 clock support" if COMPILE_TEST select CLK_RCAR_GEN2_CPG - select CLK_RENESAS_DIV6 config CLK_R8A7791 bool "R-Car M2-W/N clock support" if COMPILE_TEST select CLK_RCAR_GEN2_CPG - select CLK_RENESAS_DIV6 config CLK_R8A7792 bool "R-Car V2H clock support" if COMPILE_TEST @@ -109,7 +107,6 @@ config CLK_R8A7792 config CLK_R8A7794 bool "R-Car E2 clock support" if COMPILE_TEST select CLK_RCAR_GEN2_CPG - select CLK_RENESAS_DIV6 config CLK_R8A7795 bool "R-Car H3 clock support" if COMPILE_TEST -- GitLab From 08736e83079929bdbff8ba91c6bdb3d3e57e0abd Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 29 Apr 2020 16:23:23 +0300 Subject: [PATCH 0239/1971] i2c: stm32f7: prevent array underflow in stm32f7_get_lower_rate() We want to break with "i" set to zero whether we find the rate we want or not. In the current code, if we don't find the rate we want then it exits the loop with "i" set to -1 and results in an array underflow. Fixes: 09cc9a3bce91 ("i2c: stm32f7: allows for any bus frequency") Signed-off-by: Dan Carpenter Reviewed-by: Alain Volmat Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-stm32f7.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c index 9c9e10ea91991..bff3479fe122a 100644 --- a/drivers/i2c/busses/i2c-stm32f7.c +++ b/drivers/i2c/busses/i2c-stm32f7.c @@ -607,7 +607,7 @@ static u32 stm32f7_get_lower_rate(u32 rate) { int i = ARRAY_SIZE(stm32f7_i2c_specs); - while (i--) + while (--i) if (stm32f7_i2c_specs[i].rate < rate) break; -- GitLab From de51696cf5ec9dc834b35b343c45e90091755d06 Mon Sep 17 00:00:00 2001 From: Jason Yan Date: Sun, 26 Apr 2020 17:42:28 +0800 Subject: [PATCH 0240/1971] i2c: powermac: use true,false for bool variable In i2c_powermac_register_devices(), variable 'found_onyx' is bool and assigned '0' and 'true' in different places. Use 'false' instead of '0'. 
Signed-off-by: Jason Yan Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-powermac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c index 81506c2dab657..3e38e114948b2 100644 --- a/drivers/i2c/busses/i2c-powermac.c +++ b/drivers/i2c/busses/i2c-powermac.c @@ -315,7 +315,7 @@ static void i2c_powermac_register_devices(struct i2c_adapter *adap, { struct i2c_client *newdev; struct device_node *node; - bool found_onyx = 0; + bool found_onyx = false; /* * In some cases we end up with the via-pmu node itself, in this -- GitLab From b359ed5184aebf9d987e54abc5dae7ac03ed29ae Mon Sep 17 00:00:00 2001 From: Jean-Philippe Brucker Date: Fri, 17 Apr 2020 16:23:26 +0200 Subject: [PATCH 0241/1971] mtd: cfi_cmdset_0001: Support the absence of protection registers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The flash controller implemented by the Arm Base platform behaves like the Intel StrataFlash J3 device, but omits several features. In particular it doesn't implement a protection register, so "Number of Protection register fields" in the Primary Vendor-Specific Extended Query, is 0. The Intel StrataFlash J3 datasheet only lists 1 as a valid value for NumProtectionFields. It describes the field as: "Number of Protection register fields in JEDEC ID space. “00h,” indicates that 256 protection bytes are available" While a value of 0 may arguably not be architecturally valid, the driver's current behavior is certainly wrong: if NumProtectionFields is 0, read_pri_intelext() adds a negative value to the unsigned extra_size, and ends up in an infinite loop. Fix it by ignoring a NumProtectionFields of 0. Signed-off-by: Jean-Philippe Brucker Tested-by: Sudeep Holla Tested-by: Catalin Marinas Signed-off-by: Vignesh Raghavendra --- drivers/mtd/chips/cfi_cmdset_0001.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c index 142c0f9485fe1..42001c49833b9 100644 --- a/drivers/mtd/chips/cfi_cmdset_0001.c +++ b/drivers/mtd/chips/cfi_cmdset_0001.c @@ -420,8 +420,9 @@ read_pri_intelext(struct map_info *map, __u16 adr) extra_size = 0; /* Protection Register info */ - extra_size += (extp->NumProtectionFields - 1) * - sizeof(struct cfi_intelext_otpinfo); + if (extp->NumProtectionFields) + extra_size += (extp->NumProtectionFields - 1) * + sizeof(struct cfi_intelext_otpinfo); } if (extp->MinorVersion >= '1') { @@ -695,14 +696,16 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd, */ if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3' && extp->FeatureSupport & (1 << 9)) { + int offs = 0; struct cfi_private *newcfi; struct flchip *chip; struct flchip_shared *shared; - int offs, numregions, numparts, partshift, numvirtchips, i, j; + int numregions, numparts, partshift, numvirtchips, i, j; /* Protection Register info */ - offs = (extp->NumProtectionFields - 1) * - sizeof(struct cfi_intelext_otpinfo); + if (extp->NumProtectionFields) + offs = (extp->NumProtectionFields - 1) * + sizeof(struct cfi_intelext_otpinfo); /* Burst Read info */ offs += extp->extra[offs+1]+2; -- GitLab From 0edb259c98fcffef6916503282eb6e3133878b9a Mon Sep 17 00:00:00 2001 From: Daniel Golle Date: Mon, 30 Mar 2020 23:09:59 +0100 Subject: [PATCH 0242/1971] power: reset: introduce oxnas-restart Add reboot handler for Oxford OX820 chips as reboot currenly hangs on those boards. 
Code is based on ox820_assert_system_reset() found in https://github.com/kref/linux-oxnas.git in arch/arm/mach-oxnas/mach-ox820.c line 181. Signed-off-by: Daniel Golle Acked-by: Neil Armstrong Signed-off-by: Sebastian Reichel --- MAINTAINERS | 1 + drivers/power/reset/Kconfig | 7 + drivers/power/reset/Makefile | 1 + drivers/power/reset/oxnas-restart.c | 233 ++++++++++++++++++++++++++++ 4 files changed, 242 insertions(+) create mode 100644 drivers/power/reset/oxnas-restart.c diff --git a/MAINTAINERS b/MAINTAINERS index e64e5db314976..9821691396f83 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2163,6 +2163,7 @@ L: linux-oxnas@groups.io (moderated for non-subscribers) S: Maintained F: arch/arm/boot/dts/ox8*.dts* F: arch/arm/mach-oxnas/ +F: drivers/power/reset/oxnas-restart.c N: oxnas ARM/PALM TREO SUPPORT diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig index 8903803020805..4dfac618b9427 100644 --- a/drivers/power/reset/Kconfig +++ b/drivers/power/reset/Kconfig @@ -123,6 +123,13 @@ config POWER_RESET_OCELOT_RESET help This driver supports restart for Microsemi Ocelot SoC. +config POWER_RESET_OXNAS + bool "OXNAS SoC restart driver" + depends on ARCH_OXNAS + default MACH_OX820 + help + Restart support for OXNAS/PLXTECH OX820 SoC. + config POWER_RESET_PIIX4_POWEROFF tristate "Intel PIIX4 power-off driver" depends on PCI diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile index da37f8b851dc2..5710ca4695170 100644 --- a/drivers/power/reset/Makefile +++ b/drivers/power/reset/Makefile @@ -12,6 +12,7 @@ obj-$(CONFIG_POWER_RESET_GPIO_RESTART) += gpio-restart.o obj-$(CONFIG_POWER_RESET_HISI) += hisi-reboot.o obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o obj-$(CONFIG_POWER_RESET_MT6323) += mt6323-poweroff.o +obj-$(CONFIG_POWER_RESET_OXNAS) += oxnas-restart.o obj-$(CONFIG_POWER_RESET_QCOM_PON) += qcom-pon.o obj-$(CONFIG_POWER_RESET_OCELOT_RESET) += ocelot-reset.o obj-$(CONFIG_POWER_RESET_PIIX4_POWEROFF) += piix4-poweroff.o diff --git a/drivers/power/reset/oxnas-restart.c b/drivers/power/reset/oxnas-restart.c new file mode 100644 index 0000000000000..13090bec058a0 --- /dev/null +++ b/drivers/power/reset/oxnas-restart.c @@ -0,0 +1,233 @@ +// SPDX-License-Identifier: (GPL-2.0) +/* + * oxnas SoC reset driver + * based on: + * Microsemi MIPS SoC reset driver + * and ox820_assert_system_reset() written by Ma Hajun + * + * Copyright (c) 2013 Ma Hajun + * Copyright (c) 2017 Microsemi Corporation + * Copyright (c) 2020 Daniel Golle + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* bit numbers of reset control register */ +#define OX820_SYS_CTRL_RST_SCU 0 +#define OX820_SYS_CTRL_RST_COPRO 1 +#define OX820_SYS_CTRL_RST_ARM0 2 +#define OX820_SYS_CTRL_RST_ARM1 3 +#define OX820_SYS_CTRL_RST_USBHS 4 +#define OX820_SYS_CTRL_RST_USBHSPHYA 5 +#define OX820_SYS_CTRL_RST_MACA 6 +#define OX820_SYS_CTRL_RST_MAC OX820_SYS_CTRL_RST_MACA +#define OX820_SYS_CTRL_RST_PCIEA 7 +#define OX820_SYS_CTRL_RST_SGDMA 8 +#define OX820_SYS_CTRL_RST_CIPHER 9 +#define OX820_SYS_CTRL_RST_DDR 10 +#define OX820_SYS_CTRL_RST_SATA 11 +#define OX820_SYS_CTRL_RST_SATA_LINK 12 +#define OX820_SYS_CTRL_RST_SATA_PHY 13 +#define OX820_SYS_CTRL_RST_PCIEPHY 14 +#define OX820_SYS_CTRL_RST_STATIC 15 +#define OX820_SYS_CTRL_RST_GPIO 16 +#define OX820_SYS_CTRL_RST_UART1 17 +#define OX820_SYS_CTRL_RST_UART2 18 +#define OX820_SYS_CTRL_RST_MISC 19 +#define OX820_SYS_CTRL_RST_I2S 20 +#define OX820_SYS_CTRL_RST_SD 21 +#define OX820_SYS_CTRL_RST_MACB 22 +#define 
OX820_SYS_CTRL_RST_PCIEB 23 +#define OX820_SYS_CTRL_RST_VIDEO 24 +#define OX820_SYS_CTRL_RST_DDR_PHY 25 +#define OX820_SYS_CTRL_RST_USBHSPHYB 26 +#define OX820_SYS_CTRL_RST_USBDEV 27 +#define OX820_SYS_CTRL_RST_ARMDBG 29 +#define OX820_SYS_CTRL_RST_PLLA 30 +#define OX820_SYS_CTRL_RST_PLLB 31 + +/* bit numbers of clock control register */ +#define OX820_SYS_CTRL_CLK_COPRO 0 +#define OX820_SYS_CTRL_CLK_DMA 1 +#define OX820_SYS_CTRL_CLK_CIPHER 2 +#define OX820_SYS_CTRL_CLK_SD 3 +#define OX820_SYS_CTRL_CLK_SATA 4 +#define OX820_SYS_CTRL_CLK_I2S 5 +#define OX820_SYS_CTRL_CLK_USBHS 6 +#define OX820_SYS_CTRL_CLK_MACA 7 +#define OX820_SYS_CTRL_CLK_MAC OX820_SYS_CTRL_CLK_MACA +#define OX820_SYS_CTRL_CLK_PCIEA 8 +#define OX820_SYS_CTRL_CLK_STATIC 9 +#define OX820_SYS_CTRL_CLK_MACB 10 +#define OX820_SYS_CTRL_CLK_PCIEB 11 +#define OX820_SYS_CTRL_CLK_REF600 12 +#define OX820_SYS_CTRL_CLK_USBDEV 13 +#define OX820_SYS_CTRL_CLK_DDR 14 +#define OX820_SYS_CTRL_CLK_DDRPHY 15 +#define OX820_SYS_CTRL_CLK_DDRCK 16 + +/* Regmap offsets */ +#define OX820_CLK_SET_REGOFFSET 0x2c +#define OX820_CLK_CLR_REGOFFSET 0x30 +#define OX820_RST_SET_REGOFFSET 0x34 +#define OX820_RST_CLR_REGOFFSET 0x38 +#define OX820_SECONDARY_SEL_REGOFFSET 0x14 +#define OX820_TERTIARY_SEL_REGOFFSET 0x8c +#define OX820_QUATERNARY_SEL_REGOFFSET 0x94 +#define OX820_DEBUG_SEL_REGOFFSET 0x9c +#define OX820_ALTERNATIVE_SEL_REGOFFSET 0xa4 +#define OX820_PULLUP_SEL_REGOFFSET 0xac +#define OX820_SEC_SECONDARY_SEL_REGOFFSET 0x100014 +#define OX820_SEC_TERTIARY_SEL_REGOFFSET 0x10008c +#define OX820_SEC_QUATERNARY_SEL_REGOFFSET 0x100094 +#define OX820_SEC_DEBUG_SEL_REGOFFSET 0x10009c +#define OX820_SEC_ALTERNATIVE_SEL_REGOFFSET 0x1000a4 +#define OX820_SEC_PULLUP_SEL_REGOFFSET 0x1000ac + +struct oxnas_restart_context { + struct regmap *sys_ctrl; + struct notifier_block restart_handler; +}; + +static int ox820_restart_handle(struct notifier_block *this, + unsigned long mode, void *cmd) +{ + struct oxnas_restart_context *ctx = container_of(this, struct + oxnas_restart_context, + restart_handler); + u32 value; + + /* + * Assert reset to cores as per power on defaults + * Don't touch the DDR interface as things will come to an impromptu + * stop NB Possibly should be asserting reset for PLLB, but there are + * timing concerns here according to the docs + */ + value = BIT(OX820_SYS_CTRL_RST_COPRO) | + BIT(OX820_SYS_CTRL_RST_USBHS) | + BIT(OX820_SYS_CTRL_RST_USBHSPHYA) | + BIT(OX820_SYS_CTRL_RST_MACA) | + BIT(OX820_SYS_CTRL_RST_PCIEA) | + BIT(OX820_SYS_CTRL_RST_SGDMA) | + BIT(OX820_SYS_CTRL_RST_CIPHER) | + BIT(OX820_SYS_CTRL_RST_SATA) | + BIT(OX820_SYS_CTRL_RST_SATA_LINK) | + BIT(OX820_SYS_CTRL_RST_SATA_PHY) | + BIT(OX820_SYS_CTRL_RST_PCIEPHY) | + BIT(OX820_SYS_CTRL_RST_STATIC) | + BIT(OX820_SYS_CTRL_RST_UART1) | + BIT(OX820_SYS_CTRL_RST_UART2) | + BIT(OX820_SYS_CTRL_RST_MISC) | + BIT(OX820_SYS_CTRL_RST_I2S) | + BIT(OX820_SYS_CTRL_RST_SD) | + BIT(OX820_SYS_CTRL_RST_MACB) | + BIT(OX820_SYS_CTRL_RST_PCIEB) | + BIT(OX820_SYS_CTRL_RST_VIDEO) | + BIT(OX820_SYS_CTRL_RST_USBHSPHYB) | + BIT(OX820_SYS_CTRL_RST_USBDEV); + + regmap_write(ctx->sys_ctrl, OX820_RST_SET_REGOFFSET, value); + + /* Release reset to cores as per power on defaults */ + regmap_write(ctx->sys_ctrl, OX820_RST_CLR_REGOFFSET, + BIT(OX820_SYS_CTRL_RST_GPIO)); + + /* + * Disable clocks to cores as per power-on defaults - must leave DDR + * related clocks enabled otherwise we'll stop rather abruptly. 
+ */ + value = BIT(OX820_SYS_CTRL_CLK_COPRO) | + BIT(OX820_SYS_CTRL_CLK_DMA) | + BIT(OX820_SYS_CTRL_CLK_CIPHER) | + BIT(OX820_SYS_CTRL_CLK_SD) | + BIT(OX820_SYS_CTRL_CLK_SATA) | + BIT(OX820_SYS_CTRL_CLK_I2S) | + BIT(OX820_SYS_CTRL_CLK_USBHS) | + BIT(OX820_SYS_CTRL_CLK_MAC) | + BIT(OX820_SYS_CTRL_CLK_PCIEA) | + BIT(OX820_SYS_CTRL_CLK_STATIC) | + BIT(OX820_SYS_CTRL_CLK_MACB) | + BIT(OX820_SYS_CTRL_CLK_PCIEB) | + BIT(OX820_SYS_CTRL_CLK_REF600) | + BIT(OX820_SYS_CTRL_CLK_USBDEV); + + regmap_write(ctx->sys_ctrl, OX820_CLK_CLR_REGOFFSET, value); + + /* Enable clocks to cores as per power-on defaults */ + + /* Set sys-control pin mux'ing as per power-on defaults */ + regmap_write(ctx->sys_ctrl, OX820_SECONDARY_SEL_REGOFFSET, 0); + regmap_write(ctx->sys_ctrl, OX820_TERTIARY_SEL_REGOFFSET, 0); + regmap_write(ctx->sys_ctrl, OX820_QUATERNARY_SEL_REGOFFSET, 0); + regmap_write(ctx->sys_ctrl, OX820_DEBUG_SEL_REGOFFSET, 0); + regmap_write(ctx->sys_ctrl, OX820_ALTERNATIVE_SEL_REGOFFSET, 0); + regmap_write(ctx->sys_ctrl, OX820_PULLUP_SEL_REGOFFSET, 0); + + regmap_write(ctx->sys_ctrl, OX820_SEC_SECONDARY_SEL_REGOFFSET, 0); + regmap_write(ctx->sys_ctrl, OX820_SEC_TERTIARY_SEL_REGOFFSET, 0); + regmap_write(ctx->sys_ctrl, OX820_SEC_QUATERNARY_SEL_REGOFFSET, 0); + regmap_write(ctx->sys_ctrl, OX820_SEC_DEBUG_SEL_REGOFFSET, 0); + regmap_write(ctx->sys_ctrl, OX820_SEC_ALTERNATIVE_SEL_REGOFFSET, 0); + regmap_write(ctx->sys_ctrl, OX820_SEC_PULLUP_SEL_REGOFFSET, 0); + + /* + * No need to save any state, as the ROM loader can determine whether + * reset is due to power cycling or programatic action, just hit the + * (self-clearing) CPU reset bit of the block reset register + */ + value = + BIT(OX820_SYS_CTRL_RST_SCU) | + BIT(OX820_SYS_CTRL_RST_ARM0) | + BIT(OX820_SYS_CTRL_RST_ARM1); + + regmap_write(ctx->sys_ctrl, OX820_RST_SET_REGOFFSET, value); + + pr_emerg("Unable to restart system\n"); + return NOTIFY_DONE; +} + +static int ox820_restart_probe(struct platform_device *pdev) +{ + struct oxnas_restart_context *ctx; + struct regmap *sys_ctrl; + struct device *dev = &pdev->dev; + int err = 0; + + sys_ctrl = syscon_node_to_regmap(pdev->dev.of_node); + if (IS_ERR(sys_ctrl)) + return PTR_ERR(sys_ctrl); + + ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->sys_ctrl = sys_ctrl; + ctx->restart_handler.notifier_call = ox820_restart_handle; + ctx->restart_handler.priority = 192; + err = register_restart_handler(&ctx->restart_handler); + if (err) + dev_err(dev, "can't register restart notifier (err=%d)\n", err); + + return err; +} + +static const struct of_device_id ox820_restart_of_match[] = { + { .compatible = "oxsemi,ox820-sys-ctrl" }, + {} +}; + +static struct platform_driver ox820_restart_driver = { + .probe = ox820_restart_probe, + .driver = { + .name = "ox820-chip-reset", + .of_match_table = ox820_restart_of_match, + }, +}; +builtin_platform_driver(ox820_restart_driver); -- GitLab From de46e0289310b5e56994eafec2f0d2a5da095ea9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Miros=C5=82aw?= Date: Fri, 3 Apr 2020 22:20:32 +0200 Subject: [PATCH 0243/1971] power: supply: core: reduce power_supply_show_usb_type() parameters MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reduce power_supply_show_usb_type() parameter count by folding power_supply_desc dereference into the function. This makes following patch making usb_types const easier. 
Signed-off-by: Michał Mirosław Signed-off-by: Sebastian Reichel --- drivers/power/supply/power_supply_sysfs.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c index f37ad4eae60b9..51de3f47b25db 100644 --- a/drivers/power/supply/power_supply_sysfs.c +++ b/drivers/power/supply/power_supply_sysfs.c @@ -78,8 +78,7 @@ static const char * const power_supply_scope_text[] = { }; static ssize_t power_supply_show_usb_type(struct device *dev, - enum power_supply_usb_type *usb_types, - ssize_t num_usb_types, + const struct power_supply_desc *desc, union power_supply_propval *value, char *buf) { @@ -88,8 +87,8 @@ static ssize_t power_supply_show_usb_type(struct device *dev, bool match = false; int i; - for (i = 0; i < num_usb_types; ++i) { - usb_type = usb_types[i]; + for (i = 0; i < desc->num_usb_types; ++i) { + usb_type = desc->usb_types[i]; if (value->intval == usb_type) { count += sprintf(buf + count, "[%s] ", @@ -163,8 +162,7 @@ static ssize_t power_supply_show_property(struct device *dev, power_supply_type_text[value.intval]); break; case POWER_SUPPLY_PROP_USB_TYPE: - ret = power_supply_show_usb_type(dev, psy->desc->usb_types, - psy->desc->num_usb_types, + ret = power_supply_show_usb_type(dev, psy->desc, &value, buf); break; case POWER_SUPPLY_PROP_SCOPE: -- GitLab From 4cb3825859dcc37eaf6994a32c3487d0a1fdda0b Mon Sep 17 00:00:00 2001 From: Sebastian Reichel Date: Mon, 13 Apr 2020 20:38:51 +0200 Subject: [PATCH 0244/1971] power: supply: charger-manager: Prepare for const properties This prepares the driver to work with the properties entry in power_supply_desc marked as const. Reviewed-by: Enric Balletbo i Serra Signed-off-by: Sebastian Reichel --- drivers/power/supply/charger-manager.c | 40 ++++++++++++++------------ 1 file changed, 22 insertions(+), 18 deletions(-) diff --git a/drivers/power/supply/charger-manager.c b/drivers/power/supply/charger-manager.c index a21e1a2673f80..415a9efa68161 100644 --- a/drivers/power/supply/charger-manager.c +++ b/drivers/power/supply/charger-manager.c @@ -1422,7 +1422,9 @@ static int charger_manager_prepare_sysfs(struct charger_manager *cm) } static int cm_init_thermal_data(struct charger_manager *cm, - struct power_supply *fuel_gauge) + struct power_supply *fuel_gauge, + enum power_supply_property *properties, + size_t *num_properties) { struct charger_desc *desc = cm->desc; union power_supply_propval val; @@ -1433,9 +1435,8 @@ static int cm_init_thermal_data(struct charger_manager *cm, POWER_SUPPLY_PROP_TEMP, &val); if (!ret) { - cm->charger_psy_desc.properties[cm->charger_psy_desc.num_properties] = - POWER_SUPPLY_PROP_TEMP; - cm->charger_psy_desc.num_properties++; + properties[*num_properties] = POWER_SUPPLY_PROP_TEMP; + (*num_properties)++; cm->desc->measure_battery_temp = true; } #ifdef CONFIG_THERMAL @@ -1446,9 +1447,8 @@ static int cm_init_thermal_data(struct charger_manager *cm, return PTR_ERR(cm->tzd_batt); /* Use external thermometer */ - cm->charger_psy_desc.properties[cm->charger_psy_desc.num_properties] = - POWER_SUPPLY_PROP_TEMP_AMBIENT; - cm->charger_psy_desc.num_properties++; + properties[*num_properties] = POWER_SUPPLY_PROP_TEMP_AMBIENT; + (*num_properties)++; cm->desc->measure_battery_temp = true; ret = 0; } @@ -1621,6 +1621,8 @@ static int charger_manager_probe(struct platform_device *pdev) int j = 0; union power_supply_propval val; struct power_supply *fuel_gauge; + enum power_supply_property *properties; + size_t num_properties; 
struct power_supply_config psy_cfg = {}; if (IS_ERR(desc)) { @@ -1717,18 +1719,17 @@ static int charger_manager_probe(struct platform_device *pdev) cm->charger_psy_desc.name = cm->psy_name_buf; /* Allocate for psy properties because they may vary */ - cm->charger_psy_desc.properties = - devm_kcalloc(&pdev->dev, + properties = devm_kcalloc(&pdev->dev, ARRAY_SIZE(default_charger_props) + NUM_CHARGER_PSY_OPTIONAL, - sizeof(enum power_supply_property), GFP_KERNEL); - if (!cm->charger_psy_desc.properties) + sizeof(*properties), GFP_KERNEL); + if (!properties) return -ENOMEM; - memcpy(cm->charger_psy_desc.properties, default_charger_props, + memcpy(properties, default_charger_props, sizeof(enum power_supply_property) * ARRAY_SIZE(default_charger_props)); - cm->charger_psy_desc.num_properties = psy_default.num_properties; + num_properties = psy_default.num_properties; /* Find which optional psy-properties are available */ fuel_gauge = power_supply_get_by_name(desc->psy_fuel_gauge); @@ -1739,25 +1740,28 @@ static int charger_manager_probe(struct platform_device *pdev) } if (!power_supply_get_property(fuel_gauge, POWER_SUPPLY_PROP_CHARGE_NOW, &val)) { - cm->charger_psy_desc.properties[cm->charger_psy_desc.num_properties] = + properties[num_properties] = POWER_SUPPLY_PROP_CHARGE_NOW; - cm->charger_psy_desc.num_properties++; + num_properties++; } if (!power_supply_get_property(fuel_gauge, POWER_SUPPLY_PROP_CURRENT_NOW, &val)) { - cm->charger_psy_desc.properties[cm->charger_psy_desc.num_properties] = + properties[num_properties] = POWER_SUPPLY_PROP_CURRENT_NOW; - cm->charger_psy_desc.num_properties++; + num_properties++; } - ret = cm_init_thermal_data(cm, fuel_gauge); + ret = cm_init_thermal_data(cm, fuel_gauge, properties, &num_properties); if (ret) { dev_err(&pdev->dev, "Failed to initialize thermal data\n"); cm->desc->measure_battery_temp = false; } power_supply_put(fuel_gauge); + cm->charger_psy_desc.properties = properties; + cm->charger_psy_desc.num_properties = num_properties; + INIT_DELAYED_WORK(&cm->fullbatt_vchk_work, fullbatt_vchk); /* Register sysfs entry for charger(regulator) */ -- GitLab From 191e6bcf50dd54786c3560420b815bce5b523a61 Mon Sep 17 00:00:00 2001 From: Sebastian Reichel Date: Mon, 13 Apr 2020 20:38:52 +0200 Subject: [PATCH 0245/1971] power: supply: generic-adc-battery: Prepare for const properties This prepares the driver to work with the properties entry in power_supply_desc marked as const. Reviewed-by: Enric Balletbo i Serra Signed-off-by: Sebastian Reichel --- drivers/power/supply/generic-adc-battery.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/drivers/power/supply/generic-adc-battery.c b/drivers/power/supply/generic-adc-battery.c index bc462d1ec9630..caa829738ef79 100644 --- a/drivers/power/supply/generic-adc-battery.c +++ b/drivers/power/supply/generic-adc-battery.c @@ -241,6 +241,7 @@ static int gab_probe(struct platform_device *pdev) struct power_supply_desc *psy_desc; struct power_supply_config psy_cfg = {}; struct gab_platform_data *pdata = pdev->dev.platform_data; + enum power_supply_property *properties; int ret = 0; int chan; int index = ARRAY_SIZE(gab_props); @@ -268,16 +269,16 @@ static int gab_probe(struct platform_device *pdev) * copying the static properties and allocating extra memory for holding * the extra configurable properties received from platform data. 
*/ - psy_desc->properties = kcalloc(ARRAY_SIZE(gab_props) + - ARRAY_SIZE(gab_chan_name), - sizeof(*psy_desc->properties), - GFP_KERNEL); - if (!psy_desc->properties) { + properties = kcalloc(ARRAY_SIZE(gab_props) + + ARRAY_SIZE(gab_chan_name), + sizeof(*properties), + GFP_KERNEL); + if (!properties) { ret = -ENOMEM; goto first_mem_fail; } - memcpy(psy_desc->properties, gab_props, sizeof(gab_props)); + memcpy(properties, gab_props, sizeof(gab_props)); /* * getting channel from iio and copying the battery properties @@ -294,13 +295,11 @@ static int gab_probe(struct platform_device *pdev) int index2; for (index2 = 0; index2 < index; index2++) { - if (psy_desc->properties[index2] == - gab_dyn_props[chan]) + if (properties[index2] == gab_dyn_props[chan]) break; /* already known */ } if (index2 == index) /* really new */ - psy_desc->properties[index++] = - gab_dyn_props[chan]; + properties[index++] = gab_dyn_props[chan]; any = true; } } @@ -317,6 +316,7 @@ static int gab_probe(struct platform_device *pdev) * as come channels may be not be supported by the device.So * we need to take care of that. */ + psy_desc->properties = properties; psy_desc->num_properties = index; adc_bat->psy = power_supply_register(&pdev->dev, psy_desc, &psy_cfg); @@ -358,7 +358,7 @@ err_reg_fail: iio_channel_release(adc_bat->channel[chan]); } second_mem_fail: - kfree(psy_desc->properties); + kfree(properties); first_mem_fail: return ret; } -- GitLab From 9ba2353b2cc58ba13f7a7369208107f133f6a27b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Miros=C5=82aw?= Date: Fri, 3 Apr 2020 22:20:32 +0200 Subject: [PATCH 0246/1971] power: supply: core: allow to constify property lists MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since tables pointed to by power_supply_desc->properties and ->usb_types are not expected to change after registration, mark the pointers accordingly Signed-off-by: Michał Mirosław Signed-off-by: Sebastian Reichel --- include/linux/power_supply.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index dcd5a71e6c677..6a34df65d4d18 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -223,9 +223,9 @@ struct power_supply_config { struct power_supply_desc { const char *name; enum power_supply_type type; - enum power_supply_usb_type *usb_types; + const enum power_supply_usb_type *usb_types; size_t num_usb_types; - enum power_supply_property *properties; + const enum power_supply_property *properties; size_t num_properties; /* -- GitLab From 6b20464ad9fb5fd76ef6f219ce62156aa9639dcc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Miros=C5=82aw?= Date: Fri, 3 Apr 2020 22:20:33 +0200 Subject: [PATCH 0247/1971] power: supply: core: fix HWMON temperature labels MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit tempX_label files are swapped compared to what power_supply_hwmon_temp_to_property() uses. Make them match. 
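For reference, power_supply_hwmon_temp_to_property() treats channel 0 as the battery temperature and channel 1 as the ambient temperature; a simplified sketch (not part of this patch) of that mapping:

        /* simplified: property selected for the hwmon_temp_input attribute */
        switch (channel) {
        case 0:
                return POWER_SUPPLY_PROP_TEMP;          /* label "temp" */
        case 1:
                return POWER_SUPPLY_PROP_TEMP_AMBIENT;  /* label "temp ambient" */
        }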
Cc: stable@vger.kernel.org Fixes: e67d4dfc9ff1 ("power: supply: Add HWMON compatibility layer") Signed-off-by: Michał Mirosław Signed-off-by: Sebastian Reichel --- drivers/power/supply/power_supply_hwmon.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/power/supply/power_supply_hwmon.c b/drivers/power/supply/power_supply_hwmon.c index 75cf861ba492d..67b6ee60085e2 100644 --- a/drivers/power/supply/power_supply_hwmon.c +++ b/drivers/power/supply/power_supply_hwmon.c @@ -144,7 +144,7 @@ static int power_supply_hwmon_read_string(struct device *dev, u32 attr, int channel, const char **str) { - *str = channel ? "temp" : "temp ambient"; + *str = channel ? "temp ambient" : "temp"; return 0; } -- GitLab From b0e4aa97ea4b58881b85697a6f5628d1839d8e59 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Miros=C5=82aw?= Date: Fri, 3 Apr 2020 22:20:34 +0200 Subject: [PATCH 0248/1971] power: supply: core: hide unused HWMON labels MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently HWMON emulation shows all labels (temp and ambient temp) regardless if power supply supports reading the values. Check that at least one property is enabled for each label. Signed-off-by: Michał Mirosław Signed-off-by: Sebastian Reichel --- drivers/power/supply/power_supply_hwmon.c | 42 +++++++++++++++++++++-- 1 file changed, 39 insertions(+), 3 deletions(-) diff --git a/drivers/power/supply/power_supply_hwmon.c b/drivers/power/supply/power_supply_hwmon.c index 67b6ee60085e2..af72e5693f658 100644 --- a/drivers/power/supply/power_supply_hwmon.c +++ b/drivers/power/supply/power_supply_hwmon.c @@ -98,6 +98,39 @@ static bool power_supply_hwmon_is_a_label(enum hwmon_sensor_types type, return type == hwmon_temp && attr == hwmon_temp_label; } +struct hwmon_type_attr_list { + const u32 *attrs; + size_t n_attrs; +}; + +static const u32 ps_temp_attrs[] = { + hwmon_temp_input, + hwmon_temp_min, hwmon_temp_max, + hwmon_temp_min_alarm, hwmon_temp_max_alarm, +}; + +static const struct hwmon_type_attr_list ps_type_attrs[hwmon_max] = { + [hwmon_temp] = { ps_temp_attrs, ARRAY_SIZE(ps_temp_attrs) }, +}; + +static bool power_supply_hwmon_has_input( + const struct power_supply_hwmon *psyhw, + enum hwmon_sensor_types type, int channel) +{ + const struct hwmon_type_attr_list *attr_list = &ps_type_attrs[type]; + size_t i; + + for (i = 0; i < attr_list->n_attrs; ++i) { + int prop = power_supply_hwmon_to_property(type, + attr_list->attrs[i], channel); + + if (prop >= 0 && test_bit(prop, psyhw->props)) + return true; + } + + return false; +} + static bool power_supply_hwmon_is_writable(enum hwmon_sensor_types type, u32 attr) { @@ -124,9 +157,12 @@ static umode_t power_supply_hwmon_is_visible(const void *data, const struct power_supply_hwmon *psyhw = data; int prop; - - if (power_supply_hwmon_is_a_label(type, attr)) - return 0444; + if (power_supply_hwmon_is_a_label(type, attr)) { + if (power_supply_hwmon_has_input(psyhw, type, channel)) + return 0444; + else + return 0; + } prop = power_supply_hwmon_to_property(type, attr, channel); if (prop < 0 || !test_bit(prop, psyhw->props)) -- GitLab From 97ed79f4931964069fc3be7ae5122c3026273f9e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Miros=C5=82aw?= Date: Fri, 1 May 2020 16:30:43 +0200 Subject: [PATCH 0249/1971] power: charger-manager: clarify num_properties starting value MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Initialize num_properties with length of the copied array 
instead of relying on the previously memcpy'd value. This makes it clear how the array and the counter are related. Signed-off-by: Michał Mirosław Signed-off-by: Sebastian Reichel --- drivers/power/supply/charger-manager.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/power/supply/charger-manager.c b/drivers/power/supply/charger-manager.c index 415a9efa68161..2ef53dc1f2fb7 100644 --- a/drivers/power/supply/charger-manager.c +++ b/drivers/power/supply/charger-manager.c @@ -1729,7 +1729,7 @@ static int charger_manager_probe(struct platform_device *pdev) memcpy(properties, default_charger_props, sizeof(enum power_supply_property) * ARRAY_SIZE(default_charger_props)); - num_properties = psy_default.num_properties; + num_properties = ARRAY_SIZE(default_charger_props); /* Find which optional psy-properties are available */ fuel_gauge = power_supply_get_by_name(desc->psy_fuel_gauge); -- GitLab From e83a2e443468664af5421ef5a3499980a4493248 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Miros=C5=82aw?= Date: Fri, 1 May 2020 17:11:18 +0200 Subject: [PATCH 0250/1971] power: supply: core: tabularize HWMON temperature labels MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rework power_supply_hwmon_read_string() to check its parameters. This allows extending it later with labels for other types of measurements. Signed-off-by: Michał Mirosław Signed-off-by: Sebastian Reichel --- drivers/power/supply/power_supply_hwmon.c | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/drivers/power/supply/power_supply_hwmon.c b/drivers/power/supply/power_supply_hwmon.c index af72e5693f658..f5d538485aaad 100644 --- a/drivers/power/supply/power_supply_hwmon.c +++ b/drivers/power/supply/power_supply_hwmon.c @@ -13,6 +13,11 @@ struct power_supply_hwmon { unsigned long *props; }; +static const char *const ps_temp_label[] = { + "temp", + "ambient temp", +}; + static int power_supply_hwmon_in_to_property(u32 attr) { switch (attr) { @@ -180,7 +185,20 @@ static int power_supply_hwmon_read_string(struct device *dev, u32 attr, int channel, const char **str) { - *str = channel ? "temp ambient" : "temp"; + switch (type) { + case hwmon_temp: + *str = ps_temp_label[channel]; + break; + default: + /* unreachable, but see: + * gcc bug #51513 [1] and clang bug #978 [2] + * + * [1] https://gcc.gnu.org/bugzilla/show_bug.cgi?id=51513 + * [2] https://github.com/ClangBuiltLinux/linux/issues/978 + */ + break; + } + return 0; } -- GitLab From a29ae8600d50ece1856b062a39ed296b8b952259 Mon Sep 17 00:00:00 2001 From: Martin Blumenstingl Date: Fri, 1 May 2020 23:57:17 +0200 Subject: [PATCH 0251/1971] clk: meson: meson8b: Don't rely on u-boot to init all GP_PLL registers Not all u-boot versions initialize the HHI_GP_PLL_CNTL[2-5] registers. In that case all HHI_GP_PLL_CNTL[1-5] registers are 0x0 and when booting Linux the PLL fails to lock. The initialization sequence from u-boot is: - put the PLL into reset - write 0x59C88000 to HHI_GP_PLL_CNTL2 - write 0xCA463823 to HHI_GP_PLL_CNTL3 - write 0x0286A027 to HHI_GP_PLL_CNTL4 - write 0x00003000 to HHI_GP_PLL_CNTL5 - set M, N, OD and the enable bit - take the PLL out of reset - check if it has locked - disable the PLL In Linux we already initialize M, N, OD, the enable and the reset bits. Also initialize the HHI_GP_PLL_CNTL[2-5] registers with these magic values (the exact meaning is unknown) so the PLL can lock when the vendor u-boot did not initialize these registers yet.
Fixes: b882964b376f21 ("clk: meson: meson8b: add support for the GP_PLL clock on Meson8m2") Signed-off-by: Martin Blumenstingl Signed-off-by: Jerome Brunet Link: https://lore.kernel.org/r/20200501215717.735393-1-martin.blumenstingl@googlemail.com --- drivers/clk/meson/meson8b.c | 9 +++++++++ drivers/clk/meson/meson8b.h | 4 ++++ 2 files changed, 13 insertions(+) diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c index 811af1c114562..edc09d050ecf4 100644 --- a/drivers/clk/meson/meson8b.c +++ b/drivers/clk/meson/meson8b.c @@ -1918,6 +1918,13 @@ static struct clk_regmap meson8b_mali = { }, }; +static const struct reg_sequence meson8m2_gp_pll_init_regs[] = { + { .reg = HHI_GP_PLL_CNTL2, .def = 0x59c88000 }, + { .reg = HHI_GP_PLL_CNTL3, .def = 0xca463823 }, + { .reg = HHI_GP_PLL_CNTL4, .def = 0x0286a027 }, + { .reg = HHI_GP_PLL_CNTL5, .def = 0x00003000 }, +}; + static const struct pll_params_table meson8m2_gp_pll_params_table[] = { PLL_PARAMS(182, 3), { /* sentinel */ }, @@ -1951,6 +1958,8 @@ static struct clk_regmap meson8m2_gp_pll_dco = { .width = 1, }, .table = meson8m2_gp_pll_params_table, + .init_regs = meson8m2_gp_pll_init_regs, + .init_count = ARRAY_SIZE(meson8m2_gp_pll_init_regs), }, .hw.init = &(struct clk_init_data){ .name = "gp_pll_dco", diff --git a/drivers/clk/meson/meson8b.h b/drivers/clk/meson/meson8b.h index 94ce3ef0c1d5b..cd38ae2a9cb5a 100644 --- a/drivers/clk/meson/meson8b.h +++ b/drivers/clk/meson/meson8b.h @@ -20,6 +20,10 @@ * [0] http://dn.odroid.com/S805/Datasheet/S805_Datasheet%20V0.8%2020150126.pdf */ #define HHI_GP_PLL_CNTL 0x40 /* 0x10 offset in data sheet */ +#define HHI_GP_PLL_CNTL2 0x44 /* 0x11 offset in data sheet */ +#define HHI_GP_PLL_CNTL3 0x48 /* 0x12 offset in data sheet */ +#define HHI_GP_PLL_CNTL4 0x4C /* 0x13 offset in data sheet */ +#define HHI_GP_PLL_CNTL5 0x50 /* 0x14 offset in data sheet */ #define HHI_VIID_CLK_DIV 0x128 /* 0x4a offset in data sheet */ #define HHI_VIID_CLK_CNTL 0x12c /* 0x4b offset in data sheet */ #define HHI_GCLK_MPEG0 0x140 /* 0x50 offset in data sheet */ -- GitLab From 516431ae669bf4e8d6e6ce5e5347e9f9739b0947 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 23 Apr 2020 14:19:14 -0400 Subject: [PATCH 0252/1971] i915: switch query_{topology,engine}_info() to copy_to_user() Signed-off-by: Al Viro --- drivers/gpu/drm/i915/i915_query.c | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c index ef25ce6e395e2..ad8e55fe1e598 100644 --- a/drivers/gpu/drm/i915/i915_query.c +++ b/drivers/gpu/drm/i915/i915_query.c @@ -25,10 +25,6 @@ static int copy_query_item(void *query_hdr, size_t query_sz, query_sz)) return -EFAULT; - if (!access_ok(u64_to_user_ptr(query_item->data_ptr), - total_length)) - return -EFAULT; - return 0; } @@ -72,20 +68,20 @@ static int query_topology_info(struct drm_i915_private *dev_priv, topo.eu_offset = slice_length + subslice_length; topo.eu_stride = sseu->eu_stride; - if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr), + if (copy_to_user(u64_to_user_ptr(query_item->data_ptr), &topo, sizeof(topo))) return -EFAULT; - if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo)), + if (copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo)), &sseu->slice_mask, slice_length)) return -EFAULT; - if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr + + if (copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo) + slice_length), sseu->subslice_mask, subslice_length)) return 
-EFAULT; - if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr + + if (copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo) + slice_length + subslice_length), sseu->eu_mask, eu_length)) @@ -131,14 +127,14 @@ query_engine_info(struct drm_i915_private *i915, info.engine.engine_instance = engine->uabi_instance; info.capabilities = engine->uabi_capabilities; - if (__copy_to_user(info_ptr, &info, sizeof(info))) + if (copy_to_user(info_ptr, &info, sizeof(info))) return -EFAULT; query.num_engines++; info_ptr++; } - if (__copy_to_user(query_ptr, &query, sizeof(query))) + if (copy_to_user(query_ptr, &query, sizeof(query))) return -EFAULT; return len; -- GitLab From 502f78c8d784eabc0f75c6372f61ca796c8af707 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 23 Apr 2020 14:29:05 -0400 Subject: [PATCH 0253/1971] i915: switch copy_perf_config_registers_or_number() to unsafe_put_user() ... and the rest of query_perf_config_data() to normal uaccess primitives Signed-off-by: Al Viro --- drivers/gpu/drm/i915/i915_query.c | 46 +++++++++++-------------------- drivers/gpu/drm/i915/i915_reg.h | 2 +- 2 files changed, 17 insertions(+), 31 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c index ad8e55fe1e598..e75c528ebbe00 100644 --- a/drivers/gpu/drm/i915/i915_query.c +++ b/drivers/gpu/drm/i915/i915_query.c @@ -154,10 +154,6 @@ static int can_copy_perf_config_registers_or_number(u32 user_n_regs, if (user_n_regs < kernel_n_regs) return -EINVAL; - if (!access_ok(u64_to_user_ptr(user_regs_ptr), - 2 * sizeof(u32) * kernel_n_regs)) - return -EFAULT; - return 0; } @@ -166,6 +162,7 @@ static int copy_perf_config_registers_or_number(const struct i915_oa_reg *kernel u64 user_regs_ptr, u32 *user_n_regs) { + u32 __user *p = u64_to_user_ptr(user_regs_ptr); u32 r; if (*user_n_regs == 0) { @@ -175,25 +172,19 @@ static int copy_perf_config_registers_or_number(const struct i915_oa_reg *kernel *user_n_regs = kernel_n_regs; - for (r = 0; r < kernel_n_regs; r++) { - u32 __user *user_reg_ptr = - u64_to_user_ptr(user_regs_ptr + sizeof(u32) * r * 2); - u32 __user *user_val_ptr = - u64_to_user_ptr(user_regs_ptr + sizeof(u32) * r * 2 + - sizeof(u32)); - int ret; - - ret = __put_user(i915_mmio_reg_offset(kernel_regs[r].addr), - user_reg_ptr); - if (ret) - return -EFAULT; + if (!user_write_access_begin(p, 2 * sizeof(u32) * kernel_n_regs)) + return -EFAULT; - ret = __put_user(kernel_regs[r].value, user_val_ptr); - if (ret) - return -EFAULT; + for (r = 0; r < kernel_n_regs; r++, p += 2) { + unsafe_put_user(i915_mmio_reg_offset(kernel_regs[r].addr), + p, Efault); + unsafe_put_user(kernel_regs[r].value, p + 1, Efault); } - + user_write_access_end(); return 0; +Efault: + user_write_access_end(); + return -EFAULT; } static int query_perf_config_data(struct drm_i915_private *i915, @@ -229,10 +220,7 @@ static int query_perf_config_data(struct drm_i915_private *i915, return -EINVAL; } - if (!access_ok(user_query_config_ptr, total_size)) - return -EFAULT; - - if (__get_user(flags, &user_query_config_ptr->flags)) + if (get_user(flags, &user_query_config_ptr->flags)) return -EFAULT; if (flags != 0) @@ -245,7 +233,7 @@ static int query_perf_config_data(struct drm_i915_private *i915, BUILD_BUG_ON(sizeof(user_query_config_ptr->uuid) >= sizeof(uuid)); memset(&uuid, 0, sizeof(uuid)); - if (__copy_from_user(uuid, user_query_config_ptr->uuid, + if (copy_from_user(uuid, user_query_config_ptr->uuid, sizeof(user_query_config_ptr->uuid))) return -EFAULT; @@ -259,7 +247,7 @@ static int 
query_perf_config_data(struct drm_i915_private *i915, } rcu_read_unlock(); } else { - if (__get_user(config_id, &user_query_config_ptr->config)) + if (get_user(config_id, &user_query_config_ptr->config)) return -EFAULT; oa_config = i915_perf_get_oa_config(perf, config_id); @@ -267,8 +255,7 @@ static int query_perf_config_data(struct drm_i915_private *i915, if (!oa_config) return -ENOENT; - if (__copy_from_user(&user_config, user_config_ptr, - sizeof(user_config))) { + if (copy_from_user(&user_config, user_config_ptr, sizeof(user_config))) { ret = -EFAULT; goto out; } @@ -314,8 +301,7 @@ static int query_perf_config_data(struct drm_i915_private *i915, memcpy(user_config.uuid, oa_config->uuid, sizeof(user_config.uuid)); - if (__copy_to_user(user_config_ptr, &user_config, - sizeof(user_config))) { + if (copy_to_user(user_config_ptr, &user_config, sizeof(user_config))) { ret = -EFAULT; goto out; } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 59e64acc2c568..3733b9e20976d 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -186,7 +186,7 @@ typedef struct { #define INVALID_MMIO_REG _MMIO(0) -static inline u32 i915_mmio_reg_offset(i915_reg_t reg) +static __always_inline u32 i915_mmio_reg_offset(i915_reg_t reg) { return reg.reg; } -- GitLab From fd7cd8da232d892f2b7dc4766f8a05a81043ee5e Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 19 Feb 2020 11:20:34 -0500 Subject: [PATCH 0254/1971] i915 compat ioctl(): just use drm_ioctl_kernel() compat_alloc_user_space() is a bad kludge; the sooner it goes, the better... Signed-off-by: Al Viro --- drivers/gpu/drm/i915/i915_ioc32.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c index 8e45ca3d2ede1..55b97c3a3dde6 100644 --- a/drivers/gpu/drm/i915/i915_ioc32.c +++ b/drivers/gpu/drm/i915/i915_ioc32.c @@ -47,20 +47,16 @@ static int compat_i915_getparam(struct file *file, unsigned int cmd, unsigned long arg) { struct drm_i915_getparam32 req32; - drm_i915_getparam_t __user *request; + struct drm_i915_getparam req; if (copy_from_user(&req32, (void __user *)arg, sizeof(req32))) return -EFAULT; - request = compat_alloc_user_space(sizeof(*request)); - if (!access_ok(request, sizeof(*request)) || - __put_user(req32.param, &request->param) || - __put_user((void __user *)(unsigned long)req32.value, - &request->value)) - return -EFAULT; + req.param = req32.param; + req.value = compat_ptr(req32.value); - return drm_ioctl(file, DRM_IOCTL_I915_GETPARAM, - (unsigned long)request); + return drm_ioctl_kernel(file, i915_getparam_ioctl, &req, + DRM_RENDER_ALLOW); } static drm_ioctl_compat_t *i915_compat_ioctls[] = { -- GitLab From 598caf1a4032bd52040a57739342a0a97eace757 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 23 Apr 2020 14:32:45 -0400 Subject: [PATCH 0255/1971] i915: alloc_oa_regs(): get rid of pointless access_ok() Signed-off-by: Al Viro --- drivers/gpu/drm/i915/i915_perf.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 551be589d6f48..a52461016935b 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -3949,9 +3949,6 @@ static struct i915_oa_reg *alloc_oa_regs(struct i915_perf *perf, if (!n_regs) return NULL; - if (!access_ok(regs, n_regs * sizeof(u32) * 2)) - return ERR_PTR(-EFAULT); - /* No is_valid function means we're not allowing any register to be programmed. 
*/ GEM_BUG_ON(!is_valid); if (!is_valid) -- GitLab From 7b3f0c4c56b08a86f890cad3599242c78c683aa9 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 25 Apr 2020 13:13:48 -0400 Subject: [PATCH 0256/1971] i915:get_engines(): get rid of pointless access_ok() Signed-off-by: Al Viro --- drivers/gpu/drm/i915/gem/i915_gem_context.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index 68326ad3b2e09..55cb6dbfe61f2 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -1924,11 +1924,6 @@ get_engines(struct i915_gem_context *ctx, } user = u64_to_user_ptr(args->value); - if (!access_ok(user, size)) { - err = -EFAULT; - goto err_free; - } - if (put_user(0, &user->extensions)) { err = -EFAULT; goto err_free; -- GitLab From 21d4cdf8b3c4352c38a36acee4c5bdc44cf5a1bc Mon Sep 17 00:00:00 2001 From: Dan Murphy Date: Fri, 1 May 2020 12:29:13 -0500 Subject: [PATCH 0257/1971] dt-bindings: power: Convert power_supply text to yaml Convert the power_supply.txt to power-supply.yaml. This conversion entailed fixing up the binding to being yaml and dt checker compliant. Added a note in the power_supply.txt to reference the power-supply.yaml Signed-off-by: Dan Murphy Signed-off-by: Sebastian Reichel --- .../bindings/power/supply/power-supply.yaml | 40 +++++++++++++++++++ .../bindings/power/supply/power_supply.txt | 25 +----------- 2 files changed, 42 insertions(+), 23 deletions(-) create mode 100644 Documentation/devicetree/bindings/power/supply/power-supply.yaml diff --git a/Documentation/devicetree/bindings/power/supply/power-supply.yaml b/Documentation/devicetree/bindings/power/supply/power-supply.yaml new file mode 100644 index 0000000000000..3bb02bb3a2d8b --- /dev/null +++ b/Documentation/devicetree/bindings/power/supply/power-supply.yaml @@ -0,0 +1,40 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: "http://devicetree.org/schemas/power/supply/power-supply.yaml#" +$schema: "http://devicetree.org/meta-schemas/core.yaml#" + +title: Power Supply Core Support + +maintainers: + - Sebastian Reichel + +properties: + power-supplies: + $ref: /schemas/types.yaml#/definitions/phandle-array + description: + This property is added to a supply in order to list the devices which + supply it power, referenced by their phandles. + +examples: + - | + power { + #address-cells = <1>; + #size-cells = <0>; + + usb_charger:charger@e { + compatible = "some,usb-charger"; + reg = <0xe>; + }; + + ac_charger:charger@c { + compatible = "some,ac-charger"; + reg = <0xc>; + }; + + battery:battery@b { + compatible = "some,battery"; + reg = <0xb>; + power-supplies = <&usb_charger>, <&ac_charger>; + }; + }; diff --git a/Documentation/devicetree/bindings/power/supply/power_supply.txt b/Documentation/devicetree/bindings/power/supply/power_supply.txt index 8391bfa0edac5..d9693e0545090 100644 --- a/Documentation/devicetree/bindings/power/supply/power_supply.txt +++ b/Documentation/devicetree/bindings/power/supply/power_supply.txt @@ -1,23 +1,2 @@ -Power Supply Core Support - -Optional Properties: - - power-supplies : This property is added to a supply in order to list the - devices which supply it power, referenced by their phandles. - -Example: - - usb-charger: power@e { - compatible = "some,usb-charger"; - ... - }; - - ac-charger: power@c { - compatible = "some,ac-charger"; - ... - }; - - battery@b { - compatible = "some,battery"; - ... 
- power-supplies = <&usb-charger>, <&ac-charger>; - }; +This binding has been converted to yaml please see power-supply.yaml in this +directory. -- GitLab From 1d7a7128a2e9e1f137c99b0a44e94d70a77343e3 Mon Sep 17 00:00:00 2001 From: Qiushi Wu Date: Sat, 2 May 2020 18:33:38 -0500 Subject: [PATCH 0258/1971] power: supply: core: fix memory leak in HWMON error path In function power_supply_add_hwmon_sysfs(), psyhw->props is allocated by bitmap_zalloc(). But this pointer is not deallocated when devm_add_action fail, which lead to a memory leak bug. To fix this, we replace devm_add_action with devm_add_action_or_reset. Cc: stable@kernel.org Fixes: e67d4dfc9ff19 ("power: supply: Add HWMON compatibility layer") Signed-off-by: Qiushi Wu Signed-off-by: Sebastian Reichel --- drivers/power/supply/power_supply_hwmon.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/power/supply/power_supply_hwmon.c b/drivers/power/supply/power_supply_hwmon.c index f5d538485aaad..7fe4b6b6ddc89 100644 --- a/drivers/power/supply/power_supply_hwmon.c +++ b/drivers/power/supply/power_supply_hwmon.c @@ -358,7 +358,7 @@ int power_supply_add_hwmon_sysfs(struct power_supply *psy) goto error; } - ret = devm_add_action(dev, power_supply_hwmon_bitmap_free, + ret = devm_add_action_or_reset(dev, power_supply_hwmon_bitmap_free, psyhw->props); if (ret) goto error; -- GitLab From 9521244c3f1d30f0e796b08752cf987408023a12 Mon Sep 17 00:00:00 2001 From: Tobias Schramm Date: Tue, 14 Apr 2020 14:52:06 +0200 Subject: [PATCH 0259/1971] dt-bindings: Document cellwise vendor-prefix Signed-off-by: Tobias Schramm Acked-by: Rob Herring Signed-off-by: Sebastian Reichel --- Documentation/devicetree/bindings/vendor-prefixes.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml index d3891386d6710..58cf4e8b8d56a 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.yaml +++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml @@ -179,6 +179,8 @@ patternProperties: description: Cadence Design Systems Inc. "^cdtech,.*": description: CDTech(H.K.) Electronics Limited + "^cellwise,.*": + description: CellWise Microelectronics Co., Ltd "^ceva,.*": description: Ceva, Inc. "^chipidea,.*": -- GitLab From 6e776188635ace8d1f5c032d8c8e8f0d4fa33b64 Mon Sep 17 00:00:00 2001 From: Tobias Schramm Date: Tue, 14 Apr 2020 14:52:07 +0200 Subject: [PATCH 0260/1971] dt-bindings: power: supply: add cw2015_battery bindings This patch adds the dts binding schema for the cw2015 fuel gauge. Signed-off-by: Tobias Schramm Acked-by: Rob Herring Signed-off-by: Sebastian Reichel --- .../bindings/power/supply/cw2015_battery.yaml | 82 +++++++++++++++++++ 1 file changed, 82 insertions(+) create mode 100644 Documentation/devicetree/bindings/power/supply/cw2015_battery.yaml diff --git a/Documentation/devicetree/bindings/power/supply/cw2015_battery.yaml b/Documentation/devicetree/bindings/power/supply/cw2015_battery.yaml new file mode 100644 index 0000000000000..4a265d4234b90 --- /dev/null +++ b/Documentation/devicetree/bindings/power/supply/cw2015_battery.yaml @@ -0,0 +1,82 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/power/supply/cw2015_battery.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Battery driver for CW2015 shuntless fuel gauge by CellWise. 
+ +maintainers: + - Tobias Schramm + +description: | + The driver can utilize information from a simple-battery linked via a + phandle in monitored-battery. If specified the driver uses the + charge-full-design-microamp-hours property of the battery. + +properties: + compatible: + const: cellwise,cw2015 + + reg: + maxItems: 1 + + cellwise,battery-profile: + description: | + This property specifies characteristics of the battery used. The format + of this binary blob is kept secret by CellWise. The only way to obtain + it is to mail two batteries to a test facility of CellWise and receive + back a test report with the binary blob. + allOf: + - $ref: /schemas/types.yaml#definitions/uint8-array + items: + - minItems: 64 + maxItems: 64 + + cellwise,monitor-interval-ms: + description: + Specifies the interval in milliseconds gauge values are polled at + minimum: 250 + + power-supplies: + description: + Specifies supplies used for charging the battery connected to this gauge + allOf: + - $ref: /schemas/types.yaml#/definitions/phandle-array + - minItems: 1 + maxItems: 8 # Should be enough + + monitored-battery: + description: + Specifies the phandle of a simple-battery connected to this gauge + $ref: /schemas/types.yaml#/definitions/phandle + +required: + - compatible + - reg + +examples: + - | + i2c { + #address-cells = <1>; + #size-cells = <0>; + + cw2015@62 { + compatible = "cellwise,cw201x"; + reg = <0x62>; + cellwise,battery-profile = /bits/ 8 < + 0x17 0x67 0x80 0x73 0x6E 0x6C 0x6B 0x63 + 0x77 0x51 0x5C 0x58 0x50 0x4C 0x48 0x36 + 0x15 0x0C 0x0C 0x19 0x5B 0x7D 0x6F 0x69 + 0x69 0x5B 0x0C 0x29 0x20 0x40 0x52 0x59 + 0x57 0x56 0x54 0x4F 0x3B 0x1F 0x7F 0x17 + 0x06 0x1A 0x30 0x5A 0x85 0x93 0x96 0x2D + 0x48 0x77 0x9C 0xB3 0x80 0x52 0x94 0xCB + 0x2F 0x00 0x64 0xA5 0xB5 0x11 0xF0 0x11 + >; + cellwise,monitor-interval-ms = <5000>; + monitored-battery = <&bat>; + power-supplies = <&mains_charger>, <&usb_charger>; + }; + }; + -- GitLab From b4c7715c10c106a041b0b3fabd26151c214ea394 Mon Sep 17 00:00:00 2001 From: Tobias Schramm Date: Tue, 14 Apr 2020 14:52:08 +0200 Subject: [PATCH 0261/1971] power: supply: add CellWise cw2015 fuel gauge driver This patch adds a driver for the CellWise cw2015 fuel gauge. The CellWise cw2015 is a shuntless, single-cell Li-Ion fuel gauge used in the pine64 Pinebook Pro laptop and some Raspberry Pi UPS HATs. Signed-off-by: Tobias Schramm Reviewed-by: Andy Shevchenko Signed-off-by: Sebastian Reichel --- MAINTAINERS | 6 + drivers/power/supply/Kconfig | 11 + drivers/power/supply/Makefile | 1 + drivers/power/supply/cw2015_battery.c | 749 ++++++++++++++++++++++++++ 4 files changed, 767 insertions(+) create mode 100644 drivers/power/supply/cw2015_battery.c diff --git a/MAINTAINERS b/MAINTAINERS index 9821691396f83..2774b8ba0eb52 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3932,6 +3932,12 @@ F: arch/powerpc/include/uapi/asm/spu*.h F: arch/powerpc/oprofile/*cell* F: arch/powerpc/platforms/cell/ +CELLWISE CW2015 BATTERY DRIVER +M: Tobias Schrammm +S: Maintained +F: Documentation/devicetree/bindings/power/supply/cw2015_battery.yaml +F: drivers/power/supply/cw2015_battery.c + CEPH COMMON CODE (LIBCEPH) M: Ilya Dryomov M: Jeff Layton diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig index d37ec0d03237f..efdcf49f27bfe 100644 --- a/drivers/power/supply/Kconfig +++ b/drivers/power/supply/Kconfig @@ -116,6 +116,17 @@ config BATTERY_CPCAP Say Y here to enable support for battery on Motorola phones and tablets such as droid 4. 
+config BATTERY_CW2015 + tristate "CW2015 Battery driver" + depends on I2C + select REGMAP_I2C + help + Say Y here to enable support for the cellwise cw2015 + battery fuel gauge (used in the Pinebook Pro & others) + + This driver can also be built as a module. If so, the module will be + called cw2015_battery. + config BATTERY_DS2760 tristate "DS2760 battery driver (HP iPAQ & others)" depends on W1 diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile index 6c7da920ea830..69727a10e835b 100644 --- a/drivers/power/supply/Makefile +++ b/drivers/power/supply/Makefile @@ -24,6 +24,7 @@ obj-$(CONFIG_BATTERY_ACT8945A) += act8945a_charger.o obj-$(CONFIG_BATTERY_AXP20X) += axp20x_battery.o obj-$(CONFIG_CHARGER_AXP20X) += axp20x_ac_power.o obj-$(CONFIG_BATTERY_CPCAP) += cpcap-battery.o +obj-$(CONFIG_BATTERY_CW2015) += cw2015_battery.o obj-$(CONFIG_BATTERY_DS2760) += ds2760_battery.o obj-$(CONFIG_BATTERY_DS2780) += ds2780_battery.o obj-$(CONFIG_BATTERY_DS2781) += ds2781_battery.o diff --git a/drivers/power/supply/cw2015_battery.c b/drivers/power/supply/cw2015_battery.c new file mode 100644 index 0000000000000..8603da6f45930 --- /dev/null +++ b/drivers/power/supply/cw2015_battery.c @@ -0,0 +1,749 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Fuel gauge driver for CellWise 2013 / 2015 + * + * Copyright (C) 2012, RockChip + * Copyright (C) 2020, Tobias Schramm + * + * Authors: xuhuicong + * Authors: Tobias Schramm + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CW2015_SIZE_BATINFO 64 + +#define CW2015_RESET_TRIES 5 + +#define CW2015_REG_VERSION 0x00 +#define CW2015_REG_VCELL 0x02 +#define CW2015_REG_SOC 0x04 +#define CW2015_REG_RRT_ALERT 0x06 +#define CW2015_REG_CONFIG 0x08 +#define CW2015_REG_MODE 0x0A +#define CW2015_REG_BATINFO 0x10 + +#define CW2015_MODE_SLEEP_MASK GENMASK(7, 6) +#define CW2015_MODE_SLEEP (0x03 << 6) +#define CW2015_MODE_NORMAL (0x00 << 6) +#define CW2015_MODE_QUICK_START (0x03 << 4) +#define CW2015_MODE_RESTART (0x0f << 0) + +#define CW2015_CONFIG_UPDATE_FLG (0x01 << 1) +#define CW2015_ATHD(x) ((x) << 3) +#define CW2015_MASK_ATHD GENMASK(7, 3) +#define CW2015_MASK_SOC GENMASK(12, 0) + +/* reset gauge of no valid state of charge could be polled for 40s */ +#define CW2015_BAT_SOC_ERROR_MS (40 * MSEC_PER_SEC) +/* reset gauge if state of charge stuck for half an hour during charging */ +#define CW2015_BAT_CHARGING_STUCK_MS (1800 * MSEC_PER_SEC) + +/* poll interval from CellWise GPL Android driver example */ +#define CW2015_DEFAULT_POLL_INTERVAL_MS 8000 + +#define CW2015_AVERAGING_SAMPLES 3 + +struct cw_battery { + struct device *dev; + struct workqueue_struct *battery_workqueue; + struct delayed_work battery_delay_work; + struct regmap *regmap; + struct power_supply *rk_bat; + struct power_supply_battery_info battery; + u8 *bat_profile; + + bool charger_attached; + bool battery_changed; + + int soc; + int voltage_mv; + int status; + int time_to_empty; + int charge_count; + + u32 poll_interval_ms; + u8 alert_level; + + unsigned int read_errors; + unsigned int charge_stuck_cnt; +}; + +static int cw_read_word(struct cw_battery *cw_bat, u8 reg, u16 *val) +{ + __be16 value; + int ret; + + ret = regmap_bulk_read(cw_bat->regmap, reg, &value, sizeof(value)); + if (ret) + return ret; + + *val = be16_to_cpu(value); + return 0; +} + +int cw_update_profile(struct cw_battery *cw_bat) +{ + int ret; + unsigned int reg_val; + u8 reset_val; + + /* make sure gauge is not in sleep mode */ + 
ret = regmap_read(cw_bat->regmap, CW2015_REG_MODE, ®_val); + if (ret) + return ret; + + reset_val = reg_val; + if ((reg_val & CW2015_MODE_SLEEP_MASK) == CW2015_MODE_SLEEP) { + dev_err(cw_bat->dev, + "Gauge is in sleep mode, can't update battery info\n"); + return -EINVAL; + } + + /* write new battery info */ + ret = regmap_raw_write(cw_bat->regmap, CW2015_REG_BATINFO, + cw_bat->bat_profile, + CW2015_SIZE_BATINFO); + if (ret) + return ret; + + /* set config update flag */ + reg_val |= CW2015_CONFIG_UPDATE_FLG; + reg_val &= ~CW2015_MASK_ATHD; + reg_val |= CW2015_ATHD(cw_bat->alert_level); + ret = regmap_write(cw_bat->regmap, CW2015_REG_CONFIG, reg_val); + if (ret) + return ret; + + /* reset gauge to apply new battery profile */ + reset_val &= ~CW2015_MODE_RESTART; + reg_val = reset_val | CW2015_MODE_RESTART; + ret = regmap_write(cw_bat->regmap, CW2015_REG_MODE, reg_val); + if (ret) + return ret; + + /* wait for gauge to reset */ + msleep(20); + + /* clear reset flag */ + ret = regmap_write(cw_bat->regmap, CW2015_REG_MODE, reset_val); + if (ret) + return ret; + + /* wait for gauge to become ready */ + ret = regmap_read_poll_timeout(cw_bat->regmap, CW2015_REG_SOC, + reg_val, reg_val <= 100, + 10 * USEC_PER_MSEC, 10 * USEC_PER_SEC); + if (ret) + dev_err(cw_bat->dev, + "Gauge did not become ready after profile upload\n"); + else + dev_dbg(cw_bat->dev, "Battery profile updated\n"); + + return ret; +} + +static int cw_init(struct cw_battery *cw_bat) +{ + int ret; + unsigned int reg_val = CW2015_MODE_SLEEP; + + if ((reg_val & CW2015_MODE_SLEEP_MASK) == CW2015_MODE_SLEEP) { + reg_val = CW2015_MODE_NORMAL; + ret = regmap_write(cw_bat->regmap, CW2015_REG_MODE, reg_val); + if (ret) + return ret; + } + + ret = regmap_read(cw_bat->regmap, CW2015_REG_CONFIG, ®_val); + if (ret) + return ret; + + if ((reg_val & CW2015_MASK_ATHD) != CW2015_ATHD(cw_bat->alert_level)) { + dev_dbg(cw_bat->dev, "Setting new alert level\n"); + reg_val &= ~CW2015_MASK_ATHD; + reg_val |= ~CW2015_ATHD(cw_bat->alert_level); + ret = regmap_write(cw_bat->regmap, CW2015_REG_CONFIG, reg_val); + if (ret) + return ret; + } + + ret = regmap_read(cw_bat->regmap, CW2015_REG_CONFIG, ®_val); + if (ret) + return ret; + + if (!(reg_val & CW2015_CONFIG_UPDATE_FLG)) { + dev_dbg(cw_bat->dev, + "Battery profile not present, uploading battery profile\n"); + if (cw_bat->bat_profile) { + ret = cw_update_profile(cw_bat); + if (ret) { + dev_err(cw_bat->dev, + "Failed to upload battery profile\n"); + return ret; + } + } else { + dev_warn(cw_bat->dev, + "No profile specified, continuing without profile\n"); + } + } else if (cw_bat->bat_profile) { + u8 bat_info[CW2015_SIZE_BATINFO]; + + ret = regmap_raw_read(cw_bat->regmap, CW2015_REG_BATINFO, + bat_info, CW2015_SIZE_BATINFO); + if (ret) { + dev_err(cw_bat->dev, + "Failed to read stored battery profile\n"); + return ret; + } + + if (memcmp(bat_info, cw_bat->bat_profile, CW2015_SIZE_BATINFO)) { + dev_warn(cw_bat->dev, "Replacing stored battery profile\n"); + ret = cw_update_profile(cw_bat); + if (ret) + return ret; + } + } else { + dev_warn(cw_bat->dev, + "Can't check current battery profile, no profile provided\n"); + } + + dev_dbg(cw_bat->dev, "Battery profile configured\n"); + return 0; +} + +static int cw_power_on_reset(struct cw_battery *cw_bat) +{ + int ret; + unsigned char reset_val; + + reset_val = CW2015_MODE_SLEEP; + ret = regmap_write(cw_bat->regmap, CW2015_REG_MODE, reset_val); + if (ret) + return ret; + + /* wait for gauge to enter sleep */ + msleep(20); + + reset_val = CW2015_MODE_NORMAL; + ret 
= regmap_write(cw_bat->regmap, CW2015_REG_MODE, reset_val); + if (ret) + return ret; + + ret = cw_init(cw_bat); + if (ret) + return ret; + return 0; +} + +#define HYSTERESIS(current, previous, up, down) \ + (((current) < (previous) + (up)) && ((current) > (previous) - (down))) + +static int cw_get_soc(struct cw_battery *cw_bat) +{ + unsigned int soc; + int ret; + + ret = regmap_read(cw_bat->regmap, CW2015_REG_SOC, &soc); + if (ret) + return ret; + + if (soc > 100) { + int max_error_cycles = + CW2015_BAT_SOC_ERROR_MS / cw_bat->poll_interval_ms; + + dev_err(cw_bat->dev, "Invalid SoC %d%%\n", soc); + cw_bat->read_errors++; + if (cw_bat->read_errors > max_error_cycles) { + dev_warn(cw_bat->dev, + "Too many invalid SoC reports, resetting gauge\n"); + cw_power_on_reset(cw_bat); + cw_bat->read_errors = 0; + } + return cw_bat->soc; + } + cw_bat->read_errors = 0; + + /* Reset gauge if stuck while charging */ + if (cw_bat->status == POWER_SUPPLY_STATUS_CHARGING && soc == cw_bat->soc) { + int max_stuck_cycles = + CW2015_BAT_CHARGING_STUCK_MS / cw_bat->poll_interval_ms; + + cw_bat->charge_stuck_cnt++; + if (cw_bat->charge_stuck_cnt > max_stuck_cycles) { + dev_warn(cw_bat->dev, + "SoC stuck @%u%%, resetting gauge\n", soc); + cw_power_on_reset(cw_bat); + cw_bat->charge_stuck_cnt = 0; + } + } else { + cw_bat->charge_stuck_cnt = 0; + } + + /* Ignore voltage dips during charge */ + if (cw_bat->charger_attached && HYSTERESIS(soc, cw_bat->soc, 0, 3)) + soc = cw_bat->soc; + + /* Ignore voltage spikes during discharge */ + if (!cw_bat->charger_attached && HYSTERESIS(soc, cw_bat->soc, 3, 0)) + soc = cw_bat->soc; + + return soc; +} + +static int cw_get_voltage(struct cw_battery *cw_bat) +{ + int ret, i, voltage_mv; + u16 reg_val; + u32 avg = 0; + + for (i = 0; i < CW2015_AVERAGING_SAMPLES; i++) { + ret = cw_read_word(cw_bat, CW2015_REG_VCELL, ®_val); + if (ret) + return ret; + + avg += reg_val; + } + avg /= CW2015_AVERAGING_SAMPLES; + + /* + * 305 uV per ADC step + * Use 312 / 1024 as efficient approximation of 305 / 1000 + * Negligible error of 0.1% + */ + voltage_mv = avg * 312 / 1024; + + dev_dbg(cw_bat->dev, "Read voltage: %d mV, raw=0x%04x\n", + voltage_mv, reg_val); + return voltage_mv; +} + +static int cw_get_time_to_empty(struct cw_battery *cw_bat) +{ + int ret; + u16 value16; + + ret = cw_read_word(cw_bat, CW2015_REG_RRT_ALERT, &value16); + if (ret) + return ret; + + return value16 & CW2015_MASK_SOC; +} + +static void cw_update_charge_status(struct cw_battery *cw_bat) +{ + int ret; + + ret = power_supply_am_i_supplied(cw_bat->rk_bat); + if (ret < 0) { + dev_warn(cw_bat->dev, "Failed to get supply state: %d\n", ret); + } else { + bool charger_attached; + + charger_attached = !!ret; + if (cw_bat->charger_attached != charger_attached) { + cw_bat->battery_changed = true; + if (charger_attached) + cw_bat->charge_count++; + } + cw_bat->charger_attached = charger_attached; + } +} + +static void cw_update_soc(struct cw_battery *cw_bat) +{ + int soc; + + soc = cw_get_soc(cw_bat); + if (soc < 0) + dev_err(cw_bat->dev, "Failed to get SoC from gauge: %d\n", soc); + else if (cw_bat->soc != soc) { + cw_bat->soc = soc; + cw_bat->battery_changed = true; + } +} + +static void cw_update_voltage(struct cw_battery *cw_bat) +{ + int voltage_mv; + + voltage_mv = cw_get_voltage(cw_bat); + if (voltage_mv < 0) + dev_err(cw_bat->dev, "Failed to get voltage from gauge: %d\n", + voltage_mv); + else + cw_bat->voltage_mv = voltage_mv; +} + +static void cw_update_status(struct cw_battery *cw_bat) +{ + int status = 
POWER_SUPPLY_STATUS_DISCHARGING; + + if (cw_bat->charger_attached) { + if (cw_bat->soc >= 100) + status = POWER_SUPPLY_STATUS_FULL; + else + status = POWER_SUPPLY_STATUS_CHARGING; + } + + if (cw_bat->status != status) + cw_bat->battery_changed = true; + cw_bat->status = status; +} + +static void cw_update_time_to_empty(struct cw_battery *cw_bat) +{ + int time_to_empty; + + time_to_empty = cw_get_time_to_empty(cw_bat); + if (time_to_empty < 0) + dev_err(cw_bat->dev, "Failed to get time to empty from gauge: %d\n", + time_to_empty); + else if (cw_bat->time_to_empty != time_to_empty) { + cw_bat->time_to_empty = time_to_empty; + cw_bat->battery_changed = true; + } +} + +static void cw_bat_work(struct work_struct *work) +{ + struct delayed_work *delay_work; + struct cw_battery *cw_bat; + int ret; + unsigned int reg_val; + + delay_work = to_delayed_work(work); + cw_bat = container_of(delay_work, struct cw_battery, battery_delay_work); + ret = regmap_read(cw_bat->regmap, CW2015_REG_MODE, ®_val); + if (ret) { + dev_err(cw_bat->dev, "Failed to read mode from gauge: %d\n", ret); + } else { + if ((reg_val & CW2015_MODE_SLEEP_MASK) == CW2015_MODE_SLEEP) { + int i; + + for (i = 0; i < CW2015_RESET_TRIES; i++) { + if (!cw_power_on_reset(cw_bat)) + break; + } + } + cw_update_soc(cw_bat); + cw_update_voltage(cw_bat); + cw_update_charge_status(cw_bat); + cw_update_status(cw_bat); + cw_update_time_to_empty(cw_bat); + } + dev_dbg(cw_bat->dev, "charger_attached = %d\n", cw_bat->charger_attached); + dev_dbg(cw_bat->dev, "status = %d\n", cw_bat->status); + dev_dbg(cw_bat->dev, "soc = %d%%\n", cw_bat->soc); + dev_dbg(cw_bat->dev, "voltage = %dmV\n", cw_bat->voltage_mv); + + if (cw_bat->battery_changed) + power_supply_changed(cw_bat->rk_bat); + cw_bat->battery_changed = false; + + queue_delayed_work(cw_bat->battery_workqueue, + &cw_bat->battery_delay_work, + msecs_to_jiffies(cw_bat->poll_interval_ms)); +} + +static bool cw_battery_valid_time_to_empty(struct cw_battery *cw_bat) +{ + return cw_bat->time_to_empty > 0 && + cw_bat->time_to_empty < CW2015_MASK_SOC && + cw_bat->status == POWER_SUPPLY_STATUS_DISCHARGING; +} + +static int cw_battery_get_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + struct cw_battery *cw_bat; + + cw_bat = power_supply_get_drvdata(psy); + switch (psp) { + case POWER_SUPPLY_PROP_CAPACITY: + val->intval = cw_bat->soc; + break; + + case POWER_SUPPLY_PROP_STATUS: + val->intval = cw_bat->status; + break; + + case POWER_SUPPLY_PROP_PRESENT: + val->intval = !!cw_bat->voltage_mv; + break; + + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + val->intval = cw_bat->voltage_mv * 1000; + break; + + case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW: + if (cw_battery_valid_time_to_empty(cw_bat)) + val->intval = cw_bat->time_to_empty; + else + val->intval = 0; + break; + + case POWER_SUPPLY_PROP_TECHNOLOGY: + val->intval = POWER_SUPPLY_TECHNOLOGY_LION; + break; + + case POWER_SUPPLY_PROP_CHARGE_COUNTER: + val->intval = cw_bat->charge_count; + break; + + case POWER_SUPPLY_PROP_CHARGE_FULL: + case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: + if (cw_bat->battery.charge_full_design_uah > 0) + val->intval = cw_bat->battery.charge_full_design_uah; + else + val->intval = 0; + break; + + case POWER_SUPPLY_PROP_CURRENT_NOW: + if (cw_battery_valid_time_to_empty(cw_bat) && + cw_bat->battery.charge_full_design_uah > 0) { + /* calculate remaining capacity */ + val->intval = cw_bat->battery.charge_full_design_uah; + val->intval = val->intval * cw_bat->soc / 100; + + /* estimate 
current based on time to empty */ + val->intval = 60 * val->intval / cw_bat->time_to_empty; + } else { + val->intval = 0; + } + + break; + + default: + break; + } + return 0; +} + +static enum power_supply_property cw_battery_properties[] = { + POWER_SUPPLY_PROP_CAPACITY, + POWER_SUPPLY_PROP_STATUS, + POWER_SUPPLY_PROP_PRESENT, + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW, + POWER_SUPPLY_PROP_TECHNOLOGY, + POWER_SUPPLY_PROP_CHARGE_COUNTER, + POWER_SUPPLY_PROP_CHARGE_FULL, + POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, + POWER_SUPPLY_PROP_CURRENT_NOW, +}; + +static const struct power_supply_desc cw2015_bat_desc = { + .name = "cw2015-battery", + .type = POWER_SUPPLY_TYPE_BATTERY, + .properties = cw_battery_properties, + .num_properties = ARRAY_SIZE(cw_battery_properties), + .get_property = cw_battery_get_property, +}; + +static int cw2015_parse_properties(struct cw_battery *cw_bat) +{ + struct device *dev = cw_bat->dev; + int length; + int ret; + + length = device_property_count_u8(dev, "cellwise,battery-profile"); + if (length < 0) { + dev_warn(cw_bat->dev, + "No battery-profile found, using current flash contents\n"); + } else if (length != CW2015_SIZE_BATINFO) { + dev_err(cw_bat->dev, "battery-profile must be %d bytes\n", + CW2015_SIZE_BATINFO); + return -EINVAL; + } else { + cw_bat->bat_profile = devm_kzalloc(dev, length, GFP_KERNEL); + if (!cw_bat->bat_profile) + return -ENOMEM; + + ret = device_property_read_u8_array(dev, + "cellwise,battery-profile", + cw_bat->bat_profile, + length); + if (ret) + return ret; + } + + ret = device_property_read_u32(dev, "cellwise,monitor-interval-ms", + &cw_bat->poll_interval_ms); + if (ret) { + dev_dbg(cw_bat->dev, "Using default poll interval\n"); + cw_bat->poll_interval_ms = CW2015_DEFAULT_POLL_INTERVAL_MS; + } + + return 0; +} + +static const struct regmap_range regmap_ranges_rd_yes[] = { + regmap_reg_range(CW2015_REG_VERSION, CW2015_REG_VERSION), + regmap_reg_range(CW2015_REG_VCELL, CW2015_REG_CONFIG), + regmap_reg_range(CW2015_REG_MODE, CW2015_REG_MODE), + regmap_reg_range(CW2015_REG_BATINFO, + CW2015_REG_BATINFO + CW2015_SIZE_BATINFO - 1), +}; + +static const struct regmap_access_table regmap_rd_table = { + .yes_ranges = regmap_ranges_rd_yes, + .n_yes_ranges = 4, +}; + +static const struct regmap_range regmap_ranges_wr_yes[] = { + regmap_reg_range(CW2015_REG_RRT_ALERT, CW2015_REG_CONFIG), + regmap_reg_range(CW2015_REG_MODE, CW2015_REG_MODE), + regmap_reg_range(CW2015_REG_BATINFO, + CW2015_REG_BATINFO + CW2015_SIZE_BATINFO - 1), +}; + +static const struct regmap_access_table regmap_wr_table = { + .yes_ranges = regmap_ranges_wr_yes, + .n_yes_ranges = 3, +}; + +static const struct regmap_range regmap_ranges_vol_yes[] = { + regmap_reg_range(CW2015_REG_VCELL, CW2015_REG_SOC + 1), +}; + +static const struct regmap_access_table regmap_vol_table = { + .yes_ranges = regmap_ranges_vol_yes, + .n_yes_ranges = 1, +}; + +static const struct regmap_config cw2015_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .rd_table = ®map_rd_table, + .wr_table = ®map_wr_table, + .volatile_table = ®map_vol_table, + .max_register = CW2015_REG_BATINFO + CW2015_SIZE_BATINFO - 1, +}; + +static int cw_bat_probe(struct i2c_client *client) +{ + int ret; + struct cw_battery *cw_bat; + struct power_supply_config psy_cfg = { 0 }; + + cw_bat = devm_kzalloc(&client->dev, sizeof(*cw_bat), GFP_KERNEL); + if (!cw_bat) + return -ENOMEM; + + i2c_set_clientdata(client, cw_bat); + cw_bat->dev = &client->dev; + cw_bat->soc = 1; + + ret = cw2015_parse_properties(cw_bat); 
+ if (ret) { + dev_err(cw_bat->dev, "Failed to parse cw2015 properties\n"); + return ret; + } + + cw_bat->regmap = devm_regmap_init_i2c(client, &cw2015_regmap_config); + if (IS_ERR(cw_bat->regmap)) { + dev_err(cw_bat->dev, "Failed to allocate regmap: %ld\n", + PTR_ERR(cw_bat->regmap)); + return PTR_ERR(cw_bat->regmap); + } + + ret = cw_init(cw_bat); + if (ret) { + dev_err(cw_bat->dev, "Init failed: %d\n", ret); + return ret; + } + + psy_cfg.drv_data = cw_bat; + psy_cfg.fwnode = dev_fwnode(cw_bat->dev); + + cw_bat->rk_bat = devm_power_supply_register(&client->dev, + &cw2015_bat_desc, + &psy_cfg); + if (IS_ERR(cw_bat->rk_bat)) { + dev_err(cw_bat->dev, "Failed to register power supply\n"); + return PTR_ERR(cw_bat->rk_bat); + } + + ret = power_supply_get_battery_info(cw_bat->rk_bat, &cw_bat->battery); + if (ret) { + dev_warn(cw_bat->dev, + "No monitored battery, some properties will be missing\n"); + } + + cw_bat->battery_workqueue = create_singlethread_workqueue("rk_battery"); + INIT_DELAYED_WORK(&cw_bat->battery_delay_work, cw_bat_work); + queue_delayed_work(cw_bat->battery_workqueue, + &cw_bat->battery_delay_work, msecs_to_jiffies(10)); + return 0; +} + +static int __maybe_unused cw_bat_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct cw_battery *cw_bat = i2c_get_clientdata(client); + + cancel_delayed_work_sync(&cw_bat->battery_delay_work); + return 0; +} + +static int __maybe_unused cw_bat_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct cw_battery *cw_bat = i2c_get_clientdata(client); + + queue_delayed_work(cw_bat->battery_workqueue, + &cw_bat->battery_delay_work, 0); + return 0; +} + +SIMPLE_DEV_PM_OPS(cw_bat_pm_ops, cw_bat_suspend, cw_bat_resume); + +static int cw_bat_remove(struct i2c_client *client) +{ + struct cw_battery *cw_bat = i2c_get_clientdata(client); + + cancel_delayed_work_sync(&cw_bat->battery_delay_work); + power_supply_put_battery_info(cw_bat->rk_bat, &cw_bat->battery); + return 0; +} + +static const struct i2c_device_id cw_bat_id_table[] = { + { "cw2015", 0 }, + { } +}; + +static const struct of_device_id cw2015_of_match[] = { + { .compatible = "cellwise,cw2015" }, + { } +}; +MODULE_DEVICE_TABLE(of, cw2015_of_match); + +static struct i2c_driver cw_bat_driver = { + .driver = { + .name = "cw2015", + .pm = &cw_bat_pm_ops, + }, + .probe_new = cw_bat_probe, + .remove = cw_bat_remove, + .id_table = cw_bat_id_table, +}; + +module_i2c_driver(cw_bat_driver); + +MODULE_AUTHOR("xhc"); +MODULE_AUTHOR("Tobias Schramm "); +MODULE_DESCRIPTION("cw2015/cw2013 battery driver"); +MODULE_LICENSE("GPL"); -- GitLab From 5956fca78f5c2a6390ab59919a277c3ef169f909 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Miros=C5=82aw?= Date: Sun, 3 May 2020 17:21:10 +0200 Subject: [PATCH 0262/1971] power: bq25890: simplify chip name property getter MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Driver rejects unknown chips early in the probe(), so when bq25890_power_supply_get_property() is made reachable, bq->chip_version will already be set to correct value - there is no need to check it again. 
Signed-off-by: Michał Mirosław Signed-off-by: Sebastian Reichel --- drivers/power/supply/bq25890_charger.c | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c index aebd1253dbc93..d44ef48ce8f98 100644 --- a/drivers/power/supply/bq25890_charger.c +++ b/drivers/power/supply/bq25890_charger.c @@ -32,6 +32,13 @@ enum bq25890_chip_version { BQ25896, }; +static const char *const bq25890_chip_name[] = { + "BQ25890", + "BQ25892", + "BQ25895", + "BQ25896", +}; + enum bq25890_fields { F_EN_HIZ, F_EN_ILIM, F_IILIM, /* Reg00 */ F_BHOT, F_BCOLD, F_VINDPM_OFS, /* Reg01 */ @@ -400,17 +407,7 @@ static int bq25890_power_supply_get_property(struct power_supply *psy, break; case POWER_SUPPLY_PROP_MODEL_NAME: - if (bq->chip_version == BQ25890) - val->strval = "BQ25890"; - else if (bq->chip_version == BQ25892) - val->strval = "BQ25892"; - else if (bq->chip_version == BQ25895) - val->strval = "BQ25895"; - else if (bq->chip_version == BQ25896) - val->strval = "BQ25896"; - else - val->strval = "UNKNOWN"; - + val->strval = bq25890_chip_name[bq->chip_version]; break; case POWER_SUPPLY_PROP_ONLINE: -- GitLab From a6a48fac96ae74f76422f908e09fdc9a6722050f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Miros=C5=82aw?= Date: Sun, 3 May 2020 17:21:10 +0200 Subject: [PATCH 0263/1971] power: bq25890: make property table const MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Property list should not change, so mark it const. Signed-off-by: Michał Mirosław Signed-off-by: Sebastian Reichel --- drivers/power/supply/bq25890_charger.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c index d44ef48ce8f98..9e862e2a99cf0 100644 --- a/drivers/power/supply/bq25890_charger.c +++ b/drivers/power/supply/bq25890_charger.c @@ -668,7 +668,7 @@ static int bq25890_hw_init(struct bq25890_device *bq) return 0; } -static enum power_supply_property bq25890_power_supply_props[] = { +static const enum power_supply_property bq25890_power_supply_props[] = { POWER_SUPPLY_PROP_MANUFACTURER, POWER_SUPPLY_PROP_MODEL_NAME, POWER_SUPPLY_PROP_STATUS, -- GitLab From a9c2419406b8e50351b691d966dc4b7254678dbd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Miros=C5=82aw?= Date: Sun, 3 May 2020 17:21:10 +0200 Subject: [PATCH 0264/1971] power: bq25890: remove redundant I2C bus check MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit regmap initialization will check I2C adapter functionality. Remove redundant check in the driver. 
Signed-off-by: Michał Mirosław Signed-off-by: Sebastian Reichel --- drivers/power/supply/bq25890_charger.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c index 9e862e2a99cf0..c4a69fd69f341 100644 --- a/drivers/power/supply/bq25890_charger.c +++ b/drivers/power/supply/bq25890_charger.c @@ -878,17 +878,11 @@ static int bq25890_fw_probe(struct bq25890_device *bq) static int bq25890_probe(struct i2c_client *client, const struct i2c_device_id *id) { - struct i2c_adapter *adapter = client->adapter; struct device *dev = &client->dev; struct bq25890_device *bq; int ret; int i; - if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { - dev_err(dev, "No support for SMBUS_BYTE_DATA\n"); - return -ENODEV; - } - bq = devm_kzalloc(dev, sizeof(*bq), GFP_KERNEL); if (!bq) return -ENOMEM; -- GitLab From 72d9cd9cdc18b53c1df935dc9d40ea7bf057387f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Miros=C5=82aw?= Date: Sun, 3 May 2020 17:21:11 +0200 Subject: [PATCH 0265/1971] power: bq25890: protect view of the chip's state MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Extend bq->lock over whole updating of the chip's state. Might get useful later for switching ADC modes correctly. Signed-off-by: Michał Mirosław Signed-off-by: Sebastian Reichel --- drivers/power/supply/bq25890_charger.c | 82 ++++++++------------------ 1 file changed, 26 insertions(+), 56 deletions(-) diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c index c4a69fd69f341..9339e216651ff 100644 --- a/drivers/power/supply/bq25890_charger.c +++ b/drivers/power/supply/bq25890_charger.c @@ -510,74 +510,50 @@ static int bq25890_get_chip_state(struct bq25890_device *bq, return 0; } -static bool bq25890_state_changed(struct bq25890_device *bq, - struct bq25890_state *new_state) -{ - struct bq25890_state old_state; - - mutex_lock(&bq->lock); - old_state = bq->state; - mutex_unlock(&bq->lock); - - return (old_state.chrg_status != new_state->chrg_status || - old_state.chrg_fault != new_state->chrg_fault || - old_state.online != new_state->online || - old_state.bat_fault != new_state->bat_fault || - old_state.boost_fault != new_state->boost_fault || - old_state.vsys_status != new_state->vsys_status); -} - -static void bq25890_handle_state_change(struct bq25890_device *bq, - struct bq25890_state *new_state) +static irqreturn_t __bq25890_handle_irq(struct bq25890_device *bq) { + struct bq25890_state new_state; int ret; - struct bq25890_state old_state; - mutex_lock(&bq->lock); - old_state = bq->state; - mutex_unlock(&bq->lock); + ret = bq25890_get_chip_state(bq, &new_state); + if (ret < 0) + return IRQ_NONE; - if (!new_state->online) { /* power removed */ + if (!memcmp(&bq->state, &new_state, sizeof(new_state))) + return IRQ_NONE; + + if (!new_state.online && bq->state.online) { /* power removed */ /* disable ADC */ ret = bq25890_field_write(bq, F_CONV_START, 0); if (ret < 0) goto error; - } else if (!old_state.online) { /* power inserted */ + } else if (new_state.online && !bq->state.online) { /* power inserted */ /* enable ADC, to have control of charge current/voltage */ ret = bq25890_field_write(bq, F_CONV_START, 1); if (ret < 0) goto error; } - return; + bq->state = new_state; + power_supply_changed(bq->charger); + return IRQ_HANDLED; error: - dev_err(bq->dev, "Error communicating with the chip.\n"); + dev_err(bq->dev, "Error communicating with the chip: %pe\n", + 
ERR_PTR(ret)); + return IRQ_HANDLED; } static irqreturn_t bq25890_irq_handler_thread(int irq, void *private) { struct bq25890_device *bq = private; - int ret; - struct bq25890_state state; - - ret = bq25890_get_chip_state(bq, &state); - if (ret < 0) - goto handled; - - if (!bq25890_state_changed(bq, &state)) - goto handled; - - bq25890_handle_state_change(bq, &state); + irqreturn_t ret; mutex_lock(&bq->lock); - bq->state = state; + ret = __bq25890_handle_irq(bq); mutex_unlock(&bq->lock); - power_supply_changed(bq->charger); - -handled: - return IRQ_HANDLED; + return ret; } static int bq25890_chip_reset(struct bq25890_device *bq) @@ -607,7 +583,6 @@ static int bq25890_hw_init(struct bq25890_device *bq) { int ret; int i; - struct bq25890_state state; const struct { enum bq25890_fields id; @@ -655,16 +630,12 @@ static int bq25890_hw_init(struct bq25890_device *bq) return ret; } - ret = bq25890_get_chip_state(bq, &state); + ret = bq25890_get_chip_state(bq, &bq->state); if (ret < 0) { dev_dbg(bq->dev, "Get state failed %d\n", ret); return ret; } - mutex_lock(&bq->lock); - bq->state = state; - mutex_unlock(&bq->lock); - return 0; } @@ -1001,19 +972,16 @@ static int bq25890_suspend(struct device *dev) static int bq25890_resume(struct device *dev) { int ret; - struct bq25890_state state; struct bq25890_device *bq = dev_get_drvdata(dev); - ret = bq25890_get_chip_state(bq, &state); + mutex_lock(&bq->lock); + + ret = bq25890_get_chip_state(bq, &bq->state); if (ret < 0) return ret; - mutex_lock(&bq->lock); - bq->state = state; - mutex_unlock(&bq->lock); - /* Re-enable ADC only if charger is plugged in. */ - if (state.online) { + if (bq->state.online) { ret = bq25890_field_write(bq, F_CONV_START, 1); if (ret < 0) return ret; @@ -1022,6 +990,8 @@ static int bq25890_resume(struct device *dev) /* signal userspace, maybe state changed while suspended */ power_supply_changed(bq->charger); + mutex_unlock(&bq->lock); + return 0; } #endif -- GitLab From 833d88f3fd50a54544a7e71edef0877bb7b769c1 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 29 Apr 2020 15:21:50 +0300 Subject: [PATCH 0266/1971] dmaengine: Include dmaengine.h into dmaengine.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Compiler is not happy about non-static functions due to missed inclusion .../dmaengine.c:682:18: warning: no previous prototype for ‘dma_get_slave_channel’ [-Wmissing-prototypes] 682 | struct dma_chan *dma_get_slave_channel(struct dma_chan *chan) | ^~~~~~~~~~~~~~~~~~~~~ .../dmaengine.c:713:18: warning: no previous prototype for ‘dma_get_any_slave_channel’ [-Wmissing-prototypes] 713 | struct dma_chan *dma_get_any_slave_channel(struct dma_device *device) | ^~~~~~~~~~~~~~~~~~~~~~~~~ Include missed header to satisfy compiler. 
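For illustration only (a hypothetical foo.h/foo.c pair, not the dmaengine sources): -Wmissing-prototypes fires when a non-static function is defined in a file that never sees its own prototype, and including the header that declares it is the usual fix.

    /* foo.h: declares the shared helper */
    int foo_get_thing(int id);

    /* foo.c: without '#include "foo.h"' the definition below has no
     * previous prototype and triggers -Wmissing-prototypes
     */
    #include "foo.h"

    int foo_get_thing(int id)
    {
            return id * 2;
    }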
Signed-off-by: Andy Shevchenko Link: https://lore.kernel.org/r/20200429122151.50989-1-andriy.shevchenko@linux.intel.com Signed-off-by: Vinod Koul --- drivers/dma/dmaengine.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 4830ba658ce18..489e7f2ca3c80 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -53,6 +53,8 @@ #include #include +#include "dmaengine.h" + static DEFINE_MUTEX(dma_list_mutex); static DEFINE_IDA(dma_ida); static LIST_HEAD(dma_device_list); -- GitLab From 9872e23d6879ee04c7fe8372328195d12e977071 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 29 Apr 2020 15:21:51 +0300 Subject: [PATCH 0267/1971] dmaengine: Fix doc strings to satisfy validation script The validation kernel doc script complains about undescribed function parameters .../dmaengine.c:155: warning: Function parameter or member 'dev' not descr ibed in 'dev_to_dma_chan' .../dmaengine.c:251: warning: cannot understand function prototype: 'dma_cap_mask_t dma_cap_mask_all; ' .../dmaengine.c:257: warning: cannot understand function prototype: 'struct dma_chan_tbl_ent ' .../dmaengine.c:264: warning: cannot understand function prototype: 'struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END]; ' .../dmaengine.c:304: warning: Function parameter or member 'chan' not described in 'dma_chan_is_local' .../dmaengine.c:304: warning: Function parameter or member 'cpu' not described in 'dma_chan_is_local' .../dmaengine.c:414: warning: Function parameter or member 'chan' not described in 'balance_ref_count' .../dmaengine.c:447: warning: Function parameter or member 'chan' not described in 'dma_chan_get' .../dmaengine.c:494: warning: Function parameter or member 'chan' not described in 'dma_chan_put' Add descriptions to the function parameters and in some cases update existing text as well. Signed-off-by: Andy Shevchenko Link: https://lore.kernel.org/r/20200429122151.50989-2-andriy.shevchenko@linux.intel.com Signed-off-by: Vinod Koul --- drivers/dma/dmaengine.c | 96 +++++++++++++++++++++-------------------- 1 file changed, 50 insertions(+), 46 deletions(-) diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 489e7f2ca3c80..4e07a74fb2af4 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -147,9 +147,9 @@ static inline void dmaengine_debug_unregister(struct dma_device *dma_dev) { } /** * dev_to_dma_chan - convert a device pointer to its sysfs container object - * @dev - device node + * @dev: device node * - * Must be called under dma_list_mutex + * Must be called under dma_list_mutex. 
*/ static struct dma_chan *dev_to_dma_chan(struct device *dev) { @@ -249,22 +249,18 @@ static struct class dma_devclass = { /* --- client and device registration --- */ -/** - * dma_cap_mask_all - enable iteration over all operation types - */ +/* enable iteration over all operation types */ static dma_cap_mask_t dma_cap_mask_all; /** - * dma_chan_tbl_ent - tracks channel allocations per core/operation - * @chan - associated channel for this entry + * struct dma_chan_tbl_ent - tracks channel allocations per core/operation + * @chan: associated channel for this entry */ struct dma_chan_tbl_ent { struct dma_chan *chan; }; -/** - * channel_table - percpu lookup table for memory-to-memory offload providers - */ +/* percpu lookup table for memory-to-memory offload providers */ static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END]; static int __init dma_channel_table_init(void) @@ -301,8 +297,11 @@ static int __init dma_channel_table_init(void) arch_initcall(dma_channel_table_init); /** - * dma_chan_is_local - returns true if the channel is in the same numa-node as - * the cpu + * dma_chan_is_local - checks if the channel is in the same NUMA-node as the CPU + * @chan: DMA channel to test + * @cpu: CPU index which the channel should be close to + * + * Returns true if the channel is in the same NUMA-node as the CPU. */ static bool dma_chan_is_local(struct dma_chan *chan, int cpu) { @@ -312,14 +311,14 @@ static bool dma_chan_is_local(struct dma_chan *chan, int cpu) } /** - * min_chan - returns the channel with min count and in the same numa-node as - * the cpu - * @cap: capability to match - * @cpu: cpu index which the channel should be close to + * min_chan - finds the channel with min count and in the same NUMA-node as the CPU + * @cap: capability to match + * @cpu: CPU index which the channel should be close to * - * If some channels are close to the given cpu, the one with the lowest - * reference count is returned. Otherwise, cpu is ignored and only the + * If some channels are close to the given CPU, the one with the lowest + * reference count is returned. Otherwise, CPU is ignored and only the * reference count is taken into account. + * * Must be called under dma_list_mutex. */ static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu) @@ -357,10 +356,11 @@ static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu) /** * dma_channel_rebalance - redistribute the available channels * - * Optimize for cpu isolation (each cpu gets a dedicated channel for an - * operation type) in the SMP case, and operation isolation (avoid - * multi-tasking channels) in the non-SMP case. Must be called under - * dma_list_mutex. + * Optimize for CPU isolation (each CPU gets a dedicated channel for an + * operation type) in the SMP case, and operation isolation (avoid + * multi-tasking channels) in the non-SMP case. + * + * Must be called under dma_list_mutex. */ static void dma_channel_rebalance(void) { @@ -410,9 +410,9 @@ static struct module *dma_chan_to_owner(struct dma_chan *chan) /** * balance_ref_count - catch up the channel reference count - * @chan - channel to balance ->client_count versus dmaengine_ref_count + * @chan: channel to balance ->client_count versus dmaengine_ref_count * - * balance_ref_count must be called under dma_list_mutex + * Must be called under dma_list_mutex. 
*/ static void balance_ref_count(struct dma_chan *chan) { @@ -442,10 +442,10 @@ static void dma_device_put(struct dma_device *device) } /** - * dma_chan_get - try to grab a dma channel's parent driver module - * @chan - channel to grab + * dma_chan_get - try to grab a DMA channel's parent driver module + * @chan: channel to grab * - * Must be called under dma_list_mutex + * Must be called under dma_list_mutex. */ static int dma_chan_get(struct dma_chan *chan) { @@ -489,10 +489,10 @@ module_put_out: } /** - * dma_chan_put - drop a reference to a dma channel's parent driver module - * @chan - channel to release + * dma_chan_put - drop a reference to a DMA channel's parent driver module + * @chan: channel to release * - * Must be called under dma_list_mutex + * Must be called under dma_list_mutex. */ static void dma_chan_put(struct dma_chan *chan) { @@ -543,7 +543,7 @@ EXPORT_SYMBOL(dma_sync_wait); /** * dma_find_channel - find a channel to carry out the operation - * @tx_type: transaction type + * @tx_type: transaction type */ struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type) { @@ -683,7 +683,7 @@ static struct dma_chan *find_candidate(struct dma_device *device, /** * dma_get_slave_channel - try to get specific channel exclusively - * @chan: target channel + * @chan: target channel */ struct dma_chan *dma_get_slave_channel(struct dma_chan *chan) { @@ -737,10 +737,10 @@ EXPORT_SYMBOL_GPL(dma_get_any_slave_channel); /** * __dma_request_channel - try to allocate an exclusive channel - * @mask: capabilities that the channel must satisfy - * @fn: optional callback to disposition available channels - * @fn_param: opaque parameter to pass to dma_filter_fn - * @np: device node to look for DMA channels + * @mask: capabilities that the channel must satisfy + * @fn: optional callback to disposition available channels + * @fn_param: opaque parameter to pass to dma_filter_fn() + * @np: device node to look for DMA channels * * Returns pointer to appropriate DMA channel on success or NULL. */ @@ -883,7 +883,7 @@ EXPORT_SYMBOL_GPL(dma_request_slave_channel); /** * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities - * @mask: capabilities that the channel must satisfy + * @mask: capabilities that the channel must satisfy * * Returns pointer to appropriate DMA channel on success or an error pointer. */ @@ -974,7 +974,7 @@ void dmaengine_get(void) EXPORT_SYMBOL(dmaengine_get); /** - * dmaengine_put - let dma drivers be removed when ref_count == 0 + * dmaengine_put - let DMA drivers be removed when ref_count == 0 */ void dmaengine_put(void) { @@ -1146,7 +1146,7 @@ EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister); /** * dma_async_device_register - registers DMA devices found - * @device: &dma_device + * @device: pointer to &struct dma_device * * After calling this routine the structure should not be freed except in the * device_release() callback which will be called after @@ -1315,7 +1315,7 @@ EXPORT_SYMBOL(dma_async_device_register); /** * dma_async_device_unregister - unregister a DMA device - * @device: &dma_device + * @device: pointer to &struct dma_device * * This routine is called by dma driver exit routines, dmaengine holds module * references to prevent it being called while channels are in use. 
@@ -1351,7 +1351,7 @@ static void dmam_device_release(struct device *dev, void *res) /** * dmaenginem_async_device_register - registers DMA devices found - * @device: &dma_device + * @device: pointer to &struct dma_device * * The operation is managed and will be undone on driver detach. */ @@ -1588,8 +1588,9 @@ int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc, } EXPORT_SYMBOL_GPL(dmaengine_desc_set_metadata_len); -/* dma_wait_for_async_tx - spin wait for a transaction to complete - * @tx: in-flight transaction to wait on +/** + * dma_wait_for_async_tx - spin wait for a transaction to complete + * @tx: in-flight transaction to wait on */ enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) @@ -1612,9 +1613,12 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) } EXPORT_SYMBOL_GPL(dma_wait_for_async_tx); -/* dma_run_dependencies - helper routine for dma drivers to process - * (start) dependent operations on their target channel - * @tx: transaction with dependencies +/** + * dma_run_dependencies - process dependent operations on the target channel + * @tx: transaction with dependencies + * + * Helper routine for DMA drivers to process (start) dependent operations + * on their target channel. */ void dma_run_dependencies(struct dma_async_tx_descriptor *tx) { -- GitLab From 3b4ff4eb904fef04c36b39052ca8eb31fa41fad0 Mon Sep 17 00:00:00 2001 From: He Zhe Date: Wed, 4 Mar 2020 14:39:07 +0800 Subject: [PATCH 0268/1971] x86/mcelog: Add compat_ioctl for 32-bit mcelog support A 32-bit version of mcelog issuing ioctls on /dev/mcelog causes errors like the following: MCE_GET_RECORD_LEN: Inappropriate ioctl for device This is due to a missing compat_ioctl callback. Assign to it compat_ptr_ioctl() as a generic implementation of the .compat_ioctl file operation to ioctl functions that either ignore the argument or pass a pointer to a compatible data type. [ bp: Massage commit message. ] Signed-off-by: He Zhe Signed-off-by: Borislav Petkov Acked-by: Tony Luck Link: https://lkml.kernel.org/r/1583303947-49858-1-git-send-email-zhe.he@windriver.com --- arch/x86/kernel/cpu/mce/dev-mcelog.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/kernel/cpu/mce/dev-mcelog.c b/arch/x86/kernel/cpu/mce/dev-mcelog.c index c033e7ea9e3c3..a4fd5287f02f0 100644 --- a/arch/x86/kernel/cpu/mce/dev-mcelog.c +++ b/arch/x86/kernel/cpu/mce/dev-mcelog.c @@ -329,6 +329,7 @@ static const struct file_operations mce_chrdev_ops = { .write = mce_chrdev_write, .poll = mce_chrdev_poll, .unlocked_ioctl = mce_chrdev_ioctl, + .compat_ioctl = compat_ptr_ioctl, .llseek = no_llseek, }; -- GitLab From bd96f1b2f43a39310cc576bb4faf2ea24317a4c9 Mon Sep 17 00:00:00 2001 From: Alan Mikhak Date: Tue, 28 Apr 2020 18:10:33 -0700 Subject: [PATCH 0269/1971] dmaengine: dw-edma: support local dma device transfer semantics Modify dw_edma_device_transfer() to also support the semantics of dma device transfer for additional use cases involving pcitest utility as a local initiator. For its original use case, dw-edma supported the semantics of dma device transfer from the perspective of a remote initiator who is located across the PCIe bus from dma channel hardware. To a remote initiator, DMA_DEV_TO_MEM means using a remote dma WRITE channel to transfer from remote memory to local memory. A WRITE channel would be employed on the remote device in order to move the contents of remote memory to the bus destined for local memory. 
To a remote initiator, DMA_MEM_TO_DEV means using a remote dma READ channel to transfer from local memory to remote memory. A READ channel would be employed on the remote device in order to move the contents of local memory to the bus destined for remote memory.

From the perspective of a local dma initiator who is co-located on the same side of the PCIe bus as the dma channel hardware, the semantics of dma device transfer are flipped.

To a local initiator, DMA_DEV_TO_MEM means using a local dma READ channel to transfer from remote memory to local memory. A READ channel would be employed on the local device in order to move the contents of remote memory to the bus destined for local memory.

To a local initiator, DMA_MEM_TO_DEV means using a local dma WRITE channel to transfer from local memory to remote memory. A WRITE channel would be employed on the local device in order to move the contents of local memory to the bus destined for remote memory.

To support local dma initiators, dw_edma_device_transfer() is modified to examine the direction field of struct dma_slave_config for the channel, which initiators can configure by calling dmaengine_slave_config(). If direction is configured as either DMA_DEV_TO_MEM or DMA_MEM_TO_DEV, local initiator semantics are used. If direction is any value other than DMA_DEV_TO_MEM or DMA_MEM_TO_DEV, remote initiator semantics are used. This should maintain backward compatibility with the original use case of dw-edma.

The dw-edma-test utility is an example of a remote initiator. From reading its patch, dw-edma-test does not specifically set the direction field of struct dma_slave_config. Since dw_edma_device_transfer() did not check the direction field of struct dma_slave_config before this change either, it seems safe to use this convention in dw-edma to support both local and remote initiator semantics.
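As an illustration only (not part of this patch; the function, parameter and address names below are hypothetical), a local initiator might request local READ-channel semantics roughly as follows:

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/*
 * Minimal sketch, assuming "remote_src" is the PCIe bus address the local
 * READ channel should pull from: DMA_DEV_TO_MEM selects a transfer from
 * remote memory to local memory under local initiator semantics.
 */
static struct dma_async_tx_descriptor *
local_read_prep(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, phys_addr_t remote_src)
{
	struct dma_slave_config cfg = { };

	cfg.direction = DMA_DEV_TO_MEM;		/* local initiator semantics */
	cfg.src_addr = remote_src;

	if (dmaengine_slave_config(chan, &cfg))
		return NULL;

	return dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT);
}

A remote initiator that never sets direction leaves it at DMA_MEM_TO_MEM (the enum's zero value), which lands in the default (remote dma) case of the new switch statement; that is how the existing users keep working.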
Signed-off-by: Alan Mikhak Link: https://lore.kernel.org/r/1588122633-1552-1-git-send-email-alan.mikhak@sifive.com Signed-off-by: Vinod Koul --- drivers/dma/dw-edma/dw-edma-core.c | 27 ++++++++++++++++++++------- 1 file changed, 20 insertions(+), 7 deletions(-) diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c index 306ab50462bee..ed430ad9b3dd8 100644 --- a/drivers/dma/dw-edma/dw-edma-core.c +++ b/drivers/dma/dw-edma/dw-edma-core.c @@ -323,7 +323,7 @@ static struct dma_async_tx_descriptor * dw_edma_device_transfer(struct dw_edma_transfer *xfer) { struct dw_edma_chan *chan = dchan2dw_edma_chan(xfer->dchan); - enum dma_transfer_direction direction = xfer->direction; + enum dma_transfer_direction dir = xfer->direction; phys_addr_t src_addr, dst_addr; struct scatterlist *sg = NULL; struct dw_edma_chunk *chunk; @@ -332,10 +332,26 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer) u32 cnt; int i; - if ((direction == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_WRITE) || - (direction == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_READ)) + if (!chan->configured) return NULL; + switch (chan->config.direction) { + case DMA_DEV_TO_MEM: /* local dma */ + if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_READ) + break; + return NULL; + case DMA_MEM_TO_DEV: /* local dma */ + if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_WRITE) + break; + return NULL; + default: /* remote dma */ + if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_READ) + break; + if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_WRITE) + break; + return NULL; + } + if (xfer->cyclic) { if (!xfer->xfer.cyclic.len || !xfer->xfer.cyclic.cnt) return NULL; @@ -344,9 +360,6 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer) return NULL; } - if (!chan->configured) - return NULL; - desc = dw_edma_alloc_desc(chan); if (unlikely(!desc)) goto err_alloc; @@ -387,7 +400,7 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer) chunk->ll_region.sz += burst->sz; desc->alloc_sz += burst->sz; - if (direction == DMA_DEV_TO_MEM) { + if (chan->dir == EDMA_DIR_WRITE) { burst->sar = src_addr; if (xfer->cyclic) { burst->dar = xfer->xfer.cyclic.paddr; -- GitLab From 868d4d37a2c64e2b44b05c89500b2b39421d3adb Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 27 Apr 2020 19:48:36 +0100 Subject: [PATCH 0270/1971] i2c: pxa: use official address byte helper i2c-pxa was created before i2c_8bit_addr_from_msg() was implemented, and used its own i2c_pxa_addr_byte() which is functionally the same. Sadly, it was never updated to use this new helper. Switch it over. 
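For reference only (this snippet is not part of the patch): the removed PXA helper built the address byte by masking the address to 7 bits, shifting it left by one and setting the least significant bit for reads; the generic helper is understood to boil down to the same computation, minus the explicit mask. A minimal sketch:

#include <linux/i2c.h>

/* Illustrative equivalent of the 8-bit address-byte computation. */
static u8 example_8bit_addr(const struct i2c_msg *msg)
{
	return (msg->addr << 1) | (msg->flags & I2C_M_RD ? 1 : 0);
}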
Signed-off-by: Russell King Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-pxa.c | 18 ++++-------------- 1 file changed, 4 insertions(+), 14 deletions(-) diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c index 37246cbbe14c4..835e9150e6cdc 100644 --- a/drivers/i2c/busses/i2c-pxa.c +++ b/drivers/i2c/busses/i2c-pxa.c @@ -716,16 +716,6 @@ static void i2c_pxa_slave_stop(struct pxa_i2c *i2c) * PXA I2C Master mode */ -static inline unsigned int i2c_pxa_addr_byte(struct i2c_msg *msg) -{ - unsigned int addr = (msg->addr & 0x7f) << 1; - - if (msg->flags & I2C_M_RD) - addr |= 1; - - return addr; -} - static inline void i2c_pxa_start_message(struct pxa_i2c *i2c) { u32 icr; @@ -733,8 +723,8 @@ static inline void i2c_pxa_start_message(struct pxa_i2c *i2c) /* * Step 1: target slave address into IDBR */ - writel(i2c_pxa_addr_byte(i2c->msg), _IDBR(i2c)); - i2c->req_slave_addr = i2c_pxa_addr_byte(i2c->msg); + i2c->req_slave_addr = i2c_8bit_addr_from_msg(i2c->msg); + writel(i2c->req_slave_addr, _IDBR(i2c)); /* * Step 2: initiate the write. @@ -1047,8 +1037,8 @@ static void i2c_pxa_irq_txempty(struct pxa_i2c *i2c, u32 isr) /* * Write the next address. */ - writel(i2c_pxa_addr_byte(i2c->msg), _IDBR(i2c)); - i2c->req_slave_addr = i2c_pxa_addr_byte(i2c->msg); + i2c->req_slave_addr = i2c_8bit_addr_from_msg(i2c->msg); + writel(i2c->req_slave_addr, _IDBR(i2c)); /* * And trigger a repeated start, and send the byte. -- GitLab From fa8d74a9c548aca75f3497df99fe9c09f4f16870 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 27 Apr 2020 19:48:41 +0100 Subject: [PATCH 0271/1971] i2c: pxa: remove unneeded includes i2c-pxa does not need linux/sched.h nor linux/time.h includes, so remove these. Signed-off-by: Russell King Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-pxa.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c index 835e9150e6cdc..449f9002e4876 100644 --- a/drivers/i2c/busses/i2c-pxa.c +++ b/drivers/i2c/busses/i2c-pxa.c @@ -20,8 +20,6 @@ #include #include #include -#include -#include #include #include #include @@ -34,8 +32,6 @@ #include #include -#include - struct pxa_reg_layout { u32 ibmr; u32 idbr; -- GitLab From 8de32da283e39d07ea4cea09a9b89447cacc3b4c Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 27 Apr 2020 19:48:46 +0100 Subject: [PATCH 0272/1971] i2c: pxa: re-arrange includes to be in alphabetical order Arrange the includes to be in alphabetical order to help avoid duplicated includes. 
Signed-off-by: Russell King Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-pxa.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c index 449f9002e4876..ab80b020cd997 100644 --- a/drivers/i2c/busses/i2c-pxa.c +++ b/drivers/i2c/busses/i2c-pxa.c @@ -16,21 +16,21 @@ * Dec 2004: Added support for PXA27x and slave device probing [Liam Girdwood] * Feb 2005: Rework slave mode handling [RMK] */ -#include -#include -#include -#include +#include #include +#include #include +#include +#include #include +#include +#include +#include #include #include #include -#include -#include -#include -#include #include +#include struct pxa_reg_layout { u32 ibmr; -- GitLab From 1ae49a15eea0b7c7ca26aa112abc7ec878d69d70 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 27 Apr 2020 19:48:51 +0100 Subject: [PATCH 0273/1971] i2c: pxa: re-arrange functions to flow better Re-arrange the PXA I2C code to avoid forward declarations, and keep similar functionality (e.g. the non-IRQ mode support) together. This improves code readability. Signed-off-by: Russell King Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-pxa.c | 325 +++++++++++++++++------------------ 1 file changed, 162 insertions(+), 163 deletions(-) diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c index ab80b020cd997..bac1f5578f0e5 100644 --- a/drivers/i2c/busses/i2c-pxa.c +++ b/drivers/i2c/busses/i2c-pxa.c @@ -326,7 +326,6 @@ static void i2c_pxa_scream_blue_murder(struct pxa_i2c *i2c, const char *why) #endif /* ifdef DEBUG / else */ static void i2c_pxa_master_complete(struct pxa_i2c *i2c, int ret); -static irqreturn_t i2c_pxa_handler(int this_irq, void *dev_id); static inline int i2c_pxa_is_slavemode(struct pxa_i2c *i2c) { @@ -741,34 +740,6 @@ static inline void i2c_pxa_stop_message(struct pxa_i2c *i2c) writel(icr, _ICR(i2c)); } -static int i2c_pxa_pio_set_master(struct pxa_i2c *i2c) -{ - /* make timeout the same as for interrupt based functions */ - long timeout = 2 * DEF_TIMEOUT; - - /* - * Wait for the bus to become free. - */ - while (timeout-- && readl(_ISR(i2c)) & (ISR_IBB | ISR_UB)) { - udelay(1000); - show_state(i2c); - } - - if (timeout < 0) { - show_state(i2c); - dev_err(&i2c->adap.dev, - "i2c_pxa: timeout waiting for bus free\n"); - return I2C_RETRY; - } - - /* - * Set master mode. - */ - writel(readl(_ICR(i2c)) | ICR_SCLE, _ICR(i2c)); - - return 0; -} - /* * PXA I2C send master code * 1. Load master code to IDBR and send it. @@ -797,140 +768,6 @@ static int i2c_pxa_send_mastercode(struct pxa_i2c *i2c) return (timeout == 0) ? I2C_RETRY : 0; } -static int i2c_pxa_do_pio_xfer(struct pxa_i2c *i2c, - struct i2c_msg *msg, int num) -{ - unsigned long timeout = 500000; /* 5 seconds */ - int ret = 0; - - ret = i2c_pxa_pio_set_master(i2c); - if (ret) - goto out; - - i2c->msg = msg; - i2c->msg_num = num; - i2c->msg_idx = 0; - i2c->msg_ptr = 0; - i2c->irqlogidx = 0; - - i2c_pxa_start_message(i2c); - - while (i2c->msg_num > 0 && --timeout) { - i2c_pxa_handler(0, i2c); - udelay(10); - } - - i2c_pxa_stop_message(i2c); - - /* - * We place the return code in i2c->msg_idx. - */ - ret = i2c->msg_idx; - -out: - if (timeout == 0) { - i2c_pxa_scream_blue_murder(i2c, "timeout"); - ret = I2C_RETRY; - } - - return ret; -} - -/* - * We are protected by the adapter bus mutex. - */ -static int i2c_pxa_do_xfer(struct pxa_i2c *i2c, struct i2c_msg *msg, int num) -{ - long timeout; - int ret; - - /* - * Wait for the bus to become free. 
- */ - ret = i2c_pxa_wait_bus_not_busy(i2c); - if (ret) { - dev_err(&i2c->adap.dev, "i2c_pxa: timeout waiting for bus free\n"); - goto out; - } - - /* - * Set master mode. - */ - ret = i2c_pxa_set_master(i2c); - if (ret) { - dev_err(&i2c->adap.dev, "i2c_pxa_set_master: error %d\n", ret); - goto out; - } - - if (i2c->high_mode) { - ret = i2c_pxa_send_mastercode(i2c); - if (ret) { - dev_err(&i2c->adap.dev, "i2c_pxa_send_mastercode timeout\n"); - goto out; - } - } - - spin_lock_irq(&i2c->lock); - - i2c->msg = msg; - i2c->msg_num = num; - i2c->msg_idx = 0; - i2c->msg_ptr = 0; - i2c->irqlogidx = 0; - - i2c_pxa_start_message(i2c); - - spin_unlock_irq(&i2c->lock); - - /* - * The rest of the processing occurs in the interrupt handler. - */ - timeout = wait_event_timeout(i2c->wait, i2c->msg_num == 0, HZ * 5); - i2c_pxa_stop_message(i2c); - - /* - * We place the return code in i2c->msg_idx. - */ - ret = i2c->msg_idx; - - if (!timeout && i2c->msg_num) { - i2c_pxa_scream_blue_murder(i2c, "timeout"); - ret = I2C_RETRY; - } - - out: - return ret; -} - -static int i2c_pxa_pio_xfer(struct i2c_adapter *adap, - struct i2c_msg msgs[], int num) -{ - struct pxa_i2c *i2c = adap->algo_data; - int ret, i; - - /* If the I2C controller is disabled we need to reset it - (probably due to a suspend/resume destroying state). We do - this here as we can then avoid worrying about resuming the - controller before its users. */ - if (!(readl(_ICR(i2c)) & ICR_IUE)) - i2c_pxa_reset(i2c); - - for (i = adap->retries; i >= 0; i--) { - ret = i2c_pxa_do_pio_xfer(i2c, msgs, num); - if (ret != I2C_RETRY) - goto out; - - if (i2c_debug) - dev_dbg(&adap->dev, "Retrying transmission\n"); - udelay(100); - } - i2c_pxa_scream_blue_murder(i2c, "exhausted retries"); - ret = -EREMOTEIO; - out: - i2c_pxa_set_slave(i2c, ret); - return ret; -} - /* * i2c_pxa_master_complete - complete the message and wake up. */ @@ -1137,6 +974,71 @@ static irqreturn_t i2c_pxa_handler(int this_irq, void *dev_id) return IRQ_HANDLED; } +/* + * We are protected by the adapter bus mutex. + */ +static int i2c_pxa_do_xfer(struct pxa_i2c *i2c, struct i2c_msg *msg, int num) +{ + long timeout; + int ret; + + /* + * Wait for the bus to become free. + */ + ret = i2c_pxa_wait_bus_not_busy(i2c); + if (ret) { + dev_err(&i2c->adap.dev, "i2c_pxa: timeout waiting for bus free\n"); + goto out; + } + + /* + * Set master mode. + */ + ret = i2c_pxa_set_master(i2c); + if (ret) { + dev_err(&i2c->adap.dev, "i2c_pxa_set_master: error %d\n", ret); + goto out; + } + + if (i2c->high_mode) { + ret = i2c_pxa_send_mastercode(i2c); + if (ret) { + dev_err(&i2c->adap.dev, "i2c_pxa_send_mastercode timeout\n"); + goto out; + } + } + + spin_lock_irq(&i2c->lock); + + i2c->msg = msg; + i2c->msg_num = num; + i2c->msg_idx = 0; + i2c->msg_ptr = 0; + i2c->irqlogidx = 0; + + i2c_pxa_start_message(i2c); + + spin_unlock_irq(&i2c->lock); + + /* + * The rest of the processing occurs in the interrupt handler. + */ + timeout = wait_event_timeout(i2c->wait, i2c->msg_num == 0, HZ * 5); + i2c_pxa_stop_message(i2c); + + /* + * We place the return code in i2c->msg_idx. 
+ */ + ret = i2c->msg_idx; + + if (!timeout && i2c->msg_num) { + i2c_pxa_scream_blue_murder(i2c, "timeout"); + ret = I2C_RETRY; + } + + out: + return ret; +} static int i2c_pxa_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) { @@ -1174,6 +1076,103 @@ static const struct i2c_algorithm i2c_pxa_algorithm = { #endif }; +/* Non-interrupt mode support */ +static int i2c_pxa_pio_set_master(struct pxa_i2c *i2c) +{ + /* make timeout the same as for interrupt based functions */ + long timeout = 2 * DEF_TIMEOUT; + + /* + * Wait for the bus to become free. + */ + while (timeout-- && readl(_ISR(i2c)) & (ISR_IBB | ISR_UB)) { + udelay(1000); + show_state(i2c); + } + + if (timeout < 0) { + show_state(i2c); + dev_err(&i2c->adap.dev, + "i2c_pxa: timeout waiting for bus free\n"); + return I2C_RETRY; + } + + /* + * Set master mode. + */ + writel(readl(_ICR(i2c)) | ICR_SCLE, _ICR(i2c)); + + return 0; +} + +static int i2c_pxa_do_pio_xfer(struct pxa_i2c *i2c, + struct i2c_msg *msg, int num) +{ + unsigned long timeout = 500000; /* 5 seconds */ + int ret = 0; + + ret = i2c_pxa_pio_set_master(i2c); + if (ret) + goto out; + + i2c->msg = msg; + i2c->msg_num = num; + i2c->msg_idx = 0; + i2c->msg_ptr = 0; + i2c->irqlogidx = 0; + + i2c_pxa_start_message(i2c); + + while (i2c->msg_num > 0 && --timeout) { + i2c_pxa_handler(0, i2c); + udelay(10); + } + + i2c_pxa_stop_message(i2c); + + /* + * We place the return code in i2c->msg_idx. + */ + ret = i2c->msg_idx; + +out: + if (timeout == 0) { + i2c_pxa_scream_blue_murder(i2c, "timeout"); + ret = I2C_RETRY; + } + + return ret; +} + +static int i2c_pxa_pio_xfer(struct i2c_adapter *adap, + struct i2c_msg msgs[], int num) +{ + struct pxa_i2c *i2c = adap->algo_data; + int ret, i; + + /* If the I2C controller is disabled we need to reset it + (probably due to a suspend/resume destroying state). We do + this here as we can then avoid worrying about resuming the + controller before its users. */ + if (!(readl(_ICR(i2c)) & ICR_IUE)) + i2c_pxa_reset(i2c); + + for (i = adap->retries; i >= 0; i--) { + ret = i2c_pxa_do_pio_xfer(i2c, msgs, num); + if (ret != I2C_RETRY) + goto out; + + if (i2c_debug) + dev_dbg(&adap->dev, "Retrying transmission\n"); + udelay(100); + } + i2c_pxa_scream_blue_murder(i2c, "exhausted retries"); + ret = -EREMOTEIO; + out: + i2c_pxa_set_slave(i2c, ret); + return ret; +} + static const struct i2c_algorithm i2c_pxa_pio_algorithm = { .master_xfer = i2c_pxa_pio_xfer, .functionality = i2c_pxa_functionality, -- GitLab From 940695aa36f18f8cd5fc703db601b4cd2662a055 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 27 Apr 2020 19:48:56 +0100 Subject: [PATCH 0274/1971] i2c: pxa: re-arrange register field definitions Arrange the register field definitions to be grouped together, rather than the Armada-3700 definitions being separated from the rest of the definitions. 
Signed-off-by: Russell King Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-pxa.c | 113 ++++++++++++++++------------------- 1 file changed, 53 insertions(+), 60 deletions(-) diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c index bac1f5578f0e5..e018cee8f559a 100644 --- a/drivers/i2c/busses/i2c-pxa.c +++ b/drivers/i2c/busses/i2c-pxa.c @@ -32,6 +32,56 @@ #include #include +/* I2C register field definitions */ +#define ICR_START (1 << 0) /* start bit */ +#define ICR_STOP (1 << 1) /* stop bit */ +#define ICR_ACKNAK (1 << 2) /* send ACK(0) or NAK(1) */ +#define ICR_TB (1 << 3) /* transfer byte bit */ +#define ICR_MA (1 << 4) /* master abort */ +#define ICR_SCLE (1 << 5) /* master clock enable */ +#define ICR_IUE (1 << 6) /* unit enable */ +#define ICR_GCD (1 << 7) /* general call disable */ +#define ICR_ITEIE (1 << 8) /* enable tx interrupts */ +#define ICR_IRFIE (1 << 9) /* enable rx interrupts */ +#define ICR_BEIE (1 << 10) /* enable bus error ints */ +#define ICR_SSDIE (1 << 11) /* slave STOP detected int enable */ +#define ICR_ALDIE (1 << 12) /* enable arbitration interrupt */ +#define ICR_SADIE (1 << 13) /* slave address detected int enable */ +#define ICR_UR (1 << 14) /* unit reset */ +#define ICR_FM (1 << 15) /* fast mode */ +#define ICR_HS (1 << 16) /* High Speed mode */ +#define ICR_A3700_FM (1 << 16) /* fast mode for armada-3700 */ +#define ICR_A3700_HS (1 << 17) /* high speed mode for armada-3700 */ +#define ICR_GPIOEN (1 << 19) /* enable GPIO mode for SCL in HS */ + +#define ISR_RWM (1 << 0) /* read/write mode */ +#define ISR_ACKNAK (1 << 1) /* ack/nak status */ +#define ISR_UB (1 << 2) /* unit busy */ +#define ISR_IBB (1 << 3) /* bus busy */ +#define ISR_SSD (1 << 4) /* slave stop detected */ +#define ISR_ALD (1 << 5) /* arbitration loss detected */ +#define ISR_ITE (1 << 6) /* tx buffer empty */ +#define ISR_IRF (1 << 7) /* rx buffer full */ +#define ISR_GCAD (1 << 8) /* general call address detected */ +#define ISR_SAD (1 << 9) /* slave address detected */ +#define ISR_BED (1 << 10) /* bus error no ACK/NAK */ + +#define ILCR_SLV_SHIFT 0 +#define ILCR_SLV_MASK (0x1FF << ILCR_SLV_SHIFT) +#define ILCR_FLV_SHIFT 9 +#define ILCR_FLV_MASK (0x1FF << ILCR_FLV_SHIFT) +#define ILCR_HLVL_SHIFT 18 +#define ILCR_HLVL_MASK (0x1FF << ILCR_HLVL_SHIFT) +#define ILCR_HLVH_SHIFT 27 +#define ILCR_HLVH_MASK (0x1F << ILCR_HLVH_SHIFT) + +#define IWCR_CNT_SHIFT 0 +#define IWCR_CNT_MASK (0x1F << IWCR_CNT_SHIFT) +#define IWCR_HS_CNT1_SHIFT 5 +#define IWCR_HS_CNT1_MASK (0x1F << IWCR_HS_CNT1_SHIFT) +#define IWCR_HS_CNT2_SHIFT 10 +#define IWCR_HS_CNT2_MASK (0x1F << IWCR_HS_CNT2_SHIFT) + struct pxa_reg_layout { u32 ibmr; u32 idbr; @@ -52,12 +102,7 @@ enum pxa_i2c_types { REGS_A3700, }; -#define ICR_BUSMODE_FM (1 << 16) /* shifted fast mode for armada-3700 */ -#define ICR_BUSMODE_HS (1 << 17) /* shifted high speed mode for armada-3700 */ - -/* - * I2C registers definitions - */ +/* I2C register layout definitions */ static struct pxa_reg_layout pxa_reg_layout[] = { [REGS_PXA2XX] = { .ibmr = 0x00, @@ -95,8 +140,8 @@ static struct pxa_reg_layout pxa_reg_layout[] = { .icr = 0x08, .isr = 0x0c, .isar = 0x10, - .fm = ICR_BUSMODE_FM, - .hs = ICR_BUSMODE_HS, + .fm = ICR_A3700_FM, + .hs = ICR_A3700_HS, }, }; @@ -110,58 +155,6 @@ static const struct platform_device_id i2c_pxa_id_table[] = { }; MODULE_DEVICE_TABLE(platform, i2c_pxa_id_table); -/* - * I2C bit definitions - */ - -#define ICR_START (1 << 0) /* start bit */ -#define ICR_STOP (1 << 1) /* stop bit */ -#define ICR_ACKNAK (1 << 2) 
/* send ACK(0) or NAK(1) */ -#define ICR_TB (1 << 3) /* transfer byte bit */ -#define ICR_MA (1 << 4) /* master abort */ -#define ICR_SCLE (1 << 5) /* master clock enable */ -#define ICR_IUE (1 << 6) /* unit enable */ -#define ICR_GCD (1 << 7) /* general call disable */ -#define ICR_ITEIE (1 << 8) /* enable tx interrupts */ -#define ICR_IRFIE (1 << 9) /* enable rx interrupts */ -#define ICR_BEIE (1 << 10) /* enable bus error ints */ -#define ICR_SSDIE (1 << 11) /* slave STOP detected int enable */ -#define ICR_ALDIE (1 << 12) /* enable arbitration interrupt */ -#define ICR_SADIE (1 << 13) /* slave address detected int enable */ -#define ICR_UR (1 << 14) /* unit reset */ -#define ICR_FM (1 << 15) /* fast mode */ -#define ICR_HS (1 << 16) /* High Speed mode */ -#define ICR_GPIOEN (1 << 19) /* enable GPIO mode for SCL in HS */ - -#define ISR_RWM (1 << 0) /* read/write mode */ -#define ISR_ACKNAK (1 << 1) /* ack/nak status */ -#define ISR_UB (1 << 2) /* unit busy */ -#define ISR_IBB (1 << 3) /* bus busy */ -#define ISR_SSD (1 << 4) /* slave stop detected */ -#define ISR_ALD (1 << 5) /* arbitration loss detected */ -#define ISR_ITE (1 << 6) /* tx buffer empty */ -#define ISR_IRF (1 << 7) /* rx buffer full */ -#define ISR_GCAD (1 << 8) /* general call address detected */ -#define ISR_SAD (1 << 9) /* slave address detected */ -#define ISR_BED (1 << 10) /* bus error no ACK/NAK */ - -/* bit field shift & mask */ -#define ILCR_SLV_SHIFT 0 -#define ILCR_SLV_MASK (0x1FF << ILCR_SLV_SHIFT) -#define ILCR_FLV_SHIFT 9 -#define ILCR_FLV_MASK (0x1FF << ILCR_FLV_SHIFT) -#define ILCR_HLVL_SHIFT 18 -#define ILCR_HLVL_MASK (0x1FF << ILCR_HLVL_SHIFT) -#define ILCR_HLVH_SHIFT 27 -#define ILCR_HLVH_MASK (0x1F << ILCR_HLVH_SHIFT) - -#define IWCR_CNT_SHIFT 0 -#define IWCR_CNT_MASK (0x1F << IWCR_CNT_SHIFT) -#define IWCR_HS_CNT1_SHIFT 5 -#define IWCR_HS_CNT1_MASK (0x1F << IWCR_HS_CNT1_SHIFT) -#define IWCR_HS_CNT2_SHIFT 10 -#define IWCR_HS_CNT2_MASK (0x1F << IWCR_HS_CNT2_SHIFT) - struct pxa_i2c { spinlock_t lock; wait_queue_head_t wait; -- GitLab From f8e5d3cb31cbebce6955dda462710b4c6f6d1c5a Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 27 Apr 2020 19:49:01 +0100 Subject: [PATCH 0275/1971] i2c: pxa: add and use definitions for IBMR register Add definitions for the bits in the IBMR register, and use them in the code. This improves readability. Signed-off-by: Russell King Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-pxa.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c index e018cee8f559a..6af6c72456463 100644 --- a/drivers/i2c/busses/i2c-pxa.c +++ b/drivers/i2c/busses/i2c-pxa.c @@ -33,6 +33,9 @@ #include /* I2C register field definitions */ +#define IBMR_SDAS (1 << 0) +#define IBMR_SCLS (1 << 1) + #define ICR_START (1 << 0) /* start bit */ #define ICR_STOP (1 << 1) /* stop bit */ #define ICR_ACKNAK (1 << 2) /* send ACK(0) or NAK(1) */ @@ -334,7 +337,7 @@ static void i2c_pxa_abort(struct pxa_i2c *i2c) return; } - while ((i > 0) && (readl(_IBMR(i2c)) & 0x1) == 0) { + while ((i > 0) && (readl(_IBMR(i2c)) & IBMR_SDAS) == 0) { unsigned long icr = readl(_ICR(i2c)); icr &= ~ICR_START; @@ -389,7 +392,8 @@ static int i2c_pxa_wait_master(struct pxa_i2c *i2c) * quick check of the i2c lines themselves to ensure they've * gone high... 
*/ - if ((readl(_ISR(i2c)) & (ISR_UB | ISR_IBB)) == 0 && readl(_IBMR(i2c)) == 3) { + if ((readl(_ISR(i2c)) & (ISR_UB | ISR_IBB)) == 0 && + readl(_IBMR(i2c)) == (IBMR_SCLS | IBMR_SDAS)) { if (i2c_debug > 0) dev_dbg(&i2c->adap.dev, "%s: done\n", __func__); return 1; @@ -584,7 +588,7 @@ static void i2c_pxa_slave_start(struct pxa_i2c *i2c, u32 isr) timeout = 0x10000; while (1) { - if ((readl(_IBMR(i2c)) & 2) == 2) + if ((readl(_IBMR(i2c)) & IBMR_SCLS) == IBMR_SCLS) break; timeout--; @@ -679,7 +683,7 @@ static void i2c_pxa_slave_start(struct pxa_i2c *i2c, u32 isr) timeout = 0x10000; while (1) { - if ((readl(_IBMR(i2c)) & 2) == 2) + if ((readl(_IBMR(i2c)) & IBMR_SCLS) == IBMR_SCLS) break; timeout--; -- GitLab From ee478936ddb779c1081b6683734da42cebbd5df1 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 27 Apr 2020 19:49:07 +0100 Subject: [PATCH 0276/1971] i2c: pxa: always set fm and hs members for each type Always set the fm and hs members of struct pxa_reg_layout. These members are already taking space, we don't need code as well. Signed-off-by: Russell King Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-pxa.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c index 6af6c72456463..3286b5f8bf178 100644 --- a/drivers/i2c/busses/i2c-pxa.c +++ b/drivers/i2c/busses/i2c-pxa.c @@ -113,6 +113,8 @@ static struct pxa_reg_layout pxa_reg_layout[] = { .icr = 0x10, .isr = 0x18, .isar = 0x20, + .fm = ICR_FM, + .hs = ICR_HS, }, [REGS_PXA3XX] = { .ibmr = 0x00, @@ -120,6 +122,8 @@ static struct pxa_reg_layout pxa_reg_layout[] = { .icr = 0x08, .isr = 0x0c, .isar = 0x10, + .fm = ICR_FM, + .hs = ICR_HS, }, [REGS_CE4100] = { .ibmr = 0x14, @@ -127,6 +131,8 @@ static struct pxa_reg_layout pxa_reg_layout[] = { .icr = 0x00, .isr = 0x04, /* no isar register */ + .fm = ICR_FM, + .hs = ICR_HS, }, [REGS_PXA910] = { .ibmr = 0x00, @@ -136,6 +142,8 @@ static struct pxa_reg_layout pxa_reg_layout[] = { .isar = 0x20, .ilcr = 0x28, .iwcr = 0x30, + .fm = ICR_FM, + .hs = ICR_HS, }, [REGS_A3700] = { .ibmr = 0x00, @@ -1279,8 +1287,8 @@ static int i2c_pxa_probe(struct platform_device *dev) i2c->reg_idbr = i2c->reg_base + pxa_reg_layout[i2c_type].idbr; i2c->reg_icr = i2c->reg_base + pxa_reg_layout[i2c_type].icr; i2c->reg_isr = i2c->reg_base + pxa_reg_layout[i2c_type].isr; - i2c->fm_mask = pxa_reg_layout[i2c_type].fm ? : ICR_FM; - i2c->hs_mask = pxa_reg_layout[i2c_type].hs ? : ICR_HS; + i2c->fm_mask = pxa_reg_layout[i2c_type].fm; + i2c->hs_mask = pxa_reg_layout[i2c_type].hs; if (i2c_type != REGS_CE4100) i2c->reg_isar = i2c->reg_base + pxa_reg_layout[i2c_type].isar; -- GitLab From 79622f372b8679916bf4e38aabc6df2659d1e109 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 27 Apr 2020 19:49:12 +0100 Subject: [PATCH 0277/1971] i2c: pxa: move private definitions to i2c-pxa.c Move driver-private definitions out of the i2c-pxa.h platform data header file into the driver itself. Nothing outside of the driver makes use of these constants. 
Signed-off-by: Russell King Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-pxa.c | 43 ++++++++++++++++++++++++ include/linux/platform_data/i2c-pxa.h | 48 --------------------------- 2 files changed, 43 insertions(+), 48 deletions(-) diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c index 3286b5f8bf178..9c811a09eac97 100644 --- a/drivers/i2c/busses/i2c-pxa.c +++ b/drivers/i2c/busses/i2c-pxa.c @@ -85,6 +85,49 @@ #define IWCR_HS_CNT2_SHIFT 10 #define IWCR_HS_CNT2_MASK (0x1F << IWCR_HS_CNT2_SHIFT) +/* need a longer timeout if we're dealing with the fact we may well be + * looking at a multi-master environment + */ +#define DEF_TIMEOUT 32 + +#define BUS_ERROR (-EREMOTEIO) +#define XFER_NAKED (-ECONNREFUSED) +#define I2C_RETRY (-2000) /* an error has occurred retry transmit */ + +/* ICR initialize bit values + * + * 15 FM 0 (100 kHz operation) + * 14 UR 0 (No unit reset) + * 13 SADIE 0 (Disables the unit from interrupting on slave addresses + * matching its slave address) + * 12 ALDIE 0 (Disables the unit from interrupt when it loses arbitration + * in master mode) + * 11 SSDIE 0 (Disables interrupts from a slave stop detected, in slave mode) + * 10 BEIE 1 (Enable interrupts from detected bus errors, no ACK sent) + * 9 IRFIE 1 (Enable interrupts from full buffer received) + * 8 ITEIE 1 (Enables the I2C unit to interrupt when transmit buffer empty) + * 7 GCD 1 (Disables i2c unit response to general call messages as a slave) + * 6 IUE 0 (Disable unit until we change settings) + * 5 SCLE 1 (Enables the i2c clock output for master mode (drives SCL) + * 4 MA 0 (Only send stop with the ICR stop bit) + * 3 TB 0 (We are not transmitting a byte initially) + * 2 ACKNAK 0 (Send an ACK after the unit receives a byte) + * 1 STOP 0 (Do not send a STOP) + * 0 START 0 (Do not send a START) + */ +#define I2C_ICR_INIT (ICR_BEIE | ICR_IRFIE | ICR_ITEIE | ICR_GCD | ICR_SCLE) + +/* I2C status register init values + * + * 10 BED 1 (Clear bus error detected) + * 9 SAD 1 (Clear slave address detected) + * 7 IRF 1 (Clear IDBR Receive Full) + * 6 ITE 1 (Clear IDBR Transmit Empty) + * 5 ALD 1 (Clear Arbitration Loss Detected) + * 4 SSD 1 (Clear Slave Stop Detected) + */ +#define I2C_ISR_INIT 0x7FF /* status register init */ + struct pxa_reg_layout { u32 ibmr; u32 idbr; diff --git a/include/linux/platform_data/i2c-pxa.h b/include/linux/platform_data/i2c-pxa.h index 6a9b28399b397..24953981bd9fe 100644 --- a/include/linux/platform_data/i2c-pxa.h +++ b/include/linux/platform_data/i2c-pxa.h @@ -7,54 +7,6 @@ #ifndef _I2C_PXA_H_ #define _I2C_PXA_H_ -#if 0 -#define DEF_TIMEOUT 3 -#else -/* need a longer timeout if we're dealing with the fact we may well be - * looking at a multi-master environment -*/ -#define DEF_TIMEOUT 32 -#endif - -#define BUS_ERROR (-EREMOTEIO) -#define XFER_NAKED (-ECONNREFUSED) -#define I2C_RETRY (-2000) /* an error has occurred retry transmit */ - -/* ICR initialize bit values -* -* 15. FM 0 (100 Khz operation) -* 14. UR 0 (No unit reset) -* 13. SADIE 0 (Disables the unit from interrupting on slave addresses -* matching its slave address) -* 12. ALDIE 0 (Disables the unit from interrupt when it loses arbitration -* in master mode) -* 11. SSDIE 0 (Disables interrupts from a slave stop detected, in slave mode) -* 10. BEIE 1 (Enable interrupts from detected bus errors, no ACK sent) -* 9. IRFIE 1 (Enable interrupts from full buffer received) -* 8. ITEIE 1 (Enables the I2C unit to interrupt when transmit buffer empty) -* 7. 
GCD 1 (Disables i2c unit response to general call messages as a slave) -* 6. IUE 0 (Disable unit until we change settings) -* 5. SCLE 1 (Enables the i2c clock output for master mode (drives SCL) -* 4. MA 0 (Only send stop with the ICR stop bit) -* 3. TB 0 (We are not transmitting a byte initially) -* 2. ACKNAK 0 (Send an ACK after the unit receives a byte) -* 1. STOP 0 (Do not send a STOP) -* 0. START 0 (Do not send a START) -* -*/ -#define I2C_ICR_INIT (ICR_BEIE | ICR_IRFIE | ICR_ITEIE | ICR_GCD | ICR_SCLE) - -/* I2C status register init values - * - * 10. BED 1 (Clear bus error detected) - * 9. SAD 1 (Clear slave address detected) - * 7. IRF 1 (Clear IDBR Receive Full) - * 6. ITE 1 (Clear IDBR Transmit Empty) - * 5. ALD 1 (Clear Arbitration Loss Detected) - * 4. SSD 1 (Clear Slave Stop Detected) - */ -#define I2C_ISR_INIT 0x7FF /* status register init */ - struct i2c_pxa_platform_data { unsigned int class; unsigned int use_pio :1; -- GitLab From 70aee287cf4535d19ea3983b2af3414119963253 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 27 Apr 2020 19:49:17 +0100 Subject: [PATCH 0278/1971] i2c: pxa: move DT IDs along side platform IDs Move the ID tables into one place, near the device dependent data. Signed-off-by: Russell King Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-pxa.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c index 9c811a09eac97..902cda4ef8c9d 100644 --- a/drivers/i2c/busses/i2c-pxa.c +++ b/drivers/i2c/busses/i2c-pxa.c @@ -199,6 +199,15 @@ static struct pxa_reg_layout pxa_reg_layout[] = { }, }; +static const struct of_device_id i2c_pxa_dt_ids[] = { + { .compatible = "mrvl,pxa-i2c", .data = (void *)REGS_PXA2XX }, + { .compatible = "mrvl,pwri2c", .data = (void *)REGS_PXA3XX }, + { .compatible = "mrvl,mmp-twsi", .data = (void *)REGS_PXA910 }, + { .compatible = "marvell,armada-3700-i2c", .data = (void *)REGS_A3700 }, + {} +}; +MODULE_DEVICE_TABLE(of, i2c_pxa_dt_ids); + static const struct platform_device_id i2c_pxa_id_table[] = { { "pxa2xx-i2c", REGS_PXA2XX }, { "pxa3xx-pwri2c", REGS_PXA3XX }, @@ -1230,15 +1239,6 @@ static const struct i2c_algorithm i2c_pxa_pio_algorithm = { #endif }; -static const struct of_device_id i2c_pxa_dt_ids[] = { - { .compatible = "mrvl,pxa-i2c", .data = (void *)REGS_PXA2XX }, - { .compatible = "mrvl,pwri2c", .data = (void *)REGS_PXA3XX }, - { .compatible = "mrvl,mmp-twsi", .data = (void *)REGS_PXA910 }, - { .compatible = "marvell,armada-3700-i2c", .data = (void *)REGS_A3700 }, - {} -}; -MODULE_DEVICE_TABLE(of, i2c_pxa_dt_ids); - static int i2c_pxa_probe_dt(struct platform_device *pdev, struct pxa_i2c *i2c, enum pxa_i2c_types *i2c_types) { -- GitLab From 88b73ee7ca4c90baf136ed5a8377fc5a9b73ac08 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 27 Apr 2020 19:49:22 +0100 Subject: [PATCH 0279/1971] i2c: pxa: fix i2c_pxa_scream_blue_murder() debug output The IRQ log output is supposed to appear on a single line. However, commit 3a2dc1677b60 ("i2c: pxa: Update debug function to dump more info on error") resulted in it being printed one-entry-per-line, which is excessively long. Fixing this is not a trivial matter; using pr_cont() doesn't work as the previous dev_dbg() may not have been compiled in, or may be dynamic. Since the rest of this function output is at error level, and is also debug output, promote this to error level as well to avoid this problem. Reduce the number of always zero prefix digits to save screen real- estate. 
Signed-off-by: Russell King Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-pxa.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c index 902cda4ef8c9d..41047abe67bff 100644 --- a/drivers/i2c/busses/i2c-pxa.c +++ b/drivers/i2c/busses/i2c-pxa.c @@ -363,11 +363,10 @@ static void i2c_pxa_scream_blue_murder(struct pxa_i2c *i2c, const char *why) dev_err(dev, "IBMR: %08x IDBR: %08x ICR: %08x ISR: %08x\n", readl(_IBMR(i2c)), readl(_IDBR(i2c)), readl(_ICR(i2c)), readl(_ISR(i2c))); - dev_dbg(dev, "log: "); + dev_err(dev, "log:"); for (i = 0; i < i2c->irqlogidx; i++) - pr_debug("[%08x:%08x] ", i2c->isrlog[i], i2c->icrlog[i]); - - pr_debug("\n"); + pr_cont(" [%03x:%05x]", i2c->isrlog[i], i2c->icrlog[i]); + pr_cont("\n"); } #else /* ifdef DEBUG */ -- GitLab From bb82ba690757fb483b5848d3e2f577a102e8a9c8 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 27 Apr 2020 19:49:27 +0100 Subject: [PATCH 0280/1971] i2c: pxa: clean up decode_bits() Clean up decode_bits() to use pr_cont(), and move the newline into the function rather than at its two callsites. Avoid printing an unnecessary space before the newline. Signed-off-by: Russell King Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-pxa.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c index 41047abe67bff..7aa35def464b1 100644 --- a/drivers/i2c/busses/i2c-pxa.c +++ b/drivers/i2c/busses/i2c-pxa.c @@ -286,13 +286,14 @@ struct bits { static inline void decode_bits(const char *prefix, const struct bits *bits, int num, u32 val) { - printk("%s %08x: ", prefix, val); + printk("%s %08x:", prefix, val); while (num--) { const char *str = val & bits->mask ? bits->set : bits->unset; if (str) - printk("%s ", str); + pr_cont(" %s", str); bits++; } + pr_cont("\n"); } static const struct bits isr_bits[] = { @@ -312,7 +313,6 @@ static const struct bits isr_bits[] = { static void decode_ISR(unsigned int val) { decode_bits(KERN_DEBUG "ISR", isr_bits, ARRAY_SIZE(isr_bits), val); - printk("\n"); } static const struct bits icr_bits[] = { @@ -337,7 +337,6 @@ static const struct bits icr_bits[] = { static void decode_ICR(unsigned int val) { decode_bits(KERN_DEBUG "ICR", icr_bits, ARRAY_SIZE(icr_bits), val); - printk("\n"); } #endif -- GitLab From e896be5ad1017aa710f8b979036a43d3e5b2f38d Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 27 Apr 2020 19:49:32 +0100 Subject: [PATCH 0281/1971] i2c: pxa: fix i2c_pxa_wait_bus_not_busy() boundary condition Fix i2c_pxa_wait_bus_not_busy()'s boundary conditions, so that a coincidental success and timeout results in the function returning success. 
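Sketched in isolation (illustrative only; bus_is_busy() is a hypothetical status read, not the driver's code), the pattern being fixed looks like this:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

bool bus_is_busy(void);	/* hypothetical status read, for illustration */

static int example_wait_not_busy(void)
{
	int timeout = 32;

	while (timeout-- && bus_is_busy())
		msleep(2);

	/*
	 * When timeout-- reaches zero the loop exits without evaluating
	 * bus_is_busy() again, so a bus that is in fact free at that
	 * moment is still reported as a timeout by "timeout < 0".
	 */
	return timeout < 0 ? -ETIMEDOUT : 0;
}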
Signed-off-by: Russell King Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-pxa.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c index 7aa35def464b1..c1e50c0b9756b 100644 --- a/drivers/i2c/busses/i2c-pxa.c +++ b/drivers/i2c/busses/i2c-pxa.c @@ -416,19 +416,26 @@ static void i2c_pxa_abort(struct pxa_i2c *i2c) static int i2c_pxa_wait_bus_not_busy(struct pxa_i2c *i2c) { int timeout = DEF_TIMEOUT; + u32 isr; - while (timeout-- && readl(_ISR(i2c)) & (ISR_IBB | ISR_UB)) { - if ((readl(_ISR(i2c)) & ISR_SAD) != 0) + while (1) { + isr = readl(_ISR(i2c)); + if (!(isr & (ISR_IBB | ISR_UB))) + return 0; + + if (isr & ISR_SAD) timeout += 4; + if (!timeout--) + break; + msleep(2); show_state(i2c); } - if (timeout < 0) - show_state(i2c); + show_state(i2c); - return timeout < 0 ? I2C_RETRY : 0; + return I2C_RETRY; } static int i2c_pxa_wait_master(struct pxa_i2c *i2c) -- GitLab From 73371d5f313aa056a451f84b652623da6054f89b Mon Sep 17 00:00:00 2001 From: Codrin Ciubotariu Date: Tue, 25 Feb 2020 17:50:09 +0200 Subject: [PATCH 0282/1971] i2c: at91: Send bus clear command if SDA is down After a transfer timeout, some faulty I2C slave devices might hold down the SDA pin. We can generate a bus clear command, hoping that the slave might release the pins. If the CLEAR command is not supported, we will use gpio recovery, if available, to reset the bus. Signed-off-by: Codrin Ciubotariu Acked-by: Ludovic Desroches Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-at91-core.c | 2 ++ drivers/i2c/busses/i2c-at91-master.c | 49 ++++++++++++++++++++++++---- drivers/i2c/busses/i2c-at91.h | 7 +++- 3 files changed, 50 insertions(+), 8 deletions(-) diff --git a/drivers/i2c/busses/i2c-at91-core.c b/drivers/i2c/busses/i2c-at91-core.c index 3da1a8acecb5b..e14edd2361085 100644 --- a/drivers/i2c/busses/i2c-at91-core.c +++ b/drivers/i2c/busses/i2c-at91-core.c @@ -131,6 +131,7 @@ static struct at91_twi_pdata sama5d2_config = { .has_dig_filtr = true, .has_adv_dig_filtr = true, .has_ana_filtr = true, + .has_clear_cmd = false, /* due to errata, CLEAR cmd is not working */ }; static struct at91_twi_pdata sam9x60_config = { @@ -142,6 +143,7 @@ static struct at91_twi_pdata sam9x60_config = { .has_dig_filtr = true, .has_adv_dig_filtr = true, .has_ana_filtr = true, + .has_clear_cmd = true, }; static const struct of_device_id atmel_twi_dt_ids[] = { diff --git a/drivers/i2c/busses/i2c-at91-master.c b/drivers/i2c/busses/i2c-at91-master.c index 0aba51a7df327..776e95962ab6c 100644 --- a/drivers/i2c/busses/i2c-at91-master.c +++ b/drivers/i2c/busses/i2c-at91-master.c @@ -480,7 +480,6 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev) unsigned long time_left; bool has_unre_flag = dev->pdata->has_unre_flag; bool has_alt_cmd = dev->pdata->has_alt_cmd; - struct i2c_bus_recovery_info *rinfo = &dev->rinfo; /* * WARNING: the TXCOMP bit in the Status Register is NOT a clear on @@ -641,11 +640,12 @@ error: AT91_TWI_THRCLR | AT91_TWI_LOCKCLR); } - if (rinfo->get_sda && !(rinfo->get_sda(&dev->adapter))) { - dev_dbg(dev->dev, - "SDA is down; clear bus using gpio\n"); - i2c_recover_bus(&dev->adapter); - } + /* + * some faulty I2C slave devices might hold SDA down; + * we can send a bus clear command, hoping that the pins will be + * released + */ + i2c_recover_bus(&dev->adapter); return ret; } @@ -830,7 +830,7 @@ static void at91_unprepare_twi_recovery(struct i2c_adapter *adap) pinctrl_select_state(dev->pinctrl, dev->pinctrl_pins_default); } 
-static int at91_init_twi_recovery_info(struct platform_device *pdev, +static int at91_init_twi_recovery_gpio(struct platform_device *pdev, struct at91_twi_dev *dev) { struct i2c_bus_recovery_info *rinfo = &dev->rinfo; @@ -880,6 +880,41 @@ static int at91_init_twi_recovery_info(struct platform_device *pdev, return 0; } +static int at91_twi_recover_bus_cmd(struct i2c_adapter *adap) +{ + struct at91_twi_dev *dev = i2c_get_adapdata(adap); + + dev->transfer_status |= at91_twi_read(dev, AT91_TWI_SR); + if (!(dev->transfer_status & AT91_TWI_SDA)) { + dev_dbg(dev->dev, "SDA is down; sending bus clear command\n"); + if (dev->use_alt_cmd) { + unsigned int acr; + + acr = at91_twi_read(dev, AT91_TWI_ACR); + acr &= ~AT91_TWI_ACR_DATAL_MASK; + at91_twi_write(dev, AT91_TWI_ACR, acr); + } + at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_CLEAR); + } + + return 0; +} + +static int at91_init_twi_recovery_info(struct platform_device *pdev, + struct at91_twi_dev *dev) +{ + struct i2c_bus_recovery_info *rinfo = &dev->rinfo; + bool has_clear_cmd = dev->pdata->has_clear_cmd; + + if (!has_clear_cmd) + return at91_init_twi_recovery_gpio(pdev, dev); + + rinfo->recover_bus = at91_twi_recover_bus_cmd; + dev->adapter.bus_recovery_info = rinfo; + + return 0; +} + int at91_twi_probe_master(struct platform_device *pdev, u32 phy_addr, struct at91_twi_dev *dev) { diff --git a/drivers/i2c/busses/i2c-at91.h b/drivers/i2c/busses/i2c-at91.h index f57a6cab96b40..7e7b4955ca7f7 100644 --- a/drivers/i2c/busses/i2c-at91.h +++ b/drivers/i2c/busses/i2c-at91.h @@ -36,6 +36,7 @@ #define AT91_TWI_SVDIS BIT(5) /* Slave Transfer Disable */ #define AT91_TWI_QUICK BIT(6) /* SMBus quick command */ #define AT91_TWI_SWRST BIT(7) /* Software Reset */ +#define AT91_TWI_CLEAR BIT(15) /* Bus clear command */ #define AT91_TWI_ACMEN BIT(16) /* Alternative Command Mode Enable */ #define AT91_TWI_ACMDIS BIT(17) /* Alternative Command Mode Disable */ #define AT91_TWI_THRCLR BIT(24) /* Transmit Holding Register Clear */ @@ -69,6 +70,8 @@ #define AT91_TWI_NACK BIT(8) /* Not Acknowledged */ #define AT91_TWI_EOSACC BIT(11) /* End Of Slave Access */ #define AT91_TWI_LOCK BIT(23) /* TWI Lock due to Frame Errors */ +#define AT91_TWI_SCL BIT(24) /* TWI SCL status */ +#define AT91_TWI_SDA BIT(25) /* TWI SDA status */ #define AT91_TWI_INT_MASK \ (AT91_TWI_TXCOMP | AT91_TWI_RXRDY | AT91_TWI_TXRDY | AT91_TWI_NACK \ @@ -81,7 +84,8 @@ #define AT91_TWI_THR 0x0034 /* Transmit Holding Register */ #define AT91_TWI_ACR 0x0040 /* Alternative Command Register */ -#define AT91_TWI_ACR_DATAL(len) ((len) & 0xff) +#define AT91_TWI_ACR_DATAL_MASK GENMASK(15, 0) +#define AT91_TWI_ACR_DATAL(len) ((len) & AT91_TWI_ACR_DATAL_MASK) #define AT91_TWI_ACR_DIR BIT(8) #define AT91_TWI_FILTR 0x0044 @@ -118,6 +122,7 @@ struct at91_twi_pdata { bool has_dig_filtr; bool has_adv_dig_filtr; bool has_ana_filtr; + bool has_clear_cmd; struct at_dma_slave dma_slave; }; -- GitLab From 782fe98b9350fd64b4d1a651a1da1d303e03b32c Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Tue, 5 May 2020 16:30:01 +0800 Subject: [PATCH 0283/1971] clk: Remove unused inline function clk_debug_reparent There's no callers in-tree anymore. 
Signed-off-by: YueHaibing Link: https://lkml.kernel.org/r/20200505083001.52564-1-yuehaibing@huawei.com Signed-off-by: Stephen Boyd --- drivers/clk/clk.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 39c59f063aa07..5501041c91fc6 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -3295,10 +3295,6 @@ static int __init clk_debug_init(void) late_initcall(clk_debug_init); #else static inline void clk_debug_register(struct clk_core *core) { } -static inline void clk_debug_reparent(struct clk_core *core, - struct clk_core *new_parent) -{ -} static inline void clk_debug_unregister(struct clk_core *core) { } -- GitLab From 4fe02fefe7a6ca8914d73dfafaad5053aa5d1ef9 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sun, 3 May 2020 21:03:27 +0200 Subject: [PATCH 0284/1971] clk: clk-xgene: Fix a typo in Kconfig s/Sypport/Support Signed-off-by: Christophe JAILLET Link: https://lkml.kernel.org/r/20200503190327.153249-1-christophe.jaillet@wanadoo.fr Signed-off-by: Stephen Boyd --- drivers/clk/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index bcb257baed06d..99ddc14784931 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -252,7 +252,7 @@ config COMMON_CLK_XGENE default ARCH_XGENE depends on ARM64 || COMPILE_TEST ---help--- - Sypport for the APM X-Gene SoC reference, PLL, and device clocks. + Support for the APM X-Gene SoC reference, PLL, and device clocks. config COMMON_CLK_LOCHNAGAR tristate "Cirrus Logic Lochnagar clock driver" -- GitLab From 38d6d848845ebbbe8f3354a972e815a073bb61f7 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Wed, 8 Apr 2020 23:44:07 -0700 Subject: [PATCH 0285/1971] ARM: Remove redundant COMMON_CLK selects MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The mulitplatform config already selects COMMON_CLK, so selecting it again is not useful. Remove these selects from ARM platforms that are part of the multiplatform build. 
Reviewed-by: "Andreas Färber" # actions Acked-by: Manivannan Sadhasivam # actions Cc: Russell King Cc: Alexander Shiyan Cc: Lubomir Rintel Cc: Cc: Arnd Bergmann Signed-off-by: Stephen Boyd Reviewed-by: Arnd Bergmann Link: https://lkml.kernel.org/r/20200409064416.83340-2-sboyd@kernel.org --- arch/arm/mach-actions/Kconfig | 1 - arch/arm/mach-clps711x/Kconfig | 1 - arch/arm/mach-mmp/Kconfig | 1 - 3 files changed, 3 deletions(-) diff --git a/arch/arm/mach-actions/Kconfig b/arch/arm/mach-actions/Kconfig index b5e0ac965ec0d..00fb4babccdd9 100644 --- a/arch/arm/mach-actions/Kconfig +++ b/arch/arm/mach-actions/Kconfig @@ -7,7 +7,6 @@ menuconfig ARCH_ACTIONS select ARM_GLOBAL_TIMER select CACHE_L2X0 select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK - select COMMON_CLK select GENERIC_IRQ_CHIP select HAVE_ARM_SCU if SMP select HAVE_ARM_TWD if SMP diff --git a/arch/arm/mach-clps711x/Kconfig b/arch/arm/mach-clps711x/Kconfig index fc9188b54dd66..ba497a2032e91 100644 --- a/arch/arm/mach-clps711x/Kconfig +++ b/arch/arm/mach-clps711x/Kconfig @@ -5,7 +5,6 @@ menuconfig ARCH_CLPS711X select AUTO_ZRELADDR select TIMER_OF select CLPS711X_TIMER - select COMMON_CLK select CPU_ARM720T select GENERIC_CLOCKEVENTS select GPIOLIB diff --git a/arch/arm/mach-mmp/Kconfig b/arch/arm/mach-mmp/Kconfig index b58a03b18bdef..6fe1550f43ec6 100644 --- a/arch/arm/mach-mmp/Kconfig +++ b/arch/arm/mach-mmp/Kconfig @@ -110,7 +110,6 @@ config MACH_MMP_DT depends on ARCH_MULTI_V5 select PINCTRL select PINCTRL_SINGLE - select COMMON_CLK select ARCH_HAS_RESET_CONTROLLER select CPU_MOHAWK help -- GitLab From e8bd633bc05278ffb4f4ad39050bd9a33acca947 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Wed, 8 Apr 2020 23:44:08 -0700 Subject: [PATCH 0286/1971] ARM: Remove redundant CLKDEV_LOOKUP selects These platforms select COMMON_CLK indirectly through use of the ARCH_MULTIPLATFORM config option that they depend on implicitly via some V7/V6/V5 multi platform config option. The COMMON_CLK config option already selects CLKDEV_LOOKUP so it's redundant to have this selected again. 
Cc: Tony Prisk Cc: Russell King Cc: Cc: Arnd Bergmann Signed-off-by: Stephen Boyd Reviewed-by: Arnd Bergmann Link: https://lkml.kernel.org/r/20200409064416.83340-3-sboyd@kernel.org --- arch/arm/Kconfig | 3 --- arch/arm/mach-vt8500/Kconfig | 1 - 2 files changed, 4 deletions(-) diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 66a04f6f47753..6b56a4bc3b9f5 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -434,7 +434,6 @@ config ARCH_PXA select ARM_CPU_SUSPEND if PM select AUTO_ZRELADDR select COMMON_CLK - select CLKDEV_LOOKUP select CLKSRC_PXA select CLKSRC_MMIO select TIMER_OF @@ -473,7 +472,6 @@ config ARCH_SA1100 bool "SA1100-based" select ARCH_MTD_XIP select ARCH_SPARSEMEM_ENABLE - select CLKDEV_LOOKUP select CLKSRC_MMIO select CLKSRC_PXA select TIMER_OF if OF @@ -494,7 +492,6 @@ config ARCH_SA1100 config ARCH_S3C24XX bool "Samsung S3C24XX SoCs" select ATAGS - select CLKDEV_LOOKUP select CLKSRC_SAMSUNG_PWM select GENERIC_CLOCKEVENTS select GPIO_SAMSUNG diff --git a/arch/arm/mach-vt8500/Kconfig b/arch/arm/mach-vt8500/Kconfig index 8841199058ea8..d01cdd9ad9c7c 100644 --- a/arch/arm/mach-vt8500/Kconfig +++ b/arch/arm/mach-vt8500/Kconfig @@ -2,7 +2,6 @@ config ARCH_VT8500 bool select GPIOLIB - select CLKDEV_LOOKUP select VT8500_TIMER select PINCTRL help -- GitLab From d823836ad182c1488d3a7beb1018c5558d765bf0 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Wed, 8 Apr 2020 23:44:09 -0700 Subject: [PATCH 0287/1971] arm64: tegra: Remove redundant CLKDEV_LOOKUP selects The arm64 architecture selects COMMON_CLK at the toplevel ARM64 config. The COMMON_CLK config option already selects CLKDEV_LOOKUP so it's redundant to have this selected again for the Tegra specific config. Cc: Paul Walmsley Acked-by: Thierry Reding Cc: Catalin Marinas Cc: Will Deacon Cc: Cc: Arnd Bergmann Signed-off-by: Stephen Boyd Reviewed-by: Arnd Bergmann Link: https://lkml.kernel.org/r/20200409064416.83340-4-sboyd@kernel.org --- arch/arm64/Kconfig.platforms | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms index 55d70cfe0f9e1..0c7fcdec6d7f4 100644 --- a/arch/arm64/Kconfig.platforms +++ b/arch/arm64/Kconfig.platforms @@ -235,7 +235,6 @@ config ARCH_TEGRA bool "NVIDIA Tegra SoC Family" select ARCH_HAS_RESET_CONTROLLER select ARM_GIC_PM - select CLKDEV_LOOKUP select CLKSRC_MMIO select TIMER_OF select GENERIC_CLOCKEVENTS -- GitLab From ba76c40dbe2e1f6c2db1d8077917d18279523b81 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Wed, 8 Apr 2020 23:44:10 -0700 Subject: [PATCH 0288/1971] h8300: Remove redundant CLKDEV_LOOKUP selects The h8300 architecture selects COMMON_CLK already, and the COMMON_CLK config option already selects CLKDEV_LOOKUP so it's redundant to have this selected again. 
Cc: Yoshinori Sato Cc: uclinux-h8-devel@lists.sourceforge.jp Cc: Arnd Bergmann Signed-off-by: Stephen Boyd Reviewed-by: Arnd Bergmann Link: https://lkml.kernel.org/r/20200409064416.83340-5-sboyd@kernel.org --- arch/h8300/Kconfig | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig index ec800e9d5aadf..d11666d538fea 100644 --- a/arch/h8300/Kconfig +++ b/arch/h8300/Kconfig @@ -13,7 +13,6 @@ config H8300 select GENERIC_CPU_DEVICES select MODULES_USE_ELF_RELA select GENERIC_CLOCKEVENTS - select CLKDEV_LOOKUP select COMMON_CLK select ARCH_WANT_FRAME_POINTERS select OF -- GitLab From b62bc0474b9084f7de1d69adbb9bde248bbdf117 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Wed, 8 Apr 2020 23:44:11 -0700 Subject: [PATCH 0289/1971] MIPS: Remove redundant CLKDEV_LOOKUP selects The ATH79 config selects COMMON_CLK already, and the COMMON_CLK config option already selects CLKDEV_LOOKUP, and CLKDEV_LOOKUP already selects HAVE_CLK so it's redundant to have these selected again. Cc: Thomas Bogendoerfer Cc: Signed-off-by: Stephen Boyd Reviewed-by: Arnd Bergmann Link: https://lkml.kernel.org/r/20200409064416.83340-6-sboyd@kernel.org --- arch/mips/Kconfig | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 690718b3701af..4386bac2c7ac7 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -209,9 +209,7 @@ config ATH79 select DMA_NONCOHERENT select GPIOLIB select PINCTRL - select HAVE_CLK select COMMON_CLK - select CLKDEV_LOOKUP select IRQ_MIPS_CPU select SYS_HAS_CPU_MIPS32_R2 select SYS_HAS_EARLY_PRINTK -- GitLab From 3fd2fdb311fab622ed95568e2a90cb02fe06227c Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Wed, 8 Apr 2020 23:44:12 -0700 Subject: [PATCH 0290/1971] mmc: meson-mx-sdio: Depend on OF_ADDRESS and not just OF Making COMMON_CLK a visible option causes the sparc allyesconfig to fail to build like so: sparc64-linux-ld: drivers/mmc/host/meson-mx-sdio.o: in function `meson_mx_mmc_remove': meson-mx-sdio.c:(.text+0x70): undefined reference to `of_platform_device_destroy' sparc64-linux-ld: drivers/mmc/host/meson-mx-sdio.o: in function `meson_mx_mmc_probe': meson-mx-sdio.c:(.text+0x9e4): undefined reference to `of_platform_device_create' sparc64-linux-ld: meson-mx-sdio.c:(.text+0xdd4): undefined reference to `of_platform_device_destroy' This is because the implementation of of_platform_device_destroy() is inside an #ifdef CONFIG_OF_ADDRESS section of drivers/of/platform.c. This driver already depends on OF being enabled, so let's tighten that constrain a little more so that it depends on OF_ADDRESS instead. This way we won't try to build this driver on platforms that don't have this function. Reported-by: kbuild test robot Cc: Neil Armstrong Cc: Ulf Hansson Signed-off-by: Stephen Boyd Link: https://lkml.kernel.org/r/20200409064416.83340-7-sboyd@kernel.org Acked-by: Ulf Hansson Reviewed-by: Arnd Bergmann --- drivers/mmc/host/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 462b5352fea75..a53ecb658c5c8 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -409,7 +409,7 @@ config MMC_MESON_MX_SDIO tristate "Amlogic Meson6/Meson8/Meson8b SD/MMC Host Controller support" depends on ARCH_MESON || COMPILE_TEST depends on COMMON_CLK - depends on OF + depends on OF_ADDRESS help This selects support for the SD/MMC Host Controller on Amlogic Meson6, Meson8 and Meson8b SoCs. 
-- GitLab From bbd7ffdbef6888459f301c5889f3b14ada38b913 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Wed, 8 Apr 2020 23:44:13 -0700 Subject: [PATCH 0291/1971] clk: Allow the common clk framework to be selectable Enable build testing and configuration control of the common clk framework so that more code coverage and testing can be done on the common clk framework across various architectures. This also nicely removes the requirement that architectures must select the framework when they don't use it in architecture code. There's one snag with doing this, and that's making sure that randconfig builds don't select this option when some architecture or platform implements 'struct clk' outside of the common clk framework. Introduce a new config option 'HAVE_LEGACY_CLK' to indicate those platforms that haven't migrated to the common clk framework and therefore shouldn't be allowed to select this new config option. Also add a note that we hope one day to remove this config entirely. Based on a patch by Mark Brown . Cc: Mark Brown Cc: Geert Uytterhoeven Cc: Mark Salter Cc: Aurelien Jacquiot Cc: Jiaxun Yang Cc: Guan Xuetao Cc: Russell King Cc: Arnd Bergmann Cc: Yoshinori Sato Cc: Rich Felker Cc: Thomas Bogendoerfer Cc: Cc: Cc: Cc: Cc: Link: https://lore.kernel.org/r/1470915049-15249-1-git-send-email-broonie@kernel.org Signed-off-by: Stephen Boyd Link: https://lkml.kernel.org/r/20200409064416.83340-8-sboyd@kernel.org Reviewed-by: Mark Brown Reviewed-by: Arnd Bergmann --- arch/arm/Kconfig | 2 ++ arch/c6x/Kconfig | 1 + arch/m68k/Kconfig.cpu | 2 +- arch/mips/Kconfig | 5 +++-- arch/mips/loongson2ef/Kconfig | 2 +- arch/mips/ralink/Kconfig | 4 ++++ arch/sh/boards/Kconfig | 5 +++++ arch/unicore32/Kconfig | 2 +- drivers/clk/Kconfig | 17 +++++++++++++---- 9 files changed, 31 insertions(+), 9 deletions(-) diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 6b56a4bc3b9f5..6e15460cf1f27 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -363,6 +363,7 @@ config ARCH_EP93XX select CPU_ARM920T select GENERIC_CLOCKEVENTS select GPIOLIB + select HAVE_LEGACY_CLK help This enables support for the Cirrus EP93xx series of CPUs. @@ -521,6 +522,7 @@ config ARCH_OMAP1 select GENERIC_IRQ_MULTI_HANDLER select GPIOLIB select HAVE_IDE + select HAVE_LEGACY_CLK select IRQ_DOMAIN select NEED_MACH_IO_H if PCCARD select NEED_MACH_MEMORY_H diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig index e65e8d82442a2..6444ebfd06a66 100644 --- a/arch/c6x/Kconfig +++ b/arch/c6x/Kconfig @@ -11,6 +11,7 @@ config C6X select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE select CLKDEV_LOOKUP + select HAVE_LEGACY_CLK select GENERIC_ATOMIC64 select GENERIC_IRQ_SHOW select HAVE_ARCH_TRACEHOOK diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu index 60ac1cd8b96fb..bd2d29c22a105 100644 --- a/arch/m68k/Kconfig.cpu +++ b/arch/m68k/Kconfig.cpu @@ -28,7 +28,7 @@ config COLDFIRE select CPU_HAS_NO_MULDIV64 select GENERIC_CSUM select GPIOLIB - select HAVE_CLK + select HAVE_LEGACY_CLK endchoice diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 4386bac2c7ac7..da7530689a8b8 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -181,7 +181,7 @@ config AR7 select SYS_SUPPORTS_ZBOOT_UART16550 select GPIOLIB select VLYNQ - select HAVE_CLK + select HAVE_LEGACY_CLK help Support for the Texas Instruments AR7 System-on-a-Chip family: TNETD7100, 7200 and 7300. 
@@ -296,9 +296,9 @@ config BCM63XX select SYS_HAS_EARLY_PRINTK select SWAP_IO_SPACE select GPIOLIB - select HAVE_CLK select MIPS_L1_CACHE_SHIFT_4 select CLKDEV_LOOKUP + select HAVE_LEGACY_CLK help Support for BCM63XX based boards @@ -419,6 +419,7 @@ config LANTIQ select SWAP_IO_SPACE select BOOT_RAW select CLKDEV_LOOKUP + select HAVE_LEGACY_CLK select USE_OF select PINCTRL select PINCTRL_LANTIQ diff --git a/arch/mips/loongson2ef/Kconfig b/arch/mips/loongson2ef/Kconfig index 595dd48e1e4db..c9ec43afde73e 100644 --- a/arch/mips/loongson2ef/Kconfig +++ b/arch/mips/loongson2ef/Kconfig @@ -46,7 +46,7 @@ config LEMOTE_MACH2F select CSRC_R4K if ! MIPS_EXTERNAL_TIMER select DMA_NONCOHERENT select GENERIC_ISA_DMA_SUPPORT_BROKEN - select HAVE_CLK + select HAVE_LEGACY_CLK select FORCE_PCI select I8259 select IRQ_MIPS_CPU diff --git a/arch/mips/ralink/Kconfig b/arch/mips/ralink/Kconfig index 35c2ebd8f0945..c10d8b233ab15 100644 --- a/arch/mips/ralink/Kconfig +++ b/arch/mips/ralink/Kconfig @@ -27,18 +27,22 @@ choice config SOC_RT288X bool "RT288x" select MIPS_L1_CACHE_SHIFT_4 + select HAVE_LEGACY_CLK select HAVE_PCI config SOC_RT305X bool "RT305x" + select HAVE_LEGACY_CLK config SOC_RT3883 bool "RT3883" + select HAVE_LEGACY_CLK select HAVE_PCI config SOC_MT7620 bool "MT7620/8" select CPU_MIPSR2_IRQ_VI + select HAVE_LEGACY_CLK select HAVE_PCI config SOC_MT7621 diff --git a/arch/sh/boards/Kconfig b/arch/sh/boards/Kconfig index cee24c308337c..fb0ca0c1efe11 100644 --- a/arch/sh/boards/Kconfig +++ b/arch/sh/boards/Kconfig @@ -7,6 +7,11 @@ config SOLUTION_ENGINE config SH_ALPHA_BOARD bool +config SH_CUSTOM_CLK + def_bool y + depends on !SH_DEVICE_TREE + select HAVE_LEGACY_CLK + config SH_DEVICE_TREE bool select OF diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig index 41fe944005f81..11ba1839d198e 100644 --- a/arch/unicore32/Kconfig +++ b/arch/unicore32/Kconfig @@ -70,7 +70,7 @@ config ARCH_PUV3 def_bool y select CPU_UCV2 select GENERIC_CLOCKEVENTS - select HAVE_CLK + select HAVE_LEGACY_CLK select GPIOLIB # CONFIGs for ARCH_PUV3 diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index bcb257baed06d..890bed62196db 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -7,8 +7,18 @@ config CLKDEV_LOOKUP config HAVE_CLK_PREPARE bool -config COMMON_CLK +config HAVE_LEGACY_CLK # TODO: Remove once all legacy users are migrated bool + select HAVE_CLK + help + Select this option when the clock API in is implemented + by platform/architecture code. This method is deprecated. Modern + code should select COMMON_CLK instead and not define a custom + 'struct clk'. + +menuconfig COMMON_CLK + bool "Common Clock Framework" + depends on !HAVE_LEGACY_CLK select HAVE_CLK_PREPARE select CLKDEV_LOOKUP select SRCU @@ -20,8 +30,7 @@ config COMMON_CLK Architectures utilizing the common struct clk should select this option. -menu "Common Clock Framework" - depends on COMMON_CLK +if COMMON_CLK config COMMON_CLK_WM831X tristate "Clock driver for WM831x/2x PMICs" @@ -362,4 +371,4 @@ source "drivers/clk/ti/Kconfig" source "drivers/clk/uniphier/Kconfig" source "drivers/clk/zynqmp/Kconfig" -endmenu +endif -- GitLab From 5099a722e9727fe9a93fac51e961735f40e5b6c8 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Wed, 1 Apr 2020 12:17:14 +0200 Subject: [PATCH 0292/1971] checkpatch: Warn about data_race() without comment Warn about applications of data_race() without a comment, to encourage documenting the reasoning behind why it was deemed safe. Suggested-by: Will Deacon Signed-off-by: Marco Elver Signed-off-by: Paul E. 
McKenney --- scripts/checkpatch.pl | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index a63380c6b0d20..48bb9508e3009 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -5833,6 +5833,14 @@ sub process { } } +# check for data_race without a comment. + if ($line =~ /\bdata_race\s*\(/) { + if (!ctx_has_comment($first_line, $linenr)) { + WARN("DATA_RACE", + "data_race without comment\n" . $herecurr); + } + } + # check for smp_read_barrier_depends and read_barrier_depends if (!$file && $line =~ /\b(smp_|)read_barrier_depends\s*\(/) { WARN("READ_BARRIER_DEPENDS", -- GitLab From 19acd03d95dad1f50d06f28179a1866fca431fed Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Fri, 24 Apr 2020 17:47:29 +0200 Subject: [PATCH 0293/1971] kcsan: Add __kcsan_{enable,disable}_current() variants The __kcsan_{enable,disable}_current() variants only call into KCSAN if KCSAN is enabled for the current compilation unit. Note: This is typically not what we want, as we usually want to ensure that even calls into other functions still have KCSAN disabled. These variants may safely be used in header files that are shared between regular kernel code and code that does not link the KCSAN runtime. Signed-off-by: Marco Elver Signed-off-by: Paul E. McKenney --- include/linux/kcsan-checks.h | 17 ++++++++++++++--- kernel/kcsan/core.c | 7 +++++++ 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h index ef95ddc491825..7b0b9c44f5f38 100644 --- a/include/linux/kcsan-checks.h +++ b/include/linux/kcsan-checks.h @@ -49,6 +49,7 @@ void kcsan_disable_current(void); * Supports nesting. */ void kcsan_enable_current(void); +void kcsan_enable_current_nowarn(void); /* Safe in uaccess regions. */ /** * kcsan_nestable_atomic_begin - begin nestable atomic region @@ -149,6 +150,7 @@ static inline void __kcsan_check_access(const volatile void *ptr, size_t size, static inline void kcsan_disable_current(void) { } static inline void kcsan_enable_current(void) { } +static inline void kcsan_enable_current_nowarn(void) { } static inline void kcsan_nestable_atomic_begin(void) { } static inline void kcsan_nestable_atomic_end(void) { } static inline void kcsan_flat_atomic_begin(void) { } @@ -165,15 +167,24 @@ static inline void kcsan_end_scoped_access(struct kcsan_scoped_access *sa) { } #endif /* CONFIG_KCSAN */ +#ifdef __SANITIZE_THREAD__ /* - * kcsan_*: Only calls into the runtime when the particular compilation unit has - * KCSAN instrumentation enabled. May be used in header files. + * Only calls into the runtime when the particular compilation unit has KCSAN + * instrumentation enabled. May be used in header files. */ -#ifdef __SANITIZE_THREAD__ #define kcsan_check_access __kcsan_check_access + +/* + * Only use these to disable KCSAN for accesses in the current compilation unit; + * calls into libraries may still perform KCSAN checks. 
+ */ +#define __kcsan_disable_current kcsan_disable_current +#define __kcsan_enable_current kcsan_enable_current_nowarn #else static inline void kcsan_check_access(const volatile void *ptr, size_t size, int type) { } +static inline void __kcsan_enable_current(void) { } +static inline void __kcsan_disable_current(void) { } #endif /** diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c index a572aae61b98d..a73a66cf79df6 100644 --- a/kernel/kcsan/core.c +++ b/kernel/kcsan/core.c @@ -625,6 +625,13 @@ void kcsan_enable_current(void) } EXPORT_SYMBOL(kcsan_enable_current); +void kcsan_enable_current_nowarn(void) +{ + if (get_ctx()->disable_count-- == 0) + kcsan_disable_current(); +} +EXPORT_SYMBOL(kcsan_enable_current_nowarn); + void kcsan_nestable_atomic_begin(void) { /* -- GitLab From ace7ade4f56cab0ea19d3665d7c14d713788c13d Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Mon, 13 Apr 2020 21:37:19 -0400 Subject: [PATCH 0294/1971] nfsd4: common stateid-printing code There's a problem with how I'm formatting stateids. Before I fix it, I'd like to move the stateid formatting into a common helper. Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs4state.c | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index c107caa565254..ded08aeae4979 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -2422,6 +2422,11 @@ static void nfs4_show_owner(struct seq_file *s, struct nfs4_stateowner *oo) seq_quote_mem(s, oo->so_owner.data, oo->so_owner.len); } +static void nfs4_show_stateid(struct seq_file *s, stateid_t *stid) +{ + seq_printf(s, "0x%16phN", stid); +} + static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st) { struct nfs4_ol_stateid *ols; @@ -2437,7 +2442,9 @@ static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st) nf = st->sc_file; file = find_any_file(nf); - seq_printf(s, "- 0x%16phN: { type: open, ", &st->sc_stateid); + seq_printf(s, "- "); + nfs4_show_stateid(s, &st->sc_stateid); + seq_printf(s, ": { type: open, "); access = bmap_to_share_mode(ols->st_access_bmap); deny = bmap_to_share_mode(ols->st_deny_bmap); @@ -2470,7 +2477,9 @@ static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st) nf = st->sc_file; file = find_any_file(nf); - seq_printf(s, "- 0x%16phN: { type: lock, ", &st->sc_stateid); + seq_printf(s, "- "); + nfs4_show_stateid(s, &st->sc_stateid); + seq_printf(s, ": { type: lock, "); /* * Note: a lock stateid isn't really the same thing as a lock, @@ -2499,7 +2508,9 @@ static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st) nf = st->sc_file; file = nf->fi_deleg_file; - seq_printf(s, "- 0x%16phN: { type: deleg, ", &st->sc_stateid); + seq_printf(s, "- "); + nfs4_show_stateid(s, &st->sc_stateid); + seq_printf(s, ": { type: deleg, "); /* Kinda dead code as long as we only support read delegs: */ seq_printf(s, "access: %s, ", @@ -2521,7 +2532,9 @@ static int nfs4_show_layout(struct seq_file *s, struct nfs4_stid *st) ls = container_of(st, struct nfs4_layout_stateid, ls_stid); file = ls->ls_file; - seq_printf(s, "- 0x%16phN: { type: layout, ", &st->sc_stateid); + seq_printf(s, "- "); + nfs4_show_stateid(s, &st->sc_stateid); + seq_printf(s, ": { type: layout, "); /* XXX: What else would be useful? */ -- GitLab From ee590d259784806c32ad0523aa6a9c321dbf96a4 Mon Sep 17 00:00:00 2001 From: "J. 
Bruce Fields" Date: Mon, 13 Apr 2020 22:03:06 -0400 Subject: [PATCH 0295/1971] nfsd4: stid display should preserve on-the-wire byte order When we decode the stateid we byte-swap si_generation. But for simplicity's sake and ease of comparison with network traces, it's better to display the whole thing in network order. Reported-by: Scott Mayhew Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs4state.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index ded08aeae4979..6fed3bf00ca78 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -2424,7 +2424,8 @@ static void nfs4_show_owner(struct seq_file *s, struct nfs4_stateowner *oo) static void nfs4_show_stateid(struct seq_file *s, stateid_t *stid) { - seq_printf(s, "0x%16phN", stid); + seq_printf(s, "0x%.8x", stid->si_generation); + seq_printf(s, "%12phN", &stid->si_opaque); } static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st) -- GitLab From 580da465a0328d597df0087800717814a62da638 Mon Sep 17 00:00:00 2001 From: Achilles Gaikwad Date: Mon, 20 Apr 2020 18:20:31 +0530 Subject: [PATCH 0296/1971] nfsd4: add filename to states output Add filename to states output for ease of debugging. Signed-off-by: Achilles Gaikwad Signed-off-by: Kenneth Dsouza Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs4state.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 6fed3bf00ca78..2c8567b32990e 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -2406,6 +2406,11 @@ static void states_stop(struct seq_file *s, void *v) spin_unlock(&clp->cl_lock); } +static void nfs4_show_fname(struct seq_file *s, struct nfsd_file *f) +{ + seq_printf(s, "filename: \"%pD2\"", f->nf_file); +} + static void nfs4_show_superblock(struct seq_file *s, struct nfsd_file *f) { struct inode *inode = f->nf_inode; @@ -2459,6 +2464,8 @@ static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st) nfs4_show_superblock(s, file); seq_printf(s, ", "); + nfs4_show_fname(s, file); + seq_printf(s, ", "); nfs4_show_owner(s, oo); seq_printf(s, " }\n"); nfsd_file_put(file); @@ -2492,6 +2499,8 @@ static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st) nfs4_show_superblock(s, file); /* XXX: open stateid? */ seq_printf(s, ", "); + nfs4_show_fname(s, file); + seq_printf(s, ", "); nfs4_show_owner(s, oo); seq_printf(s, " }\n"); nfsd_file_put(file); @@ -2520,6 +2529,8 @@ static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st) /* XXX: lease time, whether it's being recalled. */ nfs4_show_superblock(s, file); + seq_printf(s, ", "); + nfs4_show_fname(s, file); seq_printf(s, " }\n"); return 0; @@ -2540,6 +2551,8 @@ static int nfs4_show_layout(struct seq_file *s, struct nfs4_stid *st) /* XXX: What else would be useful? */ nfs4_show_superblock(s, file); + seq_printf(s, ", "); + nfs4_show_fname(s, file); seq_printf(s, " }\n"); return 0; -- GitLab From c2d715a1af75443631b05a49edbd152aef7e8b93 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Mon, 27 Apr 2020 17:59:01 -0400 Subject: [PATCH 0297/1971] nfsd: handle repeated BIND_CONN_TO_SESSION If the client attempts BIND_CONN_TO_SESSION on an already bound connection, it should be either a no-op or an error. Signed-off-by: J. 
Bruce Fields --- fs/nfsd/nfs4state.c | 54 +++++++++++++++++++++++++++++++++++---------- 1 file changed, 42 insertions(+), 12 deletions(-) diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 2c8567b32990e..fe88f5f23cf46 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -3485,6 +3485,45 @@ __be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp, return nfs_ok; } +static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s) +{ + struct nfsd4_conn *c; + + list_for_each_entry(c, &s->se_conns, cn_persession) { + if (c->cn_xprt == xpt) { + return c; + } + } + return NULL; +} + +static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst, + struct nfsd4_session *session, u32 req) +{ + struct nfs4_client *clp = session->se_client; + struct svc_xprt *xpt = rqst->rq_xprt; + struct nfsd4_conn *c; + __be32 status; + + /* Following the last paragraph of RFC 5661 Section 18.34.3: */ + spin_lock(&clp->cl_lock); + c = __nfsd4_find_conn(xpt, session); + if (!c) + status = nfserr_noent; + else if (req == c->cn_flags) + status = nfs_ok; + else if (req == NFS4_CDFC4_FORE_OR_BOTH && + c->cn_flags != NFS4_CDFC4_BACK) + status = nfs_ok; + else if (req == NFS4_CDFC4_BACK_OR_BOTH && + c->cn_flags != NFS4_CDFC4_FORE) + status = nfs_ok; + else + status = nfserr_inval; + spin_unlock(&clp->cl_lock); + return status; +} + __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) @@ -3506,6 +3545,9 @@ __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp, status = nfserr_wrong_cred; if (!nfsd4_mach_creds_match(session->se_client, rqstp)) goto out; + status = nfsd4_match_existing_connection(rqstp, session, bcts->dir); + if (status == nfs_ok || status == nfserr_inval) + goto out; status = nfsd4_map_bcts_dir(&bcts->dir); if (status) goto out; @@ -3571,18 +3613,6 @@ out: return status; } -static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s) -{ - struct nfsd4_conn *c; - - list_for_each_entry(c, &s->se_conns, cn_persession) { - if (c->cn_xprt == xpt) { - return c; - } - } - return NULL; -} - static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses) { struct nfs4_client *clp = ses->se_client; -- GitLab From 31fb4bf545e230f2c7db7e64cf012c67a8d374df Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Tue, 5 May 2020 16:45:37 +0800 Subject: [PATCH 0298/1971] sunrpc: Remove unused function ip_map_update commit 49b28684fdba ("nfsd: Remove deprecated nfsctl system call and related code.") left behind this, remove it. Signed-off-by: YueHaibing Signed-off-by: J. 
Bruce Fields --- net/sunrpc/svcauth_unix.c | 9 --------- 1 file changed, 9 deletions(-) diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c index 6c8f802c42612..97c0bddba7a30 100644 --- a/net/sunrpc/svcauth_unix.c +++ b/net/sunrpc/svcauth_unix.c @@ -332,15 +332,6 @@ static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm, return 0; } -static inline int ip_map_update(struct net *net, struct ip_map *ipm, - struct unix_domain *udom, time64_t expiry) -{ - struct sunrpc_net *sn; - - sn = net_generic(net, sunrpc_net_id); - return __ip_map_update(sn->ip_map_cache, ipm, udom, expiry); -} - void svcauth_unix_purge(struct net *net) { struct sunrpc_net *sn; -- GitLab From 50a19ad4b1ec531eb550183cb5d4ab9f25a56bf8 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Fri, 24 Apr 2020 17:47:30 +0200 Subject: [PATCH 0299/1971] objtool, kcsan: Add kcsan_disable_current() and kcsan_enable_current_nowarn() Both are safe to be called from uaccess contexts. Signed-off-by: Marco Elver Signed-off-by: Paul E. McKenney --- tools/objtool/check.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/objtool/check.c b/tools/objtool/check.c index b6a573d56f2ef..9122c20f5298d 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -472,6 +472,8 @@ static const char *uaccess_safe_builtin[] = { "kcsan_found_watchpoint", "kcsan_setup_watchpoint", "kcsan_check_scoped_accesses", + "kcsan_disable_current", + "kcsan_enable_current_nowarn", /* KCSAN/TSAN */ "__tsan_func_entry", "__tsan_func_exit", -- GitLab From 2cabeaf151294eceaa0f0e0fab2fe9fe66868869 Mon Sep 17 00:00:00 2001 From: Mathew King Date: Mon, 4 May 2020 14:29:27 -0600 Subject: [PATCH 0300/1971] power: supply: core: Cleanup power supply sysfs attribute list Make the device attribute list used to create sysfs attributes more robust by decoupling the list order from order of the enum defined in power_supply.h. This is done by using a designated initializer in the POWER_SUPPLY_ATTR macro. Signed-off-by: Mathew King Signed-off-by: Sebastian Reichel --- drivers/power/supply/power_supply_sysfs.c | 260 +++++++++++----------- 1 file changed, 132 insertions(+), 128 deletions(-) diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c index 51de3f47b25db..cfc2550f494c5 100644 --- a/drivers/power/supply/power_supply_sysfs.c +++ b/drivers/power/supply/power_supply_sysfs.c @@ -18,26 +18,20 @@ #include "power_supply.h" -/* - * This is because the name "current" breaks the device attr macro. - * The "current" word resolves to "(get_current())" so instead of - * "current" "(get_current())" appears in the sysfs. - * - * The source of this definition is the device.h which calls __ATTR - * macro in sysfs.h which calls the __stringify macro. - * - * Only modification that the name is not tried to be resolved - * (as a macro let's say). 
- */ +#define MAX_PROP_NAME_LEN 30 -#define POWER_SUPPLY_ATTR(_name) \ -{ \ - .attr = { .name = #_name }, \ - .show = power_supply_show_property, \ - .store = power_supply_store_property, \ -} +struct power_supply_attr { + const char *prop_name; + char attr_name[MAX_PROP_NAME_LEN + 1]; + struct device_attribute dev_attr; +}; -static struct device_attribute power_supply_attrs[]; +#define POWER_SUPPLY_ATTR(_name) \ +[POWER_SUPPLY_PROP_ ## _name] = \ +{ \ + .prop_name = #_name, \ + .attr_name = #_name "\0", \ +} static const char * const power_supply_type_text[] = { "Unknown", "Battery", "UPS", "Mains", "USB", @@ -77,6 +71,91 @@ static const char * const power_supply_scope_text[] = { "Unknown", "System", "Device" }; +static struct power_supply_attr power_supply_attrs[] = { + /* Properties of type `int' */ + POWER_SUPPLY_ATTR(STATUS), + POWER_SUPPLY_ATTR(CHARGE_TYPE), + POWER_SUPPLY_ATTR(HEALTH), + POWER_SUPPLY_ATTR(PRESENT), + POWER_SUPPLY_ATTR(ONLINE), + POWER_SUPPLY_ATTR(AUTHENTIC), + POWER_SUPPLY_ATTR(TECHNOLOGY), + POWER_SUPPLY_ATTR(CYCLE_COUNT), + POWER_SUPPLY_ATTR(VOLTAGE_MAX), + POWER_SUPPLY_ATTR(VOLTAGE_MIN), + POWER_SUPPLY_ATTR(VOLTAGE_MAX_DESIGN), + POWER_SUPPLY_ATTR(VOLTAGE_MIN_DESIGN), + POWER_SUPPLY_ATTR(VOLTAGE_NOW), + POWER_SUPPLY_ATTR(VOLTAGE_AVG), + POWER_SUPPLY_ATTR(VOLTAGE_OCV), + POWER_SUPPLY_ATTR(VOLTAGE_BOOT), + POWER_SUPPLY_ATTR(CURRENT_MAX), + POWER_SUPPLY_ATTR(CURRENT_NOW), + POWER_SUPPLY_ATTR(CURRENT_AVG), + POWER_SUPPLY_ATTR(CURRENT_BOOT), + POWER_SUPPLY_ATTR(POWER_NOW), + POWER_SUPPLY_ATTR(POWER_AVG), + POWER_SUPPLY_ATTR(CHARGE_FULL_DESIGN), + POWER_SUPPLY_ATTR(CHARGE_EMPTY_DESIGN), + POWER_SUPPLY_ATTR(CHARGE_FULL), + POWER_SUPPLY_ATTR(CHARGE_EMPTY), + POWER_SUPPLY_ATTR(CHARGE_NOW), + POWER_SUPPLY_ATTR(CHARGE_AVG), + POWER_SUPPLY_ATTR(CHARGE_COUNTER), + POWER_SUPPLY_ATTR(CONSTANT_CHARGE_CURRENT), + POWER_SUPPLY_ATTR(CONSTANT_CHARGE_CURRENT_MAX), + POWER_SUPPLY_ATTR(CONSTANT_CHARGE_VOLTAGE), + POWER_SUPPLY_ATTR(CONSTANT_CHARGE_VOLTAGE_MAX), + POWER_SUPPLY_ATTR(CHARGE_CONTROL_LIMIT), + POWER_SUPPLY_ATTR(CHARGE_CONTROL_LIMIT_MAX), + POWER_SUPPLY_ATTR(CHARGE_CONTROL_START_THRESHOLD), + POWER_SUPPLY_ATTR(CHARGE_CONTROL_END_THRESHOLD), + POWER_SUPPLY_ATTR(INPUT_CURRENT_LIMIT), + POWER_SUPPLY_ATTR(INPUT_VOLTAGE_LIMIT), + POWER_SUPPLY_ATTR(INPUT_POWER_LIMIT), + POWER_SUPPLY_ATTR(ENERGY_FULL_DESIGN), + POWER_SUPPLY_ATTR(ENERGY_EMPTY_DESIGN), + POWER_SUPPLY_ATTR(ENERGY_FULL), + POWER_SUPPLY_ATTR(ENERGY_EMPTY), + POWER_SUPPLY_ATTR(ENERGY_NOW), + POWER_SUPPLY_ATTR(ENERGY_AVG), + POWER_SUPPLY_ATTR(CAPACITY), + POWER_SUPPLY_ATTR(CAPACITY_ALERT_MIN), + POWER_SUPPLY_ATTR(CAPACITY_ALERT_MAX), + POWER_SUPPLY_ATTR(CAPACITY_LEVEL), + POWER_SUPPLY_ATTR(TEMP), + POWER_SUPPLY_ATTR(TEMP_MAX), + POWER_SUPPLY_ATTR(TEMP_MIN), + POWER_SUPPLY_ATTR(TEMP_ALERT_MIN), + POWER_SUPPLY_ATTR(TEMP_ALERT_MAX), + POWER_SUPPLY_ATTR(TEMP_AMBIENT), + POWER_SUPPLY_ATTR(TEMP_AMBIENT_ALERT_MIN), + POWER_SUPPLY_ATTR(TEMP_AMBIENT_ALERT_MAX), + POWER_SUPPLY_ATTR(TIME_TO_EMPTY_NOW), + POWER_SUPPLY_ATTR(TIME_TO_EMPTY_AVG), + POWER_SUPPLY_ATTR(TIME_TO_FULL_NOW), + POWER_SUPPLY_ATTR(TIME_TO_FULL_AVG), + POWER_SUPPLY_ATTR(TYPE), + POWER_SUPPLY_ATTR(USB_TYPE), + POWER_SUPPLY_ATTR(SCOPE), + POWER_SUPPLY_ATTR(PRECHARGE_CURRENT), + POWER_SUPPLY_ATTR(CHARGE_TERM_CURRENT), + POWER_SUPPLY_ATTR(CALIBRATE), + /* Properties of type `const char *' */ + POWER_SUPPLY_ATTR(MODEL_NAME), + POWER_SUPPLY_ATTR(MANUFACTURER), + POWER_SUPPLY_ATTR(SERIAL_NUMBER), +}; + +static struct attribute * 
+__power_supply_attrs[ARRAY_SIZE(power_supply_attrs) + 1]; + +static enum power_supply_property dev_attr_psp(struct device_attribute *attr) +{ + return container_of(attr, struct power_supply_attr, dev_attr) - + power_supply_attrs; +} + static ssize_t power_supply_show_usb_type(struct device *dev, const struct power_supply_desc *desc, union power_supply_propval *value, @@ -116,7 +195,7 @@ static ssize_t power_supply_show_property(struct device *dev, char *buf) { ssize_t ret; struct power_supply *psy = dev_get_drvdata(dev); - enum power_supply_property psp = attr - power_supply_attrs; + enum power_supply_property psp = dev_attr_psp(attr); union power_supply_propval value; if (psp == POWER_SUPPLY_PROP_TYPE) { @@ -184,7 +263,7 @@ static ssize_t power_supply_store_property(struct device *dev, const char *buf, size_t count) { ssize_t ret; struct power_supply *psy = dev_get_drvdata(dev); - enum power_supply_property psp = attr - power_supply_attrs; + enum power_supply_property psp = dev_attr_psp(attr); union power_supply_propval value; switch (psp) { @@ -233,86 +312,6 @@ static ssize_t power_supply_store_property(struct device *dev, return count; } -/* Must be in the same order as POWER_SUPPLY_PROP_* */ -static struct device_attribute power_supply_attrs[] = { - /* Properties of type `int' */ - POWER_SUPPLY_ATTR(status), - POWER_SUPPLY_ATTR(charge_type), - POWER_SUPPLY_ATTR(health), - POWER_SUPPLY_ATTR(present), - POWER_SUPPLY_ATTR(online), - POWER_SUPPLY_ATTR(authentic), - POWER_SUPPLY_ATTR(technology), - POWER_SUPPLY_ATTR(cycle_count), - POWER_SUPPLY_ATTR(voltage_max), - POWER_SUPPLY_ATTR(voltage_min), - POWER_SUPPLY_ATTR(voltage_max_design), - POWER_SUPPLY_ATTR(voltage_min_design), - POWER_SUPPLY_ATTR(voltage_now), - POWER_SUPPLY_ATTR(voltage_avg), - POWER_SUPPLY_ATTR(voltage_ocv), - POWER_SUPPLY_ATTR(voltage_boot), - POWER_SUPPLY_ATTR(current_max), - POWER_SUPPLY_ATTR(current_now), - POWER_SUPPLY_ATTR(current_avg), - POWER_SUPPLY_ATTR(current_boot), - POWER_SUPPLY_ATTR(power_now), - POWER_SUPPLY_ATTR(power_avg), - POWER_SUPPLY_ATTR(charge_full_design), - POWER_SUPPLY_ATTR(charge_empty_design), - POWER_SUPPLY_ATTR(charge_full), - POWER_SUPPLY_ATTR(charge_empty), - POWER_SUPPLY_ATTR(charge_now), - POWER_SUPPLY_ATTR(charge_avg), - POWER_SUPPLY_ATTR(charge_counter), - POWER_SUPPLY_ATTR(constant_charge_current), - POWER_SUPPLY_ATTR(constant_charge_current_max), - POWER_SUPPLY_ATTR(constant_charge_voltage), - POWER_SUPPLY_ATTR(constant_charge_voltage_max), - POWER_SUPPLY_ATTR(charge_control_limit), - POWER_SUPPLY_ATTR(charge_control_limit_max), - POWER_SUPPLY_ATTR(charge_control_start_threshold), - POWER_SUPPLY_ATTR(charge_control_end_threshold), - POWER_SUPPLY_ATTR(input_current_limit), - POWER_SUPPLY_ATTR(input_voltage_limit), - POWER_SUPPLY_ATTR(input_power_limit), - POWER_SUPPLY_ATTR(energy_full_design), - POWER_SUPPLY_ATTR(energy_empty_design), - POWER_SUPPLY_ATTR(energy_full), - POWER_SUPPLY_ATTR(energy_empty), - POWER_SUPPLY_ATTR(energy_now), - POWER_SUPPLY_ATTR(energy_avg), - POWER_SUPPLY_ATTR(capacity), - POWER_SUPPLY_ATTR(capacity_alert_min), - POWER_SUPPLY_ATTR(capacity_alert_max), - POWER_SUPPLY_ATTR(capacity_level), - POWER_SUPPLY_ATTR(temp), - POWER_SUPPLY_ATTR(temp_max), - POWER_SUPPLY_ATTR(temp_min), - POWER_SUPPLY_ATTR(temp_alert_min), - POWER_SUPPLY_ATTR(temp_alert_max), - POWER_SUPPLY_ATTR(temp_ambient), - POWER_SUPPLY_ATTR(temp_ambient_alert_min), - POWER_SUPPLY_ATTR(temp_ambient_alert_max), - POWER_SUPPLY_ATTR(time_to_empty_now), - POWER_SUPPLY_ATTR(time_to_empty_avg), - 
POWER_SUPPLY_ATTR(time_to_full_now), - POWER_SUPPLY_ATTR(time_to_full_avg), - POWER_SUPPLY_ATTR(type), - POWER_SUPPLY_ATTR(usb_type), - POWER_SUPPLY_ATTR(scope), - POWER_SUPPLY_ATTR(precharge_current), - POWER_SUPPLY_ATTR(charge_term_current), - POWER_SUPPLY_ATTR(calibrate), - /* Properties of type `const char *' */ - POWER_SUPPLY_ATTR(model_name), - POWER_SUPPLY_ATTR(manufacturer), - POWER_SUPPLY_ATTR(serial_number), -}; - -static struct attribute * -__power_supply_attrs[ARRAY_SIZE(power_supply_attrs) + 1]; - static umode_t power_supply_attr_is_visible(struct kobject *kobj, struct attribute *attr, int attrno) @@ -322,6 +321,9 @@ static umode_t power_supply_attr_is_visible(struct kobject *kobj, umode_t mode = S_IRUSR | S_IRGRP | S_IROTH; int i; + if (!power_supply_attrs[attrno].prop_name) + return 0; + if (attrno == POWER_SUPPLY_PROP_TYPE) return mode; @@ -350,31 +352,38 @@ static const struct attribute_group *power_supply_attr_groups[] = { NULL, }; -void power_supply_init_attrs(struct device_type *dev_type) +static void str_to_lower(char *str) { - int i; - - dev_type->groups = power_supply_attr_groups; - - for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++) - __power_supply_attrs[i] = &power_supply_attrs[i].attr; + while (*str) { + *str = tolower(*str); + str++; + } } -static char *kstruprdup(const char *str, gfp_t gfp) +void power_supply_init_attrs(struct device_type *dev_type) { - char *ret, *ustr; + int i; - ustr = ret = kmalloc(strlen(str) + 1, gfp); + dev_type->groups = power_supply_attr_groups; - if (!ret) - return NULL; + for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++) { + struct device_attribute *attr; - while (*str) - *ustr++ = toupper(*str++); + if (!power_supply_attrs[i].prop_name) { + pr_warn("%s: Property %d skipped because is is missing from power_supply_attrs\n", + __func__, i); + sprintf(power_supply_attrs[i].attr_name, "_err_%d", i); + } else { + str_to_lower(power_supply_attrs[i].attr_name); + } - *ustr = 0; + attr = &power_supply_attrs[i].dev_attr; - return ret; + attr->attr.name = power_supply_attrs[i].attr_name; + attr->show = power_supply_show_property; + attr->store = power_supply_store_property; + __power_supply_attrs[i] = &attr->attr; + } } int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env) @@ -382,7 +391,6 @@ int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env) struct power_supply *psy = dev_get_drvdata(dev); int ret = 0, j; char *prop_buf; - char *attrname; if (!psy || !psy->desc) { dev_dbg(dev, "No power supply yet\n"); @@ -398,12 +406,14 @@ int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env) return -ENOMEM; for (j = 0; j < psy->desc->num_properties; j++) { - struct device_attribute *attr; + struct power_supply_attr *pwr_attr; + struct device_attribute *dev_attr; char *line; - attr = &power_supply_attrs[psy->desc->properties[j]]; + pwr_attr = &power_supply_attrs[psy->desc->properties[j]]; + dev_attr = &pwr_attr->dev_attr; - ret = power_supply_show_property(dev, attr, prop_buf); + ret = power_supply_show_property(dev, dev_attr, prop_buf); if (ret == -ENODEV || ret == -ENODATA) { /* When a battery is absent, we expect -ENODEV. 
Don't abort; send the uevent with at least the the PRESENT=0 property */ @@ -418,14 +428,8 @@ int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env) if (line) *line = 0; - attrname = kstruprdup(attr->attr.name, GFP_KERNEL); - if (!attrname) { - ret = -ENOMEM; - goto out; - } - - ret = add_uevent_var(env, "POWER_SUPPLY_%s=%s", attrname, prop_buf); - kfree(attrname); + ret = add_uevent_var(env, "POWER_SUPPLY_%s=%s", + pwr_attr->prop_name, prop_buf); if (ret) goto out; } -- GitLab From d960d91f86880d28a6dc8edb4e7b605bcf6354eb Mon Sep 17 00:00:00 2001 From: Mathew King Date: Mon, 4 May 2020 14:29:28 -0600 Subject: [PATCH 0301/1971] power: supply: core: Use designated initializer for property text arrays Use designated initializers for the sysfs power supply text values. This will help ensure that the text values are kept in sync with the enum values from power_supply.h. Signed-off-by: Mathew King Signed-off-by: Sebastian Reichel --- drivers/power/supply/power_supply_sysfs.c | 74 ++++++++++++++++++----- 1 file changed, 60 insertions(+), 14 deletions(-) diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c index cfc2550f494c5..cfd5ed1f775f4 100644 --- a/drivers/power/supply/power_supply_sysfs.c +++ b/drivers/power/supply/power_supply_sysfs.c @@ -34,41 +34,87 @@ struct power_supply_attr { } static const char * const power_supply_type_text[] = { - "Unknown", "Battery", "UPS", "Mains", "USB", - "USB_DCP", "USB_CDP", "USB_ACA", "USB_C", - "USB_PD", "USB_PD_DRP", "BrickID" + [POWER_SUPPLY_TYPE_UNKNOWN] = "Unknown", + [POWER_SUPPLY_TYPE_BATTERY] = "Battery", + [POWER_SUPPLY_TYPE_UPS] = "UPS", + [POWER_SUPPLY_TYPE_MAINS] = "Mains", + [POWER_SUPPLY_TYPE_USB] = "USB", + [POWER_SUPPLY_TYPE_USB_DCP] = "USB_DCP", + [POWER_SUPPLY_TYPE_USB_CDP] = "USB_CDP", + [POWER_SUPPLY_TYPE_USB_ACA] = "USB_ACA", + [POWER_SUPPLY_TYPE_USB_TYPE_C] = "USB_C", + [POWER_SUPPLY_TYPE_USB_PD] = "USB_PD", + [POWER_SUPPLY_TYPE_USB_PD_DRP] = "USB_PD_DRP", + [POWER_SUPPLY_TYPE_APPLE_BRICK_ID] = "BrickID", }; static const char * const power_supply_usb_type_text[] = { - "Unknown", "SDP", "DCP", "CDP", "ACA", "C", - "PD", "PD_DRP", "PD_PPS", "BrickID" + [POWER_SUPPLY_USB_TYPE_UNKNOWN] = "Unknown", + [POWER_SUPPLY_USB_TYPE_SDP] = "SDP", + [POWER_SUPPLY_USB_TYPE_DCP] = "DCP", + [POWER_SUPPLY_USB_TYPE_CDP] = "CDP", + [POWER_SUPPLY_USB_TYPE_ACA] = "ACA", + [POWER_SUPPLY_USB_TYPE_C] = "C", + [POWER_SUPPLY_USB_TYPE_PD] = "PD", + [POWER_SUPPLY_USB_TYPE_PD_DRP] = "PD_DRP", + [POWER_SUPPLY_USB_TYPE_PD_PPS] = "PD_PPS", + [POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID] = "BrickID", }; static const char * const power_supply_status_text[] = { - "Unknown", "Charging", "Discharging", "Not charging", "Full" + [POWER_SUPPLY_STATUS_UNKNOWN] = "Unknown", + [POWER_SUPPLY_STATUS_CHARGING] = "Charging", + [POWER_SUPPLY_STATUS_DISCHARGING] = "Discharging", + [POWER_SUPPLY_STATUS_NOT_CHARGING] = "Not charging", + [POWER_SUPPLY_STATUS_FULL] = "Full", }; static const char * const power_supply_charge_type_text[] = { - "Unknown", "N/A", "Trickle", "Fast", "Standard", "Adaptive", "Custom" + [POWER_SUPPLY_CHARGE_TYPE_UNKNOWN] = "Unknown", + [POWER_SUPPLY_CHARGE_TYPE_NONE] = "N/A", + [POWER_SUPPLY_CHARGE_TYPE_TRICKLE] = "Trickle", + [POWER_SUPPLY_CHARGE_TYPE_FAST] = "Fast", + [POWER_SUPPLY_CHARGE_TYPE_STANDARD] = "Standard", + [POWER_SUPPLY_CHARGE_TYPE_ADAPTIVE] = "Adaptive", + [POWER_SUPPLY_CHARGE_TYPE_CUSTOM] = "Custom", }; static const char * const power_supply_health_text[] = { - "Unknown", "Good", "Overheat", 
"Dead", "Over voltage", - "Unspecified failure", "Cold", "Watchdog timer expire", - "Safety timer expire", "Over current" + [POWER_SUPPLY_HEALTH_UNKNOWN] = "Unknown", + [POWER_SUPPLY_HEALTH_GOOD] = "Good", + [POWER_SUPPLY_HEALTH_OVERHEAT] = "Overheat", + [POWER_SUPPLY_HEALTH_DEAD] = "Dead", + [POWER_SUPPLY_HEALTH_OVERVOLTAGE] = "Over voltage", + [POWER_SUPPLY_HEALTH_UNSPEC_FAILURE] = "Unspecified failure", + [POWER_SUPPLY_HEALTH_COLD] = "Cold", + [POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE] = "Watchdog timer expire", + [POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE] = "Safety timer expire", + [POWER_SUPPLY_HEALTH_OVERCURRENT] = "Over current", }; static const char * const power_supply_technology_text[] = { - "Unknown", "NiMH", "Li-ion", "Li-poly", "LiFe", "NiCd", - "LiMn" + [POWER_SUPPLY_TECHNOLOGY_UNKNOWN] = "Unknown", + [POWER_SUPPLY_TECHNOLOGY_NiMH] = "NiMH", + [POWER_SUPPLY_TECHNOLOGY_LION] = "Li-ion", + [POWER_SUPPLY_TECHNOLOGY_LIPO] = "Li-poly", + [POWER_SUPPLY_TECHNOLOGY_LiFe] = "LiFe", + [POWER_SUPPLY_TECHNOLOGY_NiCd] = "NiCd", + [POWER_SUPPLY_TECHNOLOGY_LiMn] = "LiMn", }; static const char * const power_supply_capacity_level_text[] = { - "Unknown", "Critical", "Low", "Normal", "High", "Full" + [POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN] = "Unknown", + [POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL] = "Critical", + [POWER_SUPPLY_CAPACITY_LEVEL_LOW] = "Low", + [POWER_SUPPLY_CAPACITY_LEVEL_NORMAL] = "Normal", + [POWER_SUPPLY_CAPACITY_LEVEL_HIGH] = "High", + [POWER_SUPPLY_CAPACITY_LEVEL_FULL] = "Full", }; static const char * const power_supply_scope_text[] = { - "Unknown", "System", "Device" + [POWER_SUPPLY_SCOPE_UNKNOWN] = "Unknown", + [POWER_SUPPLY_SCOPE_SYSTEM] = "System", + [POWER_SUPPLY_SCOPE_DEVICE] = "Device", }; static struct power_supply_attr power_supply_attrs[] = { -- GitLab From 5b505366ac6c77a623e18fe18cedb2d4d3c9a7f3 Mon Sep 17 00:00:00 2001 From: Mathew King Date: Mon, 4 May 2020 14:29:29 -0600 Subject: [PATCH 0302/1971] power: supply: core: Add a macro that maps enum properties to text values Reduce the number of touch points to add a new enum property to the power_supply class by mapping the array of text values to the device attribute descriptor. A new enum property can now added by creating an array with the text values named POWER_SUPPLY_${PROPNAME}_TEXT and adding POWER_SUPPLY_ENUM_ATTR(${PROPNAME}) to the power_supply_attrs array. 
Signed-off-by: Mathew King Signed-off-by: Sebastian Reichel --- drivers/power/supply/power_supply_sysfs.c | 124 +++++++++------------- 1 file changed, 50 insertions(+), 74 deletions(-) diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c index cfd5ed1f775f4..bf6e344cf4516 100644 --- a/drivers/power/supply/power_supply_sysfs.c +++ b/drivers/power/supply/power_supply_sysfs.c @@ -24,16 +24,26 @@ struct power_supply_attr { const char *prop_name; char attr_name[MAX_PROP_NAME_LEN + 1]; struct device_attribute dev_attr; + const char * const *text_values; + int text_values_len; }; -#define POWER_SUPPLY_ATTR(_name) \ -[POWER_SUPPLY_PROP_ ## _name] = \ -{ \ - .prop_name = #_name, \ - .attr_name = #_name "\0", \ +#define _POWER_SUPPLY_ATTR(_name, _text, _len) \ +[POWER_SUPPLY_PROP_ ## _name] = \ +{ \ + .prop_name = #_name, \ + .attr_name = #_name "\0", \ + .text_values = _text, \ + .text_values_len = _len, \ } -static const char * const power_supply_type_text[] = { +#define POWER_SUPPLY_ATTR(_name) _POWER_SUPPLY_ATTR(_name, NULL, 0) +#define _POWER_SUPPLY_ENUM_ATTR(_name, _text) \ + _POWER_SUPPLY_ATTR(_name, _text, ARRAY_SIZE(_text)) +#define POWER_SUPPLY_ENUM_ATTR(_name) \ + _POWER_SUPPLY_ENUM_ATTR(_name, POWER_SUPPLY_ ## _name ## _TEXT) + +static const char * const POWER_SUPPLY_TYPE_TEXT[] = { [POWER_SUPPLY_TYPE_UNKNOWN] = "Unknown", [POWER_SUPPLY_TYPE_BATTERY] = "Battery", [POWER_SUPPLY_TYPE_UPS] = "UPS", @@ -48,7 +58,7 @@ static const char * const power_supply_type_text[] = { [POWER_SUPPLY_TYPE_APPLE_BRICK_ID] = "BrickID", }; -static const char * const power_supply_usb_type_text[] = { +static const char * const POWER_SUPPLY_USB_TYPE_TEXT[] = { [POWER_SUPPLY_USB_TYPE_UNKNOWN] = "Unknown", [POWER_SUPPLY_USB_TYPE_SDP] = "SDP", [POWER_SUPPLY_USB_TYPE_DCP] = "DCP", @@ -61,7 +71,7 @@ static const char * const power_supply_usb_type_text[] = { [POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID] = "BrickID", }; -static const char * const power_supply_status_text[] = { +static const char * const POWER_SUPPLY_STATUS_TEXT[] = { [POWER_SUPPLY_STATUS_UNKNOWN] = "Unknown", [POWER_SUPPLY_STATUS_CHARGING] = "Charging", [POWER_SUPPLY_STATUS_DISCHARGING] = "Discharging", @@ -69,7 +79,7 @@ static const char * const power_supply_status_text[] = { [POWER_SUPPLY_STATUS_FULL] = "Full", }; -static const char * const power_supply_charge_type_text[] = { +static const char * const POWER_SUPPLY_CHARGE_TYPE_TEXT[] = { [POWER_SUPPLY_CHARGE_TYPE_UNKNOWN] = "Unknown", [POWER_SUPPLY_CHARGE_TYPE_NONE] = "N/A", [POWER_SUPPLY_CHARGE_TYPE_TRICKLE] = "Trickle", @@ -79,7 +89,7 @@ static const char * const power_supply_charge_type_text[] = { [POWER_SUPPLY_CHARGE_TYPE_CUSTOM] = "Custom", }; -static const char * const power_supply_health_text[] = { +static const char * const POWER_SUPPLY_HEALTH_TEXT[] = { [POWER_SUPPLY_HEALTH_UNKNOWN] = "Unknown", [POWER_SUPPLY_HEALTH_GOOD] = "Good", [POWER_SUPPLY_HEALTH_OVERHEAT] = "Overheat", @@ -92,7 +102,7 @@ static const char * const power_supply_health_text[] = { [POWER_SUPPLY_HEALTH_OVERCURRENT] = "Over current", }; -static const char * const power_supply_technology_text[] = { +static const char * const POWER_SUPPLY_TECHNOLOGY_TEXT[] = { [POWER_SUPPLY_TECHNOLOGY_UNKNOWN] = "Unknown", [POWER_SUPPLY_TECHNOLOGY_NiMH] = "NiMH", [POWER_SUPPLY_TECHNOLOGY_LION] = "Li-ion", @@ -102,7 +112,7 @@ static const char * const power_supply_technology_text[] = { [POWER_SUPPLY_TECHNOLOGY_LiMn] = "LiMn", }; -static const char * const power_supply_capacity_level_text[] = { +static 
const char * const POWER_SUPPLY_CAPACITY_LEVEL_TEXT[] = { [POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN] = "Unknown", [POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL] = "Critical", [POWER_SUPPLY_CAPACITY_LEVEL_LOW] = "Low", @@ -111,7 +121,7 @@ static const char * const power_supply_capacity_level_text[] = { [POWER_SUPPLY_CAPACITY_LEVEL_FULL] = "Full", }; -static const char * const power_supply_scope_text[] = { +static const char * const POWER_SUPPLY_SCOPE_TEXT[] = { [POWER_SUPPLY_SCOPE_UNKNOWN] = "Unknown", [POWER_SUPPLY_SCOPE_SYSTEM] = "System", [POWER_SUPPLY_SCOPE_DEVICE] = "Device", @@ -119,13 +129,13 @@ static const char * const power_supply_scope_text[] = { static struct power_supply_attr power_supply_attrs[] = { /* Properties of type `int' */ - POWER_SUPPLY_ATTR(STATUS), - POWER_SUPPLY_ATTR(CHARGE_TYPE), - POWER_SUPPLY_ATTR(HEALTH), + POWER_SUPPLY_ENUM_ATTR(STATUS), + POWER_SUPPLY_ENUM_ATTR(CHARGE_TYPE), + POWER_SUPPLY_ENUM_ATTR(HEALTH), POWER_SUPPLY_ATTR(PRESENT), POWER_SUPPLY_ATTR(ONLINE), POWER_SUPPLY_ATTR(AUTHENTIC), - POWER_SUPPLY_ATTR(TECHNOLOGY), + POWER_SUPPLY_ENUM_ATTR(TECHNOLOGY), POWER_SUPPLY_ATTR(CYCLE_COUNT), POWER_SUPPLY_ATTR(VOLTAGE_MAX), POWER_SUPPLY_ATTR(VOLTAGE_MIN), @@ -168,7 +178,7 @@ static struct power_supply_attr power_supply_attrs[] = { POWER_SUPPLY_ATTR(CAPACITY), POWER_SUPPLY_ATTR(CAPACITY_ALERT_MIN), POWER_SUPPLY_ATTR(CAPACITY_ALERT_MAX), - POWER_SUPPLY_ATTR(CAPACITY_LEVEL), + POWER_SUPPLY_ENUM_ATTR(CAPACITY_LEVEL), POWER_SUPPLY_ATTR(TEMP), POWER_SUPPLY_ATTR(TEMP_MAX), POWER_SUPPLY_ATTR(TEMP_MIN), @@ -181,9 +191,9 @@ static struct power_supply_attr power_supply_attrs[] = { POWER_SUPPLY_ATTR(TIME_TO_EMPTY_AVG), POWER_SUPPLY_ATTR(TIME_TO_FULL_NOW), POWER_SUPPLY_ATTR(TIME_TO_FULL_AVG), - POWER_SUPPLY_ATTR(TYPE), + POWER_SUPPLY_ENUM_ATTR(TYPE), POWER_SUPPLY_ATTR(USB_TYPE), - POWER_SUPPLY_ATTR(SCOPE), + POWER_SUPPLY_ENUM_ATTR(SCOPE), POWER_SUPPLY_ATTR(PRECHARGE_CURRENT), POWER_SUPPLY_ATTR(CHARGE_TERM_CURRENT), POWER_SUPPLY_ATTR(CALIBRATE), @@ -196,10 +206,14 @@ static struct power_supply_attr power_supply_attrs[] = { static struct attribute * __power_supply_attrs[ARRAY_SIZE(power_supply_attrs) + 1]; +static struct power_supply_attr *to_ps_attr(struct device_attribute *attr) +{ + return container_of(attr, struct power_supply_attr, dev_attr); +} + static enum power_supply_property dev_attr_psp(struct device_attribute *attr) { - return container_of(attr, struct power_supply_attr, dev_attr) - - power_supply_attrs; + return to_ps_attr(attr) - power_supply_attrs; } static ssize_t power_supply_show_usb_type(struct device *dev, @@ -217,11 +231,11 @@ static ssize_t power_supply_show_usb_type(struct device *dev, if (value->intval == usb_type) { count += sprintf(buf + count, "[%s] ", - power_supply_usb_type_text[usb_type]); + POWER_SUPPLY_USB_TYPE_TEXT[usb_type]); match = true; } else { count += sprintf(buf + count, "%s ", - power_supply_usb_type_text[usb_type]); + POWER_SUPPLY_USB_TYPE_TEXT[usb_type]); } } @@ -241,6 +255,7 @@ static ssize_t power_supply_show_property(struct device *dev, char *buf) { ssize_t ret; struct power_supply *psy = dev_get_drvdata(dev); + struct power_supply_attr *ps_attr = to_ps_attr(attr); enum power_supply_property psp = dev_attr_psp(attr); union power_supply_propval value; @@ -261,38 +276,15 @@ static ssize_t power_supply_show_property(struct device *dev, } } + if (ps_attr->text_values_len > 0 && + value.intval < ps_attr->text_values_len && value.intval >= 0) { + return sprintf(buf, "%s\n", ps_attr->text_values[value.intval]); + } + switch (psp) { - case 
POWER_SUPPLY_PROP_STATUS: - ret = sprintf(buf, "%s\n", - power_supply_status_text[value.intval]); - break; - case POWER_SUPPLY_PROP_CHARGE_TYPE: - ret = sprintf(buf, "%s\n", - power_supply_charge_type_text[value.intval]); - break; - case POWER_SUPPLY_PROP_HEALTH: - ret = sprintf(buf, "%s\n", - power_supply_health_text[value.intval]); - break; - case POWER_SUPPLY_PROP_TECHNOLOGY: - ret = sprintf(buf, "%s\n", - power_supply_technology_text[value.intval]); - break; - case POWER_SUPPLY_PROP_CAPACITY_LEVEL: - ret = sprintf(buf, "%s\n", - power_supply_capacity_level_text[value.intval]); - break; - case POWER_SUPPLY_PROP_TYPE: - ret = sprintf(buf, "%s\n", - power_supply_type_text[value.intval]); - break; case POWER_SUPPLY_PROP_USB_TYPE: ret = power_supply_show_usb_type(dev, psy->desc, - &value, buf); - break; - case POWER_SUPPLY_PROP_SCOPE: - ret = sprintf(buf, "%s\n", - power_supply_scope_text[value.intval]); + &value, buf); break; case POWER_SUPPLY_PROP_MODEL_NAME ... POWER_SUPPLY_PROP_SERIAL_NUMBER: ret = sprintf(buf, "%s\n", value.strval); @@ -309,30 +301,14 @@ static ssize_t power_supply_store_property(struct device *dev, const char *buf, size_t count) { ssize_t ret; struct power_supply *psy = dev_get_drvdata(dev); + struct power_supply_attr *ps_attr = to_ps_attr(attr); enum power_supply_property psp = dev_attr_psp(attr); union power_supply_propval value; - switch (psp) { - case POWER_SUPPLY_PROP_STATUS: - ret = sysfs_match_string(power_supply_status_text, buf); - break; - case POWER_SUPPLY_PROP_CHARGE_TYPE: - ret = sysfs_match_string(power_supply_charge_type_text, buf); - break; - case POWER_SUPPLY_PROP_HEALTH: - ret = sysfs_match_string(power_supply_health_text, buf); - break; - case POWER_SUPPLY_PROP_TECHNOLOGY: - ret = sysfs_match_string(power_supply_technology_text, buf); - break; - case POWER_SUPPLY_PROP_CAPACITY_LEVEL: - ret = sysfs_match_string(power_supply_capacity_level_text, buf); - break; - case POWER_SUPPLY_PROP_SCOPE: - ret = sysfs_match_string(power_supply_scope_text, buf); - break; - default: - ret = -EINVAL; + ret = -EINVAL; + if (ps_attr->text_values_len > 0) { + ret = __sysfs_match_string(ps_attr->text_values, + ps_attr->text_values_len, buf); } /* -- GitLab From 2ad3d74e3c69fb371412f37d8226f2da3a3b6cc2 Mon Sep 17 00:00:00 2001 From: Mathew King Date: Mon, 4 May 2020 14:29:30 -0600 Subject: [PATCH 0303/1971] power: supply: core: Add type property to uevent env Add POWER_SUPPLY_TYPE to the uevent env for power supply. Type is a property of all power supplies and there is a sysfs entry for it but it is not included in the properties array of the power supply so explicitly add it to the udev env. 
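For example, after this change a power-supply uevent (as seen with e.g. "udevadm monitor --property"; the device name and values below are illustrative, not taken from a real trace) always carries the TYPE key alongside the driver-declared properties:

POWER_SUPPLY_NAME=BAT0
POWER_SUPPLY_TYPE=Battery
POWER_SUPPLY_STATUS=Discharging
POWER_SUPPLY_PRESENT=1
POWER_SUPPLY_CAPACITY=87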
Signed-off-by: Mathew King Signed-off-by: Sebastian Reichel --- drivers/power/supply/power_supply_sysfs.c | 61 ++++++++++++++--------- 1 file changed, 37 insertions(+), 24 deletions(-) diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c index bf6e344cf4516..d21b4e0edf385 100644 --- a/drivers/power/supply/power_supply_sysfs.c +++ b/drivers/power/supply/power_supply_sysfs.c @@ -408,6 +408,37 @@ void power_supply_init_attrs(struct device_type *dev_type) } } +static int add_prop_uevent(struct device *dev, struct kobj_uevent_env *env, + enum power_supply_property prop, char *prop_buf) +{ + int ret = 0; + struct power_supply_attr *pwr_attr; + struct device_attribute *dev_attr; + char *line; + + pwr_attr = &power_supply_attrs[prop]; + dev_attr = &pwr_attr->dev_attr; + + ret = power_supply_show_property(dev, dev_attr, prop_buf); + if (ret == -ENODEV || ret == -ENODATA) { + /* + * When a battery is absent, we expect -ENODEV. Don't abort; + * send the uevent with at least the the PRESENT=0 property + */ + return 0; + } + + if (ret < 0) + return ret; + + line = strchr(prop_buf, '\n'); + if (line) + *line = 0; + + return add_uevent_var(env, "POWER_SUPPLY_%s=%s", + pwr_attr->prop_name, prop_buf); +} + int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env) { struct power_supply *psy = dev_get_drvdata(dev); @@ -427,31 +458,13 @@ int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env) if (!prop_buf) return -ENOMEM; - for (j = 0; j < psy->desc->num_properties; j++) { - struct power_supply_attr *pwr_attr; - struct device_attribute *dev_attr; - char *line; - - pwr_attr = &power_supply_attrs[psy->desc->properties[j]]; - dev_attr = &pwr_attr->dev_attr; - - ret = power_supply_show_property(dev, dev_attr, prop_buf); - if (ret == -ENODEV || ret == -ENODATA) { - /* When a battery is absent, we expect -ENODEV. Don't abort; - send the uevent with at least the the PRESENT=0 property */ - ret = 0; - continue; - } - - if (ret < 0) - goto out; - - line = strchr(prop_buf, '\n'); - if (line) - *line = 0; + ret = add_prop_uevent(dev, env, POWER_SUPPLY_PROP_TYPE, prop_buf); + if (ret) + goto out; - ret = add_uevent_var(env, "POWER_SUPPLY_%s=%s", - pwr_attr->prop_name, prop_buf); + for (j = 0; j < psy->desc->num_properties; j++) { + ret = add_prop_uevent(dev, env, psy->desc->properties[j], + prop_buf); if (ret) goto out; } -- GitLab From 12aceb89b0bce19eb89735f9de7a9983e4f0adae Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 1 May 2020 13:11:09 -0600 Subject: [PATCH 0304/1971] eventfd: convert to f_op->read_iter() eventfd is using ->read() as it's file_operations read handler, but this prevents passing in information about whether a given IO operation is blocking or not. We can only use the file flags for that. To support async (-EAGAIN/poll based) retries for io_uring, we need ->read_iter() support. Convert eventfd to using ->read_iter(). With ->read_iter(), we can support IOCB_NOWAIT. Ensure the fd setup is done such that we set file->f_mode with FMODE_NOWAIT. 
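For context, the userspace contract is unchanged by the conversion: every successful read drains the counter as a single 8-byte value, which is why the iov_iter path rejects buffers smaller than sizeof(__u64). A minimal stand-alone illustration of that contract (not part of the patch):

#include <sys/eventfd.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint64_t cnt;
	int fd = eventfd(3, EFD_NONBLOCK);	/* counter starts at 3 */

	if (fd < 0)
		return 1;

	if (read(fd, &cnt, sizeof(cnt)) == sizeof(cnt))	/* buffer must be >= 8 bytes */
		printf("drained counter: %llu\n", (unsigned long long)cnt);

	/* the counter is now 0, so with EFD_NONBLOCK a second read fails with EAGAIN */
	close(fd);
	return 0;
}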
[missing include added] Signed-off-by: Jens Axboe Signed-off-by: Al Viro --- fs/eventfd.c | 64 +++++++++++++++++++++++++++++++--------------------- 1 file changed, 38 insertions(+), 26 deletions(-) diff --git a/fs/eventfd.c b/fs/eventfd.c index 78e41c7c3d05b..df466ef81dddf 100644 --- a/fs/eventfd.c +++ b/fs/eventfd.c @@ -23,6 +23,7 @@ #include #include #include +#include DEFINE_PER_CPU(int, eventfd_wake_count); @@ -216,32 +217,32 @@ int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *w } EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue); -static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count, - loff_t *ppos) +static ssize_t eventfd_read(struct kiocb *iocb, struct iov_iter *to) { + struct file *file = iocb->ki_filp; struct eventfd_ctx *ctx = file->private_data; - ssize_t res; __u64 ucnt = 0; DECLARE_WAITQUEUE(wait, current); - if (count < sizeof(ucnt)) + if (iov_iter_count(to) < sizeof(ucnt)) return -EINVAL; - spin_lock_irq(&ctx->wqh.lock); - res = -EAGAIN; - if (ctx->count > 0) - res = sizeof(ucnt); - else if (!(file->f_flags & O_NONBLOCK)) { + if (!ctx->count) { + if ((file->f_flags & O_NONBLOCK) || + (iocb->ki_flags & IOCB_NOWAIT)) { + spin_unlock_irq(&ctx->wqh.lock); + return -EAGAIN; + } __add_wait_queue(&ctx->wqh, &wait); for (;;) { set_current_state(TASK_INTERRUPTIBLE); - if (ctx->count > 0) { - res = sizeof(ucnt); + if (ctx->count) break; - } if (signal_pending(current)) { - res = -ERESTARTSYS; - break; + __remove_wait_queue(&ctx->wqh, &wait); + __set_current_state(TASK_RUNNING); + spin_unlock_irq(&ctx->wqh.lock); + return -ERESTARTSYS; } spin_unlock_irq(&ctx->wqh.lock); schedule(); @@ -250,17 +251,14 @@ static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count, __remove_wait_queue(&ctx->wqh, &wait); __set_current_state(TASK_RUNNING); } - if (likely(res > 0)) { - eventfd_ctx_do_read(ctx, &ucnt); - if (waitqueue_active(&ctx->wqh)) - wake_up_locked_poll(&ctx->wqh, EPOLLOUT); - } + eventfd_ctx_do_read(ctx, &ucnt); + if (waitqueue_active(&ctx->wqh)) + wake_up_locked_poll(&ctx->wqh, EPOLLOUT); spin_unlock_irq(&ctx->wqh.lock); - - if (res > 0 && put_user(ucnt, (__u64 __user *)buf)) + if (unlikely(copy_to_iter(&ucnt, sizeof(ucnt), to) != sizeof(ucnt))) return -EFAULT; - return res; + return sizeof(ucnt); } static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count, @@ -329,7 +327,7 @@ static const struct file_operations eventfd_fops = { #endif .release = eventfd_release, .poll = eventfd_poll, - .read = eventfd_read, + .read_iter = eventfd_read, .write = eventfd_write, .llseek = noop_llseek, }; @@ -406,6 +404,7 @@ EXPORT_SYMBOL_GPL(eventfd_ctx_fileget); static int do_eventfd(unsigned int count, int flags) { struct eventfd_ctx *ctx; + struct file *file; int fd; /* Check the EFD_* constants for consistency. 
*/ @@ -425,11 +424,24 @@ static int do_eventfd(unsigned int count, int flags) ctx->flags = flags; ctx->id = ida_simple_get(&eventfd_ida, 0, 0, GFP_KERNEL); - fd = anon_inode_getfd("[eventfd]", &eventfd_fops, ctx, - O_RDWR | (flags & EFD_SHARED_FCNTL_FLAGS)); + flags &= EFD_SHARED_FCNTL_FLAGS; + flags |= O_RDWR; + fd = get_unused_fd_flags(flags); if (fd < 0) - eventfd_free_ctx(ctx); + goto err; + + file = anon_inode_getfile("[eventfd]", &eventfd_fops, ctx, flags); + if (IS_ERR(file)) { + put_unused_fd(fd); + fd = PTR_ERR(file); + goto err; + } + file->f_mode |= FMODE_NOWAIT; + fd_install(fd, file); + return fd; +err: + eventfd_free_ctx(ctx); return fd; } -- GitLab From 78ad73421831247e46c31899a7bead02740e4bef Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Thu, 30 Apr 2020 13:31:58 +0200 Subject: [PATCH 0305/1971] Revert "i2c: tegra: Fix suspending in active runtime PM state" This reverts commit 9f42de8d4ec2304f10bbc51dc0484f3503d61196. It's not safe to use pm_runtime_force_{suspend,resume}(), especially during the noirq phase of suspend. See also the guidance provided in commit 1e2ef05bb8cf ("PM: Limit race conditions between runtime PM and system sleep (v2)"). Signed-off-by: Thierry Reding --- drivers/i2c/busses/i2c-tegra.c | 9 --------- 1 file changed, 9 deletions(-) diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index 4c4d17ddc96b9..7c88611c732c4 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -1769,14 +1769,9 @@ static int tegra_i2c_remove(struct platform_device *pdev) static int __maybe_unused tegra_i2c_suspend(struct device *dev) { struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev); - int err; i2c_mark_adapter_suspended(&i2c_dev->adapter); - err = pm_runtime_force_suspend(dev); - if (err < 0) - return err; - return 0; } @@ -1797,10 +1792,6 @@ static int __maybe_unused tegra_i2c_resume(struct device *dev) if (err) return err; - err = pm_runtime_force_resume(dev); - if (err < 0) - return err; - i2c_mark_adapter_resumed(&i2c_dev->adapter); return 0; -- GitLab From 44c99904cf61f945d02ac9976ab10dd5ccaea393 Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Fri, 13 Dec 2019 14:44:17 +0100 Subject: [PATCH 0306/1971] i2c: tegra: Restore pinmux on system resume Depending on the board design, the I2C controllers found on Tegra SoCs may require pinmuxing in order to function. This is done as part of the driver's runtime suspend/resume operations. However, the PM core does not allow devices to go into runtime suspend during system sleep to avoid potential races with the suspend/resume of their parents. As a result of this, when Tegra SoCs resume from system suspend, their I2C controllers may have lost the pinmux state in hardware, whereas the pinctrl subsystem is not aware of this. To fix this, make sure that if the I2C controller is not runtime suspended, the runtime suspend code is still executed in order to disable the module clock (which we don't need to be enabled during sleep) and set the pinmux to the idle state. Conversely, make sure that the I2C controller is properly resumed when waking up from sleep so that pinmux settings are properly restored. This fixes a bug seen with DDC transactions to an HDMI monitor timing out when resuming from system suspend. 
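The pattern relied on here, in simplified form (the function and field names below are placeholders; the real tegra_i2c_runtime_{suspend,resume}() also manage several clocks), is that the pinmux state tracks the runtime PM callbacks, so the system sleep hooks must reuse those paths whenever the controller was left runtime-active:

struct foo_i2c {
	struct clk *clk;	/* stand-in for the controller's module clock(s) */
};

static int foo_i2c_runtime_suspend(struct device *dev)
{
	struct foo_i2c *i2c = dev_get_drvdata(dev);

	clk_disable_unprepare(i2c->clk);		/* clock not needed while idle */
	return pinctrl_pm_select_idle_state(dev);	/* park the I2C pads */
}

static int foo_i2c_runtime_resume(struct device *dev)
{
	struct foo_i2c *i2c = dev_get_drvdata(dev);
	int err;

	err = pinctrl_pm_select_default_state(dev);	/* restore the pinmux */
	if (err)
		return err;

	return clk_prepare_enable(i2c->clk);
}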
Signed-off-by: Thierry Reding --- drivers/i2c/busses/i2c-tegra.c | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index 7c88611c732c4..119377da71fd8 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -1769,10 +1769,14 @@ static int tegra_i2c_remove(struct platform_device *pdev) static int __maybe_unused tegra_i2c_suspend(struct device *dev) { struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev); + int err = 0; i2c_mark_adapter_suspended(&i2c_dev->adapter); - return 0; + if (!pm_runtime_status_suspended(dev)) + err = tegra_i2c_runtime_suspend(dev); + + return err; } static int __maybe_unused tegra_i2c_resume(struct device *dev) @@ -1780,6 +1784,10 @@ static int __maybe_unused tegra_i2c_resume(struct device *dev) struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev); int err; + /* + * We need to ensure that clocks are enabled so that registers can be + * restored in tegra_i2c_init(). + */ err = tegra_i2c_runtime_resume(dev); if (err) return err; @@ -1788,9 +1796,16 @@ static int __maybe_unused tegra_i2c_resume(struct device *dev) if (err) return err; - err = tegra_i2c_runtime_suspend(dev); - if (err) - return err; + /* + * In case we are runtime suspended, disable clocks again so that we + * don't unbalance the clock reference counts during the next runtime + * resume transition. + */ + if (pm_runtime_status_suspended(dev)) { + err = tegra_i2c_runtime_suspend(dev); + if (err) + return err; + } i2c_mark_adapter_resumed(&i2c_dev->adapter); -- GitLab From 26ca88aaea26cce966e4c9c4eaf2e0bb9a9f28ea Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Wed, 6 May 2020 20:47:25 +0200 Subject: [PATCH 0307/1971] i2c: tegra: Keep IRQs enabled during suspend/resume One of the I2C controllers on Tegra SoCs is typically connected to a system PMIC, which provides controls for critical power supplies for most platforms. Some drivers, such as PCI, need to disable these regulators during a very late stage during suspend and resume them at a very early stage during resume. To support these use-cases, keep interrupts disabled during suspend/ resume. Suggested-by: Dmitry Osipenko Signed-off-by: Thierry Reding --- drivers/i2c/busses/i2c-tegra.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index 119377da71fd8..26673be867bcb 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -1694,8 +1694,8 @@ static int tegra_i2c_probe(struct platform_device *pdev) irq_set_status_flags(i2c_dev->irq, IRQ_NOAUTOEN); - ret = devm_request_irq(&pdev->dev, i2c_dev->irq, - tegra_i2c_isr, 0, dev_name(&pdev->dev), i2c_dev); + ret = devm_request_irq(&pdev->dev, i2c_dev->irq, tegra_i2c_isr, + IRQF_NO_SUSPEND, dev_name(&pdev->dev), i2c_dev); if (ret) { dev_err(&pdev->dev, "Failed to request irq %i\n", i2c_dev->irq); goto release_dma; -- GitLab From 566c05f7cd9a5c6a257d19d4fb91ce35b3b27fb6 Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Tue, 24 Mar 2020 22:12:16 +0300 Subject: [PATCH 0308/1971] i2c: tegra: Better handle case where CPU0 is busy for a long time Boot CPU0 always handle I2C interrupt and under some rare circumstances (like running KASAN + NFS root) it may stuck in uninterruptible state for a significant time. In this case we will get timeout if I2C transfer is running on a sibling CPU, despite of IRQ being raised. 
In order to handle this rare condition, the IRQ status needs to be checked after completion timeout. Signed-off-by: Dmitry Osipenko Signed-off-by: Thierry Reding --- drivers/i2c/busses/i2c-tegra.c | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index 26673be867bcb..9232038ccb40c 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -996,14 +996,13 @@ tegra_i2c_poll_completion_timeout(struct tegra_i2c_dev *i2c_dev, do { u32 status = i2c_readl(i2c_dev, I2C_INT_STATUS); - if (status) { + if (status) tegra_i2c_isr(i2c_dev->irq, i2c_dev); - if (completion_done(complete)) { - s64 delta = ktime_ms_delta(ktimeout, ktime); + if (completion_done(complete)) { + s64 delta = ktime_ms_delta(ktimeout, ktime); - return msecs_to_jiffies(delta) ?: 1; - } + return msecs_to_jiffies(delta) ?: 1; } ktime = ktime_get(); @@ -1030,14 +1029,18 @@ tegra_i2c_wait_completion_timeout(struct tegra_i2c_dev *i2c_dev, disable_irq(i2c_dev->irq); /* - * There is a chance that completion may happen after IRQ - * synchronization, which is done by disable_irq(). + * Under some rare circumstances (like running KASAN + + * NFS root) CPU, which handles interrupt, may stuck in + * uninterruptible state for a significant time. In this + * case we will get timeout if I2C transfer is running on + * a sibling CPU, despite of IRQ being raised. + * + * In order to handle this rare condition, the IRQ status + * needs to be checked after timeout. */ - if (ret == 0 && completion_done(complete)) { - dev_warn(i2c_dev->dev, - "completion done after timeout\n"); - ret = 1; - } + if (ret == 0) + ret = tegra_i2c_poll_completion_timeout(i2c_dev, + complete, 0); } return ret; -- GitLab From a70ff65601f1016cb164d2847e9f370e2ed22b39 Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Tue, 24 Mar 2020 22:12:17 +0300 Subject: [PATCH 0309/1971] i2c: tegra: Synchronize DMA before termination DMA transfer could be completed, but CPU (which handles DMA interrupt) may get too busy and can't handle the interrupt in a timely manner, despite of DMA IRQ being raised. In this case the DMA state needs to synchronized before terminating DMA transfer in order not to miss the DMA transfer completion. Signed-off-by: Dmitry Osipenko Signed-off-by: Thierry Reding --- drivers/i2c/busses/i2c-tegra.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index 9232038ccb40c..7c92e91a1cc25 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -1219,6 +1219,15 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, time_left = tegra_i2c_wait_completion_timeout( i2c_dev, &i2c_dev->dma_complete, xfer_time); + /* + * Synchronize DMA first, since dmaengine_terminate_sync() + * performs synchronization after the transfer's termination + * and we want to get a completion if transfer succeeded. + */ + dmaengine_synchronize(i2c_dev->msg_read ? + i2c_dev->rx_dma_chan : + i2c_dev->tx_dma_chan); + dmaengine_terminate_sync(i2c_dev->msg_read ? i2c_dev->rx_dma_chan : i2c_dev->tx_dma_chan); -- GitLab From d29fbcdb051fbe8e6aee87dd5a7df723f299494d Mon Sep 17 00:00:00 2001 From: Nishad Kamdar Date: Sat, 25 Apr 2020 18:49:08 +0530 Subject: [PATCH 0310/1971] f2fs: Use the correct style for SPDX License Identifier This patch corrects the SPDX License Identifier style in header files related to F2FS File System support. 
For C header files Documentation/process/license-rules.rst mandates C-like comments (opposed to C source files where C++ style should be used). Changes made by using a script provided by Joe Perches here: https://lkml.org/lkml/2019/2/7/46. Suggested-by: Joe Perches Signed-off-by: Nishad Kamdar Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/acl.h | 2 +- fs/f2fs/f2fs.h | 2 +- fs/f2fs/gc.h | 2 +- fs/f2fs/node.h | 2 +- fs/f2fs/segment.h | 2 +- fs/f2fs/trace.h | 2 +- fs/f2fs/xattr.h | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/fs/f2fs/acl.h b/fs/f2fs/acl.h index b96823c59b15a..124868c13f80f 100644 --- a/fs/f2fs/acl.h +++ b/fs/f2fs/acl.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * fs/f2fs/acl.h * diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 3b9603266a2aa..d036a31a97e84 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * fs/f2fs/f2fs.h * diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h index bbac9d3787bd3..db3c61046aa42 100644 --- a/fs/f2fs/gc.h +++ b/fs/f2fs/gc.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * fs/f2fs/gc.h * diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h index e05af5df56485..6a2011deea23c 100644 --- a/fs/f2fs/node.h +++ b/fs/f2fs/node.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * fs/f2fs/node.h * diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h index 7a83bd530812f..cba16cca51895 100644 --- a/fs/f2fs/segment.h +++ b/fs/f2fs/segment.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * fs/f2fs/segment.h * diff --git a/fs/f2fs/trace.h b/fs/f2fs/trace.h index e8075fc5b2284..789f6aa727fcf 100644 --- a/fs/f2fs/trace.h +++ b/fs/f2fs/trace.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * f2fs IO tracer * diff --git a/fs/f2fs/xattr.h b/fs/f2fs/xattr.h index 938fcd20565dc..6a192e6c7a9eb 100644 --- a/fs/f2fs/xattr.h +++ b/fs/f2fs/xattr.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * fs/f2fs/xattr.h * -- GitLab From 3c57f75182452cc954f5e12374601b72574f4709 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 1 May 2020 16:35:23 -0700 Subject: [PATCH 0311/1971] f2fs: use strcmp() in parse_options() Remove the pointless string length checks. Just use strcmp(). 
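For a NUL-terminated token returned by match_strdup(), the length check adds nothing: "length equals N and the first N bytes match" is exactly an exact-string match, which strcmp() already expresses. Illustrative before/after shaped like the code below (do_lfs() is a placeholder, not a real function):

	/* before: length check plus bounded compare */
	if (strlen(name) == 3 && !strncmp(name, "lfs", 3))
		do_lfs();

	/* after: equivalent for NUL-terminated strings, and simpler */
	if (!strcmp(name, "lfs"))
		do_lfs();
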
Signed-off-by: Eric Biggers Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/super.c | 44 ++++++++++++++++---------------------------- 1 file changed, 16 insertions(+), 28 deletions(-) diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 438296e17183d..d0a705e493320 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -427,11 +427,11 @@ static int parse_options(struct super_block *sb, char *options) if (!name) return -ENOMEM; - if (strlen(name) == 2 && !strncmp(name, "on", 2)) { + if (!strcmp(name, "on")) { F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON; - } else if (strlen(name) == 3 && !strncmp(name, "off", 3)) { + } else if (!strcmp(name, "off")) { F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_OFF; - } else if (strlen(name) == 4 && !strncmp(name, "sync", 4)) { + } else if (!strcmp(name, "sync")) { F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_SYNC; } else { kvfree(name); @@ -591,16 +591,14 @@ static int parse_options(struct super_block *sb, char *options) if (!name) return -ENOMEM; - if (strlen(name) == 8 && - !strncmp(name, "adaptive", 8)) { + if (!strcmp(name, "adaptive")) { if (f2fs_sb_has_blkzoned(sbi)) { f2fs_warn(sbi, "adaptive mode is not allowed with zoned block device feature"); kvfree(name); return -EINVAL; } F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE; - } else if (strlen(name) == 3 && - !strncmp(name, "lfs", 3)) { + } else if (!strcmp(name, "lfs")) { F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS; } else { kvfree(name); @@ -725,14 +723,11 @@ static int parse_options(struct super_block *sb, char *options) name = match_strdup(&args[0]); if (!name) return -ENOMEM; - if (strlen(name) == 10 && - !strncmp(name, "user-based", 10)) { + if (!strcmp(name, "user-based")) { F2FS_OPTION(sbi).whint_mode = WHINT_MODE_USER; - } else if (strlen(name) == 3 && - !strncmp(name, "off", 3)) { + } else if (!strcmp(name, "off")) { F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF; - } else if (strlen(name) == 8 && - !strncmp(name, "fs-based", 8)) { + } else if (!strcmp(name, "fs-based")) { F2FS_OPTION(sbi).whint_mode = WHINT_MODE_FS; } else { kvfree(name); @@ -745,11 +740,9 @@ static int parse_options(struct super_block *sb, char *options) if (!name) return -ENOMEM; - if (strlen(name) == 7 && - !strncmp(name, "default", 7)) { + if (!strcmp(name, "default")) { F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT; - } else if (strlen(name) == 5 && - !strncmp(name, "reuse", 5)) { + } else if (!strcmp(name, "reuse")) { F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE; } else { kvfree(name); @@ -761,14 +754,11 @@ static int parse_options(struct super_block *sb, char *options) name = match_strdup(&args[0]); if (!name) return -ENOMEM; - if (strlen(name) == 5 && - !strncmp(name, "posix", 5)) { + if (!strcmp(name, "posix")) { F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX; - } else if (strlen(name) == 6 && - !strncmp(name, "strict", 6)) { + } else if (!strcmp(name, "strict")) { F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_STRICT; - } else if (strlen(name) == 9 && - !strncmp(name, "nobarrier", 9)) { + } else if (!strcmp(name, "nobarrier")) { F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_NOBARRIER; } else { @@ -823,15 +813,13 @@ static int parse_options(struct super_block *sb, char *options) name = match_strdup(&args[0]); if (!name) return -ENOMEM; - if (strlen(name) == 3 && !strcmp(name, "lzo")) { + if (!strcmp(name, "lzo")) { F2FS_OPTION(sbi).compress_algorithm = COMPRESS_LZO; - } else if (strlen(name) == 3 && - !strcmp(name, "lz4")) { + } else if (!strcmp(name, "lz4")) { F2FS_OPTION(sbi).compress_algorithm = COMPRESS_LZ4; - } else if 
(strlen(name) == 4 && - !strcmp(name, "zstd")) { + } else if (!strcmp(name, "zstd")) { F2FS_OPTION(sbi).compress_algorithm = COMPRESS_ZSTD; } else { -- GitLab From b5f4684b5f5fe6ea3c0d57acf5acdf8e8fbfe503 Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Sat, 29 Feb 2020 18:49:06 +0800 Subject: [PATCH 0312/1971] f2fs: remove redundant compress inode check due to f2fs_post_read_required() has did that. Signed-off-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/f2fs.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index d036a31a97e84..a3809b092798e 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -4047,8 +4047,6 @@ static inline bool f2fs_force_buffered_io(struct inode *inode, return true; if (f2fs_is_multi_device(sbi)) return true; - if (f2fs_compressed_file(inode)) - return true; /* * for blkzoned device, fallback direct IO to buffered IO, so * all IOs can be serialized by log-structured write. -- GitLab From 3265d3db1f16395cfc6b8ea9b31b4001d98d05ef Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Wed, 18 Mar 2020 16:22:59 +0800 Subject: [PATCH 0313/1971] f2fs: support partial truncation on compressed inode Supports to truncate compressed/normal cluster partially on compressed inode. Signed-off-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/compress.c | 49 ++++++++++++++++++++++++++++++++++++++++++++++ fs/f2fs/f2fs.h | 2 ++ fs/f2fs/file.c | 19 +++++++++++++----- 3 files changed, 65 insertions(+), 5 deletions(-) diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c index 26b071afe48a3..d6283e351c959 100644 --- a/fs/f2fs/compress.c +++ b/fs/f2fs/compress.c @@ -954,6 +954,55 @@ bool f2fs_compress_write_end(struct inode *inode, void *fsdata, return first_index; } +int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock) +{ + void *fsdata = NULL; + struct page *pagep; + int log_cluster_size = F2FS_I(inode)->i_log_cluster_size; + pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) << + log_cluster_size; + int err; + + err = f2fs_is_compressed_cluster(inode, start_idx); + if (err < 0) + return err; + + /* truncate normal cluster */ + if (!err) + return f2fs_do_truncate_blocks(inode, from, lock); + + /* truncate compressed cluster */ + err = f2fs_prepare_compress_overwrite(inode, &pagep, + start_idx, &fsdata); + + /* should not be a normal cluster */ + f2fs_bug_on(F2FS_I_SB(inode), err == 0); + + if (err <= 0) + return err; + + if (err > 0) { + struct page **rpages = fsdata; + int cluster_size = F2FS_I(inode)->i_cluster_size; + int i; + + for (i = cluster_size - 1; i >= 0; i--) { + loff_t start = rpages[i]->index << PAGE_SHIFT; + + if (from <= start) { + zero_user_segment(rpages[i], 0, PAGE_SIZE); + } else { + zero_user_segment(rpages[i], from - start, + PAGE_SIZE); + break; + } + } + + f2fs_compress_write_end(inode, fsdata, start_idx, true); + } + return 0; +} + static int f2fs_write_compressed_pages(struct compress_ctx *cc, int *submitted, struct writeback_control *wbc, diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index a3809b092798e..cc7a7a0c02d6a 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -3105,6 +3105,7 @@ static inline void f2fs_clear_page_private(struct page *page) */ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync); void f2fs_truncate_data_blocks(struct dnode_of_data *dn); +int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock); int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock); int f2fs_truncate(struct inode *inode); int f2fs_getattr(const struct path *path, 
struct kstat *stat, @@ -3825,6 +3826,7 @@ int f2fs_prepare_compress_overwrite(struct inode *inode, struct page **pagep, pgoff_t index, void **fsdata); bool f2fs_compress_write_end(struct inode *inode, void *fsdata, pgoff_t index, unsigned copied); +int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock); void f2fs_compress_write_end_io(struct bio *bio, struct page *page); bool f2fs_is_compress_backend_ready(struct inode *inode); void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity); diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index a0a4413d6083b..175c661900876 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -647,9 +647,6 @@ static int truncate_partial_data_page(struct inode *inode, u64 from, return 0; } - if (f2fs_compressed_file(inode)) - return 0; - page = f2fs_get_lock_data_page(inode, index, true); if (IS_ERR(page)) return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page); @@ -665,7 +662,7 @@ truncate_out: return 0; } -static int do_truncate_blocks(struct inode *inode, u64 from, bool lock) +int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct dnode_of_data dn; @@ -733,7 +730,9 @@ free_partial: int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock) { u64 free_from = from; + int err; +#ifdef CONFIG_F2FS_FS_COMPRESSION /* * for compressed file, only support cluster size * aligned truncation. @@ -748,8 +747,18 @@ int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock) free_from++; free_from <<= cluster_shift; } +#endif + + err = f2fs_do_truncate_blocks(inode, free_from, lock); + if (err) + return err; + +#ifdef CONFIG_F2FS_FS_COMPRESSION + if (from != free_from) + err = f2fs_truncate_partial_cluster(inode, from, lock); +#endif - return do_truncate_blocks(inode, free_from, lock); + return err; } int f2fs_truncate(struct inode *inode) -- GitLab From bf38fbad12b365223f9c5d4057f741bb03372737 Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Sat, 28 Mar 2020 17:40:40 +0800 Subject: [PATCH 0314/1971] f2fs: support fiemap on compressed inode Map normal/compressed cluster of compressed inode correctly, and give the right fiemap flag FIEMAP_EXTENT_ENCODED on mapped compressed extent. Signed-off-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/data.c | 55 ++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 53 insertions(+), 2 deletions(-) diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index a5505aba2a1f1..b79d643470d62 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -1832,6 +1832,25 @@ static int f2fs_xattr_fiemap(struct inode *inode, return (err < 0 ? 
err : 0); } +static loff_t max_inode_blocks(struct inode *inode) +{ + loff_t result = ADDRS_PER_INODE(inode); + loff_t leaf_count = ADDRS_PER_BLOCK(inode); + + /* two direct node blocks */ + result += (leaf_count * 2); + + /* two indirect node blocks */ + leaf_count *= NIDS_PER_BLOCK; + result += (leaf_count * 2); + + /* one double indirect node block */ + leaf_count *= NIDS_PER_BLOCK; + result += leaf_count; + + return result; +} + int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 len) { @@ -1841,6 +1860,8 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 logical = 0, phys = 0, size = 0; u32 flags = 0; int ret = 0; + bool compr_cluster = false; + unsigned int cluster_size = F2FS_I(inode)->i_cluster_size; if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) { ret = f2fs_precache_extents(inode); @@ -1875,6 +1896,9 @@ next: memset(&map_bh, 0, sizeof(struct buffer_head)); map_bh.b_size = len; + if (compr_cluster) + map_bh.b_size = blk_to_logical(inode, cluster_size - 1); + ret = get_data_block(inode, start_blk, &map_bh, 0, F2FS_GET_BLOCK_FIEMAP, &next_pgofs); if (ret) @@ -1885,7 +1909,7 @@ next: start_blk = next_pgofs; if (blk_to_logical(inode, start_blk) < blk_to_logical(inode, - F2FS_I_SB(inode)->max_file_blocks)) + max_inode_blocks(inode))) goto prep_next; flags |= FIEMAP_EXTENT_LAST; @@ -1897,11 +1921,38 @@ next: ret = fiemap_fill_next_extent(fieinfo, logical, phys, size, flags); + if (ret) + goto out; + size = 0; } - if (start_blk > last_blk || ret) + if (start_blk > last_blk) goto out; + if (compr_cluster) { + compr_cluster = false; + + + logical = blk_to_logical(inode, start_blk - 1); + phys = blk_to_logical(inode, map_bh.b_blocknr); + size = blk_to_logical(inode, cluster_size); + + flags |= FIEMAP_EXTENT_ENCODED; + + start_blk += cluster_size - 1; + + if (start_blk > last_blk) + goto out; + + goto prep_next; + } + + if (map_bh.b_blocknr == COMPRESS_ADDR) { + compr_cluster = true; + start_blk++; + goto prep_next; + } + logical = blk_to_logical(inode, start_blk); phys = blk_to_logical(inode, map_bh.b_blocknr); size = map_bh.b_size; -- GitLab From c1c633878662341eb0c502b3cab6e8c5cc83f44c Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Mon, 30 Mar 2020 17:13:29 +0800 Subject: [PATCH 0315/1971] f2fs: introduce f2fs_bmap_compress() to support bmap() on compressed inode: if queried block locates in non-compressed cluster, return its physical block address. 
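In outline (a simplified sketch of the change below, with error handling and f2fs_put_dnode() omitted), the lookup rounds the queried block down to its cluster start and only reports a physical address when that cluster is stored uncompressed:

	start_idx = round_down(block, F2FS_I(inode)->i_cluster_size);
	/* ... look up the dnode covering start_idx ... */
	if (dn.data_blkaddr == COMPRESS_ADDR)
		return 0;	/* compressed cluster: no 1:1 physical block */

	dn.ofs_in_node += block - start_idx;
	return f2fs_data_blkaddr(&dn);	/* physical block of the queried page */
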
Signed-off-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/data.c | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index b79d643470d62..ff8a92df8d995 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -3666,6 +3666,37 @@ static int f2fs_set_data_page_dirty(struct page *page) return 0; } + +static sector_t f2fs_bmap_compress(struct inode *inode, sector_t block) +{ +#ifdef CONFIG_F2FS_FS_COMPRESSION + struct dnode_of_data dn; + sector_t start_idx, blknr = 0; + int ret; + + start_idx = round_down(block, F2FS_I(inode)->i_cluster_size); + + set_new_dnode(&dn, inode, NULL, NULL, 0); + ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE); + if (ret) + return 0; + + if (dn.data_blkaddr != COMPRESS_ADDR) { + dn.ofs_in_node += block - start_idx; + blknr = f2fs_data_blkaddr(&dn); + if (!__is_valid_data_blkaddr(blknr)) + blknr = 0; + } + + f2fs_put_dnode(&dn); + + return blknr; +#else + return -EOPNOTSUPP; +#endif +} + + static sector_t f2fs_bmap(struct address_space *mapping, sector_t block) { struct inode *inode = mapping->host; @@ -3677,6 +3708,9 @@ static sector_t f2fs_bmap(struct address_space *mapping, sector_t block) if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) filemap_write_and_wait(mapping); + if (f2fs_compressed_file(inode)) + return f2fs_bmap_compress(inode, block); + return generic_block_bmap(mapping, block, get_data_block_bmap); } -- GitLab From cf5701bf749683cc85827fd68c0890838a2b7fd4 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 6 May 2020 13:11:16 +0300 Subject: [PATCH 0316/1971] power: bq25890: unlock on error paths in bq25890_resume() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We introduced some new locking here, but need to update the error paths so they unlock before returning. Fixes: 72d9cd9cdc18 ("power: bq25890: protect view of the chip's state") Signed-off-by: Dan Carpenter Reviewed-by: Michał Mirosław Signed-off-by: Sebastian Reichel --- drivers/power/supply/bq25890_charger.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c index 9339e216651ff..20b9824ef5acd 100644 --- a/drivers/power/supply/bq25890_charger.c +++ b/drivers/power/supply/bq25890_charger.c @@ -978,21 +978,22 @@ static int bq25890_resume(struct device *dev) ret = bq25890_get_chip_state(bq, &bq->state); if (ret < 0) - return ret; + goto unlock; /* Re-enable ADC only if charger is plugged in. */ if (bq->state.online) { ret = bq25890_field_write(bq, F_CONV_START, 1); if (ret < 0) - return ret; + goto unlock; } /* signal userspace, maybe state changed while suspended */ power_supply_changed(bq->charger); +unlock: mutex_unlock(&bq->lock); - return 0; + return ret; } #endif -- GitLab From 1a45732926f33245b5cabd37f03cdbf4a1818a16 Mon Sep 17 00:00:00 2001 From: ChenTao Date: Wed, 6 May 2020 19:45:18 +0800 Subject: [PATCH 0317/1971] power: supply: cw2015: Make some symbols static Fix the following warning: drivers/power/supply/cw2015_battery.c:96:5: warning: 'cw_update_profile' was not declared. Should it be static? drivers/power/supply/cw2015_battery.c:712:1: warning: 'cw_bat_pm_ops' was not declared. Should it be static? 
Reported-by: Hulk Robot Signed-off-by: ChenTao Acked-by: Tobias Schramm Tested-by: Tobias Schramm Signed-off-by: Sebastian Reichel --- drivers/power/supply/cw2015_battery.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/power/supply/cw2015_battery.c b/drivers/power/supply/cw2015_battery.c index 8603da6f45930..19f62ea957ee1 100644 --- a/drivers/power/supply/cw2015_battery.c +++ b/drivers/power/supply/cw2015_battery.c @@ -93,7 +93,7 @@ static int cw_read_word(struct cw_battery *cw_bat, u8 reg, u16 *val) return 0; } -int cw_update_profile(struct cw_battery *cw_bat) +static int cw_update_profile(struct cw_battery *cw_bat) { int ret; unsigned int reg_val; @@ -709,7 +709,7 @@ static int __maybe_unused cw_bat_resume(struct device *dev) return 0; } -SIMPLE_DEV_PM_OPS(cw_bat_pm_ops, cw_bat_suspend, cw_bat_resume); +static SIMPLE_DEV_PM_OPS(cw_bat_pm_ops, cw_bat_suspend, cw_bat_resume); static int cw_bat_remove(struct i2c_client *client) { -- GitLab From a776f560abcba90ee8959c4b7a7f6c9911ca2988 Mon Sep 17 00:00:00 2001 From: Serge Semin Date: Fri, 8 May 2020 02:38:44 +0300 Subject: [PATCH 0318/1971] dt-bindings: power: reset: Convert syscon-reboot-mode to DT schema Modern device tree bindings are supposed to be created as YAML-files in accordance with dt-schema. This commit replaces SYSCON reboot-mode legacy bare text bindings with YAML file. As before the bindings file states that the corresponding dts node is supposed to be compatible "syscon-reboot-mode" device and necessarily have an offset property to determine which register from the regmap is supposed to keep the mode on reboot. Signed-off-by: Serge Semin Reviewed-by: Rob Herring Signed-off-by: Sebastian Reichel --- .../power/reset/syscon-reboot-mode.txt | 35 ------------ .../power/reset/syscon-reboot-mode.yaml | 55 +++++++++++++++++++ 2 files changed, 55 insertions(+), 35 deletions(-) delete mode 100644 Documentation/devicetree/bindings/power/reset/syscon-reboot-mode.txt create mode 100644 Documentation/devicetree/bindings/power/reset/syscon-reboot-mode.yaml diff --git a/Documentation/devicetree/bindings/power/reset/syscon-reboot-mode.txt b/Documentation/devicetree/bindings/power/reset/syscon-reboot-mode.txt deleted file mode 100644 index f7ce1d8af04ae..0000000000000 --- a/Documentation/devicetree/bindings/power/reset/syscon-reboot-mode.txt +++ /dev/null @@ -1,35 +0,0 @@ -SYSCON reboot mode driver - -This driver gets reboot mode magic value form reboot-mode driver -and stores it in a SYSCON mapped register. Then the bootloader -can read it and take different action according to the magic -value stored. - -This DT node should be represented as a sub-node of a "syscon", "simple-mfd" -node. - -Required properties: -- compatible: should be "syscon-reboot-mode" -- offset: offset in the register map for the storage register (in bytes) - -Optional property: -- mask: bits mask of the bits in the register to store the reboot mode magic value, - default set to 0xffffffff if missing. 
- -The rest of the properties should follow the generic reboot-mode description -found in reboot-mode.txt - -Example: - pmu: pmu@20004000 { - compatible = "rockchip,rk3066-pmu", "syscon", "simple-mfd"; - reg = <0x20004000 0x100>; - - reboot-mode { - compatible = "syscon-reboot-mode"; - offset = <0x40>; - mode-normal = ; - mode-recovery = ; - mode-bootloader = ; - mode-loader = ; - }; - }; diff --git a/Documentation/devicetree/bindings/power/reset/syscon-reboot-mode.yaml b/Documentation/devicetree/bindings/power/reset/syscon-reboot-mode.yaml new file mode 100644 index 0000000000000..9b1ffceefe3de --- /dev/null +++ b/Documentation/devicetree/bindings/power/reset/syscon-reboot-mode.yaml @@ -0,0 +1,55 @@ +# SPDX-License-Identifier: GPL-2.0-only +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/power/reset/syscon-reboot-mode.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Generic SYSCON reboot mode driver + +maintainers: + - Sebastian Reichel + +description: | + This driver gets reboot mode magic value from reboot-mode driver + and stores it in a SYSCON mapped register. Then the bootloader + can read it and take different action according to the magic + value stored. The SYSCON mapped register is retrieved from the + parental dt-node plus the offset. So the SYSCON reboot-mode node + should be represented as a sub-node of a "syscon", "simple-mfd" node. + +properties: + compatible: + const: syscon-reboot-mode + + mask: + $ref: /schemas/types.yaml#/definitions/uint32 + description: Update only the register bits defined by the mask (32 bit) + + offset: + $ref: /schemas/types.yaml#/definitions/uint32 + description: Offset in the register map for the mode register (in bytes) + +patternProperties: + "^mode-.+": + $ref: /schemas/types.yaml#/definitions/uint32 + description: Vendor-specific mode value written to the mode register + +additionalProperties: false + +required: + - compatible + - offset + +examples: + - | + #include + + reboot-mode { + compatible = "syscon-reboot-mode"; + offset = <0x40>; + mode-normal = ; + mode-recovery = ; + mode-bootloader = ; + mode-loader = ; + }; +... -- GitLab From 52782c92ac85c4e393eb4a903a62e6c24afa633f Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Wed, 6 May 2020 12:09:34 -0400 Subject: [PATCH 0319/1971] kthread: save thread function It's handy to keep the kthread_fn just as a unique cookie to identify classes of kthreads. E.g. if you can verify that a given task is running your thread_fn, then you may know what sort of type kthread_data points to. We'll use this in nfsd to pass some information into the vfs. Note it will need kthread_data() exported too. Original-patch-by: Tejun Heo Signed-off-by: J. 
Bruce Fields --- include/linux/kthread.h | 1 + kernel/kthread.c | 17 +++++++++++++++++ 2 files changed, 18 insertions(+) diff --git a/include/linux/kthread.h b/include/linux/kthread.h index 8bbcaad7ef0f4..c2a274b79c429 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h @@ -57,6 +57,7 @@ bool kthread_should_stop(void); bool kthread_should_park(void); bool __kthread_should_park(struct task_struct *k); bool kthread_freezable_should_stop(bool *was_frozen); +void *kthread_func(struct task_struct *k); void *kthread_data(struct task_struct *k); void *kthread_probe_data(struct task_struct *k); int kthread_park(struct task_struct *k); diff --git a/kernel/kthread.c b/kernel/kthread.c index bfbfa481be3a5..b84fc7eec0358 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -46,6 +46,7 @@ struct kthread_create_info struct kthread { unsigned long flags; unsigned int cpu; + int (*threadfn)(void *); void *data; struct completion parked; struct completion exited; @@ -152,6 +153,20 @@ bool kthread_freezable_should_stop(bool *was_frozen) } EXPORT_SYMBOL_GPL(kthread_freezable_should_stop); +/** + * kthread_func - return the function specified on kthread creation + * @task: kthread task in question + * + * Returns NULL if the task is not a kthread. + */ +void *kthread_func(struct task_struct *task) +{ + if (task->flags & PF_KTHREAD) + return to_kthread(task)->threadfn; + return NULL; +} +EXPORT_SYMBOL_GPL(kthread_func); + /** * kthread_data - return data value specified on kthread creation * @task: kthread task in question @@ -164,6 +179,7 @@ void *kthread_data(struct task_struct *task) { return to_kthread(task)->data; } +EXPORT_SYMBOL_GPL(kthread_data); /** * kthread_probe_data - speculative version of kthread_data() @@ -244,6 +260,7 @@ static int kthread(void *_create) do_exit(-ENOMEM); } + self->threadfn = threadfn; self->data = data; init_completion(&self->exited); init_completion(&self->parked); -- GitLab From 28df3d1539de5090f7916f6fff03891b67f366f4 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Fri, 28 Jul 2017 16:35:15 -0400 Subject: [PATCH 0320/1971] nfsd: clients don't need to break their own delegations We currently revoke read delegations on any write open or any operation that modifies file data or metadata (including rename, link, and unlink). But if the delegation in question is the only read delegation and is held by the client performing the operation, that's not really necessary. It's not always possible to prevent this in the NFSv4.0 case, because there's not always a way to determine which client an NFSv4.0 delegation came from. (In theory we could try to guess this from the transport layer, e.g., by assuming all traffic on a given TCP connection comes from the same client. But that's not really correct.) In the NFSv4.1 case the session layer always tells us the client. This patch should remove such self-conflicts in all cases where we can reliably determine the client from the compound. To do that we need to track "who" is performing a given (possibly lease-breaking) file operation. We're doing that by storing the information in the svc_rqst and using kthread_data() to map the current task back to a svc_rqst. Signed-off-by: J. 
Bruce Fields --- Documentation/filesystems/locking.rst | 2 ++ fs/locks.c | 3 +++ fs/nfsd/nfs4proc.c | 2 ++ fs/nfsd/nfs4state.c | 14 ++++++++++++++ fs/nfsd/nfsd.h | 2 ++ fs/nfsd/nfssvc.c | 6 ++++++ include/linux/fs.h | 1 + include/linux/sunrpc/svc.h | 1 + 8 files changed, 31 insertions(+) diff --git a/Documentation/filesystems/locking.rst b/Documentation/filesystems/locking.rst index 5057e4d9dcd1d..9fdcec4166142 100644 --- a/Documentation/filesystems/locking.rst +++ b/Documentation/filesystems/locking.rst @@ -425,6 +425,7 @@ prototypes:: int (*lm_grant)(struct file_lock *, struct file_lock *, int); void (*lm_break)(struct file_lock *); /* break_lease callback */ int (*lm_change)(struct file_lock **, int); + bool (*lm_breaker_owns_lease)(struct file_lock *); locking rules: @@ -435,6 +436,7 @@ lm_notify: yes yes no lm_grant: no no no lm_break: yes no no lm_change yes no no +lm_breaker_owns_lease: no no no ========== ============= ================= ========= buffer_head diff --git a/fs/locks.c b/fs/locks.c index b8a31c1c4fff3..a3f186846e93e 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -1557,6 +1557,9 @@ static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker) { bool rc; + if (lease->fl_lmops->lm_breaker_owns_lease + && lease->fl_lmops->lm_breaker_owns_lease(lease)) + return false; if ((breaker->fl_flags & FL_LAYOUT) != (lease->fl_flags & FL_LAYOUT)) { rc = false; goto trace; diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index 0e75f7fb5fec0..a6d73aa51ce4e 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -2302,6 +2302,8 @@ nfsd4_proc_compound(struct svc_rqst *rqstp) } check_if_stalefh_allowed(args); + rqstp->rq_lease_breaker = (void **)&cstate->clp; + trace_nfsd_compound(rqstp, args->opcnt); while (!status && resp->opcnt < args->opcnt) { op = &args->ops[resp->opcnt++]; diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index fe88f5f23cf46..31388046c6fe9 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -4579,6 +4579,19 @@ nfsd_break_deleg_cb(struct file_lock *fl) return ret; } +static bool nfsd_breaker_owns_lease(struct file_lock *fl) +{ + struct nfs4_delegation *dl = fl->fl_owner; + struct svc_rqst *rqst; + struct nfs4_client *clp; + + if (!i_am_nfsd()) + return NULL; + rqst = kthread_data(current); + clp = *(rqst->rq_lease_breaker); + return dl->dl_stid.sc_client == clp; +} + static int nfsd_change_deleg_cb(struct file_lock *onlist, int arg, struct list_head *dispose) @@ -4590,6 +4603,7 @@ nfsd_change_deleg_cb(struct file_lock *onlist, int arg, } static const struct lock_manager_operations nfsd_lease_mng_ops = { + .lm_breaker_owns_lease = nfsd_breaker_owns_lease, .lm_break = nfsd_break_deleg_cb, .lm_change = nfsd_change_deleg_cb, }; diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h index 2ab5569126b8a..36cdd81b6688a 100644 --- a/fs/nfsd/nfsd.h +++ b/fs/nfsd/nfsd.h @@ -88,6 +88,8 @@ int nfsd_pool_stats_release(struct inode *, struct file *); void nfsd_destroy(struct net *net); +bool i_am_nfsd(void); + struct nfsdfs_client { struct kref cl_ref; void (*cl_release)(struct kref *kref); diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index ca9fd348548b8..4f588c0eaaf44 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c @@ -601,6 +601,11 @@ static const struct svc_serv_ops nfsd_thread_sv_ops = { .svo_module = THIS_MODULE, }; +bool i_am_nfsd() +{ + return kthread_func(current) == nfsd; +} + int nfsd_create_serv(struct net *net) { int error; @@ -1011,6 +1016,7 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp) *statp = rpc_garbage_args; return 
1; } + rqstp->rq_lease_breaker = NULL; /* * Give the xdr decoder a chance to change this if it wants * (necessary in the NFSv4.0 compound case) diff --git a/include/linux/fs.h b/include/linux/fs.h index 4f6f59b4f22a8..4b784560ffaf3 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1045,6 +1045,7 @@ struct lock_manager_operations { bool (*lm_break)(struct file_lock *); int (*lm_change)(struct file_lock *, int, struct list_head *); void (*lm_setup)(struct file_lock *, void **); + bool (*lm_breaker_owns_lease)(struct file_lock *); }; struct lock_manager { diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index fd390894a5849..abf4a57ce4a7d 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -299,6 +299,7 @@ struct svc_rqst { struct net *rq_bc_net; /* pointer to backchannel's * net namespace */ + void ** rq_lease_breaker; /* The v4 client breaking a lease */ }; #define SVC_NET(rqst) (rqst->rq_xprt ? rqst->rq_xprt->xpt_net : rqst->rq_bc_net) -- GitLab From 255cdaf73412de13608fb776101402dca68bed2b Mon Sep 17 00:00:00 2001 From: Marco Felsch Date: Sat, 9 May 2020 14:34:35 -0700 Subject: [PATCH 0321/1971] Input: edt-ft5x06 - fix get_default register write access Since commit b6eba86030bf ("Input: edt-ft5x06 - add offset support for ev-ft5726") offset-x and offset-y is supported. Devices using those offset parameters don't support the offset parameter so we need to add the NO_REGISTER check for edt_ft5x06_ts_get_defaults(). Fixes: b6eba86030bf ("Input: edt-ft5x06 - add offset support for ev-ft5726") Signed-off-by: Marco Felsch Link: https://lore.kernel.org/r/20200227112819.16754-2-m.felsch@pengutronix.de Signed-off-by: Dmitry Torokhov --- drivers/input/touchscreen/edt-ft5x06.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c index d2587724c52ad..9b8450794a8a2 100644 --- a/drivers/input/touchscreen/edt-ft5x06.c +++ b/drivers/input/touchscreen/edt-ft5x06.c @@ -938,19 +938,25 @@ static void edt_ft5x06_ts_get_defaults(struct device *dev, error = device_property_read_u32(dev, "offset", &val); if (!error) { - edt_ft5x06_register_write(tsdata, reg_addr->reg_offset, val); + if (reg_addr->reg_offset != NO_REGISTER) + edt_ft5x06_register_write(tsdata, + reg_addr->reg_offset, val); tsdata->offset = val; } error = device_property_read_u32(dev, "offset-x", &val); if (!error) { - edt_ft5x06_register_write(tsdata, reg_addr->reg_offset_x, val); + if (reg_addr->reg_offset_x != NO_REGISTER) + edt_ft5x06_register_write(tsdata, + reg_addr->reg_offset_x, val); tsdata->offset_x = val; } error = device_property_read_u32(dev, "offset-y", &val); if (!error) { - edt_ft5x06_register_write(tsdata, reg_addr->reg_offset_y, val); + if (reg_addr->reg_offset_y != NO_REGISTER) + edt_ft5x06_register_write(tsdata, + reg_addr->reg_offset_y, val); tsdata->offset_y = val; } } -- GitLab From f4ee52f3ad8c9210030a9fe48bbb74dbc7c9f90e Mon Sep 17 00:00:00 2001 From: Marco Felsch Date: Sat, 9 May 2020 12:05:36 -0700 Subject: [PATCH 0322/1971] Input: edt-ft5x06 - move parameter restore into helper We need to restore the parameters if we switch between the factory/work mode and during the resume process if we switched off the power-supply. Therefore refactor edt_ft5x06_work_mode() and move the "restore the parameters" into a helper routine so we can reuse it later. 
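The shape of the refactor, as an illustrative sketch (the real helper below also restores the offsets and report rate, each guarded by a NO_REGISTER check):

	static void edt_ft5x06_restore_reg_parameters(struct edt_ft5x06_ts_data *tsdata)
	{
		struct edt_reg_addr *reg_addr = &tsdata->reg_addr;

		/* re-write the parameter values cached by the driver */
		edt_ft5x06_register_write(tsdata, reg_addr->reg_threshold,
					  tsdata->threshold);
		edt_ft5x06_register_write(tsdata, reg_addr->reg_gain,
					  tsdata->gain);
		/* ... offset, offset-x/y and report rate follow the same pattern ... */
	}

edt_ft5x06_work_mode() then calls this helper instead of open-coding the register writes, and the resume path added in the next patch can reuse it unchanged.
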
Signed-off-by: Marco Felsch Link: https://lore.kernel.org/r/20200227112819.16754-3-m.felsch@pengutronix.de Signed-off-by: Dmitry Torokhov --- drivers/input/touchscreen/edt-ft5x06.c | 43 ++++++++++++++------------ 1 file changed, 24 insertions(+), 19 deletions(-) diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c index 9b8450794a8a2..bb91070937962 100644 --- a/drivers/input/touchscreen/edt-ft5x06.c +++ b/drivers/input/touchscreen/edt-ft5x06.c @@ -527,6 +527,29 @@ static const struct attribute_group edt_ft5x06_attr_group = { .attrs = edt_ft5x06_attrs, }; +static void edt_ft5x06_restore_reg_parameters(struct edt_ft5x06_ts_data *tsdata) +{ + struct edt_reg_addr *reg_addr = &tsdata->reg_addr; + + edt_ft5x06_register_write(tsdata, reg_addr->reg_threshold, + tsdata->threshold); + edt_ft5x06_register_write(tsdata, reg_addr->reg_gain, + tsdata->gain); + if (reg_addr->reg_offset != NO_REGISTER) + edt_ft5x06_register_write(tsdata, reg_addr->reg_offset, + tsdata->offset); + if (reg_addr->reg_offset_x != NO_REGISTER) + edt_ft5x06_register_write(tsdata, reg_addr->reg_offset_x, + tsdata->offset_x); + if (reg_addr->reg_offset_y != NO_REGISTER) + edt_ft5x06_register_write(tsdata, reg_addr->reg_offset_y, + tsdata->offset_y); + if (reg_addr->reg_report_rate != NO_REGISTER) + edt_ft5x06_register_write(tsdata, reg_addr->reg_report_rate, + tsdata->report_rate); + +} + #ifdef CONFIG_DEBUG_FS static int edt_ft5x06_factory_mode(struct edt_ft5x06_ts_data *tsdata) { @@ -592,7 +615,6 @@ static int edt_ft5x06_work_mode(struct edt_ft5x06_ts_data *tsdata) { struct i2c_client *client = tsdata->client; int retries = EDT_SWITCH_MODE_RETRIES; - struct edt_reg_addr *reg_addr = &tsdata->reg_addr; int ret; int error; @@ -624,24 +646,7 @@ static int edt_ft5x06_work_mode(struct edt_ft5x06_ts_data *tsdata) kfree(tsdata->raw_buffer); tsdata->raw_buffer = NULL; - /* restore parameters */ - edt_ft5x06_register_write(tsdata, reg_addr->reg_threshold, - tsdata->threshold); - edt_ft5x06_register_write(tsdata, reg_addr->reg_gain, - tsdata->gain); - if (reg_addr->reg_offset != NO_REGISTER) - edt_ft5x06_register_write(tsdata, reg_addr->reg_offset, - tsdata->offset); - if (reg_addr->reg_offset_x != NO_REGISTER) - edt_ft5x06_register_write(tsdata, reg_addr->reg_offset_x, - tsdata->offset_x); - if (reg_addr->reg_offset_y != NO_REGISTER) - edt_ft5x06_register_write(tsdata, reg_addr->reg_offset_y, - tsdata->offset_y); - if (reg_addr->reg_report_rate != NO_REGISTER) - edt_ft5x06_register_write(tsdata, reg_addr->reg_report_rate, - tsdata->report_rate); - + edt_ft5x06_restore_reg_parameters(tsdata); enable_irq(client->irq); return 0; -- GitLab From 21d1611a83f8aa3907ca653b5851dd01951d4da6 Mon Sep 17 00:00:00 2001 From: Marco Felsch Date: Sat, 9 May 2020 12:10:40 -0700 Subject: [PATCH 0323/1971] Input: edt-ft5x06 - improve power management operations It is possible to bring the device into a deep sleep state. To exit this state the reset or wakeup pin must be toggeled as documented in [1]. Because of the poor documentation I used the several downstream kernels [2] and other applications notes [3] to indentify the related registers. Furthermore I added the support to disable the device completely which is obviously the most effective power-saving mechanism. This mechanism needs the reset pin to ensure the power-up/down sequence. We can't apply any of these power-saving mechanism if both pins are missing (not connected) or if it is a wakeup device. 
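Condensed to its decision logic (an illustrative sketch of the change below), the driver picks the deepest suspend mode the board wiring allows, and the suspend/resume callbacks additionally return early when device_may_wakeup(dev) is true:

	if (tsdata->reset_gpio)
		tsdata->suspend_mode = EDT_PMODE_POWEROFF;	/* power can be cut safely */
	else if (tsdata->wake_gpio)
		tsdata->suspend_mode = EDT_PMODE_HIBERNATE;	/* deep sleep, wake via pin */
	else
		tsdata->suspend_mode = EDT_PMODE_NOT_SUPPORTED;
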
[1] https://www.newhavendisplay.com/appnotes/datasheets/touchpanel/FT5x26.pdf https://www.newhavendisplay.com/appnotes/datasheets/touchpanel/FT5x06.pdf [2] https://github.com/linux-sunxi/linux-sunxi/blob/sunxi-3.4/drivers/input/touchscreen/ft5x_ts.c https://github.com/Pablito2020/focaltech-touch-driver/blob/master/ft5336_driver.c [3] https://www.newhavendisplay.com/appnotes/datasheets/touchpanel/FT5x16_registers.pdf Signed-off-by: Marco Felsch Link: https://lore.kernel.org/r/20200227112819.16754-4-m.felsch@pengutronix.de Signed-off-by: Dmitry Torokhov --- drivers/input/touchscreen/edt-ft5x06.c | 142 +++++++++++++++++++++++-- 1 file changed, 132 insertions(+), 10 deletions(-) diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c index bb91070937962..d423bd6bfeda4 100644 --- a/drivers/input/touchscreen/edt-ft5x06.c +++ b/drivers/input/touchscreen/edt-ft5x06.c @@ -38,6 +38,9 @@ #define WORK_REGISTER_NUM_X 0x33 #define WORK_REGISTER_NUM_Y 0x34 +#define PMOD_REGISTER_ACTIVE 0x00 +#define PMOD_REGISTER_HIBERNATE 0x03 + #define M09_REGISTER_THRESHOLD 0x80 #define M09_REGISTER_GAIN 0x92 #define M09_REGISTER_OFFSET 0x93 @@ -53,6 +56,7 @@ #define WORK_REGISTER_OPMODE 0x3c #define FACTORY_REGISTER_OPMODE 0x01 +#define PMOD_REGISTER_OPMODE 0xa5 #define TOUCH_EVENT_DOWN 0x00 #define TOUCH_EVENT_UP 0x01 @@ -65,6 +69,12 @@ #define EDT_RAW_DATA_RETRIES 100 #define EDT_RAW_DATA_DELAY 1000 /* usec */ +enum edt_pmode { + EDT_PMODE_NOT_SUPPORTED, + EDT_PMODE_HIBERNATE, + EDT_PMODE_POWEROFF, +}; + enum edt_ver { EDT_M06, EDT_M09, @@ -103,6 +113,7 @@ struct edt_ft5x06_ts_data { struct mutex mutex; bool factory_mode; + enum edt_pmode suspend_mode; int threshold; int gain; int offset; @@ -767,9 +778,8 @@ static const struct file_operations debugfs_raw_data_fops = { .read = edt_ft5x06_debugfs_raw_data_read, }; -static void -edt_ft5x06_ts_prepare_debugfs(struct edt_ft5x06_ts_data *tsdata, - const char *debugfs_name) +static void edt_ft5x06_ts_prepare_debugfs(struct edt_ft5x06_ts_data *tsdata, + const char *debugfs_name) { tsdata->debug_dir = debugfs_create_dir(debugfs_name, NULL); @@ -782,8 +792,7 @@ edt_ft5x06_ts_prepare_debugfs(struct edt_ft5x06_ts_data *tsdata, tsdata->debug_dir, tsdata, &debugfs_raw_data_fops); } -static void -edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata) +static void edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata) { debugfs_remove_recursive(tsdata->debug_dir); kfree(tsdata->raw_buffer); @@ -791,14 +800,17 @@ edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata) #else -static inline void -edt_ft5x06_ts_prepare_debugfs(struct edt_ft5x06_ts_data *tsdata, - const char *debugfs_name) +static int edt_ft5x06_factory_mode(struct edt_ft5x06_ts_data *tsdata) { + return -ENOSYS; } -static inline void -edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata) +static void edt_ft5x06_ts_prepare_debugfs(struct edt_ft5x06_ts_data *tsdata, + const char *debugfs_name) +{ +} + +static void edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata) { } @@ -1125,6 +1137,19 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client, return error; } + /* + * Check which sleep modes we can support. Power-off requieres the + * reset-pin to ensure correct power-down/power-up behaviour. Start with + * the EDT_PMODE_POWEROFF test since this is the deepest possible sleep + * mode. 
+ */ + if (tsdata->reset_gpio) + tsdata->suspend_mode = EDT_PMODE_POWEROFF; + else if (tsdata->wake_gpio) + tsdata->suspend_mode = EDT_PMODE_HIBERNATE; + else + tsdata->suspend_mode = EDT_PMODE_NOT_SUPPORTED; + if (tsdata->wake_gpio) { usleep_range(5000, 6000); gpiod_set_value_cansleep(tsdata->wake_gpio, 1); @@ -1238,6 +1263,102 @@ static int edt_ft5x06_ts_remove(struct i2c_client *client) return 0; } +static int __maybe_unused edt_ft5x06_ts_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct edt_ft5x06_ts_data *tsdata = i2c_get_clientdata(client); + struct gpio_desc *reset_gpio = tsdata->reset_gpio; + int ret; + + if (device_may_wakeup(dev)) + return 0; + + if (tsdata->suspend_mode == EDT_PMODE_NOT_SUPPORTED) + return 0; + + /* Enter hibernate mode. */ + ret = edt_ft5x06_register_write(tsdata, PMOD_REGISTER_OPMODE, + PMOD_REGISTER_HIBERNATE); + if (ret) + dev_warn(dev, "Failed to set hibernate mode\n"); + + if (tsdata->suspend_mode == EDT_PMODE_HIBERNATE) + return 0; + + /* + * Power-off according the datasheet. Cut the power may leaf the irq + * line in an undefined state depending on the host pull resistor + * settings. Disable the irq to avoid adjusting each host till the + * device is back in a full functional state. + */ + disable_irq(tsdata->client->irq); + + gpiod_set_value_cansleep(reset_gpio, 1); + usleep_range(1000, 2000); + + ret = regulator_disable(tsdata->vcc); + if (ret) + dev_warn(dev, "Failed to disable vcc\n"); + + return 0; +} + +static int __maybe_unused edt_ft5x06_ts_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct edt_ft5x06_ts_data *tsdata = i2c_get_clientdata(client); + int ret = 0; + + if (device_may_wakeup(dev)) + return 0; + + if (tsdata->suspend_mode == EDT_PMODE_NOT_SUPPORTED) + return 0; + + if (tsdata->suspend_mode == EDT_PMODE_POWEROFF) { + struct gpio_desc *reset_gpio = tsdata->reset_gpio; + + /* + * We can't check if the regulator is a dummy or a real + * regulator. So we need to specify the 5ms reset time (T_rst) + * here instead of the 100us T_rtp time. We also need to wait + * 300ms in case it was a real supply and the power was cutted + * of. Toggle the reset pin is also a way to exit the hibernate + * mode. 
+ */ + gpiod_set_value_cansleep(reset_gpio, 1); + usleep_range(5000, 6000); + + ret = regulator_enable(tsdata->vcc); + if (ret) { + dev_err(dev, "Failed to enable vcc\n"); + return ret; + } + + usleep_range(1000, 2000); + gpiod_set_value_cansleep(reset_gpio, 0); + msleep(300); + + edt_ft5x06_restore_reg_parameters(tsdata); + enable_irq(tsdata->client->irq); + + if (tsdata->factory_mode) + ret = edt_ft5x06_factory_mode(tsdata); + } else { + struct gpio_desc *wake_gpio = tsdata->wake_gpio; + + gpiod_set_value_cansleep(wake_gpio, 0); + usleep_range(5000, 6000); + gpiod_set_value_cansleep(wake_gpio, 1); + } + + + return ret; +} + +static SIMPLE_DEV_PM_OPS(edt_ft5x06_ts_pm_ops, + edt_ft5x06_ts_suspend, edt_ft5x06_ts_resume); + static const struct edt_i2c_chip_data edt_ft5x06_data = { .max_support_points = 5, }; @@ -1276,6 +1397,7 @@ static struct i2c_driver edt_ft5x06_ts_driver = { .driver = { .name = "edt_ft5x06", .of_match_table = edt_ft5x06_of_match, + .pm = &edt_ft5x06_ts_pm_ops, }, .id_table = edt_ft5x06_ts_id, .probe = edt_ft5x06_ts_probe, -- GitLab From 0f58daaacca9fdc078eadfcfaa19e467b264cb28 Mon Sep 17 00:00:00 2001 From: Ahmad Fatoum Date: Sat, 9 May 2020 14:27:23 -0700 Subject: [PATCH 0324/1971] Input: edt-ft5x06 - prefer asynchronous probe Probing the device takes a while, because we sleep for 300 ms after a reset; allow asynchronous probing so this can happen in the background while other devices are being probed. Signed-off-by: Ahmad Fatoum Signed-off-by: Marco Felsch Link: https://lore.kernel.org/r/20200227112819.16754-5-m.felsch@pengutronix.de Signed-off-by: Dmitry Torokhov --- drivers/input/touchscreen/edt-ft5x06.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c index d423bd6bfeda4..3a4f18d3450d1 100644 --- a/drivers/input/touchscreen/edt-ft5x06.c +++ b/drivers/input/touchscreen/edt-ft5x06.c @@ -1398,6 +1398,7 @@ static struct i2c_driver edt_ft5x06_ts_driver = { .name = "edt_ft5x06", .of_match_table = edt_ft5x06_of_match, .pm = &edt_ft5x06_ts_pm_ops, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, }, .id_table = edt_ft5x06_ts_id, .probe = edt_ft5x06_ts_probe, -- GitLab From 934ed3847a4ebc75b655659c4d2349ba4337941c Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sat, 9 May 2020 10:23:23 +0200 Subject: [PATCH 0325/1971] power: supply: lp8788: Fix an error handling path in 'lp8788_charger_probe()' In the probe function, in case of error, resources allocated in 'lp8788_setup_adc_channel()' must be released. This can be achieved easily by using the devm_ variant of 'iio_channel_get()'. This has the extra benefit to simplify the remove function and to axe the 'lp8788_release_adc_channel()' function which is now useless. Fixes: 98a276649358 ("power_supply: Add new lp8788 charger driver") Signed-off-by: Christophe JAILLET Signed-off-by: Sebastian Reichel --- drivers/power/supply/lp8788-charger.c | 18 ++---------------- 1 file changed, 2 insertions(+), 16 deletions(-) diff --git a/drivers/power/supply/lp8788-charger.c b/drivers/power/supply/lp8788-charger.c index 84a206f42a8e9..e7931ffb7151d 100644 --- a/drivers/power/supply/lp8788-charger.c +++ b/drivers/power/supply/lp8788-charger.c @@ -572,27 +572,14 @@ static void lp8788_setup_adc_channel(struct device *dev, return; /* ADC channel for battery voltage */ - chan = iio_channel_get(dev, pdata->adc_vbatt); + chan = devm_iio_channel_get(dev, pdata->adc_vbatt); pchg->chan[LP8788_VBATT] = IS_ERR(chan) ? 
NULL : chan; /* ADC channel for battery temperature */ - chan = iio_channel_get(dev, pdata->adc_batt_temp); + chan = devm_iio_channel_get(dev, pdata->adc_batt_temp); pchg->chan[LP8788_BATT_TEMP] = IS_ERR(chan) ? NULL : chan; } -static void lp8788_release_adc_channel(struct lp8788_charger *pchg) -{ - int i; - - for (i = 0; i < LP8788_NUM_CHG_ADC; i++) { - if (!pchg->chan[i]) - continue; - - iio_channel_release(pchg->chan[i]); - pchg->chan[i] = NULL; - } -} - static ssize_t lp8788_show_charger_status(struct device *dev, struct device_attribute *attr, char *buf) { @@ -735,7 +722,6 @@ static int lp8788_charger_remove(struct platform_device *pdev) flush_work(&pchg->charger_work); lp8788_irq_unregister(pdev, pchg); lp8788_psy_unregister(pchg); - lp8788_release_adc_channel(pchg); return 0; } -- GitLab From 1072ceada4586f7c1d316a0dd9fa751a7055969c Mon Sep 17 00:00:00 2001 From: Hongbo Yao Date: Sat, 9 May 2020 18:36:11 +0800 Subject: [PATCH 0326/1971] power: reset: ltc2952: remove unused variable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix gcc '-Wunused-but-set-variable' warning: drivers/power/reset/ltc2952-poweroff.c:97:16: warning: variable ‘overruns’ set but not used [-Wunused-but-set-variable] unsigned long overruns; Reported-by: Hulk Robot Signed-off-by: Hongbo Yao Signed-off-by: Sebastian Reichel --- drivers/power/reset/ltc2952-poweroff.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/power/reset/ltc2952-poweroff.c b/drivers/power/reset/ltc2952-poweroff.c index e4a0cc45b3d11..318927938b05b 100644 --- a/drivers/power/reset/ltc2952-poweroff.c +++ b/drivers/power/reset/ltc2952-poweroff.c @@ -94,7 +94,6 @@ static enum hrtimer_restart ltc2952_poweroff_timer_wde(struct hrtimer *timer) { ktime_t now; int state; - unsigned long overruns; struct ltc2952_poweroff *data = to_ltc2952(timer, timer_wde); if (data->kernel_panic) @@ -104,7 +103,7 @@ static enum hrtimer_restart ltc2952_poweroff_timer_wde(struct hrtimer *timer) gpiod_set_value(data->gpio_watchdog, !state); now = hrtimer_cb_get_time(timer); - overruns = hrtimer_forward(timer, now, data->wde_interval); + hrtimer_forward(timer, now, data->wde_interval); return HRTIMER_RESTART; } -- GitLab From 92f7d909720e188bf5ce411dc2ebdb8049c855b1 Mon Sep 17 00:00:00 2001 From: Matti Vaittinen Date: Fri, 8 May 2020 18:44:45 +0300 Subject: [PATCH 0327/1971] power: supply: bd70528: use linear ranges Change the bd70528 to use common linear_range code instead of implementing a copy of it in this driver. 
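For reference, a hedged sketch rather than new driver code: a struct linear_range describes a run of selectors that map linearly to values, value = min + (sel - min_sel) * step, and the shared helpers replace the driver-local search routines removed below:

	static const struct linear_range warm_charge_curr[] = {
		{ .min = 10,  .step = 10, .min_sel = 0x00, .max_sel = 0x12 },
		{ .min = 200, .step = 25, .min_sel = 0x13, .max_sel = 0x1f },
	};

	/* selector -> value, e.g. when reading back the charge current */
	ret = linear_range_get_value_array(warm_charge_curr,
					   ARRAY_SIZE(warm_charge_curr),
					   sel, &ma);

	/* value -> largest selector whose value does not exceed the request */
	ret = linear_range_get_selector_low_array(warm_charge_curr,
						  ARRAY_SIZE(warm_charge_curr),
						  ma, &sel, &found);
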
Signed-off-by: Matti Vaittinen Signed-off-by: Sebastian Reichel --- drivers/power/supply/Kconfig | 1 + drivers/power/supply/bd70528-charger.c | 144 ++++++++++--------------- 2 files changed, 56 insertions(+), 89 deletions(-) diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig index efdcf49f27bfe..9448d6c66cae4 100644 --- a/drivers/power/supply/Kconfig +++ b/drivers/power/supply/Kconfig @@ -706,6 +706,7 @@ config CHARGER_UCS1002 config CHARGER_BD70528 tristate "ROHM bd70528 charger driver" depends on MFD_ROHM_BD70528 + select LINEAR_RANGES default n help Say Y here to enable support for getting battery status diff --git a/drivers/power/supply/bd70528-charger.c b/drivers/power/supply/bd70528-charger.c index 3b820110ecfab..7c1f0b99c71bd 100644 --- a/drivers/power/supply/bd70528-charger.c +++ b/drivers/power/supply/bd70528-charger.c @@ -72,6 +72,7 @@ #include #include #include +#include #define CHG_STAT_SUSPEND 0x0 #define CHG_STAT_TRICKLE 0x1 @@ -335,38 +336,37 @@ static int bd70528_get_present(struct bd70528_psy *bdpsy, int *val) return 0; } -struct bd70528_linear_range { - int min; - int step; - int vals; - int low_sel; -}; - -static const struct bd70528_linear_range current_limit_ranges[] = { +static const struct linear_range current_limit_ranges[] = { { .min = 5, .step = 1, - .vals = 36, - .low_sel = 0, + .min_sel = 0, + .max_sel = 0x22, }, { .min = 40, .step = 5, - .vals = 5, - .low_sel = 0x23, + .min_sel = 0x23, + .max_sel = 0x26, }, { .min = 60, .step = 20, - .vals = 8, - .low_sel = 0x27, + .min_sel = 0x27, + .max_sel = 0x2d, }, { .min = 200, .step = 50, - .vals = 7, - .low_sel = 0x2e, - } + .min_sel = 0x2e, + .max_sel = 0x34, + }, + { + .min = 500, + .step = 0, + .min_sel = 0x35, + .max_sel = 0x3f, + }, }; /* @@ -374,18 +374,18 @@ static const struct bd70528_linear_range current_limit_ranges[] = { * voltage for low temperatures. The driver currently only reads * the charge current at room temperature. We do set both though. */ -static const struct bd70528_linear_range warm_charge_curr[] = { +static const struct linear_range warm_charge_curr[] = { { .min = 10, .step = 10, - .vals = 20, - .low_sel = 0, + .min_sel = 0, + .max_sel = 0x12, }, { .min = 200, .step = 25, - .vals = 13, - .low_sel = 0x13, + .min_sel = 0x13, + .max_sel = 0x1f, }, }; @@ -398,56 +398,6 @@ static const struct bd70528_linear_range warm_charge_curr[] = { #define MAX_WARM_CHG_CURR_SEL 0x1f #define MIN_CHG_CURR_SEL 0x0 -static int find_value_for_selector_low(const struct bd70528_linear_range *r, - int selectors, unsigned int sel, - unsigned int *val) -{ - int i; - - for (i = 0; i < selectors; i++) { - if (r[i].low_sel <= sel && r[i].low_sel + r[i].vals >= sel) { - *val = r[i].min + (sel - r[i].low_sel) * r[i].step; - return 0; - } - } - return -EINVAL; -} - -/* - * For BD70528 voltage/current limits we happily accept any value which - * belongs the range. We could check if value matching the selector is - * desired by computing the range min + (sel - sel_low) * range step - but - * I guess it is enough if we use voltage/current which is closest (below) - * the requested? 
- */ -static int find_selector_for_value_low(const struct bd70528_linear_range *r, - int selectors, unsigned int val, - unsigned int *sel, bool *found) -{ - int i; - int ret = -EINVAL; - - *found = false; - for (i = 0; i < selectors; i++) { - if (r[i].min <= val) { - if (r[i].min + r[i].step * r[i].vals >= val) { - *found = true; - *sel = r[i].low_sel + (val - r[i].min) / - r[i].step; - ret = 0; - break; - } - /* - * If the range max is smaller than requested - * we can set the max supported value from range - */ - *sel = r[i].low_sel + r[i].vals; - ret = 0; - } - } - return ret; -} - static int get_charge_current(struct bd70528_psy *bdpsy, int *ma) { unsigned int sel; @@ -463,9 +413,9 @@ static int get_charge_current(struct bd70528_psy *bdpsy, int *ma) sel &= BD70528_MASK_CHG_CHG_CURR; - ret = find_value_for_selector_low(&warm_charge_curr[0], - ARRAY_SIZE(warm_charge_curr), sel, - ma); + ret = linear_range_get_value_array(&warm_charge_curr[0], + ARRAY_SIZE(warm_charge_curr), + sel, ma); if (ret) { dev_err(bdpsy->dev, "Unknown charge current value 0x%x\n", @@ -491,10 +441,9 @@ static int get_current_limit(struct bd70528_psy *bdpsy, int *ma) sel &= BD70528_MASK_CHG_DCIN_ILIM; - ret = find_value_for_selector_low(¤t_limit_ranges[0], - ARRAY_SIZE(current_limit_ranges), sel, - ma); - + ret = linear_range_get_value_array(¤t_limit_ranges[0], + ARRAY_SIZE(current_limit_ranges), + sel, ma); if (ret) { /* Unspecified values mean 500 mA */ *ma = 500; @@ -588,15 +537,28 @@ static int set_charge_current(struct bd70528_psy *bdpsy, int ma) goto set; } - ret = find_selector_for_value_low(&warm_charge_curr[0], - ARRAY_SIZE(warm_charge_curr), ma, - ®, &found); +/* + * For BD70528 voltage/current limits we happily accept any value which + * belongs the range. We could check if value matching the selector is + * desired by computing the range min + (sel - sel_low) * range step - but + * I guess it is enough if we use voltage/current which is closest (below) + * the requested? + */ + + ret = linear_range_get_selector_low_array(warm_charge_curr, + ARRAY_SIZE(warm_charge_curr), + ma, ®, &found); if (ret) { + dev_err(bdpsy->dev, + "Unsupported charge current %u mA\n", ma); reg = MIN_CHG_CURR_SEL; goto set; } if (!found) { - /* There was a gap in supported values and we hit it */ + /* + * There was a gap in supported values and we hit it. + * Yet a smaller value was found so we use it. + */ dev_warn(bdpsy->dev, "Unsupported charge current %u mA\n", ma); } @@ -648,17 +610,21 @@ static int set_current_limit(struct bd70528_psy *bdpsy, int ma) goto set; } - ret = find_selector_for_value_low(¤t_limit_ranges[0], - ARRAY_SIZE(current_limit_ranges), ma, - ®, &found); + ret = linear_range_get_selector_low_array(current_limit_ranges, + ARRAY_SIZE(current_limit_ranges), + ma, ®, &found); if (ret) { + dev_err(bdpsy->dev, "Unsupported current limit %umA\n", ma); reg = MIN_CURR_LIMIT_SEL; goto set; } if (!found) { - /* There was a gap in supported values and we hit it ?*/ - dev_warn(bdpsy->dev, "Unsupported current limit %umA\n", - ma); + /* + * There was a gap in supported values and we hit it. + * We found a smaller value from ranges and use it. + * Warn user though. 
+ */ + dev_warn(bdpsy->dev, "Unsupported current limit %umA\n", ma); } set: -- GitLab From e3420b49949c79d6182dd8128fa7a3958da01b07 Mon Sep 17 00:00:00 2001 From: Matti Vaittinen Date: Fri, 8 May 2020 18:46:17 +0300 Subject: [PATCH 0328/1971] dt-bindings: battery: add new battery parameters Add: - trickle-charge-current-microamp: Some chargers have 3 charging stages. First one when battery is almost empty is often called as trickle-charge. Last state when battery has been "woken up" is usually called as fast-charge. In addition to this some chargers have a 'middle state' which ROHM BD99954 data-sheet describes as pre-charge. Some batteries can benefit from this 3-phase charging [citation needed]. Introduce trickle-charge-current-microamp so that batteries can give charging current limit for all three states. - precharge-upper-limit-microvolt: When battery voltage has reached certain limit we change from trickle-charge to next charging state (pre-charge for BD99954). Allow battery to specify this limit. - re-charge-voltage-microvolt: Allow giving a battery specific voltage limit for chargers which can automatically re-start charging when battery has discharghed down to this limit. - over-voltage-threshold-microvolt Allow specifying voltage threshold after which the battery is assumed to be faulty. Signed-off-by: Matti Vaittinen Reviewed-by: Rob Herring Signed-off-by: Sebastian Reichel --- Documentation/devicetree/bindings/power/supply/battery.txt | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Documentation/devicetree/bindings/power/supply/battery.txt b/Documentation/devicetree/bindings/power/supply/battery.txt index 3049cf88bdcf3..5e29595edd74a 100644 --- a/Documentation/devicetree/bindings/power/supply/battery.txt +++ b/Documentation/devicetree/bindings/power/supply/battery.txt @@ -11,15 +11,21 @@ different type. This prevents unpredictable, potentially harmful, behavior should a replacement that changes the battery type occur without a corresponding update to the dtb. +Please note that not all charger drivers respect all of the properties. + Required Properties: - compatible: Must be "simple-battery" Optional Properties: + - over-voltage-threshold-microvolt: battery over-voltage limit + - re-charge-voltage-microvolt: limit to automatically start charging again - voltage-min-design-microvolt: drained battery voltage - voltage-max-design-microvolt: fully charged battery voltage - energy-full-design-microwatt-hours: battery design energy - charge-full-design-microamp-hours: battery design capacity + - trickle-charge-current-microamp: current for trickle-charge phase - precharge-current-microamp: current for pre-charge phase + - precharge-upper-limit-microvolt: limit when to change to constant charging - charge-term-current-microamp: current for charge termination phase - constant-charge-current-max-microamp: maximum constant input current - constant-charge-voltage-max-microvolt: maximum constant input voltage -- GitLab From 5a63b7ba50fd6b7a897bf9353dbf31d579cfe116 Mon Sep 17 00:00:00 2001 From: Matti Vaittinen Date: Fri, 8 May 2020 18:47:20 +0300 Subject: [PATCH 0329/1971] power: supply: add battery parameters Add parsing of new device-tree battery bindings. 
- trickle-charge-current-microamp - precharge-upper-limit-microvolt - re-charge-voltage-microvolt - over-voltage-threshold-microvolt Signed-off-by: Matti Vaittinen Signed-off-by: Sebastian Reichel --- drivers/power/supply/power_supply_core.c | 8 ++++++++ include/linux/power_supply.h | 4 ++++ 2 files changed, 12 insertions(+) diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c index 1a9a9fae73d32..02b37fe6061c0 100644 --- a/drivers/power/supply/power_supply_core.c +++ b/drivers/power/supply/power_supply_core.c @@ -620,10 +620,18 @@ int power_supply_get_battery_info(struct power_supply *psy, &info->voltage_min_design_uv); of_property_read_u32(battery_np, "voltage-max-design-microvolt", &info->voltage_max_design_uv); + of_property_read_u32(battery_np, "trickle-charge-current-microamp", + &info->tricklecharge_current_ua); of_property_read_u32(battery_np, "precharge-current-microamp", &info->precharge_current_ua); + of_property_read_u32(battery_np, "precharge-upper-limit-microvolt", + &info->precharge_voltage_max_uv); of_property_read_u32(battery_np, "charge-term-current-microamp", &info->charge_term_current_ua); + of_property_read_u32(battery_np, "re-charge-voltage-microvolt", + &info->charge_restart_voltage_uv); + of_property_read_u32(battery_np, "over-voltage-threshold-microvolt", + &info->overvoltage_limit_uv); of_property_read_u32(battery_np, "constant-charge-current-max-microamp", &info->constant_charge_current_max_ua); of_property_read_u32(battery_np, "constant-charge-voltage-max-microvolt", diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 6a34df65d4d18..1f60731ec7fe2 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -346,8 +346,12 @@ struct power_supply_battery_info { int charge_full_design_uah; /* microAmp-hours */ int voltage_min_design_uv; /* microVolts */ int voltage_max_design_uv; /* microVolts */ + int tricklecharge_current_ua; /* microAmps */ int precharge_current_ua; /* microAmps */ + int precharge_voltage_max_uv; /* microVolts */ int charge_term_current_ua; /* microAmps */ + int charge_restart_voltage_uv; /* microVolts */ + int overvoltage_limit_uv; /* microVolts */ int constant_charge_current_max_ua; /* microAmps */ int constant_charge_voltage_max_uv; /* microVolts */ int factory_internal_resistance_uohm; /* microOhms */ -- GitLab From 2a75c8a4e4fe1dd3f773259932a7a993e507f786 Mon Sep 17 00:00:00 2001 From: Matti Vaittinen Date: Fri, 8 May 2020 18:48:24 +0300 Subject: [PATCH 0330/1971] dt_bindings: ROHM BD99954 Charger The ROHM BD99954 is a Battery Management LSI for 1-4 cell Lithium-Ion secondary battery. Intended to be used in space-constraint equipment such as Low profile Notebook PC, Tablets and other applications. BD99954 provides a Dual-source Battery Charger, two port BC1.2 detection and a Battery Monitor. 
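A charger driver picks these limits up through the struct power_supply_battery_info fields added in the previous patch. For illustration, a minimal, hypothetical consumer sketch (show_battery_limits() is not part of this series; the -EINVAL "property not given" check mirrors what bd9995x_fw_probe() does in the final patch of this set):

#include <linux/power_supply.h>
#include <linux/printk.h>

/* Illustrative only: log the new battery limits a charger driver sees. */
static int show_battery_limits(struct power_supply *psy)
{
	struct power_supply_battery_info info;
	int ret;

	ret = power_supply_get_battery_info(psy, &info);
	if (ret < 0)
		return ret;

	/* bd9995x_fw_probe() treats -EINVAL as "property not given" */
	if (info.tricklecharge_current_ua != -EINVAL)
		pr_info("trickle-charge current: %d uA\n",
			info.tricklecharge_current_ua);
	if (info.precharge_voltage_max_uv != -EINVAL)
		pr_info("trickle-to-pre-charge threshold: %d uV\n",
			info.precharge_voltage_max_uv);
	if (info.charge_restart_voltage_uv != -EINVAL)
		pr_info("re-charge start voltage: %d uV\n",
			info.charge_restart_voltage_uv);
	if (info.overvoltage_limit_uv != -EINVAL)
		pr_info("battery over-voltage limit: %d uV\n",
			info.overvoltage_limit_uv);

	power_supply_put_battery_info(psy, &info);
	return 0;
}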
Document the DT bindings for BD99954 Signed-off-by: Matti Vaittinen Reviewed-by: Rob Herring Signed-off-by: Sebastian Reichel --- .../bindings/power/supply/rohm,bd99954.yaml | 155 ++++++++++++++++++ 1 file changed, 155 insertions(+) create mode 100644 Documentation/devicetree/bindings/power/supply/rohm,bd99954.yaml diff --git a/Documentation/devicetree/bindings/power/supply/rohm,bd99954.yaml b/Documentation/devicetree/bindings/power/supply/rohm,bd99954.yaml new file mode 100644 index 0000000000000..7e0f73a898c7c --- /dev/null +++ b/Documentation/devicetree/bindings/power/supply/rohm,bd99954.yaml @@ -0,0 +1,155 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/power/supply/rohm,bd99954.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: ROHM BD99954 Battery charger + +maintainers: + - Matti Vaittinen + - Markus Laine + - Mikko Mutanen + +description: | + The ROHM BD99954 is a Battery Management LSI for 1-4 cell Lithium-Ion + secondary battery intended to be used in space-constraint equipment such + as Low profile Notebook PC, Tablets and other applications. BD99954 + provides a Dual-source Battery Charger, two port BC1.2 detection and a + Battery Monitor. + + +properties: + compatible: + const: rohm,bd99954 +# +# The battery charging profile of BD99954. +# +# Curve (1) represents charging current. +# Curve (2) represents battery voltage. +# +# The BD99954 data sheet divides charging to three phases. +# a) Trickle-charge with constant current (8). +# b) pre-charge with constant current (6) +# c) fast-charge with: +# First a constant current (5) phase (CC) +# Then constant voltage (CV) phase (after the battery voltage has reached +# target level - until charging current has dropped to termination +# level (7) +# +# V ^ ^ I +# . . +# . . +# (4)- -.- - - - - - - - - - - - - - +++++++++++++++++++++++++++. +# . / . +# . ++++++/++ - - - - - - - - - - - - -.- - (5) +# . + / + . +# . + - -- . +# . + - + . +# . +.- -: . +# . .+ +` . +# . .- + | `/ . +# . .." + .: . +# . -" + -- . +# . (2) ..." + | :- . +# . ..."" + -: . +# (3)- -.-.""- - - - -+++++++++ - - - - - - -.:- - - - - - - - - .- - (6) +# . + `:. . +# . + | -: . +# . + -: . +# . + .. . +# . (1) + | "+++- - - -.- - (7) +# -++++++++++++++- - - - - - - - - - - - - - - - - + - - - .- - (8) +# . 
+ - +# -------------------------------------------------+++++++++--> +# | | | CC | CV | +# | --trickle-- | -pre- | ---------fast----------- | +# +# The charger uses the following battery properties +# - trickle-charge-current-microamp: +# Current used at trickle-charge phase (8 in above chart) +# minimum: 64000 +# maximum: 1024000 +# multipleOf: 64000 +# - precharge-current-microamp: +# Current used at pre-charge phase (6 in above chart) +# minimum: 64000 +# maximum: 1024000 +# multipleOf: 64000 +# - constant-charge-current-max-microamp +# Current used at fast charge constant current phase (5 in above chart) +# minimum: 64000 +# maximum: 1024000 +# multipleOf: 64000 +# - constant-charge-voltage-max-microvolt +# The constant voltage used in fast charging phase (4 in above chart) +# minimum: 2560000 +# maximum: 19200000 +# multipleOf: 16000 +# - precharge-upper-limit-microvolt +# charging mode is changed from trickle charging to pre-charging +# when battery voltage exceeds this limit voltage (3 in above chart) +# minimum: 2048000 +# maximum: 19200000 +# multipleOf: 64000 +# - re-charge-voltage-microvolt +# minimum: 2560000 +# maximum: 19200000 +# multipleOf: 16000 +# re-charging is automatically started when battry has been discharging +# to the point where the battery voltage drops below this limit +# - over-voltage-threshold-microvolt +# battery is expected to be faulty if battery voltage exceeds this limit. +# Charger will then enter to a "battery faulty" -state +# minimum: 2560000 +# maximum: 19200000 +# multipleOf: 16000 +# - charge-term-current-microamp +# minimum: 0 +# maximum: 1024000 +# multipleOf: 64000 +# a charge cycle terminates when the battery voltage is above recharge +# threshold, and the current is below this setting (7 in above chart) +# See also Documentation/devicetree/bindings/power/supply/battery.txt + + monitored-battery: + description: + phandle of battery characteristics devicetree node + + rohm,vsys-regulation-microvolt: + description: system specific lower limit for system voltage. + minimum: 2560000 + maximum: 19200000 + multipleOf: 64000 + + rohm,vbus-input-current-limit-microamp: + description: system specific VBUS input current limit (in microamps). + minimum: 32000 + maximum: 16352000 + multipleOf: 32000 + + rohm,vcc-input-current-limit-microamp: + description: system specific VCC/VACP input current limit (in microamps). + minimum: 32000 + maximum: 16352000 + multipleOf: 32000 + +required: + - compatible + +examples: + - | + i2c { + #address-cells = <1>; + #size-cells = <0>; + charger@9 { + compatible = "rohm,bd99954"; + monitored-battery = <&battery>; + reg = <0x9>; + interrupt-parent = <&gpio1>; + interrupts = <29 8>; + rohm,vsys-regulation-microvolt = <8960000>; + rohm,vbus-input-current-limit-microamp = <1472000>; + rohm,vcc-input-current-limit-microamp = <1472000>; + }; + }; -- GitLab From 0902f8366491b7cf66ac01d8508ed3a12a36faa5 Mon Sep 17 00:00:00 2001 From: Matti Vaittinen Date: Fri, 8 May 2020 18:49:26 +0300 Subject: [PATCH 0331/1971] power: supply: Support ROHM bd99954 charger The ROHM BD99954 is a Battery Management LSI for 1-4 cell Lithium-Ion secondary battery intended to be used in space-constraint equipment such as Low profile Notebook PC, Tablets and other applications. BD99954 provides a Dual-source Battery Charger, two port BC1.2 detection and a Battery Monitor. Support ROHM BD99954 Charger IC. 
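The driver maps every requested microamp/microvolt limit to a register selector with the new linear_range helpers. A minimal sketch of that lookup, reusing the same 64 mA-step, 0..1024 mA table the driver defines as charging_current_ranges (the example_* names are illustrative, not part of the patch):

#include <linux/kernel.h>
#include <linux/linear_range.h>

/*
 * Same shape as charging_current_ranges: 0..1024 mA in 64 mA steps,
 * then a flat tail where every selector still means 1024 mA.
 */
static const struct linear_range example_current_ranges[] = {
	{ .min = 0,       .step = 64000, .min_sel = 0x0,  .max_sel = 0x10 },
	{ .min = 1024000, .step = 0,     .min_sel = 0x11, .max_sel = 0x1f },
};

static int example_current_to_sel(unsigned int ua, unsigned int *sel,
				  bool *found)
{
	/* Rounds the request down to the closest supported value */
	return linear_range_get_selector_low_array(example_current_ranges,
					ARRAY_SIZE(example_current_ranges),
					ua, sel, found);
}

For instance, a 200000 uA request comes back as selector 0x3 (192 mA is programmed), while anything above 1024000 uA yields the topmost selector with *found == false - the condition behind the "using smaller" warnings in bd9995x_fw_probe() and in the bd70528 changes above.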
Signed-off-by: Matti Vaittinen Reviewed-by: Andy Shevchenko Signed-off-by: Sebastian Reichel --- drivers/power/supply/Kconfig | 9 + drivers/power/supply/Makefile | 1 + drivers/power/supply/bd99954-charger.c | 1142 ++++++++++++++++++++++++ drivers/power/supply/bd99954-charger.h | 1075 ++++++++++++++++++++++ 4 files changed, 2227 insertions(+) create mode 100644 drivers/power/supply/bd99954-charger.c create mode 100644 drivers/power/supply/bd99954-charger.h diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig index 9448d6c66cae4..d058f19922ad1 100644 --- a/drivers/power/supply/Kconfig +++ b/drivers/power/supply/Kconfig @@ -713,6 +713,15 @@ config CHARGER_BD70528 information and altering charger configurations from charger block of the ROHM BD70528 Power Management IC. +config CHARGER_BD99954 + tristate "ROHM bd99954 charger driver" + depends on I2C + select LINEAR_RANGES + help + Say Y here to enable support for getting battery and charger + information and altering charger configurations from the ROHM + BD99954 charger IC. + config CHARGER_WILCO tristate "Wilco EC based charger for ChromeOS" depends on WILCO_EC diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile index 69727a10e835b..bb934545103d8 100644 --- a/drivers/power/supply/Makefile +++ b/drivers/power/supply/Makefile @@ -92,4 +92,5 @@ obj-$(CONFIG_CHARGER_SC2731) += sc2731_charger.o obj-$(CONFIG_FUEL_GAUGE_SC27XX) += sc27xx_fuel_gauge.o obj-$(CONFIG_CHARGER_UCS1002) += ucs1002_power.o obj-$(CONFIG_CHARGER_BD70528) += bd70528-charger.o +obj-$(CONFIG_CHARGER_BD99954) += bd99954-charger.o obj-$(CONFIG_CHARGER_WILCO) += wilco-charger.o diff --git a/drivers/power/supply/bd99954-charger.c b/drivers/power/supply/bd99954-charger.c new file mode 100644 index 0000000000000..3da39c7293e36 --- /dev/null +++ b/drivers/power/supply/bd99954-charger.c @@ -0,0 +1,1142 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * ROHM BD99954 charger driver + * + * Copyright (C) 2020 Rohm Semiconductors + * Originally written by: + * Mikko Mutanen + * Markus Laine + * Bugs added by: + * Matti Vaittinen + */ + +/* + * The battery charging profile of BD99954. + * + * Curve (1) represents charging current. + * Curve (2) represents battery voltage. + * + * The BD99954 data sheet divides charging to three phases. + * a) Trickle-charge with constant current (8). + * b) pre-charge with constant current (6) + * c) fast-charge, first with constant current (5) phase. After + * the battery voltage has reached target level (4) we have constant + * voltage phase until charging current has dropped to termination + * level (7) + * + * V ^ ^ I + * . . + * . . + *(4)` `.` ` ` ` ` ` ` ` ` ` ` ` ` ` ----------------------------. + * . :/ . + * . o----+/:/ ` ` ` ` ` ` ` ` ` ` ` ` `.` ` (5) + * . + :: + . + * . + /- -- . + * . +`/- + . + * . o/- -: . + * . .s. +` . + * . .--+ `/ . + * . ..`` + .: . + * . -` + -- . + * . (2) ...`` + :- . + * . ...`` + -: . + *(3)` `.`."" ` ` ` `+-------- ` ` ` ` ` ` `.:` ` ` ` ` ` ` ` ` .` ` (6) + * . + `:. . + * . + -: . + * . + -:. . + * . + .--. . + * . (1) + `.+` ` ` `.` ` (7) + * -..............` ` ` ` ` ` ` ` ` ` ` ` ` ` ` ` ` + ` ` ` .` ` (8) + * . + - + * -------------------------------------------------+++++++++--> + * | trickle | pre | fast | + * + * Details of DT properties for different limits can be found from BD99954 + * device tree binding documentation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bd99954-charger.h" + +struct battery_data { + u16 precharge_current; /* Trickle-charge Current */ + u16 fc_reg_voltage; /* Fast Charging Regulation Voltage */ + u16 voltage_min; + u16 voltage_max; +}; + +/* Initial field values, converted to initial register values */ +struct bd9995x_init_data { + u16 vsysreg_set; /* VSYS Regulation Setting */ + u16 ibus_lim_set; /* VBUS input current limitation */ + u16 icc_lim_set; /* VCC/VACP Input Current Limit Setting */ + u16 itrich_set; /* Trickle-charge Current Setting */ + u16 iprech_set; /* Pre-Charge Current Setting */ + u16 ichg_set; /* Fast-Charge constant current */ + u16 vfastchg_reg_set1; /* Fast Charging Regulation Voltage */ + u16 vprechg_th_set; /* Pre-charge Voltage Threshold Setting */ + u16 vrechg_set; /* Re-charge Battery Voltage Setting */ + u16 vbatovp_set; /* Battery Over Voltage Threshold Setting */ + u16 iterm_set; /* Charging termination current */ +}; + +struct bd9995x_state { + u8 online; + u16 chgstm_status; + u16 vbat_vsys_status; + u16 vbus_vcc_status; +}; + +struct bd9995x_device { + struct i2c_client *client; + struct device *dev; + struct power_supply *charger; + + struct regmap *rmap; + struct regmap_field *rmap_fields[F_MAX_FIELDS]; + + int chip_id; + int chip_rev; + struct bd9995x_init_data init_data; + struct bd9995x_state state; + + struct mutex lock; /* Protect state data */ +}; + +static const struct regmap_range bd9995x_readonly_reg_ranges[] = { + regmap_reg_range(CHGSTM_STATUS, SEL_ILIM_VAL), + regmap_reg_range(IOUT_DACIN_VAL, IOUT_DACIN_VAL), + regmap_reg_range(VCC_UCD_STATUS, VCC_IDD_STATUS), + regmap_reg_range(VBUS_UCD_STATUS, VBUS_IDD_STATUS), + regmap_reg_range(CHIP_ID, CHIP_REV), + regmap_reg_range(SYSTEM_STATUS, SYSTEM_STATUS), + regmap_reg_range(IBATP_VAL, VBAT_AVE_VAL), + regmap_reg_range(VTH_VAL, EXTIADP_AVE_VAL), +}; + +static const struct regmap_access_table bd9995x_writeable_regs = { + .no_ranges = bd9995x_readonly_reg_ranges, + .n_no_ranges = ARRAY_SIZE(bd9995x_readonly_reg_ranges), +}; + +static const struct regmap_range bd9995x_volatile_reg_ranges[] = { + regmap_reg_range(CHGSTM_STATUS, WDT_STATUS), + regmap_reg_range(VCC_UCD_STATUS, VCC_IDD_STATUS), + regmap_reg_range(VBUS_UCD_STATUS, VBUS_IDD_STATUS), + regmap_reg_range(INT0_STATUS, INT7_STATUS), + regmap_reg_range(SYSTEM_STATUS, SYSTEM_CTRL_SET), + regmap_reg_range(IBATP_VAL, EXTIADP_AVE_VAL), /* Measurement regs */ +}; + +static const struct regmap_access_table bd9995x_volatile_regs = { + .yes_ranges = bd9995x_volatile_reg_ranges, + .n_yes_ranges = ARRAY_SIZE(bd9995x_volatile_reg_ranges), +}; + +static const struct regmap_range_cfg regmap_range_cfg[] = { + { + .selector_reg = MAP_SET, + .selector_mask = 0xFFFF, + .selector_shift = 0, + .window_start = 0, + .window_len = 0x100, + .range_min = 0 * 0x100, + .range_max = 3 * 0x100, + }, +}; + +static const struct regmap_config bd9995x_regmap_config = { + .reg_bits = 8, + .val_bits = 16, + .reg_stride = 1, + + .max_register = 3 * 0x100, + .cache_type = REGCACHE_RBTREE, + + .ranges = regmap_range_cfg, + .num_ranges = ARRAY_SIZE(regmap_range_cfg), + .val_format_endian = REGMAP_ENDIAN_LITTLE, + .wr_table = &bd9995x_writeable_regs, + .volatile_table = &bd9995x_volatile_regs, +}; + +enum bd9995x_chrg_fault { + CHRG_FAULT_NORMAL, + CHRG_FAULT_INPUT, + CHRG_FAULT_THERMAL_SHUTDOWN, + CHRG_FAULT_TIMER_EXPIRED, +}; + +static int bd9995x_get_prop_batt_health(struct 
bd9995x_device *bd) +{ + int ret, tmp; + + ret = regmap_field_read(bd->rmap_fields[F_BATTEMP], &tmp); + if (ret) + return POWER_SUPPLY_HEALTH_UNKNOWN; + + /* TODO: Check these against datasheet page 34 */ + + switch (tmp) { + case ROOM: + return POWER_SUPPLY_HEALTH_GOOD; + case HOT1: + case HOT2: + case HOT3: + return POWER_SUPPLY_HEALTH_OVERHEAT; + case COLD1: + case COLD2: + return POWER_SUPPLY_HEALTH_COLD; + case TEMP_DIS: + case BATT_OPEN: + default: + return POWER_SUPPLY_HEALTH_UNKNOWN; + } +} + +static int bd9995x_get_prop_charge_type(struct bd9995x_device *bd) +{ + int ret, tmp; + + ret = regmap_field_read(bd->rmap_fields[F_CHGSTM_STATE], &tmp); + if (ret) + return POWER_SUPPLY_CHARGE_TYPE_UNKNOWN; + + switch (tmp) { + case CHGSTM_TRICKLE_CHARGE: + case CHGSTM_PRE_CHARGE: + return POWER_SUPPLY_CHARGE_TYPE_TRICKLE; + case CHGSTM_FAST_CHARGE: + return POWER_SUPPLY_CHARGE_TYPE_FAST; + case CHGSTM_TOP_OFF: + case CHGSTM_DONE: + case CHGSTM_SUSPEND: + return POWER_SUPPLY_CHARGE_TYPE_NONE; + default: /* Rest of the states are error related, no charging */ + return POWER_SUPPLY_CHARGE_TYPE_NONE; + } +} + +static bool bd9995x_get_prop_batt_present(struct bd9995x_device *bd) +{ + int ret, tmp; + + ret = regmap_field_read(bd->rmap_fields[F_BATTEMP], &tmp); + if (ret) + return false; + + return tmp != BATT_OPEN; +} + +static int bd9995x_get_prop_batt_voltage(struct bd9995x_device *bd) +{ + int ret, tmp; + + ret = regmap_field_read(bd->rmap_fields[F_VBAT_VAL], &tmp); + if (ret) + return 0; + + tmp = min(tmp, 19200); + + return tmp * 1000; +} + +static int bd9995x_get_prop_batt_current(struct bd9995x_device *bd) +{ + int ret, tmp; + + ret = regmap_field_read(bd->rmap_fields[F_IBATP_VAL], &tmp); + if (ret) + return 0; + + return tmp * 1000; +} + +#define DEFAULT_BATTERY_TEMPERATURE 250 + +static int bd9995x_get_prop_batt_temp(struct bd9995x_device *bd) +{ + int ret, tmp; + + ret = regmap_field_read(bd->rmap_fields[F_THERM_VAL], &tmp); + if (ret) + return DEFAULT_BATTERY_TEMPERATURE; + + return (200 - tmp) * 10; +} + +static int bd9995x_power_supply_get_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + int ret, tmp; + struct bd9995x_device *bd = power_supply_get_drvdata(psy); + struct bd9995x_state state; + + mutex_lock(&bd->lock); + state = bd->state; + mutex_unlock(&bd->lock); + + switch (psp) { + case POWER_SUPPLY_PROP_STATUS: + switch (state.chgstm_status) { + case CHGSTM_TRICKLE_CHARGE: + case CHGSTM_PRE_CHARGE: + case CHGSTM_FAST_CHARGE: + case CHGSTM_TOP_OFF: + val->intval = POWER_SUPPLY_STATUS_CHARGING; + break; + + case CHGSTM_DONE: + val->intval = POWER_SUPPLY_STATUS_FULL; + break; + + case CHGSTM_SUSPEND: + case CHGSTM_TEMPERATURE_ERROR_1: + case CHGSTM_TEMPERATURE_ERROR_2: + case CHGSTM_TEMPERATURE_ERROR_3: + case CHGSTM_TEMPERATURE_ERROR_4: + case CHGSTM_TEMPERATURE_ERROR_5: + case CHGSTM_TEMPERATURE_ERROR_6: + case CHGSTM_TEMPERATURE_ERROR_7: + case CHGSTM_THERMAL_SHUT_DOWN_1: + case CHGSTM_THERMAL_SHUT_DOWN_2: + case CHGSTM_THERMAL_SHUT_DOWN_3: + case CHGSTM_THERMAL_SHUT_DOWN_4: + case CHGSTM_THERMAL_SHUT_DOWN_5: + case CHGSTM_THERMAL_SHUT_DOWN_6: + case CHGSTM_THERMAL_SHUT_DOWN_7: + case CHGSTM_BATTERY_ERROR: + val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING; + break; + + default: + val->intval = POWER_SUPPLY_STATUS_UNKNOWN; + break; + } + break; + + case POWER_SUPPLY_PROP_MANUFACTURER: + val->strval = BD9995X_MANUFACTURER; + break; + + case POWER_SUPPLY_PROP_ONLINE: + val->intval = state.online; + break; + + case 
POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT: + ret = regmap_field_read(bd->rmap_fields[F_IBATP_VAL], &tmp); + if (ret) + return ret; + val->intval = tmp * 1000; + break; + + case POWER_SUPPLY_PROP_CHARGE_AVG: + ret = regmap_field_read(bd->rmap_fields[F_IBATP_AVE_VAL], &tmp); + if (ret) + return ret; + val->intval = tmp * 1000; + break; + + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX: + /* + * Currently the DT uses this property to give the + * target current for fast-charging constant current phase. + * I think it is correct in a sense. + * + * Yet, this prop we read and return here is the programmed + * safety limit for combined input currents. This feels + * also correct in a sense. + * + * However, this results a mismatch to DT value and value + * read from sysfs. + */ + ret = regmap_field_read(bd->rmap_fields[F_SEL_ILIM_VAL], &tmp); + if (ret) + return ret; + val->intval = tmp * 1000; + break; + + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE: + if (!state.online) { + val->intval = 0; + break; + } + + ret = regmap_field_read(bd->rmap_fields[F_VFASTCHG_REG_SET1], + &tmp); + if (ret) + return ret; + + /* + * The actual range : 2560 to 19200 mV. No matter what the + * register says + */ + val->intval = clamp_val(tmp << 4, 2560, 19200); + val->intval *= 1000; + break; + + case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT: + ret = regmap_field_read(bd->rmap_fields[F_ITERM_SET], &tmp); + if (ret) + return ret; + /* Start step is 64 mA */ + val->intval = tmp << 6; + /* Maximum is 1024 mA - no matter what register says */ + val->intval = min(val->intval, 1024); + val->intval *= 1000; + break; + + /* Battery properties which we access through charger */ + case POWER_SUPPLY_PROP_PRESENT: + val->intval = bd9995x_get_prop_batt_present(bd); + break; + + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + val->intval = bd9995x_get_prop_batt_voltage(bd); + break; + + case POWER_SUPPLY_PROP_CURRENT_NOW: + val->intval = bd9995x_get_prop_batt_current(bd); + break; + + case POWER_SUPPLY_PROP_CHARGE_TYPE: + val->intval = bd9995x_get_prop_charge_type(bd); + break; + + case POWER_SUPPLY_PROP_HEALTH: + val->intval = bd9995x_get_prop_batt_health(bd); + break; + + case POWER_SUPPLY_PROP_TEMP: + val->intval = bd9995x_get_prop_batt_temp(bd); + break; + + case POWER_SUPPLY_PROP_TECHNOLOGY: + val->intval = POWER_SUPPLY_TECHNOLOGY_LION; + break; + + case POWER_SUPPLY_PROP_MODEL_NAME: + val->strval = "bd99954"; + break; + + default: + return -EINVAL; + + } + + return 0; +} + +static int bd9995x_get_chip_state(struct bd9995x_device *bd, + struct bd9995x_state *state) +{ + int i, ret, tmp; + struct { + struct regmap_field *id; + u16 *data; + } state_fields[] = { + { + bd->rmap_fields[F_CHGSTM_STATE], &state->chgstm_status, + }, { + bd->rmap_fields[F_VBAT_VSYS_STATUS], + &state->vbat_vsys_status, + }, { + bd->rmap_fields[F_VBUS_VCC_STATUS], + &state->vbus_vcc_status, + }, + }; + + + for (i = 0; i < ARRAY_SIZE(state_fields); i++) { + ret = regmap_field_read(state_fields[i].id, &tmp); + if (ret) + return ret; + + *state_fields[i].data = tmp; + } + + if (state->vbus_vcc_status & STATUS_VCC_DET || + state->vbus_vcc_status & STATUS_VBUS_DET) + state->online = 1; + else + state->online = 0; + + return 0; +} + +static irqreturn_t bd9995x_irq_handler_thread(int irq, void *private) +{ + struct bd9995x_device *bd = private; + int ret, status, mask, i; + unsigned long tmp; + struct bd9995x_state state; + + /* + * The bd9995x does not seem to generate big amount of interrupts. 
+ * The logic regarding which interrupts can cause relevant + * status changes seem to be pretty complex. + * + * So lets implement really simple and hopefully bullet-proof handler: + * It does not really matter which IRQ we handle, we just go and + * re-read all interesting statuses + give the framework a nudge. + * + * Other option would be building a _complex_ and error prone logic + * trying to decide what could have been changed (resulting this IRQ + * we are now handling). During the normal operation the BD99954 does + * not seem to be generating much of interrupts so benefit from such + * logic would probably be minimal. + */ + + ret = regmap_read(bd->rmap, INT0_STATUS, &status); + if (ret) { + dev_err(bd->dev, "Failed to read IRQ status\n"); + return IRQ_NONE; + } + + ret = regmap_field_read(bd->rmap_fields[F_INT0_SET], &mask); + if (ret) { + dev_err(bd->dev, "Failed to read IRQ mask\n"); + return IRQ_NONE; + } + + /* Handle only IRQs that are not masked */ + status &= mask; + tmp = status; + + /* Lowest bit does not represent any sub-registers */ + tmp >>= 1; + + /* + * Mask and ack IRQs we will handle (+ the idiot bit) + */ + ret = regmap_field_write(bd->rmap_fields[F_INT0_SET], 0); + if (ret) { + dev_err(bd->dev, "Failed to mask F_INT0\n"); + return IRQ_NONE; + } + + ret = regmap_write(bd->rmap, INT0_STATUS, status); + if (ret) { + dev_err(bd->dev, "Failed to ack F_INT0\n"); + goto err_umask; + } + + for_each_set_bit(i, &tmp, 7) { + int sub_status, sub_mask; + int sub_status_reg[] = { + INT1_STATUS, INT2_STATUS, INT3_STATUS, INT4_STATUS, + INT5_STATUS, INT6_STATUS, INT7_STATUS, + }; + struct regmap_field *sub_mask_f[] = { + bd->rmap_fields[F_INT1_SET], + bd->rmap_fields[F_INT2_SET], + bd->rmap_fields[F_INT3_SET], + bd->rmap_fields[F_INT4_SET], + bd->rmap_fields[F_INT5_SET], + bd->rmap_fields[F_INT6_SET], + bd->rmap_fields[F_INT7_SET], + }; + + /* Clear sub IRQs */ + ret = regmap_read(bd->rmap, sub_status_reg[i], &sub_status); + if (ret) { + dev_err(bd->dev, "Failed to read IRQ sub-status\n"); + goto err_umask; + } + + ret = regmap_field_read(sub_mask_f[i], &sub_mask); + if (ret) { + dev_err(bd->dev, "Failed to read IRQ sub-mask\n"); + goto err_umask; + } + + /* Ack active sub-statuses */ + sub_status &= sub_mask; + + ret = regmap_write(bd->rmap, sub_status_reg[i], sub_status); + if (ret) { + dev_err(bd->dev, "Failed to ack sub-IRQ\n"); + goto err_umask; + } + } + + ret = regmap_field_write(bd->rmap_fields[F_INT0_SET], mask); + if (ret) + /* May as well retry once */ + goto err_umask; + + /* Read whole chip state */ + ret = bd9995x_get_chip_state(bd, &state); + if (ret < 0) { + dev_err(bd->dev, "Failed to read chip state\n"); + } else { + mutex_lock(&bd->lock); + bd->state = state; + mutex_unlock(&bd->lock); + + power_supply_changed(bd->charger); + } + + return IRQ_HANDLED; + +err_umask: + ret = regmap_field_write(bd->rmap_fields[F_INT0_SET], mask); + if (ret) + dev_err(bd->dev, + "Failed to un-mask F_INT0 - IRQ permanently disabled\n"); + + return IRQ_NONE; +} + +static int __bd9995x_chip_reset(struct bd9995x_device *bd) +{ + int ret, state; + int rst_check_counter = 10; + u16 tmp = ALLRST | OTPLD; + + ret = regmap_raw_write(bd->rmap, SYSTEM_CTRL_SET, &tmp, 2); + if (ret < 0) + return ret; + + do { + ret = regmap_field_read(bd->rmap_fields[F_OTPLD_STATE], &state); + if (ret) + return ret; + + msleep(10); + } while (state == 0 && --rst_check_counter); + + if (!rst_check_counter) { + dev_err(bd->dev, "chip reset not completed\n"); + return -ETIMEDOUT; + } + + tmp = 0; + ret = 
regmap_raw_write(bd->rmap, SYSTEM_CTRL_SET, &tmp, 2); + + return ret; +} + +static int bd9995x_hw_init(struct bd9995x_device *bd) +{ + int ret; + int i; + struct bd9995x_state state; + struct bd9995x_init_data *id = &bd->init_data; + + const struct { + enum bd9995x_fields id; + u16 value; + } init_data[] = { + /* Enable the charging trigger after SDP charger attached */ + {F_SDP_CHG_TRIG_EN, 1}, + /* Enable charging trigger after SDP charger attached */ + {F_SDP_CHG_TRIG, 1}, + /* Disable charging trigger by BC1.2 detection */ + {F_VBUS_BC_DISEN, 1}, + /* Disable charging trigger by BC1.2 detection */ + {F_VCC_BC_DISEN, 1}, + /* Disable automatic limitation of the input current */ + {F_ILIM_AUTO_DISEN, 1}, + /* Select current limitation when SDP charger attached*/ + {F_SDP_500_SEL, 1}, + /* Select current limitation when DCP charger attached */ + {F_DCP_2500_SEL, 1}, + {F_VSYSREG_SET, id->vsysreg_set}, + /* Activate USB charging and DC/DC converter */ + {F_USB_SUS, 0}, + /* DCDC clock: 1200 kHz*/ + {F_DCDC_CLK_SEL, 3}, + /* Enable charging */ + {F_CHG_EN, 1}, + /* Disable Input current Limit setting voltage measurement */ + {F_EXTIADPEN, 0}, + /* Disable input current limiting */ + {F_VSYS_PRIORITY, 1}, + {F_IBUS_LIM_SET, id->ibus_lim_set}, + {F_ICC_LIM_SET, id->icc_lim_set}, + /* Charge Termination Current Setting to 0*/ + {F_ITERM_SET, id->iterm_set}, + /* Trickle-charge Current Setting */ + {F_ITRICH_SET, id->itrich_set}, + /* Pre-charge Current setting */ + {F_IPRECH_SET, id->iprech_set}, + /* Fast Charge Current for constant current phase */ + {F_ICHG_SET, id->ichg_set}, + /* Fast Charge Voltage Regulation Setting */ + {F_VFASTCHG_REG_SET1, id->vfastchg_reg_set1}, + /* Set Pre-charge Voltage Threshold for trickle charging. */ + {F_VPRECHG_TH_SET, id->vprechg_th_set}, + {F_VRECHG_SET, id->vrechg_set}, + {F_VBATOVP_SET, id->vbatovp_set}, + /* Reverse buck boost voltage Setting */ + {F_VRBOOST_SET, 0}, + /* Disable fast-charging watchdog */ + {F_WDT_FST, 0}, + /* Disable pre-charging watchdog */ + {F_WDT_PRE, 0}, + /* Power save off */ + {F_POWER_SAVE_MODE, 0}, + {F_INT1_SET, INT1_ALL}, + {F_INT2_SET, INT2_ALL}, + {F_INT3_SET, INT3_ALL}, + {F_INT4_SET, INT4_ALL}, + {F_INT5_SET, INT5_ALL}, + {F_INT6_SET, INT6_ALL}, + {F_INT7_SET, INT7_ALL}, + }; + + /* + * Currently we initialize charger to a known state at startup. + * If we want to allow for example the boot code to initialize + * charger we should get rid of this. 
+ */ + ret = __bd9995x_chip_reset(bd); + if (ret < 0) + return ret; + + /* Initialize currents/voltages and other parameters */ + for (i = 0; i < ARRAY_SIZE(init_data); i++) { + ret = regmap_field_write(bd->rmap_fields[init_data[i].id], + init_data[i].value); + if (ret) { + dev_err(bd->dev, "failed to initialize charger (%d)\n", + ret); + return ret; + } + } + + ret = bd9995x_get_chip_state(bd, &state); + if (ret < 0) + return ret; + + mutex_lock(&bd->lock); + bd->state = state; + mutex_unlock(&bd->lock); + + return 0; +} + +static enum power_supply_property bd9995x_power_supply_props[] = { + POWER_SUPPLY_PROP_MANUFACTURER, + POWER_SUPPLY_PROP_STATUS, + POWER_SUPPLY_PROP_ONLINE, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT, + POWER_SUPPLY_PROP_CHARGE_AVG, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE, + POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT, + /* Battery props we access through charger */ + POWER_SUPPLY_PROP_PRESENT, + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_CURRENT_NOW, + POWER_SUPPLY_PROP_CHARGE_TYPE, + POWER_SUPPLY_PROP_HEALTH, + POWER_SUPPLY_PROP_TEMP, + POWER_SUPPLY_PROP_TECHNOLOGY, + POWER_SUPPLY_PROP_MODEL_NAME, +}; + +static const struct power_supply_desc bd9995x_power_supply_desc = { + .name = "bd9995x-charger", + .type = POWER_SUPPLY_TYPE_USB, + .properties = bd9995x_power_supply_props, + .num_properties = ARRAY_SIZE(bd9995x_power_supply_props), + .get_property = bd9995x_power_supply_get_property, +}; + +/* + * Limit configurations for vbus-input-current and vcc-vacp-input-current + * Minimum limit is 0 uA. Max is 511 * 32000 uA = 16352000 uA. This is + * configured by writing a register so that each increment in register + * value equals to 32000 uA limit increment. + * + * Eg, value 0x0 is limit 0, value 0x1 is limit 32000, ... + * Describe the setting in linear_range table. 
+ */ +static const struct linear_range input_current_limit_ranges[] = { + { + .min = 0, + .step = 32000, + .min_sel = 0x0, + .max_sel = 0x1ff, + }, +}; + +/* Possible trickle, pre-charging and termination current values */ +static const struct linear_range charging_current_ranges[] = { + { + .min = 0, + .step = 64000, + .min_sel = 0x0, + .max_sel = 0x10, + }, { + .min = 1024000, + .step = 0, + .min_sel = 0x11, + .max_sel = 0x1f, + }, +}; + +/* + * Fast charging voltage regulation, starting re-charging limit + * and battery over voltage protection have same possible values + */ +static const struct linear_range charge_voltage_regulation_ranges[] = { + { + .min = 2560000, + .step = 0, + .min_sel = 0, + .max_sel = 0xA0, + }, { + .min = 2560000, + .step = 16000, + .min_sel = 0xA0, + .max_sel = 0x4B0, + }, { + .min = 19200000, + .step = 0, + .min_sel = 0x4B0, + .max_sel = 0x7FF, + }, +}; + +/* Possible VSYS voltage regulation values */ +static const struct linear_range vsys_voltage_regulation_ranges[] = { + { + .min = 2560000, + .step = 0, + .min_sel = 0, + .max_sel = 0x28, + }, { + .min = 2560000, + .step = 64000, + .min_sel = 0x28, + .max_sel = 0x12C, + }, { + .min = 19200000, + .step = 0, + .min_sel = 0x12C, + .max_sel = 0x1FF, + }, +}; + +/* Possible settings for switching from trickle to pre-charging limits */ +static const struct linear_range trickle_to_pre_threshold_ranges[] = { + { + .min = 2048000, + .step = 0, + .min_sel = 0, + .max_sel = 0x20, + }, { + .min = 2048000, + .step = 64000, + .min_sel = 0x20, + .max_sel = 0x12C, + }, { + .min = 19200000, + .step = 0, + .min_sel = 0x12C, + .max_sel = 0x1FF + } +}; + +/* Possible current values for fast-charging constant current phase */ +static const struct linear_range fast_charge_current_ranges[] = { + { + .min = 0, + .step = 64000, + .min_sel = 0, + .max_sel = 0xFF, + } +}; + +struct battery_init { + const char *name; + int *info_data; + const struct linear_range *range; + int ranges; + u16 *data; +}; + +struct dt_init { + char *prop; + const struct linear_range *range; + int ranges; + u16 *data; +}; + +static int bd9995x_fw_probe(struct bd9995x_device *bd) +{ + int ret; + struct power_supply_battery_info info; + u32 property; + int i; + int regval; + bool found; + struct bd9995x_init_data *init = &bd->init_data; + struct battery_init battery_inits[] = { + { + .name = "trickle-charging current", + .info_data = &info.tricklecharge_current_ua, + .range = &charging_current_ranges[0], + .ranges = 2, + .data = &init->itrich_set, + }, { + .name = "pre-charging current", + .info_data = &info.precharge_current_ua, + .range = &charging_current_ranges[0], + .ranges = 2, + .data = &init->iprech_set, + }, { + .name = "pre-to-trickle charge voltage threshold", + .info_data = &info.precharge_voltage_max_uv, + .range = &trickle_to_pre_threshold_ranges[0], + .ranges = 2, + .data = &init->vprechg_th_set, + }, { + .name = "charging termination current", + .info_data = &info.charge_term_current_ua, + .range = &charging_current_ranges[0], + .ranges = 2, + .data = &init->iterm_set, + }, { + .name = "charging re-start voltage", + .info_data = &info.charge_restart_voltage_uv, + .range = &charge_voltage_regulation_ranges[0], + .ranges = 2, + .data = &init->vrechg_set, + }, { + .name = "battery overvoltage limit", + .info_data = &info.overvoltage_limit_uv, + .range = &charge_voltage_regulation_ranges[0], + .ranges = 2, + .data = &init->vbatovp_set, + }, { + .name = "fast-charging max current", + .info_data = &info.constant_charge_current_max_ua, + .range = 
&fast_charge_current_ranges[0], + .ranges = 1, + .data = &init->ichg_set, + }, { + .name = "fast-charging voltage", + .info_data = &info.constant_charge_voltage_max_uv, + .range = &charge_voltage_regulation_ranges[0], + .ranges = 2, + .data = &init->vfastchg_reg_set1, + }, + }; + struct dt_init props[] = { + { + .prop = "rohm,vsys-regulation-microvolt", + .range = &vsys_voltage_regulation_ranges[0], + .ranges = 2, + .data = &init->vsysreg_set, + }, { + .prop = "rohm,vbus-input-current-limit-microamp", + .range = &input_current_limit_ranges[0], + .ranges = 1, + .data = &init->ibus_lim_set, + }, { + .prop = "rohm,vcc-input-current-limit-microamp", + .range = &input_current_limit_ranges[0], + .ranges = 1, + .data = &init->icc_lim_set, + }, + }; + + /* + * The power_supply_get_battery_info() does not support getting values + * from ACPI. Let's fix it if ACPI is required here. + */ + ret = power_supply_get_battery_info(bd->charger, &info); + if (ret < 0) + return ret; + + for (i = 0; i < ARRAY_SIZE(battery_inits); i++) { + int val = *battery_inits[i].info_data; + const struct linear_range *range = battery_inits[i].range; + int ranges = battery_inits[i].ranges; + + if (val == -EINVAL) + continue; + + ret = linear_range_get_selector_low_array(range, ranges, val, + ®val, &found); + if (ret) { + dev_err(bd->dev, "Unsupported value for %s\n", + battery_inits[i].name); + + power_supply_put_battery_info(bd->charger, &info); + return -EINVAL; + } + if (!found) { + dev_warn(bd->dev, + "Unsupported value for %s - using smaller\n", + battery_inits[i].name); + } + *(battery_inits[i].data) = regval; + } + + power_supply_put_battery_info(bd->charger, &info); + + for (i = 0; i < ARRAY_SIZE(props); i++) { + ret = device_property_read_u32(bd->dev, props[i].prop, + &property); + if (ret < 0) { + dev_err(bd->dev, "failed to read %s", props[i].prop); + + return ret; + } + + ret = linear_range_get_selector_low_array(props[i].range, + props[i].ranges, + property, ®val, + &found); + if (ret) { + dev_err(bd->dev, "Unsupported value for '%s'\n", + props[i].prop); + + return -EINVAL; + } + + if (!found) { + dev_warn(bd->dev, + "Unsupported value for '%s' - using smaller\n", + props[i].prop); + } + + *(props[i].data) = regval; + } + + return 0; +} + +void bd9995x_chip_reset(void *bd) +{ + __bd9995x_chip_reset(bd); +} + +static int bd9995x_probe(struct i2c_client *client) +{ + struct device *dev = &client->dev; + struct bd9995x_device *bd; + struct power_supply_config psy_cfg = {}; + int ret; + int i; + + bd = devm_kzalloc(dev, sizeof(*bd), GFP_KERNEL); + if (!bd) + return -ENOMEM; + + bd->client = client; + bd->dev = dev; + psy_cfg.drv_data = bd; + psy_cfg.of_node = dev->of_node; + + mutex_init(&bd->lock); + + bd->rmap = devm_regmap_init_i2c(client, &bd9995x_regmap_config); + if (IS_ERR(bd->rmap)) { + dev_err(dev, "Failed to setup register access via i2c\n"); + return PTR_ERR(bd->rmap); + } + + for (i = 0; i < ARRAY_SIZE(bd9995x_reg_fields); i++) { + const struct reg_field *reg_fields = bd9995x_reg_fields; + + bd->rmap_fields[i] = devm_regmap_field_alloc(dev, bd->rmap, + reg_fields[i]); + if (IS_ERR(bd->rmap_fields[i])) { + dev_err(dev, "cannot allocate regmap field\n"); + return PTR_ERR(bd->rmap_fields[i]); + } + } + + i2c_set_clientdata(client, bd); + + ret = regmap_field_read(bd->rmap_fields[F_CHIP_ID], &bd->chip_id); + if (ret) { + dev_err(dev, "Cannot read chip ID.\n"); + return ret; + } + + if (bd->chip_id != BD99954_ID) { + dev_err(dev, "Chip with ID=0x%x, not supported!\n", + bd->chip_id); + return -ENODEV; + } + 
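+	/* Read the silicon revision for the probe-time log below. */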
+ ret = regmap_field_read(bd->rmap_fields[F_CHIP_REV], &bd->chip_rev); + if (ret) { + dev_err(dev, "Cannot read revision.\n"); + return ret; + } + + dev_info(bd->dev, "Found BD99954 chip rev %d\n", bd->chip_rev); + + /* + * We need to init the psy before we can call + * power_supply_get_battery_info() for it + */ + bd->charger = devm_power_supply_register(bd->dev, + &bd9995x_power_supply_desc, + &psy_cfg); + if (IS_ERR(bd->charger)) { + dev_err(dev, "Failed to register power supply\n"); + return PTR_ERR(bd->charger); + } + + ret = bd9995x_fw_probe(bd); + if (ret < 0) { + dev_err(dev, "Cannot read device properties.\n"); + return ret; + } + + ret = bd9995x_hw_init(bd); + if (ret < 0) { + dev_err(dev, "Cannot initialize the chip.\n"); + return ret; + } + + ret = devm_add_action_or_reset(dev, bd9995x_chip_reset, bd); + if (ret) + return ret; + + return devm_request_threaded_irq(dev, client->irq, NULL, + bd9995x_irq_handler_thread, + IRQF_TRIGGER_LOW | IRQF_ONESHOT, + BD9995X_IRQ_PIN, bd); +} + +static const struct of_device_id bd9995x_of_match[] = { + { .compatible = "rohm,bd99954", }, + { } +}; +MODULE_DEVICE_TABLE(of, bd9995x_of_match); + +static struct i2c_driver bd9995x_driver = { + .driver = { + .name = "bd9995x-charger", + .of_match_table = bd9995x_of_match, + }, + .probe_new = bd9995x_probe, +}; +module_i2c_driver(bd9995x_driver); + +MODULE_AUTHOR("Laine Markus "); +MODULE_DESCRIPTION("ROHM BD99954 charger driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/power/supply/bd99954-charger.h b/drivers/power/supply/bd99954-charger.h new file mode 100644 index 0000000000000..f588979253830 --- /dev/null +++ b/drivers/power/supply/bd99954-charger.h @@ -0,0 +1,1075 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2020 ROHM Semiconductors */ +#ifndef BD99954_CHARGER_H +#define BD99954_CHARGER_H + +#include + +#define BD9995X_MANUFACTURER "Rohm Semiconductor" +#define BD9995X_IRQ_PIN "bd9995x_irq" + +#define BD9995X_VSYS_PRECHARGE_OFFSET_MV 200 + +#define BD99954_ID 0x346 +#define BD99955_ID 0x221 +#define BD99956_ID 0x331 + +/* Battery Charger Commands */ +#define CHARGING_CURRENT 0x14 +#define CHARGING_VOLTAGE 0x15 +#define PROTECT_SET 0x3E +#define MAP_SET 0x3F + +/* Extended commands */ +#define CHGSTM_STATUS 0x100 +#define VBAT_VSYS_STATUS 0x101 +#define VBUS_VCC_STATUS 0x102 +#define CHGOP_STATUS 0x103 +#define WDT_STATUS 0x104 +#define CUR_ILIM_VAL 0x105 +#define SEL_ILIM_VAL 0x106 +#define IBUS_LIM_SET 0x107 +#define ICC_LIM_SET 0x108 +#define IOTG_LIM_SET 0x109 +#define VIN_CTRL_SET 0x10A +#define CHGOP_SET1 0x10B +#define CHGOP_SET2 0x10C +#define VBUSCLPS_TH_SET 0x10D +#define VCCCLPS_TH_SET 0x10E +#define CHGWDT_SET 0x10F +#define BATTWDT_SET 0x110 +#define VSYSREG_SET 0x111 +#define VSYSVAL_THH_SET 0x112 +#define VSYSVAL_THL_SET 0x113 +#define ITRICH_SET 0x114 +#define IPRECH_SET 0x115 +#define ICHG_SET 0x116 +#define ITERM_SET 0x117 +#define VPRECHG_TH_SET 0x118 +#define VRBOOST_SET 0x119 +#define VFASTCHG_REG_SET1 0x11A +#define VFASTCHG_REG_SET2 0x11B +#define VFASTCHG_REG_SET3 0x11C +#define VRECHG_SET 0x11D +#define VBATOVP_SET 0x11E +#define IBATSHORT_SET 0x11F +#define PROCHOT_CTRL_SET 0x120 +#define PROCHOT_ICRIT_SET 0x121 +#define PROCHOT_INORM_SET 0x122 +#define PROCHOT_IDCHG_SET 0x123 +#define PROCHOT_VSYS_SET 0x124 +#define PMON_IOUT_CTRL_SET 0x125 +#define PMON_DACIN_VAL 0x126 +#define IOUT_DACIN_VAL 0x127 +#define VCC_UCD_SET 0x128 +#define VCC_UCD_STATUS 0x129 +#define VCC_IDD_STATUS 0x12A +#define VCC_UCD_FCTRL_SET 0x12B +#define VCC_UCD_FCTRL_EN 
0x12C +#define VBUS_UCD_SET 0x130 +#define VBUS_UCD_STATUS 0x131 +#define VBUS_IDD_STATUS 0x132 +#define VBUS_UCD_FCTRL_SET 0x133 +#define VBUS_UCD_FCTRL_EN 0x134 +#define CHIP_ID 0x138 +#define CHIP_REV 0x139 +#define IC_SET1 0x13A +#define IC_SET2 0x13B +#define SYSTEM_STATUS 0x13C +#define SYSTEM_CTRL_SET 0x13D +#define VM_CTRL_SET 0x140 +#define THERM_WINDOW_SET1 0x141 +#define THERM_WINDOW_SET2 0x142 +#define THERM_WINDOW_SET3 0x143 +#define THERM_WINDOW_SET4 0x144 +#define THERM_WINDOW_SET5 0x145 +#define IBATP_TH_SET 0x146 +#define IBATM_TH_SET 0x147 +#define VBAT_TH_SET 0x148 +#define THERM_TH_SET 0x149 +#define IACP_TH_SET 0x14A +#define VACP_TH_SET 0x14B +#define VBUS_TH_SET 0x14C +#define VCC_TH_SET 0x14D +#define VSYS_TH_SET 0x14E +#define EXTIADP_TH_SET 0x14F +#define IBATP_VAL 0x150 +#define IBATP_AVE_VAL 0x151 +#define IBATM_VAL 0x152 +#define IBATM_AVE_VAL 0x153 +#define VBAT_VAL 0x154 +#define VBAT_AVE_VAL 0x155 +#define THERM_VAL 0x156 +#define VTH_VAL 0x157 +#define IACP_VAL 0x158 +#define IACP_AVE_VAL 0x159 +#define VACP_VAL 0x15A +#define VACP_AVE_VAL 0x15B +#define VBUS_VAL 0x15C +#define VBUS_AVE_VAL 0x15D +#define VCC_VAL 0x15E +#define VCC_AVE_VAL 0x15F +#define VSYS_VAL 0x160 +#define VSYS_AVE_VAL 0x161 +#define EXTIADP_VAL 0x162 +#define EXTIADP_AVE_VAL 0x163 +#define VACPCLPS_TH_SET 0x164 +#define INT0_SET 0x168 +#define INT1_SET 0x169 +#define INT2_SET 0x16A +#define INT3_SET 0x16B +#define INT4_SET 0x16C +#define INT5_SET 0x16D +#define INT6_SET 0x16E +#define INT7_SET 0x16F +#define INT0_STATUS 0x170 +#define INT1_STATUS 0x171 +#define INT2_STATUS 0x172 +#define INT3_STATUS 0x173 +#define INT4_STATUS 0x174 +#define INT5_STATUS 0x175 +#define INT6_STATUS 0x176 +#define INT7_STATUS 0x177 +#define OTPREG0 0x17A +#define OTPREG1 0x17B +#define SMBREG 0x17C +#define DEBUG_MODE_SET 0x17F +#define DEBUG0x14 0x214 +#define DEBUG0x1A 0x21A + +enum bd9995x_fields { + F_PREV_CHGSTM_STATE, F_CHGSTM_STATE, + F_VBAT_VSYS_STATUS, + F_VBUS_VCC_STATUS, + F_BATTEMP, F_VRECHG_DET, F_RBOOST_UV, F_RBOOSTS, + F_THERMWDT_VAL, F_CHGWDT_VAL, + F_CUR_ILIM_VAL, + F_SEL_ILIM_VAL, + F_IBUS_LIM_SET, + F_ICC_LIM_SET, + F_IOTG_LIM_SET, + F_OTG_BOTH_EN, + F_VRBOOST_TRIG, + F_VRBOOST_EN, + F_PP_BOTH_THRU, + F_VIN_ORD, + F_VBUS_EN, + F_VCC_EN, + F_VSYS_PRIORITY, + F_PPC_SUB_CAP, + F_PPC_CAP, + F_DCP_2500_SEL, + F_SDP_500_SEL, + F_ILIM_AUTO_DISEN, + F_VCC_BC_DISEN, + F_VBUS_BC_DISEN, + F_SDP_CHG_TRIG_EN, + F_SDP_CHG_TRIG, + F_AUTO_TOF, + F_AUTO_FST, + F_AUTO_RECH, + F_ILIM_RESET_EN, + F_DCDC_1MS_SEL, + F_SEL_ILIM_DIV, + F_BATT_LEARN, + F_CHG_EN, + F_USB_SUS, + F_CHOP_SS_INIT, + F_CHOP_ALL_INIT, + F_DCDC_CLK_SEL, + F_CHOP_SS, + F_CHOP_ALL, + F_VBUSCLPS_TH_SET, + F_VCCCLPS_TH_SET, + F_WDT_FST, + F_WDT_PRE, + F_WDT_IBAT_SHORT, + F_WDT_THERM, + F_VSYSREG_SET, + F_VSYSVAL_THH_SET, + F_VSYSVAL_THL_SET, + F_ITRICH_SET, + F_IPRECH_SET, + F_ICHG_SET, + F_ITERM_SET, + F_VPRECHG_TH_SET, + F_VRBOOST_SET, + F_VFASTCHG_REG_SET1, + F_VFASTCHG_REG_SET2, + F_VFASTCHG_REG_SET3, + F_VRECHG_SET, + F_VBATOVP_SET, + F_IBATM_SHORT_SET, + F_PROCHOT_DG_SET, + F_PROCHOT_ICRIT_DG_SET, + F_PROCHOT_IDCHG_DG_SET, + F_PROCHOT_EN, + F_PROCHOT_ICRIT_SET, + F_PROCHOT_INORM_SET, + F_PROCHOT_IDCHG_SET, + F_PROCHOT_VSYS_SET, + F_IMON_INSEL, + F_PMON_INSEL, + F_IOUT_OUT_EN, + F_IOUT_SOURCE_SEL, + F_IOUT_GAIN_SET, + F_PMON_OUT_EN, + F_PMON_GAIN_SET, + F_PMON_DACIN_VAL, + F_IOUT_DACIN_VAL, + F_VCC_BCSRETRY, + F_VCC_ADCRTRY, + F_VCC_USBDETEN, + F_VCC_IDRDETEN, + F_VCC_ENUMRDY, + F_VCC_ADCPOLEN, + F_VCC_DCDMODE, + F_VCC_USB_SW_EN, + 
F_VCC_USB_SW, + F_VCC_DCDFAIL, + F_VCC_CHGPORT, + F_VCC_PUPDET, + F_VCC_VBUS_VLD, + F_VCC_CHGDET, + F_VCC_OTGDET, + F_VCC_VBINOP, + F_VCC_EXTID, + F_VCC_IDRDET, + F_VCC_INDO, + F_VCC_UCDSWEN, + F_VCC_RREF_EN, + F_VCC_DPPU_EN, + F_VCC_DPREF_EN, + F_VCC_DMREF_EN, + F_VCC_DPDET_EN, + F_VCC_DMDET_EN, + F_VCC_DPSINK_EN, + F_VCC_DMSINK_EN, + F_VCC_DP_BUFF_EN, + F_VCC_DM_BUFF_EN, + F_VCC_EXTCLKENBL, + F_VCC_PLSTESTEN, + F_VCC_UCDSWEN_TSTENB, + F_VCC_RREF_EN_TSTENB, + F_VCC_DPPU_EN_TSTENB, + F_VCC_DPREF_EN_TSTENB, + F_VCC_DMREF_EN_TSTENB, + F_VCC_DPDET_EN_TSTENB, + F_VCC_DMDET_EN_TSTENB, + F_VCC_DPSINK_EN_TSTENB, + F_VCC_DMSINK_EN_TSTENB, + F_VCC_DP_BUFF_EN_TSTENB, + F_VCC_DM_BUFF_EN_TSTENB, + F_VBUS_BCSRETRY, + F_VBUS_ADCRTRY, + F_VBUS_USBDETEN, + F_VBUS_IDRDETEN, + F_VBUS_ENUMRDY, + F_VBUS_ADCPOLEN, + F_VBUS_DCDMODE, + F_VBUS_USB_SW_EN, + F_VBUS_USB_SW, + F_VBUS_DCDFAIL, + F_VBUS_CHGPORT, + F_VBUS_PUPDET, + F_VBUS_VBUS_VLD, + F_VBUS_CHGDET, + F_VBUS_OTGDET, + F_VBUS_VBINOP, + F_VBUS_EXTID, + F_VBUS_IDRDET, + F_VBUS_INDO, + F_VBUS_UCDSWEN, + F_VBUS_RREF_EN, + F_VBUS_DPPU_EN, + F_VBUS_DPREF_EN, + F_VBUS_DMREF_EN, + F_VBUS_DPDET_EN, + F_VBUS_DMDET_EN, + F_VBUS_DPSINK_EN, + F_VBUS_DMSINK_EN, + F_VBUS_DP_BUFF_EN, + F_VBUS_DM_BUFF_EN, + F_VBUS_EXTCLKENBL, + F_VBUS_PLSTESTEN, + F_VBUS_UCDSWEN_TSTENB, + F_VBUS_RREF_EN_TSTENB, + F_VBUS_DPPU_EN_TSTENB, + F_VBUS_DPREF_EN_TSTENB, + F_VBUS_DMREF_EN_TSTENB, + F_VBUS_DPDET_EN_TSTENB, + F_VBUS_DMDET_EN_TSTENB, + F_VBUS_DPSINK_EN_TSTENB, + F_VBUS_DMSINK_EN_TSTENB, + F_VBUS_DP_BUFF_EN_TSTENB, + F_VBUS_DM_BUFF_EN_TSTENB, + F_CHIP_ID, + F_CHIP_REV, + F_ONE_CELL_MODE, + F_cell, + F_VACP_AUTO_DISCHG, + F_VACP_LOAD, + F_ACOK_POL, + F_ACOK_DISEN, + F_DEBUG_SET1, + F_DEBUG_SET0, + F_MONRST_STATE, + F_ALMRST_STATE, + F_CHGRST_STATE, + F_OTPLD_STATE, + F_ALLRST_STATE, + F_PROTECT_SET, + F_MAP_SET, + F_ADCINTERVAL, + F_ADCMOD, + F_ADCTMOD, + F_EXTIADPEN, + F_VSYSENB, + F_VCCENB, + F_VBUSENB, + F_VACPENB, + F_IACPENB, + F_THERMENB, + F_VBATENB, + F_IBATMENB, + F_IBATPENB, + F_TMPTHR1B, + F_TMPTHR1A, + F_TMPTHR2B, + F_TMPTHR2A, + F_TMPTHR3B, + F_TMPTHR3A, + F_TMPTHR4B, + F_TMPTHR4A, + F_TMPTHR5B, + F_TMPTHR5A, + F_IBATP_TH_SET, + F_IBATM_TH_SET, + F_VBAT_TH_SET, + F_THERM_TH_SET, + F_IACP_TH_SET, + F_VACP_TH_SET, + F_VBUS_TH_SET, + F_VCC_TH_SET, + F_VSYS_TH_SET, + F_EXTIADP_TH_SET, + F_IBATP_VAL, + F_IBATP_AVE_VAL, + F_IBATM_VAL, + F_IBATM_AVE_VAL, + F_VBAT_VAL, + F_VBAT_AVE_VAL, + F_THERM_VAL, + F_VTH_VAL, + F_IACP_VAL, + F_IACP_AVE_VAL, + F_VACP_VAL, + F_VACP_AVE_VAL, + F_VBUS_VAL, + F_VBUS_AVE_VAL, + F_VCC_VAL, + F_VCC_AVE_VAL, + F_VSYS_VAL, + F_VSYS_AVE_VAL, + F_EXTIADP_VAL, + F_EXTIADP_AVE_VAL, + F_VACPCLPS_TH_SET, + F_INT7_SET, + F_INT6_SET, + F_INT5_SET, + F_INT4_SET, + F_INT3_SET, + F_INT2_SET, + F_INT1_SET, + F_INT0_SET, + F_VBUS_RBUV_DET, + F_VBUS_RBUV_RES, + F_VBUS_TH_DET, + F_VBUS_TH_RES, + F_VBUS_IIN_MOD, + F_VBUS_OV_DET, + F_VBUS_OV_RES, + F_VBUS_CLPS_DET, + F_VBUS_CLPS, + F_VBUS_DET, + F_VBUS_RES, + F_VCC_RBUV_DET, + F_VCC_RBUV_RES, + F_VCC_TH_DET, + F_VCC_TH_RES, + F_VCC_IIN_MOD, + F_VCC_OVP_DET, + F_VCC_OVP_RES, + F_VCC_CLPS_DET, + F_VCC_CLPS_RES, + F_VCC_DET, + F_VCC_RES, + F_TH_DET, + F_TH_RMV, + F_TMP_OUT_DET, + F_TMP_OUT_RES, + F_VBAT_TH_DET, + F_VBAT_TH_RES, + F_IBAT_SHORT_DET, + F_IBAT_SHORT_RES, + F_VBAT_OV_DET, + F_VBAT_OV_RES, + F_BAT_ASSIST_DET, + F_BAT_ASSIST_RES, + F_VSYS_TH_DET, + F_VSYS_TH_RES, + F_VSYS_OV_DET, + F_VSYS_OV_RES, + F_VSYS_SHT_DET, + F_VSYS_SHT_RES, + F_VSYS_UV_DET, + F_VSYS_UV_RES, + F_OTP_LOAD_DONE, + F_PWR_ON, + F_EXTIADP_TRNS, 
+ F_EXTIADP_TH_DET, + F_EXIADP_TH_RES, + F_BAT_MNT_DET, + F_BAT_MNT_RES, + F_TSD_DET, + F_TSD_RES, + F_CHGWDT_EXP, + F_THERMWDT_EXP, + F_TMP_TRNS, + F_CHG_TRNS, + F_VBUS_UCD_PORT_DET, + F_VBUS_UCD_UCHG_DET, + F_VBUS_UCD_URID_RMV, + F_VBUS_UCD_OTG_DET, + F_VBUS_UCD_URID_MOD, + F_VCC_UCD_PORT_DET, + F_VCC_UCD_UCHG_DET, + F_VCC_UCD_URID_RMV, + F_VCC_UCD_OTG_DET, + F_VCC_UCD_URID_MOD, + F_PROCHOT_DET, + F_PROCHOT_RES, + F_VACP_DET, + F_VACP_RES, + F_VACP_TH_DET, + F_VACP_TH_RES, + F_IACP_TH_DET, + F_IACP_THE_RES, + F_THERM_TH_DET, + F_THERM_TH_RES, + F_IBATM_TH_DET, + F_IBATM_TH_RES, + F_IBATP_TH_DET, + F_IBATP_TH_RES, + F_INT7_STATUS, + F_INT6_STATUS, + F_INT5_STATUS, + F_INT4_STATUS, + F_INT3_STATUS, + F_INT2_STATUS, + F_INT1_STATUS, + F_INT0_STATUS, + F_ILIM_DECREASE, + F_RESERVE_OTPREG1, + F_POWER_SAVE_MODE, + F_DEBUG_MODE_SET, + F_DEBUG0x14, + F_DEBUG0x1A, + F_MAX_FIELDS +}; + +static const struct reg_field bd9995x_reg_fields[] = { + [F_PREV_CHGSTM_STATE] = REG_FIELD(CHGSTM_STATUS, 8, 14), + [F_CHGSTM_STATE] = REG_FIELD(CHGSTM_STATUS, 0, 6), + [F_VBAT_VSYS_STATUS] = REG_FIELD(VBAT_VSYS_STATUS, 0, 15), + [F_VBUS_VCC_STATUS] = REG_FIELD(VBUS_VCC_STATUS, 0, 12), + [F_BATTEMP] = REG_FIELD(CHGOP_STATUS, 8, 10), + [F_VRECHG_DET] = REG_FIELD(CHGOP_STATUS, 6, 6), + [F_RBOOST_UV] = REG_FIELD(CHGOP_STATUS, 1, 1), + [F_RBOOSTS] = REG_FIELD(CHGOP_STATUS, 0, 0), + [F_THERMWDT_VAL] = REG_FIELD(WDT_STATUS, 8, 15), + [F_CHGWDT_VAL] = REG_FIELD(WDT_STATUS, 0, 7), + [F_CUR_ILIM_VAL] = REG_FIELD(CUR_ILIM_VAL, 0, 13), + [F_SEL_ILIM_VAL] = REG_FIELD(SEL_ILIM_VAL, 0, 13), + [F_IBUS_LIM_SET] = REG_FIELD(IBUS_LIM_SET, 5, 13), + [F_ICC_LIM_SET] = REG_FIELD(ICC_LIM_SET, 5, 13), + [F_IOTG_LIM_SET] = REG_FIELD(IOTG_LIM_SET, 5, 13), + [F_OTG_BOTH_EN] = REG_FIELD(VIN_CTRL_SET, 15, 15), + [F_VRBOOST_TRIG] = REG_FIELD(VIN_CTRL_SET, 14, 14), + [F_VRBOOST_EN] = REG_FIELD(VIN_CTRL_SET, 12, 13), + [F_PP_BOTH_THRU] = REG_FIELD(VIN_CTRL_SET, 11, 11), + [F_VIN_ORD] = REG_FIELD(VIN_CTRL_SET, 7, 7), + [F_VBUS_EN] = REG_FIELD(VIN_CTRL_SET, 6, 6), + [F_VCC_EN] = REG_FIELD(VIN_CTRL_SET, 5, 5), + [F_VSYS_PRIORITY] = REG_FIELD(VIN_CTRL_SET, 4, 4), + [F_PPC_SUB_CAP] = REG_FIELD(VIN_CTRL_SET, 2, 3), + [F_PPC_CAP] = REG_FIELD(VIN_CTRL_SET, 0, 1), + [F_DCP_2500_SEL] = REG_FIELD(CHGOP_SET1, 15, 15), + [F_SDP_500_SEL] = REG_FIELD(CHGOP_SET1, 14, 14), + [F_ILIM_AUTO_DISEN] = REG_FIELD(CHGOP_SET1, 13, 13), + [F_VCC_BC_DISEN] = REG_FIELD(CHGOP_SET1, 11, 11), + [F_VBUS_BC_DISEN] = REG_FIELD(CHGOP_SET1, 10, 10), + [F_SDP_CHG_TRIG_EN] = REG_FIELD(CHGOP_SET1, 9, 9), + [F_SDP_CHG_TRIG] = REG_FIELD(CHGOP_SET1, 8, 8), + [F_AUTO_TOF] = REG_FIELD(CHGOP_SET1, 6, 6), + [F_AUTO_FST] = REG_FIELD(CHGOP_SET1, 5, 5), + [F_AUTO_RECH] = REG_FIELD(CHGOP_SET1, 3, 3), + [F_ILIM_RESET_EN] = REG_FIELD(CHGOP_SET2, 14, 14), + [F_DCDC_1MS_SEL] = REG_FIELD(CHGOP_SET2, 12, 13), + [F_SEL_ILIM_DIV] = REG_FIELD(CHGOP_SET2, 10, 10), + [F_BATT_LEARN] = REG_FIELD(CHGOP_SET2, 8, 8), + [F_CHG_EN] = REG_FIELD(CHGOP_SET2, 7, 7), + [F_USB_SUS] = REG_FIELD(CHGOP_SET2, 6, 6), + [F_CHOP_SS_INIT] = REG_FIELD(CHGOP_SET2, 5, 5), + [F_CHOP_ALL_INIT] = REG_FIELD(CHGOP_SET2, 4, 4), + [F_DCDC_CLK_SEL] = REG_FIELD(CHGOP_SET2, 2, 3), + [F_CHOP_SS] = REG_FIELD(CHGOP_SET2, 1, 1), + [F_CHOP_ALL] = REG_FIELD(CHGOP_SET2, 0, 0), + [F_VBUSCLPS_TH_SET] = REG_FIELD(VBUSCLPS_TH_SET, 7, 14), + [F_VCCCLPS_TH_SET] = REG_FIELD(VCCCLPS_TH_SET, 7, 14), + [F_WDT_FST] = REG_FIELD(CHGWDT_SET, 8, 15), + [F_WDT_PRE] = REG_FIELD(CHGWDT_SET, 0, 7), + [F_WDT_IBAT_SHORT] = REG_FIELD(BATTWDT_SET, 8, 15), + 
[F_WDT_THERM] = REG_FIELD(BATTWDT_SET, 0, 7), + [F_VSYSREG_SET] = REG_FIELD(VSYSREG_SET, 6, 14), + [F_VSYSVAL_THH_SET] = REG_FIELD(VSYSVAL_THH_SET, 6, 14), + [F_VSYSVAL_THL_SET] = REG_FIELD(VSYSVAL_THL_SET, 6, 14), + [F_ITRICH_SET] = REG_FIELD(ITRICH_SET, 6, 10), + [F_IPRECH_SET] = REG_FIELD(IPRECH_SET, 6, 10), + [F_ICHG_SET] = REG_FIELD(ICHG_SET, 6, 13), + [F_ITERM_SET] = REG_FIELD(ITERM_SET, 6, 10), + [F_VPRECHG_TH_SET] = REG_FIELD(VPRECHG_TH_SET, 6, 14), + [F_VRBOOST_SET] = REG_FIELD(VRBOOST_SET, 6, 14), + [F_VFASTCHG_REG_SET1] = REG_FIELD(VFASTCHG_REG_SET1, 4, 14), + [F_VFASTCHG_REG_SET2] = REG_FIELD(VFASTCHG_REG_SET2, 4, 14), + [F_VFASTCHG_REG_SET3] = REG_FIELD(VFASTCHG_REG_SET3, 4, 14), + [F_VRECHG_SET] = REG_FIELD(VRECHG_SET, 4, 14), + [F_VBATOVP_SET] = REG_FIELD(VBATOVP_SET, 4, 14), + [F_IBATM_SHORT_SET] = REG_FIELD(IBATSHORT_SET, 0, 14), + [F_PROCHOT_DG_SET] = REG_FIELD(PROCHOT_CTRL_SET, 14, 15), + [F_PROCHOT_ICRIT_DG_SET] = REG_FIELD(PROCHOT_CTRL_SET, 10, 11), + [F_PROCHOT_IDCHG_DG_SET] = REG_FIELD(PROCHOT_CTRL_SET, 8, 9), + [F_PROCHOT_EN] = REG_FIELD(PROCHOT_CTRL_SET, 0, 4), + [F_PROCHOT_ICRIT_SET] = REG_FIELD(PROCHOT_ICRIT_SET, 0, 14), + [F_PROCHOT_INORM_SET] = REG_FIELD(PROCHOT_INORM_SET, 0, 14), + [F_PROCHOT_IDCHG_SET] = REG_FIELD(PROCHOT_IDCHG_SET, 0, 14), + [F_PROCHOT_VSYS_SET] = REG_FIELD(PROCHOT_VSYS_SET, 0, 14), + [F_IMON_INSEL] = REG_FIELD(PMON_IOUT_CTRL_SET, 9, 9), + [F_PMON_INSEL] = REG_FIELD(PMON_IOUT_CTRL_SET, 8, 8), + [F_IOUT_OUT_EN] = REG_FIELD(PMON_IOUT_CTRL_SET, 7, 7), + [F_IOUT_SOURCE_SEL] = REG_FIELD(PMON_IOUT_CTRL_SET, 6, 6), + [F_IOUT_GAIN_SET] = REG_FIELD(PMON_IOUT_CTRL_SET, 4, 5), + [F_PMON_OUT_EN] = REG_FIELD(PMON_IOUT_CTRL_SET, 3, 3), + [F_PMON_GAIN_SET] = REG_FIELD(PMON_IOUT_CTRL_SET, 0, 2), + [F_PMON_DACIN_VAL] = REG_FIELD(PMON_DACIN_VAL, 0, 9), + [F_IOUT_DACIN_VAL] = REG_FIELD(IOUT_DACIN_VAL, 0, 11), + [F_VCC_BCSRETRY] = REG_FIELD(VCC_UCD_SET, 12, 12), + [F_VCC_ADCRTRY] = REG_FIELD(VCC_UCD_SET, 8, 8), + [F_VCC_USBDETEN] = REG_FIELD(VCC_UCD_SET, 7, 7), + [F_VCC_IDRDETEN] = REG_FIELD(VCC_UCD_SET, 6, 6), + [F_VCC_ENUMRDY] = REG_FIELD(VCC_UCD_SET, 5, 5), + [F_VCC_ADCPOLEN] = REG_FIELD(VCC_UCD_SET, 4, 4), + [F_VCC_DCDMODE] = REG_FIELD(VCC_UCD_SET, 3, 3), + [F_VCC_USB_SW_EN] = REG_FIELD(VCC_UCD_SET, 1, 1), + [F_VCC_USB_SW] = REG_FIELD(VCC_UCD_SET, 0, 0), + [F_VCC_DCDFAIL] = REG_FIELD(VCC_UCD_STATUS, 15, 15), + [F_VCC_CHGPORT] = REG_FIELD(VCC_UCD_STATUS, 12, 13), + [F_VCC_PUPDET] = REG_FIELD(VCC_UCD_STATUS, 11, 11), + [F_VCC_VBUS_VLD] = REG_FIELD(VCC_UCD_STATUS, 7, 7), + [F_VCC_CHGDET] = REG_FIELD(VCC_UCD_STATUS, 6, 6), + [F_VCC_OTGDET] = REG_FIELD(VCC_UCD_STATUS, 3, 3), + [F_VCC_VBINOP] = REG_FIELD(VCC_IDD_STATUS, 6, 6), + [F_VCC_EXTID] = REG_FIELD(VCC_IDD_STATUS, 5, 5), + [F_VCC_IDRDET] = REG_FIELD(VCC_IDD_STATUS, 4, 4), + [F_VCC_INDO] = REG_FIELD(VCC_IDD_STATUS, 0, 3), + [F_VCC_UCDSWEN] = REG_FIELD(VCC_UCD_FCTRL_SET, 10, 10), + [F_VCC_RREF_EN] = REG_FIELD(VCC_UCD_FCTRL_SET, 9, 9), + [F_VCC_DPPU_EN] = REG_FIELD(VCC_UCD_FCTRL_SET, 8, 8), + [F_VCC_DPREF_EN] = REG_FIELD(VCC_UCD_FCTRL_SET, 7, 7), + [F_VCC_DMREF_EN] = REG_FIELD(VCC_UCD_FCTRL_SET, 6, 6), + [F_VCC_DPDET_EN] = REG_FIELD(VCC_UCD_FCTRL_SET, 5, 5), + [F_VCC_DMDET_EN] = REG_FIELD(VCC_UCD_FCTRL_SET, 4, 4), + [F_VCC_DPSINK_EN] = REG_FIELD(VCC_UCD_FCTRL_SET, 3, 3), + [F_VCC_DMSINK_EN] = REG_FIELD(VCC_UCD_FCTRL_SET, 2, 2), + [F_VCC_DP_BUFF_EN] = REG_FIELD(VCC_UCD_FCTRL_SET, 1, 1), + [F_VCC_DM_BUFF_EN] = REG_FIELD(VCC_UCD_FCTRL_SET, 0, 0), + [F_VCC_EXTCLKENBL] = REG_FIELD(VCC_UCD_FCTRL_EN, 15, 15), + 
[F_VCC_PLSTESTEN] = REG_FIELD(VCC_UCD_FCTRL_EN, 14, 14), + [F_VCC_UCDSWEN_TSTENB] = REG_FIELD(VCC_UCD_FCTRL_EN, 10, 10), + [F_VCC_RREF_EN_TSTENB] = REG_FIELD(VCC_UCD_FCTRL_EN, 9, 9), + [F_VCC_DPPU_EN_TSTENB] = REG_FIELD(VCC_UCD_FCTRL_EN, 8, 8), + [F_VCC_DPREF_EN_TSTENB] = REG_FIELD(VCC_UCD_FCTRL_EN, 7, 7), + [F_VCC_DMREF_EN_TSTENB] = REG_FIELD(VCC_UCD_FCTRL_EN, 6, 6), + [F_VCC_DPDET_EN_TSTENB] = REG_FIELD(VCC_UCD_FCTRL_EN, 5, 5), + [F_VCC_DMDET_EN_TSTENB] = REG_FIELD(VCC_UCD_FCTRL_EN, 4, 4), + [F_VCC_DPSINK_EN_TSTENB] = REG_FIELD(VCC_UCD_FCTRL_EN, 3, 3), + [F_VCC_DMSINK_EN_TSTENB] = REG_FIELD(VCC_UCD_FCTRL_EN, 2, 2), + [F_VCC_DP_BUFF_EN_TSTENB] = REG_FIELD(VCC_UCD_FCTRL_EN, 1, 1), + [F_VCC_DM_BUFF_EN_TSTENB] = REG_FIELD(VCC_UCD_FCTRL_EN, 0, 0), + + [F_VBUS_BCSRETRY] = REG_FIELD(VBUS_UCD_SET, 12, 12), + [F_VBUS_ADCRTRY] = REG_FIELD(VBUS_UCD_SET, 8, 8), + [F_VBUS_USBDETEN] = REG_FIELD(VBUS_UCD_SET, 7, 7), + [F_VBUS_IDRDETEN] = REG_FIELD(VBUS_UCD_SET, 6, 6), + [F_VBUS_ENUMRDY] = REG_FIELD(VBUS_UCD_SET, 5, 5), + [F_VBUS_ADCPOLEN] = REG_FIELD(VBUS_UCD_SET, 4, 4), + [F_VBUS_DCDMODE] = REG_FIELD(VBUS_UCD_SET, 3, 3), + [F_VBUS_USB_SW_EN] = REG_FIELD(VBUS_UCD_SET, 1, 1), + [F_VBUS_USB_SW] = REG_FIELD(VBUS_UCD_SET, 0, 0), + [F_VBUS_DCDFAIL] = REG_FIELD(VBUS_UCD_STATUS, 15, 15), + [F_VBUS_CHGPORT] = REG_FIELD(VBUS_UCD_STATUS, 12, 13), + [F_VBUS_PUPDET] = REG_FIELD(VBUS_UCD_STATUS, 11, 11), + [F_VBUS_VBUS_VLD] = REG_FIELD(VBUS_UCD_STATUS, 7, 7), + [F_VBUS_CHGDET] = REG_FIELD(VBUS_UCD_STATUS, 6, 6), + [F_VBUS_OTGDET] = REG_FIELD(VBUS_UCD_STATUS, 3, 3), + [F_VBUS_VBINOP] = REG_FIELD(VBUS_IDD_STATUS, 6, 6), + [F_VBUS_EXTID] = REG_FIELD(VBUS_IDD_STATUS, 5, 5), + [F_VBUS_IDRDET] = REG_FIELD(VBUS_IDD_STATUS, 4, 4), + [F_VBUS_INDO] = REG_FIELD(VBUS_IDD_STATUS, 0, 3), + [F_VBUS_UCDSWEN] = REG_FIELD(VCC_UCD_FCTRL_SET, 10, 10), + [F_VBUS_RREF_EN] = REG_FIELD(VCC_UCD_FCTRL_SET, 9, 9), + [F_VBUS_DPPU_EN] = REG_FIELD(VCC_UCD_FCTRL_SET, 8, 8), + [F_VBUS_DPREF_EN] = REG_FIELD(VCC_UCD_FCTRL_SET, 7, 7), + [F_VBUS_DMREF_EN] = REG_FIELD(VCC_UCD_FCTRL_SET, 6, 6), + [F_VBUS_DPDET_EN] = REG_FIELD(VCC_UCD_FCTRL_SET, 5, 5), + [F_VBUS_DMDET_EN] = REG_FIELD(VCC_UCD_FCTRL_SET, 4, 4), + [F_VBUS_DPSINK_EN] = REG_FIELD(VCC_UCD_FCTRL_SET, 3, 3), + [F_VBUS_DMSINK_EN] = REG_FIELD(VCC_UCD_FCTRL_SET, 2, 2), + [F_VBUS_DP_BUFF_EN] = REG_FIELD(VCC_UCD_FCTRL_SET, 1, 1), + [F_VBUS_DM_BUFF_EN] = REG_FIELD(VCC_UCD_FCTRL_SET, 0, 0), + + [F_VBUS_EXTCLKENBL] = REG_FIELD(VBUS_UCD_FCTRL_EN, 15, 15), + [F_VBUS_PLSTESTEN] = REG_FIELD(VBUS_UCD_FCTRL_EN, 14, 14), + [F_VBUS_UCDSWEN_TSTENB] = REG_FIELD(VBUS_UCD_FCTRL_EN, 10, 10), + [F_VBUS_RREF_EN_TSTENB] = REG_FIELD(VBUS_UCD_FCTRL_EN, 9, 9), + [F_VBUS_DPPU_EN_TSTENB] = REG_FIELD(VBUS_UCD_FCTRL_EN, 8, 8), + [F_VBUS_DPREF_EN_TSTENB] = REG_FIELD(VBUS_UCD_FCTRL_EN, 7, 7), + [F_VBUS_DMREF_EN_TSTENB] = REG_FIELD(VBUS_UCD_FCTRL_EN, 6, 6), + [F_VBUS_DPDET_EN_TSTENB] = REG_FIELD(VBUS_UCD_FCTRL_EN, 5, 5), + [F_VBUS_DMDET_EN_TSTENB] = REG_FIELD(VBUS_UCD_FCTRL_EN, 4, 4), + [F_VBUS_DPSINK_EN_TSTENB] = REG_FIELD(VBUS_UCD_FCTRL_EN, 3, 3), + [F_VBUS_DMSINK_EN_TSTENB] = REG_FIELD(VBUS_UCD_FCTRL_EN, 2, 2), + [F_VBUS_DP_BUFF_EN_TSTENB] = REG_FIELD(VBUS_UCD_FCTRL_EN, 1, 1), + [F_VBUS_DM_BUFF_EN_TSTENB] = REG_FIELD(VBUS_UCD_FCTRL_EN, 0, 0), + + [F_CHIP_ID] = REG_FIELD(CHIP_ID, 0, 15), + [F_CHIP_REV] = REG_FIELD(CHIP_REV, 0, 15), + [F_ONE_CELL_MODE] = REG_FIELD(IC_SET1, 11, 11), + [F_cell] = REG_FIELD(IC_SET1, 1, 1), + [F_VACP_AUTO_DISCHG] = REG_FIELD(IC_SET1, 9, 9), + [F_VACP_LOAD] = REG_FIELD(IC_SET1, 8, 8), + 
[F_ACOK_POL] = REG_FIELD(IC_SET1, 1, 1), + [F_ACOK_DISEN] = REG_FIELD(IC_SET1, 0, 0), + [F_DEBUG_SET1] = REG_FIELD(IC_SET2, 4, 8), + [F_DEBUG_SET0] = REG_FIELD(IC_SET2, 0, 0), + [F_MONRST_STATE] = REG_FIELD(SYSTEM_STATUS, 6, 6), + [F_ALMRST_STATE] = REG_FIELD(SYSTEM_STATUS, 5, 5), + [F_CHGRST_STATE] = REG_FIELD(SYSTEM_STATUS, 4, 4), + [F_OTPLD_STATE] = REG_FIELD(SYSTEM_STATUS, 1, 1), + [F_ALLRST_STATE] = REG_FIELD(SYSTEM_STATUS, 0, 0), + [F_PROTECT_SET] = REG_FIELD(PROTECT_SET, 0, 15), + [F_MAP_SET] = REG_FIELD(MAP_SET, 0, 15), + [F_ADCINTERVAL] = REG_FIELD(VM_CTRL_SET, 14, 15), + [F_ADCMOD] = REG_FIELD(VM_CTRL_SET, 12, 13), + [F_ADCTMOD] = REG_FIELD(VM_CTRL_SET, 10, 11), + [F_EXTIADPEN] = REG_FIELD(VM_CTRL_SET, 9, 9), + [F_VSYSENB] = REG_FIELD(VM_CTRL_SET, 8, 8), + [F_VCCENB] = REG_FIELD(VM_CTRL_SET, 7, 7), + [F_VBUSENB] = REG_FIELD(VM_CTRL_SET, 6, 6), + [F_VACPENB] = REG_FIELD(VM_CTRL_SET, 5, 5), + [F_IACPENB] = REG_FIELD(VM_CTRL_SET, 4, 4), + [F_THERMENB] = REG_FIELD(VM_CTRL_SET, 3, 3), + [F_VBATENB] = REG_FIELD(VM_CTRL_SET, 2, 2), + [F_IBATMENB] = REG_FIELD(VM_CTRL_SET, 1, 1), + [F_IBATPENB] = REG_FIELD(VM_CTRL_SET, 0, 0), + [F_TMPTHR1B] = REG_FIELD(THERM_WINDOW_SET1, 8, 15), + [F_TMPTHR1A] = REG_FIELD(THERM_WINDOW_SET1, 0, 7), + [F_TMPTHR2B] = REG_FIELD(THERM_WINDOW_SET2, 8, 15), + [F_TMPTHR2A] = REG_FIELD(THERM_WINDOW_SET2, 0, 7), + [F_TMPTHR3B] = REG_FIELD(THERM_WINDOW_SET3, 8, 15), + [F_TMPTHR3A] = REG_FIELD(THERM_WINDOW_SET3, 0, 7), + [F_TMPTHR4B] = REG_FIELD(THERM_WINDOW_SET4, 8, 15), + [F_TMPTHR4A] = REG_FIELD(THERM_WINDOW_SET4, 0, 7), + [F_TMPTHR5B] = REG_FIELD(THERM_WINDOW_SET5, 8, 15), + [F_TMPTHR5A] = REG_FIELD(THERM_WINDOW_SET5, 0, 7), + [F_IBATP_TH_SET] = REG_FIELD(IBATP_TH_SET, 0, 14), + [F_IBATM_TH_SET] = REG_FIELD(IBATM_TH_SET, 0, 14), + [F_VBAT_TH_SET] = REG_FIELD(VBAT_TH_SET, 0, 14), + [F_THERM_TH_SET] = REG_FIELD(THERM_TH_SET, 0, 7), + [F_IACP_TH_SET] = REG_FIELD(IACP_TH_SET, 0, 14), + [F_VACP_TH_SET] = REG_FIELD(VACP_TH_SET, 0, 14), + [F_VBUS_TH_SET] = REG_FIELD(VBUS_TH_SET, 0, 14), + [F_VCC_TH_SET] = REG_FIELD(VCC_TH_SET, 0, 14), + [F_VSYS_TH_SET] = REG_FIELD(VSYS_TH_SET, 0, 14), + [F_EXTIADP_TH_SET] = REG_FIELD(EXTIADP_TH_SET, 0, 11), + [F_IBATP_VAL] = REG_FIELD(IBATP_VAL, 0, 14), + [F_IBATP_AVE_VAL] = REG_FIELD(IBATP_AVE_VAL, 0, 14), + [F_IBATM_VAL] = REG_FIELD(IBATM_VAL, 0, 14), + [F_IBATM_AVE_VAL] = REG_FIELD(IBATM_AVE_VAL, 0, 14), + [F_VBAT_VAL] = REG_FIELD(VBAT_VAL, 0, 14), + [F_VBAT_AVE_VAL] = REG_FIELD(VBAT_AVE_VAL, 0, 14), + [F_THERM_VAL] = REG_FIELD(THERM_VAL, 0, 7), + [F_VTH_VAL] = REG_FIELD(VTH_VAL, 0, 11), + [F_IACP_VAL] = REG_FIELD(IACP_VAL, 0, 14), + [F_IACP_AVE_VAL] = REG_FIELD(IACP_AVE_VAL, 0, 14), + [F_VACP_VAL] = REG_FIELD(VACP_VAL, 0, 14), + [F_VACP_AVE_VAL] = REG_FIELD(VACP_AVE_VAL, 0, 14), + [F_VBUS_VAL] = REG_FIELD(VBUS_VAL, 0, 14), + [F_VBUS_AVE_VAL] = REG_FIELD(VBUS_AVE_VAL, 0, 14), + [F_VCC_VAL] = REG_FIELD(VCC_VAL, 0, 14), + [F_VCC_AVE_VAL] = REG_FIELD(VCC_AVE_VAL, 0, 14), + [F_VSYS_VAL] = REG_FIELD(VSYS_VAL, 0, 14), + [F_VSYS_AVE_VAL] = REG_FIELD(VSYS_AVE_VAL, 0, 14), + [F_EXTIADP_VAL] = REG_FIELD(EXTIADP_VAL, 0, 11), + [F_EXTIADP_AVE_VAL] = REG_FIELD(EXTIADP_AVE_VAL, 0, 11), + [F_VACPCLPS_TH_SET] = REG_FIELD(VACPCLPS_TH_SET, 7, 14), + [F_INT7_SET] = REG_FIELD(INT7_SET, 0, 15), + [F_INT6_SET] = REG_FIELD(INT6_SET, 0, 13), + [F_INT5_SET] = REG_FIELD(INT5_SET, 0, 13), + [F_INT4_SET] = REG_FIELD(INT4_SET, 0, 9), + [F_INT3_SET] = REG_FIELD(INT3_SET, 0, 15), + [F_INT2_SET] = REG_FIELD(INT2_SET, 0, 15), + [F_INT1_SET] = REG_FIELD(INT1_SET, 0, 
15), + [F_INT0_SET] = REG_FIELD(INT0_SET, 0, 7), + [F_VBUS_RBUV_DET] = REG_FIELD(INT1_SET, 15, 15), + [F_VBUS_RBUV_RES] = REG_FIELD(INT1_SET, 14, 14), + [F_VBUS_TH_DET] = REG_FIELD(INT1_SET, 9, 9), + [F_VBUS_TH_RES] = REG_FIELD(INT1_SET, 8, 8), + [F_VBUS_IIN_MOD] = REG_FIELD(INT1_SET, 6, 6), + [F_VBUS_OV_DET] = REG_FIELD(INT1_SET, 5, 5), + [F_VBUS_OV_RES] = REG_FIELD(INT1_SET, 4, 4), + [F_VBUS_CLPS_DET] = REG_FIELD(INT1_SET, 3, 3), + [F_VBUS_CLPS] = REG_FIELD(INT1_SET, 2, 2), + [F_VBUS_DET] = REG_FIELD(INT1_SET, 1, 1), + [F_VBUS_RES] = REG_FIELD(INT1_SET, 0, 0), + [F_VCC_RBUV_DET] = REG_FIELD(INT2_SET, 15, 15), + [F_VCC_RBUV_RES] = REG_FIELD(INT2_SET, 14, 14), + [F_VCC_TH_DET] = REG_FIELD(INT2_SET, 9, 9), + [F_VCC_TH_RES] = REG_FIELD(INT2_SET, 8, 8), + [F_VCC_IIN_MOD] = REG_FIELD(INT2_SET, 6, 6), + [F_VCC_OVP_DET] = REG_FIELD(INT2_SET, 5, 5), + [F_VCC_OVP_RES] = REG_FIELD(INT2_SET, 4, 4), + [F_VCC_CLPS_DET] = REG_FIELD(INT2_SET, 3, 3), + [F_VCC_CLPS_RES] = REG_FIELD(INT2_SET, 2, 2), + [F_VCC_DET] = REG_FIELD(INT2_SET, 1, 1), + [F_VCC_RES] = REG_FIELD(INT2_SET, 0, 0), + [F_TH_DET] = REG_FIELD(INT3_SET, 15, 15), + [F_TH_RMV] = REG_FIELD(INT3_SET, 14, 14), + [F_TMP_OUT_DET] = REG_FIELD(INT3_SET, 11, 11), + [F_TMP_OUT_RES] = REG_FIELD(INT3_SET, 10, 10), + [F_VBAT_TH_DET] = REG_FIELD(INT3_SET, 9, 9), + [F_VBAT_TH_RES] = REG_FIELD(INT3_SET, 8, 8), + [F_IBAT_SHORT_DET] = REG_FIELD(INT3_SET, 7, 7), + [F_IBAT_SHORT_RES] = REG_FIELD(INT3_SET, 6, 6), + [F_VBAT_OV_DET] = REG_FIELD(INT3_SET, 5, 5), + [F_VBAT_OV_RES] = REG_FIELD(INT3_SET, 4, 4), + [F_BAT_ASSIST_DET] = REG_FIELD(INT3_SET, 3, 3), + [F_BAT_ASSIST_RES] = REG_FIELD(INT3_SET, 2, 2), + [F_VSYS_TH_DET] = REG_FIELD(INT4_SET, 9, 9), + [F_VSYS_TH_RES] = REG_FIELD(INT4_SET, 8, 8), + [F_VSYS_OV_DET] = REG_FIELD(INT4_SET, 5, 5), + [F_VSYS_OV_RES] = REG_FIELD(INT4_SET, 4, 4), + [F_VSYS_SHT_DET] = REG_FIELD(INT4_SET, 3, 3), + [F_VSYS_SHT_RES] = REG_FIELD(INT4_SET, 2, 2), + [F_VSYS_UV_DET] = REG_FIELD(INT4_SET, 1, 1), + [F_VSYS_UV_RES] = REG_FIELD(INT4_SET, 0, 0), + [F_OTP_LOAD_DONE] = REG_FIELD(INT5_SET, 13, 13), + [F_PWR_ON] = REG_FIELD(INT5_SET, 12, 12), + [F_EXTIADP_TRNS] = REG_FIELD(INT5_SET, 11, 11), + [F_EXTIADP_TH_DET] = REG_FIELD(INT5_SET, 9, 9), + [F_EXIADP_TH_RES] = REG_FIELD(INT5_SET, 8, 8), + [F_BAT_MNT_DET] = REG_FIELD(INT5_SET, 7, 7), + [F_BAT_MNT_RES] = REG_FIELD(INT5_SET, 6, 6), + [F_TSD_DET] = REG_FIELD(INT5_SET, 5, 5), + [F_TSD_RES] = REG_FIELD(INT5_SET, 4, 4), + [F_CHGWDT_EXP] = REG_FIELD(INT5_SET, 3, 3), + [F_THERMWDT_EXP] = REG_FIELD(INT5_SET, 2, 2), + [F_TMP_TRNS] = REG_FIELD(INT5_SET, 1, 1), + [F_CHG_TRNS] = REG_FIELD(INT5_SET, 0, 0), + [F_VBUS_UCD_PORT_DET] = REG_FIELD(INT6_SET, 13, 13), + [F_VBUS_UCD_UCHG_DET] = REG_FIELD(INT6_SET, 12, 12), + [F_VBUS_UCD_URID_RMV] = REG_FIELD(INT6_SET, 11, 11), + [F_VBUS_UCD_OTG_DET] = REG_FIELD(INT6_SET, 10, 10), + [F_VBUS_UCD_URID_MOD] = REG_FIELD(INT6_SET, 8, 8), + [F_VCC_UCD_PORT_DET] = REG_FIELD(INT6_SET, 5, 5), + [F_VCC_UCD_UCHG_DET] = REG_FIELD(INT6_SET, 4, 4), + [F_VCC_UCD_URID_RMV] = REG_FIELD(INT6_SET, 3, 3), + [F_VCC_UCD_OTG_DET] = REG_FIELD(INT6_SET, 2, 2), + [F_VCC_UCD_URID_MOD] = REG_FIELD(INT6_SET, 0, 0), + [F_PROCHOT_DET] = REG_FIELD(INT7_SET, 15, 15), + [F_PROCHOT_RES] = REG_FIELD(INT7_SET, 14, 14), + [F_VACP_DET] = REG_FIELD(INT7_SET, 11, 11), + [F_VACP_RES] = REG_FIELD(INT7_SET, 10, 10), + [F_VACP_TH_DET] = REG_FIELD(INT7_SET, 9, 9), + [F_VACP_TH_RES] = REG_FIELD(INT7_SET, 8, 8), + [F_IACP_TH_DET] = REG_FIELD(INT7_SET, 7, 7), + [F_IACP_THE_RES] = REG_FIELD(INT7_SET, 6, 6), + 
[F_THERM_TH_DET] = REG_FIELD(INT7_SET, 5, 5), + [F_THERM_TH_RES] = REG_FIELD(INT7_SET, 4, 4), + [F_IBATM_TH_DET] = REG_FIELD(INT7_SET, 3, 3), + [F_IBATM_TH_RES] = REG_FIELD(INT7_SET, 2, 2), + [F_IBATP_TH_DET] = REG_FIELD(INT7_SET, 1, 1), + [F_IBATP_TH_RES] = REG_FIELD(INT7_SET, 0, 0), + [F_INT7_STATUS] = REG_FIELD(INT7_STATUS, 0, 15), + [F_INT6_STATUS] = REG_FIELD(INT6_STATUS, 0, 13), + [F_INT5_STATUS] = REG_FIELD(INT5_STATUS, 0, 13), + [F_INT4_STATUS] = REG_FIELD(INT4_STATUS, 0, 9), + [F_INT3_STATUS] = REG_FIELD(INT3_STATUS, 0, 15), + [F_INT2_STATUS] = REG_FIELD(INT2_STATUS, 0, 15), + [F_INT1_STATUS] = REG_FIELD(INT1_STATUS, 0, 15), + [F_INT0_STATUS] = REG_FIELD(INT0_STATUS, 0, 7), + [F_ILIM_DECREASE] = REG_FIELD(OTPREG0, 0, 15), + [F_RESERVE_OTPREG1] = REG_FIELD(OTPREG1, 0, 15), + [F_POWER_SAVE_MODE] = REG_FIELD(SMBREG, 0, 15), + [F_DEBUG_MODE_SET] = REG_FIELD(DEBUG_MODE_SET, 0, 15), + [F_DEBUG0x14] = REG_FIELD(DEBUG0x14, 0, 15), + [F_DEBUG0x1A] = REG_FIELD(DEBUG0x1A, 0, 15), +}; + +/* CHGSTM_STATEs */ +#define CHGSTM_SUSPEND 0x00 +#define CHGSTM_TRICKLE_CHARGE 0x01 +#define CHGSTM_PRE_CHARGE 0x02 +#define CHGSTM_FAST_CHARGE 0x03 +#define CHGSTM_TOP_OFF 0x04 +#define CHGSTM_DONE 0x05 +#define CHGSTM_OTG 0x08 +#define CHGSTM_OTG_DONE 0x09 +#define CHGSTM_TEMPERATURE_ERROR_1 0x10 +#define CHGSTM_TEMPERATURE_ERROR_2 0x11 +#define CHGSTM_TEMPERATURE_ERROR_3 0x12 +#define CHGSTM_TEMPERATURE_ERROR_4 0x13 +#define CHGSTM_TEMPERATURE_ERROR_5 0x14 +#define CHGSTM_TEMPERATURE_ERROR_6 0x15 +#define CHGSTM_TEMPERATURE_ERROR_7 0x18 +#define CHGSTM_THERMAL_SHUT_DOWN_1 0x20 +#define CHGSTM_THERMAL_SHUT_DOWN_2 0x21 +#define CHGSTM_THERMAL_SHUT_DOWN_3 0x22 +#define CHGSTM_THERMAL_SHUT_DOWN_4 0x23 +#define CHGSTM_THERMAL_SHUT_DOWN_5 0x24 +#define CHGSTM_THERMAL_SHUT_DOWN_6 0x25 +#define CHGSTM_THERMAL_SHUT_DOWN_7 0x28 +#define CHGSTM_BATTERY_ERROR 0x40 + +/* VBAT_VSYS_STATUS */ +#define STATUS_VSYS_OV BIT(15) +#define STATUS_VSYS_SSD BIT(14) +#define STATUS_VSYS_SCP BIT(13) +#define STATUS_VSYS_UVN BIT(12) +#define STATUS_IBAT_SHORT BIT(6) +#define STATUS_VBAT_OV BIT(3) +#define STATUS_DEAD_BAT BIT(0) + +/* VBUS_VCC_STATUS */ +#define STATUS_VACP_DET BIT(12) +#define STATUS_VCC_OVP BIT(11) +#define STATUS_ILIM_VCC_MOD BIT(10) +#define STATUS_VCC_CLPS BIT(9) +#define STATUS_VCC_DET BIT(8) +#define STATUS_VBUS_OVP BIT(3) +#define STATUS_ILIM_VBUS_MOD BIT(2) +#define STATUS_VBUS_CLPS BIT(1) +#define STATUS_VBUS_DET BIT(0) + +/* Interrupt set/status definitions */ + +/* INT 0 */ +#define INT0_INT7_STATUS BIT(7) +#define INT0_INT6_STATUS BIT(6) +#define INT0_INT5_STATUS BIT(5) +#define INT0_INT4_STATUS BIT(4) +#define INT0_INT3_STATUS BIT(3) +#define INT0_INT2_STATUS BIT(2) +#define INT0_INT1_STATUS BIT(1) +#define INT0_INT0_STATUS BIT(0) +#define INT0_ALL 0xff + +/* INT 1 */ +#define VBUS_RBUV_DET BIT(15) +#define VBUS_RBUV_RES BIT(14) +#define VBUS_TH_DET BIT(9) +#define VBUS_TH_RES BIT(8) +#define VBUS_IIN_MOD BIT(6) +#define VBUS_OV_DET BIT(5) +#define VBUS_OV_RES BIT(4) +#define VBUS_CLPS_DET BIT(3) +#define VBUS_CLPS BIT(2) +#define VBUS_DET BIT(1) +#define VBUS_RES BIT(0) +#define INT1_ALL (VBUS_RBUV_DET|\ + VBUS_RBUV_RES|\ + VBUS_TH_DET |\ + VBUS_TH_RES |\ + VBUS_IIN_MOD|\ + VBUS_OV_DET |\ + VBUS_OV_RES |\ + VBUS_CLPS_DET |\ + VBUS_CLPS |\ + VBUS_DET |\ + VBUS_RES) + +/* INT 2 */ +#define VCC_RBUV_DET BIT(15) +#define VCC_RBUV_RES BIT(14) +#define VCC_TH_DET BIT(9) +#define VCC_TH_RES BIT(8) +#define VCC_IIN_MOD BIT(6) +#define VCC_OVP_DET BIT(5) +#define VCC_OVP_RES BIT(4) +#define VCC_CLPS_DET BIT(3) 
+#define VCC_CLPS_RES BIT(2) +#define VCC_DET BIT(1) +#define VCC_RES BIT(0) +#define INT2_ALL (VCC_RBUV_DET |\ + VCC_RBUV_RES |\ + VCC_TH_DET |\ + VCC_TH_RES |\ + VCC_IIN_MOD |\ + VCC_OVP_DET |\ + VCC_OVP_RES |\ + VCC_CLPS_DET |\ + VCC_CLPS_RES |\ + VCC_DET |\ + VCC_RES) +/* INT 3 */ +#define TH_DET BIT(15) +#define TH_RMV BIT(14) +#define TMP_OUT_DET BIT(11) +#define TMP_OUT_RES BIT(10) +#define VBAT_TH_DET BIT(9) +#define VBAT_TH_RES BIT(8) +#define IBAT_SHORT_DET BIT(7) +#define IBAT_SHORT_RES BIT(6) +#define VBAT_OV_DET BIT(5) +#define VBAT_OV_RES BIT(4) +#define BAT_ASSIST_DET BIT(3) +#define BAT_ASSIST_RES BIT(2) +#define INT3_ALL (TH_DET |\ + TH_RMV |\ + TMP_OUT_DET |\ + TMP_OUT_RES |\ + VBAT_TH_DET |\ + VBAT_TH_RES |\ + IBAT_SHORT_DET |\ + IBAT_SHORT_RES |\ + VBAT_OV_DET |\ + VBAT_OV_RES |\ + BAT_ASSIST_DET |\ + BAT_ASSIST_RES) + +/* INT 4 */ +#define VSYS_TH_DET BIT(9) +#define VSYS_TH_RES BIT(8) +#define VSYS_OV_DET BIT(5) +#define VSYS_OV_RES BIT(4) +#define VSYS_SHT_DET BIT(3) +#define VSYS_SHT_RES BIT(2) +#define VSYS_UV_DET BIT(1) +#define VSYS_UV_RES BIT(0) +#define INT4_ALL (VSYS_TH_DET |\ + VSYS_TH_RES |\ + VSYS_OV_DET |\ + VSYS_OV_RES |\ + VSYS_SHT_DET |\ + VSYS_SHT_RES |\ + VSYS_UV_DET |\ + VSYS_UV_RES) + +/* INT 5*/ +#define OTP_LOAD_DONE BIT(13) +#define PWR_ON BIT(12) +#define EXTIADP_TRNS BIT(11) +#define EXTIADP_TH_DET BIT(9) +#define EXIADP_TH_RES BIT(8) +#define BAT_MNT_DET BIT(7) +#define BAT_MNT_RES BIT(6) +#define TSD_DET BIT(5) +#define TSD_RES BIT(4) +#define CHGWDT_EXP BIT(3) +#define THERMWDT_EXP BIT(2) +#define TMP_TRNS BIT(1) +#define CHG_TRNS BIT(0) +#define INT5_ALL (OTP_LOAD_DONE |\ + PWR_ON |\ + EXTIADP_TRNS |\ + EXTIADP_TH_DET |\ + EXIADP_TH_RES |\ + BAT_MNT_DET |\ + BAT_MNT_RES |\ + TSD_DET |\ + TSD_RES |\ + CHGWDT_EXP |\ + THERMWDT_EXP |\ + TMP_TRNS |\ + CHG_TRNS) + +/* INT 6*/ +#define VBUS_UCD_PORT_DET BIT(13) +#define VBUS_UCD_UCHG_DET BIT(12) +#define VBUS_UCD_URID_RMV BIT(11) +#define VBUS_UCD_OTG_DET BIT(10) +#define VBUS_UCD_URID_MOD BIT(8) +#define VCC_UCD_PORT_DET BIT(5) +#define VCC_UCD_UCHG_DET BIT(4) +#define VCC_UCD_URID_RMV BIT(3) +#define VCC_UCD_OTG_DET BIT(2) +#define VCC_UCD_URID_MOD BIT(0) +#define INT6_ALL (VBUS_UCD_PORT_DET |\ + VBUS_UCD_UCHG_DET |\ + VBUS_UCD_URID_RMV |\ + VBUS_UCD_OTG_DET |\ + VBUS_UCD_URID_MOD |\ + VCC_UCD_PORT_DET |\ + VCC_UCD_UCHG_DET |\ + VCC_UCD_URID_RMV |\ + VCC_UCD_OTG_DET |\ + VCC_UCD_URID_MOD) + +/* INT 7 */ +#define PROCHOT_DET BIT(15) +#define PROCHOT_RES BIT(14) +#define VACP_DET BIT(11) +#define VACP_RES BIT(10) +#define VACP_TH_DET BIT(9) +#define VACP_TH_RES BIT(8) +#define IACP_TH_DET BIT(7) +#define IACP_THE_RES BIT(6) +#define THERM_TH_DET BIT(5) +#define THERM_TH_RES BIT(4) +#define IBATM_TH_DET BIT(3) +#define IBATM_TH_RES BIT(2) +#define IBATP_TH_DET BIT(1) +#define IBATP_TH_RES BIT(0) +#define INT7_ALL (PROCHOT_DET |\ + PROCHOT_RES |\ + VACP_DET |\ + VACP_RES |\ + VACP_TH_DET |\ + VACP_TH_RES |\ + IACP_TH_DET |\ + IACP_THE_RES |\ + THERM_TH_DET |\ + THERM_TH_RES |\ + IBATM_TH_DET |\ + IBATM_TH_RES |\ + IBATP_TH_DET |\ + IBATP_TH_RES) + +/* SYSTEM_CTRL_SET*/ +#define MONRST BIT(6) +#define ALMRST BIT(5) +#define CHGRST BIT(4) +#define OTPLD BIT(1) +#define ALLRST BIT(0) + +/* F_BATTEMP */ +#define ROOM 0x0 +#define HOT1 0x1 +#define HOT2 0x2 +#define HOT3 0x3 +#define COLD1 0x4 +#define COLD2 0x5 +#define TEMP_DIS 0x6 +#define BATT_OPEN 0x7 + +#endif -- GitLab From 72073aa1e262607126cb4025bd8f08044f677ab6 Mon Sep 17 00:00:00 2001 From: Matti Vaittinen Date: Fri, 8 May 2020 18:51:42 +0300 
Subject: [PATCH 0332/1971] power: supply: Fix Kconfig help text indentiation Indent the help text as explained in Documentation/process/coding-style.rst Signed-off-by: Matti Vaittinen Signed-off-by: Sebastian Reichel --- drivers/power/supply/Kconfig | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig index d058f19922ad1..6343e2ead1ab7 100644 --- a/drivers/power/supply/Kconfig +++ b/drivers/power/supply/Kconfig @@ -426,7 +426,7 @@ config CHARGER_PCF50633 tristate "NXP PCF50633 MBC" depends on MFD_PCF50633 help - Say Y to include support for NXP PCF50633 Main Battery Charger. + Say Y to include support for NXP PCF50633 Main Battery Charger. config BATTERY_RX51 tristate "Nokia RX-51 (N900) battery driver" @@ -620,15 +620,15 @@ config CHARGER_TPS65090 tristate "TPS65090 battery charger driver" depends on MFD_TPS65090 help - Say Y here to enable support for battery charging with TPS65090 - PMIC chips. + Say Y here to enable support for battery charging with TPS65090 + PMIC chips. config CHARGER_TPS65217 tristate "TPS65217 battery charger driver" depends on MFD_TPS65217 help - Say Y here to enable support for battery charging with TPS65217 - PMIC chips. + Say Y here to enable support for battery charging with TPS65217 + PMIC chips. config BATTERY_GAUGE_LTC2941 tristate "LTC2941/LTC2943 Battery Gauge Driver" @@ -682,16 +682,16 @@ config CHARGER_SC2731 tristate "Spreadtrum SC2731 charger driver" depends on MFD_SC27XX_PMIC || COMPILE_TEST help - Say Y here to enable support for battery charging with SC2731 - PMIC chips. + Say Y here to enable support for battery charging with SC2731 + PMIC chips. config FUEL_GAUGE_SC27XX tristate "Spreadtrum SC27XX fuel gauge driver" depends on MFD_SC27XX_PMIC || COMPILE_TEST depends on IIO help - Say Y here to enable support for fuel gauge with SC27XX - PMIC chips. + Say Y here to enable support for fuel gauge with SC27XX + PMIC chips. config CHARGER_UCS1002 tristate "Microchip UCS1002 USB Port Power Controller" @@ -709,9 +709,9 @@ config CHARGER_BD70528 select LINEAR_RANGES default n help - Say Y here to enable support for getting battery status - information and altering charger configurations from charger - block of the ROHM BD70528 Power Management IC. + Say Y here to enable support for getting battery status + information and altering charger configurations from charger + block of the ROHM BD70528 Power Management IC. config CHARGER_BD99954 tristate "ROHM bd99954 charger driver" -- GitLab From 75ffb420aa715a3275bde04b64b101e91fe1f2b5 Mon Sep 17 00:00:00 2001 From: Matti Vaittinen Date: Fri, 8 May 2020 18:52:45 +0300 Subject: [PATCH 0333/1971] power: supply: KConfig cleanup default n The "default n" is not needed as it is, well, default. Clean the KConfig by removing "default n". Signed-off-by: Matti Vaittinen Reviewed-by: Andy Shevchenko Signed-off-by: Sebastian Reichel --- drivers/power/supply/Kconfig | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig index 6343e2ead1ab7..81b567ee1638d 100644 --- a/drivers/power/supply/Kconfig +++ b/drivers/power/supply/Kconfig @@ -671,7 +671,6 @@ config CHARGER_RT9455 config CHARGER_CROS_USBPD tristate "ChromeOS EC based USBPD charger" depends on CROS_USBPD_NOTIFY - default n help Say Y here to enable ChromeOS EC based USBPD charger driver. 
This driver gets various bits of information about @@ -707,7 +706,6 @@ config CHARGER_BD70528 tristate "ROHM bd70528 charger driver" depends on MFD_ROHM_BD70528 select LINEAR_RANGES - default n help Say Y here to enable support for getting battery status information and altering charger configurations from charger -- GitLab From 7fdff6cc84de2efbea5a48c8d9cadb2bf5a7ba2d Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Sat, 9 May 2020 22:17:40 -0700 Subject: [PATCH 0334/1971] dt-bindings: touchscreen: Add CY8CTMA140 bindings This adds device tree bindings for the Cypress CY8CTMA140 touchscreen. Reviewed-by: Rob Herring Signed-off-by: Linus Walleij Link: https://lore.kernel.org/r/20200506123435.187432-1-linus.walleij@linaro.org Signed-off-by: Dmitry Torokhov --- .../input/touchscreen/cypress,cy8ctma140.yaml | 72 +++++++++++++++++++ 1 file changed, 72 insertions(+) create mode 100644 Documentation/devicetree/bindings/input/touchscreen/cypress,cy8ctma140.yaml diff --git a/Documentation/devicetree/bindings/input/touchscreen/cypress,cy8ctma140.yaml b/Documentation/devicetree/bindings/input/touchscreen/cypress,cy8ctma140.yaml new file mode 100644 index 0000000000000..8c73e52643124 --- /dev/null +++ b/Documentation/devicetree/bindings/input/touchscreen/cypress,cy8ctma140.yaml @@ -0,0 +1,72 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/input/touchscreen/cypress,cy8ctma140.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Cypress CY8CTMA140 series touchscreen controller bindings + +maintainers: + - Linus Walleij + +allOf: + - $ref: touchscreen.yaml# + +properties: + compatible: + const: cypress,cy8ctma140 + + reg: + const: 0x20 + + clock-frequency: + description: I2C client clock frequency, defined for host + minimum: 100000 + maximum: 400000 + + interrupts: + maxItems: 1 + + vcpin-supply: + description: Analog power supply regulator on VCPIN pin + + vdd-supply: + description: Digital power supply regulator on VDD pin + + touchscreen-inverted-x: true + touchscreen-inverted-y: true + touchscreen-size-x: true + touchscreen-size-y: true + touchscreen-swapped-x-y: true + touchscreen-max-pressure: true + +additionalProperties: false + +required: + - compatible + - reg + - interrupts + - touchscreen-size-x + - touchscreen-size-y + - touchscreen-max-pressure + +examples: +- | + #include + i2c { + #address-cells = <1>; + #size-cells = <0>; + touchscreen@20 { + compatible = "cypress,cy8ctma140"; + reg = <0x20>; + touchscreen-size-x = <480>; + touchscreen-size-y = <800>; + touchscreen-max-pressure = <255>; + interrupt-parent = <&gpio6>; + interrupts = <26 IRQ_TYPE_EDGE_FALLING>; + vdd-supply = <&ab8500_ldo_aux2_reg>; + vcpin-supply = <&ab8500_ldo_aux2_reg>; + }; + }; + +... -- GitLab From 3eb66d9f97f386262adbba9ce752ec80b85246ed Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Sat, 9 May 2020 22:18:18 -0700 Subject: [PATCH 0335/1971] Input: add driver for the Cypress CY8CTMA140 touchscreen This adds a new driver for the Cypress CY8CTMA140 touchscreen. This driver is inspired by out-of-tree code for the Samsung GT-S7710 mobile phone. I have tried to compare the structure and behaviour of this touchscreen to the existing CYTTSP and CYTTSP4 generics and it seems pretty different. It is also different in character from the cy8ctmg110_ts.c. It appears to rather be vaguely related to the Melfas MMS114 driver, yet distinctly different. 
Dmitry Torokhov rewrote the key scanning code during the submission process so the driver is a joint work. Signed-off-by: Linus Walleij Link: https://lore.kernel.org/r/20200506123435.187432-2-linus.walleij@linaro.org Signed-off-by: Dmitry Torokhov --- MAINTAINERS | 6 + drivers/input/touchscreen/Kconfig | 12 + drivers/input/touchscreen/Makefile | 1 + drivers/input/touchscreen/cy8ctma140.c | 353 +++++++++++++++++++++++++ 4 files changed, 372 insertions(+) create mode 100644 drivers/input/touchscreen/cy8ctma140.c diff --git a/MAINTAINERS b/MAINTAINERS index 8982c6e013b3c..faa5bfe8ece0c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4578,6 +4578,12 @@ T: git git://linuxtv.org/anttip/media_tree.git S: Maintained F: drivers/media/common/cypress_firmware* +CYPRESS CY8CTMA140 TOUCHSCREEN DRIVER +M: Linus Walleij +L: linux-input@vger.kernel.org +S: Maintained +F: drivers/input/touchscreen/cy8ctma140.c + CYTTSP TOUCHSCREEN DRIVER M: Ferruh Yigit L: linux-input@vger.kernel.org diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig index c071f7c407b61..35c867b2d9a77 100644 --- a/drivers/input/touchscreen/Kconfig +++ b/drivers/input/touchscreen/Kconfig @@ -201,6 +201,18 @@ config TOUCHSCREEN_CHIPONE_ICN8505 To compile this driver as a module, choose M here: the module will be called chipone_icn8505. +config TOUCHSCREEN_CY8CTMA140 + tristate "cy8ctma140 touchscreen" + depends on I2C + help + Say Y here if you have a Cypress CY8CTMA140 capacitive + touchscreen also just known as "TMA140" + + If unsure, say N. + + To compile this driver as a module, choose M here: the + module will be called cy8ctma140. + config TOUCHSCREEN_CY8CTMG110 tristate "cy8ctmg110 touchscreen" depends on I2C diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile index 94c6162409b37..30d1e1b424929 100644 --- a/drivers/input/touchscreen/Makefile +++ b/drivers/input/touchscreen/Makefile @@ -22,6 +22,7 @@ obj-$(CONFIG_TOUCHSCREEN_BU21013) += bu21013_ts.o obj-$(CONFIG_TOUCHSCREEN_BU21029) += bu21029_ts.o obj-$(CONFIG_TOUCHSCREEN_CHIPONE_ICN8318) += chipone_icn8318.o obj-$(CONFIG_TOUCHSCREEN_CHIPONE_ICN8505) += chipone_icn8505.o +obj-$(CONFIG_TOUCHSCREEN_CY8CTMA140) += cy8ctma140.o obj-$(CONFIG_TOUCHSCREEN_CY8CTMG110) += cy8ctmg110_ts.o obj-$(CONFIG_TOUCHSCREEN_CYTTSP_CORE) += cyttsp_core.o obj-$(CONFIG_TOUCHSCREEN_CYTTSP_I2C) += cyttsp_i2c.o cyttsp_i2c_common.o diff --git a/drivers/input/touchscreen/cy8ctma140.c b/drivers/input/touchscreen/cy8ctma140.c new file mode 100644 index 0000000000000..a9be29139cbff --- /dev/null +++ b/drivers/input/touchscreen/cy8ctma140.c @@ -0,0 +1,353 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Driver for Cypress CY8CTMA140 (TMA140) touchscreen + * (C) 2020 Linus Walleij + * (C) 2007 Cypress + * (C) 2007 Google, Inc. + * + * Inspired by the tma140_skomer.c driver in the Samsung GT-S7710 code + * drop. The GT-S7710 is codenamed "Skomer", the code also indicates + * that the same touchscreen was used in a product called "Lucas". + * + * The code drop for GT-S7710 also contains a firmware downloader and + * 15 (!) versions of the firmware drop from Cypress. But here we assume + * the firmware got downloaded to the touchscreen flash successfully and + * just use it to read the fingers. The shipped vendor driver does the + * same. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CY8CTMA140_NAME "cy8ctma140" + +#define CY8CTMA140_MAX_FINGERS 4 + +#define CY8CTMA140_GET_FINGERS 0x00 +#define CY8CTMA140_GET_FW_INFO 0x19 + +/* This message also fits some bytes for touchkeys, if used */ +#define CY8CTMA140_PACKET_SIZE 31 + +#define CY8CTMA140_INVALID_BUFFER_BIT 5 + +struct cy8ctma140 { + struct input_dev *input; + struct touchscreen_properties props; + struct device *dev; + struct i2c_client *client; + struct regulator_bulk_data regulators[2]; + u8 prev_fingers; + u8 prev_f1id; + u8 prev_f2id; +}; + +static void cy8ctma140_report(struct cy8ctma140 *ts, u8 *data, int n_fingers) +{ + static const u8 contact_offsets[] = { 0x03, 0x09, 0x10, 0x16 }; + u8 *buf; + u16 x, y; + u8 w; + u8 id; + int slot; + int i; + + for (i = 0; i < n_fingers; i++) { + buf = &data[contact_offsets[i]]; + + /* + * Odd contacts have contact ID in the lower nibble of + * the preceding byte, whereas even contacts have it in + * the upper nibble of the following byte. + */ + id = i % 2 ? buf[-1] & 0x0f : buf[5] >> 4; + slot = input_mt_get_slot_by_key(ts->input, id); + if (slot < 0) + continue; + + x = get_unaligned_be16(buf); + y = get_unaligned_be16(buf + 2); + w = buf[4]; + + dev_dbg(ts->dev, "finger %d: ID %02x (%d, %d) w: %d\n", + slot, id, x, y, w); + + input_mt_slot(ts->input, slot); + input_mt_report_slot_state(ts->input, MT_TOOL_FINGER, true); + touchscreen_report_pos(ts->input, &ts->props, x, y, true); + input_report_abs(ts->input, ABS_MT_TOUCH_MAJOR, w); + } + + input_mt_sync_frame(ts->input); + input_sync(ts->input); +} + +static irqreturn_t cy8ctma140_irq_thread(int irq, void *d) +{ + struct cy8ctma140 *ts = d; + u8 cmdbuf[] = { CY8CTMA140_GET_FINGERS }; + u8 buf[CY8CTMA140_PACKET_SIZE]; + struct i2c_msg msg[] = { + { + .addr = ts->client->addr, + .flags = 0, + .len = sizeof(cmdbuf), + .buf = cmdbuf, + }, { + .addr = ts->client->addr, + .flags = I2C_M_RD, + .len = sizeof(buf), + .buf = buf, + }, + }; + u8 n_fingers; + int ret; + + ret = i2c_transfer(ts->client->adapter, msg, ARRAY_SIZE(msg)); + if (ret != ARRAY_SIZE(msg)) { + if (ret < 0) + dev_err(ts->dev, "error reading message: %d\n", ret); + else + dev_err(ts->dev, "wrong number of messages\n"); + goto out; + } + + if (buf[1] & BIT(CY8CTMA140_INVALID_BUFFER_BIT)) { + dev_dbg(ts->dev, "invalid event\n"); + goto out; + } + + n_fingers = buf[2] & 0x0f; + if (n_fingers > CY8CTMA140_MAX_FINGERS) { + dev_err(ts->dev, "unexpected number of fingers: %d\n", + n_fingers); + goto out; + } + + cy8ctma140_report(ts, buf, n_fingers); + +out: + return IRQ_HANDLED; +} + +static int cy8ctma140_init(struct cy8ctma140 *ts) +{ + u8 addr[1]; + u8 buf[5]; + int ret; + + addr[0] = CY8CTMA140_GET_FW_INFO; + ret = i2c_master_send(ts->client, addr, 1); + if (ret < 0) { + dev_err(ts->dev, "error sending FW info message\n"); + return ret; + } + ret = i2c_master_recv(ts->client, buf, 5); + if (ret < 0) { + dev_err(ts->dev, "error receiving FW info message\n"); + return ret; + } + if (ret != 5) { + dev_err(ts->dev, "got only %d bytes\n", ret); + return -EIO; + } + + dev_dbg(ts->dev, "vendor %c%c, HW ID %.2d, FW ver %.4d\n", + buf[0], buf[1], buf[3], buf[4]); + + return 0; +} + +static int cy8ctma140_power_up(struct cy8ctma140 *ts) +{ + int error; + + error = regulator_bulk_enable(ARRAY_SIZE(ts->regulators), + ts->regulators); + if (error) { + dev_err(ts->dev, "failed to enable regulators\n"); + return error; + } + + msleep(250); 
+ + return 0; +} + +static void cy8ctma140_power_down(struct cy8ctma140 *ts) +{ + regulator_bulk_disable(ARRAY_SIZE(ts->regulators), + ts->regulators); +} + +/* Called from the registered devm action */ +static void cy8ctma140_power_off_action(void *d) +{ + struct cy8ctma140 *ts = d; + + cy8ctma140_power_down(ts); +} + +static int cy8ctma140_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct cy8ctma140 *ts; + struct input_dev *input; + struct device *dev = &client->dev; + int error; + + ts = devm_kzalloc(dev, sizeof(*ts), GFP_KERNEL); + if (!ts) + return -ENOMEM; + + input = devm_input_allocate_device(dev); + if (!input) + return -ENOMEM; + + ts->dev = dev; + ts->client = client; + ts->input = input; + + input_set_capability(input, EV_ABS, ABS_MT_POSITION_X); + input_set_capability(input, EV_ABS, ABS_MT_POSITION_Y); + /* One byte for width 0..255 so this is the limit */ + input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0); + /* + * This sets up event max/min capabilities and fuzz. + * Some DT properties are compulsory so we do not need + * to provide defaults for X/Y max or pressure max. + * + * We just initialize a very simple MT touchscreen here, + * some devices use the capability of this touchscreen to + * provide touchkeys, and in that case this needs to be + * extended to handle touchkey input. + * + * The firmware takes care of finger tracking and dropping + * invalid ranges. + */ + touchscreen_parse_properties(input, true, &ts->props); + input_abs_set_fuzz(input, ABS_MT_POSITION_X, 0); + input_abs_set_fuzz(input, ABS_MT_POSITION_Y, 0); + + error = input_mt_init_slots(input, CY8CTMA140_MAX_FINGERS, + INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED); + if (error) + return error; + + input->name = CY8CTMA140_NAME; + input->id.bustype = BUS_I2C; + input_set_drvdata(input, ts); + + /* + * VCPIN is the analog voltage supply + * VDD is the digital voltage supply + * since the voltage range of VDD overlaps that of VCPIN, + * many designs to just supply both with a single voltage + * source of ~3.3 V. + */ + ts->regulators[0].supply = "vcpin"; + ts->regulators[1].supply = "vdd"; + error = devm_regulator_bulk_get(dev, ARRAY_SIZE(ts->regulators), + ts->regulators); + if (error) { + if (error != -EPROBE_DEFER) + dev_err(dev, "Failed to get regulators %d\n", + error); + return error; + } + + error = cy8ctma140_power_up(ts); + if (error) + return error; + + error = devm_add_action_or_reset(dev, cy8ctma140_power_off_action, ts); + if (error) { + dev_err(dev, "failed to install power off handler\n"); + return error; + } + + error = devm_request_threaded_irq(dev, client->irq, + NULL, cy8ctma140_irq_thread, + IRQF_ONESHOT, CY8CTMA140_NAME, ts); + if (error) { + dev_err(dev, "irq %d busy? 
error %d\n", client->irq, error); + return error; + } + + error = cy8ctma140_init(ts); + if (error) + return error; + + error = input_register_device(input); + if (error) + return error; + + i2c_set_clientdata(client, ts); + + return 0; +} + +static int __maybe_unused cy8ctma140_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct cy8ctma140 *ts = i2c_get_clientdata(client); + + if (!device_may_wakeup(&client->dev)) + cy8ctma140_power_down(ts); + + return 0; +} + +static int __maybe_unused cy8ctma140_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct cy8ctma140 *ts = i2c_get_clientdata(client); + int error; + + if (!device_may_wakeup(&client->dev)) { + error = cy8ctma140_power_up(ts); + if (error) + return error; + } + + return 0; +} + +static SIMPLE_DEV_PM_OPS(cy8ctma140_pm, cy8ctma140_suspend, cy8ctma140_resume); + +static const struct i2c_device_id cy8ctma140_idtable[] = { + { CY8CTMA140_NAME, 0 }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(i2c, cy8ctma140_idtable); + +static const struct of_device_id cy8ctma140_of_match[] = { + { .compatible = "cypress,cy8ctma140", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, cy8ctma140_of_match); + +static struct i2c_driver cy8ctma140_driver = { + .driver = { + .name = CY8CTMA140_NAME, + .pm = &cy8ctma140_pm, + .of_match_table = cy8ctma140_of_match, + }, + .id_table = cy8ctma140_idtable, + .probe = cy8ctma140_probe, +}; +module_i2c_driver(cy8ctma140_driver); + +MODULE_AUTHOR("Linus Walleij "); +MODULE_DESCRIPTION("CY8CTMA140 TouchScreen Driver"); +MODULE_LICENSE("GPL v2"); -- GitLab From c32ea07a30630ace950e07ffe7a18bdcc25898e1 Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Sun, 29 Mar 2020 18:15:44 +0200 Subject: [PATCH 0336/1971] power: supply: smb347-charger: IRQSTAT_D is volatile Fix failure when USB cable is connected: smb347 2-006a: reading IRQSTAT_D failed Fixes: 1502cfe19bac ("smb347-charger: Fix battery status reporting logic for charger faults") Tested-by: David Heidelberg Signed-off-by: Dmitry Osipenko Signed-off-by: David Heidelberg Signed-off-by: Sebastian Reichel --- drivers/power/supply/smb347-charger.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/power/supply/smb347-charger.c b/drivers/power/supply/smb347-charger.c index c1d124b8be0c2..d102921b3ab2b 100644 --- a/drivers/power/supply/smb347-charger.c +++ b/drivers/power/supply/smb347-charger.c @@ -1138,6 +1138,7 @@ static bool smb347_volatile_reg(struct device *dev, unsigned int reg) switch (reg) { case IRQSTAT_A: case IRQSTAT_C: + case IRQSTAT_D: case IRQSTAT_E: case IRQSTAT_F: case STAT_A: -- GitLab From fa7cc725a343e305bef78e8d65be48f37d3f7720 Mon Sep 17 00:00:00 2001 From: David Heidelberg Date: Sun, 29 Mar 2020 18:15:45 +0200 Subject: [PATCH 0337/1971] power: supply: smb347-charger: Add delay before getting IRQSTAT This delay-fix is picked up from downstream driver, we measured that 25 - 35 ms delay ensure that we get required data. Tested on SMB347 on Nexus 7 2012. Otherwise IRQSTAT_E fails to provide correct information. 
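For context, the change amounts to sleeping in the threaded interrupt handler before the status registers are read; usleep_range() takes microseconds, so 25000-35000 corresponds to the 25-35 ms mentioned above. A minimal sketch of the resulting flow, using the register names from the diff that follows (the struct smb347_charger type is assumed from the driver and is not part of this hunk; the remaining IRQSTAT_* handling is elided):

	static irqreturn_t smb347_interrupt(int irq, void *data)
	{
		struct smb347_charger *smb = data;
		unsigned int stat_c;
		int ret;

		/* Give the chip 25-35 ms to latch IRQSTAT_E_*IN_UV_IRQ */
		usleep_range(25000, 35000);

		ret = regmap_read(smb->regmap, STAT_C, &stat_c);
		if (ret < 0)
			dev_warn(smb->dev, "reading STAT_C failed\n");

		/* ... rest of the IRQ status handling is unchanged ... */
		return IRQ_HANDLED;
	}
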
Signed-off-by: Dmitry Osipenko Signed-off-by: David Heidelberg Signed-off-by: Sebastian Reichel --- drivers/power/supply/smb347-charger.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/power/supply/smb347-charger.c b/drivers/power/supply/smb347-charger.c index d102921b3ab2b..f99026d81f2a5 100644 --- a/drivers/power/supply/smb347-charger.c +++ b/drivers/power/supply/smb347-charger.c @@ -8,6 +8,7 @@ * Mika Westerberg */ +#include #include #include #include @@ -708,6 +709,9 @@ static irqreturn_t smb347_interrupt(int irq, void *data) bool handled = false; int ret; + /* SMB347 it needs at least 20ms for setting IRQSTAT_E_*IN_UV_IRQ */ + usleep_range(25000, 35000); + ret = regmap_read(smb->regmap, STAT_C, &stat_c); if (ret < 0) { dev_warn(smb->dev, "reading STAT_C failed\n"); -- GitLab From 29e9eff40f5edc2e5de63b28e700e82ed2b6b95c Mon Sep 17 00:00:00 2001 From: Lubomir Rintel Date: Sat, 21 Dec 2019 08:17:51 +0100 Subject: [PATCH 0338/1971] power: supply: olpc_battery: fix the power supply name The framework is unhappy about them, because it uses the names in sysfs attributes: power_supply olpc-ac: hwmon: 'olpc-ac' is not a valid name attribute, please fix power_supply olpc-battery: hwmon: 'olpc-battery' is not a valid name attribute, please fix See also commit 648cd48c9e56 ("hwmon: Do not accept invalid name attributes") and commit 74d3b6419772 ("hwmon: Relax name attribute validation for new APIs"). Signed-off-by: Lubomir Rintel Signed-off-by: Sebastian Reichel --- arch/x86/platform/olpc/olpc-xo1-sci.c | 4 ++-- arch/x86/platform/olpc/olpc-xo15-sci.c | 4 ++-- drivers/platform/olpc/olpc-xo175-ec.c | 4 ++-- drivers/power/supply/olpc_battery.c | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/arch/x86/platform/olpc/olpc-xo1-sci.c b/arch/x86/platform/olpc/olpc-xo1-sci.c index 933dd4fe3a974..f03a6883dcc6d 100644 --- a/arch/x86/platform/olpc/olpc-xo1-sci.c +++ b/arch/x86/platform/olpc/olpc-xo1-sci.c @@ -52,7 +52,7 @@ static const char * const lid_wake_mode_names[] = { static void battery_status_changed(void) { - struct power_supply *psy = power_supply_get_by_name("olpc-battery"); + struct power_supply *psy = power_supply_get_by_name("olpc_battery"); if (psy) { power_supply_changed(psy); @@ -62,7 +62,7 @@ static void battery_status_changed(void) static void ac_status_changed(void) { - struct power_supply *psy = power_supply_get_by_name("olpc-ac"); + struct power_supply *psy = power_supply_get_by_name("olpc_ac"); if (psy) { power_supply_changed(psy); diff --git a/arch/x86/platform/olpc/olpc-xo15-sci.c b/arch/x86/platform/olpc/olpc-xo15-sci.c index 089413cd944e7..85f4638764d69 100644 --- a/arch/x86/platform/olpc/olpc-xo15-sci.c +++ b/arch/x86/platform/olpc/olpc-xo15-sci.c @@ -75,7 +75,7 @@ static struct kobj_attribute lid_wake_on_close_attr = static void battery_status_changed(void) { - struct power_supply *psy = power_supply_get_by_name("olpc-battery"); + struct power_supply *psy = power_supply_get_by_name("olpc_battery"); if (psy) { power_supply_changed(psy); @@ -85,7 +85,7 @@ static void battery_status_changed(void) static void ac_status_changed(void) { - struct power_supply *psy = power_supply_get_by_name("olpc-ac"); + struct power_supply *psy = power_supply_get_by_name("olpc_ac"); if (psy) { power_supply_changed(psy); diff --git a/drivers/platform/olpc/olpc-xo175-ec.c b/drivers/platform/olpc/olpc-xo175-ec.c index 83ed1fbf73cfd..5e1d14e35f20b 100644 --- a/drivers/platform/olpc/olpc-xo175-ec.c +++ b/drivers/platform/olpc/olpc-xo175-ec.c @@ -410,7 +410,7 @@ static 
void olpc_xo175_ec_complete(void *arg) dev_dbg(dev, "got event %.2x\n", byte); switch (byte) { case EVENT_AC_CHANGE: - psy = power_supply_get_by_name("olpc-ac"); + psy = power_supply_get_by_name("olpc_ac"); if (psy) { power_supply_changed(psy); power_supply_put(psy); @@ -420,7 +420,7 @@ static void olpc_xo175_ec_complete(void *arg) case EVENT_BATTERY_CRITICAL: case EVENT_BATTERY_SOC_CHANGE: case EVENT_BATTERY_ERROR: - psy = power_supply_get_by_name("olpc-battery"); + psy = power_supply_get_by_name("olpc_battery"); if (psy) { power_supply_changed(psy); power_supply_put(psy); diff --git a/drivers/power/supply/olpc_battery.c b/drivers/power/supply/olpc_battery.c index ad0e9e0edb3f8..e0476ec06601d 100644 --- a/drivers/power/supply/olpc_battery.c +++ b/drivers/power/supply/olpc_battery.c @@ -88,7 +88,7 @@ static enum power_supply_property olpc_ac_props[] = { }; static const struct power_supply_desc olpc_ac_desc = { - .name = "olpc-ac", + .name = "olpc_ac", .type = POWER_SUPPLY_TYPE_MAINS, .properties = olpc_ac_props, .num_properties = ARRAY_SIZE(olpc_ac_props), @@ -605,7 +605,7 @@ static const struct attribute_group *olpc_bat_sysfs_groups[] = { *********************************************************************/ static struct power_supply_desc olpc_bat_desc = { - .name = "olpc-battery", + .name = "olpc_battery", .get_property = olpc_bat_get_property, .use_for_apm = 1, }; -- GitLab From 7c26e6ef96c95105d6b1dc828902067df9b0f076 Mon Sep 17 00:00:00 2001 From: Fenghua Yu Date: Fri, 20 Dec 2019 16:05:55 -0800 Subject: [PATCH 0339/1971] mtd: rawnand: fsmc: Change to non-atomic bit operations No need to use expensive atomic change_bit() on dat[] and err_idx[]: 1. fsmc_bch8_correct_data() is called while mutex chip->lock is held 2. err_idx[] is a local variable. To avoid big endian concern due to type cast to unsigned long, directly change the bit in the specified byte instead of using non-atomic __change_bit(). Suggested-by: Peter Zijlstra Signed-off-by: Fenghua Yu Reviewed-by: Tony Luck Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/1576886755-9788-1-git-send-email-fenghua.yu@intel.com --- drivers/mtd/nand/raw/fsmc_nand.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c index a6964feeec77d..7e28311dffcb1 100644 --- a/drivers/mtd/nand/raw/fsmc_nand.c +++ b/drivers/mtd/nand/raw/fsmc_nand.c @@ -809,11 +809,12 @@ static int fsmc_bch8_correct_data(struct nand_chip *chip, u8 *dat, i = 0; while (num_err--) { - change_bit(0, (unsigned long *)&err_idx[i]); - change_bit(1, (unsigned long *)&err_idx[i]); + err_idx[i] ^= 3; if (err_idx[i] < chip->ecc.size * 8) { - change_bit(err_idx[i], (unsigned long *)dat); + int err = err_idx[i]; + + dat[err >> 3] ^= BIT(err & 7); i++; } } -- GitLab From 73ab61552e2ffb1b794b70545d04c1d2b60e89e7 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 10 Apr 2020 19:51:21 +0800 Subject: [PATCH 0340/1971] mtd: rawnand: ingenic: Make qi_lb60_ooblayout_ops static Fix sparse warning: drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c:105:32: warning: symbol 'qi_lb60_ooblayout_ops' was not declared. Should it be static? 
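As an aside, sparse raises this warning for any definition with external linkage that has no prior declaration; for symbols used in only one file, the idiomatic fix is internal linkage. A generic before/after sketch (the my_* names are hypothetical, not from the driver):

	/* before: external linkage, no declaration in any header -> sparse warns */
	const struct mtd_ooblayout_ops my_ooblayout_ops = {
		.ecc	= my_ooblayout_ecc,
		.free	= my_ooblayout_free,
	};

	/* after: internal linkage, the symbol stays file-local and sparse is happy */
	static const struct mtd_ooblayout_ops my_ooblayout_ops = {
		.ecc	= my_ooblayout_ecc,
		.free	= my_ooblayout_free,
	};
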
Reported-by: Hulk Robot Signed-off-by: YueHaibing Acked-by: Paul Cercueil Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200410115121.11852-1-yuehaibing@huawei.com --- drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c index 935c4902ada7b..e7bd845fdbf5f 100644 --- a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c +++ b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c @@ -102,7 +102,7 @@ static int qi_lb60_ooblayout_free(struct mtd_info *mtd, int section, return 0; } -const struct mtd_ooblayout_ops qi_lb60_ooblayout_ops = { +static const struct mtd_ooblayout_ops qi_lb60_ooblayout_ops = { .ecc = qi_lb60_ooblayout_ecc, .free = qi_lb60_ooblayout_free, }; -- GitLab From fb0f6f331e27bc9d0e53f4e5f0cfe61c50e40cf5 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 10 Apr 2020 19:52:28 +0800 Subject: [PATCH 0341/1971] mtd: rawnand: cadence: Make cadence_nand_attach_chip static Fix sparse warning: drivers/mtd/nand/raw/cadence-nand-controller.c:2595:5: warning: symbol 'cadence_nand_attach_chip' was not declared. Should it be static? Reported-by: Hulk Robot Signed-off-by: YueHaibing Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200410115228.30440-1-yuehaibing@huawei.com --- drivers/mtd/nand/raw/cadence-nand-controller.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c index efddc5c68afb2..acc0a24e58166 100644 --- a/drivers/mtd/nand/raw/cadence-nand-controller.c +++ b/drivers/mtd/nand/raw/cadence-nand-controller.c @@ -2592,7 +2592,7 @@ cadence_nand_setup_data_interface(struct nand_chip *chip, int chipnr, return 0; } -int cadence_nand_attach_chip(struct nand_chip *chip) +static int cadence_nand_attach_chip(struct nand_chip *chip) { struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); -- GitLab From 4ba246d7a365b8049892d852ab00d8bd707dc0e6 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 17 Apr 2020 18:11:29 +0800 Subject: [PATCH 0342/1971] mtd: rawnand: brcmnand: Remove unused including Remove including that don't need it. Signed-off-by: YueHaibing Acked-by: Florian Fainelli Signed-off-by: Kamal Dasu Reviewed-by: Kamal Dasu Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200417101129.35556-1-yuehaibing@huawei.com --- drivers/mtd/nand/raw/brcmnand/brcmnand.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c index e4e3ceeac38f8..57076c3d98dc9 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c @@ -4,7 +4,6 @@ */ #include -#include #include #include #include -- GitLab From ce446b4b2d803cf62de2e74b1fbda8758e4835bd Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Sat, 18 Apr 2020 21:42:17 +0200 Subject: [PATCH 0343/1971] mtd: rawnand: Take check_only into account ->exec_op() is passed a check_only argument that encodes when the controller should just check whether the operation is supported or not without executing it. Some controllers simply ignore this arguments, others don't but keep modifying some of the registers before returning. Let's fix all those drivers. 
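The fixes below reduce to one of two shapes: controllers that cannot usefully validate an operation simply return 0 when check_only is set, while controllers that forward the operation to the op parser must only skip the steps that touch hardware state (chip select, register writes) in that case. A rough sketch of the second shape, with hypothetical controller names:

	static int example_nfc_exec_op(struct nand_chip *chip,
				       const struct nand_operation *op,
				       bool check_only)
	{
		/* Never modify controller or chip state during a dry run */
		if (!check_only)
			example_nfc_select_target(chip, op->cs);

		return nand_op_parser_exec_op(chip, &example_nfc_op_parser,
					      op, check_only);
	}
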
Signed-off-by: Boris Brezillon Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200418194217.1016060-1-boris.brezillon@collabora.com --- drivers/mtd/nand/raw/cadence-nand-controller.c | 8 +++++--- drivers/mtd/nand/raw/fsmc_nand.c | 3 +++ drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c | 3 +++ drivers/mtd/nand/raw/marvell_nand.c | 3 ++- drivers/mtd/nand/raw/meson_nand.c | 3 +++ drivers/mtd/nand/raw/mxic_nand.c | 3 +++ drivers/mtd/nand/raw/nandsim.c | 3 +++ drivers/mtd/nand/raw/stm32_fmc2_nand.c | 6 +++--- drivers/mtd/nand/raw/sunxi_nand.c | 3 ++- drivers/mtd/nand/raw/tegra_nand.c | 4 +++- drivers/mtd/nand/raw/vf610_nfc.c | 4 +++- 11 files changed, 33 insertions(+), 10 deletions(-) diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c index acc0a24e58166..e7abb15c72536 100644 --- a/drivers/mtd/nand/raw/cadence-nand-controller.c +++ b/drivers/mtd/nand/raw/cadence-nand-controller.c @@ -2223,10 +2223,12 @@ static int cadence_nand_exec_op(struct nand_chip *chip, const struct nand_operation *op, bool check_only) { - int status = cadence_nand_select_target(chip); + if (!check_only) { + int status = cadence_nand_select_target(chip); - if (status) - return status; + if (status) + return status; + } return nand_op_parser_exec_op(chip, &cadence_nand_op_parser, op, check_only); diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c index 7e28311dffcb1..31dc9fd0a94d9 100644 --- a/drivers/mtd/nand/raw/fsmc_nand.c +++ b/drivers/mtd/nand/raw/fsmc_nand.c @@ -608,6 +608,9 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op, unsigned int op_id; int i; + if (check_only) + return 0; + pr_debug("Executing operation [%d instructions]:\n", op->ninstrs); for (op_id = 0; op_id < op->ninstrs; op_id++) { diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index 53b00c841aec7..cc4cb190968e5 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -2408,6 +2408,9 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip, struct completion *completion; unsigned long to; + if (check_only) + return 0; + this->ntransfers = 0; for (i = 0; i < GPMI_MAX_TRANSFERS; i++) this->transfers[i].direction = DMA_NONE; diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index 179f0ca585f85..343001250ccca 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c @@ -2107,7 +2107,8 @@ static int marvell_nfc_exec_op(struct nand_chip *chip, { struct marvell_nfc *nfc = to_marvell_nfc(chip->controller); - marvell_nfc_select_target(chip, op->cs); + if (!check_only) + marvell_nfc_select_target(chip, op->cs); if (nfc->caps->is_nfcv2) return nand_op_parser_exec_op(chip, &marvell_nfcv2_op_parser, diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c index f6fb5c0e6255c..e961f7bebf0a8 100644 --- a/drivers/mtd/nand/raw/meson_nand.c +++ b/drivers/mtd/nand/raw/meson_nand.c @@ -899,6 +899,9 @@ static int meson_nfc_exec_op(struct nand_chip *nand, u32 op_id, delay_idle, cmd; int i; + if (check_only) + return 0; + meson_nfc_select_chip(nand, op->cs); for (op_id = 0; op_id < op->ninstrs; op_id++) { instr = &op->instrs[op_id]; diff --git a/drivers/mtd/nand/raw/mxic_nand.c b/drivers/mtd/nand/raw/mxic_nand.c index ed7a4e021bf58..5a5a5b3b546db 100644 --- a/drivers/mtd/nand/raw/mxic_nand.c +++ b/drivers/mtd/nand/raw/mxic_nand.c @@ -393,6 +393,9 
@@ static int mxic_nfc_exec_op(struct nand_chip *chip, int ret = 0; unsigned int op_id; + if (check_only) + return 0; + mxic_nfc_cs_enable(nfc); init_completion(&nfc->complete); for (op_id = 0; op_id < op->ninstrs; op_id++) { diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c index 1de03bb34e840..23cda67a3f53a 100644 --- a/drivers/mtd/nand/raw/nandsim.c +++ b/drivers/mtd/nand/raw/nandsim.c @@ -2144,6 +2144,9 @@ static int ns_exec_op(struct nand_chip *chip, const struct nand_operation *op, const struct nand_op_instr *instr = NULL; struct nandsim *ns = nand_get_controller_data(chip); + if (check_only) + return 0; + ns->lines.ce = 1; for (op_id = 0; op_id < op->ninstrs; op_id++) { diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c index b6d45cd911aed..46b7d04e2c874 100644 --- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c +++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c @@ -1365,13 +1365,13 @@ static int stm32_fmc2_exec_op(struct nand_chip *chip, unsigned int op_id, i; int ret; + if (check_only) + return 0; + ret = stm32_fmc2_select_chip(chip, op->cs); if (ret) return ret; - if (check_only) - return ret; - for (op_id = 0; op_id < op->ninstrs; op_id++) { instr = &op->instrs[op_id]; diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c index 5f3e40b79fb16..18ac0b36abfa4 100644 --- a/drivers/mtd/nand/raw/sunxi_nand.c +++ b/drivers/mtd/nand/raw/sunxi_nand.c @@ -1907,7 +1907,8 @@ static int sunxi_nfc_exec_op(struct nand_chip *nand, struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand); const struct nand_op_parser *parser; - sunxi_nfc_select_chip(nand, op->cs); + if (!check_only) + sunxi_nfc_select_chip(nand, op->cs); if (sunxi_nand->sels[op->cs].rb >= 0) parser = &sunxi_nfc_op_parser; diff --git a/drivers/mtd/nand/raw/tegra_nand.c b/drivers/mtd/nand/raw/tegra_nand.c index 3cc9a4c41443a..6a255ba0f2881 100644 --- a/drivers/mtd/nand/raw/tegra_nand.c +++ b/drivers/mtd/nand/raw/tegra_nand.c @@ -467,7 +467,9 @@ static int tegra_nand_exec_op(struct nand_chip *chip, const struct nand_operation *op, bool check_only) { - tegra_nand_select_target(chip, op->cs); + if (!check_only) + tegra_nand_select_target(chip, op->cs); + return nand_op_parser_exec_op(chip, &tegra_nand_op_parser, op, check_only); } diff --git a/drivers/mtd/nand/raw/vf610_nfc.c b/drivers/mtd/nand/raw/vf610_nfc.c index 6b399a75f9aec..bd9e16de78a29 100644 --- a/drivers/mtd/nand/raw/vf610_nfc.c +++ b/drivers/mtd/nand/raw/vf610_nfc.c @@ -502,7 +502,9 @@ static int vf610_nfc_exec_op(struct nand_chip *chip, const struct nand_operation *op, bool check_only) { - vf610_nfc_select_target(chip, op->cs); + if (!check_only) + vf610_nfc_select_target(chip, op->cs); + return nand_op_parser_exec_op(chip, &vf610_nfc_op_parser, op, check_only); } -- GitLab From 5756f2e8dad46eba6e2d3e530243b8eff4dd5a42 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Tue, 17 Mar 2020 16:18:21 +0900 Subject: [PATCH 0344/1971] mtd: rawnand: denali: add more delays before latching incoming data The Denali IP have several registers to specify how many clock cycles should be waited between falling/rising signals. You can improve the NAND access performance by programming these registers with optimized values. Because struct nand_sdr_timings represents the device requirement in pico seconds, denali_setup_data_interface() computes the register values by dividing the device timings with the clock period. Marek Vasut reported this driver in the latest kernel does not work on his SOCFPGA board. 
(The on-board NAND chip is mode 5) The suspicious parameter is acc_clks, so this commit relaxes it. The Denali NAND Flash Memory Controller User's Guide describes this register as follows: acc_clks signifies the number of bus interface clk_x clock cycles, controller should wait from read enable going low to sending out a strobe of clk_x for capturing of incoming data. Currently, acc_clks is calculated only based on tREA, the delay on the chip side. This does not include additional delays that come from the data path on the PCB and in the SoC, load capacity of the pins, etc. This relatively becomes a big factor on faster timing modes like mode 5. Before supporting the ->setup_data_interface() hook (e.g. Linux 4.12), the Denali driver hacks acc_clks in a couple of ways [1] [2] to support the timing mode 5. We would not go back to the hard-coded acc_clks, but we need to include this factor into the delay somehow. Let's say the amount of the additional delay is 10000 pico sec. In the new calculation, acc_clks is determined by timings->tREA_max + data_setup_on_host. Also, prolong the RE# low period to make sure the data hold is met. Finally, re-center the data latch timing for extra safety. [1] https://github.com/torvalds/linux/blob/v4.12/drivers/mtd/nand/denali.c#L276 [2] https://github.com/torvalds/linux/blob/v4.12/drivers/mtd/nand/denali.c#L282 Reported-by: Marek Vasut Signed-off-by: Masahiro Yamada Tested-by: Marek Vasut Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200317071821.9916-1-yamada.masahiro@socionext.com --- drivers/mtd/nand/raw/denali.c | 45 ++++++++++++++++++++++++++--------- 1 file changed, 34 insertions(+), 11 deletions(-) diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c index 6a6c919b25698..2fcd2baf6e352 100644 --- a/drivers/mtd/nand/raw/denali.c +++ b/drivers/mtd/nand/raw/denali.c @@ -764,6 +764,7 @@ static int denali_write_page(struct nand_chip *chip, const u8 *buf, static int denali_setup_data_interface(struct nand_chip *chip, int chipnr, const struct nand_data_interface *conf) { + static const unsigned int data_setup_on_host = 10000; struct denali_controller *denali = to_denali_controller(chip); struct denali_chip_sel *sel; const struct nand_sdr_timings *timings; @@ -796,15 +797,6 @@ static int denali_setup_data_interface(struct nand_chip *chip, int chipnr, sel = &to_denali_chip(chip)->sels[chipnr]; - /* tREA -> ACC_CLKS */ - acc_clks = DIV_ROUND_UP(timings->tREA_max, t_x); - acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE); - - tmp = ioread32(denali->reg + ACC_CLKS); - tmp &= ~ACC_CLKS__VALUE; - tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks); - sel->acc_clks = tmp; - /* tRWH -> RE_2_WE */ re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_x); re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE); @@ -862,14 +854,45 @@ static int denali_setup_data_interface(struct nand_chip *chip, int chipnr, tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi); sel->rdwr_en_hi_cnt = tmp; - /* tRP, tWP -> RDWR_EN_LO_CNT */ + /* + * tREA -> ACC_CLKS + * tRP, tWP, tRHOH, tRC, tWC -> RDWR_EN_LO_CNT + */ + + /* + * Determine the minimum of acc_clks to meet the setup timing when + * capturing the incoming data. + * + * The delay on the chip side is well-defined as tREA, but we need to + * take additional delay into account. This includes a certain degree + * of unknowledge, such as signal propagation delays on the PCB and + * in the SoC, load capacity of the I/O pins, etc. 
+ */ + acc_clks = DIV_ROUND_UP(timings->tREA_max + data_setup_on_host, t_x); + + /* Determine the minimum of rdwr_en_lo_cnt from RE#/WE# pulse width */ rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min), t_x); + + /* Extend rdwr_en_lo to meet the data hold timing */ + rdwr_en_lo = max_t(int, rdwr_en_lo, + acc_clks - timings->tRHOH_min / t_x); + + /* Extend rdwr_en_lo to meet the requirement for RE#/WE# cycle time */ rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min), t_x); - rdwr_en_lo_hi = max_t(int, rdwr_en_lo_hi, mult_x); rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi); rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE); + /* Center the data latch timing for extra safety */ + acc_clks = (acc_clks + rdwr_en_lo + + DIV_ROUND_UP(timings->tRHOH_min, t_x)) / 2; + acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE); + + tmp = ioread32(denali->reg + ACC_CLKS); + tmp &= ~ACC_CLKS__VALUE; + tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks); + sel->acc_clks = tmp; + tmp = ioread32(denali->reg + RDWR_EN_LO_CNT); tmp &= ~RDWR_EN_LO_CNT__VALUE; tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo); -- GitLab From 15770370df4d51bd5ca408f0b8c46c90098d3300 Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Sun, 19 Apr 2020 21:30:34 +0200 Subject: [PATCH 0345/1971] mtd: rawnand: au1550nd: Stop using IO_ADDR_{R, W} in au_{read, write}_buf[16]() We are about to re-use those for the exec_op() implementation which will not rely on au1550_hwcontrol(). Let's patch those helpers to simply use the iomem address stored in the context. Signed-off-by: Boris Brezillon Reviewed-by: Miquel Raynal Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200419193037.1544035-2-boris.brezillon@collabora.com --- drivers/mtd/nand/raw/au1550nd.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/drivers/mtd/nand/raw/au1550nd.c b/drivers/mtd/nand/raw/au1550nd.c index 75eb3e97fae34..2f8004f203499 100644 --- a/drivers/mtd/nand/raw/au1550nd.c +++ b/drivers/mtd/nand/raw/au1550nd.c @@ -23,6 +23,11 @@ struct au1550nd_ctx { void (*write_byte)(struct nand_chip *, u_char); }; +static struct au1550nd_ctx *chip_to_au_ctx(struct nand_chip *this) +{ + return container_of(this, struct au1550nd_ctx, chip); +} + /** * au_read_byte - read one byte from the chip * @this: NAND chip object @@ -85,10 +90,11 @@ static void au_write_byte16(struct nand_chip *this, u_char byte) */ static void au_write_buf(struct nand_chip *this, const u_char *buf, int len) { + struct au1550nd_ctx *ctx = chip_to_au_ctx(this); int i; for (i = 0; i < len; i++) { - writeb(buf[i], this->legacy.IO_ADDR_W); + writeb(buf[i], ctx->base + MEM_STNAND_DATA); wmb(); /* drain writebuffer */ } } @@ -103,10 +109,11 @@ static void au_write_buf(struct nand_chip *this, const u_char *buf, int len) */ static void au_read_buf(struct nand_chip *this, u_char *buf, int len) { + struct au1550nd_ctx *ctx = chip_to_au_ctx(this); int i; for (i = 0; i < len; i++) { - buf[i] = readb(this->legacy.IO_ADDR_R); + buf[i] = readb(ctx->base + MEM_STNAND_DATA); wmb(); /* drain writebuffer */ } } @@ -121,12 +128,13 @@ static void au_read_buf(struct nand_chip *this, u_char *buf, int len) */ static void au_write_buf16(struct nand_chip *this, const u_char *buf, int len) { + struct au1550nd_ctx *ctx = chip_to_au_ctx(this); int i; u16 *p = (u16 *) buf; len >>= 1; for (i = 0; i < len; i++) { - writew(p[i], this->legacy.IO_ADDR_W); + writew(p[i], ctx->base + MEM_STNAND_DATA); wmb(); /* drain writebuffer */ } @@ -142,12 
+150,13 @@ static void au_write_buf16(struct nand_chip *this, const u_char *buf, int len) */ static void au_read_buf16(struct nand_chip *this, u_char *buf, int len) { + struct au1550nd_ctx *ctx = chip_to_au_ctx(this); int i; u16 *p = (u16 *) buf; len >>= 1; for (i = 0; i < len; i++) { - p[i] = readw(this->legacy.IO_ADDR_R); + p[i] = readw(ctx->base + MEM_STNAND_DATA); wmb(); /* drain writebuffer */ } } -- GitLab From a67537ef37d8d268adee17d7bab30ea5b8d76f52 Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Sun, 19 Apr 2020 21:30:35 +0200 Subject: [PATCH 0346/1971] mtd: rawnand: au1550nd: Implement exec_op() So we can later get rid of the legacy interface implementation. Signed-off-by: Boris Brezillon Reviewed-by: Miquel Raynal Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200419193037.1544035-3-boris.brezillon@collabora.com --- drivers/mtd/nand/raw/au1550nd.c | 110 ++++++++++++++++++++++++++++++++ 1 file changed, 110 insertions(+) diff --git a/drivers/mtd/nand/raw/au1550nd.c b/drivers/mtd/nand/raw/au1550nd.c index 2f8004f203499..79bf9fbeeb222 100644 --- a/drivers/mtd/nand/raw/au1550nd.c +++ b/drivers/mtd/nand/raw/au1550nd.c @@ -16,6 +16,7 @@ struct au1550nd_ctx { + struct nand_controller controller; struct nand_chip chip; int cs; @@ -382,6 +383,112 @@ static int find_nand_cs(unsigned long nand_base) return -ENODEV; } +static int au1550nd_waitrdy(struct nand_chip *this, unsigned int timeout_ms) +{ + unsigned long timeout_jiffies = jiffies; + + timeout_jiffies += msecs_to_jiffies(timeout_ms) + 1; + do { + if (alchemy_rdsmem(AU1000_MEM_STSTAT) & 0x1) + return 0; + + usleep_range(10, 100); + } while (time_before(jiffies, timeout_jiffies)); + + return -ETIMEDOUT; +} + +static int au1550nd_exec_instr(struct nand_chip *this, + const struct nand_op_instr *instr) +{ + struct au1550nd_ctx *ctx = chip_to_au_ctx(this); + unsigned int i; + int ret = 0; + + switch (instr->type) { + case NAND_OP_CMD_INSTR: + writeb(instr->ctx.cmd.opcode, + ctx->base + MEM_STNAND_CMD); + /* Drain the writebuffer */ + wmb(); + break; + + case NAND_OP_ADDR_INSTR: + for (i = 0; i < instr->ctx.addr.naddrs; i++) { + writeb(instr->ctx.addr.addrs[i], + ctx->base + MEM_STNAND_ADDR); + /* Drain the writebuffer */ + wmb(); + } + break; + + case NAND_OP_DATA_IN_INSTR: + if ((this->options & NAND_BUSWIDTH_16) && + !instr->ctx.data.force_8bit) + au_read_buf16(this, instr->ctx.data.buf.in, + instr->ctx.data.len); + else + au_read_buf(this, instr->ctx.data.buf.in, + instr->ctx.data.len); + break; + + case NAND_OP_DATA_OUT_INSTR: + if ((this->options & NAND_BUSWIDTH_16) && + !instr->ctx.data.force_8bit) + au_write_buf16(this, instr->ctx.data.buf.out, + instr->ctx.data.len); + else + au_write_buf(this, instr->ctx.data.buf.out, + instr->ctx.data.len); + break; + + case NAND_OP_WAITRDY_INSTR: + ret = au1550nd_waitrdy(this, instr->ctx.waitrdy.timeout_ms); + break; + default: + return -EINVAL; + } + + if (instr->delay_ns) + ndelay(instr->delay_ns); + + return ret; +} + +static int au1550nd_exec_op(struct nand_chip *this, + const struct nand_operation *op, + bool check_only) +{ + struct au1550nd_ctx *ctx = chip_to_au_ctx(this); + unsigned int i; + int ret; + + if (check_only) + return 0; + + /* assert (force assert) chip enable */ + alchemy_wrsmem((1 << (4 + ctx->cs)), AU1000_MEM_STNDCTL); + /* Drain the writebuffer */ + wmb(); + + for (i = 0; i < op->ninstrs; i++) { + ret = au1550nd_exec_instr(this, &op->instrs[i]); + if (ret) + break; + } + + /* deassert chip enable */ + alchemy_wrsmem(0, AU1000_MEM_STNDCTL); + /* 
Drain the writebuffer */ + wmb(); + + return ret; +} + +static const struct nand_controller_ops au1550nd_ops = { + .exec_op = au1550nd_exec_op, +}; + static int au1550nd_probe(struct platform_device *pdev) { struct au1550nd_platdata *pd; @@ -439,6 +546,9 @@ static int au1550nd_probe(struct platform_device *pdev) /* 30 us command delay time */ this->legacy.chip_delay = 30; + nand_controller_init(&ctx->controller); + ctx->controller.ops = &au1550nd_ops; + this->controller = &ctx->controller; this->ecc.mode = NAND_ECC_SOFT; this->ecc.algo = NAND_ECC_HAMMING; -- GitLab From 0383024f811aa469df258039807810fc3793a105 Mon Sep 17 00:00:00 2001 From: Jonathan Bakker Date: Mon, 4 May 2020 15:12:58 -0700 Subject: [PATCH 0347/1971] power: supply: max17040: Correct voltage reading According to the datasheet available at (1), the bottom four bits are always zero and the actual voltage is 1.25x this value in mV. Since the kernel API specifies that voltages should be in uV, it should report 1250x the shifted value. 1) https://datasheets.maximintegrated.com/en/ds/MAX17040-MAX17041.pdf Signed-off-by: Jonathan Bakker Signed-off-by: Sebastian Reichel --- drivers/power/supply/max17040_battery.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/power/supply/max17040_battery.c b/drivers/power/supply/max17040_battery.c index 8a1f0ee493aac..48aa44665e2f1 100644 --- a/drivers/power/supply/max17040_battery.c +++ b/drivers/power/supply/max17040_battery.c @@ -126,7 +126,7 @@ static void max17040_get_vcell(struct i2c_client *client) vcell = max17040_read_reg(client, MAX17040_VCELL); - chip->vcell = vcell; + chip->vcell = (vcell >> 4) * 1250; } static void max17040_get_soc(struct i2c_client *client) -- GitLab From 1e4724d0b7d1db3b7307fc95fb902a97f4a7b85a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Miros=C5=82aw?= Date: Mon, 4 May 2020 21:47:30 +0200 Subject: [PATCH 0348/1971] power: bq25890: use proper CURRENT_NOW property for I_BAT MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Charge Current is more apropriately reflected by CURRENT_NOW property (measured current) than CONSTANT_CURRENT_VOLTAGE (configured CC-phase current limit). Fix the reference and make the sign reflect direction of the current. 
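For illustration (not part of the change itself): the scaling is unchanged at 50 mA per ADC LSB, so a raw F_ICHGR reading of 20 is still reported with a magnitude of 20 * 50000 uA = 1000000 uA; only the sign is flipped so that the reported value encodes the direction of the current.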
Signed-off-by: Michał Mirosław Signed-off-by: Sebastian Reichel --- drivers/power/supply/bq25890_charger.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c index 20b9824ef5acd..08d31ed4bc278 100644 --- a/drivers/power/supply/bq25890_charger.c +++ b/drivers/power/supply/bq25890_charger.c @@ -427,15 +427,6 @@ static int bq25890_power_supply_get_property(struct power_supply *psy, val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE; break; - case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT: - ret = bq25890_field_read(bq, F_ICHGR); /* read measured value */ - if (ret < 0) - return ret; - - /* converted_val = ADC_val * 50mA (table 10.3.19) */ - val->intval = ret * 50000; - break; - case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX: val->intval = bq25890_find_val(bq->init_data.ichg, TBL_ICHG); break; @@ -471,6 +462,15 @@ static int bq25890_power_supply_get_property(struct power_supply *psy, val->intval = 2304000 + ret * 20000; break; + case POWER_SUPPLY_PROP_CURRENT_NOW: + ret = bq25890_field_read(bq, F_ICHGR); /* read measured value */ + if (ret < 0) + return ret; + + /* converted_val = ADC_val * 50mA (table 10.3.19) */ + val->intval = ret * -50000; + break; + default: return -EINVAL; } @@ -645,12 +645,12 @@ static const enum power_supply_property bq25890_power_supply_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_ONLINE, POWER_SUPPLY_PROP_HEALTH, - POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT, POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE, POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX, POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT, POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_CURRENT_NOW, }; static char *bq25890_charger_supplied_to[] = { -- GitLab From 21d90eda433f5d39930ca7133340225b8fb7940f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Miros=C5=82aw?= Date: Mon, 4 May 2020 21:47:45 +0200 Subject: [PATCH 0349/1971] power: bq25890: fix ADC mode configuration MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Datasheet describes two modes for reading ADC measurements: 1. continuous, 1 Hz - enabled and started by CONV_RATE bit 2. one-shot - triggered by CONV_START bit In continuous mode, CONV_START is read-only and signifies an ongoing conversion. Change the code to follow the datasheet and really disable continuous mode for power saving. 
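A minimal sketch of the resulting one-shot flow (illustrative only; it assumes the driver's existing bq25890_field_write() helper and rmap_fields[] array, and mirrors what the property getter below does around its lock):

	static int bq25890_oneshot_adc_conversion(struct bq25890_device *bq)
	{
		int ret, busy;

		/* CONV_START triggers a single conversion and reads back 1 while busy */
		ret = bq25890_field_write(bq, F_CONV_START, 1);
		if (ret < 0)
			return ret;

		/* Wait for the hardware to clear CONV_START (conversion finished) */
		return regmap_field_read_poll_timeout(bq->rmap_fields[F_CONV_START],
						      busy, !busy, 25000, 1000000);
	}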
Signed-off-by: Michał Mirosław Signed-off-by: Sebastian Reichel --- drivers/power/supply/bq25890_charger.c | 31 +++++++++++++++++++++----- 1 file changed, 26 insertions(+), 5 deletions(-) diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c index 08d31ed4bc278..c9f7877ded953 100644 --- a/drivers/power/supply/bq25890_charger.c +++ b/drivers/power/supply/bq25890_charger.c @@ -126,6 +126,7 @@ static const struct regmap_access_table bq25890_writeable_regs = { static const struct regmap_range bq25890_volatile_reg_ranges[] = { regmap_reg_range(0x00, 0x00), + regmap_reg_range(0x02, 0x02), regmap_reg_range(0x09, 0x09), regmap_reg_range(0x0b, 0x14), }; @@ -374,18 +375,38 @@ enum bq25890_chrg_fault { CHRG_FAULT_TIMER_EXPIRED, }; +static bool bq25890_is_adc_property(enum power_supply_property psp) +{ + switch (psp) { + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + case POWER_SUPPLY_PROP_CURRENT_NOW: + return true; + + default: + return false; + } +} + static int bq25890_power_supply_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { - int ret; struct bq25890_device *bq = power_supply_get_drvdata(psy); struct bq25890_state state; + bool do_adc_conv; + int ret; mutex_lock(&bq->lock); state = bq->state; + do_adc_conv = !state.online && bq25890_is_adc_property(psp); + if (do_adc_conv) + bq25890_field_write(bq, F_CONV_START, 1); mutex_unlock(&bq->lock); + if (do_adc_conv) + regmap_field_read_poll_timeout(bq->rmap_fields[F_CONV_START], + ret, !ret, 25000, 1000000); + switch (psp) { case POWER_SUPPLY_PROP_STATUS: if (!state.online) @@ -623,8 +644,8 @@ static int bq25890_hw_init(struct bq25890_device *bq) } } - /* Configure ADC for continuous conversions. This does not enable it. */ - ret = bq25890_field_write(bq, F_CONV_RATE, 1); + /* Configure ADC for continuous conversions when charging */ + ret = bq25890_field_write(bq, F_CONV_RATE, !!bq->state.online); if (ret < 0) { dev_dbg(bq->dev, "Config ADC failed %d\n", ret); return ret; @@ -966,7 +987,7 @@ static int bq25890_suspend(struct device *dev) * If charger is removed, while in suspend, make sure ADC is diabled * since it consumes slightly more power. */ - return bq25890_field_write(bq, F_CONV_START, 0); + return bq25890_field_write(bq, F_CONV_RATE, 0); } static int bq25890_resume(struct device *dev) @@ -982,7 +1003,7 @@ static int bq25890_resume(struct device *dev) /* Re-enable ADC only if charger is plugged in. */ if (bq->state.online) { - ret = bq25890_field_write(bq, F_CONV_START, 1); + ret = bq25890_field_write(bq, F_CONV_RATE, 1); if (ret < 0) goto unlock; } -- GitLab From 3b4df57bef695a41a23feb1017263be5f5f8d51c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Miros=C5=82aw?= Date: Mon, 4 May 2020 21:47:45 +0200 Subject: [PATCH 0350/1971] power: bq25890: update state on property read MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Edge interrupts from the charger may be lost or stuck in fault mode since probe(). Check if something changed everytime userspace wants some data. 
Signed-off-by: Michał Mirosław Signed-off-by: Sebastian Reichel --- drivers/power/supply/bq25890_charger.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c index c9f7877ded953..be9de68a3fc2f 100644 --- a/drivers/power/supply/bq25890_charger.c +++ b/drivers/power/supply/bq25890_charger.c @@ -387,6 +387,8 @@ static bool bq25890_is_adc_property(enum power_supply_property psp) } } +static irqreturn_t __bq25890_handle_irq(struct bq25890_device *bq); + static int bq25890_power_supply_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) @@ -397,6 +399,8 @@ static int bq25890_power_supply_get_property(struct power_supply *psy, int ret; mutex_lock(&bq->lock); + /* update state in case we lost an interrupt */ + __bq25890_handle_irq(bq); state = bq->state; do_adc_conv = !state.online && bq25890_is_adc_property(psp); if (do_adc_conv) -- GitLab From b302a0ae7205a3ccda238cb057224b5115a6115c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Miros=C5=82aw?= Date: Mon, 4 May 2020 21:47:46 +0200 Subject: [PATCH 0351/1971] power: bq25890: implement CHARGE_TYPE property MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Report charging type based on recently read state. Signed-off-by: Michał Mirosław Signed-off-by: Sebastian Reichel --- drivers/power/supply/bq25890_charger.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c index be9de68a3fc2f..6d8e0e2bfd8a0 100644 --- a/drivers/power/supply/bq25890_charger.c +++ b/drivers/power/supply/bq25890_charger.c @@ -427,6 +427,18 @@ static int bq25890_power_supply_get_property(struct power_supply *psy, break; + case POWER_SUPPLY_PROP_CHARGE_TYPE: + if (!state.online || state.chrg_status == STATUS_NOT_CHARGING || + state.chrg_status == STATUS_TERMINATION_DONE) + val->intval = POWER_SUPPLY_CHARGE_TYPE_NONE; + else if (state.chrg_status == STATUS_PRE_CHARGING) + val->intval = POWER_SUPPLY_CHARGE_TYPE_STANDARD; + else if (state.chrg_status == STATUS_FAST_CHARGING) + val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST; + else /* unreachable */ + val->intval = POWER_SUPPLY_CHARGE_TYPE_UNKNOWN; + break; + case POWER_SUPPLY_PROP_MANUFACTURER: val->strval = BQ25890_MANUFACTURER; break; @@ -668,6 +680,7 @@ static const enum power_supply_property bq25890_power_supply_props[] = { POWER_SUPPLY_PROP_MANUFACTURER, POWER_SUPPLY_PROP_MODEL_NAME, POWER_SUPPLY_PROP_STATUS, + POWER_SUPPLY_PROP_CHARGE_TYPE, POWER_SUPPLY_PROP_ONLINE, POWER_SUPPLY_PROP_HEALTH, POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, -- GitLab From c942656ddb3a0c248e0c53a2ebd4a36adaa46cbc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Miros=C5=82aw?= Date: Mon, 4 May 2020 21:47:46 +0200 Subject: [PATCH 0352/1971] power: bq25890: implement PRECHARGE_CURRENT property MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Report configured precharge current. 
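One note on the lookup below: it reuses TBL_ITERM for the precharge value, which relies on the assumption that IPRECHG and ITERM share the same 64 mA step over the 64 mA..1024 mA range on this chip; if that assumption did not hold, a dedicated range table would be needed.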
Signed-off-by: Michał Mirosław Signed-off-by: Sebastian Reichel --- drivers/power/supply/bq25890_charger.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c index 6d8e0e2bfd8a0..0c9c02088b523 100644 --- a/drivers/power/supply/bq25890_charger.c +++ b/drivers/power/supply/bq25890_charger.c @@ -486,6 +486,10 @@ static int bq25890_power_supply_get_property(struct power_supply *psy, val->intval = bq25890_find_val(bq->init_data.vreg, TBL_VREG); break; + case POWER_SUPPLY_PROP_PRECHARGE_CURRENT: + val->intval = bq25890_find_val(bq->init_data.iprechg, TBL_ITERM); + break; + case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT: val->intval = bq25890_find_val(bq->init_data.iterm, TBL_ITERM); break; @@ -686,6 +690,7 @@ static const enum power_supply_property bq25890_power_supply_props[] = { POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE, POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX, + POWER_SUPPLY_PROP_PRECHARGE_CURRENT, POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT, POWER_SUPPLY_PROP_VOLTAGE_NOW, POWER_SUPPLY_PROP_CURRENT_NOW, -- GitLab From 478efc79ee32a3f1227cda49295ef24cda8b114c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Miros=C5=82aw?= Date: Mon, 4 May 2020 21:47:47 +0200 Subject: [PATCH 0353/1971] power: bq25890: implement INPUT_CURRENT_LIMIT property MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Report REG00.IINLIM value as INPUT_CURRENT_LIMIT property. Signed-off-by: Michał Mirosław Signed-off-by: Sebastian Reichel --- drivers/power/supply/bq25890_charger.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c index 0c9c02088b523..77150667e36bc 100644 --- a/drivers/power/supply/bq25890_charger.c +++ b/drivers/power/supply/bq25890_charger.c @@ -254,6 +254,7 @@ enum bq25890_table_ids { /* range tables */ TBL_ICHG, TBL_ITERM, + TBL_IILIM, TBL_VREG, TBL_BOOSTV, TBL_SYSVMIN, @@ -294,6 +295,7 @@ static const union { /* TODO: BQ25896 has max ICHG 3008 mA */ [TBL_ICHG] = { .rt = {0, 5056000, 64000} }, /* uA */ [TBL_ITERM] = { .rt = {64000, 1024000, 64000} }, /* uA */ + [TBL_IILIM] = { .rt = {50000, 3200000, 50000} }, /* uA */ [TBL_VREG] = { .rt = {3840000, 4608000, 16000} }, /* uV */ [TBL_BOOSTV] = { .rt = {4550000, 5510000, 64000} }, /* uV */ [TBL_SYSVMIN] = { .rt = {3000000, 3700000, 100000} }, /* uV */ @@ -494,6 +496,14 @@ static int bq25890_power_supply_get_property(struct power_supply *psy, val->intval = bq25890_find_val(bq->init_data.iterm, TBL_ITERM); break; + case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT: + ret = bq25890_field_read(bq, F_IILIM); + if (ret < 0) + return ret; + + val->intval = bq25890_find_val(ret, TBL_IILIM); + break; + case POWER_SUPPLY_PROP_VOLTAGE_NOW: ret = bq25890_field_read(bq, F_SYSV); /* read measured value */ if (ret < 0) @@ -692,6 +702,7 @@ static const enum power_supply_property bq25890_power_supply_props[] = { POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX, POWER_SUPPLY_PROP_PRECHARGE_CURRENT, POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT, + POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT, POWER_SUPPLY_PROP_VOLTAGE_NOW, POWER_SUPPLY_PROP_CURRENT_NOW, }; -- GitLab From b1593f8a431ce99e4476c54372672e3e293ed29a Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Sun, 19 Apr 2020 21:30:36 +0200 Subject: [PATCH 0354/1971] mtd: rawnand: au1550nd: Get rid of the legacy interface implementation Now that exec_op() is implemented we can get rid 
of all other hooks. Signed-off-by: Boris Brezillon Reviewed-by: Miquel Raynal Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200419193037.1544035-4-boris.brezillon@collabora.com --- drivers/mtd/nand/raw/au1550nd.c | 264 -------------------------------- 1 file changed, 264 deletions(-) diff --git a/drivers/mtd/nand/raw/au1550nd.c b/drivers/mtd/nand/raw/au1550nd.c index 79bf9fbeeb222..ff8afd6562a2b 100644 --- a/drivers/mtd/nand/raw/au1550nd.c +++ b/drivers/mtd/nand/raw/au1550nd.c @@ -21,7 +21,6 @@ struct au1550nd_ctx { int cs; void __iomem *base; - void (*write_byte)(struct nand_chip *, u_char); }; static struct au1550nd_ctx *chip_to_au_ctx(struct nand_chip *this) @@ -29,58 +28,6 @@ static struct au1550nd_ctx *chip_to_au_ctx(struct nand_chip *this) return container_of(this, struct au1550nd_ctx, chip); } -/** - * au_read_byte - read one byte from the chip - * @this: NAND chip object - * - * read function for 8bit buswidth - */ -static u_char au_read_byte(struct nand_chip *this) -{ - u_char ret = readb(this->legacy.IO_ADDR_R); - wmb(); /* drain writebuffer */ - return ret; -} - -/** - * au_write_byte - write one byte to the chip - * @this: NAND chip object - * @byte: pointer to data byte to write - * - * write function for 8it buswidth - */ -static void au_write_byte(struct nand_chip *this, u_char byte) -{ - writeb(byte, this->legacy.IO_ADDR_W); - wmb(); /* drain writebuffer */ -} - -/** - * au_read_byte16 - read one byte endianness aware from the chip - * @this: NAND chip object - * - * read function for 16bit buswidth with endianness conversion - */ -static u_char au_read_byte16(struct nand_chip *this) -{ - u_char ret = (u_char) cpu_to_le16(readw(this->legacy.IO_ADDR_R)); - wmb(); /* drain writebuffer */ - return ret; -} - -/** - * au_write_byte16 - write one byte endianness aware to the chip - * @this: NAND chip object - * @byte: pointer to data byte to write - * - * write function for 16bit buswidth with endianness conversion - */ -static void au_write_byte16(struct nand_chip *this, u_char byte) -{ - writew(le16_to_cpu((u16) byte), this->legacy.IO_ADDR_W); - wmb(); /* drain writebuffer */ -} - /** * au_write_buf - write buffer to chip * @this: NAND chip object @@ -162,206 +109,6 @@ static void au_read_buf16(struct nand_chip *this, u_char *buf, int len) } } -/* Select the chip by setting nCE to low */ -#define NAND_CTL_SETNCE 1 -/* Deselect the chip by setting nCE to high */ -#define NAND_CTL_CLRNCE 2 -/* Select the command latch by setting CLE to high */ -#define NAND_CTL_SETCLE 3 -/* Deselect the command latch by setting CLE to low */ -#define NAND_CTL_CLRCLE 4 -/* Select the address latch by setting ALE to high */ -#define NAND_CTL_SETALE 5 -/* Deselect the address latch by setting ALE to low */ -#define NAND_CTL_CLRALE 6 - -static void au1550_hwcontrol(struct mtd_info *mtd, int cmd) -{ - struct nand_chip *this = mtd_to_nand(mtd); - struct au1550nd_ctx *ctx = container_of(this, struct au1550nd_ctx, - chip); - - switch (cmd) { - - case NAND_CTL_SETCLE: - this->legacy.IO_ADDR_W = ctx->base + MEM_STNAND_CMD; - break; - - case NAND_CTL_CLRCLE: - this->legacy.IO_ADDR_W = ctx->base + MEM_STNAND_DATA; - break; - - case NAND_CTL_SETALE: - this->legacy.IO_ADDR_W = ctx->base + MEM_STNAND_ADDR; - break; - - case NAND_CTL_CLRALE: - this->legacy.IO_ADDR_W = ctx->base + MEM_STNAND_DATA; - /* FIXME: Nobody knows why this is necessary, - * but it works only that way */ - udelay(1); - break; - - case NAND_CTL_SETNCE: - /* assert (force assert) chip enable */ - alchemy_wrsmem((1 << 
(4 + ctx->cs)), AU1000_MEM_STNDCTL); - break; - - case NAND_CTL_CLRNCE: - /* deassert chip enable */ - alchemy_wrsmem(0, AU1000_MEM_STNDCTL); - break; - } - - this->legacy.IO_ADDR_R = this->legacy.IO_ADDR_W; - - wmb(); /* Drain the writebuffer */ -} - -int au1550_device_ready(struct nand_chip *this) -{ - return (alchemy_rdsmem(AU1000_MEM_STSTAT) & 0x1) ? 1 : 0; -} - -/** - * au1550_select_chip - control -CE line - * Forbid driving -CE manually permitting the NAND controller to do this. - * Keeping -CE asserted during the whole sector reads interferes with the - * NOR flash and PCMCIA drivers as it causes contention on the static bus. - * We only have to hold -CE low for the NAND read commands since the flash - * chip needs it to be asserted during chip not ready time but the NAND - * controller keeps it released. - * - * @this: NAND chip object - * @chip: chipnumber to select, -1 for deselect - */ -static void au1550_select_chip(struct nand_chip *this, int chip) -{ -} - -/** - * au1550_command - Send command to NAND device - * @this: NAND chip object - * @command: the command to be sent - * @column: the column address for this command, -1 if none - * @page_addr: the page address for this command, -1 if none - */ -static void au1550_command(struct nand_chip *this, unsigned command, - int column, int page_addr) -{ - struct mtd_info *mtd = nand_to_mtd(this); - struct au1550nd_ctx *ctx = container_of(this, struct au1550nd_ctx, - chip); - int ce_override = 0, i; - unsigned long flags = 0; - - /* Begin command latch cycle */ - au1550_hwcontrol(mtd, NAND_CTL_SETCLE); - /* - * Write out the command to the device. - */ - if (command == NAND_CMD_SEQIN) { - int readcmd; - - if (column >= mtd->writesize) { - /* OOB area */ - column -= mtd->writesize; - readcmd = NAND_CMD_READOOB; - } else if (column < 256) { - /* First 256 bytes --> READ0 */ - readcmd = NAND_CMD_READ0; - } else { - column -= 256; - readcmd = NAND_CMD_READ1; - } - ctx->write_byte(this, readcmd); - } - ctx->write_byte(this, command); - - /* Set ALE and clear CLE to start address cycle */ - au1550_hwcontrol(mtd, NAND_CTL_CLRCLE); - - if (column != -1 || page_addr != -1) { - au1550_hwcontrol(mtd, NAND_CTL_SETALE); - - /* Serially input address */ - if (column != -1) { - /* Adjust columns for 16 bit buswidth */ - if (this->options & NAND_BUSWIDTH_16 && - !nand_opcode_8bits(command)) - column >>= 1; - ctx->write_byte(this, column); - } - if (page_addr != -1) { - ctx->write_byte(this, (u8)(page_addr & 0xff)); - - if (command == NAND_CMD_READ0 || - command == NAND_CMD_READ1 || - command == NAND_CMD_READOOB) { - /* - * NAND controller will release -CE after - * the last address byte is written, so we'll - * have to forcibly assert it. No interrupts - * are allowed while we do this as we don't - * want the NOR flash or PCMCIA drivers to - * steal our precious bytes of data... - */ - ce_override = 1; - local_irq_save(flags); - au1550_hwcontrol(mtd, NAND_CTL_SETNCE); - } - - ctx->write_byte(this, (u8)(page_addr >> 8)); - - if (this->options & NAND_ROW_ADDR_3) - ctx->write_byte(this, - ((page_addr >> 16) & 0x0f)); - } - /* Latch in address */ - au1550_hwcontrol(mtd, NAND_CTL_CLRALE); - } - - /* - * Program and erase have their own busy handlers. - * Status and sequential in need no delay. 
- */ - switch (command) { - - case NAND_CMD_PAGEPROG: - case NAND_CMD_ERASE1: - case NAND_CMD_ERASE2: - case NAND_CMD_SEQIN: - case NAND_CMD_STATUS: - return; - - case NAND_CMD_RESET: - break; - - case NAND_CMD_READ0: - case NAND_CMD_READ1: - case NAND_CMD_READOOB: - /* Check if we're really driving -CE low (just in case) */ - if (unlikely(!ce_override)) - break; - - /* Apply a short delay always to ensure that we do wait tWB. */ - ndelay(100); - /* Wait for a chip to become ready... */ - for (i = this->legacy.chip_delay; - !this->legacy.dev_ready(this) && i > 0; --i) - udelay(1); - - /* Release -CE and re-enable interrupts. */ - au1550_hwcontrol(mtd, NAND_CTL_CLRNCE); - local_irq_restore(flags); - return; - } - /* Apply this short delay always to ensure that we do wait tWB. */ - ndelay(100); - - while(!this->legacy.dev_ready(this)); -} - static int find_nand_cs(unsigned long nand_base) { void __iomem *base = @@ -540,12 +287,6 @@ static int au1550nd_probe(struct platform_device *pdev) } ctx->cs = cs; - this->legacy.dev_ready = au1550_device_ready; - this->legacy.select_chip = au1550_select_chip; - this->legacy.cmdfunc = au1550_command; - - /* 30 us command delay time */ - this->legacy.chip_delay = 30; nand_controller_init(&ctx->controller); ctx->controller.ops = &au1550nd_ops; this->controller = &ctx->controller; @@ -555,11 +296,6 @@ static int au1550nd_probe(struct platform_device *pdev) if (pd->devwidth) this->options |= NAND_BUSWIDTH_16; - this->legacy.read_byte = (pd->devwidth) ? au_read_byte16 : au_read_byte; - ctx->write_byte = (pd->devwidth) ? au_write_byte16 : au_write_byte; - this->legacy.write_buf = (pd->devwidth) ? au_write_buf16 : au_write_buf; - this->legacy.read_buf = (pd->devwidth) ? au_read_buf16 : au_read_buf; - ret = nand_scan(this, 1); if (ret) { dev_err(&pdev->dev, "NAND scan failed with %d\n", ret); -- GitLab From 806adfbe8840476ed380a6f7206b961d61b23117 Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Sun, 19 Apr 2020 21:30:37 +0200 Subject: [PATCH 0355/1971] mtd: rawnand: au1550nd: Patch the read/write buf helper prototypes To match the types passed by au1550nd_exec_instr() function. 
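For context, the exec_op() data instructions hand buffers to the controller as instr->ctx.data.buf.in (void *) / instr->ctx.data.buf.out (const void *) together with an unsigned int instr->ctx.data.len, which is exactly the shape the helpers take after this change.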
Signed-off-by: Boris Brezillon Reviewed-by: Miquel Raynal Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200419193037.1544035-5-boris.brezillon@collabora.com --- drivers/mtd/nand/raw/au1550nd.c | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/drivers/mtd/nand/raw/au1550nd.c b/drivers/mtd/nand/raw/au1550nd.c index ff8afd6562a2b..2ac84cb0501d3 100644 --- a/drivers/mtd/nand/raw/au1550nd.c +++ b/drivers/mtd/nand/raw/au1550nd.c @@ -36,13 +36,15 @@ static struct au1550nd_ctx *chip_to_au_ctx(struct nand_chip *this) * * write function for 8bit buswidth */ -static void au_write_buf(struct nand_chip *this, const u_char *buf, int len) +static void au_write_buf(struct nand_chip *this, const void *buf, + unsigned int len) { struct au1550nd_ctx *ctx = chip_to_au_ctx(this); + const u8 *p = buf; int i; for (i = 0; i < len; i++) { - writeb(buf[i], ctx->base + MEM_STNAND_DATA); + writeb(p[i], ctx->base + MEM_STNAND_DATA); wmb(); /* drain writebuffer */ } } @@ -55,13 +57,15 @@ static void au_write_buf(struct nand_chip *this, const u_char *buf, int len) * * read function for 8bit buswidth */ -static void au_read_buf(struct nand_chip *this, u_char *buf, int len) +static void au_read_buf(struct nand_chip *this, void *buf, + unsigned int len) { struct au1550nd_ctx *ctx = chip_to_au_ctx(this); + u8 *p = buf; int i; for (i = 0; i < len; i++) { - buf[i] = readb(ctx->base + MEM_STNAND_DATA); + p[i] = readb(ctx->base + MEM_STNAND_DATA); wmb(); /* drain writebuffer */ } } @@ -74,18 +78,18 @@ static void au_read_buf(struct nand_chip *this, u_char *buf, int len) * * write function for 16bit buswidth */ -static void au_write_buf16(struct nand_chip *this, const u_char *buf, int len) +static void au_write_buf16(struct nand_chip *this, const void *buf, + unsigned int len) { struct au1550nd_ctx *ctx = chip_to_au_ctx(this); - int i; - u16 *p = (u16 *) buf; - len >>= 1; + const u16 *p = buf; + unsigned int i; + len >>= 1; for (i = 0; i < len; i++) { writew(p[i], ctx->base + MEM_STNAND_DATA); wmb(); /* drain writebuffer */ } - } /** @@ -96,13 +100,13 @@ static void au_write_buf16(struct nand_chip *this, const u_char *buf, int len) * * read function for 16bit buswidth */ -static void au_read_buf16(struct nand_chip *this, u_char *buf, int len) +static void au_read_buf16(struct nand_chip *this, void *buf, unsigned int len) { struct au1550nd_ctx *ctx = chip_to_au_ctx(this); - int i; - u16 *p = (u16 *) buf; - len >>= 1; + unsigned int i; + u16 *p = buf; + len >>= 1; for (i = 0; i < len; i++) { p[i] = readw(ctx->base + MEM_STNAND_DATA); wmb(); /* drain writebuffer */ -- GitLab From d10b41ba02fee57569ad2aacb723f7c91b6b2729 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 21 Apr 2020 18:39:06 +0200 Subject: [PATCH 0356/1971] mtd: rawnand: Give more information about the ECC weakness When the ECC strength is too weak compared to the NAND chip requirements, display the values so that it is clear for people how much they are far from the requirements (and might get in troubles in the future). 
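As an illustration, for a hypothetical chip requiring 8 bits per 512 bytes while the controller is configured for 4 bits per 512 bytes, the warning now reads along the lines of: "WARNING: <mtd name>: the ECC used on your system (4b/512B) is too weak compared to the one required by the NAND chip (8b/512B)".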
Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200421163906.7515-1-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nand_base.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index c24e5e2ba130f..52cf73c18f55a 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -5781,8 +5781,10 @@ static int nand_scan_tail(struct nand_chip *chip) /* ECC sanity check: warn if it's too weak */ if (!nand_ecc_strength_good(chip)) - pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n", - mtd->name); + pr_warn("WARNING: %s: the ECC used on your system (%db/%dB) is too weak compared to the one required by the NAND chip (%db/%dB)\n", + mtd->name, chip->ecc.strength, chip->ecc.size, + chip->base.eccreq.strength, + chip->base.eccreq.step_size); /* Allow subpage writes up to ecc.steps. Not possible for MLC flash */ if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) { -- GitLab From c27075772d1f1c8aaf276db9943b35adda8a8b65 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 24 Apr 2020 18:44:56 +0200 Subject: [PATCH 0357/1971] mtd: rawnand: marvell: Fix the condition on a return code In a previous fix, I changed the condition on which the timeout of an IRQ is reached from: if (!ret) into: if (ret && !pending) While having a non-zero return code is usual in the Linux kernel, here ret comes from a wait_for_completion_timeout() which returns 0 when the waiting period is too long. Hence, the revised condition should be: if (!ret && !pending) The faulty patch did not produce any error because of the !pending condition so this change is finally purely cosmetic and does not change the actual driver behavior. Fixes: cafb56dd741e ("mtd: rawnand: marvell: prevent timeouts on a loaded machine") Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200424164501.26719-2-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/marvell_nand.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index 343001250ccca..b5e9fa71d4350 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c @@ -707,7 +707,7 @@ static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms) * In case the interrupt was not served in the required time frame, * check if the ISR was not served or if something went actually wrong. */ - if (ret && !pending) { + if (!ret && !pending) { dev_err(nfc->dev, "Timeout waiting for RB signal\n"); return -ETIMEDOUT; } -- GitLab From 5dcc99763c985d2fa9f3f176b913a336feafa7f9 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 24 Apr 2020 18:44:57 +0200 Subject: [PATCH 0358/1971] mtd: rawnand: marvell: Use devm_platform_ioremap_res() Switch from the old platform_get_resource()/devm_ioremap_resource() couple to the newer devm_platform_ioremap_resource() helper. 
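devm_platform_ioremap_resource(pdev, 0) wraps the platform_get_resource(pdev, IORESOURCE_MEM, 0) + devm_ioremap_resource() pair and still returns an ERR_PTR() on failure, so the existing IS_ERR() handling is unchanged.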
Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200424164501.26719-3-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/marvell_nand.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index b5e9fa71d4350..b7f0bf5e88948 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c @@ -2855,7 +2855,6 @@ static int marvell_nfc_init(struct marvell_nfc *nfc) static int marvell_nfc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct resource *r; struct marvell_nfc *nfc; int ret; int irq; @@ -2870,8 +2869,7 @@ static int marvell_nfc_probe(struct platform_device *pdev) nfc->controller.ops = &marvell_nand_controller_ops; INIT_LIST_HEAD(&nfc->chips); - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - nfc->regs = devm_ioremap_resource(dev, r); + nfc->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(nfc->regs)) return PTR_ERR(nfc->regs); -- GitLab From 7a0c18fb5c71c6ac7d4662a145e4227dcd4a36a3 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 24 Apr 2020 18:44:58 +0200 Subject: [PATCH 0359/1971] mtd: rawnand: marvell: Use nand_cleanup() when the device is not yet registered Do not call nand_release() while the MTD device has not been registered, use nand_cleanup() instead. Fixes: 02f26ecf8c77 ("mtd: nand: add reworked Marvell NAND controller driver") Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200424164501.26719-4-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/marvell_nand.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index b7f0bf5e88948..893f86c2f57c9 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c @@ -2665,7 +2665,7 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc, ret = mtd_device_register(mtd, NULL, 0); if (ret) { dev_err(dev, "failed to register mtd device: %d\n", ret); - nand_release(chip); + nand_cleanup(chip); return ret; } -- GitLab From c525b7af96714f72e316c70781570a4a3e1c2856 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 24 Apr 2020 18:44:59 +0200 Subject: [PATCH 0360/1971] mtd: rawnand: marvell: Fix probe error path Ensure all chips are deregistered and cleaned in case of error during the probe. 
Fixes: 02f26ecf8c77 ("mtd: nand: add reworked Marvell NAND controller driver") Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200424164501.26719-5-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/marvell_nand.c | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index 893f86c2f57c9..b5b58075ae3e2 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c @@ -2674,6 +2674,16 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc, return 0; } +static void marvell_nand_chips_cleanup(struct marvell_nfc *nfc) +{ + struct marvell_nand_chip *entry, *temp; + + list_for_each_entry_safe(entry, temp, &nfc->chips, node) { + nand_release(&entry->chip); + list_del(&entry->node); + } +} + static int marvell_nand_chips_init(struct device *dev, struct marvell_nfc *nfc) { struct device_node *np = dev->of_node; @@ -2708,21 +2718,16 @@ static int marvell_nand_chips_init(struct device *dev, struct marvell_nfc *nfc) ret = marvell_nand_chip_init(dev, nfc, nand_np); if (ret) { of_node_put(nand_np); - return ret; + goto cleanup_chips; } } return 0; -} -static void marvell_nand_chips_cleanup(struct marvell_nfc *nfc) -{ - struct marvell_nand_chip *entry, *temp; +cleanup_chips: + marvell_nand_chips_cleanup(nfc); - list_for_each_entry_safe(entry, temp, &nfc->chips, node) { - nand_release(&entry->chip); - list_del(&entry->node); - } + return ret; } static int marvell_nfc_init_dma(struct marvell_nfc *nfc) -- GitLab From 82c6c04e96a3efe3e8be8e309028de16e3f0605f Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 24 Apr 2020 18:45:00 +0200 Subject: [PATCH 0361/1971] mtd: rawnand: marvell: Rename a function to clarify Cosmetic change to clarify the purpose of the function. Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200424164501.26719-6-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/marvell_nand.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index b5b58075ae3e2..778a244b27c51 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c @@ -2167,8 +2167,8 @@ static const struct mtd_ooblayout_ops marvell_nand_ooblayout_ops = { .free = marvell_nand_ooblayout_free, }; -static int marvell_nand_hw_ecc_ctrl_init(struct mtd_info *mtd, - struct nand_ecc_ctrl *ecc) +static int marvell_nand_hw_ecc_controller_init(struct mtd_info *mtd, + struct nand_ecc_ctrl *ecc) { struct nand_chip *chip = mtd_to_nand(mtd); struct marvell_nfc *nfc = to_marvell_nfc(chip->controller); @@ -2262,7 +2262,7 @@ static int marvell_nand_ecc_init(struct mtd_info *mtd, switch (ecc->mode) { case NAND_ECC_HW: - ret = marvell_nand_hw_ecc_ctrl_init(mtd, ecc); + ret = marvell_nand_hw_ecc_controller_init(mtd, ecc); if (ret) return ret; break; -- GitLab From 1617942a813cd4dc152e2d955b2feabc4e192891 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 24 Apr 2020 18:45:01 +0200 Subject: [PATCH 0362/1971] mtd: rawnand: marvell: Rename the ->correct() function There is no correction involved at this point, it is just a matter of reading registers and checking whether bitflips have occurred or not. Rename the function to clarify it. 
Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200424164501.26719-7-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/marvell_nand.c | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index 778a244b27c51..88269c44b2b4b 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c @@ -932,14 +932,14 @@ static void marvell_nfc_check_empty_chunk(struct nand_chip *chip, } /* - * Check a chunk is correct or not according to hardware ECC engine. + * Check if a chunk is correct or not according to the hardware ECC engine. * mtd->ecc_stats.corrected is updated, as well as max_bitflips, however * mtd->ecc_stats.failure is not, the function will instead return a non-zero * value indicating that a check on the emptyness of the subpage must be - * performed before declaring the subpage corrupted. + * performed before actually declaring the subpage as "corrupted". */ -static int marvell_nfc_hw_ecc_correct(struct nand_chip *chip, - unsigned int *max_bitflips) +static int marvell_nfc_hw_ecc_check_bitflips(struct nand_chip *chip, + unsigned int *max_bitflips) { struct mtd_info *mtd = nand_to_mtd(chip); struct marvell_nfc *nfc = to_marvell_nfc(chip->controller); @@ -1053,7 +1053,7 @@ static int marvell_nfc_hw_ecc_hmg_read_page(struct nand_chip *chip, u8 *buf, marvell_nfc_enable_hw_ecc(chip); marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi, false, page); - ret = marvell_nfc_hw_ecc_correct(chip, &max_bitflips); + ret = marvell_nfc_hw_ecc_check_bitflips(chip, &max_bitflips); marvell_nfc_disable_hw_ecc(chip); if (!ret) @@ -1336,7 +1336,7 @@ static int marvell_nfc_hw_ecc_bch_read_page(struct nand_chip *chip, /* Read the chunk and detect number of bitflips */ marvell_nfc_hw_ecc_bch_read_chunk(chip, chunk, data, data_len, spare, spare_len, page); - ret = marvell_nfc_hw_ecc_correct(chip, &max_bitflips); + ret = marvell_nfc_hw_ecc_check_bitflips(chip, &max_bitflips); if (ret) failure_mask |= BIT(chunk); @@ -1358,10 +1358,9 @@ static int marvell_nfc_hw_ecc_bch_read_page(struct nand_chip *chip, */ /* - * In case there is any subpage read error reported by ->correct(), we - * usually re-read only ECC bytes in raw mode and check if the whole - * page is empty. In this case, it is normal that the ECC check failed - * and we just ignore the error. + * In case there is any subpage read error, we usually re-read only ECC + * bytes in raw mode and check if the whole page is empty. In this case, + * it is normal that the ECC check failed and we just ignore the error. * * However, it has been empirically observed that for some layouts (e.g * 2k page, 8b strength per 512B chunk), the controller tries to correct -- GitLab From 83c411c29b90b7de505a2fe1d655293c95f8ba90 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 28 Apr 2020 11:42:54 +0200 Subject: [PATCH 0363/1971] mtd: rawnand: timings: Add mode information to the timings structure Convert the timings union into a structure containing the mode and the actual values. The values are still a union in prevision of the addition of the NVDDR modes. 
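As a hypothetical example of what this enables (the names below are made up, not part of this patch): a controller driver whose timing registers are programmed from a per-mode table can now key directly off conf->timings.mode in its ->setup_data_interface() hook instead of re-deriving the mode from the raw SDR values:

	static int foo_setup_data_interface(struct nand_chip *chip, int chipnr,
					    const struct nand_data_interface *conf)
	{
		const struct nand_sdr_timings *sdr = nand_get_sdr_timings(conf);

		if (IS_ERR(sdr))
			return PTR_ERR(sdr);

		/* This imaginary controller only supports SDR timing modes 0 to 3 */
		if (conf->timings.mode > 3)
			return -ENOTSUPP;

		if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
			return 0;

		/* foo_program_timing_regs() would index a driver-private per-mode table */
		return foo_program_timing_regs(chip, conf->timings.mode);
	}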
Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200428094302.14624-2-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nand_timings.c | 6 ++++++ include/linux/mtd/rawnand.h | 10 +++++++--- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/drivers/mtd/nand/raw/nand_timings.c b/drivers/mtd/nand/raw/nand_timings.c index f64b06a71dfa1..0061cbaf931d1 100644 --- a/drivers/mtd/nand/raw/nand_timings.c +++ b/drivers/mtd/nand/raw/nand_timings.c @@ -16,6 +16,7 @@ static const struct nand_data_interface onfi_sdr_timings[] = { /* Mode 0 */ { .type = NAND_SDR_IFACE, + .timings.mode = 0, .timings.sdr = { .tCCS_min = 500000, .tR_max = 200000000, @@ -58,6 +59,7 @@ static const struct nand_data_interface onfi_sdr_timings[] = { /* Mode 1 */ { .type = NAND_SDR_IFACE, + .timings.mode = 1, .timings.sdr = { .tCCS_min = 500000, .tR_max = 200000000, @@ -100,6 +102,7 @@ static const struct nand_data_interface onfi_sdr_timings[] = { /* Mode 2 */ { .type = NAND_SDR_IFACE, + .timings.mode = 2, .timings.sdr = { .tCCS_min = 500000, .tR_max = 200000000, @@ -142,6 +145,7 @@ static const struct nand_data_interface onfi_sdr_timings[] = { /* Mode 3 */ { .type = NAND_SDR_IFACE, + .timings.mode = 3, .timings.sdr = { .tCCS_min = 500000, .tR_max = 200000000, @@ -184,6 +188,7 @@ static const struct nand_data_interface onfi_sdr_timings[] = { /* Mode 4 */ { .type = NAND_SDR_IFACE, + .timings.mode = 4, .timings.sdr = { .tCCS_min = 500000, .tR_max = 200000000, @@ -226,6 +231,7 @@ static const struct nand_data_interface onfi_sdr_timings[] = { /* Mode 5 */ { .type = NAND_SDR_IFACE, + .timings.mode = 5, .timings.sdr = { .tCCS_min = 500000, .tR_max = 200000000, diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 1e76196f9829f..21873168ba4df 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -491,13 +491,17 @@ enum nand_data_interface_type { /** * struct nand_data_interface - NAND interface timing * @type: type of the timing - * @timings: The timing, type according to @type + * @timings: The timing information + * @timings.mode: Timing mode as defined in the specification * @timings.sdr: Use it when @type is %NAND_SDR_IFACE. */ struct nand_data_interface { enum nand_data_interface_type type; - union { - struct nand_sdr_timings sdr; + struct nand_timings { + unsigned int mode; + union { + struct nand_sdr_timings sdr; + }; } timings; }; -- GitLab From 4d8ec041d9c454029f6cd90622f6d81eb61e781c Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 28 Apr 2020 11:42:55 +0200 Subject: [PATCH 0364/1971] mtd: rawnand: timings: Fix default tR_max and tCCS_min timings tR and tCCS are currently wrongly expressed in femtoseconds, while we expect these values to be expressed in picoseconds. Set right hardcoded values. 
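For reference, the expected defaults in picoseconds are tR_max = 200 us = 200,000,000 ps and tCCS_min = 500 ns = 500,000 ps. The removed 1000000ULL multipliers made both values a million times too large, i.e. the fields were effectively being filled as if they were femtoseconds.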
Fixes: 6a943386ee36 mtd: rawnand: add default values for dynamic timings Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200428094302.14624-3-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nand_timings.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/mtd/nand/raw/nand_timings.c b/drivers/mtd/nand/raw/nand_timings.c index 0061cbaf931d1..36d21be3dfe5a 100644 --- a/drivers/mtd/nand/raw/nand_timings.c +++ b/drivers/mtd/nand/raw/nand_timings.c @@ -320,10 +320,9 @@ int onfi_fill_data_interface(struct nand_chip *chip, /* microseconds -> picoseconds */ timings->tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX; timings->tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX; - timings->tR_max = 1000000ULL * 200000000ULL; - /* nanoseconds -> picoseconds */ - timings->tCCS_min = 1000UL * 500000; + timings->tR_max = 200000000; + timings->tCCS_min = 500000; } return 0; -- GitLab From 1d5d08ee9b28cff907326b4ad5a2463fd2808be1 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 28 Apr 2020 11:42:56 +0200 Subject: [PATCH 0365/1971] mtd: rawnand: onfi: Fix redundancy detection check During ONFI detection, the CRC derived from the parameter page and the CRC supposed to be at the end of the parameter page are compared. If they do not match, the second then the third copies of the page are tried. The current implementation compares the newly derived CRC with the CRC contained in the first page only. So if this particular CRC area has been corrupted, then the detection will fail for a wrong reason. Fix this issue by checking the derived CRC against the right one. Fixes: 39138c1f4a31 ("mtd: rawnand: use bit-wise majority to recover the ONFI param page") Cc: stable@vger.kernel.org Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200428094302.14624-4-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nand_onfi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/nand_onfi.c b/drivers/mtd/nand/raw/nand_onfi.c index 0b879bd0a68c8..8fe8d7bdd2034 100644 --- a/drivers/mtd/nand/raw/nand_onfi.c +++ b/drivers/mtd/nand/raw/nand_onfi.c @@ -173,7 +173,7 @@ int nand_onfi_detect(struct nand_chip *chip) } if (onfi_crc16(ONFI_CRC_BASE, (u8 *)&p[i], 254) == - le16_to_cpu(p->crc)) { + le16_to_cpu(p[i].crc)) { if (i) memcpy(p, &p[i], sizeof(*p)); break; -- GitLab From 543e34f29dc48a921bc9cffbc1240f443871b512 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 28 Apr 2020 11:42:57 +0200 Subject: [PATCH 0366/1971] mtd: rawnand: onfi: Use intermediate variables to improve readability Before reworking a little bit the ONFI detection code, let's clean the coding style of the if statements to improve readability. 
Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200428094302.14624-5-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nand_onfi.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/mtd/nand/raw/nand_onfi.c b/drivers/mtd/nand/raw/nand_onfi.c index 8fe8d7bdd2034..7d9a3130443a2 100644 --- a/drivers/mtd/nand/raw/nand_onfi.c +++ b/drivers/mtd/nand/raw/nand_onfi.c @@ -146,6 +146,7 @@ int nand_onfi_detect(struct nand_chip *chip) int onfi_version = 0; char id[4]; int i, ret, val; + u16 crc; memorg = nanddev_get_memorg(&chip->base); @@ -172,8 +173,8 @@ int nand_onfi_detect(struct nand_chip *chip) goto free_onfi_param_page; } - if (onfi_crc16(ONFI_CRC_BASE, (u8 *)&p[i], 254) == - le16_to_cpu(p[i].crc)) { + crc = onfi_crc16(ONFI_CRC_BASE, (u8 *)&p[i], 254); + if (crc == le16_to_cpu(p[i].crc)) { if (i) memcpy(p, &p[i], sizeof(*p)); break; @@ -187,8 +188,8 @@ int nand_onfi_detect(struct nand_chip *chip) nand_bit_wise_majority(srcbufs, ARRAY_SIZE(srcbufs), p, sizeof(*p)); - if (onfi_crc16(ONFI_CRC_BASE, (u8 *)p, 254) != - le16_to_cpu(p->crc)) { + crc = onfi_crc16(ONFI_CRC_BASE, (u8 *)p, 254); + if (crc != le16_to_cpu(p->crc)) { pr_err("ONFI parameter recovery failed, aborting\n"); goto free_onfi_param_page; } -- GitLab From dacd1a1297254ad22a3d625d5b6e508392971a86 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 28 Apr 2020 11:42:58 +0200 Subject: [PATCH 0367/1971] mtd: rawnand: onfi: Define the number of parameter pages Use a macro to define the number of parameter page instead of hardcoding it everywhere. Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200428094302.14624-6-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nand_onfi.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/drivers/mtd/nand/raw/nand_onfi.c b/drivers/mtd/nand/raw/nand_onfi.c index 7d9a3130443a2..7286c014620bd 100644 --- a/drivers/mtd/nand/raw/nand_onfi.c +++ b/drivers/mtd/nand/raw/nand_onfi.c @@ -16,6 +16,8 @@ #include "internals.h" +#define ONFI_PARAM_PAGES 3 + u16 onfi_crc16(u16 crc, u8 const *p, size_t len) { int i; @@ -156,7 +158,7 @@ int nand_onfi_detect(struct nand_chip *chip) return 0; /* ONFI chip: allocate a buffer to hold its parameter page */ - p = kzalloc((sizeof(*p) * 3), GFP_KERNEL); + p = kzalloc((sizeof(*p) * ONFI_PARAM_PAGES), GFP_KERNEL); if (!p) return -ENOMEM; @@ -166,7 +168,7 @@ int nand_onfi_detect(struct nand_chip *chip) goto free_onfi_param_page; } - for (i = 0; i < 3; i++) { + for (i = 0; i < ONFI_PARAM_PAGES; i++) { ret = nand_read_data_op(chip, &p[i], sizeof(*p), true); if (ret) { ret = 0; @@ -181,11 +183,15 @@ int nand_onfi_detect(struct nand_chip *chip) } } - if (i == 3) { - const void *srcbufs[3] = {p, p + 1, p + 2}; + if (i == ONFI_PARAM_PAGES) { + const void *srcbufs[ONFI_PARAM_PAGES]; + unsigned int j; + + for (j = 0; j < ONFI_PARAM_PAGES; j++) + srcbufs[j] = p + j; pr_warn("Could not find a valid ONFI parameter page, trying bit-wise majority to recover it\n"); - nand_bit_wise_majority(srcbufs, ARRAY_SIZE(srcbufs), p, + nand_bit_wise_majority(srcbufs, ONFI_PARAM_PAGES, p, sizeof(*p)); crc = onfi_crc16(ONFI_CRC_BASE, (u8 *)p, 254); -- GitLab From 7e928263fc539bb20e73344a3ef5492ffa8bba7e Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 28 Apr 2020 11:42:59 +0200 Subject: [PATCH 0368/1971] mtd: rawnand: onfi: Avoid doing a copy of the parameter page There is no need for copying the parameter page, playing with 
pointers does the trick. There is not functional change. Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200428094302.14624-7-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nand_onfi.c | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/drivers/mtd/nand/raw/nand_onfi.c b/drivers/mtd/nand/raw/nand_onfi.c index 7286c014620bd..0f3fb9fe4d1db 100644 --- a/drivers/mtd/nand/raw/nand_onfi.c +++ b/drivers/mtd/nand/raw/nand_onfi.c @@ -143,7 +143,7 @@ int nand_onfi_detect(struct nand_chip *chip) { struct mtd_info *mtd = nand_to_mtd(chip); struct nand_memory_organization *memorg; - struct nand_onfi_params *p; + struct nand_onfi_params *p = NULL, *pbuf; struct onfi_params *onfi; int onfi_version = 0; char id[4]; @@ -158,8 +158,8 @@ int nand_onfi_detect(struct nand_chip *chip) return 0; /* ONFI chip: allocate a buffer to hold its parameter page */ - p = kzalloc((sizeof(*p) * ONFI_PARAM_PAGES), GFP_KERNEL); - if (!p) + pbuf = kzalloc((sizeof(*pbuf) * ONFI_PARAM_PAGES), GFP_KERNEL); + if (!pbuf) return -ENOMEM; ret = nand_read_param_page_op(chip, 0, NULL, 0); @@ -169,16 +169,15 @@ int nand_onfi_detect(struct nand_chip *chip) } for (i = 0; i < ONFI_PARAM_PAGES; i++) { - ret = nand_read_data_op(chip, &p[i], sizeof(*p), true); + ret = nand_read_data_op(chip, &pbuf[i], sizeof(*pbuf), true); if (ret) { ret = 0; goto free_onfi_param_page; } - crc = onfi_crc16(ONFI_CRC_BASE, (u8 *)&p[i], 254); - if (crc == le16_to_cpu(p[i].crc)) { - if (i) - memcpy(p, &p[i], sizeof(*p)); + crc = onfi_crc16(ONFI_CRC_BASE, (u8 *)&pbuf[i], 254); + if (crc == le16_to_cpu(pbuf[i].crc)) { + p = &pbuf[i]; break; } } @@ -188,17 +187,18 @@ int nand_onfi_detect(struct nand_chip *chip) unsigned int j; for (j = 0; j < ONFI_PARAM_PAGES; j++) - srcbufs[j] = p + j; + srcbufs[j] = pbuf + j; pr_warn("Could not find a valid ONFI parameter page, trying bit-wise majority to recover it\n"); - nand_bit_wise_majority(srcbufs, ONFI_PARAM_PAGES, p, - sizeof(*p)); + nand_bit_wise_majority(srcbufs, ONFI_PARAM_PAGES, pbuf, + sizeof(*pbuf)); - crc = onfi_crc16(ONFI_CRC_BASE, (u8 *)p, 254); - if (crc != le16_to_cpu(p->crc)) { + crc = onfi_crc16(ONFI_CRC_BASE, (u8 *)pbuf, 254); + if (crc != le16_to_cpu(pbuf->crc)) { pr_err("ONFI parameter recovery failed, aborting\n"); goto free_onfi_param_page; } + p = pbuf; } if (chip->manufacturer.desc && chip->manufacturer.desc->ops && @@ -306,14 +306,14 @@ int nand_onfi_detect(struct nand_chip *chip) chip->parameters.onfi = onfi; /* Identification done, free the full ONFI parameter page and exit */ - kfree(p); + kfree(pbuf); return 1; free_model: kfree(chip->parameters.model); free_onfi_param_page: - kfree(p); + kfree(pbuf); return ret; } -- GitLab From 6e9c65d87c6d92ed988ecdab3ae6ed4d7d94f67c Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 28 Apr 2020 11:43:00 +0200 Subject: [PATCH 0369/1971] mtd: rawnand: onfi: Drop a useless parameter page read During detection the logic on the NAND bus is: /* Regular ONFI detection */ 1/ read the three NAND parameter pages /* Extended parameter page detection */ 2/ send "read the NAND parameter page" commands without reading actual data 3/ move the column pointer to the extended page and read it If fact, as long as there is nothing happening on the NAND bus between 1/ and 3/, the operation 2/ is redundant so remove it. 
Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200428094302.14624-8-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nand_onfi.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/mtd/nand/raw/nand_onfi.c b/drivers/mtd/nand/raw/nand_onfi.c index 0f3fb9fe4d1db..ee0f2c2549c1b 100644 --- a/drivers/mtd/nand/raw/nand_onfi.c +++ b/drivers/mtd/nand/raw/nand_onfi.c @@ -47,12 +47,10 @@ static int nand_flash_detect_ext_param_page(struct nand_chip *chip, if (!ep) return -ENOMEM; - /* Send our own NAND_CMD_PARAM. */ - ret = nand_read_param_page_op(chip, 0, NULL, 0); - if (ret) - goto ext_out; - - /* Use the Change Read Column command to skip the ONFI param pages. */ + /* + * Use the Change Read Column command to skip the ONFI param pages and + * ensure we read at the right location. + */ ret = nand_change_read_column_op(chip, sizeof(*p) * p->num_of_param_pages, ep, len, true); -- GitLab From 2e8f56f2a9417692b758fee9ee5a01d857dd980f Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 28 Apr 2020 11:43:01 +0200 Subject: [PATCH 0370/1971] mtd: rawnand: jedec: Define the number of parameter pages Use a macro to define the number of parameter page instead of hardcoding it everywhere. Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200428094302.14624-9-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nand_jedec.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/mtd/nand/raw/nand_jedec.c b/drivers/mtd/nand/raw/nand_jedec.c index 9b540e76f84f5..0cd322a8be242 100644 --- a/drivers/mtd/nand/raw/nand_jedec.c +++ b/drivers/mtd/nand/raw/nand_jedec.c @@ -16,6 +16,8 @@ #include "internals.h" +#define JEDEC_PARAM_PAGES 3 + /* * Check if the NAND chip is JEDEC compliant, returns 1 if it is, 0 otherwise. */ @@ -47,7 +49,7 @@ int nand_jedec_detect(struct nand_chip *chip) goto free_jedec_param_page; } - for (i = 0; i < 3; i++) { + for (i = 0; i < JEDEC_PARAM_PAGES; i++) { ret = nand_read_data_op(chip, p, sizeof(*p), true); if (ret) { ret = 0; @@ -59,7 +61,7 @@ int nand_jedec_detect(struct nand_chip *chip) break; } - if (i == 3) { + if (i == JEDEC_PARAM_PAGES) { pr_err("Could not find valid JEDEC parameter page; aborting\n"); goto free_jedec_param_page; } -- GitLab From 432ab89d3035fe5fd4e8adc3b904105a6c4cccae Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 28 Apr 2020 11:43:02 +0200 Subject: [PATCH 0371/1971] mtd: rawnand: jedec: Use intermediate variables to improve readability Before reworking a little bit the JEDEC detection code, let's clean the coding style of an if statement to improve readability. 
Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200428094302.14624-10-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nand_jedec.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/mtd/nand/raw/nand_jedec.c b/drivers/mtd/nand/raw/nand_jedec.c index 0cd322a8be242..15937e02c64f1 100644 --- a/drivers/mtd/nand/raw/nand_jedec.c +++ b/drivers/mtd/nand/raw/nand_jedec.c @@ -30,6 +30,7 @@ int nand_jedec_detect(struct nand_chip *chip) int jedec_version = 0; char id[5]; int i, val, ret; + u16 crc; memorg = nanddev_get_memorg(&chip->base); @@ -56,8 +57,8 @@ int nand_jedec_detect(struct nand_chip *chip) goto free_jedec_param_page; } - if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 510) == - le16_to_cpu(p->crc)) + crc = onfi_crc16(ONFI_CRC_BASE, (u8 *)p, 510); + if (crc == le16_to_cpu(p->crc)) break; } -- GitLab From c9e1817ff9456d51fc1bc39fa0af6e07e883383e Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Fri, 1 May 2020 11:06:47 +0200 Subject: [PATCH 0372/1971] mtd: rawnand: cs553x: Declare controllers instead of NAND chips The CS553x companion chip embeds 4 NAND controllers. Declare them as NAND controllers instead of NAND chips. That's done in preparation of the transition to exec_op(). Signed-off-by: Boris Brezillon Reviewed-by: Miquel Raynal Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200501090650.1138200-2-boris.brezillon@collabora.com --- drivers/mtd/nand/raw/cs553x_nand.c | 33 ++++++++++++++++++++---------- 1 file changed, 22 insertions(+), 11 deletions(-) diff --git a/drivers/mtd/nand/raw/cs553x_nand.c b/drivers/mtd/nand/raw/cs553x_nand.c index e2322cee3229f..970de727679fa 100644 --- a/drivers/mtd/nand/raw/cs553x_nand.c +++ b/drivers/mtd/nand/raw/cs553x_nand.c @@ -89,6 +89,11 @@ #define CS_NAND_ECC_CLRECC (1<<1) #define CS_NAND_ECC_ENECC (1<<0) +struct cs553x_nand_controller { + struct nand_controller base; + struct nand_chip chip; +}; + static void cs553x_read_buf(struct nand_chip *this, u_char *buf, int len) { while (unlikely(len > 0x800)) { @@ -166,10 +171,11 @@ static int cs_calculate_ecc(struct nand_chip *this, const u_char *dat, return 0; } -static struct mtd_info *cs553x_mtd[4]; +static struct cs553x_nand_controller *controllers[4]; static int __init cs553x_init_one(int cs, int mmio, unsigned long adr) { + struct cs553x_nand_controller *controller; int err = 0; struct nand_chip *this; struct mtd_info *new_mtd; @@ -183,12 +189,15 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr) } /* Allocate memory for MTD device structure and private data */ - this = kzalloc(sizeof(struct nand_chip), GFP_KERNEL); - if (!this) { + controller = kzalloc(sizeof(*controller), GFP_KERNEL); + if (!controller) { err = -ENOMEM; goto out; } + this = &controller->chip; + nand_controller_init(&controller->base); + this->controller = &controller->base; new_mtd = nand_to_mtd(this); /* Link the private data with the MTD structure */ @@ -232,7 +241,7 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr) if (err) goto out_free; - cs553x_mtd[cs] = new_mtd; + controllers[cs] = controller; goto out; out_free: @@ -240,7 +249,7 @@ out_free: out_ior: iounmap(this->legacy.IO_ADDR_R); out_mtd: - kfree(this); + kfree(controller); out: return err; } @@ -295,9 +304,10 @@ static int __init cs553x_init(void) /* Register all devices together here. This means we can easily hack it to do mtdconcat etc. if we want to. 
*/ for (i = 0; i < NR_CS553X_CONTROLLERS; i++) { - if (cs553x_mtd[i]) { + if (controllers[i]) { /* If any devices registered, return success. Else the last error. */ - mtd_device_register(cs553x_mtd[i], NULL, 0); + mtd_device_register(nand_to_mtd(&controllers[i]->chip), + NULL, 0); err = 0; } } @@ -312,9 +322,10 @@ static void __exit cs553x_cleanup(void) int i; for (i = 0; i < NR_CS553X_CONTROLLERS; i++) { - struct mtd_info *mtd = cs553x_mtd[i]; - struct nand_chip *this; void __iomem *mmio_base; + struct cs553x_nand_controller *controller = controllers[i]; + struct nand_chip *this = &controller->chip; + struct mtd_info *mtd = nand_to_mtd(this); if (!mtd) continue; @@ -325,13 +336,13 @@ static void __exit cs553x_cleanup(void) /* Release resources, unregister device */ nand_release(this); kfree(mtd->name); - cs553x_mtd[i] = NULL; + controllers[i] = NULL; /* unmap physical address */ iounmap(mmio_base); /* Free the MTD device structure */ - kfree(this); + kfree(controller); } } -- GitLab From ba03e4833946b91aa16950db37d759dd805a24ef Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Fri, 1 May 2020 11:06:48 +0200 Subject: [PATCH 0373/1971] mtd: rawnand: cs553x: Stop using chip->legacy.IO_ADDR_{R, W} Now that we have our own controller struct we can keep the MMIO pointer in there and use instead of using the chip->legacy.IO_ADDR_{R,W} fields. Signed-off-by: Boris Brezillon Reviewed-by: Miquel Raynal Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200501090650.1138200-3-boris.brezillon@collabora.com --- drivers/mtd/nand/raw/cs553x_nand.c | 57 ++++++++++++++++++------------ 1 file changed, 34 insertions(+), 23 deletions(-) diff --git a/drivers/mtd/nand/raw/cs553x_nand.c b/drivers/mtd/nand/raw/cs553x_nand.c index 970de727679fa..84cf87f02bd30 100644 --- a/drivers/mtd/nand/raw/cs553x_nand.c +++ b/drivers/mtd/nand/raw/cs553x_nand.c @@ -92,51 +92,66 @@ struct cs553x_nand_controller { struct nand_controller base; struct nand_chip chip; + void __iomem *mmio; }; +static struct cs553x_nand_controller * +to_cs553x(struct nand_controller *controller) +{ + return container_of(controller, struct cs553x_nand_controller, base); +} + static void cs553x_read_buf(struct nand_chip *this, u_char *buf, int len) { + struct cs553x_nand_controller *cs553x = to_cs553x(this->controller); + while (unlikely(len > 0x800)) { - memcpy_fromio(buf, this->legacy.IO_ADDR_R, 0x800); + memcpy_fromio(buf, cs553x->mmio, 0x800); buf += 0x800; len -= 0x800; } - memcpy_fromio(buf, this->legacy.IO_ADDR_R, len); + memcpy_fromio(buf, cs553x->mmio, len); } static void cs553x_write_buf(struct nand_chip *this, const u_char *buf, int len) { + struct cs553x_nand_controller *cs553x = to_cs553x(this->controller); + while (unlikely(len > 0x800)) { - memcpy_toio(this->legacy.IO_ADDR_R, buf, 0x800); + memcpy_toio(cs553x->mmio, buf, 0x800); buf += 0x800; len -= 0x800; } - memcpy_toio(this->legacy.IO_ADDR_R, buf, len); + memcpy_toio(cs553x->mmio, buf, len); } static unsigned char cs553x_read_byte(struct nand_chip *this) { - return readb(this->legacy.IO_ADDR_R); + struct cs553x_nand_controller *cs553x = to_cs553x(this->controller); + + return readb(cs553x->mmio); } static void cs553x_write_byte(struct nand_chip *this, u_char byte) { + struct cs553x_nand_controller *cs553x = to_cs553x(this->controller); int i = 100000; - while (i && readb(this->legacy.IO_ADDR_R + MM_NAND_STS) & CS_NAND_CTLR_BUSY) { + while (i && readb(cs553x->mmio + MM_NAND_STS) & CS_NAND_CTLR_BUSY) { udelay(1); i--; } - writeb(byte, this->legacy.IO_ADDR_W + 
0x801); + writeb(byte, cs553x->mmio + 0x801); } static void cs553x_hwcontrol(struct nand_chip *this, int cmd, unsigned int ctrl) { - void __iomem *mmio_base = this->legacy.IO_ADDR_R; + struct cs553x_nand_controller *cs553x = to_cs553x(this->controller); + if (ctrl & NAND_CTRL_CHANGE) { unsigned char ctl = (ctrl & ~NAND_CTRL_CHANGE ) ^ 0x01; - writeb(ctl, mmio_base + MM_NAND_CTL); + writeb(ctl, cs553x->mmio + MM_NAND_CTL); } if (cmd != NAND_CMD_NONE) cs553x_write_byte(this, cmd); @@ -144,26 +159,26 @@ static void cs553x_hwcontrol(struct nand_chip *this, int cmd, static int cs553x_device_ready(struct nand_chip *this) { - void __iomem *mmio_base = this->legacy.IO_ADDR_R; - unsigned char foo = readb(mmio_base + MM_NAND_STS); + struct cs553x_nand_controller *cs553x = to_cs553x(this->controller); + unsigned char foo = readb(cs553x->mmio + MM_NAND_STS); return (foo & CS_NAND_STS_FLASH_RDY) && !(foo & CS_NAND_CTLR_BUSY); } static void cs_enable_hwecc(struct nand_chip *this, int mode) { - void __iomem *mmio_base = this->legacy.IO_ADDR_R; + struct cs553x_nand_controller *cs553x = to_cs553x(this->controller); - writeb(0x07, mmio_base + MM_NAND_ECC_CTL); + writeb(0x07, cs553x->mmio + MM_NAND_ECC_CTL); } static int cs_calculate_ecc(struct nand_chip *this, const u_char *dat, u_char *ecc_code) { + struct cs553x_nand_controller *cs553x = to_cs553x(this->controller); uint32_t ecc; - void __iomem *mmio_base = this->legacy.IO_ADDR_R; - ecc = readl(mmio_base + MM_NAND_STS); + ecc = readl(cs553x->mmio + MM_NAND_STS); ecc_code[1] = ecc >> 8; ecc_code[0] = ecc >> 16; @@ -204,8 +219,8 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr) new_mtd->owner = THIS_MODULE; /* map physical address */ - this->legacy.IO_ADDR_R = this->legacy.IO_ADDR_W = ioremap(adr, 4096); - if (!this->legacy.IO_ADDR_R) { + controller->mmio = ioremap(adr, 4096); + if (!controller->mmio) { pr_warn("ioremap cs553x NAND @0x%08lx failed\n", adr); err = -EIO; goto out_mtd; @@ -247,7 +262,7 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr) out_free: kfree(new_mtd->name); out_ior: - iounmap(this->legacy.IO_ADDR_R); + iounmap(controller->mmio); out_mtd: kfree(controller); out: @@ -322,7 +337,6 @@ static void __exit cs553x_cleanup(void) int i; for (i = 0; i < NR_CS553X_CONTROLLERS; i++) { - void __iomem *mmio_base; struct cs553x_nand_controller *controller = controllers[i]; struct nand_chip *this = &controller->chip; struct mtd_info *mtd = nand_to_mtd(this); @@ -330,16 +344,13 @@ static void __exit cs553x_cleanup(void) if (!mtd) continue; - this = mtd_to_nand(mtd); - mmio_base = this->legacy.IO_ADDR_R; - /* Release resources, unregister device */ nand_release(this); kfree(mtd->name); controllers[i] = NULL; /* unmap physical address */ - iounmap(mmio_base); + iounmap(controller->mmio); /* Free the MTD device structure */ kfree(controller); -- GitLab From b4ed6328b93d23afc10c3ba222a74e9c9672c7ce Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Fri, 1 May 2020 11:06:49 +0200 Subject: [PATCH 0374/1971] mtd: rawnand: cs553x: Implement exec_op() So we can later get rid of the legacy hooks. 
Signed-off-by: Boris Brezillon Reviewed-by: Miquel Raynal Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200501090650.1138200-4-boris.brezillon@collabora.com --- drivers/mtd/nand/raw/cs553x_nand.c | 126 ++++++++++++++++++++++++++++- 1 file changed, 125 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/cs553x_nand.c b/drivers/mtd/nand/raw/cs553x_nand.c index 84cf87f02bd30..3596edc6b7770 100644 --- a/drivers/mtd/nand/raw/cs553x_nand.c +++ b/drivers/mtd/nand/raw/cs553x_nand.c @@ -21,9 +21,9 @@ #include #include #include +#include #include -#include #define NR_CS553X_CONTROLLERS 4 @@ -165,6 +165,125 @@ static int cs553x_device_ready(struct nand_chip *this) return (foo & CS_NAND_STS_FLASH_RDY) && !(foo & CS_NAND_CTLR_BUSY); } +static int cs553x_write_ctrl_byte(struct cs553x_nand_controller *cs553x, + u32 ctl, u8 data) +{ + u8 status; + int ret; + + writeb(ctl, cs553x->mmio + MM_NAND_CTL); + writeb(data, cs553x->mmio + MM_NAND_IO); + ret = readb_poll_timeout_atomic(cs553x->mmio + MM_NAND_STS, status, + !(status & CS_NAND_CTLR_BUSY), 1, + 100000); + if (ret) + return ret; + + return 0; +} + +static void cs553x_data_in(struct cs553x_nand_controller *cs553x, void *buf, + unsigned int len) +{ + writeb(0, cs553x->mmio + MM_NAND_CTL); + while (unlikely(len > 0x800)) { + memcpy_fromio(buf, cs553x->mmio, 0x800); + buf += 0x800; + len -= 0x800; + } + memcpy_fromio(buf, cs553x->mmio, len); +} + +static void cs553x_data_out(struct cs553x_nand_controller *cs553x, + const void *buf, unsigned int len) +{ + writeb(0, cs553x->mmio + MM_NAND_CTL); + while (unlikely(len > 0x800)) { + memcpy_toio(cs553x->mmio, buf, 0x800); + buf += 0x800; + len -= 0x800; + } + memcpy_toio(cs553x->mmio, buf, len); +} + +static int cs553x_wait_ready(struct cs553x_nand_controller *cs553x, + unsigned int timeout_ms) +{ + u8 mask = CS_NAND_CTLR_BUSY | CS_NAND_STS_FLASH_RDY; + u8 status; + + return readb_poll_timeout(cs553x->mmio + MM_NAND_STS, status, + (status & mask) == CS_NAND_STS_FLASH_RDY, 100, + timeout_ms * 1000); +} + +static int cs553x_exec_instr(struct cs553x_nand_controller *cs553x, + const struct nand_op_instr *instr) +{ + unsigned int i; + int ret = 0; + + switch (instr->type) { + case NAND_OP_CMD_INSTR: + ret = cs553x_write_ctrl_byte(cs553x, CS_NAND_CTL_CLE, + instr->ctx.cmd.opcode); + break; + + case NAND_OP_ADDR_INSTR: + for (i = 0; i < instr->ctx.addr.naddrs; i++) { + ret = cs553x_write_ctrl_byte(cs553x, CS_NAND_CTL_ALE, + instr->ctx.addr.addrs[i]); + if (ret) + break; + } + break; + + case NAND_OP_DATA_IN_INSTR: + cs553x_data_in(cs553x, instr->ctx.data.buf.in, + instr->ctx.data.len); + break; + + case NAND_OP_DATA_OUT_INSTR: + cs553x_data_out(cs553x, instr->ctx.data.buf.out, + instr->ctx.data.len); + break; + + case NAND_OP_WAITRDY_INSTR: + ret = cs553x_wait_ready(cs553x, instr->ctx.waitrdy.timeout_ms); + break; + } + + if (instr->delay_ns) + ndelay(instr->delay_ns); + + return ret; +} + +static int cs553x_exec_op(struct nand_chip *this, + const struct nand_operation *op, + bool check_only) +{ + struct cs553x_nand_controller *cs553x = to_cs553x(this->controller); + unsigned int i; + int ret; + + if (check_only) + return true; + + /* De-assert the CE pin */ + writeb(0, cs553x->mmio + MM_NAND_CTL); + for (i = 0; i < op->ninstrs; i++) { + ret = cs553x_exec_instr(cs553x, &op->instrs[i]); + if (ret) + break; + } + + /* Re-assert the CE pin. 
*/ + writeb(CS_NAND_CTL_CE, cs553x->mmio + MM_NAND_CTL); + + return ret; +} + static void cs_enable_hwecc(struct nand_chip *this, int mode) { struct cs553x_nand_controller *cs553x = to_cs553x(this->controller); @@ -188,6 +307,10 @@ static int cs_calculate_ecc(struct nand_chip *this, const u_char *dat, static struct cs553x_nand_controller *controllers[4]; +static const struct nand_controller_ops cs553x_nand_controller_ops = { + .exec_op = cs553x_exec_op, +}; + static int __init cs553x_init_one(int cs, int mmio, unsigned long adr) { struct cs553x_nand_controller *controller; @@ -212,6 +335,7 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr) this = &controller->chip; nand_controller_init(&controller->base); + controller->base.ops = &cs553x_nand_controller_ops; this->controller = &controller->base; new_mtd = nand_to_mtd(this); -- GitLab From 51b71ac092eaf3597210b64255040071db1c10e2 Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Fri, 1 May 2020 11:06:50 +0200 Subject: [PATCH 0375/1971] mtd: rawnand: cs553x: Get rid of the legacy interface implementation Now that exec_op() is implemented we no longer need to implement the legacy hooks. Signed-off-by: Boris Brezillon Reviewed-by: Miquel Raynal Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200501090650.1138200-5-boris.brezillon@collabora.com --- drivers/mtd/nand/raw/cs553x_nand.c | 72 ------------------------------ 1 file changed, 72 deletions(-) diff --git a/drivers/mtd/nand/raw/cs553x_nand.c b/drivers/mtd/nand/raw/cs553x_nand.c index 3596edc6b7770..df5e24a2bbd7b 100644 --- a/drivers/mtd/nand/raw/cs553x_nand.c +++ b/drivers/mtd/nand/raw/cs553x_nand.c @@ -101,70 +101,6 @@ to_cs553x(struct nand_controller *controller) return container_of(controller, struct cs553x_nand_controller, base); } -static void cs553x_read_buf(struct nand_chip *this, u_char *buf, int len) -{ - struct cs553x_nand_controller *cs553x = to_cs553x(this->controller); - - while (unlikely(len > 0x800)) { - memcpy_fromio(buf, cs553x->mmio, 0x800); - buf += 0x800; - len -= 0x800; - } - memcpy_fromio(buf, cs553x->mmio, len); -} - -static void cs553x_write_buf(struct nand_chip *this, const u_char *buf, int len) -{ - struct cs553x_nand_controller *cs553x = to_cs553x(this->controller); - - while (unlikely(len > 0x800)) { - memcpy_toio(cs553x->mmio, buf, 0x800); - buf += 0x800; - len -= 0x800; - } - memcpy_toio(cs553x->mmio, buf, len); -} - -static unsigned char cs553x_read_byte(struct nand_chip *this) -{ - struct cs553x_nand_controller *cs553x = to_cs553x(this->controller); - - return readb(cs553x->mmio); -} - -static void cs553x_write_byte(struct nand_chip *this, u_char byte) -{ - struct cs553x_nand_controller *cs553x = to_cs553x(this->controller); - int i = 100000; - - while (i && readb(cs553x->mmio + MM_NAND_STS) & CS_NAND_CTLR_BUSY) { - udelay(1); - i--; - } - writeb(byte, cs553x->mmio + 0x801); -} - -static void cs553x_hwcontrol(struct nand_chip *this, int cmd, - unsigned int ctrl) -{ - struct cs553x_nand_controller *cs553x = to_cs553x(this->controller); - - if (ctrl & NAND_CTRL_CHANGE) { - unsigned char ctl = (ctrl & ~NAND_CTRL_CHANGE ) ^ 0x01; - writeb(ctl, cs553x->mmio + MM_NAND_CTL); - } - if (cmd != NAND_CMD_NONE) - cs553x_write_byte(this, cmd); -} - -static int cs553x_device_ready(struct nand_chip *this) -{ - struct cs553x_nand_controller *cs553x = to_cs553x(this->controller); - unsigned char foo = readb(cs553x->mmio + MM_NAND_STS); - - return (foo & CS_NAND_STS_FLASH_RDY) && !(foo & CS_NAND_CTLR_BUSY); -} - static int 
cs553x_write_ctrl_byte(struct cs553x_nand_controller *cs553x, u32 ctl, u8 data) { @@ -350,14 +286,6 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr) goto out_mtd; } - this->legacy.cmd_ctrl = cs553x_hwcontrol; - this->legacy.dev_ready = cs553x_device_ready; - this->legacy.read_byte = cs553x_read_byte; - this->legacy.read_buf = cs553x_read_buf; - this->legacy.write_buf = cs553x_write_buf; - - this->legacy.chip_delay = 0; - this->ecc.mode = NAND_ECC_HW; this->ecc.size = 256; this->ecc.bytes = 3; -- GitLab From 5338ef99c951483fb38dc537e3f7d74aebdab087 Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Sun, 3 May 2020 17:53:34 +0200 Subject: [PATCH 0376/1971] mtd: rawnand: toshiba: Add a specific init for TC58TEG5DCLTA00 TC58TEG5DCLTA00 is an MLC NAND which requires scrambling and supports SDR timings mode 5. Signed-off-by: Boris Brezillon Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200503155341.16712-2-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nand_toshiba.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/mtd/nand/raw/nand_toshiba.c b/drivers/mtd/nand/raw/nand_toshiba.c index f3dcd695b5dbf..6b887ce20f308 100644 --- a/drivers/mtd/nand/raw/nand_toshiba.c +++ b/drivers/mtd/nand/raw/nand_toshiba.c @@ -194,6 +194,14 @@ static void toshiba_nand_decode_id(struct nand_chip *chip) } } +static int tc58teg5dclta00_init(struct nand_chip *chip) +{ + chip->onfi_timing_mode_default = 5; + chip->options |= NAND_NEED_SCRAMBLING; + + return 0; +} + static int toshiba_nand_init(struct nand_chip *chip) { if (nand_is_slc(chip)) @@ -204,6 +212,9 @@ static int toshiba_nand_init(struct nand_chip *chip) chip->id.data[4] & TOSHIBA_NAND_ID4_IS_BENAND) toshiba_nand_benand_init(chip); + if (!strcmp("TC58TEG5DCLTA00", chip->parameters.model)) + tc58teg5dclta00_init(chip); + return 0; } -- GitLab From 18729b17769c73fdf854dad99cd22dd0290e7f00 Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Sun, 3 May 2020 17:53:35 +0200 Subject: [PATCH 0377/1971] mtd: rawnand: Define the "distance 3" MLC pairing scheme Define a new page pairing scheme for MLC NANDs with a distance of 3 pages between the lower and upper page. 
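As a quick illustration of the scheme, here is a minimal user-space sketch that mirrors the arithmetic of the pairing helpers added in the hunk below and prints each page's group/pair assignment; the figure of 16 pages per block is an arbitrary assumption made only for the demo:

/* Illustrative only: mirrors the dist-3 get_info() arithmetic to show
 * how the pages of one eraseblock are grouped into lower/upper pairs. */
#include <stdio.h>

static void dist3_get_info(int pages_per_block, int page, int *group, int *pair)
{
	int lastpage = pages_per_block - 1;
	int dist = (page == lastpage) ? 2 : 3;

	if (!page || (page & 1)) {	/* page 0 and odd pages: lower pages */
		*group = 0;
		*pair = (page + 1) / 2;
	} else {			/* even pages >= 2: upper pages */
		*group = 1;
		*pair = (page + 1 - dist) / 2;
	}
}

int main(void)
{
	int page, group, pair;

	for (page = 0; page < 16; page++) {	/* assumed 16 pages per block */
		dist3_get_info(16, page, &group, &pair);
		printf("page %2d -> group %d, pair %d\n", page, group, pair);
	}
	return 0;
}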
Signed-off-by: Boris Brezillon Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200503155341.16712-3-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/internals.h | 3 ++ drivers/mtd/nand/raw/nand_base.c | 50 ++++++++++++++++++++++++++++++++ 2 files changed, 53 insertions(+) diff --git a/drivers/mtd/nand/raw/internals.h b/drivers/mtd/nand/raw/internals.h index 9d0caadf940e7..bca9b3424646a 100644 --- a/drivers/mtd/nand/raw/internals.h +++ b/drivers/mtd/nand/raw/internals.h @@ -75,6 +75,9 @@ extern const struct nand_manufacturer_ops micron_nand_manuf_ops; extern const struct nand_manufacturer_ops samsung_nand_manuf_ops; extern const struct nand_manufacturer_ops toshiba_nand_manuf_ops; +/* MLC pairing schemes */ +extern const struct mtd_pairing_scheme dist3_pairing_scheme; + /* Core functions */ const struct nand_manufacturer *nand_get_manufacturer(u8 id); int nand_bbm_get_next_page(struct nand_chip *chip, int page); diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 52cf73c18f55a..106edd514c005 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -205,6 +205,56 @@ static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = { .free = nand_ooblayout_free_lp_hamming, }; +static int nand_pairing_dist3_get_info(struct mtd_info *mtd, int page, + struct mtd_pairing_info *info) +{ + int lastpage = (mtd->erasesize / mtd->writesize) - 1; + int dist = 3; + + if (page == lastpage) + dist = 2; + + if (!page || (page & 1)) { + info->group = 0; + info->pair = (page + 1) / 2; + } else { + info->group = 1; + info->pair = (page + 1 - dist) / 2; + } + + return 0; +} + +static int nand_pairing_dist3_get_wunit(struct mtd_info *mtd, + const struct mtd_pairing_info *info) +{ + int lastpair = ((mtd->erasesize / mtd->writesize) - 1) / 2; + int page = info->pair * 2; + int dist = 3; + + if (!info->group && !info->pair) + return 0; + + if (info->pair == lastpair && info->group) + dist = 2; + + if (!info->group) + page--; + else if (info->pair) + page += dist - 1; + + if (page >= mtd->erasesize / mtd->writesize) + return -EINVAL; + + return page; +} + +const struct mtd_pairing_scheme dist3_pairing_scheme = { + .ngroups = 2, + .get_info = nand_pairing_dist3_get_info, + .get_wunit = nand_pairing_dist3_get_wunit, +}; + static int check_offs_len(struct nand_chip *chip, loff_t ofs, uint64_t len) { int ret = 0; -- GitLab From d652f3a5bdaf51010191b69744d6719d1e977d17 Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Sun, 3 May 2020 17:53:36 +0200 Subject: [PATCH 0378/1971] mtd: rawnand: toshiba: Set the pairing scheme for TC58TEG5DCLTA00 TC58TEG5DCLTA00 uses a stride of 3 between its lower and upper page. Set the appropriate pairing scheme at init time. 
Signed-off-by: Boris Brezillon Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200503155341.16712-4-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nand_toshiba.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/mtd/nand/raw/nand_toshiba.c b/drivers/mtd/nand/raw/nand_toshiba.c index 6b887ce20f308..ae069905d7e4a 100644 --- a/drivers/mtd/nand/raw/nand_toshiba.c +++ b/drivers/mtd/nand/raw/nand_toshiba.c @@ -196,8 +196,11 @@ static void toshiba_nand_decode_id(struct nand_chip *chip) static int tc58teg5dclta00_init(struct nand_chip *chip) { + struct mtd_info *mtd = nand_to_mtd(chip); + chip->onfi_timing_mode_default = 5; chip->options |= NAND_NEED_SCRAMBLING; + mtd_set_pairing_scheme(mtd, &dist3_pairing_scheme); return 0; } -- GitLab From 9e3307a169537a6adc30b13bf9063e94990a5493 Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Sun, 3 May 2020 17:53:37 +0200 Subject: [PATCH 0379/1971] mtd: Add support for emulated SLC mode on MLC NANDs MLC NANDs can be made a bit more reliable if we only program the lower page of each pair. At least, this solves the paired-pages corruption issue. Signed-off-by: Boris Brezillon Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200503155341.16712-5-miquel.raynal@bootlin.com --- drivers/mtd/mtdcore.c | 189 ++++++++++++++++++++++++++++++--- drivers/mtd/mtdpart.c | 54 ++++++---- include/linux/mtd/mtd.h | 7 +- include/linux/mtd/partitions.h | 2 + include/uapi/mtd/mtd-abi.h | 1 + 5 files changed, 213 insertions(+), 40 deletions(-) diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 2916674208b32..50437b4ffe767 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -617,6 +617,19 @@ int add_mtd_device(struct mtd_info *mtd) !(mtd->flags & MTD_NO_ERASE))) return -EINVAL; + /* + * MTD_SLC_ON_MLC_EMULATION can only be set on partitions, when the + * master is an MLC NAND and has a proper pairing scheme defined. + * We also reject masters that implement ->_writev() for now, because + * NAND controller drivers don't implement this hook, and adding the + * SLC -> MLC address/length conversion to this path is useless if we + * don't have a user. 
+ */ + if (mtd->flags & MTD_SLC_ON_MLC_EMULATION && + (!mtd_is_partition(mtd) || master->type != MTD_MLCNANDFLASH || + !master->pairing || master->_writev)) + return -EINVAL; + mutex_lock(&mtd_table_mutex); i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL); @@ -632,6 +645,14 @@ int add_mtd_device(struct mtd_info *mtd) if (mtd->bitflip_threshold == 0) mtd->bitflip_threshold = mtd->ecc_strength; + if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) { + int ngroups = mtd_pairing_groups(master); + + mtd->erasesize /= ngroups; + mtd->size = (u64)mtd_div_by_eb(mtd->size, master) * + mtd->erasesize; + } + if (is_power_of_2(mtd->erasesize)) mtd->erasesize_shift = ffs(mtd->erasesize) - 1; else @@ -1074,9 +1095,11 @@ int mtd_erase(struct mtd_info *mtd, struct erase_info *instr) { struct mtd_info *master = mtd_get_master(mtd); u64 mst_ofs = mtd_get_master_ofs(mtd, 0); + struct erase_info adjinstr; int ret; instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN; + adjinstr = *instr; if (!mtd->erasesize || !master->_erase) return -ENOTSUPP; @@ -1091,12 +1114,27 @@ int mtd_erase(struct mtd_info *mtd, struct erase_info *instr) ledtrig_mtd_activity(); - instr->addr += mst_ofs; - ret = master->_erase(master, instr); - if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN) - instr->fail_addr -= mst_ofs; + if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) { + adjinstr.addr = (loff_t)mtd_div_by_eb(instr->addr, mtd) * + master->erasesize; + adjinstr.len = ((u64)mtd_div_by_eb(instr->addr + instr->len, mtd) * + master->erasesize) - + adjinstr.addr; + } + + adjinstr.addr += mst_ofs; + + ret = master->_erase(master, &adjinstr); + + if (adjinstr.fail_addr != MTD_FAIL_ADDR_UNKNOWN) { + instr->fail_addr = adjinstr.fail_addr - mst_ofs; + if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) { + instr->fail_addr = mtd_div_by_eb(instr->fail_addr, + master); + instr->fail_addr *= mtd->erasesize; + } + } - instr->addr -= mst_ofs; return ret; } EXPORT_SYMBOL_GPL(mtd_erase); @@ -1276,6 +1314,101 @@ static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs, return 0; } +static int mtd_read_oob_std(struct mtd_info *mtd, loff_t from, + struct mtd_oob_ops *ops) +{ + struct mtd_info *master = mtd_get_master(mtd); + int ret; + + from = mtd_get_master_ofs(mtd, from); + if (master->_read_oob) + ret = master->_read_oob(master, from, ops); + else + ret = master->_read(master, from, ops->len, &ops->retlen, + ops->datbuf); + + return ret; +} + +static int mtd_write_oob_std(struct mtd_info *mtd, loff_t to, + struct mtd_oob_ops *ops) +{ + struct mtd_info *master = mtd_get_master(mtd); + int ret; + + to = mtd_get_master_ofs(mtd, to); + if (master->_write_oob) + ret = master->_write_oob(master, to, ops); + else + ret = master->_write(master, to, ops->len, &ops->retlen, + ops->datbuf); + + return ret; +} + +static int mtd_io_emulated_slc(struct mtd_info *mtd, loff_t start, bool read, + struct mtd_oob_ops *ops) +{ + struct mtd_info *master = mtd_get_master(mtd); + int ngroups = mtd_pairing_groups(master); + int npairs = mtd_wunit_per_eb(master) / ngroups; + struct mtd_oob_ops adjops = *ops; + unsigned int wunit, oobavail; + struct mtd_pairing_info info; + int max_bitflips = 0; + u32 ebofs, pageofs; + loff_t base, pos; + + ebofs = mtd_mod_by_eb(start, mtd); + base = (loff_t)mtd_div_by_eb(start, mtd) * master->erasesize; + info.group = 0; + info.pair = mtd_div_by_ws(ebofs, mtd); + pageofs = mtd_mod_by_ws(ebofs, mtd); + oobavail = mtd_oobavail(mtd, ops); + + while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) { + int ret; + + if (info.pair >= npairs) { + info.pair = 0; + base += 
master->erasesize; + } + + wunit = mtd_pairing_info_to_wunit(master, &info); + pos = mtd_wunit_to_offset(mtd, base, wunit); + + adjops.len = ops->len - ops->retlen; + if (adjops.len > mtd->writesize - pageofs) + adjops.len = mtd->writesize - pageofs; + + adjops.ooblen = ops->ooblen - ops->oobretlen; + if (adjops.ooblen > oobavail - adjops.ooboffs) + adjops.ooblen = oobavail - adjops.ooboffs; + + if (read) { + ret = mtd_read_oob_std(mtd, pos + pageofs, &adjops); + if (ret > 0) + max_bitflips = max(max_bitflips, ret); + } else { + ret = mtd_write_oob_std(mtd, pos + pageofs, &adjops); + } + + if (ret < 0) + return ret; + + max_bitflips = max(max_bitflips, ret); + ops->retlen += adjops.retlen; + ops->oobretlen += adjops.oobretlen; + adjops.datbuf += adjops.retlen; + adjops.oobbuf += adjops.oobretlen; + adjops.ooboffs = 0; + pageofs = 0; + info.pair++; + } + + return max_bitflips; +} + int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops) { struct mtd_info *master = mtd_get_master(mtd); @@ -1294,12 +1427,10 @@ int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops) if (!master->_read_oob && (!master->_read || ops->oobbuf)) return -EOPNOTSUPP; - from = mtd_get_master_ofs(mtd, from); - if (master->_read_oob) - ret_code = master->_read_oob(master, from, ops); + if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) + ret_code = mtd_io_emulated_slc(mtd, from, true, ops); else - ret_code = master->_read(master, from, ops->len, &ops->retlen, - ops->datbuf); + ret_code = mtd_read_oob_std(mtd, from, ops); mtd_update_ecc_stats(mtd, master, &old_stats); @@ -1338,13 +1469,10 @@ int mtd_write_oob(struct mtd_info *mtd, loff_t to, if (!master->_write_oob && (!master->_write || ops->oobbuf)) return -EOPNOTSUPP; - to = mtd_get_master_ofs(mtd, to); + if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) + return mtd_io_emulated_slc(mtd, to, false, ops); - if (master->_write_oob) - return master->_write_oob(master, to, ops); - else - return master->_write(master, to, ops->len, &ops->retlen, - ops->datbuf); + return mtd_write_oob_std(mtd, to, ops); } EXPORT_SYMBOL_GPL(mtd_write_oob); @@ -1817,6 +1945,12 @@ int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) return -EINVAL; if (!len) return 0; + + if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) { + ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize; + len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize; + } + return master->_lock(master, mtd_get_master_ofs(mtd, ofs), len); } EXPORT_SYMBOL_GPL(mtd_lock); @@ -1831,6 +1965,12 @@ int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) return -EINVAL; if (!len) return 0; + + if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) { + ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize; + len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize; + } + return master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len); } EXPORT_SYMBOL_GPL(mtd_unlock); @@ -1845,6 +1985,12 @@ int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len) return -EINVAL; if (!len) return 0; + + if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) { + ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize; + len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize; + } + return master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len); } EXPORT_SYMBOL_GPL(mtd_is_locked); @@ -1857,6 +2003,10 @@ int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs) return -EINVAL; if (!master->_block_isreserved) return 0; + + if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) + ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * 
master->erasesize; + return master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs)); } EXPORT_SYMBOL_GPL(mtd_block_isreserved); @@ -1869,6 +2019,10 @@ int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs) return -EINVAL; if (!master->_block_isbad) return 0; + + if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) + ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize; + return master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs)); } EXPORT_SYMBOL_GPL(mtd_block_isbad); @@ -1885,6 +2039,9 @@ int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs) if (!(mtd->flags & MTD_WRITEABLE)) return -EROFS; + if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) + ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize; + ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs)); if (ret) return ret; diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index 3f6025684f589..c3575b686f796 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c @@ -35,9 +35,12 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent, const struct mtd_partition *part, int partno, uint64_t cur_offset) { - int wr_alignment = (parent->flags & MTD_NO_ERASE) ? parent->writesize : - parent->erasesize; - struct mtd_info *child, *master = mtd_get_master(parent); + struct mtd_info *master = mtd_get_master(parent); + int wr_alignment = (parent->flags & MTD_NO_ERASE) ? + master->writesize : master->erasesize; + u64 parent_size = mtd_is_partition(parent) ? + parent->part.size : parent->size; + struct mtd_info *child; u32 remainder; char *name; u64 tmp; @@ -56,8 +59,9 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent, /* set up the MTD object for this partition */ child->type = parent->type; child->part.flags = parent->flags & ~part->mask_flags; + child->part.flags |= part->add_flags; child->flags = child->part.flags; - child->size = part->size; + child->part.size = part->size; child->writesize = parent->writesize; child->writebufsize = parent->writebufsize; child->oobsize = parent->oobsize; @@ -98,29 +102,29 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent, } if (child->part.offset == MTDPART_OFS_RETAIN) { child->part.offset = cur_offset; - if (parent->size - child->part.offset >= child->size) { - child->size = parent->size - child->part.offset - - child->size; + if (parent_size - child->part.offset >= child->part.size) { + child->part.size = parent_size - child->part.offset - + child->part.size; } else { printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n", - part->name, parent->size - child->part.offset, - child->size); + part->name, parent_size - child->part.offset, + child->part.size); /* register to preserve ordering */ goto out_register; } } - if (child->size == MTDPART_SIZ_FULL) - child->size = parent->size - child->part.offset; + if (child->part.size == MTDPART_SIZ_FULL) + child->part.size = parent_size - child->part.offset; printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", - child->part.offset, child->part.offset + child->size, + child->part.offset, child->part.offset + child->part.size, child->name); /* let's do some sanity checks */ - if (child->part.offset >= parent->size) { + if (child->part.offset >= parent_size) { /* let's register it anyway to preserve ordering */ child->part.offset = 0; - child->size = 0; + child->part.size = 0; /* Initialize ->erasesize to make add_mtd_device() happy. 
*/ child->erasesize = parent->erasesize; @@ -128,15 +132,16 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent, part->name); goto out_register; } - if (child->part.offset + child->size > parent->size) { - child->size = parent->size - child->part.offset; + if (child->part.offset + child->part.size > parent->size) { + child->part.size = parent_size - child->part.offset; printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n", - part->name, parent->name, child->size); + part->name, parent->name, child->part.size); } + if (parent->numeraseregions > 1) { /* Deal with variable erase size stuff */ int i, max = parent->numeraseregions; - u64 end = child->part.offset + child->size; + u64 end = child->part.offset + child->part.size; struct mtd_erase_region_info *regions = parent->eraseregions; /* Find the first erase regions which is part of this @@ -156,7 +161,7 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent, BUG_ON(child->erasesize == 0); } else { /* Single erase size */ - child->erasesize = parent->erasesize; + child->erasesize = master->erasesize; } /* @@ -178,7 +183,7 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent, part->name); } - tmp = mtd_get_master_ofs(child, 0) + child->size; + tmp = mtd_get_master_ofs(child, 0) + child->part.size; remainder = do_div(tmp, wr_alignment); if ((child->flags & MTD_WRITEABLE) && remainder) { child->flags &= ~MTD_WRITEABLE; @@ -186,6 +191,7 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent, part->name); } + child->size = child->part.size; child->ecc_step_size = parent->ecc_step_size; child->ecc_strength = parent->ecc_strength; child->bitflip_threshold = parent->bitflip_threshold; @@ -193,7 +199,7 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent, if (master->_block_isbad) { uint64_t offs = 0; - while (offs < child->size) { + while (offs < child->part.size) { if (mtd_block_isreserved(child, offs)) child->ecc_stats.bbtblocks++; else if (mtd_block_isbad(child, offs)) @@ -234,6 +240,8 @@ int mtd_add_partition(struct mtd_info *parent, const char *name, long long offset, long long length) { struct mtd_info *master = mtd_get_master(parent); + u64 parent_size = mtd_is_partition(parent) ? + parent->part.size : parent->size; struct mtd_partition part; struct mtd_info *child; int ret = 0; @@ -244,7 +252,7 @@ int mtd_add_partition(struct mtd_info *parent, const char *name, return -EINVAL; if (length == MTDPART_SIZ_FULL) - length = parent->size - offset; + length = parent_size - offset; if (length <= 0) return -EINVAL; @@ -419,7 +427,7 @@ int add_mtd_partitions(struct mtd_info *parent, /* Look for subpartitions */ parse_mtd_partitions(child, parts[i].types, NULL); - cur_offset = child->part.offset + child->size; + cur_offset = child->part.offset + child->part.size; } return 0; diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 2d1f4a61f4ac5..157357ec14419 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -200,6 +200,8 @@ struct mtd_debug_info { * * @node: list node used to add an MTD partition to the parent partition list * @offset: offset of the partition relatively to the parent offset + * @size: partition size. 
Should be equal to mtd->size unless + * MTD_SLC_ON_MLC_EMULATION is set * @flags: original flags (before the mtdpart logic decided to tweak them based * on flash constraints, like eraseblock/pagesize alignment) * @@ -209,6 +211,7 @@ struct mtd_debug_info { struct mtd_part { struct list_head node; u64 offset; + u64 size; u32 flags; }; @@ -622,7 +625,9 @@ static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd) static inline int mtd_wunit_per_eb(struct mtd_info *mtd) { - return mtd->erasesize / mtd->writesize; + struct mtd_info *master = mtd_get_master(mtd); + + return master->erasesize / mtd->writesize; } static inline int mtd_offset_to_wunit(struct mtd_info *mtd, loff_t offs) diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h index e545c050d3e8c..b74a539ec5819 100644 --- a/include/linux/mtd/partitions.h +++ b/include/linux/mtd/partitions.h @@ -37,6 +37,7 @@ * master MTD flag set for the corresponding MTD partition. * For example, to force a read-only partition, simply adding * MTD_WRITEABLE to the mask_flags will do the trick. + * add_flags: contains flags to add to the parent flags * * Note: writeable partitions require their size and offset be * erasesize aligned (e.g. use MTDPART_OFS_NEXTBLK). @@ -48,6 +49,7 @@ struct mtd_partition { uint64_t size; /* partition size */ uint64_t offset; /* offset within the master MTD space */ uint32_t mask_flags; /* master MTD flags to mask out for this partition */ + uint32_t add_flags; /* flags to add to the partition */ struct device_node *of_node; }; diff --git a/include/uapi/mtd/mtd-abi.h b/include/uapi/mtd/mtd-abi.h index 47ffe3208c270..4b48fbf7d343c 100644 --- a/include/uapi/mtd/mtd-abi.h +++ b/include/uapi/mtd/mtd-abi.h @@ -104,6 +104,7 @@ struct mtd_write_req { #define MTD_BIT_WRITEABLE 0x800 /* Single bits can be flipped */ #define MTD_NO_ERASE 0x1000 /* No erase necessary */ #define MTD_POWERUP_LOCK 0x2000 /* Always locked after reset */ +#define MTD_SLC_ON_MLC_EMULATION 0x4000 /* Emulate SLC behavior on MLC NANDs */ /* Some common devices / combinations of capabilities */ #define MTD_CAP_ROM 0 -- GitLab From 422928a040fe17d17ded69c57903c7908423c7ef Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Sun, 3 May 2020 17:53:38 +0200 Subject: [PATCH 0380/1971] dt-bindings: mtd: partition: Document the slc-mode property Add a boolean property to force a specific partition attached to an MLC NAND to be accessed in an emulated SLC mode this making this partition immune to paired-pages corruptions. Signed-off-by: Boris Brezillon Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200503155341.16712-6-miquel.raynal@bootlin.com --- Documentation/devicetree/bindings/mtd/partition.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Documentation/devicetree/bindings/mtd/partition.txt b/Documentation/devicetree/bindings/mtd/partition.txt index afbbd870496de..4a39698221a21 100644 --- a/Documentation/devicetree/bindings/mtd/partition.txt +++ b/Documentation/devicetree/bindings/mtd/partition.txt @@ -61,6 +61,9 @@ Optional properties: clobbered. 
- lock : Do not unlock the partition at initialization time (not supported on all devices) +- slc-mode: This parameter, if present, allows one to emulate SLC mode on a + partition attached to an MLC NAND thus making this partition immune to + paired-pages corruptions Examples: -- GitLab From 1998053c8e80a22cf84f40dfea689bb5a94cddfa Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Sun, 3 May 2020 17:53:39 +0200 Subject: [PATCH 0381/1971] mtd: partitions: ofpart: Parse the slc-mode property Parse the slc-mode property and set the MTD_MLC_IN_SLC_MODE flag when present. Signed-off-by: Boris Brezillon Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200503155341.16712-7-miquel.raynal@bootlin.com --- drivers/mtd/parsers/ofpart.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/mtd/parsers/ofpart.c b/drivers/mtd/parsers/ofpart.c index 3caeabf27987c..daf507c123e6f 100644 --- a/drivers/mtd/parsers/ofpart.c +++ b/drivers/mtd/parsers/ofpart.c @@ -117,6 +117,9 @@ static int parse_fixed_partitions(struct mtd_info *master, if (of_get_property(pp, "lock", &len)) parts[i].mask_flags |= MTD_POWERUP_LOCK; + if (of_property_read_bool(pp, "slc-mode")) + parts[i].add_flags |= MTD_SLC_ON_MLC_EMULATION; + i++; } -- GitLab From 568d841b6837fc2bb5c45d6f3e56b65ffb5ecac2 Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Sun, 3 May 2020 17:53:40 +0200 Subject: [PATCH 0382/1971] mtd: cmdlinepart: Add an slc option to use SLC mode on a part Add a new option to set the MTD_SLC_ON_MLC_EMULATION flag. Signed-off-by: Boris Brezillon Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200503155341.16712-8-miquel.raynal@bootlin.com --- drivers/mtd/parsers/cmdlinepart.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/mtd/parsers/cmdlinepart.c b/drivers/mtd/parsers/cmdlinepart.c index c86f2db8c882d..af712f1519c57 100644 --- a/drivers/mtd/parsers/cmdlinepart.c +++ b/drivers/mtd/parsers/cmdlinepart.c @@ -9,7 +9,7 @@ * * mtdparts=[; := :[,] - * := [@][][ro][lk] + * := [@][][ro][lk][slc] * := unique name used in mapping driver/device (mtd->name) * := standard linux memsize OR "-" to denote all remaining space * size is automatically truncated at end of device @@ -92,7 +92,7 @@ static struct mtd_partition * newpart(char *s, int name_len; unsigned char *extra_mem; char delim; - unsigned int mask_flags; + unsigned int mask_flags, add_flags; /* fetch the partition size */ if (*s == '-') { @@ -109,6 +109,7 @@ static struct mtd_partition * newpart(char *s, /* fetch partition name and flags */ mask_flags = 0; /* this is going to be a regular partition */ + add_flags = 0; delim = 0; /* check for offset */ @@ -152,6 +153,12 @@ static struct mtd_partition * newpart(char *s, s += 2; } + /* if slc is found use emulated SLC mode on this partition*/ + if (!strncmp(s, "slc", 3)) { + add_flags |= MTD_SLC_ON_MLC_EMULATION; + s += 3; + } + /* test if more partitions are following */ if (*s == ',') { if (size == SIZE_REMAINING) { @@ -184,6 +191,7 @@ static struct mtd_partition * newpart(char *s, parts[this_part].size = size; parts[this_part].offset = offset; parts[this_part].mask_flags = mask_flags; + parts[this_part].add_flags = add_flags; if (name) strlcpy(extra_mem, name, name_len + 1); else -- GitLab From 66aaba3a07cb8e53878bbeabe54660a646b55a34 Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Sun, 3 May 2020 17:53:41 +0200 Subject: [PATCH 0383/1971] ubi: Relax the 'no MLC' rule and allow MLCs operating in SLC mode The MTD layer provides an 
SLC mode (purely software emulation of SLC behavior) addressing the paired-pages corruption issue, which was the main reason for refusing attaching MLC NANDs to UBI. Relax this rule and allow partitions that have the MTD_EMULATE_SLC_ON_MLC flag set to be attached. Signed-off-by: Boris Brezillon Signed-off-by: Miquel Raynal Acked-by: Richard Weinberger Link: https://lore.kernel.org/linux-mtd/20200503155341.16712-9-miquel.raynal@bootlin.com --- drivers/mtd/ubi/build.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index 12c02342149ca..e85b04e9716b2 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c @@ -867,8 +867,11 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, * Both UBI and UBIFS have been designed for SLC NAND and NOR flashes. * MLC NAND is different and needs special care, otherwise UBI or UBIFS * will die soon and you will lose all your data. + * Relax this rule if the partition we're attaching to operates in SLC + * mode. */ - if (mtd->type == MTD_MLCNANDFLASH) { + if (mtd->type == MTD_MLCNANDFLASH && + !(mtd->flags & MTD_SLC_ON_MLC_EMULATION)) { pr_err("ubi: refuse attaching mtd%d - MLC NAND is not supported\n", mtd->index); return -EINVAL; -- GitLab From dd6ed5c9890b759ba1b56697b9f3f50e71909e43 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Thu, 7 May 2020 12:52:29 +0200 Subject: [PATCH 0384/1971] mtd: rawnand: Translate obscure bitfields into readable macros Use the BIT() macro instead of defining a 8-digit value. Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200507105241.14299-2-miquel.raynal@bootlin.com --- include/linux/mtd/rawnand.h | 38 ++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 21873168ba4df..4b58de8423406 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -129,36 +129,36 @@ enum nand_ecc_algo { * features. */ /* Buswidth is 16 bit */ -#define NAND_BUSWIDTH_16 0x00000002 +#define NAND_BUSWIDTH_16 BIT(1) /* Chip has cache program function */ -#define NAND_CACHEPRG 0x00000008 +#define NAND_CACHEPRG BIT(3) /* * Chip requires ready check on read (for auto-incremented sequential read). * True only for small page devices; large page devices do not support * autoincrement. */ -#define NAND_NEED_READRDY 0x00000100 +#define NAND_NEED_READRDY BIT(8) /* Chip does not allow subpage writes */ -#define NAND_NO_SUBPAGE_WRITE 0x00000200 +#define NAND_NO_SUBPAGE_WRITE BIT(9) /* Device is one of 'new' xD cards that expose fake nand command set */ -#define NAND_BROKEN_XD 0x00000400 +#define NAND_BROKEN_XD BIT(10) /* Device behaves just like nand, but is readonly */ -#define NAND_ROM 0x00000800 +#define NAND_ROM BIT(11) /* Device supports subpage reads */ -#define NAND_SUBPAGE_READ 0x00001000 +#define NAND_SUBPAGE_READ BIT(12) /* * Some MLC NANDs need data scrambling to limit bitflips caused by repeated * patterns. */ -#define NAND_NEED_SCRAMBLING 0x00002000 +#define NAND_NEED_SCRAMBLING BIT(13) /* Device needs 3rd row address cycle */ -#define NAND_ROW_ADDR_3 0x00004000 +#define NAND_ROW_ADDR_3 BIT(14) /* Options valid for Samsung large page devices */ #define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG @@ -173,9 +173,9 @@ enum nand_ecc_algo { * Position within the block: Each of these pages needs to be checked for a * bad block marking pattern. 
*/ -#define NAND_BBM_FIRSTPAGE 0x01000000 -#define NAND_BBM_SECONDPAGE 0x02000000 -#define NAND_BBM_LASTPAGE 0x04000000 +#define NAND_BBM_FIRSTPAGE BIT(24) +#define NAND_BBM_SECONDPAGE BIT(25) +#define NAND_BBM_LASTPAGE BIT(26) /* Position within the OOB data of the page */ #define NAND_BBM_POS_SMALL 5 @@ -183,21 +183,21 @@ enum nand_ecc_algo { /* Non chip related options */ /* This option skips the bbt scan during initialization. */ -#define NAND_SKIP_BBTSCAN 0x00010000 +#define NAND_SKIP_BBTSCAN BIT(16) /* Chip may not exist, so silence any errors in scan */ -#define NAND_SCAN_SILENT_NODEV 0x00040000 +#define NAND_SCAN_SILENT_NODEV BIT(18) /* * Autodetect nand buswidth with readid/onfi. * This suppose the driver will configure the hardware in 8 bits mode * when calling nand_scan_ident, and update its configuration * before calling nand_scan_tail. */ -#define NAND_BUSWIDTH_AUTO 0x00080000 +#define NAND_BUSWIDTH_AUTO BIT(19) /* * This option could be defined by controller drivers to protect against * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers */ -#define NAND_USE_BOUNCE_BUFFER 0x00100000 +#define NAND_USE_BOUNCE_BUFFER BIT(20) /* * In case your controller is implementing ->legacy.cmd_ctrl() and is relying @@ -207,20 +207,20 @@ enum nand_ecc_algo { * If your controller already takes care of this delay, you don't need to set * this flag. */ -#define NAND_WAIT_TCCS 0x00200000 +#define NAND_WAIT_TCCS BIT(21) /* * Whether the NAND chip is a boot medium. Drivers might use this information * to select ECC algorithms supported by the boot ROM or similar restrictions. */ -#define NAND_IS_BOOT_MEDIUM 0x00400000 +#define NAND_IS_BOOT_MEDIUM BIT(22) /* * Do not try to tweak the timings at runtime. This is needed when the * controller initializes the timings on itself or when it relies on * configuration done by the bootloader. */ -#define NAND_KEEP_TIMINGS 0x00800000 +#define NAND_KEEP_TIMINGS BIT(23) /* Cell info constants */ #define NAND_CI_CHIPNR_MSK 0x03 -- GitLab From 96d627bdf112d116a79c86435550cd18b9a9d82b Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Thu, 7 May 2020 12:52:30 +0200 Subject: [PATCH 0385/1971] mtd: rawnand: Reorder the nand_chip->options flags These flags are in a strange order, reorder the list, add spaces when it is relevant, pack definitions that are related. There is no functional change. Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200507105241.14299-3-miquel.raynal@bootlin.com --- include/linux/mtd/rawnand.h | 57 +++++++++++++++++++------------------ 1 file changed, 30 insertions(+), 27 deletions(-) diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 4b58de8423406..e70fea67030b2 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -118,20 +118,25 @@ enum nand_ecc_algo { #define NAND_ECC_GENERIC_ERASED_CHECK BIT(0) #define NAND_ECC_MAXIMIZE BIT(1) -/* - * When using software implementation of Hamming, we can specify which byte - * ordering should be used. - */ -#define NAND_ECC_SOFT_HAMMING_SM_ORDER BIT(2) - /* * Option constants for bizarre disfunctionality and real * features. */ + /* Buswidth is 16 bit */ #define NAND_BUSWIDTH_16 BIT(1) + +/* + * When using software implementation of Hamming, we can specify which byte + * ordering should be used. 
+ */ +#define NAND_ECC_SOFT_HAMMING_SM_ORDER BIT(2) + /* Chip has cache program function */ #define NAND_CACHEPRG BIT(3) +/* Options valid for Samsung large page devices */ +#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG + /* * Chip requires ready check on read (for auto-incremented sequential read). * True only for small page devices; large page devices do not support @@ -150,6 +155,8 @@ enum nand_ecc_algo { /* Device supports subpage reads */ #define NAND_SUBPAGE_READ BIT(12) +/* Macros to identify the above */ +#define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ)) /* * Some MLC NANDs need data scrambling to limit bitflips caused by repeated @@ -160,32 +167,12 @@ enum nand_ecc_algo { /* Device needs 3rd row address cycle */ #define NAND_ROW_ADDR_3 BIT(14) -/* Options valid for Samsung large page devices */ -#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG - -/* Macros to identify the above */ -#define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ)) - -/* - * There are different places where the manufacturer stores the factory bad - * block markers. - * - * Position within the block: Each of these pages needs to be checked for a - * bad block marking pattern. - */ -#define NAND_BBM_FIRSTPAGE BIT(24) -#define NAND_BBM_SECONDPAGE BIT(25) -#define NAND_BBM_LASTPAGE BIT(26) - -/* Position within the OOB data of the page */ -#define NAND_BBM_POS_SMALL 5 -#define NAND_BBM_POS_LARGE 0 - /* Non chip related options */ /* This option skips the bbt scan during initialization. */ #define NAND_SKIP_BBTSCAN BIT(16) /* Chip may not exist, so silence any errors in scan */ #define NAND_SCAN_SILENT_NODEV BIT(18) + /* * Autodetect nand buswidth with readid/onfi. * This suppose the driver will configure the hardware in 8 bits mode @@ -193,6 +180,7 @@ enum nand_ecc_algo { * before calling nand_scan_tail. */ #define NAND_BUSWIDTH_AUTO BIT(19) + /* * This option could be defined by controller drivers to protect against * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers @@ -222,11 +210,26 @@ enum nand_ecc_algo { */ #define NAND_KEEP_TIMINGS BIT(23) +/* + * There are different places where the manufacturer stores the factory bad + * block markers. + * + * Position within the block: Each of these pages needs to be checked for a + * bad block marking pattern. + */ +#define NAND_BBM_FIRSTPAGE BIT(24) +#define NAND_BBM_SECONDPAGE BIT(25) +#define NAND_BBM_LASTPAGE BIT(26) + /* Cell info constants */ #define NAND_CI_CHIPNR_MSK 0x03 #define NAND_CI_CELLTYPE_MSK 0x0C #define NAND_CI_CELLTYPE_SHIFT 2 +/* Position within the OOB data of the page */ +#define NAND_BBM_POS_SMALL 5 +#define NAND_BBM_POS_LARGE 0 + /** * struct nand_parameters - NAND generic parameters from the parameter page * @model: Model name -- GitLab From ce8148d7b8f204a18188e3cd7386c8dddbb461a1 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Thu, 7 May 2020 12:52:31 +0200 Subject: [PATCH 0386/1971] mtd: rawnand: Rename a NAND chip option NAND controller drivers can set the NAND_USE_BOUNCE_BUFFER flag to a chip 'option' field. With this flag, the core is responsible of providing DMA-able buffers. The current behavior is to not force the use of a bounce buffer when the core thinks this is not needed. So in the end the name is a bit misleading, because in theory we will always have a DMA buffer but in practice it will not always be a bounce buffer. Rename this flag NAND_USES_DMA to be more accurate. 
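A minimal, self-contained sketch of what the renamed flag means in practice (user-space, purely illustrative): the core only questions the caller's buffer when the controller driver declared DMA transfers. BIT() is open-coded so the snippet builds outside the kernel, and the 16-byte alignment constraint is an assumption for the demo; the in-kernel check additionally involves virt_addr_valid().

#include <stdint.h>
#include <stdio.h>

#define BIT(n)			(1UL << (n))
#define NAND_USES_DMA		BIT(20)	/* value taken from the hunk below */

static int core_wants_bounce(unsigned long options, const void *buf)
{
	if (!(options & NAND_USES_DMA))
		return 0;			/* PIO controller: any buffer is fine */
	return ((uintptr_t)buf & 15) != 0;	/* assumed DMA alignment constraint */
}

int main(void)
{
	static _Alignas(16) unsigned char page[4096];

	printf("PIO controller, odd buffer : %d\n", core_wants_bounce(0, page + 1));
	printf("DMA controller, odd buffer : %d\n", core_wants_bounce(NAND_USES_DMA, page + 1));
	printf("DMA controller, page buffer: %d\n", core_wants_bounce(NAND_USES_DMA, page));
	return 0;
}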
Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200507105241.14299-4-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/atmel/nand-controller.c | 2 +- drivers/mtd/nand/raw/brcmnand/brcmnand.c | 2 +- drivers/mtd/nand/raw/denali.c | 2 +- drivers/mtd/nand/raw/meson_nand.c | 2 +- drivers/mtd/nand/raw/mtk_nand.c | 2 +- drivers/mtd/nand/raw/nand_base.c | 4 ++-- drivers/mtd/nand/raw/qcom_nandc.c | 2 +- drivers/mtd/nand/raw/stm32_fmc2_nand.c | 2 +- drivers/mtd/nand/raw/sunxi_nand.c | 2 +- drivers/mtd/nand/raw/tango_nand.c | 2 +- drivers/mtd/nand/raw/tegra_nand.c | 2 +- include/linux/mtd/rawnand.h | 2 +- 12 files changed, 13 insertions(+), 13 deletions(-) diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c index 3ba17a98df4dd..46a3724a788ec 100644 --- a/drivers/mtd/nand/raw/atmel/nand-controller.c +++ b/drivers/mtd/nand/raw/atmel/nand-controller.c @@ -1494,7 +1494,7 @@ static void atmel_nand_init(struct atmel_nand_controller *nc, * suitable for DMA. */ if (nc->dmac) - chip->options |= NAND_USE_BOUNCE_BUFFER; + chip->options |= NAND_USES_DMA; /* Default to HW ECC if pmecc is available. */ if (nc->pmecc) diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c index 57076c3d98dc9..fe7cd3aa0cd6c 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c @@ -2576,7 +2576,7 @@ static int brcmnand_attach_chip(struct nand_chip *chip) * to/from, and have nand_base pass us a bounce buffer instead, as * needed. */ - chip->options |= NAND_USE_BOUNCE_BUFFER; + chip->options |= NAND_USES_DMA; if (chip->bbt_options & NAND_BBT_USE_FLASH) chip->bbt_options |= NAND_BBT_NO_OOB; diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c index 2fcd2baf6e352..7a76b761dd0b2 100644 --- a/drivers/mtd/nand/raw/denali.c +++ b/drivers/mtd/nand/raw/denali.c @@ -1226,7 +1226,7 @@ int denali_chip_init(struct denali_controller *denali, mtd->name = "denali-nand"; if (denali->dma_avail) { - chip->options |= NAND_USE_BOUNCE_BUFFER; + chip->options |= NAND_USES_DMA; chip->buf_align = 16; } diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c index e961f7bebf0a8..3f376471f3f75 100644 --- a/drivers/mtd/nand/raw/meson_nand.c +++ b/drivers/mtd/nand/raw/meson_nand.c @@ -1269,7 +1269,7 @@ meson_nfc_nand_chip_init(struct device *dev, nand_set_flash_node(nand, np); nand_set_controller_data(nand, nfc); - nand->options |= NAND_USE_BOUNCE_BUFFER; + nand->options |= NAND_USES_DMA; mtd = nand_to_mtd(nand); mtd->owner = THIS_MODULE; mtd->dev.parent = dev; diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c index ef149e8b26d0b..e7ec30e784fd1 100644 --- a/drivers/mtd/nand/raw/mtk_nand.c +++ b/drivers/mtd/nand/raw/mtk_nand.c @@ -1380,7 +1380,7 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc, nand_set_flash_node(nand, np); nand_set_controller_data(nand, nfc); - nand->options |= NAND_USE_BOUNCE_BUFFER | NAND_SUBPAGE_READ; + nand->options |= NAND_USES_DMA | NAND_SUBPAGE_READ; nand->legacy.dev_ready = mtk_nfc_dev_ready; nand->legacy.select_chip = mtk_nfc_select_chip; nand->legacy.write_byte = mtk_nfc_write_byte; diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 106edd514c005..906b8cd94bc4b 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -3241,7 +3241,7 @@ static int nand_do_read_ops(struct 
nand_chip *chip, loff_t from, if (!aligned) use_bufpoi = 1; - else if (chip->options & NAND_USE_BOUNCE_BUFFER) + else if (chip->options & NAND_USES_DMA) use_bufpoi = !virt_addr_valid(buf) || !IS_ALIGNED((unsigned long)buf, chip->buf_align); @@ -4067,7 +4067,7 @@ static int nand_do_write_ops(struct nand_chip *chip, loff_t to, if (part_pagewr) use_bufpoi = 1; - else if (chip->options & NAND_USE_BOUNCE_BUFFER) + else if (chip->options & NAND_USES_DMA) use_bufpoi = !virt_addr_valid(buf) || !IS_ALIGNED((unsigned long)buf, chip->buf_align); diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c index 5b11c7061497a..9ab22c5d4166c 100644 --- a/drivers/mtd/nand/raw/qcom_nandc.c +++ b/drivers/mtd/nand/raw/qcom_nandc.c @@ -2836,7 +2836,7 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc, chip->legacy.block_markbad = qcom_nandc_block_markbad; chip->controller = &nandc->controller; - chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USE_BOUNCE_BUFFER | + chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA | NAND_SKIP_BBTSCAN; /* set up initial status value */ diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c index 46b7d04e2c874..c5fde09e0175e 100644 --- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c +++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c @@ -1987,7 +1987,7 @@ static int stm32_fmc2_probe(struct platform_device *pdev) chip->controller = &fmc2->base; chip->options |= NAND_BUSWIDTH_AUTO | NAND_NO_SUBPAGE_WRITE | - NAND_USE_BOUNCE_BUFFER; + NAND_USES_DMA; /* Default ECC settings */ chip->ecc.mode = NAND_ECC_HW; diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c index 18ac0b36abfa4..26d862213cacc 100644 --- a/drivers/mtd/nand/raw/sunxi_nand.c +++ b/drivers/mtd/nand/raw/sunxi_nand.c @@ -1698,7 +1698,7 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand, ecc->read_page = sunxi_nfc_hw_ecc_read_page_dma; ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage_dma; ecc->write_page = sunxi_nfc_hw_ecc_write_page_dma; - nand->options |= NAND_USE_BOUNCE_BUFFER; + nand->options |= NAND_USES_DMA; } else { ecc->read_page = sunxi_nfc_hw_ecc_read_page; ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage; diff --git a/drivers/mtd/nand/raw/tango_nand.c b/drivers/mtd/nand/raw/tango_nand.c index 9acf2de37ee08..b92de603e6db4 100644 --- a/drivers/mtd/nand/raw/tango_nand.c +++ b/drivers/mtd/nand/raw/tango_nand.c @@ -568,7 +568,7 @@ static int chip_init(struct device *dev, struct device_node *np) chip->legacy.select_chip = tango_select_chip; chip->legacy.cmd_ctrl = tango_cmd_ctrl; chip->legacy.dev_ready = tango_dev_ready; - chip->options = NAND_USE_BOUNCE_BUFFER | + chip->options = NAND_USES_DMA | NAND_NO_SUBPAGE_WRITE | NAND_WAIT_TCCS; chip->controller = &nfc->hw; diff --git a/drivers/mtd/nand/raw/tegra_nand.c b/drivers/mtd/nand/raw/tegra_nand.c index 6a255ba0f2881..f9d046b2cd3be 100644 --- a/drivers/mtd/nand/raw/tegra_nand.c +++ b/drivers/mtd/nand/raw/tegra_nand.c @@ -1115,7 +1115,7 @@ static int tegra_nand_chips_init(struct device *dev, if (!mtd->name) mtd->name = "tegra_nand"; - chip->options = NAND_NO_SUBPAGE_WRITE | NAND_USE_BOUNCE_BUFFER; + chip->options = NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA; ret = nand_scan(chip, 1); if (ret) diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index e70fea67030b2..d1f5c5258e355 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -185,7 +185,7 @@ enum nand_ecc_algo { * This option could be defined by 
controller drivers to protect against * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers */ -#define NAND_USE_BOUNCE_BUFFER BIT(20) +#define NAND_USES_DMA BIT(20) /* * In case your controller is implementing ->legacy.cmd_ctrl() and is relying -- GitLab From 2f959949f2149eb0f76aeea8020e8c7865bb0c53 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Thu, 7 May 2020 12:52:32 +0200 Subject: [PATCH 0387/1971] mtd: rawnand: Fix comments about the use of bufpoi Clarify these comments which are not very accurate (even wrong in the read case). Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200507105241.14299-5-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nand_base.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 906b8cd94bc4b..7dc7889f84b4d 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -3279,7 +3279,10 @@ read_retry: break; } - /* Transfer not aligned data */ + /* + * Copy back the data in the initial buffer when reading + * partial pages or when a bounce buffer is required. + */ if (use_bufpoi) { if (!NAND_HAS_SUBPAGE_READ(chip) && !oob && !(mtd->ecc_stats.failed - ecc_failures) && @@ -4074,7 +4077,10 @@ static int nand_do_write_ops(struct nand_chip *chip, loff_t to, else use_bufpoi = 0; - /* Partial page write?, or need to use bounce buffer */ + /* + * Copy the data from the initial buffer when doing partial page + * writes or when a bounce buffer is required. + */ if (use_bufpoi) { pr_debug("%s: using write bounce buffer for buf@%p\n", __func__, buf); -- GitLab From 6446907307da6e3e490b1d9169d6ebab6622dee0 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Thu, 7 May 2020 12:52:33 +0200 Subject: [PATCH 0388/1971] mtd: rawnand: Rename the use_bufpoi variables Both in nand_do_read_ops() and nand_do_write_ops() there is a boolean called use_bufpoi which is set to true in case of unaligned request or when there is a need for a DMA-able buffer. It basically means "use a bounce buffer". Depending on the value of use_bufpoi, the bufpoi variable is always used and will either point to the original buffer or to the nand_chip structure "internal data buffer" (this buffer is allocated with kmalloc() on purpose so that it will be DMA-compliant). In all cases bufpoi is used so the boolean name is misleading. Rename use_bufpoi to be use_bouce_buf to be more accurate. 
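The decision itself is unchanged by the rename; as a minimal sketch (mynfc_needs_bounce_buf is a hypothetical helper, not the in-tree code), the test now reads:

#include <linux/mm.h>
#include <linux/mtd/rawnand.h>

/*
 * Sketch only: decide whether a full-page access must bounce through the
 * chip's internal DMA-able buffer (chip->data_buf). The same test is used
 * on both the read and the write path.
 */
static bool mynfc_needs_bounce_buf(struct nand_chip *chip, const void *buf,
				   bool full_page)
{
	/* Partial or unaligned page accesses always use the bounce buffer. */
	if (!full_page)
		return true;

	/*
	 * With NAND_USES_DMA, the caller's buffer can only be used directly
	 * when it is DMA-able (not vmalloc'ed/highmem) and aligned enough.
	 */
	if (chip->options & NAND_USES_DMA)
		return !virt_addr_valid(buf) ||
		       !IS_ALIGNED((unsigned long)buf, chip->buf_align);

	return false;
}

When this returns true the core transfers through chip->data_buf and copies the requested bytes back to (or, on writes, from) the caller's buffer.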
Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200507105241.14299-6-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nand_base.c | 34 ++++++++++++++++---------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 7dc7889f84b4d..e2d96653fc3fd 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -3216,7 +3216,7 @@ static int nand_do_read_ops(struct nand_chip *chip, loff_t from, uint32_t max_oobsize = mtd_oobavail(mtd, ops); uint8_t *bufpoi, *oob, *buf; - int use_bufpoi; + int use_bounce_buf; unsigned int max_bitflips = 0; int retry_mode = 0; bool ecc_fail = false; @@ -3240,19 +3240,19 @@ static int nand_do_read_ops(struct nand_chip *chip, loff_t from, aligned = (bytes == mtd->writesize); if (!aligned) - use_bufpoi = 1; + use_bounce_buf = 1; else if (chip->options & NAND_USES_DMA) - use_bufpoi = !virt_addr_valid(buf) || - !IS_ALIGNED((unsigned long)buf, - chip->buf_align); + use_bounce_buf = !virt_addr_valid(buf) || + !IS_ALIGNED((unsigned long)buf, + chip->buf_align); else - use_bufpoi = 0; + use_bounce_buf = 0; /* Is the current page in the buffer? */ if (realpage != chip->pagecache.page || oob) { - bufpoi = use_bufpoi ? chip->data_buf : buf; + bufpoi = use_bounce_buf ? chip->data_buf : buf; - if (use_bufpoi && aligned) + if (use_bounce_buf && aligned) pr_debug("%s: using read bounce buffer for buf@%p\n", __func__, buf); @@ -3273,7 +3273,7 @@ read_retry: ret = chip->ecc.read_page(chip, bufpoi, oob_required, page); if (ret < 0) { - if (use_bufpoi) + if (use_bounce_buf) /* Invalidate page cache */ chip->pagecache.page = -1; break; @@ -3283,7 +3283,7 @@ read_retry: * Copy back the data in the initial buffer when reading * partial pages or when a bounce buffer is required. */ - if (use_bufpoi) { + if (use_bounce_buf) { if (!NAND_HAS_SUBPAGE_READ(chip) && !oob && !(mtd->ecc_stats.failed - ecc_failures) && (ops->mode != MTD_OPS_RAW)) { @@ -4065,23 +4065,23 @@ static int nand_do_write_ops(struct nand_chip *chip, loff_t to, while (1) { int bytes = mtd->writesize; uint8_t *wbuf = buf; - int use_bufpoi; + int use_bounce_buf; int part_pagewr = (column || writelen < mtd->writesize); if (part_pagewr) - use_bufpoi = 1; + use_bounce_buf = 1; else if (chip->options & NAND_USES_DMA) - use_bufpoi = !virt_addr_valid(buf) || - !IS_ALIGNED((unsigned long)buf, - chip->buf_align); + use_bounce_buf = !virt_addr_valid(buf) || + !IS_ALIGNED((unsigned long)buf, + chip->buf_align); else - use_bufpoi = 0; + use_bounce_buf = 0; /* * Copy the data from the initial buffer when doing partial page * writes or when a bounce buffer is required. */ - if (use_bufpoi) { + if (use_bounce_buf) { pr_debug("%s: using write bounce buffer for buf@%p\n", __func__, buf); if (part_pagewr) -- GitLab From 21b5cf3f6467cf608756151066941792f340c986 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Thu, 7 May 2020 12:52:34 +0200 Subject: [PATCH 0389/1971] mtd: rawnand: Avoid indirect access to ->data_buf() The logic in nand_do_read_ops() is to use a bufpoi variable, either set to the original buffer, or set to a bounce buffer which in the end happens to be chip->data_buf depending on the value of the use_bounce_buf boolean. This is not a reason to call chip->data_buf directly when we know that we are using the bounce buffer. Let's use bufpoi instead to be consistent. 
Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200507105241.14299-7-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nand_base.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index e2d96653fc3fd..c440f11a34e9e 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -3293,7 +3293,7 @@ read_retry: /* Invalidate page cache */ chip->pagecache.page = -1; } - memcpy(buf, chip->data_buf + col, bytes); + memcpy(buf, bufpoi + col, bytes); } if (unlikely(oob)) { -- GitLab From 930370253ec51fccebb743409428ac2f364b3559 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Thu, 7 May 2020 12:52:35 +0200 Subject: [PATCH 0390/1971] mtd: rawnand: Add a helper to check supported operations Let's use a helper to clearly check if an operation is supported or not. Return -ENOTSUPP when ->exec_op() is not implemented as we cannot know. Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200507105241.14299-8-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/internals.h | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/mtd/nand/raw/internals.h b/drivers/mtd/nand/raw/internals.h index bca9b3424646a..03866b0aadea7 100644 --- a/drivers/mtd/nand/raw/internals.h +++ b/drivers/mtd/nand/raw/internals.h @@ -109,6 +109,15 @@ static inline bool nand_has_exec_op(struct nand_chip *chip) return true; } +static inline int nand_check_op(struct nand_chip *chip, + const struct nand_operation *op) +{ + if (!nand_has_exec_op(chip)) + return 0; + + return chip->controller->ops->exec_op(chip, op, true); +} + static inline int nand_exec_op(struct nand_chip *chip, const struct nand_operation *op) { -- GitLab From b451f5beece3f5556920992e7498d23f6da6ef6e Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Thu, 7 May 2020 12:52:36 +0200 Subject: [PATCH 0391/1971] mtd: rawnand: Give the possibility to verify a read operation is supported This can be used to discriminate between two path in the parameter page detection: use data_in cycles (like before) if supported, use the CHANGE READ COLUMN command otherwise. 
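As a usage sketch (mynfc_read_param_copy is a hypothetical helper), a caller can now probe support without touching the bus by passing check_only, and pick a fallback otherwise; this is how the parameter-page detection is reworked two patches later:

#include <linux/mtd/rawnand.h>

/* Sketch only: read the i-th copy of a parameter page of 'len' bytes. */
static int mynfc_read_param_copy(struct nand_chip *chip, unsigned int i,
				 void *buf, unsigned int len)
{
	/* check_only = true: no bus access, just "are DATA_IN cycles OK?" */
	bool use_datain = !nand_read_data_op(chip, buf, len, true, true);

	if (use_datain)
		return nand_read_data_op(chip, buf, len, true, false);

	/* Fall back to CHANGE READ COLUMN for constrained controllers. */
	return nand_change_read_column_op(chip, i * len, buf, len, true);
}

In real code the probe is done once up front rather than per copy; it is inlined here only to keep the sketch short.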
Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200507105241.14299-9-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/fsmc_nand.c | 2 +- drivers/mtd/nand/raw/marvell_nand.c | 4 +-- drivers/mtd/nand/raw/nand_base.c | 48 +++++++++++++++++------------ drivers/mtd/nand/raw/nand_jedec.c | 2 +- drivers/mtd/nand/raw/nand_legacy.c | 8 +++-- drivers/mtd/nand/raw/nand_micron.c | 6 ++-- drivers/mtd/nand/raw/nand_onfi.c | 3 +- include/linux/mtd/rawnand.h | 2 +- 8 files changed, 44 insertions(+), 31 deletions(-) diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c index 31dc9fd0a94d9..2a9222e99bcc5 100644 --- a/drivers/mtd/nand/raw/fsmc_nand.c +++ b/drivers/mtd/nand/raw/fsmc_nand.c @@ -694,7 +694,7 @@ static int fsmc_read_page_hwecc(struct nand_chip *chip, u8 *buf, for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) { nand_read_page_op(chip, page, s * eccsize, NULL, 0); chip->ecc.hwctl(chip, NAND_ECC_READ); - ret = nand_read_data_op(chip, p, eccsize, false); + ret = nand_read_data_op(chip, p, eccsize, false, false); if (ret) return ret; diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index 88269c44b2b4b..a79ce4bdd31cf 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c @@ -1224,12 +1224,12 @@ static int marvell_nfc_hw_ecc_bch_read_page_raw(struct nand_chip *chip, u8 *buf, /* Read spare bytes */ nand_read_data_op(chip, oob + (lt->spare_bytes * chunk), - spare_len, false); + spare_len, false, false); /* Read ECC bytes */ nand_read_data_op(chip, oob + ecc_offset + (ALIGN(lt->ecc_bytes, 32) * chunk), - ecc_len, false); + ecc_len, false, false); } return 0; diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index c440f11a34e9e..8df864881fff4 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -740,7 +740,8 @@ int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms) */ timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1; do { - ret = nand_read_data_op(chip, &status, sizeof(status), true); + ret = nand_read_data_op(chip, &status, sizeof(status), true, + false); if (ret) break; @@ -820,7 +821,7 @@ void panic_nand_wait(struct nand_chip *chip, unsigned long timeo) u8 status; ret = nand_read_data_op(chip, &status, sizeof(status), - true); + true, false); if (ret) return; @@ -1918,6 +1919,8 @@ EXPORT_SYMBOL_GPL(nand_reset_op); * @buf: buffer used to store the data * @len: length of the buffer * @force_8bit: force 8-bit bus access + * @check_only: do not actually run the command, only checks if the + * controller driver supports it * * This function does a raw data read on the bus. Usually used after launching * another NAND operation like nand_read_page_op(). @@ -1926,7 +1929,7 @@ EXPORT_SYMBOL_GPL(nand_reset_op); * Returns 0 on success, a negative error code otherwise. 
*/ int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len, - bool force_8bit) + bool force_8bit, bool check_only) { if (!len || !buf) return -EINVAL; @@ -1939,9 +1942,15 @@ int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len, instrs[0].ctx.data.force_8bit = force_8bit; + if (check_only) + return nand_check_op(chip, &op); + return nand_exec_op(chip, &op); } + if (check_only) + return 0; + if (force_8bit) { u8 *p = buf; unsigned int i; @@ -2670,7 +2679,7 @@ int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required, if (oob_required) { ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, - false); + false, false); if (ret) return ret; } @@ -2702,7 +2711,7 @@ static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf, return ret; for (steps = chip->ecc.steps; steps > 0; steps--) { - ret = nand_read_data_op(chip, buf, eccsize, false); + ret = nand_read_data_op(chip, buf, eccsize, false, false); if (ret) return ret; @@ -2710,14 +2719,14 @@ static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf, if (chip->ecc.prepad) { ret = nand_read_data_op(chip, oob, chip->ecc.prepad, - false); + false, false); if (ret) return ret; oob += chip->ecc.prepad; } - ret = nand_read_data_op(chip, oob, eccbytes, false); + ret = nand_read_data_op(chip, oob, eccbytes, false, false); if (ret) return ret; @@ -2725,7 +2734,7 @@ static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf, if (chip->ecc.postpad) { ret = nand_read_data_op(chip, oob, chip->ecc.postpad, - false); + false, false); if (ret) return ret; @@ -2735,7 +2744,7 @@ static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf, size = mtd->oobsize - (oob - chip->oob_poi); if (size) { - ret = nand_read_data_op(chip, oob, size, false); + ret = nand_read_data_op(chip, oob, size, false, false); if (ret) return ret; } @@ -2928,14 +2937,15 @@ static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf, for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { chip->ecc.hwctl(chip, NAND_ECC_READ); - ret = nand_read_data_op(chip, p, eccsize, false); + ret = nand_read_data_op(chip, p, eccsize, false, false); if (ret) return ret; chip->ecc.calculate(chip, p, &ecc_calc[i]); } - ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false); + ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false, + false); if (ret) return ret; @@ -3014,7 +3024,7 @@ static int nand_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf, chip->ecc.hwctl(chip, NAND_ECC_READ); - ret = nand_read_data_op(chip, p, eccsize, false); + ret = nand_read_data_op(chip, p, eccsize, false, false); if (ret) return ret; @@ -3071,13 +3081,13 @@ static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf, chip->ecc.hwctl(chip, NAND_ECC_READ); - ret = nand_read_data_op(chip, p, eccsize, false); + ret = nand_read_data_op(chip, p, eccsize, false, false); if (ret) return ret; if (chip->ecc.prepad) { ret = nand_read_data_op(chip, oob, chip->ecc.prepad, - false); + false, false); if (ret) return ret; @@ -3086,7 +3096,7 @@ static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf, chip->ecc.hwctl(chip, NAND_ECC_READSYN); - ret = nand_read_data_op(chip, oob, eccbytes, false); + ret = nand_read_data_op(chip, oob, eccbytes, false, false); if (ret) return ret; @@ -3096,7 +3106,7 @@ static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf, if (chip->ecc.postpad) { ret = nand_read_data_op(chip, oob, 
chip->ecc.postpad, - false); + false, false); if (ret) return ret; @@ -3124,7 +3134,7 @@ static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf, /* Calculate remaining oob bytes */ i = mtd->oobsize - (oob - chip->oob_poi); if (i) { - ret = nand_read_data_op(chip, oob, i, false); + ret = nand_read_data_op(chip, oob, i, false, false); if (ret) return ret; } @@ -3426,7 +3436,7 @@ static int nand_read_oob_syndrome(struct nand_chip *chip, int page) sndrnd = 1; toread = min_t(int, length, chunk); - ret = nand_read_data_op(chip, bufpoi, toread, false); + ret = nand_read_data_op(chip, bufpoi, toread, false, false); if (ret) return ret; @@ -3434,7 +3444,7 @@ static int nand_read_oob_syndrome(struct nand_chip *chip, int page) length -= toread; } if (length > 0) { - ret = nand_read_data_op(chip, bufpoi, length, false); + ret = nand_read_data_op(chip, bufpoi, length, false, false); if (ret) return ret; } diff --git a/drivers/mtd/nand/raw/nand_jedec.c b/drivers/mtd/nand/raw/nand_jedec.c index 15937e02c64f1..63069f1948a83 100644 --- a/drivers/mtd/nand/raw/nand_jedec.c +++ b/drivers/mtd/nand/raw/nand_jedec.c @@ -51,7 +51,7 @@ int nand_jedec_detect(struct nand_chip *chip) } for (i = 0; i < JEDEC_PARAM_PAGES; i++) { - ret = nand_read_data_op(chip, p, sizeof(*p), true); + ret = nand_read_data_op(chip, p, sizeof(*p), true, false); if (ret) { ret = 0; goto free_jedec_param_page; diff --git a/drivers/mtd/nand/raw/nand_legacy.c b/drivers/mtd/nand/raw/nand_legacy.c index f91e92e1b9720..d64791c06a977 100644 --- a/drivers/mtd/nand/raw/nand_legacy.c +++ b/drivers/mtd/nand/raw/nand_legacy.c @@ -225,7 +225,8 @@ static void nand_wait_status_ready(struct nand_chip *chip, unsigned long timeo) do { u8 status; - ret = nand_read_data_op(chip, &status, sizeof(status), true); + ret = nand_read_data_op(chip, &status, sizeof(status), true, + false); if (ret) return; @@ -552,7 +553,8 @@ static int nand_wait(struct nand_chip *chip) break; } else { ret = nand_read_data_op(chip, &status, - sizeof(status), true); + sizeof(status), true, + false); if (ret) return ret; @@ -563,7 +565,7 @@ static int nand_wait(struct nand_chip *chip) } while (time_before(jiffies, timeo)); } - ret = nand_read_data_op(chip, &status, sizeof(status), true); + ret = nand_read_data_op(chip, &status, sizeof(status), true, false); if (ret) return ret; diff --git a/drivers/mtd/nand/raw/nand_micron.c b/drivers/mtd/nand/raw/nand_micron.c index 56654030ec7ff..3a37d48c94724 100644 --- a/drivers/mtd/nand/raw/nand_micron.c +++ b/drivers/mtd/nand/raw/nand_micron.c @@ -212,7 +212,7 @@ static int micron_nand_on_die_ecc_status_4(struct nand_chip *chip, u8 status, */ if (!oob_required) { ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, - false); + false, false); if (ret) return ret; } @@ -304,10 +304,10 @@ micron_nand_read_page_on_die_ecc(struct nand_chip *chip, uint8_t *buf, if (ret) goto out; - ret = nand_read_data_op(chip, buf, mtd->writesize, false); + ret = nand_read_data_op(chip, buf, mtd->writesize, false, false); if (!ret && oob_required) ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, - false); + false, false); if (chip->ecc.strength == 4) max_bitflips = micron_nand_on_die_ecc_status_4(chip, status, diff --git a/drivers/mtd/nand/raw/nand_onfi.c b/drivers/mtd/nand/raw/nand_onfi.c index ee0f2c2549c1b..e6ffbe8c9a0cc 100644 --- a/drivers/mtd/nand/raw/nand_onfi.c +++ b/drivers/mtd/nand/raw/nand_onfi.c @@ -167,7 +167,8 @@ int nand_onfi_detect(struct nand_chip *chip) } for (i = 0; i < ONFI_PARAM_PAGES; i++) { - ret = 
nand_read_data_op(chip, &pbuf[i], sizeof(*pbuf), true); + ret = nand_read_data_op(chip, &pbuf[i], sizeof(*pbuf), true, + false); if (ret) { ret = 0; goto free_onfi_param_page; diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index d1f5c5258e355..70380c91731cc 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -1363,7 +1363,7 @@ int nand_change_write_column_op(struct nand_chip *chip, unsigned int offset_in_page, const void *buf, unsigned int len, bool force_8bit); int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len, - bool force_8bit); + bool force_8bit, bool check_only); int nand_write_data_op(struct nand_chip *chip, const void *buf, unsigned int len, bool force_8bit); -- GitLab From c27842e7e11f97f0ba5f668953327acdb141c452 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Thu, 7 May 2020 12:52:37 +0200 Subject: [PATCH 0392/1971] mtd: rawnand: onfi: Adapt the parameter page read to constraint controllers We already know that there are controllers not able to read the three copies of the parameter page in one go. The workaround was to first request the controller to assert command and address cycles on the NAND bus to trigger a parameter page read, and then do a simple read operation for each page. But there are also controllers which are not able to split the parameter page read between the command/address cycles and the actual data operation. Let's use a regular PARAMETER PAGE READ operation for the first iteration and use either a CHANGE READ COLUMN or a simple DATA READ operation for the following copies, depending on what the controller supports. The default behavior for non-exec-op compliant drivers remains the same: DATA READ. Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200507105241.14299-10-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nand_onfi.c | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/drivers/mtd/nand/raw/nand_onfi.c b/drivers/mtd/nand/raw/nand_onfi.c index e6ffbe8c9a0cc..be3456627288f 100644 --- a/drivers/mtd/nand/raw/nand_onfi.c +++ b/drivers/mtd/nand/raw/nand_onfi.c @@ -143,6 +143,7 @@ int nand_onfi_detect(struct nand_chip *chip) struct nand_memory_organization *memorg; struct nand_onfi_params *p = NULL, *pbuf; struct onfi_params *onfi; + bool use_datain = false; int onfi_version = 0; char id[4]; int i, ret, val; @@ -160,15 +161,21 @@ int nand_onfi_detect(struct nand_chip *chip) if (!pbuf) return -ENOMEM; - ret = nand_read_param_page_op(chip, 0, NULL, 0); - if (ret) { - ret = 0; - goto free_onfi_param_page; - } + if (!nand_has_exec_op(chip) || + !nand_read_data_op(chip, &pbuf[0], sizeof(*pbuf), true, true)) + use_datain = true; for (i = 0; i < ONFI_PARAM_PAGES; i++) { - ret = nand_read_data_op(chip, &pbuf[i], sizeof(*pbuf), true, - false); + if (!i) + ret = nand_read_param_page_op(chip, 0, &pbuf[i], + sizeof(*pbuf)); + else if (use_datain) + ret = nand_read_data_op(chip, &pbuf[i], sizeof(*pbuf), + true, false); + else + ret = nand_change_read_column_op(chip, sizeof(*pbuf) * i, + &pbuf[i], sizeof(*pbuf), + true); if (ret) { ret = 0; goto free_onfi_param_page; -- GitLab From daca31765e8bea70373fc9e604f562942168300b Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Thu, 7 May 2020 12:52:38 +0200 Subject: [PATCH 0393/1971] mtd: rawnand: jedec: Adapt the parameter page read to constraint controllers We already know that there are controllers not able to read the three copies of the parameter page in one 
go. The workaround was to first request the controller to assert command and address cycles on the NAND bus to trigger a parameter page read, and then do a read operation for each page. But there are also controllers which are not able to split the parameter page read between the command/address cycles and the actual data operation. Let's use a regular PARAMETER PAGE READ operation for the first iteration and use eithe a CHANGE READ COLUMN or a simple DATA READ operation for the following copies, depending on what the controller supports. The default for non-exec-op compliant drivers remains unchanged: use a SIMPLE READ. Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200507105241.14299-11-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nand_jedec.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/drivers/mtd/nand/raw/nand_jedec.c b/drivers/mtd/nand/raw/nand_jedec.c index 63069f1948a83..b15c42f487559 100644 --- a/drivers/mtd/nand/raw/nand_jedec.c +++ b/drivers/mtd/nand/raw/nand_jedec.c @@ -27,6 +27,7 @@ int nand_jedec_detect(struct nand_chip *chip) struct nand_memory_organization *memorg; struct nand_jedec_params *p; struct jedec_ecc_info *ecc; + bool use_datain = false; int jedec_version = 0; char id[5]; int i, val, ret; @@ -44,14 +45,20 @@ int nand_jedec_detect(struct nand_chip *chip) if (!p) return -ENOMEM; - ret = nand_read_param_page_op(chip, 0x40, NULL, 0); - if (ret) { - ret = 0; - goto free_jedec_param_page; - } + if (!nand_has_exec_op(chip) || + !nand_read_data_op(chip, p, sizeof(*p), true, true)) + use_datain = true; for (i = 0; i < JEDEC_PARAM_PAGES; i++) { - ret = nand_read_data_op(chip, p, sizeof(*p), true, false); + if (!i) + ret = nand_read_param_page_op(chip, 0x40, p, + sizeof(*p)); + else if (use_datain) + ret = nand_read_data_op(chip, p, sizeof(*p), true, + false); + else + ret = nand_change_read_column_op(chip, sizeof(*p) * i, + p, sizeof(*p), true); if (ret) { ret = 0; goto free_jedec_param_page; -- GitLab From 658beb6639606268bd48eb7de7cf308d6b10600f Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Thu, 7 May 2020 12:52:39 +0200 Subject: [PATCH 0394/1971] mtd: rawnand: Expose monolithic read/write_page_raw() helpers The current nand_read/write_page_raw() helpers are already widely used but do not fit the purpose of "constrained" controllers which cannot, for instance, separate command/address cycles with data cycles. Workaround this issue by proposing alternative helpers that can be used by these controller drivers instead. Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200507105241.14299-12-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nand_base.c | 77 ++++++++++++++++++++++++++++++++ include/linux/mtd/rawnand.h | 8 +++- 2 files changed, 83 insertions(+), 2 deletions(-) diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 8df864881fff4..43c7ebc7e8592 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -2688,6 +2688,47 @@ int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required, } EXPORT_SYMBOL(nand_read_page_raw); +/** + * nand_monolithic_read_page_raw - Monolithic page read in raw mode + * @chip: NAND chip info structure + * @buf: buffer to store read data + * @oob_required: caller requires OOB data read to chip->oob_poi + * @page: page number to read + * + * This is a raw page read, ie. without any error detection/correction. 
+ * Monolithic means we are requesting all the relevant data (main plus + * eventually OOB) to be loaded in the NAND cache and sent over the + * bus (from the NAND chip to the NAND controller) in a single + * operation. This is an alternative to nand_read_page_raw(), which + * first reads the main data, and if the OOB data is requested too, + * then reads more data on the bus. + */ +int nand_monolithic_read_page_raw(struct nand_chip *chip, u8 *buf, + int oob_required, int page) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + unsigned int size = mtd->writesize; + u8 *read_buf = buf; + int ret; + + if (oob_required) { + size += mtd->oobsize; + + if (buf != chip->data_buf) + read_buf = nand_get_data_buf(chip); + } + + ret = nand_read_page_op(chip, page, 0, read_buf, size); + if (ret) + return ret; + + if (buf != chip->data_buf) + memcpy(buf, read_buf, mtd->writesize); + + return 0; +} +EXPORT_SYMBOL(nand_monolithic_read_page_raw); + /** * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc * @chip: nand chip info structure @@ -3696,6 +3737,42 @@ int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf, } EXPORT_SYMBOL(nand_write_page_raw); +/** + * nand_monolithic_write_page_raw - Monolithic page write in raw mode + * @chip: NAND chip info structure + * @buf: data buffer to write + * @oob_required: must write chip->oob_poi to OOB + * @page: page number to write + * + * This is a raw page write, ie. without any error detection/correction. + * Monolithic means we are requesting all the relevant data (main plus + * eventually OOB) to be sent over the bus and effectively programmed + * into the NAND chip arrays in a single operation. This is an + * alternative to nand_write_page_raw(), which first sends the main + * data, then eventually send the OOB data by latching more data + * cycles on the NAND bus, and finally sends the program command to + * synchronyze the NAND chip cache. 
+ */ +int nand_monolithic_write_page_raw(struct nand_chip *chip, const u8 *buf, + int oob_required, int page) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + unsigned int size = mtd->writesize; + u8 *write_buf = (u8 *)buf; + + if (oob_required) { + size += mtd->oobsize; + + if (buf != chip->data_buf) { + write_buf = nand_get_data_buf(chip); + memcpy(write_buf, buf, mtd->writesize); + } + } + + return nand_prog_page_op(chip, page, 0, write_buf, size); +} +EXPORT_SYMBOL(nand_monolithic_write_page_raw); + /** * nand_write_page_raw_syndrome - [INTERN] raw page write function * @chip: nand chip info structure diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 70380c91731cc..406e9ff0f45ce 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -1328,13 +1328,17 @@ int nand_read_oob_std(struct nand_chip *chip, int page); int nand_get_set_features_notsupp(struct nand_chip *chip, int addr, u8 *subfeature_param); -/* Default read_page_raw implementation */ +/* read_page_raw implementations */ int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required, int page); +int nand_monolithic_read_page_raw(struct nand_chip *chip, uint8_t *buf, + int oob_required, int page); -/* Default write_page_raw implementation */ +/* write_page_raw implementations */ int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf, int oob_required, int page); +int nand_monolithic_write_page_raw(struct nand_chip *chip, const uint8_t *buf, + int oob_required, int page); /* Reset and initialize a NAND device */ int nand_reset(struct nand_chip *chip, int chipnr); -- GitLab From 0e7f4b64ea46da6da5d84709d0316df9a7145f76 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Thu, 7 May 2020 12:52:40 +0200 Subject: [PATCH 0395/1971] mtd: rawnand: Allow controllers to overload soft ECC hooks Some controller drivers do not support executing regular nand_read/write_page_raw() helpers. For that, we created nand_monolithic_read/write_page_raw() alternatives. Let's now allow the driver to overload the ECC ->read/write_page_raw() hooks. 
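As a sketch (constrained_nfc_attach_chip is a hypothetical hook), such a controller would typically pre-load the monolithic helpers from its ->attach_chip() callback, which runs before the ECC initialisation:

#include <linux/mtd/rawnand.h>

static int constrained_nfc_attach_chip(struct nand_chip *chip)
{
	/*
	 * Pre-set the raw accessors; with this change the software ECC
	 * initialisation keeps them instead of forcing the default
	 * nand_read/write_page_raw() helpers.
	 */
	chip->ecc.read_page_raw = nand_monolithic_read_page_raw;
	chip->ecc.write_page_raw = nand_monolithic_write_page_raw;

	return 0;
}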
Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200507105241.14299-13-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nand_base.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 43c7ebc7e8592..ba37ca9c12600 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -5283,8 +5283,10 @@ static int nand_set_ecc_soft_ops(struct nand_chip *chip) ecc->read_page = nand_read_page_swecc; ecc->read_subpage = nand_read_subpage; ecc->write_page = nand_write_page_swecc; - ecc->read_page_raw = nand_read_page_raw; - ecc->write_page_raw = nand_write_page_raw; + if (!ecc->read_page_raw) + ecc->read_page_raw = nand_read_page_raw; + if (!ecc->write_page_raw) + ecc->write_page_raw = nand_write_page_raw; ecc->read_oob = nand_read_oob_std; ecc->write_oob = nand_write_oob_std; if (!ecc->size) @@ -5306,8 +5308,10 @@ static int nand_set_ecc_soft_ops(struct nand_chip *chip) ecc->read_page = nand_read_page_swecc; ecc->read_subpage = nand_read_subpage; ecc->write_page = nand_write_page_swecc; - ecc->read_page_raw = nand_read_page_raw; - ecc->write_page_raw = nand_write_page_raw; + if (!ecc->read_page_raw) + ecc->read_page_raw = nand_read_page_raw; + if (!ecc->write_page_raw) + ecc->write_page_raw = nand_write_page_raw; ecc->read_oob = nand_read_oob_std; ecc->write_oob = nand_write_oob_std; -- GitLab From 22dc5f9d490655a4ae1fbc55f93daa8e85b1c38f Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Thu, 7 May 2020 12:52:41 +0200 Subject: [PATCH 0396/1971] mtd: rawnand: micron: Allow controllers to overload raw accessors Some controller drivers do not support executing regular nand_read/write_page_raw() helpers. For that, we created nand_monolithic_read/write_page_raw() alternatives. Let's now allow the driver to overload the ECC ->read/write_page_raw() hooks when these hooks are supported. Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200507105241.14299-14-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nand_micron.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/mtd/nand/raw/nand_micron.c b/drivers/mtd/nand/raw/nand_micron.c index 3a37d48c94724..b2b047b245f41 100644 --- a/drivers/mtd/nand/raw/nand_micron.c +++ b/drivers/mtd/nand/raw/nand_micron.c @@ -508,8 +508,10 @@ static int micron_nand_init(struct nand_chip *chip) chip->ecc.read_page_raw = nand_read_page_raw_notsupp; chip->ecc.write_page_raw = nand_write_page_raw_notsupp; } else { - chip->ecc.read_page_raw = nand_read_page_raw; - chip->ecc.write_page_raw = nand_write_page_raw; + if (!chip->ecc.read_page_raw) + chip->ecc.read_page_raw = nand_read_page_raw; + if (!chip->ecc.write_page_raw) + chip->ecc.write_page_raw = nand_write_page_raw; } } -- GitLab From ec7cfc3d763c761ff1ad5a4e66c4f94098d7e161 Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Mon, 11 May 2020 08:49:15 +0200 Subject: [PATCH 0397/1971] mtd: rawnand: Add a NAND_NO_BBM_QUIRK flag Some controllers with embedded ECC engines override the BBM marker with data or ECC bytes, thus making bad block detection through bad block marker impossible. Let's flag those chips so the core knows it shouldn't check the BBM and consider all blocks good. This should allow us to get rid of two implementers of the legacy.block_bad() hook. 
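A sketch of the intended driver-side usage (pipelined_nfc_attach_chip is a hypothetical hook): instead of providing a dummy legacy.block_bad(), the driver just flags the chip and normally relies on a flash-based BBT:

#include <linux/mtd/rawnand.h>

static int pipelined_nfc_attach_chip(struct nand_chip *chip)
{
	/*
	 * The ECC engine overwrites the bad block marker, so tell the core
	 * not to read it; nand_isbad_bbm() then reports blocks as good and
	 * bad block tracking relies on the flash-based BBT instead.
	 */
	chip->options |= NAND_NO_BBM_QUIRK;
	chip->bbt_options |= NAND_BBT_USE_FLASH;

	return 0;
}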
Signed-off-by: Boris Brezillon Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200511064917.6255-1-boris.brezillon@collabora.com --- drivers/mtd/nand/raw/nand_base.c | 3 +++ include/linux/mtd/rawnand.h | 8 ++++++++ 2 files changed, 11 insertions(+) diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index ba37ca9c12600..2d2a216af120f 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -395,6 +395,9 @@ static int nand_block_bad(struct nand_chip *chip, loff_t ofs) static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs) { + if (chip->options & NAND_NO_BBM_QUIRK) + return 0; + if (chip->legacy.block_bad) return chip->legacy.block_bad(chip, ofs); diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 406e9ff0f45ce..0f45b6984ad19 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -221,6 +221,14 @@ enum nand_ecc_algo { #define NAND_BBM_SECONDPAGE BIT(25) #define NAND_BBM_LASTPAGE BIT(26) +/* + * Some controllers with pipelined ECC engines override the BBM marker with + * data or ECC bytes, thus making bad block detection through bad block marker + * impossible. Let's flag those chips so the core knows it shouldn't check the + * BBM and consider all blocks good. + */ +#define NAND_NO_BBM_QUIRK BIT(27) + /* Cell info constants */ #define NAND_CI_CHIPNR_MSK 0x03 #define NAND_CI_CELLTYPE_MSK 0x0C -- GitLab From 8420c68a16ce38794efa494c14ee73ecdc64b12f Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Mon, 11 May 2020 08:49:16 +0200 Subject: [PATCH 0398/1971] mtd: rawnand: cafe: Set the NAND_NO_BBM_QUIRK flag We have a dummy block_bad() implementation returning 0. Let's set the NAND_NO_BBM_QUIRK flag and let the core take care of that. Signed-off-by: Boris Brezillon Reviewed-by: Miquel Raynal Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200511064917.6255-2-boris.brezillon@collabora.com --- drivers/mtd/nand/raw/cafe_nand.c | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/drivers/mtd/nand/raw/cafe_nand.c b/drivers/mtd/nand/raw/cafe_nand.c index 2d1c22dc88c15..2a0df13df5f35 100644 --- a/drivers/mtd/nand/raw/cafe_nand.c +++ b/drivers/mtd/nand/raw/cafe_nand.c @@ -546,11 +546,6 @@ static int cafe_nand_write_page_lowlevel(struct nand_chip *chip, return nand_prog_page_end_op(chip); } -static int cafe_nand_block_bad(struct nand_chip *chip, loff_t ofs) -{ - return 0; -} - /* F_2[X]/(X**6+X+1) */ static unsigned short gf64_mul(u8 a, u8 b) { @@ -718,10 +713,8 @@ static int cafe_nand_probe(struct pci_dev *pdev, /* Enable the following for a flash based bad block table */ cafe->nand.bbt_options = NAND_BBT_USE_FLASH; - if (skipbbt) { - cafe->nand.options |= NAND_SKIP_BBTSCAN; - cafe->nand.legacy.block_bad = cafe_nand_block_bad; - } + if (skipbbt) + cafe->nand.options |= NAND_SKIP_BBTSCAN | NAND_NO_BBM_QUIRK; if (numtimings && numtimings != 3) { dev_warn(&cafe->pdev->dev, "%d timing register values ignored; precisely three are required\n", numtimings); -- GitLab From dace12ccfd08793a393d6933d2b95af5db7e46ec Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Mon, 11 May 2020 08:49:17 +0200 Subject: [PATCH 0399/1971] mtd: rawnand: diskonchip: Set the NAND_NO_BBM_QUIRK flag We have a dummy block_bad() implementation returning 0. Let's set the NAND_NO_BBM_QUIRK flag and let the core take care of that. 
Signed-off-by: Boris Brezillon Reviewed-by: Miquel Raynal Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200511064917.6255-3-boris.brezillon@collabora.com --- drivers/mtd/nand/raw/diskonchip.c | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c index c2a391ad2c350..4c3d04da4cee2 100644 --- a/drivers/mtd/nand/raw/diskonchip.c +++ b/drivers/mtd/nand/raw/diskonchip.c @@ -776,13 +776,6 @@ static int doc200x_dev_ready(struct nand_chip *this) } } -static int doc200x_block_bad(struct nand_chip *this, loff_t ofs) -{ - /* This is our last resort if we couldn't find or create a BBT. Just - pretend all blocks are good. */ - return 0; -} - static void doc200x_enable_hwecc(struct nand_chip *this, int mode) { struct doc_priv *doc = nand_get_controller_data(this); @@ -1578,7 +1571,6 @@ static int __init doc_probe(unsigned long physadr) nand->legacy.cmd_ctrl = doc200x_hwcontrol; nand->legacy.dev_ready = doc200x_dev_ready; nand->legacy.waitfunc = doc200x_wait; - nand->legacy.block_bad = doc200x_block_bad; nand->ecc.hwctl = doc200x_enable_hwecc; nand->ecc.calculate = doc200x_calculate_ecc; nand->ecc.correct = doc200x_correct_data; @@ -1590,7 +1582,7 @@ static int __init doc_probe(unsigned long physadr) nand->ecc.options = NAND_ECC_GENERIC_ERASED_CHECK; nand->bbt_options = NAND_BBT_USE_FLASH; /* Skip the automatic BBT scan so we can run it manually */ - nand->options |= NAND_SKIP_BBTSCAN; + nand->options |= NAND_SKIP_BBTSCAN | NAND_NO_BBM_QUIRK; doc->physadr = physadr; doc->virtadr = virtadr; -- GitLab From a50b0c20bedcf10599708bed1608209274899053 Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Fri, 1 May 2020 16:39:13 +0200 Subject: [PATCH 0400/1971] mtd: rawnand: diskonchip: Make sure doc2001plus_readbuf() works for single byte reads Single byte accesses normally go through read_byte() but we are about to use this function in the exec_op() implementation and thus needs to prepare for single byte reads. Signed-off-by: Boris Brezillon Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200501143917.1388957-2-boris.brezillon@collabora.com --- drivers/mtd/nand/raw/diskonchip.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c index 4c3d04da4cee2..ab5a36671e72d 100644 --- a/drivers/mtd/nand/raw/diskonchip.c +++ b/drivers/mtd/nand/raw/diskonchip.c @@ -550,9 +550,12 @@ static void doc2001plus_readbuf(struct nand_chip *this, u_char *buf, int len) } /* Terminate read pipeline */ - buf[len - 2] = ReadDOC(docptr, Mplus_LastDataRead); - if (debug && i < 16) - printk("%02x ", buf[len - 2]); + if (len >= 2) { + buf[len - 2] = ReadDOC(docptr, Mplus_LastDataRead); + if (debug && i < 16) + printk("%02x ", buf[len - 2]); + } + buf[len - 1] = ReadDOC(docptr, Mplus_LastDataRead); if (debug && i < 16) printk("%02x ", buf[len - 1]); -- GitLab From fddf5cec1cc6321712fc6f0749dd57847b554662 Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Fri, 1 May 2020 16:39:14 +0200 Subject: [PATCH 0401/1971] mtd: rawnand: diskonchip: Get rid of doc2000_readbuf_dword() The logic can easily be merged in doc2000_readbuf(). 
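The merged helper (in the diff below) falls back to byte accesses unless 32-bit I/O is safe; as a small sketch (can_use_32bit_io is illustrative only), the combined alignment test it uses works like this:

#include <linux/types.h>

/*
 * OR-ing the buffer address with the length and masking the two low bits
 * checks in one operation that both are multiples of 4, i.e. that the
 * whole transfer can be done with 32-bit word accesses.
 */
static bool can_use_32bit_io(const void *buf, unsigned int len)
{
	return !(((unsigned long)buf | len) & 3);
}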
Signed-off-by: Boris Brezillon Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200501143917.1388957-3-boris.brezillon@collabora.com --- drivers/mtd/nand/raw/diskonchip.c | 30 +++++++++--------------------- 1 file changed, 9 insertions(+), 21 deletions(-) diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c index ab5a36671e72d..d3ef2d4fddfdf 100644 --- a/drivers/mtd/nand/raw/diskonchip.c +++ b/drivers/mtd/nand/raw/diskonchip.c @@ -69,6 +69,7 @@ struct doc_priv { int mh1_page; struct rs_control *rs_decoder; struct mtd_info *nextdoc; + bool supports_32b_reads; /* Handle the last stage of initialization (BBT scan, partitioning) */ int (*late_init)(struct mtd_info *mtd); @@ -337,32 +338,19 @@ static void doc2000_readbuf(struct nand_chip *this, u_char *buf, int len) { struct doc_priv *doc = nand_get_controller_data(this); void __iomem *docptr = doc->virtadr; + u32 *buf32 = (u32 *)buf; int i; if (debug) printk("readbuf of %d bytes: ", len); - for (i = 0; i < len; i++) - buf[i] = ReadDOC(docptr, 2k_CDSN_IO + i); -} - -static void doc2000_readbuf_dword(struct nand_chip *this, u_char *buf, int len) -{ - struct doc_priv *doc = nand_get_controller_data(this); - void __iomem *docptr = doc->virtadr; - int i; - - if (debug) - printk("readbuf_dword of %d bytes: ", len); - - if (unlikely((((unsigned long)buf) | len) & 3)) { - for (i = 0; i < len; i++) { - *(uint8_t *) (&buf[i]) = ReadDOC(docptr, 2k_CDSN_IO + i); - } + if (!doc->supports_32b_reads || + ((((unsigned long)buf) | len) & 3)) { + for (i = 0; i < len; i++) + buf[i] = ReadDOC(docptr, 2k_CDSN_IO + i); } else { - for (i = 0; i < len; i += 4) { - *(uint32_t *) (&buf[i]) = readl(docptr + DoC_2k_CDSN_IO + i); - } + for (i = 0; i < len / 4; i++) + buf32[i] = readl(docptr + DoC_2k_CDSN_IO + i); } } @@ -405,7 +393,7 @@ static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr) ident.dword = readl(docptr + DoC_2k_CDSN_IO); if (((ident.byte[0] << 8) | ident.byte[1]) == ret) { pr_info("DiskOnChip 2000 responds to DWORD access\n"); - this->legacy.read_buf = &doc2000_readbuf_dword; + doc->supports_32b_reads = true; } } -- GitLab From f37b1d3c8f3682eb654ad914ca69503f5b98941d Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Fri, 1 May 2020 16:39:15 +0200 Subject: [PATCH 0402/1971] mtd: rawnand: diskonchip: Inherit from nand_controller Stop relying on the dummy controller object embedded in nand_chip.legacy and explicitly inherit from nand_controller. 
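The pattern, as a minimal sketch (mynfc names are hypothetical), is to embed the controller object in the driver-private structure at probe time and point every attached chip at it:

#include <linux/io.h>
#include <linux/mtd/rawnand.h>

struct mynfc {
	struct nand_controller base;	/* initialised once at probe time */
	void __iomem *regs;
};

static void mynfc_setup(struct mynfc *nfc, struct nand_chip *chip)
{
	nand_controller_init(&nfc->base);

	/* Explicit inheritance instead of the dummy legacy controller. */
	chip->controller = &nfc->base;
	nand_set_controller_data(chip, nfc);
}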
Signed-off-by: Boris Brezillon Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200501143917.1388957-4-boris.brezillon@collabora.com --- drivers/mtd/nand/raw/diskonchip.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c index d3ef2d4fddfdf..aa17a8ae9d48d 100644 --- a/drivers/mtd/nand/raw/diskonchip.c +++ b/drivers/mtd/nand/raw/diskonchip.c @@ -58,6 +58,7 @@ static unsigned long doc_locations[] __initdata = { static struct mtd_info *doclist = NULL; struct doc_priv { + struct nand_controller base; void __iomem *virtadr; unsigned long physadr; u_char ChipID; @@ -1550,6 +1551,7 @@ static int __init doc_probe(unsigned long physadr) goto fail; } + nand_controller_init(&doc->base); mtd = nand_to_mtd(nand); nand->bbt_td = (struct nand_bbt_descr *) (doc + 1); nand->bbt_md = nand->bbt_td + 1; @@ -1557,6 +1559,7 @@ static int __init doc_probe(unsigned long physadr) mtd->owner = THIS_MODULE; mtd_set_ooblayout(mtd, &doc200x_ooblayout_ops); + nand->controller = &doc->base; nand_set_controller_data(nand, doc); nand->legacy.select_chip = doc200x_select_chip; nand->legacy.cmd_ctrl = doc200x_hwcontrol; -- GitLab From f46eb7affbda926a7379c0e13b7fa815ebfbb50d Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Fri, 1 May 2020 16:39:16 +0200 Subject: [PATCH 0403/1971] mtd: rawnand: diskonchip: Implement exec_op() Implement exec_op() so we can later get rid of the legacy implementation. It's worth noting that the new implementation assert/deassert the CE pin on each operation, which might not be necessary. We also dropped the extra reset done at chip selection time on DOC2001plus. If it's needed we really should do something smarter, because having a reset everytime we access the chip is not that great perf-wise. Signed-off-by: Boris Brezillon Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200501143917.1388957-5-boris.brezillon@collabora.com --- drivers/mtd/nand/raw/diskonchip.c | 219 +++++++++++++++++++++++++++--- 1 file changed, 199 insertions(+), 20 deletions(-) diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c index aa17a8ae9d48d..f85c0c8847579 100644 --- a/drivers/mtd/nand/raw/diskonchip.c +++ b/drivers/mtd/nand/raw/diskonchip.c @@ -355,25 +355,38 @@ static void doc2000_readbuf(struct nand_chip *this, u_char *buf, int len) } } +/* + * We need our own readid() here because it's called before the NAND chip + * has been initialized, and calling nand_op_readid() would lead to a NULL + * pointer exception when dereferencing the NAND timings. 
+ */ +static void doc200x_readid(struct nand_chip *this, unsigned int cs, u8 *id) +{ + u8 addr = 0; + struct nand_op_instr instrs[] = { + NAND_OP_CMD(NAND_CMD_READID, 0), + NAND_OP_ADDR(1, &addr, 50), + NAND_OP_8BIT_DATA_IN(2, id, 0), + }; + + struct nand_operation op = NAND_OPERATION(cs, instrs); + + if (!id) + op.ninstrs--; + + this->controller->ops->exec_op(this, &op, false); +} + static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr) { struct nand_chip *this = mtd_to_nand(mtd); struct doc_priv *doc = nand_get_controller_data(this); uint16_t ret; + u8 id[2]; - doc200x_select_chip(this, nr); - doc200x_hwcontrol(this, NAND_CMD_READID, - NAND_CTRL_CLE | NAND_CTRL_CHANGE); - doc200x_hwcontrol(this, 0, NAND_CTRL_ALE | NAND_CTRL_CHANGE); - doc200x_hwcontrol(this, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); - - /* We can't use dev_ready here, but at least we wait for the - * command to complete - */ - udelay(50); + doc200x_readid(this, nr, id); - ret = this->legacy.read_byte(this) << 8; - ret |= this->legacy.read_byte(this); + ret = ((u16)id[0] << 8) | id[1]; if (doc->ChipID == DOC_ChipID_Doc2k && try_dword && !nr) { /* First chip probe. See if we get same results by 32-bit access */ @@ -383,13 +396,7 @@ static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr) } ident; void __iomem *docptr = doc->virtadr; - doc200x_hwcontrol(this, NAND_CMD_READID, - NAND_CTRL_CLE | NAND_CTRL_CHANGE); - doc200x_hwcontrol(this, 0, NAND_CTRL_ALE | NAND_CTRL_CHANGE); - doc200x_hwcontrol(this, NAND_CMD_NONE, - NAND_NCE | NAND_CTRL_CHANGE); - - udelay(50); + doc200x_readid(this, nr, NULL); ident.dword = readl(docptr + DoC_2k_CDSN_IO); if (((ident.byte[0] << 8) | ident.byte[1]) == ret) { @@ -630,6 +637,166 @@ static void doc200x_hwcontrol(struct nand_chip *this, int cmd, } } +static void doc200x_write_control(struct doc_priv *doc, u8 value) +{ + WriteDOC(value, doc->virtadr, CDSNControl); + /* 11.4.3 -- 4 NOPs after CSDNControl write */ + DoC_Delay(doc, 4); +} + +static void doc200x_exec_instr(struct nand_chip *this, + const struct nand_op_instr *instr) +{ + struct doc_priv *doc = nand_get_controller_data(this); + unsigned int i; + + switch (instr->type) { + case NAND_OP_CMD_INSTR: + doc200x_write_control(doc, CDSN_CTRL_CE | CDSN_CTRL_CLE); + doc2000_write_byte(this, instr->ctx.cmd.opcode); + break; + + case NAND_OP_ADDR_INSTR: + doc200x_write_control(doc, CDSN_CTRL_CE | CDSN_CTRL_ALE); + for (i = 0; i < instr->ctx.addr.naddrs; i++) { + u8 addr = instr->ctx.addr.addrs[i]; + + if (DoC_is_2000(doc)) + doc2000_write_byte(this, addr); + else + doc2001_write_byte(this, addr); + } + break; + + case NAND_OP_DATA_IN_INSTR: + doc200x_write_control(doc, CDSN_CTRL_CE); + if (DoC_is_2000(doc)) + doc2000_readbuf(this, instr->ctx.data.buf.in, + instr->ctx.data.len); + else + doc2001_readbuf(this, instr->ctx.data.buf.in, + instr->ctx.data.len); + break; + + case NAND_OP_DATA_OUT_INSTR: + doc200x_write_control(doc, CDSN_CTRL_CE); + if (DoC_is_2000(doc)) + doc2000_writebuf(this, instr->ctx.data.buf.out, + instr->ctx.data.len); + else + doc2001_writebuf(this, instr->ctx.data.buf.out, + instr->ctx.data.len); + break; + + case NAND_OP_WAITRDY_INSTR: + DoC_WaitReady(doc); + break; + } + + if (instr->delay_ns) + ndelay(instr->delay_ns); +} + +static int doc200x_exec_op(struct nand_chip *this, + const struct nand_operation *op, + bool check_only) +{ + struct doc_priv *doc = nand_get_controller_data(this); + unsigned int i; + + if (check_only) + return true; + + doc->curchip = op->cs % doc->chips_per_floor; 
+ doc->curfloor = op->cs / doc->chips_per_floor; + + WriteDOC(doc->curfloor, doc->virtadr, FloorSelect); + WriteDOC(doc->curchip, doc->virtadr, CDSNDeviceSelect); + + /* Assert CE pin */ + doc200x_write_control(doc, CDSN_CTRL_CE); + + for (i = 0; i < op->ninstrs; i++) + doc200x_exec_instr(this, &op->instrs[i]); + + /* De-assert CE pin */ + doc200x_write_control(doc, 0); + + return 0; +} + +static void doc2001plus_write_pipe_term(struct doc_priv *doc) +{ + WriteDOC(0x00, doc->virtadr, Mplus_WritePipeTerm); + WriteDOC(0x00, doc->virtadr, Mplus_WritePipeTerm); +} + +static void doc2001plus_exec_instr(struct nand_chip *this, + const struct nand_op_instr *instr) +{ + struct doc_priv *doc = nand_get_controller_data(this); + unsigned int i; + + switch (instr->type) { + case NAND_OP_CMD_INSTR: + WriteDOC(instr->ctx.cmd.opcode, doc->virtadr, Mplus_FlashCmd); + doc2001plus_write_pipe_term(doc); + break; + + case NAND_OP_ADDR_INSTR: + for (i = 0; i < instr->ctx.addr.naddrs; i++) { + u8 addr = instr->ctx.addr.addrs[i]; + + WriteDOC(addr, doc->virtadr, Mplus_FlashAddress); + } + doc2001plus_write_pipe_term(doc); + /* deassert ALE */ + WriteDOC(0, doc->virtadr, Mplus_FlashControl); + break; + + case NAND_OP_DATA_IN_INSTR: + doc2001plus_readbuf(this, instr->ctx.data.buf.in, + instr->ctx.data.len); + break; + case NAND_OP_DATA_OUT_INSTR: + doc2001plus_writebuf(this, instr->ctx.data.buf.out, + instr->ctx.data.len); + doc2001plus_write_pipe_term(doc); + break; + case NAND_OP_WAITRDY_INSTR: + DoC_WaitReady(doc); + break; + } + + if (instr->delay_ns) + ndelay(instr->delay_ns); +} + +static int doc2001plus_exec_op(struct nand_chip *this, + const struct nand_operation *op, + bool check_only) +{ + struct doc_priv *doc = nand_get_controller_data(this); + unsigned int i; + + if (check_only) + return true; + + doc->curchip = op->cs % doc->chips_per_floor; + doc->curfloor = op->cs / doc->chips_per_floor; + + /* Assert ChipEnable and deassert WriteProtect */ + WriteDOC(DOC_FLASH_CE, doc->virtadr, Mplus_FlashSelect); + + for (i = 0; i < op->ninstrs; i++) + doc2001plus_exec_instr(this, &op->instrs[i]); + + /* De-assert ChipEnable */ + WriteDOC(0, doc->virtadr, Mplus_FlashSelect); + + return 0; +} + static void doc2001plus_command(struct nand_chip *this, unsigned command, int column, int page_addr) { @@ -1390,6 +1557,14 @@ static inline int __init doc2001plus_init(struct mtd_info *mtd) return 1; } +static const struct nand_controller_ops doc200x_ops = { + .exec_op = doc200x_exec_op, +}; + +static const struct nand_controller_ops doc2001plus_ops = { + .exec_op = doc2001plus_exec_op, +}; + static int __init doc_probe(unsigned long physadr) { struct nand_chip *nand = NULL; @@ -1533,7 +1708,6 @@ static int __init doc_probe(unsigned long physadr) goto fail; } - /* * Allocate a RS codec instance * @@ -1552,6 +1726,11 @@ static int __init doc_probe(unsigned long physadr) } nand_controller_init(&doc->base); + if (ChipID == DOC_ChipID_DocMilPlus16) + doc->base.ops = &doc2001plus_ops; + else + doc->base.ops = &doc200x_ops; + mtd = nand_to_mtd(nand); nand->bbt_td = (struct nand_bbt_descr *) (doc + 1); nand->bbt_md = nand->bbt_td + 1; -- GitLab From d8ef2b73a459e448fc22b237a54e61855ef7ba3b Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Fri, 1 May 2020 16:39:17 +0200 Subject: [PATCH 0404/1971] mtd: rawnand: diskonchip: Get rid of the legacy interface implementation Now that exec_op() has been implemented we can get rid of the legacy interface implementation. 
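For reference, a skeleton of the ->exec_op() shape that makes the legacy hooks removable, patterned on doc200x_exec_op() above (mynfc names and the register accesses described in comments are hypothetical):

#include <linux/delay.h>
#include <linux/mtd/rawnand.h>

static int mynfc_exec_op(struct nand_chip *chip,
			 const struct nand_operation *op, bool check_only)
{
	unsigned int i;

	if (check_only)
		return 0;	/* every instruction type below is supported */

	for (i = 0; i < op->ninstrs; i++) {
		const struct nand_op_instr *instr = &op->instrs[i];

		switch (instr->type) {
		case NAND_OP_CMD_INSTR:
			/* latch instr->ctx.cmd.opcode with CLE asserted */
			break;
		case NAND_OP_ADDR_INSTR:
			/* send instr->ctx.addr.addrs[] with ALE asserted */
			break;
		case NAND_OP_DATA_IN_INSTR:
			/* read instr->ctx.data.len bytes into ...data.buf.in */
			break;
		case NAND_OP_DATA_OUT_INSTR:
			/* write instr->ctx.data.len bytes from ...data.buf.out */
			break;
		case NAND_OP_WAITRDY_INSTR:
			/* poll ready/busy within ...waitrdy.timeout_ms */
			break;
		}

		if (instr->delay_ns)
			ndelay(instr->delay_ns);
	}

	return 0;
}

static const struct nand_controller_ops mynfc_controller_ops = {
	.exec_op = mynfc_exec_op,
};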
Signed-off-by: Boris Brezillon Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200501143917.1388957-6-boris.brezillon@collabora.com --- drivers/mtd/nand/raw/diskonchip.c | 292 ------------------------------ 1 file changed, 292 deletions(-) diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c index f85c0c8847579..97f0b05b47c12 100644 --- a/drivers/mtd/nand/raw/diskonchip.c +++ b/drivers/mtd/nand/raw/diskonchip.c @@ -86,10 +86,6 @@ static u_char empty_write_ecc[6] = { 0x4b, 0x00, 0xe2, 0x0e, 0x93, 0xf7 }; #define DoC_is_Millennium(doc) ((doc)->ChipID == DOC_ChipID_DocMil) #define DoC_is_2000(doc) ((doc)->ChipID == DOC_ChipID_Doc2k) -static void doc200x_hwcontrol(struct nand_chip *this, int cmd, - unsigned int bitmask); -static void doc200x_select_chip(struct nand_chip *this, int chip); - static int debug = 0; module_param(debug, int, 0); @@ -304,20 +300,6 @@ static void doc2000_write_byte(struct nand_chip *this, u_char datum) WriteDOC(datum, docptr, 2k_CDSN_IO); } -static u_char doc2000_read_byte(struct nand_chip *this) -{ - struct doc_priv *doc = nand_get_controller_data(this); - void __iomem *docptr = doc->virtadr; - u_char ret; - - ReadDOC(docptr, CDSNSlowIO); - DoC_Delay(doc, 2); - ret = ReadDOC(docptr, 2k_CDSN_IO); - if (debug) - printk("read_byte returns %02x\n", ret); - return ret; -} - static void doc2000_writebuf(struct nand_chip *this, const u_char *buf, int len) { @@ -430,20 +412,6 @@ static void __init doc2000_count_chips(struct mtd_info *mtd) pr_debug("Detected %d chips per floor.\n", i); } -static int doc200x_wait(struct nand_chip *this) -{ - struct doc_priv *doc = nand_get_controller_data(this); - - int status; - - DoC_WaitReady(doc); - nand_status_op(this, NULL); - DoC_WaitReady(doc); - status = (int)this->legacy.read_byte(this); - - return status; -} - static void doc2001_write_byte(struct nand_chip *this, u_char datum) { struct doc_priv *doc = nand_get_controller_data(this); @@ -454,19 +422,6 @@ static void doc2001_write_byte(struct nand_chip *this, u_char datum) WriteDOC(datum, docptr, WritePipeTerm); } -static u_char doc2001_read_byte(struct nand_chip *this) -{ - struct doc_priv *doc = nand_get_controller_data(this); - void __iomem *docptr = doc->virtadr; - - //ReadDOC(docptr, CDSNSlowIO); - /* 11.4.5 -- delay twice to allow extended length cycle */ - DoC_Delay(doc, 2); - ReadDOC(docptr, ReadPipeInit); - //return ReadDOC(docptr, Mil_CDSN_IO); - return ReadDOC(docptr, LastDataRead); -} - static void doc2001_writebuf(struct nand_chip *this, const u_char *buf, int len) { struct doc_priv *doc = nand_get_controller_data(this); @@ -495,20 +450,6 @@ static void doc2001_readbuf(struct nand_chip *this, u_char *buf, int len) buf[i] = ReadDOC(docptr, LastDataRead); } -static u_char doc2001plus_read_byte(struct nand_chip *this) -{ - struct doc_priv *doc = nand_get_controller_data(this); - void __iomem *docptr = doc->virtadr; - u_char ret; - - ReadDOC(docptr, Mplus_ReadPipeInit); - ReadDOC(docptr, Mplus_ReadPipeInit); - ret = ReadDOC(docptr, Mplus_LastDataRead); - if (debug) - printk("read_byte returns %02x\n", ret); - return ret; -} - static void doc2001plus_writebuf(struct nand_chip *this, const u_char *buf, int len) { struct doc_priv *doc = nand_get_controller_data(this); @@ -559,84 +500,6 @@ static void doc2001plus_readbuf(struct nand_chip *this, u_char *buf, int len) printk("\n"); } -static void doc2001plus_select_chip(struct nand_chip *this, int chip) -{ - struct doc_priv *doc = nand_get_controller_data(this); - void __iomem 
*docptr = doc->virtadr; - int floor = 0; - - if (debug) - printk("select chip (%d)\n", chip); - - if (chip == -1) { - /* Disable flash internally */ - WriteDOC(0, docptr, Mplus_FlashSelect); - return; - } - - floor = chip / doc->chips_per_floor; - chip -= (floor * doc->chips_per_floor); - - /* Assert ChipEnable and deassert WriteProtect */ - WriteDOC((DOC_FLASH_CE), docptr, Mplus_FlashSelect); - nand_reset_op(this); - - doc->curchip = chip; - doc->curfloor = floor; -} - -static void doc200x_select_chip(struct nand_chip *this, int chip) -{ - struct doc_priv *doc = nand_get_controller_data(this); - void __iomem *docptr = doc->virtadr; - int floor = 0; - - if (debug) - printk("select chip (%d)\n", chip); - - if (chip == -1) - return; - - floor = chip / doc->chips_per_floor; - chip -= (floor * doc->chips_per_floor); - - /* 11.4.4 -- deassert CE before changing chip */ - doc200x_hwcontrol(this, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE); - - WriteDOC(floor, docptr, FloorSelect); - WriteDOC(chip, docptr, CDSNDeviceSelect); - - doc200x_hwcontrol(this, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); - - doc->curchip = chip; - doc->curfloor = floor; -} - -#define CDSN_CTRL_MSK (CDSN_CTRL_CE | CDSN_CTRL_CLE | CDSN_CTRL_ALE) - -static void doc200x_hwcontrol(struct nand_chip *this, int cmd, - unsigned int ctrl) -{ - struct doc_priv *doc = nand_get_controller_data(this); - void __iomem *docptr = doc->virtadr; - - if (ctrl & NAND_CTRL_CHANGE) { - doc->CDSNControl &= ~CDSN_CTRL_MSK; - doc->CDSNControl |= ctrl & CDSN_CTRL_MSK; - if (debug) - printk("hwcontrol(%d): %02x\n", cmd, doc->CDSNControl); - WriteDOC(doc->CDSNControl, docptr, CDSNControl); - /* 11.4.3 -- 4 NOPs after CSDNControl write */ - DoC_Delay(doc, 4); - } - if (cmd != NAND_CMD_NONE) { - if (DoC_is_2000(doc)) - doc2000_write_byte(this, cmd); - else - doc2001_write_byte(this, cmd); - } -} - static void doc200x_write_control(struct doc_priv *doc, u8 value) { WriteDOC(value, doc->virtadr, CDSNControl); @@ -797,144 +660,6 @@ static int doc2001plus_exec_op(struct nand_chip *this, return 0; } -static void doc2001plus_command(struct nand_chip *this, unsigned command, - int column, int page_addr) -{ - struct mtd_info *mtd = nand_to_mtd(this); - struct doc_priv *doc = nand_get_controller_data(this); - void __iomem *docptr = doc->virtadr; - - /* - * Must terminate write pipeline before sending any commands - * to the device. - */ - if (command == NAND_CMD_PAGEPROG) { - WriteDOC(0x00, docptr, Mplus_WritePipeTerm); - WriteDOC(0x00, docptr, Mplus_WritePipeTerm); - } - - /* - * Write out the command to the device. 
- */ - if (command == NAND_CMD_SEQIN) { - int readcmd; - - if (column >= mtd->writesize) { - /* OOB area */ - column -= mtd->writesize; - readcmd = NAND_CMD_READOOB; - } else if (column < 256) { - /* First 256 bytes --> READ0 */ - readcmd = NAND_CMD_READ0; - } else { - column -= 256; - readcmd = NAND_CMD_READ1; - } - WriteDOC(readcmd, docptr, Mplus_FlashCmd); - } - WriteDOC(command, docptr, Mplus_FlashCmd); - WriteDOC(0, docptr, Mplus_WritePipeTerm); - WriteDOC(0, docptr, Mplus_WritePipeTerm); - - if (column != -1 || page_addr != -1) { - /* Serially input address */ - if (column != -1) { - /* Adjust columns for 16 bit buswidth */ - if (this->options & NAND_BUSWIDTH_16 && - !nand_opcode_8bits(command)) - column >>= 1; - WriteDOC(column, docptr, Mplus_FlashAddress); - } - if (page_addr != -1) { - WriteDOC((unsigned char)(page_addr & 0xff), docptr, Mplus_FlashAddress); - WriteDOC((unsigned char)((page_addr >> 8) & 0xff), docptr, Mplus_FlashAddress); - if (this->options & NAND_ROW_ADDR_3) { - WriteDOC((unsigned char)((page_addr >> 16) & 0x0f), docptr, Mplus_FlashAddress); - printk("high density\n"); - } - } - WriteDOC(0, docptr, Mplus_WritePipeTerm); - WriteDOC(0, docptr, Mplus_WritePipeTerm); - /* deassert ALE */ - if (command == NAND_CMD_READ0 || command == NAND_CMD_READ1 || - command == NAND_CMD_READOOB || command == NAND_CMD_READID) - WriteDOC(0, docptr, Mplus_FlashControl); - } - - /* - * program and erase have their own busy handlers - * status and sequential in needs no delay - */ - switch (command) { - - case NAND_CMD_PAGEPROG: - case NAND_CMD_ERASE1: - case NAND_CMD_ERASE2: - case NAND_CMD_SEQIN: - case NAND_CMD_STATUS: - return; - - case NAND_CMD_RESET: - if (this->legacy.dev_ready) - break; - udelay(this->legacy.chip_delay); - WriteDOC(NAND_CMD_STATUS, docptr, Mplus_FlashCmd); - WriteDOC(0, docptr, Mplus_WritePipeTerm); - WriteDOC(0, docptr, Mplus_WritePipeTerm); - while (!(this->legacy.read_byte(this) & 0x40)) ; - return; - - /* This applies to read commands */ - default: - /* - * If we don't have access to the busy pin, we apply the given - * command delay - */ - if (!this->legacy.dev_ready) { - udelay(this->legacy.chip_delay); - return; - } - } - - /* Apply this short delay always to ensure that we do wait tWB in - * any case on any machine. 
*/ - ndelay(100); - /* wait until command is processed */ - while (!this->legacy.dev_ready(this)) ; -} - -static int doc200x_dev_ready(struct nand_chip *this) -{ - struct doc_priv *doc = nand_get_controller_data(this); - void __iomem *docptr = doc->virtadr; - - if (DoC_is_MillenniumPlus(doc)) { - /* 11.4.2 -- must NOP four times before checking FR/B# */ - DoC_Delay(doc, 4); - if ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) { - if (debug) - printk("not ready\n"); - return 0; - } - if (debug) - printk("was ready\n"); - return 1; - } else { - /* 11.4.2 -- must NOP four times before checking FR/B# */ - DoC_Delay(doc, 4); - if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) { - if (debug) - printk("not ready\n"); - return 0; - } - /* 11.4.2 -- Must NOP twice if it's ready */ - DoC_Delay(doc, 2); - if (debug) - printk("was ready\n"); - return 1; - } -} - static void doc200x_enable_hwecc(struct nand_chip *this, int mode) { struct doc_priv *doc = nand_get_controller_data(this); @@ -1496,9 +1221,6 @@ static inline int __init doc2000_init(struct mtd_info *mtd) struct nand_chip *this = mtd_to_nand(mtd); struct doc_priv *doc = nand_get_controller_data(this); - this->legacy.read_byte = doc2000_read_byte; - this->legacy.write_buf = doc2000_writebuf; - this->legacy.read_buf = doc2000_readbuf; doc->late_init = nftl_scan_bbt; doc->CDSNControl = CDSN_CTRL_FLASH_IO | CDSN_CTRL_ECC_IO; @@ -1512,10 +1234,6 @@ static inline int __init doc2001_init(struct mtd_info *mtd) struct nand_chip *this = mtd_to_nand(mtd); struct doc_priv *doc = nand_get_controller_data(this); - this->legacy.read_byte = doc2001_read_byte; - this->legacy.write_buf = doc2001_writebuf; - this->legacy.read_buf = doc2001_readbuf; - ReadDOC(doc->virtadr, ChipID); ReadDOC(doc->virtadr, ChipID); ReadDOC(doc->virtadr, ChipID); @@ -1542,13 +1260,7 @@ static inline int __init doc2001plus_init(struct mtd_info *mtd) struct nand_chip *this = mtd_to_nand(mtd); struct doc_priv *doc = nand_get_controller_data(this); - this->legacy.read_byte = doc2001plus_read_byte; - this->legacy.write_buf = doc2001plus_writebuf; - this->legacy.read_buf = doc2001plus_readbuf; doc->late_init = inftl_scan_bbt; - this->legacy.cmd_ctrl = NULL; - this->legacy.select_chip = doc2001plus_select_chip; - this->legacy.cmdfunc = doc2001plus_command; this->ecc.hwctl = doc2001plus_enable_hwecc; doc->chips_per_floor = 1; @@ -1740,10 +1452,6 @@ static int __init doc_probe(unsigned long physadr) nand->controller = &doc->base; nand_set_controller_data(nand, doc); - nand->legacy.select_chip = doc200x_select_chip; - nand->legacy.cmd_ctrl = doc200x_hwcontrol; - nand->legacy.dev_ready = doc200x_dev_ready; - nand->legacy.waitfunc = doc200x_wait; nand->ecc.hwctl = doc200x_enable_hwecc; nand->ecc.calculate = doc200x_calculate_ecc; nand->ecc.correct = doc200x_correct_data; -- GitLab From 44fb26c6b4c5ad63f733df7936deffeae2950855 Mon Sep 17 00:00:00 2001 From: Ma Feng Date: Mon, 11 May 2020 20:07:08 +0800 Subject: [PATCH 0405/1971] nfsd: Fix old-style function definition Fix warning: fs/nfsd/nfssvc.c:604:6: warning: old-style function definition [-Wold-style-definition] bool i_am_nfsd() ^~~~~~~~~ Reported-by: Hulk Robot Signed-off-by: Ma Feng Signed-off-by: J. 
Bruce Fields --- fs/nfsd/nfssvc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index 4f588c0eaaf44..b603dfcdd3615 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c @@ -601,7 +601,7 @@ static const struct svc_serv_ops nfsd_thread_sv_ops = { .svo_module = THIS_MODULE, }; -bool i_am_nfsd() +bool i_am_nfsd(void) { return kthread_func(current) == nfsd; } -- GitLab From 19bb22273c42bac264febe38f53a8b9d004a8df5 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Sat, 25 Apr 2020 14:51:49 +0300 Subject: [PATCH 0406/1971] i2c: mux: pca954x: Refactor pca954x_irq_handler() Refactor pca954x_irq_handler() to: - use for_each_set_bit() macro - use IRQ_RETVAL() macro Above change makes code easy to read and understand. Signed-off-by: Andy Shevchenko Reviewed-by: Peter Rosin Signed-off-by: Wolfram Sang --- drivers/i2c/muxes/i2c-mux-pca954x.c | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c index a0d926ae3f864..b764c7c746e96 100644 --- a/drivers/i2c/muxes/i2c-mux-pca954x.c +++ b/drivers/i2c/muxes/i2c-mux-pca954x.c @@ -327,21 +327,18 @@ static DEVICE_ATTR_RW(idle_state); static irqreturn_t pca954x_irq_handler(int irq, void *dev_id) { struct pca954x *data = dev_id; - unsigned int child_irq; - int ret, i, handled = 0; + unsigned long pending; + int ret, i; ret = i2c_smbus_read_byte(data->client); if (ret < 0) return IRQ_NONE; - for (i = 0; i < data->chip->nchans; i++) { - if (ret & BIT(PCA954X_IRQ_OFFSET + i)) { - child_irq = irq_linear_revmap(data->irq, i); - handle_nested_irq(child_irq); - handled++; - } - } - return handled ? IRQ_HANDLED : IRQ_NONE; + pending = (ret >> PCA954X_IRQ_OFFSET) & (BIT(data->chip->nchans) - 1); + for_each_set_bit(i, &pending, data->chip->nchans) + handle_nested_irq(irq_linear_revmap(data->irq, i)); + + return IRQ_RETVAL(pending); } static int pca954x_irq_set_type(struct irq_data *idata, unsigned int type) -- GitLab From 753aa3694382c5d967dcb427ae0c3ee199bd21aa Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Sat, 25 Apr 2020 14:51:50 +0300 Subject: [PATCH 0407/1971] i2c: mux: pca954x: Make use of device properties Device property API allows to gather device resources from different sources, such as ACPI. Convert the drivers to unleash the power of device property API. Signed-off-by: Andy Shevchenko Reviewed-by: Peter Rosin Signed-off-by: Wolfram Sang --- drivers/i2c/muxes/i2c-mux-pca954x.c | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c index b764c7c746e96..928c6f5ceac70 100644 --- a/drivers/i2c/muxes/i2c-mux-pca954x.c +++ b/drivers/i2c/muxes/i2c-mux-pca954x.c @@ -43,10 +43,8 @@ #include #include #include -#include -#include -#include #include +#include #include #include #include @@ -198,7 +196,6 @@ static const struct i2c_device_id pca954x_id[] = { }; MODULE_DEVICE_TABLE(i2c, pca954x_id); -#ifdef CONFIG_OF static const struct of_device_id pca954x_of_match[] = { { .compatible = "nxp,pca9540", .data = &chips[pca_9540] }, { .compatible = "nxp,pca9542", .data = &chips[pca_9542] }, @@ -215,7 +212,6 @@ static const struct of_device_id pca954x_of_match[] = { {} }; MODULE_DEVICE_TABLE(of, pca954x_of_match); -#endif /* Write to mux register. 
Don't use i2c_transfer()/i2c_smbus_xfer() for this as they will try to lock adapter a second time */ @@ -426,7 +422,6 @@ static int pca954x_probe(struct i2c_client *client, { struct i2c_adapter *adap = client->adapter; struct device *dev = &client->dev; - struct device_node *np = dev->of_node; struct gpio_desc *gpio; struct i2c_mux_core *muxc; struct pca954x *data; @@ -456,7 +451,7 @@ static int pca954x_probe(struct i2c_client *client, udelay(1); } - data->chip = of_device_get_match_data(dev); + data->chip = device_get_match_data(dev); if (!data->chip) data->chip = &chips[id->driver_data]; @@ -478,8 +473,8 @@ static int pca954x_probe(struct i2c_client *client, } data->idle_state = MUX_IDLE_AS_IS; - if (of_property_read_u32(np, "idle-state", &data->idle_state)) { - if (np && of_property_read_bool(np, "i2c-mux-idle-disconnect")) + if (device_property_read_u32(dev, "idle-state", &data->idle_state)) { + if (device_property_read_bool(dev, "i2c-mux-idle-disconnect")) data->idle_state = MUX_IDLE_DISCONNECT; } @@ -562,7 +557,7 @@ static struct i2c_driver pca954x_driver = { .driver = { .name = "pca954x", .pm = &pca954x_pm, - .of_match_table = of_match_ptr(pca954x_of_match), + .of_match_table = pca954x_of_match, }, .probe = pca954x_probe, .remove = pca954x_remove, -- GitLab From 3093c64101729e9b2b7be409abf3ebc4735f2b1e Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Sat, 25 Apr 2020 14:51:51 +0300 Subject: [PATCH 0408/1971] i2c: mux: pca954x: Move device_remove_file() out of pca954x_cleanup() device_create_file() is called the last in ->probe() but pca954x_cleanup(), which is called earlier in error path, tries to remove never created file. Move device_remove_file() call outside of pca954x_cleanup() to make it slightly closer to what pca954x_init() is doing. Signed-off-by: Andy Shevchenko Reviewed-by: Peter Rosin Signed-off-by: Wolfram Sang --- drivers/i2c/muxes/i2c-mux-pca954x.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c index 928c6f5ceac70..0908a0655cb79 100644 --- a/drivers/i2c/muxes/i2c-mux-pca954x.c +++ b/drivers/i2c/muxes/i2c-mux-pca954x.c @@ -383,11 +383,8 @@ static int pca954x_irq_setup(struct i2c_mux_core *muxc) static void pca954x_cleanup(struct i2c_mux_core *muxc) { struct pca954x *data = i2c_mux_priv(muxc); - struct i2c_client *client = data->client; int c, irq; - device_remove_file(&client->dev, &dev_attr_idle_state); - if (data->irq) { for (c = 0; c < data->chip->nchans; c++) { irq = irq_find_mapping(data->irq, c); @@ -531,6 +528,8 @@ static int pca954x_remove(struct i2c_client *client) { struct i2c_mux_core *muxc = i2c_get_clientdata(client); + device_remove_file(&client->dev, &dev_attr_idle_state); + pca954x_cleanup(muxc); return 0; } -- GitLab From 40e31f0e18475165b7c8582363cbe6a58fdaaca3 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Sat, 25 Apr 2020 14:51:52 +0300 Subject: [PATCH 0409/1971] i2c: mux: pca954x: Convert license to SPDX identifier Use a shorter SPDX identifier instead of pasting the whole license. While here, replace duplicating PCA954x with PCA984x. 
Signed-off-by: Andy Shevchenko Reviewed-by: Peter Rosin Signed-off-by: Wolfram Sang --- drivers/i2c/muxes/i2c-mux-pca954x.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c index 0908a0655cb79..4ad665757dd81 100644 --- a/drivers/i2c/muxes/i2c-mux-pca954x.c +++ b/drivers/i2c/muxes/i2c-mux-pca954x.c @@ -1,10 +1,11 @@ +// SPDX-License-Identifier: GPL-2.0 /* * I2C multiplexer * * Copyright (c) 2008-2009 Rodolfo Giometti * Copyright (c) 2008-2009 Eurotech S.p.A. * - * This module supports the PCA954x and PCA954x series of I2C multiplexer/switch + * This module supports the PCA954x and PCA984x series of I2C multiplexer/switch * chips made by NXP Semiconductors. * This includes the: * PCA9540, PCA9542, PCA9543, PCA9544, PCA9545, PCA9546, PCA9547, @@ -29,10 +30,6 @@ * i2c-virtual_cb.c from Brian Kuschak * and * pca9540.c from Jean Delvare . - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. */ #include -- GitLab From 71d1f1d5958fe15c1f12f0ecc5aa18ebe2073e31 Mon Sep 17 00:00:00 2001 From: Christophe Kerello Date: Wed, 6 May 2020 11:11:10 +0200 Subject: [PATCH 0410/1971] mtd: rawnand: stm32_fmc2: manage all errors cases at probe time This patch defers its probe when the expected reset control is not yet ready. This patch also handles properly all errors cases at probe time. Signed-off-by: Christophe Kerello Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/1588756279-17289-2-git-send-email-christophe.kerello@st.com --- drivers/mtd/nand/raw/stm32_fmc2_nand.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c index c5fde09e0175e..51275ee9ec8f5 100644 --- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c +++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c @@ -1967,7 +1967,11 @@ static int stm32_fmc2_probe(struct platform_device *pdev) } rstc = devm_reset_control_get(dev, NULL); - if (!IS_ERR(rstc)) { + if (IS_ERR(rstc)) { + ret = PTR_ERR(rstc); + if (ret == -EPROBE_DEFER) + goto err_clk_disable; + } else { reset_control_assert(rstc); reset_control_deassert(rstc); } @@ -1975,7 +1979,7 @@ static int stm32_fmc2_probe(struct platform_device *pdev) /* DMA setup */ ret = stm32_fmc2_dma_setup(fmc2); if (ret) - return ret; + goto err_release_dma; /* FMC2 init routine */ stm32_fmc2_init(fmc2); @@ -1997,20 +2001,20 @@ static int stm32_fmc2_probe(struct platform_device *pdev) /* Scan to find existence of the device */ ret = nand_scan(chip, nand->ncs); if (ret) - goto err_scan; + goto err_release_dma; ret = mtd_device_register(mtd, NULL, 0); if (ret) - goto err_device_register; + goto err_nand_cleanup; platform_set_drvdata(pdev, fmc2); return 0; -err_device_register: +err_nand_cleanup: nand_cleanup(chip); -err_scan: +err_release_dma: if (fmc2->dma_ecc_ch) dma_release_channel(fmc2->dma_ecc_ch); if (fmc2->dma_tx_ch) @@ -2021,6 +2025,7 @@ err_scan: sg_free_table(&fmc2->dma_data_sg); sg_free_table(&fmc2->dma_ecc_sg); +err_clk_disable: clk_disable_unprepare(fmc2->clk); return ret; -- GitLab From 2d3d54bf1294f6290e55841e52bfbe6b9e65704b Mon Sep 17 00:00:00 2001 From: Christophe Kerello Date: Wed, 6 May 2020 11:11:11 +0200 Subject: [PATCH 0411/1971] mtd: rawnand: stm32_fmc2: remove useless inline comments Remove inline comments that are useless since function label are 
self explanatory. Signed-off-by: Christophe Kerello Reviewed-by: Miquel Raynal Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/1588756279-17289-3-git-send-email-christophe.kerello@st.com --- drivers/mtd/nand/raw/stm32_fmc2_nand.c | 40 -------------------------- 1 file changed, 40 deletions(-) diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c index 51275ee9ec8f5..edbf8526d89c5 100644 --- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c +++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c @@ -281,7 +281,6 @@ static inline struct stm32_fmc2_nfc *to_stm32_nfc(struct nand_controller *base) return container_of(base, struct stm32_fmc2_nfc, base); } -/* Timings configuration */ static void stm32_fmc2_timings_init(struct nand_chip *chip) { struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); @@ -313,7 +312,6 @@ static void stm32_fmc2_timings_init(struct nand_chip *chip) writel_relaxed(patt, fmc2->io_base + FMC2_PATT); } -/* Controller configuration */ static void stm32_fmc2_setup(struct nand_chip *chip) { struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); @@ -341,7 +339,6 @@ static void stm32_fmc2_setup(struct nand_chip *chip) writel_relaxed(pcr, fmc2->io_base + FMC2_PCR); } -/* Select target */ static int stm32_fmc2_select_chip(struct nand_chip *chip, int chipnr) { struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); @@ -353,11 +350,7 @@ static int stm32_fmc2_select_chip(struct nand_chip *chip, int chipnr) return 0; fmc2->cs_sel = nand->cs_used[chipnr]; - - /* FMC2 setup routine */ stm32_fmc2_setup(chip); - - /* Apply timings */ stm32_fmc2_timings_init(chip); if (fmc2->dma_tx_ch && fmc2->dma_rx_ch) { @@ -407,7 +400,6 @@ static int stm32_fmc2_select_chip(struct nand_chip *chip, int chipnr) return 0; } -/* Set bus width to 16-bit or 8-bit */ static void stm32_fmc2_set_buswidth_16(struct stm32_fmc2_nfc *fmc2, bool set) { u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR); @@ -418,7 +410,6 @@ static void stm32_fmc2_set_buswidth_16(struct stm32_fmc2_nfc *fmc2, bool set) writel_relaxed(pcr, fmc2->io_base + FMC2_PCR); } -/* Enable/disable ECC */ static void stm32_fmc2_set_ecc(struct stm32_fmc2_nfc *fmc2, bool enable) { u32 pcr = readl(fmc2->io_base + FMC2_PCR); @@ -429,7 +420,6 @@ static void stm32_fmc2_set_ecc(struct stm32_fmc2_nfc *fmc2, bool enable) writel(pcr, fmc2->io_base + FMC2_PCR); } -/* Enable irq sources in case of the sequencer is used */ static inline void stm32_fmc2_enable_seq_irq(struct stm32_fmc2_nfc *fmc2) { u32 csqier = readl_relaxed(fmc2->io_base + FMC2_CSQIER); @@ -441,7 +431,6 @@ static inline void stm32_fmc2_enable_seq_irq(struct stm32_fmc2_nfc *fmc2) writel_relaxed(csqier, fmc2->io_base + FMC2_CSQIER); } -/* Disable irq sources in case of the sequencer is used */ static inline void stm32_fmc2_disable_seq_irq(struct stm32_fmc2_nfc *fmc2) { u32 csqier = readl_relaxed(fmc2->io_base + FMC2_CSQIER); @@ -453,13 +442,11 @@ static inline void stm32_fmc2_disable_seq_irq(struct stm32_fmc2_nfc *fmc2) fmc2->irq_state = FMC2_IRQ_UNKNOWN; } -/* Clear irq sources in case of the sequencer is used */ static inline void stm32_fmc2_clear_seq_irq(struct stm32_fmc2_nfc *fmc2) { writel_relaxed(FMC2_CSQICR_CLEAR_IRQ, fmc2->io_base + FMC2_CSQICR); } -/* Enable irq sources in case of bch is used */ static inline void stm32_fmc2_enable_bch_irq(struct stm32_fmc2_nfc *fmc2, int mode) { @@ -475,7 +462,6 @@ static inline void stm32_fmc2_enable_bch_irq(struct stm32_fmc2_nfc *fmc2, writel_relaxed(bchier, fmc2->io_base + FMC2_BCHIER); } 
-/* Disable irq sources in case of bch is used */ static inline void stm32_fmc2_disable_bch_irq(struct stm32_fmc2_nfc *fmc2) { u32 bchier = readl_relaxed(fmc2->io_base + FMC2_BCHIER); @@ -488,7 +474,6 @@ static inline void stm32_fmc2_disable_bch_irq(struct stm32_fmc2_nfc *fmc2) fmc2->irq_state = FMC2_IRQ_UNKNOWN; } -/* Clear irq sources in case of bch is used */ static inline void stm32_fmc2_clear_bch_irq(struct stm32_fmc2_nfc *fmc2) { writel_relaxed(FMC2_BCHICR_CLEAR_IRQ, fmc2->io_base + FMC2_BCHICR); @@ -549,10 +534,7 @@ static int stm32_fmc2_ham_calculate(struct nand_chip *chip, const u8 *data, } heccr = readl_relaxed(fmc2->io_base + FMC2_HECCR); - stm32_fmc2_ham_set_ecc(heccr, ecc); - - /* Disable ECC */ stm32_fmc2_set_ecc(fmc2, false); return 0; @@ -654,13 +636,11 @@ static int stm32_fmc2_bch_calculate(struct nand_chip *chip, const u8 *data, ecc[12] = bchpbr; } - /* Disable ECC */ stm32_fmc2_set_ecc(fmc2, false); return 0; } -/* BCH algorithm correction */ static int stm32_fmc2_bch_decode(int eccsize, u8 *dat, u32 *ecc_sta) { u32 bchdsr0 = ecc_sta[0]; @@ -720,7 +700,6 @@ static int stm32_fmc2_bch_correct(struct nand_chip *chip, u8 *dat, ecc_sta[3] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR3); ecc_sta[4] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR4); - /* Disable ECC */ stm32_fmc2_set_ecc(fmc2, false); return stm32_fmc2_bch_decode(chip->ecc.size, dat, ecc_sta); @@ -1054,7 +1033,6 @@ static int stm32_fmc2_sequencer_write_page(struct nand_chip *chip, { int ret; - /* Select the target */ ret = stm32_fmc2_select_chip(chip, chip->cur_cs); if (ret) return ret; @@ -1069,7 +1047,6 @@ static int stm32_fmc2_sequencer_write_page_raw(struct nand_chip *chip, { int ret; - /* Select the target */ ret = stm32_fmc2_select_chip(chip, chip->cur_cs); if (ret) return ret; @@ -1153,7 +1130,6 @@ static int stm32_fmc2_sequencer_read_page(struct nand_chip *chip, u8 *buf, u16 sta_map; int ret; - /* Select the target */ ret = stm32_fmc2_select_chip(chip, chip->cur_cs); if (ret) return ret; @@ -1199,7 +1175,6 @@ static int stm32_fmc2_sequencer_read_page_raw(struct nand_chip *chip, u8 *buf, struct mtd_info *mtd = nand_to_mtd(chip); int ret; - /* Select the target */ ret = stm32_fmc2_select_chip(chip, chip->cur_cs); if (ret) return ret; @@ -1409,7 +1384,6 @@ static int stm32_fmc2_exec_op(struct nand_chip *chip, return ret; } -/* Controller initialization */ static void stm32_fmc2_init(struct stm32_fmc2_nfc *fmc2) { u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR); @@ -1452,7 +1426,6 @@ static void stm32_fmc2_init(struct stm32_fmc2_nfc *fmc2) writel_relaxed(FMC2_PATT_DEFAULT, fmc2->io_base + FMC2_PATT); } -/* Controller timings */ static void stm32_fmc2_calc_timings(struct nand_chip *chip, const struct nand_sdr_timings *sdrt) { @@ -1596,14 +1569,11 @@ static int stm32_fmc2_setup_interface(struct nand_chip *chip, int chipnr, return 0; stm32_fmc2_calc_timings(chip, sdrt); - - /* Apply timings */ stm32_fmc2_timings_init(chip); return 0; } -/* DMA configuration */ static int stm32_fmc2_dma_setup(struct stm32_fmc2_nfc *fmc2) { int ret = 0; @@ -1667,7 +1637,6 @@ err_dma: return ret; } -/* NAND callbacks setup */ static void stm32_fmc2_nand_callbacks_setup(struct nand_chip *chip) { struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); @@ -1708,7 +1677,6 @@ static void stm32_fmc2_nand_callbacks_setup(struct nand_chip *chip) chip->ecc.bytes = chip->options & NAND_BUSWIDTH_16 ? 
8 : 7; } -/* FMC2 layout */ static int stm32_fmc2_nand_ooblayout_ecc(struct mtd_info *mtd, int section, struct mtd_oob_region *oobregion) { @@ -1744,7 +1712,6 @@ static const struct mtd_ooblayout_ops stm32_fmc2_nand_ooblayout_ops = { .free = stm32_fmc2_nand_ooblayout_free, }; -/* FMC2 caps */ static int stm32_fmc2_calc_ecc_bytes(int step_size, int strength) { /* Hamming */ @@ -1763,7 +1730,6 @@ NAND_ECC_CAPS_SINGLE(stm32_fmc2_ecc_caps, stm32_fmc2_calc_ecc_bytes, FMC2_ECC_STEP_SIZE, FMC2_ECC_HAM, FMC2_ECC_BCH4, FMC2_ECC_BCH8); -/* FMC2 controller ops */ static int stm32_fmc2_attach_chip(struct nand_chip *chip) { struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); @@ -1797,13 +1763,10 @@ static int stm32_fmc2_attach_chip(struct nand_chip *chip) if (chip->bbt_options & NAND_BBT_USE_FLASH) chip->bbt_options |= NAND_BBT_NO_OOB; - /* NAND callbacks setup */ stm32_fmc2_nand_callbacks_setup(chip); - /* Define ECC layout */ mtd_set_ooblayout(mtd, &stm32_fmc2_nand_ooblayout_ops); - /* Configure bus width to 16-bit */ if (chip->options & NAND_BUSWIDTH_16) stm32_fmc2_set_buswidth_16(fmc2, true); @@ -1816,7 +1779,6 @@ static const struct nand_controller_ops stm32_fmc2_nand_controller_ops = { .setup_data_interface = stm32_fmc2_setup_interface, }; -/* FMC2 probe */ static int stm32_fmc2_parse_child(struct stm32_fmc2_nfc *fmc2, struct device_node *dn) { @@ -1976,12 +1938,10 @@ static int stm32_fmc2_probe(struct platform_device *pdev) reset_control_deassert(rstc); } - /* DMA setup */ ret = stm32_fmc2_dma_setup(fmc2); if (ret) goto err_release_dma; - /* FMC2 init routine */ stm32_fmc2_init(fmc2); nand = &fmc2->nand; -- GitLab From 0185d50c5220e772aa90a21c515b0c87660ec4d7 Mon Sep 17 00:00:00 2001 From: Christophe Kerello Date: Wed, 6 May 2020 11:11:12 +0200 Subject: [PATCH 0412/1971] mtd: rawnand: stm32_fmc2: use FMC2_TIMEOUT_MS for timeouts This patch removes the constant FMC2_TIMEOUT_US. FMC2_TIMEOUT_MS will be used each time that we need to wait (except when the timeout value is set by the framework). It was seen, during stress tests with the sequencer in an overloaded system, that we could be close to 1 second, even if we never met this value. To be safe, FMC2_TIMEOUT_MS is set to 5 seconds. 
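One detail worth spelling out: readl_relaxed_poll_timeout() and readl_relaxed_poll_timeout_atomic() take their delay and timeout arguments in microseconds, which is why the hunks below scale the new constant as 1000 * FMC2_TIMEOUT_MS. A minimal sketch of that polling pattern, using placeholder register and bit names rather than the real FMC2 ones:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define XYZ_TIMEOUT_MS	5000		/* hypothetical timeout, in milliseconds */
#define XYZ_SR_READY	BIT(6)		/* hypothetical "ready" flag in a status register */

/* Poll a status register every 1 us until the ready flag is set or we time out. */
static int xyz_wait_ready(void __iomem *status_reg)
{
	u32 sr;

	/* The timeout argument is in microseconds, hence the * 1000 conversion. */
	return readl_relaxed_poll_timeout(status_reg, sr, sr & XYZ_SR_READY,
					  1, 1000 * XYZ_TIMEOUT_MS);
}
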
Signed-off-by: Christophe Kerello Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/1588756279-17289-4-git-send-email-christophe.kerello@st.com --- drivers/mtd/nand/raw/stm32_fmc2_nand.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c index edbf8526d89c5..30083e52b2fc3 100644 --- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c +++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c @@ -37,8 +37,7 @@ /* Max ECC buffer length */ #define FMC2_MAX_ECC_BUF_LEN (FMC2_BCHDSRS_LEN * FMC2_MAX_SG) -#define FMC2_TIMEOUT_US 1000 -#define FMC2_TIMEOUT_MS 1000 +#define FMC2_TIMEOUT_MS 5000 /* Timings */ #define FMC2_THIZ 1 @@ -526,8 +525,8 @@ static int stm32_fmc2_ham_calculate(struct nand_chip *chip, const u8 *data, int ret; ret = readl_relaxed_poll_timeout(fmc2->io_base + FMC2_SR, - sr, sr & FMC2_SR_NWRF, 10, - FMC2_TIMEOUT_MS); + sr, sr & FMC2_SR_NWRF, 1, + 1000 * FMC2_TIMEOUT_MS); if (ret) { dev_err(fmc2->dev, "ham timeout\n"); return ret; @@ -1315,7 +1314,7 @@ static int stm32_fmc2_waitrdy(struct nand_chip *chip, unsigned long timeout_ms) /* Check if there is no pending requests to the NAND flash */ if (readl_relaxed_poll_timeout_atomic(fmc2->io_base + FMC2_SR, sr, sr & FMC2_SR_NWRF, 1, - FMC2_TIMEOUT_US)) + 1000 * FMC2_TIMEOUT_MS)) dev_warn(fmc2->dev, "Waitrdy timeout\n"); /* Wait tWB before R/B# signal is low */ -- GitLab From 45ba7a893ad89114e773b3dc32f6431354c465d6 Mon Sep 17 00:00:00 2001 From: David Gow Date: Thu, 30 Apr 2020 21:27:01 -0700 Subject: [PATCH 0413/1971] kunit: kunit_tool: Separate out config/build/exec/parse Add new subcommands to kunit.py to allow stages of the existing 'run' subcommand to be run independently: - 'config': Verifies that .config is a subset of .kunitconfig - 'build': Compiles a UML kernel for KUnit - 'exec': Runs the kernel, and outputs the test results. - 'parse': Parses test results from a file or stdin The 'run' command continues to behave as before. 
Signed-off-by: David Gow Reviewed-by: Brendan Higgins Signed-off-by: Shuah Khan --- tools/testing/kunit/kunit.py | 293 ++++++++++++++++++++----- tools/testing/kunit/kunit_tool_test.py | 63 +++++- 2 files changed, 296 insertions(+), 60 deletions(-) diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py index 7dca74774dd2f..b01838b6f5f9e 100755 --- a/tools/testing/kunit/kunit.py +++ b/tools/testing/kunit/kunit.py @@ -20,8 +20,17 @@ import kunit_config import kunit_kernel import kunit_parser -KunitResult = namedtuple('KunitResult', ['status','result']) +KunitResult = namedtuple('KunitResult', ['status','result','elapsed_time']) +KunitConfigRequest = namedtuple('KunitConfigRequest', + ['build_dir', 'defconfig', 'make_options']) +KunitBuildRequest = namedtuple('KunitBuildRequest', + ['jobs', 'build_dir', 'alltests', + 'make_options']) +KunitExecRequest = namedtuple('KunitExecRequest', + ['timeout', 'build_dir', 'alltests']) +KunitParseRequest = namedtuple('KunitParseRequest', + ['raw_output', 'input_data']) KunitRequest = namedtuple('KunitRequest', ['raw_output','timeout', 'jobs', 'build_dir', 'defconfig', 'alltests', 'make_options']) @@ -46,14 +55,25 @@ def get_kernel_root_path(): sys.exit(1) return parts[0] -def run_tests(linux: kunit_kernel.LinuxSourceTree, - request: KunitRequest) -> KunitResult: +def config_tests(linux: kunit_kernel.LinuxSourceTree, + request: KunitConfigRequest) -> KunitResult: + kunit_parser.print_with_timestamp('Configuring KUnit Kernel ...') + config_start = time.time() + if request.defconfig: + create_default_kunitconfig() success = linux.build_reconfig(request.build_dir, request.make_options) config_end = time.time() if not success: - return KunitResult(KunitStatus.CONFIG_FAILURE, 'could not configure kernel') + return KunitResult(KunitStatus.CONFIG_FAILURE, + 'could not configure kernel', + config_end - config_start) + return KunitResult(KunitStatus.SUCCESS, + 'configured kernel successfully', + config_end - config_start) +def build_tests(linux: kunit_kernel.LinuxSourceTree, + request: KunitBuildRequest) -> KunitResult: kunit_parser.print_with_timestamp('Building KUnit Kernel ...') build_start = time.time() @@ -64,78 +84,167 @@ def run_tests(linux: kunit_kernel.LinuxSourceTree, build_end = time.time() if not success: return KunitResult(KunitStatus.BUILD_FAILURE, 'could not build kernel') + if not success: + return KunitResult(KunitStatus.BUILD_FAILURE, + 'could not build kernel', + build_end - build_start) + return KunitResult(KunitStatus.SUCCESS, + 'built kernel successfully', + build_end - build_start) +def exec_tests(linux: kunit_kernel.LinuxSourceTree, + request: KunitExecRequest) -> KunitResult: kunit_parser.print_with_timestamp('Starting KUnit Kernel ...') test_start = time.time() - kunit_output = linux.run_kernel( + result = linux.run_kernel( timeout=None if request.alltests else request.timeout, build_dir=request.build_dir) + + test_end = time.time() + + return KunitResult(KunitStatus.SUCCESS, + result, + test_end - test_start) + +def parse_tests(request: KunitParseRequest) -> KunitResult: + parse_start = time.time() + + test_result = kunit_parser.TestResult(kunit_parser.TestStatus.SUCCESS, + [], + 'Tests not Parsed.') if request.raw_output: - raw_output = kunit_parser.raw_output(kunit_output) - isolated = list(kunit_parser.isolate_kunit_output(raw_output)) - test_result = kunit_parser.parse_test_result(isolated) + kunit_parser.raw_output(request.input_data) else: - test_result = kunit_parser.parse_run_tests(kunit_output) - test_end = 
time.time() + test_result = kunit_parser.parse_run_tests(request.input_data) + parse_end = time.time() + + if test_result.status != kunit_parser.TestStatus.SUCCESS: + return KunitResult(KunitStatus.TEST_FAILURE, test_result, + parse_end - parse_start) + + return KunitResult(KunitStatus.SUCCESS, test_result, + parse_end - parse_start) + + +def run_tests(linux: kunit_kernel.LinuxSourceTree, + request: KunitRequest) -> KunitResult: + run_start = time.time() + + config_request = KunitConfigRequest(request.build_dir, + request.defconfig, + request.make_options) + config_result = config_tests(linux, config_request) + if config_result.status != KunitStatus.SUCCESS: + return config_result + + build_request = KunitBuildRequest(request.jobs, request.build_dir, + request.alltests, + request.make_options) + build_result = build_tests(linux, build_request) + if build_result.status != KunitStatus.SUCCESS: + return build_result + + exec_request = KunitExecRequest(request.timeout, request.build_dir, + request.alltests) + exec_result = exec_tests(linux, exec_request) + if exec_result.status != KunitStatus.SUCCESS: + return exec_result + + parse_request = KunitParseRequest(request.raw_output, + exec_result.result) + parse_result = parse_tests(parse_request) + + run_end = time.time() kunit_parser.print_with_timestamp(( 'Elapsed time: %.3fs total, %.3fs configuring, %.3fs ' + 'building, %.3fs running\n') % ( - test_end - config_start, - config_end - config_start, - build_end - build_start, - test_end - test_start)) + run_end - run_start, + config_result.elapsed_time, + build_result.elapsed_time, + exec_result.elapsed_time)) + return parse_result + +def add_common_opts(parser): + parser.add_argument('--build_dir', + help='As in the make command, it specifies the build ' + 'directory.', + type=str, default='', metavar='build_dir') + parser.add_argument('--make_options', + help='X=Y make option, can be repeated.', + action='append') + parser.add_argument('--alltests', + help='Run all KUnit tests through allyesconfig', + action='store_true') + +def add_config_opts(parser): + parser.add_argument('--defconfig', + help='Uses a default .kunitconfig.', + action='store_true') + +def add_build_opts(parser): + parser.add_argument('--jobs', + help='As in the make command, "Specifies the number of ' + 'jobs (commands) to run simultaneously."', + type=int, default=8, metavar='jobs') + +def add_exec_opts(parser): + parser.add_argument('--timeout', + help='maximum number of seconds to allow for all tests ' + 'to run. This does not include time taken to build the ' + 'tests.', + type=int, + default=300, + metavar='timeout') + +def add_parse_opts(parser): + parser.add_argument('--raw_output', help='don\'t format output from kernel', + action='store_true') - if test_result.status != kunit_parser.TestStatus.SUCCESS: - return KunitResult(KunitStatus.TEST_FAILURE, test_result) - else: - return KunitResult(KunitStatus.SUCCESS, test_result) def main(argv, linux=None): parser = argparse.ArgumentParser( description='Helps writing and running KUnit tests.') subparser = parser.add_subparsers(dest='subcommand') + # The 'run' command will config, build, exec, and parse in one go. run_parser = subparser.add_parser('run', help='Runs KUnit tests.') - run_parser.add_argument('--raw_output', help='don\'t format output from kernel', - action='store_true') - - run_parser.add_argument('--timeout', - help='maximum number of seconds to allow for all tests ' - 'to run. 
This does not include time taken to build the ' - 'tests.', - type=int, - default=300, - metavar='timeout') - - run_parser.add_argument('--jobs', - help='As in the make command, "Specifies the number of ' - 'jobs (commands) to run simultaneously."', - type=int, default=8, metavar='jobs') - - run_parser.add_argument('--build_dir', - help='As in the make command, it specifies the build ' - 'directory.', - type=str, default='', metavar='build_dir') - - run_parser.add_argument('--defconfig', - help='Uses a default .kunitconfig.', - action='store_true') - - run_parser.add_argument('--alltests', - help='Run all KUnit tests through allyesconfig', - action='store_true') - - run_parser.add_argument('--make_options', - help='X=Y make option, can be repeated.', - action='append') + add_common_opts(run_parser) + add_config_opts(run_parser) + add_build_opts(run_parser) + add_exec_opts(run_parser) + add_parse_opts(run_parser) + + config_parser = subparser.add_parser('config', + help='Ensures that .config contains all of ' + 'the options in .kunitconfig') + add_common_opts(config_parser) + add_config_opts(config_parser) + + build_parser = subparser.add_parser('build', help='Builds a kernel with KUnit tests') + add_common_opts(build_parser) + add_build_opts(build_parser) + + exec_parser = subparser.add_parser('exec', help='Run a kernel with KUnit tests') + add_common_opts(exec_parser) + add_exec_opts(exec_parser) + add_parse_opts(exec_parser) + + # The 'parse' option is special, as it doesn't need the kernel source + # (therefore there is no need for a build_dir, hence no add_common_opts) + # and the '--file' argument is not relevant to 'run', so isn't in + # add_parse_opts() + parse_parser = subparser.add_parser('parse', + help='Parses KUnit results from a file, ' + 'and parses formatted results.') + add_parse_opts(parse_parser) + parse_parser.add_argument('file', + help='Specifies the file to read results from.', + type=str, nargs='?', metavar='input_file') cli_args = parser.parse_args(argv) if cli_args.subcommand == 'run': - if get_kernel_root_path(): - os.chdir(get_kernel_root_path()) - if cli_args.build_dir: if not os.path.exists(cli_args.build_dir): os.mkdir(cli_args.build_dir) @@ -143,9 +252,6 @@ def main(argv, linux=None): cli_args.build_dir, kunit_kernel.kunitconfig_path) - if cli_args.defconfig: - create_default_kunitconfig() - if not linux: linux = kunit_kernel.LinuxSourceTree() @@ -159,6 +265,81 @@ def main(argv, linux=None): result = run_tests(linux, request) if result.status != KunitStatus.SUCCESS: sys.exit(1) + elif cli_args.subcommand == 'config': + if cli_args.build_dir: + if not os.path.exists(cli_args.build_dir): + os.mkdir(cli_args.build_dir) + kunit_kernel.kunitconfig_path = os.path.join( + cli_args.build_dir, + kunit_kernel.kunitconfig_path) + + if not linux: + linux = kunit_kernel.LinuxSourceTree() + + request = KunitConfigRequest(cli_args.build_dir, + cli_args.defconfig, + cli_args.make_options) + result = config_tests(linux, request) + kunit_parser.print_with_timestamp(( + 'Elapsed time: %.3fs\n') % ( + result.elapsed_time)) + if result.status != KunitStatus.SUCCESS: + sys.exit(1) + elif cli_args.subcommand == 'build': + if cli_args.build_dir: + if not os.path.exists(cli_args.build_dir): + os.mkdir(cli_args.build_dir) + kunit_kernel.kunitconfig_path = os.path.join( + cli_args.build_dir, + kunit_kernel.kunitconfig_path) + + if not linux: + linux = kunit_kernel.LinuxSourceTree() + + request = KunitBuildRequest(cli_args.jobs, + cli_args.build_dir, + cli_args.alltests, + 
cli_args.make_options) + result = build_tests(linux, request) + kunit_parser.print_with_timestamp(( + 'Elapsed time: %.3fs\n') % ( + result.elapsed_time)) + if result.status != KunitStatus.SUCCESS: + sys.exit(1) + elif cli_args.subcommand == 'exec': + if cli_args.build_dir: + if not os.path.exists(cli_args.build_dir): + os.mkdir(cli_args.build_dir) + kunit_kernel.kunitconfig_path = os.path.join( + cli_args.build_dir, + kunit_kernel.kunitconfig_path) + + if not linux: + linux = kunit_kernel.LinuxSourceTree() + + exec_request = KunitExecRequest(cli_args.timeout, + cli_args.build_dir, + cli_args.alltests) + exec_result = exec_tests(linux, exec_request) + parse_request = KunitParseRequest(cli_args.raw_output, + exec_result.result) + result = parse_tests(parse_request) + kunit_parser.print_with_timestamp(( + 'Elapsed time: %.3fs\n') % ( + exec_result.elapsed_time)) + if result.status != KunitStatus.SUCCESS: + sys.exit(1) + elif cli_args.subcommand == 'parse': + if cli_args.file == None: + kunit_output = sys.stdin + else: + with open(cli_args.file, 'r') as f: + kunit_output = f.read().splitlines() + request = KunitParseRequest(cli_args.raw_output, + kunit_output) + result = parse_tests(request) + if result.status != KunitStatus.SUCCESS: + sys.exit(1) else: parser.print_help() diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py index 984588d6ba952..5bb7b118ebd94 100755 --- a/tools/testing/kunit/kunit_tool_test.py +++ b/tools/testing/kunit/kunit_tool_test.py @@ -239,6 +239,24 @@ class KUnitMainTest(unittest.TestCase): self.print_patch.stop() pass + def test_config_passes_args_pass(self): + kunit.main(['config'], self.linux_source_mock) + assert self.linux_source_mock.build_reconfig.call_count == 1 + assert self.linux_source_mock.run_kernel.call_count == 0 + + def test_build_passes_args_pass(self): + kunit.main(['build'], self.linux_source_mock) + assert self.linux_source_mock.build_reconfig.call_count == 0 + self.linux_source_mock.build_um_kernel.assert_called_once_with(False, 8, '', None) + assert self.linux_source_mock.run_kernel.call_count == 0 + + def test_exec_passes_args_pass(self): + kunit.main(['exec'], self.linux_source_mock) + assert self.linux_source_mock.build_reconfig.call_count == 0 + assert self.linux_source_mock.run_kernel.call_count == 1 + self.linux_source_mock.run_kernel.assert_called_once_with(build_dir='', timeout=300) + self.print_mock.assert_any_call(StrContains('Testing complete.')) + def test_run_passes_args_pass(self): kunit.main(['run'], self.linux_source_mock) assert self.linux_source_mock.build_reconfig.call_count == 1 @@ -247,6 +265,13 @@ class KUnitMainTest(unittest.TestCase): build_dir='', timeout=300) self.print_mock.assert_any_call(StrContains('Testing complete.')) + def test_exec_passes_args_fail(self): + self.linux_source_mock.run_kernel = mock.Mock(return_value=[]) + with self.assertRaises(SystemExit) as e: + kunit.main(['exec'], self.linux_source_mock) + assert type(e.exception) == SystemExit + assert e.exception.code == 1 + def test_run_passes_args_fail(self): self.linux_source_mock.run_kernel = mock.Mock(return_value=[]) with self.assertRaises(SystemExit) as e: @@ -257,14 +282,28 @@ class KUnitMainTest(unittest.TestCase): assert self.linux_source_mock.run_kernel.call_count == 1 self.print_mock.assert_any_call(StrContains(' 0 tests run')) + def test_exec_raw_output(self): + self.linux_source_mock.run_kernel = mock.Mock(return_value=[]) + kunit.main(['exec', '--raw_output'], self.linux_source_mock) + assert 
self.linux_source_mock.run_kernel.call_count == 1 + for kall in self.print_mock.call_args_list: + assert kall != mock.call(StrContains('Testing complete.')) + assert kall != mock.call(StrContains(' 0 tests run')) + def test_run_raw_output(self): self.linux_source_mock.run_kernel = mock.Mock(return_value=[]) - with self.assertRaises(SystemExit) as e: - kunit.main(['run', '--raw_output'], self.linux_source_mock) - assert type(e.exception) == SystemExit - assert e.exception.code == 1 + kunit.main(['run', '--raw_output'], self.linux_source_mock) assert self.linux_source_mock.build_reconfig.call_count == 1 assert self.linux_source_mock.run_kernel.call_count == 1 + for kall in self.print_mock.call_args_list: + assert kall != mock.call(StrContains('Testing complete.')) + assert kall != mock.call(StrContains(' 0 tests run')) + + def test_exec_timeout(self): + timeout = 3453 + kunit.main(['exec', '--timeout', str(timeout)], self.linux_source_mock) + self.linux_source_mock.run_kernel.assert_called_once_with(build_dir='', timeout=timeout) + self.print_mock.assert_any_call(StrContains('Testing complete.')) def test_run_timeout(self): timeout = 3453 @@ -282,5 +321,21 @@ class KUnitMainTest(unittest.TestCase): build_dir=build_dir, timeout=300) self.print_mock.assert_any_call(StrContains('Testing complete.')) + def test_config_builddir(self): + build_dir = '.kunit' + kunit.main(['config', '--build_dir', build_dir], self.linux_source_mock) + assert self.linux_source_mock.build_reconfig.call_count == 1 + + def test_build_builddir(self): + build_dir = '.kunit' + kunit.main(['build', '--build_dir', build_dir], self.linux_source_mock) + self.linux_source_mock.build_um_kernel.assert_called_once_with(False, 8, build_dir, None) + + def test_exec_builddir(self): + build_dir = '.kunit' + kunit.main(['exec', '--build_dir', build_dir], self.linux_source_mock) + self.linux_source_mock.run_kernel.assert_called_once_with(build_dir=build_dir, timeout=300) + self.print_mock.assert_any_call(StrContains('Testing complete.')) + if __name__ == '__main__': unittest.main() -- GitLab From 746c6237ece641da79df09abcf87bc29f6f9665b Mon Sep 17 00:00:00 2001 From: Xiongfeng Wang Date: Fri, 8 May 2020 09:32:59 +0800 Subject: [PATCH 0414/1971] sunrpc: add missing newline when printing parameter 'pool_mode' by sysfs When I cat parameter '/sys/module/sunrpc/parameters/pool_mode', it displays as follows. It is better to add a newline for easy reading. [root@hulk-202 ~]# cat /sys/module/sunrpc/parameters/pool_mode global[root@hulk-202 ~]# Signed-off-by: Xiongfeng Wang Signed-off-by: J. 
Bruce Fields --- net/sunrpc/svc.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 9ed3126600ce8..e534f8f082e77 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -88,15 +88,15 @@ param_get_pool_mode(char *buf, const struct kernel_param *kp) switch (*ip) { case SVC_POOL_AUTO: - return strlcpy(buf, "auto", 20); + return strlcpy(buf, "auto\n", 20); case SVC_POOL_GLOBAL: - return strlcpy(buf, "global", 20); + return strlcpy(buf, "global\n", 20); case SVC_POOL_PERCPU: - return strlcpy(buf, "percpu", 20); + return strlcpy(buf, "percpu\n", 20); case SVC_POOL_PERNODE: - return strlcpy(buf, "pernode", 20); + return strlcpy(buf, "pernode\n", 20); default: - return sprintf(buf, "%d", *ip); + return sprintf(buf, "%d\n", *ip); } } -- GitLab From 5e6bbde95982300d66d78fb282d4ee39df78fc33 Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Wed, 8 Apr 2020 19:56:05 +0800 Subject: [PATCH 0415/1971] f2fs: introduce mempool for {,de}compress intermediate page allocation If compression feature is on, in scenario of no enough free memory, page refault ratio is higher than before, the root cause is: - {,de}compression flow needs to allocate intermediate pages to store compressed data in cluster, so during their allocation, vm may reclaim mmaped pages. - if above reclaimed pages belong to compressed cluster, during its refault, it may cause more intermediate pages allocation, result in reclaiming more mmaped pages. So this patch introduces a mempool for intermediate page allocation, in order to avoid high refault ratio, by default, number of preallocated page in pool is 512, user can change the number by assigning 'num_compress_pages' parameter during module initialization. Ma Feng found warnings in the original patch and fixed like below. Fix the following sparse warning: fs/f2fs/compress.c:501:5: warning: symbol 'num_compress_pages' was not declared. Should it be static? fs/f2fs/compress.c:530:6: warning: symbol 'f2fs_compress_free_page' was not declared. Should it be static? 
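The property the new helpers rely on is that a mempool keeps min_nr elements preallocated in reserve, so mempool_alloc() with a sleeping GFP mask such as GFP_NOFS is not expected to return NULL; it either allocates a fresh page or waits for a reserved one to be returned, which is why the allocation helper introduced below no longer needs a failure path. A minimal, self-contained sketch of the pattern (the demo_* names are illustrative and not part of this patch):

#include <linux/gfp.h>
#include <linux/mempool.h>
#include <linux/mm_types.h>

/* Illustrative only: a pool backed by 512 preallocated order-0 pages. */
static mempool_t *demo_page_pool;

static int demo_pool_init(void)
{
	demo_page_pool = mempool_create_page_pool(512, 0);
	return demo_page_pool ? 0 : -ENOMEM;
}

static struct page *demo_get_page(void)
{
	/*
	 * With a sleeping GFP mask, the allocation falls back to (or waits
	 * for) the preallocated reserve instead of failing.
	 */
	return mempool_alloc(demo_page_pool, GFP_NOFS);
}

static void demo_put_page(struct page *page)
{
	mempool_free(page, demo_page_pool);
}

static void demo_pool_exit(void)
{
	mempool_destroy(demo_page_pool);
}
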
Reported-by: Hulk Robot Signed-off-by: Chao Yu Signed-off-by: Ma Feng Signed-off-by: Jaegeuk Kim --- fs/f2fs/compress.c | 64 ++++++++++++++++++++++++++++++---------------- fs/f2fs/f2fs.h | 4 +++ fs/f2fs/super.c | 6 +++++ 3 files changed, 52 insertions(+), 22 deletions(-) diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c index d6283e351c959..230ea7cd15104 100644 --- a/fs/f2fs/compress.c +++ b/fs/f2fs/compress.c @@ -65,15 +65,6 @@ static void f2fs_set_compressed_page(struct page *page, page->mapping = inode->i_mapping; } -static void f2fs_put_compressed_page(struct page *page) -{ - set_page_private(page, (unsigned long)NULL); - ClearPagePrivate(page); - page->mapping = NULL; - unlock_page(page); - put_page(page); -} - static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock) { int i; @@ -476,17 +467,47 @@ bool f2fs_is_compress_backend_ready(struct inode *inode) return f2fs_cops[F2FS_I(inode)->i_compress_algorithm]; } -static struct page *f2fs_grab_page(void) +static mempool_t *compress_page_pool = NULL; +static int num_compress_pages = 512; +module_param(num_compress_pages, uint, 0444); +MODULE_PARM_DESC(num_compress_pages, + "Number of intermediate compress pages to preallocate"); + +int f2fs_init_compress_mempool(void) +{ + compress_page_pool = mempool_create_page_pool(num_compress_pages, 0); + if (!compress_page_pool) + return -ENOMEM; + + return 0; +} + +void f2fs_destroy_compress_mempool(void) +{ + mempool_destroy(compress_page_pool); +} + +static struct page *f2fs_compress_alloc_page(void) { struct page *page; - page = alloc_page(GFP_NOFS); - if (!page) - return NULL; + page = mempool_alloc(compress_page_pool, GFP_NOFS); lock_page(page); + return page; } +static void f2fs_compress_free_page(struct page *page) +{ + if (!page) + return; + set_page_private(page, (unsigned long)NULL); + ClearPagePrivate(page); + page->mapping = NULL; + unlock_page(page); + mempool_free(page, compress_page_pool); +} + static int f2fs_compress_pages(struct compress_ctx *cc) { struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode); @@ -516,7 +537,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc) } for (i = 0; i < cc->nr_cpages; i++) { - cc->cpages[i] = f2fs_grab_page(); + cc->cpages[i] = f2fs_compress_alloc_page(); if (!cc->cpages[i]) { ret = -ENOMEM; goto out_free_cpages; @@ -561,7 +582,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc) vunmap(cc->rbuf); for (i = nr_cpages; i < cc->nr_cpages; i++) { - f2fs_put_compressed_page(cc->cpages[i]); + f2fs_compress_free_page(cc->cpages[i]); cc->cpages[i] = NULL; } @@ -581,7 +602,7 @@ out_vunmap_rbuf: out_free_cpages: for (i = 0; i < cc->nr_cpages; i++) { if (cc->cpages[i]) - f2fs_put_compressed_page(cc->cpages[i]); + f2fs_compress_free_page(cc->cpages[i]); } kfree(cc->cpages); cc->cpages = NULL; @@ -1183,7 +1204,7 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page) if (unlikely(bio->bi_status)) mapping_set_error(cic->inode->i_mapping, -EIO); - f2fs_put_compressed_page(page); + f2fs_compress_free_page(page); dec_page_count(sbi, F2FS_WB_DATA); @@ -1344,7 +1365,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc) for (i = 0; i < dic->nr_cpages; i++) { struct page *page; - page = f2fs_grab_page(); + page = f2fs_compress_alloc_page(); if (!page) goto out_free; @@ -1364,7 +1385,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc) continue; } - dic->tpages[i] = f2fs_grab_page(); + dic->tpages[i] = f2fs_compress_alloc_page(); if (!dic->tpages[i]) goto out_free; } @@ -1386,8 +1407,7 @@ 
void f2fs_free_dic(struct decompress_io_ctx *dic) continue; if (!dic->tpages[i]) continue; - unlock_page(dic->tpages[i]); - put_page(dic->tpages[i]); + f2fs_compress_free_page(dic->tpages[i]); } kfree(dic->tpages); } @@ -1396,7 +1416,7 @@ void f2fs_free_dic(struct decompress_io_ctx *dic) for (i = 0; i < dic->nr_cpages; i++) { if (!dic->cpages[i]) continue; - f2fs_put_compressed_page(dic->cpages[i]); + f2fs_compress_free_page(dic->cpages[i]); } kfree(dic->cpages); } diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index cc7a7a0c02d6a..d098b94ca22d1 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -3829,6 +3829,8 @@ bool f2fs_compress_write_end(struct inode *inode, void *fsdata, int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock); void f2fs_compress_write_end_io(struct bio *bio, struct page *page); bool f2fs_is_compress_backend_ready(struct inode *inode); +int f2fs_init_compress_mempool(void); +void f2fs_destroy_compress_mempool(void); void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity); bool f2fs_cluster_is_empty(struct compress_ctx *cc); bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index); @@ -3862,6 +3864,8 @@ static inline struct page *f2fs_compress_control_page(struct page *page) WARN_ON_ONCE(1); return ERR_PTR(-EINVAL); } +static inline int f2fs_init_compress_mempool(void) { return 0; } +static inline void f2fs_destroy_compress_mempool(void) { } #endif static inline void set_compress_context(struct inode *inode) diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index d0a705e493320..1b170fc489412 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -3879,7 +3879,12 @@ static int __init init_f2fs_fs(void) err = f2fs_init_bioset(); if (err) goto free_bio_enrty_cache; + err = f2fs_init_compress_mempool(); + if (err) + goto free_bioset; return 0; +free_bioset: + f2fs_destroy_bioset(); free_bio_enrty_cache: f2fs_destroy_bio_entry_cache(); free_post_read: @@ -3907,6 +3912,7 @@ fail: static void __exit exit_f2fs_fs(void) { + f2fs_destroy_compress_mempool(); f2fs_destroy_bioset(); f2fs_destroy_bio_entry_cache(); f2fs_destroy_post_read_processing(); -- GitLab From 6d92b201035dfe77426f8814fd5259db385a18b3 Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Wed, 8 Apr 2020 19:56:32 +0800 Subject: [PATCH 0416/1971] f2fs: compress: support lzo-rle compress algorithm LZO-RLE extension (run length encoding) was introduced to improve performance of LZO algorithm in scenario of data contains many zeros, zram has changed to use this extended algorithm by default, this patch adds to support this algorithm extension, to enable this extension, it needs to enable F2FS_FS_LZO and F2FS_FS_LZORLE config, and specifies "compress_algorithm=lzo-rle" mountoption. Signed-off-by: Chao Yu Signed-off-by: Jaegeuk Kim --- Documentation/filesystems/f2fs.rst | 2 +- fs/f2fs/Kconfig | 10 ++++++++++ fs/f2fs/compress.c | 30 ++++++++++++++++++++++++++++++ fs/f2fs/f2fs.h | 1 + fs/f2fs/super.c | 6 ++++++ include/trace/events/f2fs.h | 3 ++- 6 files changed, 50 insertions(+), 2 deletions(-) diff --git a/Documentation/filesystems/f2fs.rst b/Documentation/filesystems/f2fs.rst index 87d794bc75a47..96e3f89d7abac 100644 --- a/Documentation/filesystems/f2fs.rst +++ b/Documentation/filesystems/f2fs.rst @@ -244,7 +244,7 @@ checkpoint=%s[:%u[%]] Set to "disable" to turn off checkpointing. Set to "enabl would be unusable can be viewed at /sys/fs/f2fs//unusable This space is reclaimed once checkpoint=enable. 
compress_algorithm=%s Control compress algorithm, currently f2fs supports "lzo", - "lz4" and "zstd" algorithm. + "lz4", "zstd" and "lzo-rle" algorithm. compress_log_size=%u Support configuring compress cluster size, the size will be 4KB * (1 << %u), 16KB is minimum size, also it's default size. diff --git a/fs/f2fs/Kconfig b/fs/f2fs/Kconfig index bb68d21e1f8c6..d13c5c6a97876 100644 --- a/fs/f2fs/Kconfig +++ b/fs/f2fs/Kconfig @@ -127,3 +127,13 @@ config F2FS_FS_ZSTD default y help Support ZSTD compress algorithm, if unsure, say Y. + +config F2FS_FS_LZORLE + bool "LZO-RLE compression support" + depends on F2FS_FS_COMPRESSION + depends on F2FS_FS_LZO + select LZO_COMPRESS + select LZO_DECOMPRESS + default y + help + Support LZO-RLE compress algorithm, if unsure, say Y. diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c index 230ea7cd15104..c7c5a8f8a48cb 100644 --- a/fs/f2fs/compress.c +++ b/fs/f2fs/compress.c @@ -442,6 +442,31 @@ static const struct f2fs_compress_ops f2fs_zstd_ops = { }; #endif +#ifdef CONFIG_F2FS_FS_LZO +#ifdef CONFIG_F2FS_FS_LZORLE +static int lzorle_compress_pages(struct compress_ctx *cc) +{ + int ret; + + ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata, + &cc->clen, cc->private); + if (ret != LZO_E_OK) { + printk_ratelimited("%sF2FS-fs (%s): lzo-rle compress failed, ret:%d\n", + KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret); + return -EIO; + } + return 0; +} + +static const struct f2fs_compress_ops f2fs_lzorle_ops = { + .init_compress_ctx = lzo_init_compress_ctx, + .destroy_compress_ctx = lzo_destroy_compress_ctx, + .compress_pages = lzorle_compress_pages, + .decompress_pages = lzo_decompress_pages, +}; +#endif +#endif + static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = { #ifdef CONFIG_F2FS_FS_LZO &f2fs_lzo_ops, @@ -458,6 +483,11 @@ static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = { #else NULL, #endif +#if defined(CONFIG_F2FS_FS_LZO) && defined(CONFIG_F2FS_FS_LZORLE) + &f2fs_lzorle_ops, +#else + NULL, +#endif }; bool f2fs_is_compress_backend_ready(struct inode *inode) diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index d098b94ca22d1..0dab21e764d9f 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -1281,6 +1281,7 @@ enum compress_algorithm_type { COMPRESS_LZO, COMPRESS_LZ4, COMPRESS_ZSTD, + COMPRESS_LZORLE, COMPRESS_MAX, }; diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 1b170fc489412..582bbf40c5590 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -822,6 +822,9 @@ static int parse_options(struct super_block *sb, char *options) } else if (!strcmp(name, "zstd")) { F2FS_OPTION(sbi).compress_algorithm = COMPRESS_ZSTD; + } else if (!strcmp(name, "lzo-rle")) { + F2FS_OPTION(sbi).compress_algorithm = + COMPRESS_LZORLE; } else { kfree(name); return -EINVAL; @@ -1415,6 +1418,9 @@ static inline void f2fs_show_compress_options(struct seq_file *seq, case COMPRESS_ZSTD: algtype = "zstd"; break; + case COMPRESS_LZORLE: + algtype = "lzo-rle"; + break; } seq_printf(seq, ",compress_algorithm=%s", algtype); diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index 417a486f5c8a9..757d3d6031e63 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -154,7 +154,8 @@ TRACE_DEFINE_ENUM(CP_PAUSE); __print_symbolic(type, \ { COMPRESS_LZO, "LZO" }, \ { COMPRESS_LZ4, "LZ4" }, \ - { COMPRESS_ZSTD, "ZSTD" }) + { COMPRESS_ZSTD, "ZSTD" }, \ + { COMPRESS_LZORLE, "LZO-RLE" }) struct f2fs_sb_info; struct f2fs_io_info; -- GitLab From ff5f85c8d62a487bde415ef4c9e2d0be718021df Mon Sep 17 00:00:00 2001 From: 
Eric Biggers Date: Thu, 7 May 2020 00:59:02 -0700 Subject: [PATCH 0417/1971] f2fs: don't leak filename in f2fs_try_convert_inline_dir() We need to call fscrypt_free_filename() to free the memory allocated by fscrypt_setup_filename(). Fixes: b06af2aff28b ("f2fs: convert inline_dir early before starting rename") Cc: # v5.6+ Signed-off-by: Eric Biggers Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/inline.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c index 4167e54081518..59a4b7ff11e17 100644 --- a/fs/f2fs/inline.c +++ b/fs/f2fs/inline.c @@ -559,12 +559,12 @@ int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry) ipage = f2fs_get_node_page(sbi, dir->i_ino); if (IS_ERR(ipage)) { err = PTR_ERR(ipage); - goto out; + goto out_fname; } if (f2fs_has_enough_room(dir, ipage, &fname)) { f2fs_put_page(ipage, 1); - goto out; + goto out_fname; } inline_dentry = inline_data_addr(dir, ipage); @@ -572,6 +572,8 @@ int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry) err = do_convert_inline_dir(dir, ipage, inline_dentry); if (!err) f2fs_put_page(ipage, 1); +out_fname: + fscrypt_free_filename(&fname); out: f2fs_unlock_op(sbi); return err; -- GitLab From f874fa1c7c7905c1744a2037a11516558ed00a81 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 7 May 2020 00:59:03 -0700 Subject: [PATCH 0418/1971] f2fs: split f2fs_d_compare() from f2fs_match_name() Sharing f2fs_ci_compare() between comparing cached dentries (f2fs_d_compare()) and comparing on-disk dentries (f2fs_match_name()) doesn't work as well as intended, as these actions fundamentally differ in several ways (e.g. whether the task may sleep, whether the directory is stable, whether the casefolded name was precomputed, whether the dentry will need to be decrypted once we allow casefold+encrypt, etc.) Just make f2fs_d_compare() implement what it needs directly, and rework f2fs_ci_compare() to be specialized for f2fs_match_name(). Signed-off-by: Eric Biggers Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/dir.c | 70 +++++++++++++++++++++++++------------------------- fs/f2fs/f2fs.h | 5 ---- 2 files changed, 35 insertions(+), 40 deletions(-) diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index 44bfc464df787..44eb12a00cd0e 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c @@ -107,36 +107,28 @@ static struct f2fs_dir_entry *find_in_block(struct inode *dir, /* * Test whether a case-insensitive directory entry matches the filename * being searched for. - * - * Returns: 0 if the directory entry matches, more than 0 if it - * doesn't match or less than zero on error. */ -int f2fs_ci_compare(const struct inode *parent, const struct qstr *name, - const struct qstr *entry, bool quick) +static bool f2fs_match_ci_name(const struct inode *dir, const struct qstr *name, + const struct qstr *entry, bool quick) { - const struct f2fs_sb_info *sbi = F2FS_SB(parent->i_sb); + const struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb); const struct unicode_map *um = sbi->s_encoding; - int ret; + int res; if (quick) - ret = utf8_strncasecmp_folded(um, name, entry); + res = utf8_strncasecmp_folded(um, name, entry); else - ret = utf8_strncasecmp(um, name, entry); - - if (ret < 0) { - /* Handle invalid character sequence as either an error - * or as an opaque byte sequence. + res = utf8_strncasecmp(um, name, entry); + if (res < 0) { + /* + * In strict mode, ignore invalid names. In non-strict mode, + * fall back to treating them as opaque byte sequences. 
*/ - if (f2fs_has_strict_mode(sbi)) - return -EINVAL; - - if (name->len != entry->len) - return 1; - - return !!memcmp(name->name, entry->name, name->len); + if (f2fs_has_strict_mode(sbi) || name->len != entry->len) + return false; + return !memcmp(name->name, entry->name, name->len); } - - return ret; + return res == 0; } static void f2fs_fname_setup_ci_filename(struct inode *dir, @@ -188,10 +180,10 @@ static inline bool f2fs_match_name(struct f2fs_dentry_ptr *d, if (cf_str->name) { struct qstr cf = {.name = cf_str->name, .len = cf_str->len}; - return !f2fs_ci_compare(parent, &cf, &entry, true); + return f2fs_match_ci_name(parent, &cf, &entry, true); } - return !f2fs_ci_compare(parent, fname->usr_fname, &entry, - false); + return f2fs_match_ci_name(parent, fname->usr_fname, &entry, + false); } #endif if (fscrypt_match_name(fname, d->filename[bit_pos], @@ -1080,17 +1072,25 @@ const struct file_operations f2fs_dir_operations = { static int f2fs_d_compare(const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name) { - struct qstr qstr = {.name = str, .len = len }; const struct dentry *parent = READ_ONCE(dentry->d_parent); - const struct inode *inode = READ_ONCE(parent->d_inode); - - if (!inode || !IS_CASEFOLDED(inode)) { - if (len != name->len) - return -1; - return memcmp(str, name->name, len); - } - - return f2fs_ci_compare(inode, name, &qstr, false); + const struct inode *dir = READ_ONCE(parent->d_inode); + const struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb); + struct qstr entry = QSTR_INIT(str, len); + int res; + + if (!dir || !IS_CASEFOLDED(dir)) + goto fallback; + + res = utf8_strncasecmp(sbi->s_encoding, name, &entry); + if (res >= 0) + return res; + + if (f2fs_has_strict_mode(sbi)) + return -EINVAL; +fallback: + if (len != name->len) + return 1; + return !!memcmp(str, name->name, len); } static int f2fs_d_hash(const struct dentry *dentry, struct qstr *str) diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 0dab21e764d9f..216b0a8093554 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -3142,11 +3142,6 @@ int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name, bool hot, bool set); struct dentry *f2fs_get_parent(struct dentry *child); -extern int f2fs_ci_compare(const struct inode *parent, - const struct qstr *name, - const struct qstr *entry, - bool quick); - /* * dir.c */ -- GitLab From 43c780ba26244e4caf3f9986beb6c4ae5eb54f50 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 7 May 2020 00:59:04 -0700 Subject: [PATCH 0419/1971] f2fs: rework filename handling Rework f2fs's handling of filenames to use a new 'struct f2fs_filename'. Similar to 'struct ext4_filename', this stores the usr_fname, disk_name, dirhash, crypto_buf, and casefolded name. Some of these names can be NULL in some cases. 'struct f2fs_filename' differs from 'struct fscrypt_name' mainly in that the casefolded name is included. For user-initiated directory operations like lookup() and create(), initialize the f2fs_filename by translating the corresponding fscrypt_name, then computing the dirhash and casefolded name if needed. This makes the dirhash and casefolded name be cached for each syscall, so we don't have to recompute them repeatedly. (Previously, f2fs computed the dirhash once per directory level, and the casefolded name once per directory block.) This improves performance. This rework also makes it much easier to correctly handle all combinations of normal, encrypted, casefolded, and encrypted+casefolded directories. 
(The fourth isn't supported yet but is being worked on.) The only other cases where an f2fs_filename gets initialized are for two filesystem-internal operations: (1) when converting an inline directory to a regular one, we grab the needed disk_name and hash from an existing f2fs_dir_entry; and (2) when roll-forward recovering a new dentry, we grab the needed disk_name from f2fs_inode::i_name and compute the hash. Signed-off-by: Eric Biggers Signed-off-by: Jaegeuk Kim --- fs/f2fs/dir.c | 310 +++++++++++++++++++++++++-------------------- fs/f2fs/f2fs.h | 78 +++++++++--- fs/f2fs/hash.c | 76 +++++------ fs/f2fs/inline.c | 45 ++++--- fs/f2fs/namei.c | 6 +- fs/f2fs/recovery.c | 51 ++++++-- 6 files changed, 331 insertions(+), 235 deletions(-) diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index 44eb12a00cd0e..29f70f2295cce 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c @@ -70,6 +70,111 @@ unsigned char f2fs_get_de_type(struct f2fs_dir_entry *de) return DT_UNKNOWN; } +/* If @dir is casefolded, initialize @fname->cf_name from @fname->usr_fname. */ +int f2fs_init_casefolded_name(const struct inode *dir, + struct f2fs_filename *fname) +{ +#ifdef CONFIG_UNICODE + struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb); + + if (IS_CASEFOLDED(dir)) { + fname->cf_name.name = f2fs_kmalloc(sbi, F2FS_NAME_LEN, + GFP_NOFS); + if (!fname->cf_name.name) + return -ENOMEM; + fname->cf_name.len = utf8_casefold(sbi->s_encoding, + fname->usr_fname, + fname->cf_name.name, + F2FS_NAME_LEN); + if ((int)fname->cf_name.len <= 0) { + kfree(fname->cf_name.name); + fname->cf_name.name = NULL; + if (f2fs_has_strict_mode(sbi)) + return -EINVAL; + /* fall back to treating name as opaque byte sequence */ + } + } +#endif + return 0; +} + +static int __f2fs_setup_filename(const struct inode *dir, + const struct fscrypt_name *crypt_name, + struct f2fs_filename *fname) +{ + int err; + + memset(fname, 0, sizeof(*fname)); + + fname->usr_fname = crypt_name->usr_fname; + fname->disk_name = crypt_name->disk_name; +#ifdef CONFIG_FS_ENCRYPTION + fname->crypto_buf = crypt_name->crypto_buf; +#endif + if (crypt_name->is_ciphertext_name) { + /* hash was decoded from the no-key name */ + fname->hash = cpu_to_le32(crypt_name->hash); + } else { + err = f2fs_init_casefolded_name(dir, fname); + if (err) { + f2fs_free_filename(fname); + return err; + } + f2fs_hash_filename(dir, fname); + } + return 0; +} + +/* + * Prepare to search for @iname in @dir. This is similar to + * fscrypt_setup_filename(), but this also handles computing the casefolded name + * and the f2fs dirhash if needed, then packing all the information about this + * filename up into a 'struct f2fs_filename'. + */ +int f2fs_setup_filename(struct inode *dir, const struct qstr *iname, + int lookup, struct f2fs_filename *fname) +{ + struct fscrypt_name crypt_name; + int err; + + err = fscrypt_setup_filename(dir, iname, lookup, &crypt_name); + if (err) + return err; + + return __f2fs_setup_filename(dir, &crypt_name, fname); +} + +/* + * Prepare to look up @dentry in @dir. This is similar to + * fscrypt_prepare_lookup(), but this also handles computing the casefolded name + * and the f2fs dirhash if needed, then packing all the information about this + * filename up into a 'struct f2fs_filename'. 
+ */ +int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry, + struct f2fs_filename *fname) +{ + struct fscrypt_name crypt_name; + int err; + + err = fscrypt_prepare_lookup(dir, dentry, &crypt_name); + if (err) + return err; + + return __f2fs_setup_filename(dir, &crypt_name, fname); +} + +void f2fs_free_filename(struct f2fs_filename *fname) +{ +#ifdef CONFIG_FS_ENCRYPTION + kfree(fname->crypto_buf.name); + fname->crypto_buf.name = NULL; +#endif +#ifdef CONFIG_UNICODE + kfree(fname->cf_name.name); + fname->cf_name.name = NULL; +#endif +} + static unsigned long dir_block_index(unsigned int level, int dir_level, unsigned int idx) { @@ -84,8 +189,7 @@ static unsigned long dir_block_index(unsigned int level, static struct f2fs_dir_entry *find_in_block(struct inode *dir, struct page *dentry_page, - struct fscrypt_name *fname, - f2fs_hash_t namehash, + const struct f2fs_filename *fname, int *max_slots, struct page **res_page) { @@ -96,7 +200,7 @@ static struct f2fs_dir_entry *find_in_block(struct inode *dir, dentry_blk = (struct f2fs_dentry_block *)page_address(dentry_page); make_dentry_ptr_block(dir, &d, dentry_blk); - de = f2fs_find_target_dentry(fname, namehash, max_slots, &d); + de = f2fs_find_target_dentry(&d, fname, max_slots); if (de) *res_page = dentry_page; @@ -109,102 +213,55 @@ static struct f2fs_dir_entry *find_in_block(struct inode *dir, * being searched for. */ static bool f2fs_match_ci_name(const struct inode *dir, const struct qstr *name, - const struct qstr *entry, bool quick) + const u8 *de_name, u32 de_name_len) { const struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb); const struct unicode_map *um = sbi->s_encoding; + struct qstr entry = QSTR_INIT(de_name, de_name_len); int res; - if (quick) - res = utf8_strncasecmp_folded(um, name, entry); - else - res = utf8_strncasecmp(um, name, entry); + res = utf8_strncasecmp_folded(um, name, &entry); if (res < 0) { /* * In strict mode, ignore invalid names. In non-strict mode, * fall back to treating them as opaque byte sequences. 
*/ - if (f2fs_has_strict_mode(sbi) || name->len != entry->len) + if (f2fs_has_strict_mode(sbi) || name->len != entry.len) return false; - return !memcmp(name->name, entry->name, name->len); + return !memcmp(name->name, entry.name, name->len); } return res == 0; } +#endif /* CONFIG_UNICODE */ -static void f2fs_fname_setup_ci_filename(struct inode *dir, - const struct qstr *iname, - struct fscrypt_str *cf_name) +static inline bool f2fs_match_name(const struct inode *dir, + const struct f2fs_filename *fname, + const u8 *de_name, u32 de_name_len) { - struct f2fs_sb_info *sbi = F2FS_I_SB(dir); + struct fscrypt_name f; - if (!IS_CASEFOLDED(dir)) { - cf_name->name = NULL; - return; - } - - cf_name->name = f2fs_kmalloc(sbi, F2FS_NAME_LEN, GFP_NOFS); - if (!cf_name->name) - return; - - cf_name->len = utf8_casefold(sbi->s_encoding, - iname, cf_name->name, - F2FS_NAME_LEN); - if ((int)cf_name->len <= 0) { - kvfree(cf_name->name); - cf_name->name = NULL; - } -} -#endif - -static inline bool f2fs_match_name(struct f2fs_dentry_ptr *d, - struct f2fs_dir_entry *de, - struct fscrypt_name *fname, - struct fscrypt_str *cf_str, - unsigned long bit_pos, - f2fs_hash_t namehash) -{ #ifdef CONFIG_UNICODE - struct inode *parent = d->inode; - struct f2fs_sb_info *sbi = F2FS_I_SB(parent); - struct qstr entry; -#endif + if (fname->cf_name.name) { + struct qstr cf = FSTR_TO_QSTR(&fname->cf_name); - if (de->hash_code != namehash) - return false; - -#ifdef CONFIG_UNICODE - entry.name = d->filename[bit_pos]; - entry.len = de->name_len; - - if (sbi->s_encoding && IS_CASEFOLDED(parent)) { - if (cf_str->name) { - struct qstr cf = {.name = cf_str->name, - .len = cf_str->len}; - return f2fs_match_ci_name(parent, &cf, &entry, true); - } - return f2fs_match_ci_name(parent, fname->usr_fname, &entry, - false); + return f2fs_match_ci_name(dir, &cf, de_name, de_name_len); } #endif - if (fscrypt_match_name(fname, d->filename[bit_pos], - le16_to_cpu(de->name_len))) - return true; - return false; + f.usr_fname = fname->usr_fname; + f.disk_name = fname->disk_name; +#ifdef CONFIG_FS_ENCRYPTION + f.crypto_buf = fname->crypto_buf; +#endif + return fscrypt_match_name(&f, de_name, de_name_len); } -struct f2fs_dir_entry *f2fs_find_target_dentry(struct fscrypt_name *fname, - f2fs_hash_t namehash, int *max_slots, - struct f2fs_dentry_ptr *d) +struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d, + const struct f2fs_filename *fname, int *max_slots) { struct f2fs_dir_entry *de; - struct fscrypt_str cf_str = { .name = NULL, .len = 0 }; unsigned long bit_pos = 0; int max_len = 0; -#ifdef CONFIG_UNICODE - f2fs_fname_setup_ci_filename(d->inode, fname->usr_fname, &cf_str); -#endif - if (max_slots) *max_slots = 0; while (bit_pos < d->max) { @@ -221,7 +278,9 @@ struct f2fs_dir_entry *f2fs_find_target_dentry(struct fscrypt_name *fname, continue; } - if (f2fs_match_name(d, de, fname, &cf_str, bit_pos, namehash)) + if (de->hash_code == fname->hash && + f2fs_match_name(d->inode, fname, d->filename[bit_pos], + le16_to_cpu(de->name_len))) goto found; if (max_slots && max_len > *max_slots) @@ -235,33 +294,27 @@ struct f2fs_dir_entry *f2fs_find_target_dentry(struct fscrypt_name *fname, found: if (max_slots && max_len > *max_slots) *max_slots = max_len; - -#ifdef CONFIG_UNICODE - kvfree(cf_str.name); -#endif return de; } static struct f2fs_dir_entry *find_in_level(struct inode *dir, unsigned int level, - struct fscrypt_name *fname, + const struct f2fs_filename *fname, struct page **res_page) { - struct qstr name = 
FSTR_TO_QSTR(&fname->disk_name); - int s = GET_DENTRY_SLOTS(name.len); + int s = GET_DENTRY_SLOTS(fname->disk_name.len); unsigned int nbucket, nblock; unsigned int bidx, end_block; struct page *dentry_page; struct f2fs_dir_entry *de = NULL; bool room = false; int max_slots; - f2fs_hash_t namehash = f2fs_dentry_hash(dir, &name, fname); nbucket = dir_buckets(level, F2FS_I(dir)->i_dir_level); nblock = bucket_blocks(level); bidx = dir_block_index(level, F2FS_I(dir)->i_dir_level, - le32_to_cpu(namehash) % nbucket); + le32_to_cpu(fname->hash) % nbucket); end_block = bidx + nblock; for (; bidx < end_block; bidx++) { @@ -277,8 +330,8 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir, } } - de = find_in_block(dir, dentry_page, fname, namehash, - &max_slots, res_page); + de = find_in_block(dir, dentry_page, fname, &max_slots, + res_page); if (de) break; @@ -287,8 +340,8 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir, f2fs_put_page(dentry_page, 0); } - if (!de && room && F2FS_I(dir)->chash != namehash) { - F2FS_I(dir)->chash = namehash; + if (!de && room && F2FS_I(dir)->chash != fname->hash) { + F2FS_I(dir)->chash = fname->hash; F2FS_I(dir)->clevel = level; } @@ -296,7 +349,8 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir, } struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir, - struct fscrypt_name *fname, struct page **res_page) + const struct f2fs_filename *fname, + struct page **res_page) { unsigned long npages = dir_blocks(dir); struct f2fs_dir_entry *de = NULL; @@ -345,18 +399,10 @@ struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir, const struct qstr *child, struct page **res_page) { struct f2fs_dir_entry *de = NULL; - struct fscrypt_name fname; + struct f2fs_filename fname; int err; -#ifdef CONFIG_UNICODE - if (f2fs_has_strict_mode(F2FS_I_SB(dir)) && IS_CASEFOLDED(dir) && - utf8_validate(F2FS_I_SB(dir)->s_encoding, child)) { - *res_page = ERR_PTR(-EINVAL); - return NULL; - } -#endif - - err = fscrypt_setup_filename(dir, child, 1, &fname); + err = f2fs_setup_filename(dir, child, 1, &fname); if (err) { if (err == -ENOENT) *res_page = NULL; @@ -367,7 +413,7 @@ struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir, de = __f2fs_find_entry(dir, &fname, res_page); - fscrypt_free_filename(&fname); + f2fs_free_filename(&fname); return de; } @@ -408,7 +454,8 @@ void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de, f2fs_put_page(page, 1); } -static void init_dent_inode(const struct qstr *name, struct page *ipage) +static void init_dent_inode(const struct f2fs_filename *fname, + struct page *ipage) { struct f2fs_inode *ri; @@ -416,16 +463,16 @@ static void init_dent_inode(const struct qstr *name, struct page *ipage) /* copy name info. to this inode page */ ri = F2FS_INODE(ipage); - ri->i_namelen = cpu_to_le32(name->len); - memcpy(ri->i_name, name->name, name->len); + ri->i_namelen = cpu_to_le32(fname->disk_name.len); + memcpy(ri->i_name, fname->disk_name.name, fname->disk_name.len); set_page_dirty(ipage); } void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent, struct f2fs_dentry_ptr *d) { - struct qstr dot = QSTR_INIT(".", 1); - struct qstr dotdot = QSTR_INIT("..", 2); + struct fscrypt_str dot = FSTR_INIT(".", 1); + struct fscrypt_str dotdot = FSTR_INIT("..", 2); /* update dirent of "." 
*/ f2fs_update_dentry(inode->i_ino, inode->i_mode, d, &dot, 0, 0); @@ -459,8 +506,7 @@ static int make_empty_dir(struct inode *inode, } struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir, - const struct qstr *new_name, const struct qstr *orig_name, - struct page *dpage) + const struct f2fs_filename *fname, struct page *dpage) { struct page *page; int err; @@ -485,7 +531,8 @@ struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir, if (err) goto put_error; - err = f2fs_init_security(inode, dir, orig_name, page); + err = f2fs_init_security(inode, dir, + fname ? fname->usr_fname : NULL, page); if (err) goto put_error; @@ -500,8 +547,8 @@ struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir, return page; } - if (new_name) { - init_dent_inode(new_name, page); + if (fname) { + init_dent_inode(fname, page); if (IS_ENCRYPTED(dir)) file_set_enc_name(inode); } @@ -569,11 +616,11 @@ next: } bool f2fs_has_enough_room(struct inode *dir, struct page *ipage, - struct fscrypt_name *fname) + const struct f2fs_filename *fname) { struct f2fs_dentry_ptr d; unsigned int bit_pos; - int slots = GET_DENTRY_SLOTS(fname_len(fname)); + int slots = GET_DENTRY_SLOTS(fname->disk_name.len); make_dentry_ptr_inline(dir, &d, inline_data_addr(dir, ipage)); @@ -583,8 +630,8 @@ bool f2fs_has_enough_room(struct inode *dir, struct page *ipage, } void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d, - const struct qstr *name, f2fs_hash_t name_hash, - unsigned int bit_pos) + const struct fscrypt_str *name, f2fs_hash_t name_hash, + unsigned int bit_pos) { struct f2fs_dir_entry *de; int slots = GET_DENTRY_SLOTS(name->len); @@ -604,15 +651,13 @@ void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d, } } -int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name, - const struct qstr *orig_name, - struct inode *inode, nid_t ino, umode_t mode) +int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname, + struct inode *inode, nid_t ino, umode_t mode) { unsigned int bit_pos; unsigned int level; unsigned int current_depth; unsigned long bidx, block; - f2fs_hash_t dentry_hash; unsigned int nbucket, nblock; struct page *dentry_page = NULL; struct f2fs_dentry_block *dentry_blk = NULL; @@ -621,11 +666,10 @@ int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name, int slots, err = 0; level = 0; - slots = GET_DENTRY_SLOTS(new_name->len); - dentry_hash = f2fs_dentry_hash(dir, new_name, NULL); + slots = GET_DENTRY_SLOTS(fname->disk_name.len); current_depth = F2FS_I(dir)->i_current_depth; - if (F2FS_I(dir)->chash == dentry_hash) { + if (F2FS_I(dir)->chash == fname->hash) { level = F2FS_I(dir)->clevel; F2FS_I(dir)->chash = 0; } @@ -647,7 +691,7 @@ start: nblock = bucket_blocks(level); bidx = dir_block_index(level, F2FS_I(dir)->i_dir_level, - (le32_to_cpu(dentry_hash) % nbucket)); + (le32_to_cpu(fname->hash) % nbucket)); for (block = bidx; block <= (bidx + nblock - 1); block++) { dentry_page = f2fs_get_new_data_page(dir, NULL, block, true); @@ -671,8 +715,7 @@ add_dentry: if (inode) { down_write(&F2FS_I(inode)->i_sem); - page = f2fs_init_inode_metadata(inode, dir, new_name, - orig_name, NULL); + page = f2fs_init_inode_metadata(inode, dir, fname, NULL); if (IS_ERR(page)) { err = PTR_ERR(page); goto fail; @@ -680,7 +723,8 @@ add_dentry: } make_dentry_ptr_block(NULL, &d, dentry_blk); - f2fs_update_dentry(ino, mode, &d, new_name, dentry_hash, bit_pos); + f2fs_update_dentry(ino, mode, 
&d, &fname->disk_name, fname->hash, + bit_pos); set_page_dirty(dentry_page); @@ -704,21 +748,15 @@ fail: return err; } -int f2fs_add_dentry(struct inode *dir, struct fscrypt_name *fname, - struct inode *inode, nid_t ino, umode_t mode) +int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname, + struct inode *inode, nid_t ino, umode_t mode) { - struct qstr new_name; int err = -EAGAIN; - new_name.name = fname_name(fname); - new_name.len = fname_len(fname); - if (f2fs_has_inline_dentry(dir)) - err = f2fs_add_inline_entry(dir, &new_name, fname->usr_fname, - inode, ino, mode); + err = f2fs_add_inline_entry(dir, fname, inode, ino, mode); if (err == -EAGAIN) - err = f2fs_add_regular_entry(dir, &new_name, fname->usr_fname, - inode, ino, mode); + err = f2fs_add_regular_entry(dir, fname, inode, ino, mode); f2fs_update_time(F2FS_I_SB(dir), REQ_TIME); return err; @@ -731,12 +769,12 @@ int f2fs_add_dentry(struct inode *dir, struct fscrypt_name *fname, int f2fs_do_add_link(struct inode *dir, const struct qstr *name, struct inode *inode, nid_t ino, umode_t mode) { - struct fscrypt_name fname; + struct f2fs_filename fname; struct page *page = NULL; struct f2fs_dir_entry *de = NULL; int err; - err = fscrypt_setup_filename(dir, name, 0, &fname); + err = f2fs_setup_filename(dir, name, 0, &fname); if (err) return err; @@ -759,7 +797,7 @@ int f2fs_do_add_link(struct inode *dir, const struct qstr *name, } else { err = f2fs_add_dentry(dir, &fname, inode, ino, mode); } - fscrypt_free_filename(&fname); + f2fs_free_filename(&fname); return err; } @@ -769,7 +807,7 @@ int f2fs_do_tmpfile(struct inode *inode, struct inode *dir) int err = 0; down_write(&F2FS_I(inode)->i_sem); - page = f2fs_init_inode_metadata(inode, dir, NULL, NULL, NULL); + page = f2fs_init_inode_metadata(inode, dir, NULL, NULL); if (IS_ERR(page)) { err = PTR_ERR(page); goto fail; diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 216b0a8093554..20f047a07c5a1 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -506,6 +506,42 @@ static inline int get_inline_xattr_addrs(struct inode *inode); * For INODE and NODE manager */ /* for directory operations */ + +struct f2fs_filename { + /* + * The filename the user specified. This is NULL for some + * filesystem-internal operations, e.g. converting an inline directory + * to a non-inline one, or roll-forward recovering an encrypted dentry. + */ + const struct qstr *usr_fname; + + /* + * The on-disk filename. For encrypted directories, this is encrypted. + * This may be NULL for lookups in an encrypted dir without the key. + */ + struct fscrypt_str disk_name; + + /* The dirhash of this filename */ + f2fs_hash_t hash; + +#ifdef CONFIG_FS_ENCRYPTION + /* + * For lookups in encrypted directories: either the buffer backing + * disk_name, or a buffer that holds the decoded no-key name. + */ + struct fscrypt_str crypto_buf; +#endif +#ifdef CONFIG_UNICODE + /* + * For casefolded directories: the casefolded name, but it's left NULL + * if the original name is not valid Unicode or if the filesystem is + * doing an internal operation where usr_fname is also NULL. In these + * cases we fall back to treating the name as an opaque byte sequence. 
+ */ + struct fscrypt_str cf_name; +#endif +}; + struct f2fs_dentry_ptr { struct inode *inode; void *bitmap; @@ -2921,12 +2957,12 @@ static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi) return is_set_ckpt_flags(sbi, CP_ERROR_FLAG); } -static inline bool is_dot_dotdot(const struct qstr *str) +static inline bool is_dot_dotdot(const u8 *name, size_t len) { - if (str->len == 1 && str->name[0] == '.') + if (len == 1 && name[0] == '.') return true; - if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.') + if (len == 2 && name[0] == '.' && name[1] == '.') return true; return false; @@ -3146,22 +3182,28 @@ struct dentry *f2fs_get_parent(struct dentry *child); * dir.c */ unsigned char f2fs_get_de_type(struct f2fs_dir_entry *de); -struct f2fs_dir_entry *f2fs_find_target_dentry(struct fscrypt_name *fname, - f2fs_hash_t namehash, int *max_slots, - struct f2fs_dentry_ptr *d); +int f2fs_init_casefolded_name(const struct inode *dir, + struct f2fs_filename *fname); +int f2fs_setup_filename(struct inode *dir, const struct qstr *iname, + int lookup, struct f2fs_filename *fname); +int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry, + struct f2fs_filename *fname); +void f2fs_free_filename(struct f2fs_filename *fname); +struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d, + const struct f2fs_filename *fname, int *max_slots); int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d, unsigned int start_pos, struct fscrypt_str *fstr); void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent, struct f2fs_dentry_ptr *d); struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir, - const struct qstr *new_name, - const struct qstr *orig_name, struct page *dpage); + const struct f2fs_filename *fname, struct page *dpage); void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode, unsigned int current_depth); int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots); void f2fs_drop_nlink(struct inode *dir, struct inode *inode); struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir, - struct fscrypt_name *fname, struct page **res_page); + const struct f2fs_filename *fname, + struct page **res_page); struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir, const struct qstr *child, struct page **res_page); struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p); @@ -3170,14 +3212,13 @@ ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr, void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de, struct page *page, struct inode *inode); bool f2fs_has_enough_room(struct inode *dir, struct page *ipage, - struct fscrypt_name *fname); + const struct f2fs_filename *fname); void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d, - const struct qstr *name, f2fs_hash_t name_hash, + const struct fscrypt_str *name, f2fs_hash_t name_hash, unsigned int bit_pos); -int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name, - const struct qstr *orig_name, +int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname, struct inode *inode, nid_t ino, umode_t mode); -int f2fs_add_dentry(struct inode *dir, struct fscrypt_name *fname, +int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname, struct inode *inode, nid_t ino, umode_t mode); int f2fs_do_add_link(struct inode *dir, const struct qstr *name, struct inode *inode, nid_t ino, umode_t mode); @@ -3207,8 +3248,7 @@ int 
f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi); /* * hash.c */ -f2fs_hash_t f2fs_dentry_hash(const struct inode *dir, - const struct qstr *name_info, struct fscrypt_name *fname); +void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname); /* * node.c @@ -3719,11 +3759,11 @@ int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry); int f2fs_write_inline_data(struct inode *inode, struct page *page); bool f2fs_recover_inline_data(struct inode *inode, struct page *npage); struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir, - struct fscrypt_name *fname, struct page **res_page); + const struct f2fs_filename *fname, + struct page **res_page); int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent, struct page *ipage); -int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name, - const struct qstr *orig_name, +int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname, struct inode *inode, nid_t ino, umode_t mode); void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page, struct inode *dir, diff --git a/fs/f2fs/hash.c b/fs/f2fs/hash.c index 5bc4dcd8fc03f..e5997919472d4 100644 --- a/fs/f2fs/hash.c +++ b/fs/f2fs/hash.c @@ -68,22 +68,9 @@ static void str2hashbuf(const unsigned char *msg, size_t len, *buf++ = pad; } -static f2fs_hash_t __f2fs_dentry_hash(const struct qstr *name_info, - struct fscrypt_name *fname) +static u32 TEA_hash_name(const u8 *p, size_t len) { - __u32 hash; - f2fs_hash_t f2fs_hash; - const unsigned char *p; __u32 in[8], buf[4]; - const unsigned char *name = name_info->name; - size_t len = name_info->len; - - /* encrypted bigname case */ - if (fname && !fname->disk_name.name) - return cpu_to_le32(fname->hash); - - if (is_dot_dotdot(name_info)) - return 0; /* Initialize the default seed for the hash checksum functions */ buf[0] = 0x67452301; @@ -91,7 +78,6 @@ static f2fs_hash_t __f2fs_dentry_hash(const struct qstr *name_info, buf[2] = 0x98badcfe; buf[3] = 0x10325476; - p = name; while (1) { str2hashbuf(p, len, in, 4); TEA_transform(buf, in); @@ -100,41 +86,43 @@ static f2fs_hash_t __f2fs_dentry_hash(const struct qstr *name_info, break; len -= 16; } - hash = buf[0]; - f2fs_hash = cpu_to_le32(hash & ~F2FS_HASH_COL_BIT); - return f2fs_hash; + return buf[0] & ~F2FS_HASH_COL_BIT; } -f2fs_hash_t f2fs_dentry_hash(const struct inode *dir, - const struct qstr *name_info, struct fscrypt_name *fname) +/* + * Compute @fname->hash. For all directories, @fname->disk_name must be set. + * For casefolded directories, @fname->usr_fname must be set, and also + * @fname->cf_name if the filename is valid Unicode. 
+ */ +void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname) { -#ifdef CONFIG_UNICODE - struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb); - const struct unicode_map *um = sbi->s_encoding; - int r, dlen; - unsigned char *buff; - struct qstr folded; + const u8 *name = fname->disk_name.name; + size_t len = fname->disk_name.len; - if (!name_info->len || !IS_CASEFOLDED(dir)) - goto opaque_seq; + WARN_ON_ONCE(!name); - buff = f2fs_kzalloc(sbi, sizeof(char) * PATH_MAX, GFP_KERNEL); - if (!buff) - return -ENOMEM; - - dlen = utf8_casefold(um, name_info, buff, PATH_MAX); - if (dlen < 0) { - kvfree(buff); - goto opaque_seq; + if (is_dot_dotdot(name, len)) { + fname->hash = 0; + return; } - folded.name = buff; - folded.len = dlen; - r = __f2fs_dentry_hash(&folded, fname); - - kvfree(buff); - return r; -opaque_seq: +#ifdef CONFIG_UNICODE + if (IS_CASEFOLDED(dir)) { + /* + * If the casefolded name is provided, hash it instead of the + * on-disk name. If the casefolded name is *not* provided, that + * should only be because the name wasn't valid Unicode, so fall + * back to treating the name as an opaque byte sequence. + */ + WARN_ON_ONCE(!fname->usr_fname->name); + if (fname->cf_name.name) { + name = fname->cf_name.name; + len = fname->cf_name.len; + } else { + name = fname->usr_fname->name; + len = fname->usr_fname->len; + } + } #endif - return __f2fs_dentry_hash(name_info, fname); + fname->hash = cpu_to_le32(TEA_hash_name(name, len)); } diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c index 59a4b7ff11e17..751a012ff8404 100644 --- a/fs/f2fs/inline.c +++ b/fs/f2fs/inline.c @@ -305,15 +305,14 @@ process_inline: } struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir, - struct fscrypt_name *fname, struct page **res_page) + const struct f2fs_filename *fname, + struct page **res_page) { struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb); - struct qstr name = FSTR_TO_QSTR(&fname->disk_name); struct f2fs_dir_entry *de; struct f2fs_dentry_ptr d; struct page *ipage; void *inline_dentry; - f2fs_hash_t namehash; ipage = f2fs_get_node_page(sbi, dir->i_ino); if (IS_ERR(ipage)) { @@ -321,12 +320,10 @@ struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir, return NULL; } - namehash = f2fs_dentry_hash(dir, &name, fname); - inline_dentry = inline_data_addr(dir, ipage); make_dentry_ptr_inline(dir, &d, inline_dentry); - de = f2fs_find_target_dentry(fname, namehash, NULL, &d); + de = f2fs_find_target_dentry(&d, fname, NULL); unlock_page(ipage); if (de) *res_page = ipage; @@ -443,7 +440,7 @@ static int f2fs_add_inline_entries(struct inode *dir, void *inline_dentry) while (bit_pos < d.max) { struct f2fs_dir_entry *de; - struct qstr new_name; + struct f2fs_filename fname; nid_t ino; umode_t fake_mode; @@ -459,14 +456,19 @@ static int f2fs_add_inline_entries(struct inode *dir, void *inline_dentry) continue; } - new_name.name = d.filename[bit_pos]; - new_name.len = le16_to_cpu(de->name_len); + /* + * We only need the disk_name and hash to move the dentry. + * We don't need the original or casefolded filenames. 
+ */ + memset(&fname, 0, sizeof(fname)); + fname.disk_name.name = d.filename[bit_pos]; + fname.disk_name.len = le16_to_cpu(de->name_len); + fname.hash = de->hash_code; ino = le32_to_cpu(de->ino); fake_mode = f2fs_get_de_type(de) << S_SHIFT; - err = f2fs_add_regular_entry(dir, &new_name, NULL, NULL, - ino, fake_mode); + err = f2fs_add_regular_entry(dir, &fname, NULL, ino, fake_mode); if (err) goto punch_dentry_pages; @@ -543,7 +545,7 @@ int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry) { struct f2fs_sb_info *sbi = F2FS_I_SB(dir); struct page *ipage; - struct fscrypt_name fname; + struct f2fs_filename fname; void *inline_dentry = NULL; int err = 0; @@ -552,7 +554,7 @@ int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry) f2fs_lock_op(sbi); - err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname); + err = f2fs_setup_filename(dir, &dentry->d_name, 0, &fname); if (err) goto out; @@ -573,23 +575,21 @@ int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry) if (!err) f2fs_put_page(ipage, 1); out_fname: - fscrypt_free_filename(&fname); + f2fs_free_filename(&fname); out: f2fs_unlock_op(sbi); return err; } -int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name, - const struct qstr *orig_name, - struct inode *inode, nid_t ino, umode_t mode) +int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname, + struct inode *inode, nid_t ino, umode_t mode) { struct f2fs_sb_info *sbi = F2FS_I_SB(dir); struct page *ipage; unsigned int bit_pos; - f2fs_hash_t name_hash; void *inline_dentry = NULL; struct f2fs_dentry_ptr d; - int slots = GET_DENTRY_SLOTS(new_name->len); + int slots = GET_DENTRY_SLOTS(fname->disk_name.len); struct page *page = NULL; int err = 0; @@ -611,8 +611,7 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name, if (inode) { down_write(&F2FS_I(inode)->i_sem); - page = f2fs_init_inode_metadata(inode, dir, new_name, - orig_name, ipage); + page = f2fs_init_inode_metadata(inode, dir, fname, ipage); if (IS_ERR(page)) { err = PTR_ERR(page); goto fail; @@ -621,8 +620,8 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name, f2fs_wait_on_page_writeback(ipage, NODE, true, true); - name_hash = f2fs_dentry_hash(dir, new_name, NULL); - f2fs_update_dentry(ino, mode, &d, new_name, name_hash, bit_pos); + f2fs_update_dentry(ino, mode, &d, &fname->disk_name, fname->hash, + bit_pos); set_page_dirty(ipage); diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index f54119da2217f..58db1de7ca947 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c @@ -482,7 +482,7 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry, nid_t ino = -1; int err = 0; unsigned int root_ino = F2FS_ROOT_INO(F2FS_I_SB(dir)); - struct fscrypt_name fname; + struct f2fs_filename fname; trace_f2fs_lookup_start(dir, dentry, flags); @@ -491,13 +491,13 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry, goto out; } - err = fscrypt_prepare_lookup(dir, dentry, &fname); + err = f2fs_prepare_lookup(dir, dentry, &fname); if (err == -ENOENT) goto out_splice; if (err) goto out; de = __f2fs_find_entry(dir, &fname, &page); - fscrypt_free_filename(&fname); + f2fs_free_filename(&fname); if (!de) { if (IS_ERR(page)) { diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c index dd804c07eeb05..ae5310f02e7ff 100644 --- a/fs/f2fs/recovery.c +++ b/fs/f2fs/recovery.c @@ -107,13 +107,51 @@ static void del_fsync_inode(struct fsync_inode_entry *entry, int drop) 
kmem_cache_free(fsync_entry_slab, entry); } +static int init_recovered_filename(const struct inode *dir, + struct f2fs_inode *raw_inode, + struct f2fs_filename *fname, + struct qstr *usr_fname) +{ + int err; + + memset(fname, 0, sizeof(*fname)); + fname->disk_name.len = le32_to_cpu(raw_inode->i_namelen); + fname->disk_name.name = raw_inode->i_name; + + if (WARN_ON(fname->disk_name.len > F2FS_NAME_LEN)) + return -ENAMETOOLONG; + + if (!IS_ENCRYPTED(dir)) { + usr_fname->name = fname->disk_name.name; + usr_fname->len = fname->disk_name.len; + fname->usr_fname = usr_fname; + } + + /* Compute the hash of the filename */ + if (IS_CASEFOLDED(dir)) { + err = f2fs_init_casefolded_name(dir, fname); + if (err) + return err; + f2fs_hash_filename(dir, fname); +#ifdef CONFIG_UNICODE + /* Case-sensitive match is fine for recovery */ + kfree(fname->cf_name.name); + fname->cf_name.name = NULL; +#endif + } else { + f2fs_hash_filename(dir, fname); + } + return 0; +} + static int recover_dentry(struct inode *inode, struct page *ipage, struct list_head *dir_list) { struct f2fs_inode *raw_inode = F2FS_INODE(ipage); nid_t pino = le32_to_cpu(raw_inode->i_pino); struct f2fs_dir_entry *de; - struct fscrypt_name fname; + struct f2fs_filename fname; + struct qstr usr_fname; struct page *page; struct inode *dir, *einode; struct fsync_inode_entry *entry; @@ -132,16 +170,9 @@ static int recover_dentry(struct inode *inode, struct page *ipage, } dir = entry->inode; - - memset(&fname, 0, sizeof(struct fscrypt_name)); - fname.disk_name.len = le32_to_cpu(raw_inode->i_namelen); - fname.disk_name.name = raw_inode->i_name; - - if (unlikely(fname.disk_name.len > F2FS_NAME_LEN)) { - WARN_ON(1); - err = -ENAMETOOLONG; + err = init_recovered_filename(dir, raw_inode, &fname, &usr_fname); + if (err) goto out; - } retry: de = __f2fs_find_entry(dir, &fname, &page); if (de && inode->i_ino == le32_to_cpu(de->ino)) -- GitLab From 84c9c2de0626567c0d964ee5fa1ae3310911ddf8 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Tue, 5 May 2020 11:41:11 -0700 Subject: [PATCH 0420/1971] f2fs: correctly fix the parent inode number during fsync() fsync() may be called on a deleted file that's still open. So when fsync() tries to set the parent inode number when the inode has LOST_PINO and i_nlink == 1 (to avoid later checkpoints), it needs to make sure to get the parent directory via a non-deleted alias. Also remove the unnecessary igrab() and iput(), as the caller already holds a reference to the inode. Signed-off-by: Eric Biggers Signed-off-by: Jaegeuk Kim --- fs/f2fs/file.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 175c661900876..81bfc5b44fa13 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -169,9 +169,11 @@ static int get_parent_ino(struct inode *inode, nid_t *pino) { struct dentry *dentry; - inode = igrab(inode); - dentry = d_find_any_alias(inode); - iput(inode); + /* + * Make sure to get the non-deleted alias. The alias associated with + * the open file descriptor being fsync()'ed may be deleted already. 
+ */ + dentry = d_find_alias(inode); if (!dentry) return 0; -- GitLab From 042be373adf719ab64c4a44ae809d110826becbf Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Fri, 8 May 2020 17:50:20 +0800 Subject: [PATCH 0421/1971] f2fs: shrink spinlock coverage In f2fs_try_to_free_nids(), .nid_list_lock spinlock critical region will increase as expected shrink number increase, to avoid spining other CPUs for long time, we change to release nid caches with small batch each time under .nid_list_lock coverage. Signed-off-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/node.c | 25 +++++++++++++++---------- fs/f2fs/node.h | 3 +++ 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 4da0d8713df5c..1db8cabf727ef 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -2488,7 +2488,6 @@ void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid) int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink) { struct f2fs_nm_info *nm_i = NM_I(sbi); - struct free_nid *i, *next; int nr = nr_shrink; if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS) @@ -2497,17 +2496,23 @@ int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink) if (!mutex_trylock(&nm_i->build_lock)) return 0; - spin_lock(&nm_i->nid_list_lock); - list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) { - if (nr_shrink <= 0 || - nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS) - break; + while (nr_shrink && nm_i->nid_cnt[FREE_NID] > MAX_FREE_NIDS) { + struct free_nid *i, *next; + unsigned int batch = SHRINK_NID_BATCH_SIZE; - __remove_free_nid(sbi, i, FREE_NID); - kmem_cache_free(free_nid_slab, i); - nr_shrink--; + spin_lock(&nm_i->nid_list_lock); + list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) { + if (!nr_shrink || !batch || + nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS) + break; + __remove_free_nid(sbi, i, FREE_NID); + kmem_cache_free(free_nid_slab, i); + nr_shrink--; + batch--; + } + spin_unlock(&nm_i->nid_list_lock); } - spin_unlock(&nm_i->nid_list_lock); + mutex_unlock(&nm_i->build_lock); return nr - nr_shrink; diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h index 6a2011deea23c..69e5859e993cf 100644 --- a/fs/f2fs/node.h +++ b/fs/f2fs/node.h @@ -15,6 +15,9 @@ #define FREE_NID_PAGES 8 #define MAX_FREE_NIDS (NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES) +/* size of free nid batch when shrinking */ +#define SHRINK_NID_BATCH_SIZE 8 + #define DEF_RA_NID_PAGES 0 /* # of nid pages to be readaheaded */ /* maximum readahead size for node during getting data blocks */ -- GitLab From ef8d563f184e1112651f2cbde383d43e599334e8 Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Fri, 6 Mar 2020 15:36:09 +0800 Subject: [PATCH 0422/1971] f2fs: introduce F2FS_IOC_RELEASE_COMPRESS_BLOCKS There are still reserved blocks on compressed inode, this patch introduce a new ioctl to help release reserved blocks back to filesystem, so that userspace can reuse those freed space. ---- Daeho fixed a bug like below. Now, if writing pages and releasing compress blocks occur simultaneously, and releasing cblocks is executed more than one time to a file, then total block count of filesystem and block count of the file could be incorrect and damaged. We have to execute releasing compress blocks only one time for a file without being interfered by writepages path. 
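For illustration only (not part of the original patch): a minimal userspace sketch of driving the new ioctl on a compressed file. The request number matches the definition added by this patch (_IOR(F2FS_IOCTL_MAGIC, 18, __u64)); the magic value 0xf5 is assumed to match fs/f2fs/f2fs.h, and everything else here (argument handling, error reporting) is an assumption, not kernel code.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>
#include <linux/types.h>

#define F2FS_IOCTL_MAGIC	0xf5	/* assumed to match fs/f2fs/f2fs.h */
#define F2FS_IOC_RELEASE_COMPRESS_BLOCKS	_IOR(F2FS_IOCTL_MAGIC, 18, __u64)

int main(int argc, char **argv)
{
	__u64 released = 0;
	int fd;

	if (argc != 2)
		return 1;
	/* open read-only: the patch returns -EBUSY if the file is still open for writing */
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* the number of released blocks is reported back through the __u64 argument */
	if (ioctl(fd, F2FS_IOC_RELEASE_COMPRESS_BLOCKS, &released) < 0)
		perror("F2FS_IOC_RELEASE_COMPRESS_BLOCKS");
	else
		printf("released %llu blocks\n", (unsigned long long)released);
	close(fd);
	return 0;
}

Note that a successful release leaves the inode immutable; per the later F2FS_IOC_RESERVE_COMPRESS_BLOCKS patch in this series, the blocks must be reserved again before the file can be overwritten.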
--- Signed-off-by: Chao Yu Signed-off-by: Daeho Jeong Signed-off-by: Jaegeuk Kim --- fs/f2fs/f2fs.h | 6 ++ fs/f2fs/file.c | 169 ++++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 174 insertions(+), 1 deletion(-) diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 20f047a07c5a1..d916540f12813 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -428,6 +428,8 @@ static inline bool __has_cursum_space(struct f2fs_journal *journal, #define F2FS_IOC_PRECACHE_EXTENTS _IO(F2FS_IOCTL_MAGIC, 15) #define F2FS_IOC_RESIZE_FS _IOW(F2FS_IOCTL_MAGIC, 16, __u64) #define F2FS_IOC_GET_COMPRESS_BLOCKS _IOR(F2FS_IOCTL_MAGIC, 17, __u64) +#define F2FS_IOC_RELEASE_COMPRESS_BLOCKS \ + _IOR(F2FS_IOCTL_MAGIC, 18, __u64) #define F2FS_IOC_GET_VOLUME_NAME FS_IOC_GETFSLABEL #define F2FS_IOC_SET_VOLUME_NAME FS_IOC_SETFSLABEL @@ -4048,6 +4050,10 @@ static inline void f2fs_i_compr_blocks_update(struct inode *inode, { int diff = F2FS_I(inode)->i_cluster_size - blocks; + /* don't update i_compr_blocks if saved blocks were released */ + if (!add && !F2FS_I(inode)->i_compr_blocks) + return; + if (add) { F2FS_I(inode)->i_compr_blocks += diff; stat_add_compr_blocks(inode, diff); diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 81bfc5b44fa13..189f8ce046f07 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -563,6 +563,7 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count) bool compressed_cluster = false; int cluster_index = 0, valid_blocks = 0; int cluster_size = F2FS_I(dn->inode)->i_cluster_size; + bool released = !F2FS_I(dn->inode)->i_compr_blocks; if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode)) base = get_extra_isize(dn->inode); @@ -601,7 +602,9 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count) clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN); f2fs_invalidate_blocks(sbi, blkaddr); - nr_free++; + + if (!released || blkaddr != COMPRESS_ADDR) + nr_free++; } if (compressed_cluster) @@ -3434,6 +3437,167 @@ static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg) return put_user(blocks, (u64 __user *)arg); } +static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count) +{ + struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); + unsigned int released_blocks = 0; + int cluster_size = F2FS_I(dn->inode)->i_cluster_size; + block_t blkaddr; + int i; + + for (i = 0; i < count; i++) { + blkaddr = data_blkaddr(dn->inode, dn->node_page, + dn->ofs_in_node + i); + + if (!__is_valid_data_blkaddr(blkaddr)) + continue; + if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr, + DATA_GENERIC_ENHANCE))) + return -EFSCORRUPTED; + } + + while (count) { + int compr_blocks = 0; + + for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) { + blkaddr = f2fs_data_blkaddr(dn); + + if (i == 0) { + if (blkaddr == COMPRESS_ADDR) + continue; + dn->ofs_in_node += cluster_size; + goto next; + } + + if (__is_valid_data_blkaddr(blkaddr)) + compr_blocks++; + + if (blkaddr != NEW_ADDR) + continue; + + dn->data_blkaddr = NULL_ADDR; + f2fs_set_data_blkaddr(dn); + } + + f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false); + dec_valid_block_count(sbi, dn->inode, + cluster_size - compr_blocks); + + released_blocks += cluster_size - compr_blocks; +next: + count -= cluster_size; + } + + return released_blocks; +} + +static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg) +{ + struct inode *inode = file_inode(filp); + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); + pgoff_t page_idx = 0, last_idx; + unsigned int released_blocks = 0; + int ret; 
+ int writecount; + + if (!f2fs_sb_has_compression(F2FS_I_SB(inode))) + return -EOPNOTSUPP; + + if (!f2fs_compressed_file(inode)) + return -EINVAL; + + if (f2fs_readonly(sbi->sb)) + return -EROFS; + + ret = mnt_want_write_file(filp); + if (ret) + return ret; + + f2fs_balance_fs(F2FS_I_SB(inode), true); + + inode_lock(inode); + + writecount = atomic_read(&inode->i_writecount); + if ((filp->f_mode & FMODE_WRITE && writecount != 1) || writecount) { + ret = -EBUSY; + goto out; + } + + if (IS_IMMUTABLE(inode)) { + ret = -EINVAL; + goto out; + } + + ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX); + if (ret) + goto out; + + if (!F2FS_I(inode)->i_compr_blocks) + goto out; + + F2FS_I(inode)->i_flags |= F2FS_IMMUTABLE_FL; + f2fs_set_inode_flags(inode); + inode->i_ctime = current_time(inode); + f2fs_mark_inode_dirty_sync(inode, true); + + down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + down_write(&F2FS_I(inode)->i_mmap_sem); + + last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); + + while (page_idx < last_idx) { + struct dnode_of_data dn; + pgoff_t end_offset, count; + + set_new_dnode(&dn, inode, NULL, NULL, 0); + ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE); + if (ret) { + if (ret == -ENOENT) { + page_idx = f2fs_get_next_page_offset(&dn, + page_idx); + ret = 0; + continue; + } + break; + } + + end_offset = ADDRS_PER_PAGE(dn.node_page, inode); + count = min(end_offset - dn.ofs_in_node, last_idx - page_idx); + count = roundup(count, F2FS_I(inode)->i_cluster_size); + + ret = release_compress_blocks(&dn, count); + + f2fs_put_dnode(&dn); + + if (ret < 0) + break; + + page_idx += count; + released_blocks += ret; + } + + up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + up_write(&F2FS_I(inode)->i_mmap_sem); +out: + inode_unlock(inode); + + mnt_drop_write_file(filp); + + if (ret >= 0) { + ret = put_user(released_blocks, (u64 __user *)arg); + } else if (released_blocks && F2FS_I(inode)->i_compr_blocks) { + set_sbi_flag(sbi, SBI_NEED_FSCK); + f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx " + "iblocks=%llu, released=%u, compr_blocks=%llu, " + "run fsck to fix.", + __func__, inode->i_ino, inode->i_blocks, + released_blocks, + F2FS_I(inode)->i_compr_blocks); + } + + return ret; +} + long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp))))) @@ -3516,6 +3680,8 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) return f2fs_set_volume_name(filp, arg); case F2FS_IOC_GET_COMPRESS_BLOCKS: return f2fs_get_compress_blocks(filp, arg); + case F2FS_IOC_RELEASE_COMPRESS_BLOCKS: + return f2fs_release_compress_blocks(filp, arg); default: return -ENOTTY; } @@ -3683,6 +3849,7 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case F2FS_IOC_GET_VOLUME_NAME: case F2FS_IOC_SET_VOLUME_NAME: case F2FS_IOC_GET_COMPRESS_BLOCKS: + case F2FS_IOC_RELEASE_COMPRESS_BLOCKS: break; default: return -ENOIOCTLCMD; -- GitLab From 1f5f11a3c41e2b23288b2769435a00f74e02496b Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Fri, 8 May 2020 12:25:45 -0700 Subject: [PATCH 0423/1971] f2fs: remove blk_plugging in block_operations blk_plugging doesn't seem to give any benefit. 
Signed-off-by: Jaegeuk Kim --- fs/f2fs/checkpoint.c | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 5ba649e17c72b..d49f7a01d8a26 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -1166,11 +1166,8 @@ static int block_operations(struct f2fs_sb_info *sbi) .nr_to_write = LONG_MAX, .for_reclaim = 0, }; - struct blk_plug plug; int err = 0, cnt = 0; - blk_start_plug(&plug); - retry_flush_quotas: f2fs_lock_all(sbi); if (__need_flush_quota(sbi)) { @@ -1198,7 +1195,7 @@ retry_flush_dents: f2fs_unlock_all(sbi); err = f2fs_sync_dirty_inodes(sbi, DIR_INODE); if (err) - goto out; + return err; cond_resched(); goto retry_flush_quotas; } @@ -1214,7 +1211,7 @@ retry_flush_dents: f2fs_unlock_all(sbi); err = f2fs_sync_inode_meta(sbi); if (err) - goto out; + return err; cond_resched(); goto retry_flush_quotas; } @@ -1230,7 +1227,7 @@ retry_flush_nodes: if (err) { up_write(&sbi->node_change); f2fs_unlock_all(sbi); - goto out; + return err; } cond_resched(); goto retry_flush_nodes; @@ -1242,8 +1239,6 @@ retry_flush_nodes: */ __prepare_cp_block(sbi); up_write(&sbi->node_change); -out: - blk_finish_plug(&plug); return err; } -- GitLab From f6644143c63f2eac88973f7fea087582579b0189 Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Sat, 9 May 2020 15:01:04 +0800 Subject: [PATCH 0424/1971] f2fs: compress: let lz4 compressor handle output buffer budget properly Commonly, in order to handle lz4 worst compress case, caller should allocate buffer with size of LZ4_compressBound(inputsize) for target compressed data storing, however in this case, if caller didn't allocate enough space, lz4 compressor still can handle output buffer budget properly, and end up compressing when left space in output buffer is not enough. So we don't have to allocate buffer with size for worst case, then we can avoid 2 * 4KB size intermediate buffer allocation when log_cluster_size is 2, and avoid unnecessary compressing work of compressor if we can not save at least 4KB space. Suggested-by: Daeho Jeong Signed-off-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/compress.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c index c7c5a8f8a48cb..dcedf008f359c 100644 --- a/fs/f2fs/compress.c +++ b/fs/f2fs/compress.c @@ -227,7 +227,12 @@ static int lz4_init_compress_ctx(struct compress_ctx *cc) if (!cc->private) return -ENOMEM; - cc->clen = LZ4_compressBound(PAGE_SIZE << cc->log_cluster_size); + /* + * we do not change cc->clen to LZ4_compressBound(inputsize) to + * adapt worst compress case, because lz4 compressor can handle + * output budget properly. + */ + cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE; return 0; } @@ -243,11 +248,9 @@ static int lz4_compress_pages(struct compress_ctx *cc) len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen, cc->clen, cc->private); - if (!len) { - printk_ratelimited("%sF2FS-fs (%s): lz4 compress failed\n", - KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id); - return -EIO; - } + if (!len) + return -EAGAIN; + cc->clen = len; return 0; } -- GitLab From 48abe91ac1ad27cd5a5709f983dcf58f2b9a6b70 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Sat, 9 May 2020 19:21:35 +0800 Subject: [PATCH 0425/1971] f2fs: Fix wrong stub helper update_sit_info update_sit_info should be f2fs_update_sit_info, otherwise build fails while no CONFIG_F2FS_STAT_FS. 
Fixes: fc7100ea2a52 ("f2fs: Add f2fs stats to sysfs") Signed-off-by: YueHaibing Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/f2fs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index d916540f12813..2a8ea81c52a15 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -3728,7 +3728,7 @@ static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; } static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { } static inline void __init f2fs_create_root_stats(void) { } static inline void f2fs_destroy_root_stats(void) { } -static inline void update_sit_info(struct f2fs_sb_info *sbi) {} +static inline void f2fs_update_sit_info(struct f2fs_sb_info *sbi) {} #endif extern const struct file_operations f2fs_dir_operations; -- GitLab From baaa7ebf25c78c5cb712fac16b7f549100beddd3 Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Mon, 11 May 2020 09:15:18 +0300 Subject: [PATCH 0426/1971] f2fs: report delalloc reserve as non-free in statfs for project quota This reserved space isn't committed yet but cannot be used for allocations. For userspace it has no difference from used space. See the same fix in ext4 commit f06925c73942 ("ext4: report delalloc reserve as non-free in statfs for project quota"). Fixes: ddc34e328d06 ("f2fs: introduce f2fs_statfs_project") Signed-off-by: Konstantin Khlebnikov Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/super.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 582bbf40c5590..f6c5c7ec5a12b 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -1283,7 +1283,8 @@ static int f2fs_statfs_project(struct super_block *sb, limit >>= sb->s_blocksize_bits; if (limit && buf->f_blocks > limit) { - curblock = dquot->dq_dqb.dqb_curspace >> sb->s_blocksize_bits; + curblock = (dquot->dq_dqb.dqb_curspace + + dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits; buf->f_blocks = limit; buf->f_bfree = buf->f_bavail = (buf->f_blocks > curblock) ? -- GitLab From 34c061ad85a2f5d5e9e3b045d72f3b211db6e282 Mon Sep 17 00:00:00 2001 From: Sayali Lokhande Date: Thu, 30 Apr 2020 16:28:29 +0530 Subject: [PATCH 0427/1971] f2fs: Avoid double lock for cp_rwsem during checkpoint There could be a scenario where f2fs_sync_node_pages gets called during checkpoint, which in turn tries to flush inline data and calls iput(). This results in deadlock as iput() tries to hold cp_rwsem, which is already held at the beginning by checkpoint->block_operations(). 
Call stack : Thread A Thread B f2fs_write_checkpoint() - block_operations(sbi) - f2fs_lock_all(sbi); - down_write(&sbi->cp_rwsem); - open() - igrab() - write() write inline data - unlink() - f2fs_sync_node_pages() - if (is_inline_node(page)) - flush_inline_data() - ilookup() page = f2fs_pagecache_get_page() if (!page) goto iput_out; iput_out: -close() -iput() iput(inode); - f2fs_evict_inode() - f2fs_truncate_blocks() - f2fs_lock_op() - down_read(&sbi->cp_rwsem); Fixes: 2049d4fcb057 ("f2fs: avoid multiple node page writes due to inline_data") Signed-off-by: Sayali Lokhande Signed-off-by: Jaegeuk Kim --- fs/f2fs/checkpoint.c | 5 +++++ fs/f2fs/f2fs.h | 1 + fs/f2fs/node.c | 51 ++++++++++++++++++++++++++++++++++++++++++-- 3 files changed, 55 insertions(+), 2 deletions(-) diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index d49f7a01d8a26..79e605f38f4fa 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -1168,6 +1168,11 @@ static int block_operations(struct f2fs_sb_info *sbi) }; int err = 0, cnt = 0; + /* + * Let's flush inline_data in dirty node pages. + */ + f2fs_flush_inline_data(sbi); + retry_flush_quotas: f2fs_lock_all(sbi); if (__need_flush_quota(sbi)) { diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 2a8ea81c52a15..7f3d259e7e376 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -3282,6 +3282,7 @@ void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid); struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid); struct page *f2fs_get_node_page_ra(struct page *parent, int start); int f2fs_move_node_page(struct page *node_page, int gc_type); +int f2fs_flush_inline_data(struct f2fs_sb_info *sbi); int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode, struct writeback_control *wbc, bool atomic, unsigned int *seq_id); diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 1db8cabf727ef..e632de10aedab 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -1807,6 +1807,53 @@ static bool flush_dirty_inode(struct page *page) return true; } +int f2fs_flush_inline_data(struct f2fs_sb_info *sbi) +{ + pgoff_t index = 0; + struct pagevec pvec; + int nr_pages; + int ret = 0; + + pagevec_init(&pvec); + + while ((nr_pages = pagevec_lookup_tag(&pvec, + NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) { + int i; + + for (i = 0; i < nr_pages; i++) { + struct page *page = pvec.pages[i]; + + if (!IS_DNODE(page)) + continue; + + lock_page(page); + + if (unlikely(page->mapping != NODE_MAPPING(sbi))) { +continue_unlock: + unlock_page(page); + continue; + } + + if (!PageDirty(page)) { + /* someone wrote it for us */ + goto continue_unlock; + } + + /* flush inline_data, if it's async context. */ + if (is_inline_node(page)) { + clear_inline_node(page); + unlock_page(page); + flush_inline_data(sbi, ino_of_node(page)); + continue; + } + unlock_page(page); + } + pagevec_release(&pvec); + cond_resched(); + } + return ret; +} + int f2fs_sync_node_pages(struct f2fs_sb_info *sbi, struct writeback_control *wbc, bool do_balance, enum iostat_type io_type) @@ -1870,8 +1917,8 @@ continue_unlock: goto continue_unlock; } - /* flush inline_data */ - if (is_inline_node(page)) { + /* flush inline_data, if it's async context. 
*/ + if (do_balance && is_inline_node(page)) { clear_inline_node(page); unlock_page(page); flush_inline_data(sbi, ino_of_node(page)); -- GitLab From c75488fb4d82b697f381f855bf5b16779df440aa Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Fri, 6 Mar 2020 14:35:33 +0800 Subject: [PATCH 0428/1971] f2fs: introduce F2FS_IOC_RESERVE_COMPRESS_BLOCKS This patch introduces a new ioctl to rollback all compress inode status: - add reserved blocks in dnode blocks - increase i_compr_blocks, i_blocks, total_valid_block_count - remove immutable flag Then compress inode can be restored to support overwrite functionality again. Signee-off-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/f2fs.h | 2 + fs/f2fs/file.c | 162 +++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 164 insertions(+) diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 7f3d259e7e376..1d96f733b1b71 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -430,6 +430,8 @@ static inline bool __has_cursum_space(struct f2fs_journal *journal, #define F2FS_IOC_GET_COMPRESS_BLOCKS _IOR(F2FS_IOCTL_MAGIC, 17, __u64) #define F2FS_IOC_RELEASE_COMPRESS_BLOCKS \ _IOR(F2FS_IOCTL_MAGIC, 18, __u64) +#define F2FS_IOC_RESERVE_COMPRESS_BLOCKS \ + _IOR(F2FS_IOCTL_MAGIC, 19, __u64) #define F2FS_IOC_GET_VOLUME_NAME FS_IOC_GETFSLABEL #define F2FS_IOC_SET_VOLUME_NAME FS_IOC_SETFSLABEL diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 189f8ce046f07..48d9088118077 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -3598,6 +3598,165 @@ out: return ret; } +static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count) +{ + struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); + unsigned int reserved_blocks = 0; + int cluster_size = F2FS_I(dn->inode)->i_cluster_size; + block_t blkaddr; + int i; + + for (i = 0; i < count; i++) { + blkaddr = data_blkaddr(dn->inode, dn->node_page, + dn->ofs_in_node + i); + + if (!__is_valid_data_blkaddr(blkaddr)) + continue; + if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr, + DATA_GENERIC_ENHANCE))) + return -EFSCORRUPTED; + } + + while (count) { + int compr_blocks = 0; + blkcnt_t reserved; + int ret; + + for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) { + blkaddr = f2fs_data_blkaddr(dn); + + if (i == 0) { + if (blkaddr == COMPRESS_ADDR) + continue; + dn->ofs_in_node += cluster_size; + goto next; + } + + if (__is_valid_data_blkaddr(blkaddr)) { + compr_blocks++; + continue; + } + + dn->data_blkaddr = NEW_ADDR; + f2fs_set_data_blkaddr(dn); + } + + reserved = cluster_size - compr_blocks; + ret = inc_valid_block_count(sbi, dn->inode, &reserved); + if (ret) + return ret; + + if (reserved != cluster_size - compr_blocks) + return -ENOSPC; + + f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true); + + reserved_blocks += reserved; +next: + count -= cluster_size; + } + + return reserved_blocks; +} + +static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg) +{ + struct inode *inode = file_inode(filp); + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); + pgoff_t page_idx = 0, last_idx; + unsigned int reserved_blocks = 0; + int ret; + + if (!f2fs_sb_has_compression(F2FS_I_SB(inode))) + return -EOPNOTSUPP; + + if (!f2fs_compressed_file(inode)) + return -EINVAL; + + if (f2fs_readonly(sbi->sb)) + return -EROFS; + + ret = mnt_want_write_file(filp); + if (ret) + return ret; + + if (F2FS_I(inode)->i_compr_blocks) + goto out; + + f2fs_balance_fs(F2FS_I_SB(inode), true); + + inode_lock(inode); + + if (!IS_IMMUTABLE(inode)) { + ret = -EINVAL; + goto unlock_inode; + } + + 
down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + down_write(&F2FS_I(inode)->i_mmap_sem); + + last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); + + while (page_idx < last_idx) { + struct dnode_of_data dn; + pgoff_t end_offset, count; + + set_new_dnode(&dn, inode, NULL, NULL, 0); + ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE); + if (ret) { + if (ret == -ENOENT) { + page_idx = f2fs_get_next_page_offset(&dn, + page_idx); + ret = 0; + continue; + } + break; + } + + end_offset = ADDRS_PER_PAGE(dn.node_page, inode); + count = min(end_offset - dn.ofs_in_node, last_idx - page_idx); + count = roundup(count, F2FS_I(inode)->i_cluster_size); + + ret = reserve_compress_blocks(&dn, count); + + f2fs_put_dnode(&dn); + + if (ret < 0) + break; + + page_idx += count; + reserved_blocks += ret; + } + + up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + up_write(&F2FS_I(inode)->i_mmap_sem); + + if (ret >= 0) { + F2FS_I(inode)->i_flags &= ~F2FS_IMMUTABLE_FL; + f2fs_set_inode_flags(inode); + inode->i_ctime = current_time(inode); + f2fs_mark_inode_dirty_sync(inode, true); + } +unlock_inode: + inode_unlock(inode); +out: + mnt_drop_write_file(filp); + + if (ret >= 0) { + ret = put_user(reserved_blocks, (u64 __user *)arg); + } else if (reserved_blocks && F2FS_I(inode)->i_compr_blocks) { + set_sbi_flag(sbi, SBI_NEED_FSCK); + f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx " + "iblocks=%llu, reserved=%u, compr_blocks=%llu, " + "run fsck to fix.", + __func__, inode->i_ino, inode->i_blocks, + reserved_blocks, + F2FS_I(inode)->i_compr_blocks); + } + + return ret; +} + long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp))))) @@ -3682,6 +3841,8 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) return f2fs_get_compress_blocks(filp, arg); case F2FS_IOC_RELEASE_COMPRESS_BLOCKS: return f2fs_release_compress_blocks(filp, arg); + case F2FS_IOC_RESERVE_COMPRESS_BLOCKS: + return f2fs_reserve_compress_blocks(filp, arg); default: return -ENOTTY; } @@ -3850,6 +4011,7 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case F2FS_IOC_SET_VOLUME_NAME: case F2FS_IOC_GET_COMPRESS_BLOCKS: case F2FS_IOC_RELEASE_COMPRESS_BLOCKS: + case F2FS_IOC_RESERVE_COMPRESS_BLOCKS: break; default: return -ENOIOCTLCMD; -- GitLab From 4fec3fc026717f81e34fca59937b0acbfb05642d Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Wed, 8 Apr 2020 19:55:17 +0800 Subject: [PATCH 0429/1971] f2fs: use round_up to enhance calculation .i_cluster_size should be power of 2, so we can use round_up() instead of roundup() to enhance the calculation. Signed-off-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/file.c | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 48d9088118077..97cc95a4a8ed4 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -742,16 +742,9 @@ int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock) * for compressed file, only support cluster size * aligned truncation. 
*/ - if (f2fs_compressed_file(inode)) { - size_t cluster_shift = PAGE_SHIFT + - F2FS_I(inode)->i_log_cluster_size; - size_t cluster_mask = (1 << cluster_shift) - 1; - - free_from = from >> cluster_shift; - if (from & cluster_mask) - free_from++; - free_from <<= cluster_shift; - } + if (f2fs_compressed_file(inode)) + free_from = round_up(from, + F2FS_I(inode)->i_cluster_size << PAGE_SHIFT); #endif err = f2fs_do_truncate_blocks(inode, free_from, lock); @@ -3563,7 +3556,7 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg) end_offset = ADDRS_PER_PAGE(dn.node_page, inode); count = min(end_offset - dn.ofs_in_node, last_idx - page_idx); - count = roundup(count, F2FS_I(inode)->i_cluster_size); + count = round_up(count, F2FS_I(inode)->i_cluster_size); ret = release_compress_blocks(&dn, count); @@ -3715,7 +3708,7 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg) end_offset = ADDRS_PER_PAGE(dn.node_page, inode); count = min(end_offset - dn.ofs_in_node, last_idx - page_idx); - count = roundup(count, F2FS_I(inode)->i_cluster_size); + count = round_up(count, F2FS_I(inode)->i_cluster_size); ret = reserve_compress_blocks(&dn, count); -- GitLab From b4b10061ef98c583bcf82a4200703fbaa98c18dc Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Tue, 31 Mar 2020 11:43:07 -0700 Subject: [PATCH 0430/1971] f2fs: refactor resize_fs to avoid meta updates in progress Sahitya raised an issue: - prevent meta updates while checkpoint is in progress allocate_segment_for_resize() can cause metapage updates if it requires to change the current node/data segments for resizing. Stop these meta updates when there is a checkpoint already in progress to prevent inconsistent CP data. Signed-off-by: Sahitya Tummala Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/checkpoint.c | 6 +- fs/f2fs/f2fs.h | 2 +- fs/f2fs/file.c | 5 +- fs/f2fs/gc.c | 117 +++++++++++++++++++++--------------- fs/f2fs/super.c | 1 - include/trace/events/f2fs.h | 4 +- 6 files changed, 77 insertions(+), 58 deletions(-) diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 79e605f38f4fa..620a386d82c1a 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -1559,7 +1559,8 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) return 0; f2fs_warn(sbi, "Start checkpoint disabled!"); } - mutex_lock(&sbi->cp_mutex); + if (cpc->reason != CP_RESIZE) + mutex_lock(&sbi->cp_mutex); if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) && ((cpc->reason & CP_FASTBOOT) || (cpc->reason & CP_SYNC) || @@ -1628,7 +1629,8 @@ stop: f2fs_update_time(sbi, CP_TIME); trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint"); out: - mutex_unlock(&sbi->cp_mutex); + if (cpc->reason != CP_RESIZE) + mutex_unlock(&sbi->cp_mutex); return err; } diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 1d96f733b1b71..338622069e421 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -194,6 +194,7 @@ enum { #define CP_DISCARD 0x00000010 #define CP_TRIMMED 0x00000020 #define CP_PAUSE 0x00000040 +#define CP_RESIZE 0x00000080 #define MAX_DISCARD_BLOCKS(sbi) BLKS_PER_SEC(sbi) #define DEF_MAX_DISCARD_REQUEST 8 /* issue 8 discards per round */ @@ -1471,7 +1472,6 @@ struct f2fs_sb_info { unsigned int segs_per_sec; /* segments per section */ unsigned int secs_per_zone; /* sections per zone */ unsigned int total_sections; /* total section count */ - struct mutex resize_mutex; /* for resize exclusion */ unsigned int total_node_count; /* total node block count */ unsigned int total_valid_node_count; /* 
valid node block count */ loff_t max_file_blocks; /* max block index of file */ diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 97cc95a4a8ed4..f7de2a1da5283 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -3312,7 +3312,6 @@ static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg) { struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp)); __u64 block_count; - int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; @@ -3324,9 +3323,7 @@ static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg) sizeof(block_count))) return -EFAULT; - ret = f2fs_resize_fs(sbi, block_count); - - return ret; + return f2fs_resize_fs(sbi, block_count); } static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg) diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index 28a8c79c8bdc3..f3c45ec2a7e7a 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -13,6 +13,7 @@ #include #include #include +#include #include "f2fs.h" #include "node.h" @@ -1405,12 +1406,29 @@ void f2fs_build_gc_manager(struct f2fs_sb_info *sbi) GET_SEGNO(sbi, FDEV(0).end_blk) + 1; } -static int free_segment_range(struct f2fs_sb_info *sbi, unsigned int start, - unsigned int end) +static int free_segment_range(struct f2fs_sb_info *sbi, + unsigned int secs, bool gc_only) { - int type; - unsigned int segno, next_inuse; + unsigned int segno, next_inuse, start, end; + struct cp_control cpc = { CP_RESIZE, 0, 0, 0 }; + int gc_mode, gc_type; int err = 0; + int type; + + /* Force block allocation for GC */ + MAIN_SECS(sbi) -= secs; + start = MAIN_SECS(sbi) * sbi->segs_per_sec; + end = MAIN_SEGS(sbi) - 1; + + mutex_lock(&DIRTY_I(sbi)->seglist_lock); + for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++) + if (SIT_I(sbi)->last_victim[gc_mode] >= start) + SIT_I(sbi)->last_victim[gc_mode] = 0; + + for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++) + if (sbi->next_victim_seg[gc_type] >= start) + sbi->next_victim_seg[gc_type] = NULL_SEGNO; + mutex_unlock(&DIRTY_I(sbi)->seglist_lock); /* Move out cursegs from the target range */ for (type = CURSEG_HOT_DATA; type < NR_CURSEG_TYPE; type++) @@ -1423,18 +1441,24 @@ static int free_segment_range(struct f2fs_sb_info *sbi, unsigned int start, .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS), }; - down_write(&sbi->gc_lock); do_garbage_collect(sbi, segno, &gc_list, FG_GC); - up_write(&sbi->gc_lock); put_gc_inode(&gc_list); - if (get_valid_blocks(sbi, segno, true)) - return -EAGAIN; + if (!gc_only && get_valid_blocks(sbi, segno, true)) { + err = -EAGAIN; + goto out; + } + if (fatal_signal_pending(current)) { + err = -ERESTARTSYS; + goto out; + } } + if (gc_only) + goto out; - err = f2fs_sync_fs(sbi->sb, 1); + err = f2fs_write_checkpoint(sbi, &cpc); if (err) - return err; + goto out; next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start); if (next_inuse <= end) { @@ -1442,6 +1466,8 @@ static int free_segment_range(struct f2fs_sb_info *sbi, unsigned int start, next_inuse); f2fs_bug_on(sbi, 1); } +out: + MAIN_SECS(sbi) += secs; return err; } @@ -1487,6 +1513,7 @@ static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs) SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs; MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs; + MAIN_SECS(sbi) += secs; FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs; FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs; F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks); @@ -1508,8 +1535,8 @@ static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs) int f2fs_resize_fs(struct f2fs_sb_info *sbi, 
__u64 block_count) { __u64 old_block_count, shrunk_blocks; + struct cp_control cpc = { CP_RESIZE, 0, 0, 0 }; unsigned int secs; - int gc_mode, gc_type; int err = 0; __u32 rem; @@ -1544,10 +1571,27 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count) return -EINVAL; } - freeze_bdev(sbi->sb->s_bdev); - shrunk_blocks = old_block_count - block_count; secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi)); + + /* stop other GC */ + if (!down_write_trylock(&sbi->gc_lock)) + return -EAGAIN; + + /* stop CP to protect MAIN_SEC in free_segment_range */ + f2fs_lock_op(sbi); + err = free_segment_range(sbi, secs, true); + f2fs_unlock_op(sbi); + up_write(&sbi->gc_lock); + if (err) + return err; + + set_sbi_flag(sbi, SBI_IS_RESIZEFS); + + freeze_super(sbi->sb); + down_write(&sbi->gc_lock); + mutex_lock(&sbi->cp_mutex); + spin_lock(&sbi->stat_lock); if (shrunk_blocks + valid_user_blocks(sbi) + sbi->current_reserved_blocks + sbi->unusable_block_count + @@ -1556,69 +1600,44 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count) else sbi->user_block_count -= shrunk_blocks; spin_unlock(&sbi->stat_lock); - if (err) { - thaw_bdev(sbi->sb->s_bdev, sbi->sb); - return err; - } - - mutex_lock(&sbi->resize_mutex); - set_sbi_flag(sbi, SBI_IS_RESIZEFS); - - mutex_lock(&DIRTY_I(sbi)->seglist_lock); - - MAIN_SECS(sbi) -= secs; - - for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++) - if (SIT_I(sbi)->last_victim[gc_mode] >= - MAIN_SECS(sbi) * sbi->segs_per_sec) - SIT_I(sbi)->last_victim[gc_mode] = 0; - - for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++) - if (sbi->next_victim_seg[gc_type] >= - MAIN_SECS(sbi) * sbi->segs_per_sec) - sbi->next_victim_seg[gc_type] = NULL_SEGNO; - - mutex_unlock(&DIRTY_I(sbi)->seglist_lock); + if (err) + goto out_err; - err = free_segment_range(sbi, MAIN_SECS(sbi) * sbi->segs_per_sec, - MAIN_SEGS(sbi) - 1); + err = free_segment_range(sbi, secs, false); if (err) - goto out; + goto recover_out; update_sb_metadata(sbi, -secs); err = f2fs_commit_super(sbi, false); if (err) { update_sb_metadata(sbi, secs); - goto out; + goto recover_out; } - mutex_lock(&sbi->cp_mutex); update_fs_metadata(sbi, -secs); clear_sbi_flag(sbi, SBI_IS_RESIZEFS); set_sbi_flag(sbi, SBI_IS_DIRTY); - mutex_unlock(&sbi->cp_mutex); - err = f2fs_sync_fs(sbi->sb, 1); + err = f2fs_write_checkpoint(sbi, &cpc); if (err) { - mutex_lock(&sbi->cp_mutex); update_fs_metadata(sbi, secs); - mutex_unlock(&sbi->cp_mutex); update_sb_metadata(sbi, secs); f2fs_commit_super(sbi, false); } -out: +recover_out: if (err) { set_sbi_flag(sbi, SBI_NEED_FSCK); f2fs_err(sbi, "resize_fs failed, should run fsck to repair!"); - MAIN_SECS(sbi) += secs; spin_lock(&sbi->stat_lock); sbi->user_block_count += shrunk_blocks; spin_unlock(&sbi->stat_lock); } +out_err: + mutex_unlock(&sbi->cp_mutex); + up_write(&sbi->gc_lock); + thaw_super(sbi->sb); clear_sbi_flag(sbi, SBI_IS_RESIZEFS); - mutex_unlock(&sbi->resize_mutex); - thaw_bdev(sbi->sb->s_bdev, sbi->sb); return err; } diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index f6c5c7ec5a12b..441eaaf9739c4 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -3408,7 +3408,6 @@ try_onemore: init_rwsem(&sbi->gc_lock); mutex_init(&sbi->writepages); mutex_init(&sbi->cp_mutex); - mutex_init(&sbi->resize_mutex); init_rwsem(&sbi->node_write); init_rwsem(&sbi->node_change); diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index 757d3d6031e63..4dbcdc6d27383 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -50,6 +50,7 @@ TRACE_DEFINE_ENUM(CP_RECOVERY); 
TRACE_DEFINE_ENUM(CP_DISCARD); TRACE_DEFINE_ENUM(CP_TRIMMED); TRACE_DEFINE_ENUM(CP_PAUSE); +TRACE_DEFINE_ENUM(CP_RESIZE); #define show_block_type(type) \ __print_symbolic(type, \ @@ -126,7 +127,8 @@ TRACE_DEFINE_ENUM(CP_PAUSE); { CP_RECOVERY, "Recovery" }, \ { CP_DISCARD, "Discard" }, \ { CP_PAUSE, "Pause" }, \ - { CP_TRIMMED, "Trimmed" }) + { CP_TRIMMED, "Trimmed" }, \ + { CP_RESIZE, "Resize" }) #define show_fsync_cpreason(type) \ __print_symbolic(type, \ -- GitLab From deaf160f8aa767f1b2cd53f1834c1a4126815631 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Mon, 20 Apr 2020 23:00:57 +0100 Subject: [PATCH 0431/1971] f2fs: remove redundant assignment to variable err The variable err is being assigned with a value that is never read and it is being updated later with a new value. The initialization is redundant and can be removed. Addresses-Coverity: ("Unused value") Signed-off-by: Colin Ian King Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/namei.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index 58db1de7ca947..63b34a161cf48 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c @@ -564,7 +564,7 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry) struct inode *inode = d_inode(dentry); struct f2fs_dir_entry *de; struct page *page; - int err = -ENOENT; + int err; trace_f2fs_unlink_enter(dir, dentry); -- GitLab From 03382f1aa99f438f8e9dc7562c7368f55ba8f56c Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Tue, 21 Apr 2020 19:36:21 +0800 Subject: [PATCH 0432/1971] f2fs: compress: don't handle non-compressed data in workqueue If bio has no compressed data, we don't need to handle end_io work in workqueue, instead, it should just let interrupter handle it directly to speed up IO response. 
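With the tag in place, the read completion path only needs the workqueue when real
decompression work is pending; roughly, as a sketch only (ctx->enabled_steps and
STEP_DECOMPRESS come from this patch, the workqueue and completion helper names are
illustrative, not the actual f2fs symbols):

	/* defer to the post-read workqueue only for compressed clusters */
	if (ctx->enabled_steps & (1 << STEP_DECOMPRESS))
		queue_work(post_read_wq, &ctx->work);
	else
		complete_read_bio(bio);	/* illustrative: finish directly in IRQ context */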
Signed-off-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/data.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index ff8a92df8d995..8607c02e8f960 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -114,7 +114,8 @@ static enum count_type __read_io_type(struct page *page) /* postprocessing steps for read bios */ enum bio_post_read_step { STEP_DECRYPT, - STEP_DECOMPRESS, + STEP_DECOMPRESS_NOWQ, /* handle normal cluster data inplace */ + STEP_DECOMPRESS, /* handle compressed cluster data in workqueue */ STEP_VERITY, }; @@ -990,7 +991,7 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr, if (f2fs_encrypted_file(inode)) post_read_steps |= 1 << STEP_DECRYPT; if (f2fs_compressed_file(inode)) - post_read_steps |= 1 << STEP_DECOMPRESS; + post_read_steps |= 1 << STEP_DECOMPRESS_NOWQ; if (f2fs_need_verity(inode, first_idx)) post_read_steps |= 1 << STEP_VERITY; @@ -2189,6 +2190,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret, for (i = 0; i < dic->nr_cpages; i++) { struct page *page = dic->cpages[i]; block_t blkaddr; + struct bio_post_read_ctx *ctx; blkaddr = data_blkaddr(dn.inode, dn.node_page, dn.ofs_in_node + i + 1); @@ -2225,6 +2227,11 @@ submit_and_realloc: if (bio_add_page(bio, page, blocksize, 0) < blocksize) goto submit_and_realloc; + /* tag STEP_DECOMPRESS to handle IO in wq */ + ctx = bio->bi_private; + if (!(ctx->enabled_steps & (1 << STEP_DECOMPRESS))) + ctx->enabled_steps |= 1 << STEP_DECOMPRESS; + inc_page_count(sbi, F2FS_RD_DATA); f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE); ClearPageError(page); -- GitLab From f3494345ce9999624b36109252a4bf5f00e51a46 Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Thu, 23 Apr 2020 17:57:33 +0800 Subject: [PATCH 0433/1971] f2fs: fix potential use-after-free issue In error path of f2fs_read_multi_pages(), it should let last referrer release decompress io context memory, otherwise, other referrer will cause use-after-free issue. Fixes: 4c8ff7095bef ("f2fs: support data compression") Signed-off-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/data.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 8607c02e8f960..4d871d27a85f9 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -2208,16 +2208,16 @@ submit_and_realloc: page->index, for_write); if (IS_ERR(bio)) { ret = PTR_ERR(bio); - bio = NULL; dic->failed = true; if (refcount_sub_and_test(dic->nr_cpages - i, - &dic->ref)) + &dic->ref)) { f2fs_decompress_end_io(dic->rpages, cc->cluster_size, true, false); - f2fs_free_dic(dic); + f2fs_free_dic(dic); + } f2fs_put_dnode(&dn); - *bio_ret = bio; + *bio_ret = NULL; return ret; } } -- GitLab From 9c1223845a37ce09fd498b8c8ed061decff20eda Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Thu, 23 Apr 2020 18:03:06 +0800 Subject: [PATCH 0434/1971] f2fs: add compressed/gc data read IO stat in order to account data read IOs more accurately. 
Signed-off-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/data.c | 1 + fs/f2fs/f2fs.h | 2 ++ fs/f2fs/gc.c | 2 ++ fs/f2fs/sysfs.c | 7 +++++++ include/trace/events/f2fs.h | 11 ++++++++--- 5 files changed, 20 insertions(+), 3 deletions(-) diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 4d871d27a85f9..48a622b95b76e 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -2234,6 +2234,7 @@ submit_and_realloc: inc_page_count(sbi, F2FS_RD_DATA); f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE); + f2fs_update_iostat(sbi, FS_CDATA_READ_IO, F2FS_BLKSIZE); ClearPageError(page); *last_block_in_bio = blkaddr; } diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 338622069e421..51863e4f5d4e0 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -1149,6 +1149,8 @@ enum iostat_type { APP_READ_IO, /* app read IOs */ APP_MAPPED_READ_IO, /* app mapped read IOs */ FS_DATA_READ_IO, /* data read IOs */ + FS_GDATA_READ_IO, /* data read IOs from background gc */ + FS_CDATA_READ_IO, /* compressed data read IOs */ FS_NODE_READ_IO, /* node read IOs */ FS_META_READ_IO, /* meta read IOs */ diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index f3c45ec2a7e7a..5b95d5a146eb6 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -740,6 +740,7 @@ got_it: f2fs_put_page(page, 1); f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE); + f2fs_update_iostat(sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE); return 0; put_encrypted_page: @@ -846,6 +847,7 @@ static int move_data_block(struct inode *inode, block_t bidx, } f2fs_update_iostat(fio.sbi, FS_DATA_READ_IO, F2FS_BLKSIZE); + f2fs_update_iostat(fio.sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE); lock_page(mpage); if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) || diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c index eaf8088548f08..a117ae1f9d5f1 100644 --- a/fs/f2fs/sysfs.c +++ b/fs/f2fs/sysfs.c @@ -803,6 +803,7 @@ static int __maybe_unused iostat_info_seq_show(struct seq_file *seq, seq_printf(seq, "time: %-16llu\n", now); /* print app write IOs */ + seq_puts(seq, "[WRITE]\n"); seq_printf(seq, "app buffered: %-16llu\n", sbi->rw_iostat[APP_BUFFERED_IO]); seq_printf(seq, "app direct: %-16llu\n", @@ -829,6 +830,7 @@ static int __maybe_unused iostat_info_seq_show(struct seq_file *seq, sbi->rw_iostat[FS_CP_META_IO]); /* print app read IOs */ + seq_puts(seq, "[READ]\n"); seq_printf(seq, "app buffered: %-16llu\n", sbi->rw_iostat[APP_BUFFERED_READ_IO]); seq_printf(seq, "app direct: %-16llu\n", @@ -839,12 +841,17 @@ static int __maybe_unused iostat_info_seq_show(struct seq_file *seq, /* print fs read IOs */ seq_printf(seq, "fs data: %-16llu\n", sbi->rw_iostat[FS_DATA_READ_IO]); + seq_printf(seq, "fs gc data: %-16llu\n", + sbi->rw_iostat[FS_GDATA_READ_IO]); + seq_printf(seq, "fs compr_data: %-16llu\n", + sbi->rw_iostat[FS_CDATA_READ_IO]); seq_printf(seq, "fs node: %-16llu\n", sbi->rw_iostat[FS_NODE_READ_IO]); seq_printf(seq, "fs meta: %-16llu\n", sbi->rw_iostat[FS_META_READ_IO]); /* print other IOs */ + seq_puts(seq, "[OTHER]\n"); seq_printf(seq, "fs discard: %-16llu\n", sbi->rw_iostat[FS_DISCARD]); diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index 4dbcdc6d27383..a8e2e83c91cef 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -1840,6 +1840,8 @@ TRACE_EVENT(f2fs_iostat, __field(unsigned long long, app_rio) __field(unsigned long long, app_mrio) __field(unsigned long long, fs_drio) + __field(unsigned long long, fs_gdrio) + __field(unsigned long long, fs_cdrio) __field(unsigned long long, fs_nrio) __field(unsigned long long, fs_mrio) __field(unsigned long 
long, fs_discard) @@ -1864,6 +1866,8 @@ TRACE_EVENT(f2fs_iostat, __entry->app_rio = iostat[APP_READ_IO]; __entry->app_mrio = iostat[APP_MAPPED_READ_IO]; __entry->fs_drio = iostat[FS_DATA_READ_IO]; + __entry->fs_gdrio = iostat[FS_GDATA_READ_IO]; + __entry->fs_cdrio = iostat[FS_CDATA_READ_IO]; __entry->fs_nrio = iostat[FS_NODE_READ_IO]; __entry->fs_mrio = iostat[FS_META_READ_IO]; __entry->fs_discard = iostat[FS_DISCARD]; @@ -1875,15 +1879,16 @@ TRACE_EVENT(f2fs_iostat, "gc [data=%llu, node=%llu], " "cp [data=%llu, node=%llu, meta=%llu], " "app [read=%llu (direct=%llu, buffered=%llu), mapped=%llu], " - "fs [data=%llu, node=%llu, meta=%llu]", + "fs [data=%llu, (gc_data=%llu, compr_data=%llu), " + "node=%llu, meta=%llu]", show_dev(__entry->dev), __entry->app_wio, __entry->app_dio, __entry->app_bio, __entry->app_mio, __entry->fs_dio, __entry->fs_nio, __entry->fs_mio, __entry->fs_discard, __entry->fs_gc_dio, __entry->fs_gc_nio, __entry->fs_cp_dio, __entry->fs_cp_nio, __entry->fs_cp_mio, __entry->app_rio, __entry->app_drio, __entry->app_brio, - __entry->app_mrio, __entry->fs_drio, __entry->fs_nrio, - __entry->fs_mrio) + __entry->app_mrio, __entry->fs_drio, __entry->fs_gdrio, + __entry->fs_cdrio, __entry->fs_nrio, __entry->fs_mrio) ); #endif /* _TRACE_F2FS_H */ -- GitLab From 1454c978efbb57b052670d50023f48c759d704ce Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Fri, 8 May 2020 09:16:03 +0800 Subject: [PATCH 0435/1971] f2fs: compress: fix zstd data corruption During zstd compression, ZSTD_endStream() may return non-zero value because distination buffer is full, but there is still compressed data remained in intermediate buffer, it means that zstd algorithm can not save at last one block space, let's just writeback raw data instead of compressed one, this can fix data corruption when decompressing incomplete stored compression data. Fixes: 50cfa66f0de0 ("f2fs: compress: support zstd compress algorithm") Signed-off-by: Daeho Jeong Signed-off-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/compress.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c index dcedf008f359c..bf152c0d79fe8 100644 --- a/fs/f2fs/compress.c +++ b/fs/f2fs/compress.c @@ -360,6 +360,13 @@ static int zstd_compress_pages(struct compress_ctx *cc) return -EIO; } + /* + * there is compressed data remained in intermediate buffer due to + * no more space in cbuf.cdata + */ + if (ret) + return -EAGAIN; + cc->clen = outbuf.pos; return 0; } -- GitLab From 0f03c08892ac3c42d93662c8dec86bf74e5d4c9b Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 11 May 2020 22:10:27 +0100 Subject: [PATCH 0436/1971] i2c: pxa: consolidate i2c_pxa_*xfer() implementations Most of i2c_pxa_pio_xfer() and i2c_pxa_xfer() are identical; the only differences are that i2c_pxa_pio_xfer() may reset the bus, and they use different underlying transfer functions. The retry loop is the same. Consolidate these two functions. 
Signed-off-by: Russell King Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-pxa.c | 36 ++++++++++++++++-------------------- 1 file changed, 16 insertions(+), 20 deletions(-) diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c index c1e50c0b9756b..46f1cf97d9555 100644 --- a/drivers/i2c/busses/i2c-pxa.c +++ b/drivers/i2c/busses/i2c-pxa.c @@ -1102,18 +1102,20 @@ static int i2c_pxa_do_xfer(struct pxa_i2c *i2c, struct i2c_msg *msg, int num) return ret; } -static int i2c_pxa_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) +static int i2c_pxa_internal_xfer(struct pxa_i2c *i2c, + struct i2c_msg *msgs, int num, + int (*xfer)(struct pxa_i2c *, + struct i2c_msg *, int num)) { - struct pxa_i2c *i2c = adap->algo_data; int ret, i; - for (i = adap->retries; i >= 0; i--) { - ret = i2c_pxa_do_xfer(i2c, msgs, num); + for (i = i2c->adap.retries; i >= 0; i--) { + ret = xfer(i2c, msgs, num); if (ret != I2C_RETRY) goto out; if (i2c_debug) - dev_dbg(&adap->dev, "Retrying transmission\n"); + dev_dbg(&i2c->adap.dev, "Retrying transmission\n"); udelay(100); } i2c_pxa_scream_blue_murder(i2c, "exhausted retries"); @@ -1123,6 +1125,14 @@ static int i2c_pxa_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num return ret; } +static int i2c_pxa_xfer(struct i2c_adapter *adap, + struct i2c_msg msgs[], int num) +{ + struct pxa_i2c *i2c = adap->algo_data; + + return i2c_pxa_internal_xfer(i2c, msgs, num, i2c_pxa_do_xfer); +} + static u32 i2c_pxa_functionality(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | @@ -1210,7 +1220,6 @@ static int i2c_pxa_pio_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) { struct pxa_i2c *i2c = adap->algo_data; - int ret, i; /* If the I2C controller is disabled we need to reset it (probably due to a suspend/resume destroying state). We do @@ -1219,20 +1228,7 @@ static int i2c_pxa_pio_xfer(struct i2c_adapter *adap, if (!(readl(_ICR(i2c)) & ICR_IUE)) i2c_pxa_reset(i2c); - for (i = adap->retries; i >= 0; i--) { - ret = i2c_pxa_do_pio_xfer(i2c, msgs, num); - if (ret != I2C_RETRY) - goto out; - - if (i2c_debug) - dev_dbg(&adap->dev, "Retrying transmission\n"); - udelay(100); - } - i2c_pxa_scream_blue_murder(i2c, "exhausted retries"); - ret = -EREMOTEIO; - out: - i2c_pxa_set_slave(i2c, ret); - return ret; + return i2c_pxa_internal_xfer(i2c, msgs, num, i2c_pxa_do_pio_xfer); } static const struct i2c_algorithm i2c_pxa_pio_algorithm = { -- GitLab From c25e509aef8bc511e245ca441017d00996f09e36 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 11 May 2020 22:10:32 +0100 Subject: [PATCH 0437/1971] i2c: pxa: avoid complaints with non-responsive slaves Running i2cdetect on a PXA I2C adapter is very noisy; it complains whenever a slave fails to respond to the address cycle. Since it is normal to probe for slaves in this way, we should not fill the kernel log. This is especially true with SFP modules that take a while to respond on the I2C bus, and probing via the I2C bus is the only way to detect that they are ready. Fix this by changing the internal transfer return code from I2C_RETRY to a new NO_SLAVE code (mapped to -ENXIO, as per the I2C documentation for this condition, but we still return -EREMOTEIO to the I2C stack to maintain long established driver behaviour.) 
Signed-off-by: Russell King Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-pxa.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c index 46f1cf97d9555..f20f8b9057937 100644 --- a/drivers/i2c/busses/i2c-pxa.c +++ b/drivers/i2c/busses/i2c-pxa.c @@ -90,6 +90,7 @@ */ #define DEF_TIMEOUT 32 +#define NO_SLAVE (-ENXIO) #define BUS_ERROR (-EREMOTEIO) #define XFER_NAKED (-ECONNREFUSED) #define I2C_RETRY (-2000) /* an error has occurred retry transmit */ @@ -881,7 +882,7 @@ static void i2c_pxa_irq_txempty(struct pxa_i2c *i2c, u32 isr) */ if (isr & ISR_ACKNAK) { if (i2c->msg_ptr == 0 && i2c->msg_idx == 0) - ret = I2C_RETRY; + ret = NO_SLAVE; else ret = XFER_NAKED; } @@ -1109,16 +1110,19 @@ static int i2c_pxa_internal_xfer(struct pxa_i2c *i2c, { int ret, i; - for (i = i2c->adap.retries; i >= 0; i--) { + for (i = 0; ; ) { ret = xfer(i2c, msgs, num); - if (ret != I2C_RETRY) + if (ret != I2C_RETRY && ret != NO_SLAVE) goto out; + if (++i >= i2c->adap.retries) + break; if (i2c_debug) dev_dbg(&i2c->adap.dev, "Retrying transmission\n"); udelay(100); } - i2c_pxa_scream_blue_murder(i2c, "exhausted retries"); + if (ret != NO_SLAVE) + i2c_pxa_scream_blue_murder(i2c, "exhausted retries"); ret = -EREMOTEIO; out: i2c_pxa_set_slave(i2c, ret); -- GitLab From ae1c3b73945cadb59af1cbf31bc8105d2014ef90 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 11 May 2020 22:10:37 +0100 Subject: [PATCH 0438/1971] i2c: pxa: ensure timeout messages are unique Ensure that the various timeout messages can identify where in the code they were produced from to aid debugging. Signed-off-by: Russell King Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-pxa.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c index f20f8b9057937..0becab239476a 100644 --- a/drivers/i2c/busses/i2c-pxa.c +++ b/drivers/i2c/busses/i2c-pxa.c @@ -1095,7 +1095,7 @@ static int i2c_pxa_do_xfer(struct pxa_i2c *i2c, struct i2c_msg *msg, int num) ret = i2c->msg_idx; if (!timeout && i2c->msg_num) { - i2c_pxa_scream_blue_murder(i2c, "timeout"); + i2c_pxa_scream_blue_murder(i2c, "timeout with active message"); ret = I2C_RETRY; } @@ -1169,7 +1169,7 @@ static int i2c_pxa_pio_set_master(struct pxa_i2c *i2c) if (timeout < 0) { show_state(i2c); dev_err(&i2c->adap.dev, - "i2c_pxa: timeout waiting for bus free\n"); + "i2c_pxa: timeout waiting for bus free (set_master)\n"); return I2C_RETRY; } @@ -1213,7 +1213,7 @@ static int i2c_pxa_do_pio_xfer(struct pxa_i2c *i2c, out: if (timeout == 0) { - i2c_pxa_scream_blue_murder(i2c, "timeout"); + i2c_pxa_scream_blue_murder(i2c, "timeout (do_pio_xfer)"); ret = I2C_RETRY; } -- GitLab From 18d30c0946f912cdb86ab0417661eccda6342375 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 11 May 2020 22:10:42 +0100 Subject: [PATCH 0439/1971] i2c: pxa: remove some unnecessary debug Remove unnecessary show_state() in the loop inside i2c_pxa_pio_set_master(), which can be unnecessarily verbose. Remove the i2c_pxa_scream_blue_murder() in i2c_pxa_pio_xfer(), which will trigger if we are probing the I2C bus and a slave does not respond; this is a normal event, and not something to report. 
Signed-off-by: Russell King Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-pxa.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c index 0becab239476a..db739cce93acb 100644 --- a/drivers/i2c/busses/i2c-pxa.c +++ b/drivers/i2c/busses/i2c-pxa.c @@ -1161,10 +1161,8 @@ static int i2c_pxa_pio_set_master(struct pxa_i2c *i2c) /* * Wait for the bus to become free. */ - while (timeout-- && readl(_ISR(i2c)) & (ISR_IBB | ISR_UB)) { + while (timeout-- && readl(_ISR(i2c)) & (ISR_IBB | ISR_UB)) udelay(1000); - show_state(i2c); - } if (timeout < 0) { show_state(i2c); -- GitLab From e81c979f4e071d516aa27cf5a0c3939da00dc1ca Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 6 May 2020 10:36:38 +0100 Subject: [PATCH 0440/1971] i2c: pxa: clear all master action bits in i2c_pxa_stop_message() If we timeout during a message transfer, the control register may contain bits that cause an action to be set. Read-modify-writing the register leaving these bits set may trigger the hardware to attempt one of these actions unintentionally. Always clear these bits when cleaning up after a message or after a timeout. Signed-off-by: Russell King Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-pxa.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c index db739cce93acb..a72d07bdb7938 100644 --- a/drivers/i2c/busses/i2c-pxa.c +++ b/drivers/i2c/busses/i2c-pxa.c @@ -795,11 +795,9 @@ static inline void i2c_pxa_stop_message(struct pxa_i2c *i2c) { u32 icr; - /* - * Clear the STOP and ACK flags - */ + /* Clear the START, STOP, ACK, TB and MA flags */ icr = readl(_ICR(i2c)); - icr &= ~(ICR_STOP | ICR_ACKNAK); + icr &= ~(ICR_START | ICR_STOP | ICR_ACKNAK | ICR_TB | ICR_MA); writel(icr, _ICR(i2c)); } -- GitLab From 2fd6cbf41aa9c76c8ce6e63bbbdebc0ce0c5d49f Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 6 May 2020 10:36:43 +0100 Subject: [PATCH 0441/1971] i2c: pxa: use master-abort for device probes Use master-abort to send the stop condition after an address cycle rather than resetting the controller. Signed-off-by: Russell King Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-pxa.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c index a72d07bdb7938..0e194d6cd1b56 100644 --- a/drivers/i2c/busses/i2c-pxa.c +++ b/drivers/i2c/busses/i2c-pxa.c @@ -940,14 +940,8 @@ static void i2c_pxa_irq_txempty(struct pxa_i2c *i2c, u32 isr) icr &= ~ICR_ALDIE; icr |= ICR_START | ICR_TB; } else { - if (i2c->msg->len == 0) { - /* - * Device probes have a message length of zero - * and need the bus to be reset before it can - * be used again. - */ - i2c_pxa_reset(i2c); - } + if (i2c->msg->len == 0) + icr |= ICR_MA; i2c_pxa_master_complete(i2c, 0); } -- GitLab From 454b9c1ffd422bfd5cfb7212a66a00a0d8a2c1b4 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Thu, 26 Mar 2020 22:09:54 +0100 Subject: [PATCH 0442/1971] power: supply: bq24190_charger: convert to use i2c_new_client_device() Move away from the deprecated API in this comment. 
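For reference, the comment being updated sketches how a board file instantiates the
charger; the practical difference between the two helpers is error reporting
(i2c_new_device() returned NULL on failure, i2c_new_client_device() returns an
ERR_PTR), so a caller-side sketch of the conversion looks like this (board info
shortened from the driver comment):

	struct i2c_board_info bi = { .type = "bq24190", .addr = 0x6b };
	struct i2c_client *client;

	/* deprecated helper: NULL on failure */
	client = i2c_new_device(adap, &bi);

	/* preferred helper: ERR_PTR() on failure */
	client = i2c_new_client_device(adap, &bi);
	if (IS_ERR(client))
		return PTR_ERR(client);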
Signed-off-by: Wolfram Sang Signed-off-by: Sebastian Reichel --- drivers/power/supply/bq24190_charger.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c index 453d6332d43a7..4540e913057f1 100644 --- a/drivers/power/supply/bq24190_charger.c +++ b/drivers/power/supply/bq24190_charger.c @@ -673,7 +673,7 @@ static int bq24190_register_reset(struct bq24190_dev_info *bdi) * { .type = "bq24190", .addr = 0x6b, .properties = pe, .irq = irq }; * struct i2c_adapter ad = { ... }; * i2c_add_adapter(&ad); - * i2c_new_device(&ad, &bi); + * i2c_new_client_device(&ad, &bi); */ if (device_property_read_bool(bdi->dev, "disable-reset")) return 0; -- GitLab From c9c457022b7df2ab515f0424e65445353f91ecad Mon Sep 17 00:00:00 2001 From: David Heidelberg Date: Mon, 11 May 2020 14:50:30 -0700 Subject: [PATCH 0443/1971] dt-bindings: input: touchscreen: elants_i2c: convert to YAML Convert elants_i2c.txt DT binding to YAML and put into correct directory. Reviewed-by: Dmitry Osipenko Signed-off-by: David Heidelberg Reviewed-by: Rob Herring Link: https://lore.kernel.org/r/20200423173253.711725-2-david@ixit.cz Signed-off-by: Dmitry Torokhov --- .../devicetree/bindings/input/elants_i2c.txt | 34 --------- .../input/touchscreen/elan,elants_i2c.yaml | 69 +++++++++++++++++++ 2 files changed, 69 insertions(+), 34 deletions(-) delete mode 100644 Documentation/devicetree/bindings/input/elants_i2c.txt create mode 100644 Documentation/devicetree/bindings/input/touchscreen/elan,elants_i2c.yaml diff --git a/Documentation/devicetree/bindings/input/elants_i2c.txt b/Documentation/devicetree/bindings/input/elants_i2c.txt deleted file mode 100644 index 5edac8be08028..0000000000000 --- a/Documentation/devicetree/bindings/input/elants_i2c.txt +++ /dev/null @@ -1,34 +0,0 @@ -Elantech I2C Touchscreen - -Required properties: -- compatible: must be "elan,ekth3500". -- reg: I2C address of the chip. -- interrupts: interrupt to which the chip is connected (see interrupt - binding[0]). - -Optional properties: -- wakeup-source: touchscreen can be used as a wakeup source. -- pinctrl-names: should be "default" (see pinctrl binding [1]). -- pinctrl-0: a phandle pointing to the pin settings for the device (see - pinctrl binding [1]). -- reset-gpios: reset gpio the chip is connected to. -- vcc33-supply: a phandle for the regulator supplying 3.3V power. -- vccio-supply: a phandle for the regulator supplying IO power. - -[0]: Documentation/devicetree/bindings/interrupt-controller/interrupts.txt -[1]: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt - -Example: - &i2c1 { - /* ... */ - - touchscreen@10 { - compatible = "elan,ekth3500"; - reg = <0x10>; - interrupt-parent = <&gpio4>; - interrupts = <0x0 IRQ_TYPE_EDGE_FALLING>; - wakeup-source; - }; - - /* ... 
*/ - }; diff --git a/Documentation/devicetree/bindings/input/touchscreen/elan,elants_i2c.yaml b/Documentation/devicetree/bindings/input/touchscreen/elan,elants_i2c.yaml new file mode 100644 index 0000000000000..a792d6377b1da --- /dev/null +++ b/Documentation/devicetree/bindings/input/touchscreen/elan,elants_i2c.yaml @@ -0,0 +1,69 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: "http://devicetree.org/schemas/input/touchscreen/elan,elants_i2c.yaml#" +$schema: "http://devicetree.org/meta-schemas/core.yaml#" + +title: Elantech I2C Touchscreen + +maintainers: + - David Heidelberg + +allOf: + - $ref: touchscreen.yaml# + +properties: + compatible: + enum: + - elan,ektf3624 + - elan,ekth3500 + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + wakeup-source: + type: boolean + description: touchscreen can be used as a wakeup source. + + reset-gpios: + description: reset gpio the chip is connected to. + + vcc33-supply: + description: a phandle for the regulator supplying 3.3V power. + + vccio-supply: + description: a phandle for the regulator supplying IO power. + + touchscreen-inverted-x: true + touchscreen-inverted-y: true + touchscreen-size-x: true + touchscreen-size-y: true + touchscreen-swapped-x-y: true + +additionalProperties: false + +required: + - compatible + - reg + - interrupts + +examples: + - | + #include + + i2c { + #address-cells = <1>; + #size-cells = <0>; + + touchscreen@10 { + compatible = "elan,ekth3500"; + reg = <0x10>; + + interrupt-parent = <&gpio4>; + interrupts = <0x0 IRQ_TYPE_EDGE_FALLING>; + wakeup-source; + }; + }; -- GitLab From 5fc70e350edd30fb22d2f9b4e6d680c5471890ff Mon Sep 17 00:00:00 2001 From: Jiada Wang Date: Mon, 11 May 2020 13:12:13 -0700 Subject: [PATCH 0444/1971] Input: introduce input_mt_report_slot_inactive() input_mt_report_slot_state() ignores "tool" argument when the slot is closed, which has caused a bit of confusion. Let's introduce input_mt_report_slot_inactive() to report inactive slot state. 
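As a minimal illustration of the intended conversion (taken from the per-driver hunks
below and the new helper added to include/linux/input/mt.h), releasing a slot changes
from passing a tool type that is ignored anyway to calling the new helper:

	/* before: the tool argument is ignored when the slot is inactive */
	input_mt_slot(input, slot);
	input_mt_report_slot_state(input, MT_TOOL_FINGER, false);

	/* after: the intent is explicit */
	input_mt_slot(input, slot);
	input_mt_report_slot_inactive(input);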
Suggested-by: Dmitry Torokhov Signed-off-by: Jiada Wang Link: https://lore.kernel.org/r/20200508055656.96389-2-jiada_wang@mentor.com Signed-off-by: Dmitry Torokhov --- drivers/hid/hid-alps.c | 3 +-- drivers/hid/hid-multitouch.c | 6 ++---- drivers/input/misc/xen-kbdfront.c | 2 +- drivers/input/mouse/elan_i2c_core.c | 2 +- drivers/input/touchscreen/atmel_mxt_ts.c | 7 +++---- drivers/input/touchscreen/cyttsp4_core.c | 5 ++--- drivers/input/touchscreen/cyttsp_core.c | 2 +- drivers/input/touchscreen/melfas_mip4.c | 4 ++-- drivers/input/touchscreen/mms114.c | 2 +- drivers/input/touchscreen/raspberrypi-ts.c | 2 +- drivers/input/touchscreen/stmfts.c | 2 +- include/linux/input/mt.h | 5 +++++ 12 files changed, 21 insertions(+), 21 deletions(-) diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c index fa704153cb00d..28588f74425ea 100644 --- a/drivers/hid/hid-alps.c +++ b/drivers/hid/hid-alps.c @@ -387,8 +387,7 @@ static int u1_raw_event(struct alps_dev *hdata, u8 *data, int size) input_report_abs(hdata->input, ABS_MT_PRESSURE, z); } else { - input_mt_report_slot_state(hdata->input, - MT_TOOL_FINGER, 0); + input_mt_report_slot_inactive(hdata->input); } } diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 362805ddf3777..e2ce790ff4a4c 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -896,7 +896,7 @@ static void mt_release_pending_palms(struct mt_device *td, clear_bit(slotnum, app->pending_palm_slots); input_mt_slot(input, slotnum); - input_mt_report_slot_state(input, MT_TOOL_PALM, false); + input_mt_report_slot_inactive(input); need_sync = true; } @@ -1640,9 +1640,7 @@ static void mt_release_contacts(struct hid_device *hid) if (mt) { for (i = 0; i < mt->num_slots; i++) { input_mt_slot(input_dev, i); - input_mt_report_slot_state(input_dev, - MT_TOOL_FINGER, - false); + input_mt_report_slot_inactive(input_dev); } input_mt_sync_frame(input_dev); input_sync(input_dev); diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c index 24bc5c5d876fc..a1bba722b234c 100644 --- a/drivers/input/misc/xen-kbdfront.c +++ b/drivers/input/misc/xen-kbdfront.c @@ -146,7 +146,7 @@ static void xenkbd_handle_mt_event(struct xenkbd_info *info, break; case XENKBD_MT_EV_UP: - input_mt_report_slot_state(info->mtouch, MT_TOOL_FINGER, false); + input_mt_report_slot_inactive(info->mtouch); break; case XENKBD_MT_EV_SYN: diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index 8719da5403834..3f9354baac4b3 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -938,7 +938,7 @@ static void elan_report_contact(struct elan_tp_data *data, input_report_abs(input, ABS_MT_TOUCH_MINOR, minor); } else { input_mt_slot(input, contact_num); - input_mt_report_slot_state(input, MT_TOOL_FINGER, false); + input_mt_report_slot_inactive(input); } } diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c index ae60442efda0d..a2189739e30f5 100644 --- a/drivers/input/touchscreen/atmel_mxt_ts.c +++ b/drivers/input/touchscreen/atmel_mxt_ts.c @@ -822,8 +822,7 @@ static void mxt_proc_t9_message(struct mxt_data *data, u8 *message) * have happened. 
*/ if (status & MXT_T9_RELEASE) { - input_mt_report_slot_state(input_dev, - MT_TOOL_FINGER, 0); + input_mt_report_slot_inactive(input_dev); mxt_input_sync(data); } @@ -839,7 +838,7 @@ static void mxt_proc_t9_message(struct mxt_data *data, u8 *message) input_report_abs(input_dev, ABS_MT_TOUCH_MAJOR, area); } else { /* Touch no longer active, close out slot */ - input_mt_report_slot_state(input_dev, MT_TOOL_FINGER, 0); + input_mt_report_slot_inactive(input_dev); } data->update_input = true; @@ -947,7 +946,7 @@ static void mxt_proc_t100_message(struct mxt_data *data, u8 *message) dev_dbg(dev, "[%u] release\n", id); /* close out slot */ - input_mt_report_slot_state(input_dev, 0, 0); + input_mt_report_slot_inactive(input_dev); } data->update_input = true; diff --git a/drivers/input/touchscreen/cyttsp4_core.c b/drivers/input/touchscreen/cyttsp4_core.c index 6bcffc930384a..02a73d9a4defe 100644 --- a/drivers/input/touchscreen/cyttsp4_core.c +++ b/drivers/input/touchscreen/cyttsp4_core.c @@ -744,8 +744,7 @@ static void cyttsp4_report_slot_liftoff(struct cyttsp4_mt_data *md, for (t = 0; t < max_slots; t++) { input_mt_slot(md->input, t); - input_mt_report_slot_state(md->input, - MT_TOOL_FINGER, false); + input_mt_report_slot_inactive(md->input); } } @@ -845,7 +844,7 @@ static void cyttsp4_final_sync(struct input_dev *input, int max_slots, int *ids) if (ids[t]) continue; input_mt_slot(input, t); - input_mt_report_slot_state(input, MT_TOOL_FINGER, false); + input_mt_report_slot_inactive(input); } input_sync(input); diff --git a/drivers/input/touchscreen/cyttsp_core.c b/drivers/input/touchscreen/cyttsp_core.c index 3f5d463dbeed6..697aa2c158f74 100644 --- a/drivers/input/touchscreen/cyttsp_core.c +++ b/drivers/input/touchscreen/cyttsp_core.c @@ -340,7 +340,7 @@ static void cyttsp_report_tchdata(struct cyttsp *ts) continue; input_mt_slot(input, i); - input_mt_report_slot_state(input, MT_TOOL_FINGER, false); + input_mt_report_slot_inactive(input); } input_sync(input); diff --git a/drivers/input/touchscreen/melfas_mip4.c b/drivers/input/touchscreen/melfas_mip4.c index 247c3aaba2d8f..f67efdd040b24 100644 --- a/drivers/input/touchscreen/melfas_mip4.c +++ b/drivers/input/touchscreen/melfas_mip4.c @@ -391,7 +391,7 @@ static void mip4_clear_input(struct mip4_ts *ts) /* Screen */ for (i = 0; i < MIP4_MAX_FINGERS; i++) { input_mt_slot(ts->input, i); - input_mt_report_slot_state(ts->input, MT_TOOL_FINGER, 0); + input_mt_report_slot_inactive(ts->input); } /* Keys */ @@ -534,7 +534,7 @@ static void mip4_report_touch(struct mip4_ts *ts, u8 *packet) } else { /* Release event */ input_mt_slot(ts->input, id); - input_mt_report_slot_state(ts->input, MT_TOOL_FINGER, 0); + input_mt_report_slot_inactive(ts->input); } input_mt_sync_frame(ts->input); diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c index b3dda1722bae1..d5e2d2a139a2a 100644 --- a/drivers/input/touchscreen/mms114.c +++ b/drivers/input/touchscreen/mms114.c @@ -559,7 +559,7 @@ static int __maybe_unused mms114_suspend(struct device *dev) /* Release all touch */ for (id = 0; id < MMS114_MAX_TOUCH; id++) { input_mt_slot(input_dev, id); - input_mt_report_slot_state(input_dev, MT_TOOL_FINGER, false); + input_mt_report_slot_inactive(input_dev); } input_mt_report_pointer_emulation(input_dev, true); diff --git a/drivers/input/touchscreen/raspberrypi-ts.c b/drivers/input/touchscreen/raspberrypi-ts.c index 0e2e08f3f433c..ef6aaed217cfb 100644 --- a/drivers/input/touchscreen/raspberrypi-ts.c +++ b/drivers/input/touchscreen/raspberrypi-ts.c 
@@ -100,7 +100,7 @@ static void rpi_ts_poll(struct input_dev *input) released_ids = ts->known_ids & ~modified_ids; for_each_set_bit(i, &released_ids, RPI_TS_MAX_SUPPORTED_POINTS) { input_mt_slot(input, i); - input_mt_report_slot_state(input, MT_TOOL_FINGER, 0); + input_mt_report_slot_inactive(input); modified_ids &= ~(BIT(i)); } ts->known_ids = modified_ids; diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c index b6f95f20f9244..b54cc64e4ea64 100644 --- a/drivers/input/touchscreen/stmfts.c +++ b/drivers/input/touchscreen/stmfts.c @@ -198,7 +198,7 @@ static void stmfts_report_contact_release(struct stmfts_data *sdata, u8 slot_id = (event[0] & STMFTS_MASK_TOUCH_ID) >> 4; input_mt_slot(sdata->input, slot_id); - input_mt_report_slot_state(sdata->input, MT_TOOL_FINGER, false); + input_mt_report_slot_inactive(sdata->input); input_sync(sdata->input); } diff --git a/include/linux/input/mt.h b/include/linux/input/mt.h index 9e409bb136421..3b8580bd33c14 100644 --- a/include/linux/input/mt.h +++ b/include/linux/input/mt.h @@ -100,6 +100,11 @@ static inline bool input_is_mt_axis(int axis) bool input_mt_report_slot_state(struct input_dev *dev, unsigned int tool_type, bool active); +static inline void input_mt_report_slot_inactive(struct input_dev *dev) +{ + input_mt_report_slot_state(dev, 0, false); +} + void input_mt_report_finger_count(struct input_dev *dev, int count); void input_mt_report_pointer_emulation(struct input_dev *dev, bool use_count); void input_mt_drop_unused(struct input_dev *dev); -- GitLab From 82dd45f5cb0bbc2a837a91dfdd1de1c585bd13b0 Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Thu, 2 Apr 2020 00:21:28 +0200 Subject: [PATCH 0445/1971] i2c: tegra: Use FIELD_PREP/FIELD_GET macros Using these macros helps increase readability of the code. 
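As a small before/after sketch of the pattern (the GENMASK()-based masks are the ones
introduced below; linux/bitfield.h provides FIELD_GET()/FIELD_PREP()):

	/* before: open-coded mask/shift pairs */
	tx_fifo_avail = (val & I2C_MST_FIFO_STATUS_TX_MASK) >>
			I2C_MST_FIFO_STATUS_TX_SHIFT;

	/* after: one GENMASK() definition, accessed with FIELD_GET()/FIELD_PREP() */
	#define I2C_MST_FIFO_STATUS_TX	GENMASK(23, 16)

	tx_fifo_avail = FIELD_GET(I2C_MST_FIFO_STATUS_TX, val);
	val |= FIELD_PREP(I2C_CNFG_DEBOUNCE_CNT, 2);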
Signed-off-by: Thierry Reding --- drivers/i2c/busses/i2c-tegra.c | 81 +++++++++++++++++----------------- 1 file changed, 40 insertions(+), 41 deletions(-) diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index 7c92e91a1cc25..f445b05accfa7 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -6,6 +6,7 @@ * Author: Colin Cross */ +#include #include #include #include @@ -29,11 +30,11 @@ #define BYTES_PER_FIFO_WORD 4 #define I2C_CNFG 0x000 -#define I2C_CNFG_DEBOUNCE_CNT_SHIFT 12 +#define I2C_CNFG_DEBOUNCE_CNT GENMASK(14, 12) #define I2C_CNFG_PACKET_MODE_EN BIT(10) #define I2C_CNFG_NEW_MASTER_FSM BIT(11) #define I2C_CNFG_MULTI_MASTER_MODE BIT(17) -#define I2C_STATUS 0x01C +#define I2C_STATUS 0x01c #define I2C_SL_CNFG 0x020 #define I2C_SL_CNFG_NACK BIT(1) #define I2C_SL_CNFG_NEWSL BIT(2) @@ -48,10 +49,8 @@ #define I2C_FIFO_CONTROL_TX_TRIG(x) (((x) - 1) << 5) #define I2C_FIFO_CONTROL_RX_TRIG(x) (((x) - 1) << 2) #define I2C_FIFO_STATUS 0x060 -#define I2C_FIFO_STATUS_TX_MASK 0xF0 -#define I2C_FIFO_STATUS_TX_SHIFT 4 -#define I2C_FIFO_STATUS_RX_MASK 0x0F -#define I2C_FIFO_STATUS_RX_SHIFT 0 +#define I2C_FIFO_STATUS_TX GENMASK(7, 4) +#define I2C_FIFO_STATUS_RX GENMASK(3, 0) #define I2C_INT_MASK 0x064 #define I2C_INT_STATUS 0x068 #define I2C_INT_BUS_CLR_DONE BIT(11) @@ -61,7 +60,8 @@ #define I2C_INT_TX_FIFO_DATA_REQ BIT(1) #define I2C_INT_RX_FIFO_DATA_REQ BIT(0) #define I2C_CLK_DIVISOR 0x06c -#define I2C_CLK_DIVISOR_STD_FAST_MODE_SHIFT 16 +#define I2C_CLK_DIVISOR_STD_FAST_MODE GENMASK(31, 16) +#define I2C_CLK_DIVISOR_HSMODE GENMASK(15, 0) #define DVC_CTRL_REG1 0x000 #define DVC_CTRL_REG1_INTR_EN BIT(10) @@ -77,10 +77,11 @@ #define I2C_ERR_UNKNOWN_INTERRUPT BIT(2) #define I2C_ERR_RX_BUFFER_OVERFLOW BIT(3) -#define PACKET_HEADER0_HEADER_SIZE_SHIFT 28 -#define PACKET_HEADER0_PACKET_ID_SHIFT 16 -#define PACKET_HEADER0_CONT_ID_SHIFT 12 -#define PACKET_HEADER0_PROTOCOL_I2C BIT(4) +#define PACKET_HEADER0_HEADER_SIZE GENMASK(29, 28) +#define PACKET_HEADER0_PACKET_ID GENMASK(23, 16) +#define PACKET_HEADER0_CONT_ID GENMASK(15, 12) +#define PACKET_HEADER0_PROTOCOL GENMASK(7, 4) +#define PACKET_HEADER0_PROTOCOL_I2C 1 #define I2C_HEADER_CONT_ON_NAK BIT(21) #define I2C_HEADER_READ BIT(19) @@ -91,21 +92,23 @@ #define I2C_HEADER_SLAVE_ADDR_SHIFT 1 #define I2C_BUS_CLEAR_CNFG 0x084 -#define I2C_BC_SCLK_THRESHOLD 9 -#define I2C_BC_SCLK_THRESHOLD_SHIFT 16 +#define I2C_BC_SCLK_THRESHOLD GENMASK(23, 16) #define I2C_BC_STOP_COND BIT(2) #define I2C_BC_TERMINATE BIT(1) #define I2C_BC_ENABLE BIT(0) #define I2C_BUS_CLEAR_STATUS 0x088 #define I2C_BC_STATUS BIT(0) -#define I2C_CONFIG_LOAD 0x08C +#define I2C_CONFIG_LOAD 0x08c #define I2C_MSTR_CONFIG_LOAD BIT(0) #define I2C_CLKEN_OVERRIDE 0x090 #define I2C_MST_CORE_CLKEN_OVR BIT(0) -#define I2C_CONFIG_LOAD_TIMEOUT 1000000 +#define I2C_INTERFACE_TIMING_0 0x094 +#define I2C_INTERFACE_TIMING_THIGH GENMASK(13, 8) +#define I2C_INTERFACE_TIMING_TLOW GENMASK(5, 0) +#define I2C_INTERFACE_TIMING_1 0x098 #define I2C_MST_FIFO_CONTROL 0x0b4 #define I2C_MST_FIFO_CONTROL_RX_FLUSH BIT(0) @@ -114,14 +117,11 @@ #define I2C_MST_FIFO_CONTROL_TX_TRIG(x) (((x) - 1) << 16) #define I2C_MST_FIFO_STATUS 0x0b8 -#define I2C_MST_FIFO_STATUS_RX_MASK 0xff -#define I2C_MST_FIFO_STATUS_RX_SHIFT 0 -#define I2C_MST_FIFO_STATUS_TX_MASK 0xff0000 -#define I2C_MST_FIFO_STATUS_TX_SHIFT 16 +#define I2C_MST_FIFO_STATUS_TX GENMASK(23, 16) +#define I2C_MST_FIFO_STATUS_RX GENMASK(7, 0) -#define I2C_INTERFACE_TIMING_0 0x94 -#define I2C_THIGH_SHIFT 8 -#define 
I2C_INTERFACE_TIMING_1 0x98 +/* configuration load timeout in microseconds */ +#define I2C_CONFIG_LOAD_TIMEOUT 1000000 /* Packet header size in bytes */ #define I2C_PACKET_HEADER_SIZE 12 @@ -495,12 +495,10 @@ static int tegra_i2c_empty_rx_fifo(struct tegra_i2c_dev *i2c_dev) if (i2c_dev->hw->has_mst_fifo) { val = i2c_readl(i2c_dev, I2C_MST_FIFO_STATUS); - rx_fifo_avail = (val & I2C_MST_FIFO_STATUS_RX_MASK) >> - I2C_MST_FIFO_STATUS_RX_SHIFT; + rx_fifo_avail = FIELD_GET(I2C_MST_FIFO_STATUS_RX, val); } else { val = i2c_readl(i2c_dev, I2C_FIFO_STATUS); - rx_fifo_avail = (val & I2C_FIFO_STATUS_RX_MASK) >> - I2C_FIFO_STATUS_RX_SHIFT; + rx_fifo_avail = FIELD_GET(I2C_FIFO_STATUS_RX, val); } /* Rounds down to not include partial word at the end of buf */ @@ -551,12 +549,10 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev) if (i2c_dev->hw->has_mst_fifo) { val = i2c_readl(i2c_dev, I2C_MST_FIFO_STATUS); - tx_fifo_avail = (val & I2C_MST_FIFO_STATUS_TX_MASK) >> - I2C_MST_FIFO_STATUS_TX_SHIFT; + tx_fifo_avail = FIELD_GET(I2C_MST_FIFO_STATUS_TX, val); } else { val = i2c_readl(i2c_dev, I2C_FIFO_STATUS); - tx_fifo_avail = (val & I2C_FIFO_STATUS_TX_MASK) >> - I2C_FIFO_STATUS_TX_SHIFT; + tx_fifo_avail = FIELD_GET(I2C_FIFO_STATUS_TX, val); } /* Rounds down to not include partial word at the end of buf */ @@ -719,7 +715,7 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev, bool clk_reinit) tegra_dvc_init(i2c_dev); val = I2C_CNFG_NEW_MASTER_FSM | I2C_CNFG_PACKET_MODE_EN | - (0x2 << I2C_CNFG_DEBOUNCE_CNT_SHIFT); + FIELD_PREP(I2C_CNFG_DEBOUNCE_CNT, 2); if (i2c_dev->hw->has_multi_master_mode) val |= I2C_CNFG_MULTI_MASTER_MODE; @@ -728,9 +724,10 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev, bool clk_reinit) i2c_writel(i2c_dev, 0, I2C_INT_MASK); /* Make sure clock divisor programmed correctly */ - clk_divisor = i2c_dev->hw->clk_divisor_hs_mode; - clk_divisor |= i2c_dev->clk_divisor_non_hs_mode << - I2C_CLK_DIVISOR_STD_FAST_MODE_SHIFT; + clk_divisor = FIELD_PREP(I2C_CLK_DIVISOR_HSMODE, + i2c_dev->hw->clk_divisor_hs_mode) | + FIELD_PREP(I2C_CLK_DIVISOR_STD_FAST_MODE, + i2c_dev->clk_divisor_non_hs_mode); i2c_writel(i2c_dev, clk_divisor, I2C_CLK_DIVISOR); if (i2c_dev->bus_clk_rate > I2C_MAX_STANDARD_MODE_FREQ && @@ -745,7 +742,8 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev, bool clk_reinit) } if (i2c_dev->hw->has_interface_timing_reg) { - val = (thigh << I2C_THIGH_SHIFT) | tlow; + val = FIELD_PREP(I2C_INTERFACE_TIMING_THIGH, thigh) | + FIELD_PREP(I2C_INTERFACE_TIMING_TLOW, tlow); i2c_writel(i2c_dev, val, I2C_INTERFACE_TIMING_0); } @@ -1054,8 +1052,8 @@ static int tegra_i2c_issue_bus_clear(struct i2c_adapter *adap) u32 reg; reinit_completion(&i2c_dev->msg_complete); - reg = (I2C_BC_SCLK_THRESHOLD << I2C_BC_SCLK_THRESHOLD_SHIFT) | - I2C_BC_STOP_COND | I2C_BC_TERMINATE; + reg = FIELD_PREP(I2C_BC_SCLK_THRESHOLD, 9) | I2C_BC_STOP_COND | + I2C_BC_TERMINATE; i2c_writel(i2c_dev, reg, I2C_BUS_CLEAR_CNFG); if (i2c_dev->hw->has_config_load_reg) { err = tegra_i2c_wait_for_config_load(i2c_dev); @@ -1148,10 +1146,11 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, } } - packet_header = (0 << PACKET_HEADER0_HEADER_SIZE_SHIFT) | - PACKET_HEADER0_PROTOCOL_I2C | - (i2c_dev->cont_id << PACKET_HEADER0_CONT_ID_SHIFT) | - (1 << PACKET_HEADER0_PACKET_ID_SHIFT); + packet_header = FIELD_PREP(PACKET_HEADER0_HEADER_SIZE, 0) | + FIELD_PREP(PACKET_HEADER0_PROTOCOL, + PACKET_HEADER0_PROTOCOL_I2C) | + FIELD_PREP(PACKET_HEADER0_CONT_ID, i2c_dev->cont_id) | + 
FIELD_PREP(PACKET_HEADER0_PACKET_ID, 1); if (dma && !i2c_dev->msg_read) *buffer++ = packet_header; else -- GitLab From c73178b93754edd8449dccd3faf05baafd4d3f0e Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Fri, 7 Jun 2019 15:56:23 +0200 Subject: [PATCH 0446/1971] i2c: tegra: Add support for the VI I2C on Tegra210 Tegra210 has an extra instance of the I2C controller that is in the domain of host1x and usually used for camera use-cases. The programming model for the VI variant of the controller is roughly the same as for the other variants, except that the I2C registers start at an offset and are spaced further apart. VI I2C also doesn't support slave mode. Signed-off-by: Thierry Reding --- drivers/i2c/busses/i2c-tegra.c | 97 +++++++++++++++++++++++++++++++++- 1 file changed, 95 insertions(+), 2 deletions(-) diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index f445b05accfa7..15772964a05f1 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -40,6 +40,7 @@ #define I2C_SL_CNFG_NEWSL BIT(2) #define I2C_SL_ADDR1 0x02c #define I2C_SL_ADDR2 0x030 +#define I2C_TLOW_SEXT 0x034 #define I2C_TX_FIFO 0x050 #define I2C_RX_FIFO 0x054 #define I2C_PACKET_TRANSFER_STATUS 0x058 @@ -109,6 +110,18 @@ #define I2C_INTERFACE_TIMING_THIGH GENMASK(13, 8) #define I2C_INTERFACE_TIMING_TLOW GENMASK(5, 0) #define I2C_INTERFACE_TIMING_1 0x098 +#define I2C_INTERFACE_TIMING_TBUF GENMASK(29, 24) +#define I2C_INTERFACE_TIMING_TSU_STO GENMASK(21, 16) +#define I2C_INTERFACE_TIMING_THD_STA GENMASK(13, 8) +#define I2C_INTERFACE_TIMING_TSU_STA GENMASK(5, 0) + +#define I2C_HS_INTERFACE_TIMING_0 0x09c +#define I2C_HS_INTERFACE_TIMING_THIGH GENMASK(13, 8) +#define I2C_HS_INTERFACE_TIMING_TLOW GENMASK(5, 0) +#define I2C_HS_INTERFACE_TIMING_1 0x0a0 +#define I2C_HS_INTERFACE_TIMING_TSU_STO GENMASK(21, 16) +#define I2C_HS_INTERFACE_TIMING_THD_STA GENMASK(13, 8) +#define I2C_HS_INTERFACE_TIMING_TSU_STA GENMASK(5, 0) #define I2C_MST_FIFO_CONTROL 0x0b4 #define I2C_MST_FIFO_CONTROL_RX_FLUSH BIT(0) @@ -230,6 +243,7 @@ struct tegra_i2c_hw_feature { * @cont_id: I2C controller ID, used for packet header * @irq: IRQ number of transfer complete interrupt * @is_dvc: identifies the DVC I2C controller, has a different register layout + * @is_vi: identifies the VI I2C controller, has a different register layout * @msg_complete: transfer completion notifier * @msg_err: error code for completed message * @msg_buf: pointer to current message data @@ -253,12 +267,14 @@ struct tegra_i2c_dev { struct i2c_adapter adapter; struct clk *div_clk; struct clk *fast_clk; + struct clk *slow_clk; struct reset_control *rst; void __iomem *base; phys_addr_t base_phys; int cont_id; int irq; int is_dvc; + bool is_vi; struct completion msg_complete; int msg_err; u8 *msg_buf; @@ -297,6 +313,8 @@ static unsigned long tegra_i2c_reg_addr(struct tegra_i2c_dev *i2c_dev, { if (i2c_dev->is_dvc) reg += (reg >= I2C_TX_FIFO) ? 
0x10 : 0x40; + else if (i2c_dev->is_vi) + reg = 0xc00 + (reg << 2); return reg; } @@ -646,6 +664,14 @@ static int __maybe_unused tegra_i2c_runtime_resume(struct device *dev) } } + if (i2c_dev->slow_clk) { + ret = clk_enable(i2c_dev->slow_clk); + if (ret < 0) { + dev_err(dev, "failed to enable slow clock: %d\n", ret); + return ret; + } + } + ret = clk_enable(i2c_dev->div_clk); if (ret < 0) { dev_err(i2c_dev->dev, @@ -662,6 +688,10 @@ static int __maybe_unused tegra_i2c_runtime_suspend(struct device *dev) struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev); clk_disable(i2c_dev->div_clk); + + if (i2c_dev->slow_clk) + clk_disable(i2c_dev->slow_clk); + if (!i2c_dev->hw->has_single_clk_source) clk_disable(i2c_dev->fast_clk); @@ -699,6 +729,35 @@ static int tegra_i2c_wait_for_config_load(struct tegra_i2c_dev *i2c_dev) return 0; } +static void tegra_i2c_vi_init(struct tegra_i2c_dev *i2c_dev) +{ + u32 value; + + value = FIELD_PREP(I2C_INTERFACE_TIMING_THIGH, 2) | + FIELD_PREP(I2C_INTERFACE_TIMING_TLOW, 4); + i2c_writel(i2c_dev, value, I2C_INTERFACE_TIMING_0); + + value = FIELD_PREP(I2C_INTERFACE_TIMING_TBUF, 4) | + FIELD_PREP(I2C_INTERFACE_TIMING_TSU_STO, 7) | + FIELD_PREP(I2C_INTERFACE_TIMING_THD_STA, 4) | + FIELD_PREP(I2C_INTERFACE_TIMING_TSU_STA, 4); + i2c_writel(i2c_dev, value, I2C_INTERFACE_TIMING_1); + + value = FIELD_PREP(I2C_HS_INTERFACE_TIMING_THIGH, 3) | + FIELD_PREP(I2C_HS_INTERFACE_TIMING_TLOW, 8); + i2c_writel(i2c_dev, value, I2C_HS_INTERFACE_TIMING_0); + + value = FIELD_PREP(I2C_HS_INTERFACE_TIMING_TSU_STO, 11) | + FIELD_PREP(I2C_HS_INTERFACE_TIMING_THD_STA, 11) | + FIELD_PREP(I2C_HS_INTERFACE_TIMING_TSU_STA, 11); + i2c_writel(i2c_dev, value, I2C_HS_INTERFACE_TIMING_1); + + value = FIELD_PREP(I2C_BC_SCLK_THRESHOLD, 9) | I2C_BC_STOP_COND; + i2c_writel(i2c_dev, value, I2C_BUS_CLEAR_CNFG); + + i2c_writel(i2c_dev, 0x0, I2C_TLOW_SEXT); +} + static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev, bool clk_reinit) { u32 val; @@ -723,6 +782,9 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev, bool clk_reinit) i2c_writel(i2c_dev, val, I2C_CNFG); i2c_writel(i2c_dev, 0, I2C_INT_MASK); + if (i2c_dev->is_vi) + tegra_i2c_vi_init(i2c_dev); + /* Make sure clock divisor programmed correctly */ clk_divisor = FIELD_PREP(I2C_CLK_DIVISOR_HSMODE, i2c_dev->hw->clk_divisor_hs_mode) | @@ -766,7 +828,7 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev, bool clk_reinit) } } - if (!i2c_dev->is_dvc) { + if (!i2c_dev->is_dvc && !i2c_dev->is_vi) { u32 sl_cfg = i2c_readl(i2c_dev, I2C_SL_CNFG); sl_cfg |= I2C_SL_CNFG_NACK | I2C_SL_CNFG_NEWSL; @@ -1555,6 +1617,7 @@ static const struct tegra_i2c_hw_feature tegra194_i2c_hw = { static const struct of_device_id tegra_i2c_of_match[] = { { .compatible = "nvidia,tegra194-i2c", .data = &tegra194_i2c_hw, }, { .compatible = "nvidia,tegra186-i2c", .data = &tegra186_i2c_hw, }, + { .compatible = "nvidia,tegra210-i2c-vi", .data = &tegra210_i2c_hw, }, { .compatible = "nvidia,tegra210-i2c", .data = &tegra210_i2c_hw, }, { .compatible = "nvidia,tegra124-i2c", .data = &tegra124_i2c_hw, }, { .compatible = "nvidia,tegra114-i2c", .data = &tegra114_i2c_hw, }, @@ -1567,6 +1630,7 @@ MODULE_DEVICE_TABLE(of, tegra_i2c_of_match); static int tegra_i2c_probe(struct platform_device *pdev) { + struct device *dev = &pdev->dev; struct tegra_i2c_dev *i2c_dev; struct resource *res; struct clk *div_clk; @@ -1622,6 +1686,8 @@ static int tegra_i2c_probe(struct platform_device *pdev) i2c_dev->hw = of_device_get_match_data(&pdev->dev); i2c_dev->is_dvc = 
of_device_is_compatible(pdev->dev.of_node, "nvidia,tegra20-i2c-dvc"); + i2c_dev->is_vi = of_device_is_compatible(dev->of_node, + "nvidia,tegra210-i2c-vi"); i2c_dev->adapter.quirks = i2c_dev->hw->quirks; i2c_dev->dma_buf_size = i2c_dev->adapter.quirks->max_write_len + I2C_PACKET_HEADER_SIZE; @@ -1637,6 +1703,17 @@ static int tegra_i2c_probe(struct platform_device *pdev) i2c_dev->fast_clk = fast_clk; } + if (i2c_dev->is_vi) { + i2c_dev->slow_clk = devm_clk_get(dev, "slow"); + if (IS_ERR(i2c_dev->slow_clk)) { + if (PTR_ERR(i2c_dev->slow_clk) != -EPROBE_DEFER) + dev_err(dev, "failed to get slow clock: %ld\n", + PTR_ERR(i2c_dev->slow_clk)); + + return PTR_ERR(i2c_dev->slow_clk); + } + } + platform_set_drvdata(pdev, i2c_dev); if (!i2c_dev->hw->has_single_clk_source) { @@ -1647,6 +1724,14 @@ static int tegra_i2c_probe(struct platform_device *pdev) } } + if (i2c_dev->slow_clk) { + ret = clk_prepare(i2c_dev->slow_clk); + if (ret < 0) { + dev_err(dev, "failed to prepare slow clock: %d\n", ret); + goto unprepare_fast_clk; + } + } + if (i2c_dev->bus_clk_rate > I2C_MAX_FAST_MODE_FREQ && i2c_dev->bus_clk_rate <= I2C_MAX_FAST_MODE_PLUS_FREQ) i2c_dev->clk_divisor_non_hs_mode = @@ -1662,7 +1747,7 @@ static int tegra_i2c_probe(struct platform_device *pdev) ret = clk_prepare(i2c_dev->div_clk); if (ret < 0) { dev_err(i2c_dev->dev, "Clock prepare failed %d\n", ret); - goto unprepare_fast_clk; + goto unprepare_slow_clk; } pm_runtime_irq_safe(&pdev->dev); @@ -1749,6 +1834,10 @@ disable_rpm: unprepare_div_clk: clk_unprepare(i2c_dev->div_clk); +unprepare_slow_clk: + if (i2c_dev->is_vi) + clk_unprepare(i2c_dev->slow_clk); + unprepare_fast_clk: if (!i2c_dev->hw->has_single_clk_source) clk_unprepare(i2c_dev->fast_clk); @@ -1770,6 +1859,10 @@ static int tegra_i2c_remove(struct platform_device *pdev) tegra_i2c_runtime_suspend(&pdev->dev); clk_unprepare(i2c_dev->div_clk); + + if (i2c_dev->slow_clk) + clk_unprepare(i2c_dev->slow_clk); + if (!i2c_dev->hw->has_single_clk_source) clk_unprepare(i2c_dev->fast_clk); -- GitLab From 3dcbd36fa34ce9124ec51accd835130251f74213 Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Fri, 28 Feb 2020 12:43:51 +0100 Subject: [PATCH 0447/1971] clk: tegra: Rename Tegra124 EMC clock source file This code is only used on Tegra124, so rename it accordingly to make it more consistent with other file names. While at it, also get rid of the TEGRA_CLK_EMC Kconfig symbol that's really just an alias for TEGRA124_EMC. 
Signed-off-by: Thierry Reding --- drivers/clk/tegra/Kconfig | 4 ---- drivers/clk/tegra/Makefile | 2 +- drivers/clk/tegra/{clk-emc.c => clk-tegra124-emc.c} | 0 drivers/clk/tegra/clk.h | 2 +- 4 files changed, 2 insertions(+), 6 deletions(-) rename drivers/clk/tegra/{clk-emc.c => clk-tegra124-emc.c} (100%) diff --git a/drivers/clk/tegra/Kconfig b/drivers/clk/tegra/Kconfig index 4d99a87704857..deaa4605824c9 100644 --- a/drivers/clk/tegra/Kconfig +++ b/drivers/clk/tegra/Kconfig @@ -1,8 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only -config TEGRA_CLK_EMC - def_bool y - depends on TEGRA124_EMC - config CLK_TEGRA_BPMP def_bool y depends on TEGRA_BPMP diff --git a/drivers/clk/tegra/Makefile b/drivers/clk/tegra/Makefile index 1f7c30f87ece4..7f5f5ec33739e 100644 --- a/drivers/clk/tegra/Makefile +++ b/drivers/clk/tegra/Makefile @@ -14,7 +14,6 @@ obj-y += clk-tegra-audio.o obj-y += clk-tegra-periph.o obj-y += clk-tegra-fixed.o obj-y += clk-tegra-super-gen4.o -obj-$(CONFIG_TEGRA_CLK_EMC) += clk-emc.o obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += clk-tegra20.o obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += clk-tegra20-emc.o obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += clk-tegra30.o @@ -22,6 +21,7 @@ obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += clk-tegra20-emc.o obj-$(CONFIG_ARCH_TEGRA_114_SOC) += clk-tegra114.o obj-$(CONFIG_ARCH_TEGRA_124_SOC) += clk-tegra124.o obj-$(CONFIG_TEGRA_CLK_DFLL) += clk-tegra124-dfll-fcpu.o +obj-$(CONFIG_TEGRA124_EMC) += clk-tegra124-emc.o obj-$(CONFIG_ARCH_TEGRA_132_SOC) += clk-tegra124.o obj-y += cvb.o obj-$(CONFIG_ARCH_TEGRA_210_SOC) += clk-tegra210.o diff --git a/drivers/clk/tegra/clk-emc.c b/drivers/clk/tegra/clk-tegra124-emc.c similarity index 100% rename from drivers/clk/tegra/clk-emc.c rename to drivers/clk/tegra/clk-tegra124-emc.c diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h index 2c9a68302e02c..1d9f1bc183349 100644 --- a/drivers/clk/tegra/clk.h +++ b/drivers/clk/tegra/clk.h @@ -866,7 +866,7 @@ void tegra_super_clk_gen5_init(void __iomem *clk_base, void __iomem *pmc_base, struct tegra_clk *tegra_clks, struct tegra_clk_pll_params *pll_params); -#ifdef CONFIG_TEGRA_CLK_EMC +#ifdef CONFIG_TEGRA124_EMC struct clk *tegra_clk_register_emc(void __iomem *base, struct device_node *np, spinlock_t *lock); #else -- GitLab From cd4d6f357545bc03112265b19e5ed50592812986 Mon Sep 17 00:00:00 2001 From: Joseph Lo Date: Wed, 29 May 2019 16:21:33 +0800 Subject: [PATCH 0448/1971] clk: tegra: Add PLLP_UD and PLLMB_UD for Tegra210 Introduce the low jitter path of PLLP and PLLMB which can be used as EMC clock source. 
Signed-off-by: Joseph Lo Signed-off-by: Thierry Reding --- drivers/clk/tegra/clk-tegra210.c | 11 +++++++++++ include/dt-bindings/clock/tegra210-car.h | 4 ++-- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c index defe3b7ebfa49..57d97e87d870e 100644 --- a/drivers/clk/tegra/clk-tegra210.c +++ b/drivers/clk/tegra/clk-tegra210.c @@ -3153,6 +3153,17 @@ static void __init tegra210_pll_init(void __iomem *clk_base, clk_register_clkdev(clk, "pll_m_ud", NULL); clks[TEGRA210_CLK_PLL_M_UD] = clk; + /* PLLMB_UD */ + clk = clk_register_fixed_factor(NULL, "pll_mb_ud", "pll_mb", + CLK_SET_RATE_PARENT, 1, 1); + clk_register_clkdev(clk, "pll_mb_ud", NULL); + clks[TEGRA210_CLK_PLL_MB_UD] = clk; + + /* PLLP_UD */ + clk = clk_register_fixed_factor(NULL, "pll_p_ud", "pll_p", + 0, 1, 1); + clks[TEGRA210_CLK_PLL_P_UD] = clk; + /* PLLU_VCO */ if (!tegra210_init_pllu()) { clk = clk_register_fixed_rate(NULL, "pll_u_vco", "pll_ref", 0, diff --git a/include/dt-bindings/clock/tegra210-car.h b/include/dt-bindings/clock/tegra210-car.h index 99c598694923e..54441fcd0b94a 100644 --- a/include/dt-bindings/clock/tegra210-car.h +++ b/include/dt-bindings/clock/tegra210-car.h @@ -351,8 +351,8 @@ #define TEGRA210_CLK_PLL_P_OUT_XUSB 317 #define TEGRA210_CLK_XUSB_SSP_SRC 318 #define TEGRA210_CLK_PLL_RE_OUT1 319 -/* 320 */ -/* 321 */ +#define TEGRA210_CLK_PLL_MB_UD 320 +#define TEGRA210_CLK_PLL_P_UD 321 #define TEGRA210_CLK_ISP 322 #define TEGRA210_CLK_PLL_A_OUT_ADSP 323 #define TEGRA210_CLK_PLL_A_OUT0_OUT_ADSP 324 -- GitLab From a3cba697a2a09e6769996d5265991a3228004d92 Mon Sep 17 00:00:00 2001 From: Joseph Lo Date: Wed, 29 May 2019 16:21:34 +0800 Subject: [PATCH 0449/1971] clk: tegra: Export functions for EMC clock scaling Export functions to allow accessing the CAR register required by EMC clock scaling. These functions will be used to access the CAR register as part of the scaling sequence. Signed-off-by: Joseph Lo Signed-off-by: Thierry Reding --- drivers/clk/tegra/clk-tegra210.c | 26 ++++++++++++++++++++++++++ include/linux/clk/tegra.h | 3 +++ 2 files changed, 29 insertions(+) diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c index 57d97e87d870e..798920ec50e96 100644 --- a/drivers/clk/tegra/clk-tegra210.c +++ b/drivers/clk/tegra/clk-tegra210.c @@ -37,6 +37,7 @@ #define CLK_SOURCE_LA 0x1f8 #define CLK_SOURCE_SDMMC2 0x154 #define CLK_SOURCE_SDMMC4 0x164 +#define CLK_SOURCE_EMC_DLL 0x664 #define PLLC_BASE 0x80 #define PLLC_OUT 0x84 @@ -227,6 +228,10 @@ #define RST_DFLL_DVCO 0x2f4 #define DVFS_DFLL_RESET_SHIFT 0 +#define CLK_RST_CONTROLLER_CLK_OUT_ENB_X_SET 0x284 +#define CLK_RST_CONTROLLER_CLK_OUT_ENB_X_CLR 0x288 +#define CLK_OUT_ENB_X_CLK_ENB_EMC_DLL BIT(14) + #define CLK_RST_CONTROLLER_RST_DEV_Y_SET 0x2a8 #define CLK_RST_CONTROLLER_RST_DEV_Y_CLR 0x2ac #define CPU_SOFTRST_CTRL 0x380 @@ -555,6 +560,27 @@ void tegra210_set_sata_pll_seq_sw(bool state) } EXPORT_SYMBOL_GPL(tegra210_set_sata_pll_seq_sw); +void tegra210_clk_emc_dll_enable(bool flag) +{ + u32 offset = flag ? 
CLK_RST_CONTROLLER_CLK_OUT_ENB_X_SET : + CLK_RST_CONTROLLER_CLK_OUT_ENB_X_CLR; + + writel_relaxed(CLK_OUT_ENB_X_CLK_ENB_EMC_DLL, clk_base + offset); +} +EXPORT_SYMBOL_GPL(tegra210_clk_emc_dll_enable); + +void tegra210_clk_emc_dll_update_setting(u32 emc_dll_src_value) +{ + writel_relaxed(emc_dll_src_value, clk_base + CLK_SOURCE_EMC_DLL); +} +EXPORT_SYMBOL_GPL(tegra210_clk_emc_dll_update_setting); + +void tegra210_clk_emc_update_setting(u32 emc_src_value) +{ + writel_relaxed(emc_src_value, clk_base + CLK_SOURCE_EMC); +} +EXPORT_SYMBOL_GPL(tegra210_clk_emc_update_setting); + static void tegra210_generic_mbist_war(struct tegra210_domain_mbist_war *mbist) { u32 val; diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h index 2b1b352400749..5b0bdb4134604 100644 --- a/include/linux/clk/tegra.h +++ b/include/linux/clk/tegra.h @@ -131,6 +131,9 @@ extern void tegra210_set_sata_pll_seq_sw(bool state); extern void tegra210_put_utmipll_in_iddq(void); extern void tegra210_put_utmipll_out_iddq(void); extern int tegra210_clk_handle_mbist_war(unsigned int id); +extern void tegra210_clk_emc_dll_enable(bool flag); +extern void tegra210_clk_emc_dll_update_setting(u32 emc_dll_src_value); +extern void tegra210_clk_emc_update_setting(u32 emc_src_value); struct clk; -- GitLab From 0ac65fc946d3a15ff30cea28b38a00b9ba98217b Mon Sep 17 00:00:00 2001 From: Joseph Lo Date: Wed, 29 May 2019 16:21:35 +0800 Subject: [PATCH 0450/1971] clk: tegra: Implement Tegra210 EMC clock The EMC clock needs to carefully coordinate with the EMC controller programming to make sure external memory can be properly clocked. Do so by hooking up the EMC clock with an EMC provider that will specify which rates are supported by the EMC and provide a callback to use for setting the clock rate at the EMC. Based on work by Peter De Schrijver . Signed-off-by: Joseph Lo Signed-off-by: Thierry Reding --- drivers/clk/tegra/Makefile | 1 + drivers/clk/tegra/clk-tegra210-emc.c | 369 +++++++++++++++++++++++++++ drivers/clk/tegra/clk.h | 3 + include/linux/clk/tegra.h | 24 ++ 4 files changed, 397 insertions(+) create mode 100644 drivers/clk/tegra/clk-tegra210-emc.c diff --git a/drivers/clk/tegra/Makefile b/drivers/clk/tegra/Makefile index 7f5f5ec33739e..6e1200b3bb682 100644 --- a/drivers/clk/tegra/Makefile +++ b/drivers/clk/tegra/Makefile @@ -25,5 +25,6 @@ obj-$(CONFIG_TEGRA124_EMC) += clk-tegra124-emc.o obj-$(CONFIG_ARCH_TEGRA_132_SOC) += clk-tegra124.o obj-y += cvb.o obj-$(CONFIG_ARCH_TEGRA_210_SOC) += clk-tegra210.o +obj-$(CONFIG_ARCH_TEGRA_210_SOC) += clk-tegra210-emc.o obj-$(CONFIG_CLK_TEGRA_BPMP) += clk-bpmp.o obj-y += clk-utils.o diff --git a/drivers/clk/tegra/clk-tegra210-emc.c b/drivers/clk/tegra/clk-tegra210-emc.c new file mode 100644 index 0000000000000..352a2c3fc3740 --- /dev/null +++ b/drivers/clk/tegra/clk-tegra210-emc.c @@ -0,0 +1,369 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define CLK_SOURCE_EMC 0x19c +#define CLK_SOURCE_EMC_2X_CLK_SRC GENMASK(31, 29) +#define CLK_SOURCE_EMC_MC_EMC_SAME_FREQ BIT(16) +#define CLK_SOURCE_EMC_2X_CLK_DIVISOR GENMASK(7, 0) + +#define CLK_SRC_PLLM 0 +#define CLK_SRC_PLLC 1 +#define CLK_SRC_PLLP 2 +#define CLK_SRC_CLK_M 3 +#define CLK_SRC_PLLM_UD 4 +#define CLK_SRC_PLLMB_UD 5 +#define CLK_SRC_PLLMB 6 +#define CLK_SRC_PLLP_UD 7 + +struct tegra210_clk_emc { + struct clk_hw hw; + void __iomem *regs; + + struct tegra210_clk_emc_provider *provider; + + struct clk *parents[8]; +}; + +static inline struct tegra210_clk_emc * +to_tegra210_clk_emc(struct clk_hw *hw) +{ + return container_of(hw, struct tegra210_clk_emc, hw); +} + +static const char *tegra210_clk_emc_parents[] = { + "pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud", "pll_mb_ud", + "pll_mb", "pll_p_ud", +}; + +static u8 tegra210_clk_emc_get_parent(struct clk_hw *hw) +{ + struct tegra210_clk_emc *emc = to_tegra210_clk_emc(hw); + u32 value; + u8 src; + + value = readl_relaxed(emc->regs + CLK_SOURCE_EMC); + src = FIELD_GET(CLK_SOURCE_EMC_2X_CLK_SRC, value); + + return src; +} + +static unsigned long tegra210_clk_emc_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct tegra210_clk_emc *emc = to_tegra210_clk_emc(hw); + u32 value, div; + + /* + * CCF assumes that neither the parent nor its rate will change during + * ->set_rate(), so the parent rate passed in here was cached from the + * parent before the ->set_rate() call. + * + * This can lead to wrong results being reported for the EMC clock if + * the parent and/or parent rate have changed as part of the EMC rate + * change sequence. Fix this by overriding the parent clock with what + * we know to be the correct value after the rate change. + */ + parent_rate = clk_hw_get_rate(clk_hw_get_parent(hw)); + + value = readl_relaxed(emc->regs + CLK_SOURCE_EMC); + + div = FIELD_GET(CLK_SOURCE_EMC_2X_CLK_DIVISOR, value); + div += 2; + + return DIV_ROUND_UP(parent_rate * 2, div); +} + +static long tegra210_clk_emc_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + struct tegra210_clk_emc *emc = to_tegra210_clk_emc(hw); + struct tegra210_clk_emc_provider *provider = emc->provider; + unsigned int i; + + if (!provider || !provider->configs || provider->num_configs == 0) + return clk_hw_get_rate(hw); + + for (i = 0; i < provider->num_configs; i++) { + if (provider->configs[i].rate >= rate) + return provider->configs[i].rate; + } + + return provider->configs[i - 1].rate; +} + +static struct clk *tegra210_clk_emc_find_parent(struct tegra210_clk_emc *emc, + u8 index) +{ + struct clk_hw *parent = clk_hw_get_parent_by_index(&emc->hw, index); + const char *name = clk_hw_get_name(parent); + + /* XXX implement cache? 
*/ + + return __clk_lookup(name); +} + +static int tegra210_clk_emc_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct tegra210_clk_emc *emc = to_tegra210_clk_emc(hw); + struct tegra210_clk_emc_provider *provider = emc->provider; + struct tegra210_clk_emc_config *config; + struct device *dev = provider->dev; + struct clk_hw *old, *new, *parent; + u8 old_idx, new_idx, index; + struct clk *clk; + unsigned int i; + int err; + + if (!provider || !provider->configs || provider->num_configs == 0) + return -EINVAL; + + for (i = 0; i < provider->num_configs; i++) { + if (provider->configs[i].rate >= rate) { + config = &provider->configs[i]; + break; + } + } + + if (i == provider->num_configs) + config = &provider->configs[i - 1]; + + old_idx = tegra210_clk_emc_get_parent(hw); + new_idx = FIELD_GET(CLK_SOURCE_EMC_2X_CLK_SRC, config->value); + + old = clk_hw_get_parent_by_index(hw, old_idx); + new = clk_hw_get_parent_by_index(hw, new_idx); + + /* if the rate has changed... */ + if (config->parent_rate != clk_hw_get_rate(old)) { + /* ... but the clock source remains the same ... */ + if (new_idx == old_idx) { + /* ... switch to the alternative clock source. */ + switch (new_idx) { + case CLK_SRC_PLLM: + new_idx = CLK_SRC_PLLMB; + break; + + case CLK_SRC_PLLM_UD: + new_idx = CLK_SRC_PLLMB_UD; + break; + + case CLK_SRC_PLLMB_UD: + new_idx = CLK_SRC_PLLM_UD; + break; + + case CLK_SRC_PLLMB: + new_idx = CLK_SRC_PLLM; + break; + } + + /* + * This should never happen because we can't deal with + * it. + */ + if (WARN_ON(new_idx == old_idx)) + return -EINVAL; + + new = clk_hw_get_parent_by_index(hw, new_idx); + } + + index = new_idx; + parent = new; + } else { + index = old_idx; + parent = old; + } + + clk = tegra210_clk_emc_find_parent(emc, index); + if (IS_ERR(clk)) { + err = PTR_ERR(clk); + dev_err(dev, "failed to get parent clock for index %u: %d\n", + index, err); + return err; + } + + /* set the new parent clock to the required rate */ + if (clk_get_rate(clk) != config->parent_rate) { + err = clk_set_rate(clk, config->parent_rate); + if (err < 0) { + dev_err(dev, "failed to set rate %lu Hz for %pC: %d\n", + config->parent_rate, clk, err); + return err; + } + } + + /* enable the new parent clock */ + if (parent != old) { + err = clk_prepare_enable(clk); + if (err < 0) { + dev_err(dev, "failed to enable parent clock %pC: %d\n", + clk, err); + return err; + } + } + + /* update the EMC source configuration to reflect the new parent */ + config->value &= ~CLK_SOURCE_EMC_2X_CLK_SRC; + config->value |= FIELD_PREP(CLK_SOURCE_EMC_2X_CLK_SRC, index); + + /* + * Finally, switch the EMC programming with both old and new parent + * clocks enabled. + */ + err = provider->set_rate(dev, config); + if (err < 0) { + dev_err(dev, "failed to set EMC rate to %lu Hz: %d\n", rate, + err); + + /* + * If we're unable to switch to the new EMC frequency, we no + * longer need the new parent to be enabled. 
+ */ + if (parent != old) + clk_disable_unprepare(clk); + + return err; + } + + /* reparent to new parent clock and disable the old parent clock */ + if (parent != old) { + clk = tegra210_clk_emc_find_parent(emc, old_idx); + if (IS_ERR(clk)) { + err = PTR_ERR(clk); + dev_err(dev, + "failed to get parent clock for index %u: %d\n", + old_idx, err); + return err; + } + + clk_hw_reparent(hw, parent); + clk_disable_unprepare(clk); + } + + return err; +} + +static const struct clk_ops tegra210_clk_emc_ops = { + .get_parent = tegra210_clk_emc_get_parent, + .recalc_rate = tegra210_clk_emc_recalc_rate, + .round_rate = tegra210_clk_emc_round_rate, + .set_rate = tegra210_clk_emc_set_rate, +}; + +struct clk *tegra210_clk_register_emc(struct device_node *np, + void __iomem *regs) +{ + struct tegra210_clk_emc *emc; + struct clk_init_data init; + struct clk *clk; + + emc = kzalloc(sizeof(*emc), GFP_KERNEL); + if (!emc) + return ERR_PTR(-ENOMEM); + + emc->regs = regs; + + init.name = "emc"; + init.ops = &tegra210_clk_emc_ops; + init.flags = CLK_IS_CRITICAL | CLK_GET_RATE_NOCACHE; + init.parent_names = tegra210_clk_emc_parents; + init.num_parents = ARRAY_SIZE(tegra210_clk_emc_parents); + emc->hw.init = &init; + + clk = clk_register(NULL, &emc->hw); + if (IS_ERR(clk)) { + kfree(emc); + return clk; + } + + return clk; +} + +int tegra210_clk_emc_attach(struct clk *clk, + struct tegra210_clk_emc_provider *provider) +{ + struct clk_hw *hw = __clk_get_hw(clk); + struct tegra210_clk_emc *emc = to_tegra210_clk_emc(hw); + struct device *dev = provider->dev; + unsigned int i; + int err; + + if (!try_module_get(provider->owner)) + return -ENODEV; + + for (i = 0; i < provider->num_configs; i++) { + struct tegra210_clk_emc_config *config = &provider->configs[i]; + struct clk_hw *parent; + bool same_freq; + u8 div, src; + + div = FIELD_GET(CLK_SOURCE_EMC_2X_CLK_DIVISOR, config->value); + src = FIELD_GET(CLK_SOURCE_EMC_2X_CLK_SRC, config->value); + + /* do basic sanity checking on the EMC timings */ + if (div & 0x1) { + dev_err(dev, "invalid odd divider %u for rate %lu Hz\n", + div, config->rate); + err = -EINVAL; + goto put; + } + + same_freq = config->value & CLK_SOURCE_EMC_MC_EMC_SAME_FREQ; + + if (same_freq != config->same_freq) { + dev_err(dev, + "ambiguous EMC to MC ratio for rate %lu Hz\n", + config->rate); + err = -EINVAL; + goto put; + } + + parent = clk_hw_get_parent_by_index(hw, src); + config->parent = src; + + if (src == CLK_SRC_PLLM || src == CLK_SRC_PLLM_UD) { + config->parent_rate = config->rate * (1 + div / 2); + } else { + unsigned long rate = config->rate * (1 + div / 2); + + config->parent_rate = clk_hw_get_rate(parent); + + if (config->parent_rate != rate) { + dev_err(dev, + "rate %lu Hz does not match input\n", + config->rate); + err = -EINVAL; + goto put; + } + } + } + + emc->provider = provider; + + return 0; + +put: + module_put(provider->owner); + return err; +} +EXPORT_SYMBOL_GPL(tegra210_clk_emc_attach); + +void tegra210_clk_emc_detach(struct clk *clk) +{ + struct tegra210_clk_emc *emc = to_tegra210_clk_emc(__clk_get_hw(clk)); + + module_put(emc->provider->owner); + emc->provider = NULL; +} +EXPORT_SYMBOL_GPL(tegra210_clk_emc_detach); diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h index 1d9f1bc183349..09cc700bab415 100644 --- a/drivers/clk/tegra/clk.h +++ b/drivers/clk/tegra/clk.h @@ -907,4 +907,7 @@ void tegra_clk_periph_resume(void); bool tegra20_clk_emc_driver_available(struct clk_hw *emc_hw); struct clk *tegra20_clk_register_emc(void __iomem *ioaddr, bool low_jitter); 
+struct clk *tegra210_clk_register_emc(struct device_node *np, + void __iomem *regs); + #endif /* TEGRA_CLK_H */ diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h index 5b0bdb4134604..3f01d43f05980 100644 --- a/include/linux/clk/tegra.h +++ b/include/linux/clk/tegra.h @@ -146,4 +146,28 @@ void tegra20_clk_set_emc_round_callback(tegra20_clk_emc_round_cb *round_cb, void *cb_arg); int tegra20_clk_prepare_emc_mc_same_freq(struct clk *emc_clk, bool same); +struct tegra210_clk_emc_config { + unsigned long rate; + bool same_freq; + u32 value; + + unsigned long parent_rate; + u8 parent; +}; + +struct tegra210_clk_emc_provider { + struct module *owner; + struct device *dev; + + struct tegra210_clk_emc_config *configs; + unsigned int num_configs; + + int (*set_rate)(struct device *dev, + const struct tegra210_clk_emc_config *config); +}; + +int tegra210_clk_emc_attach(struct clk *clk, + struct tegra210_clk_emc_provider *provider); +void tegra210_clk_emc_detach(struct clk *clk); + #endif /* __LINUX_CLK_TEGRA_H_ */ -- GitLab From 1d3e3c4e129eeab578c61a9d9a3afea23aec14e6 Mon Sep 17 00:00:00 2001 From: Joseph Lo Date: Wed, 29 May 2019 16:21:38 +0800 Subject: [PATCH 0451/1971] clk: tegra: Remove the old emc_mux clock for Tegra210 Remove the old emc_mux clock and don't use the common EMC clock definition. This will be replaced by a new clock defined in the EMC driver. Signed-off-by: Joseph Lo Signed-off-by: Thierry Reding --- drivers/clk/tegra/clk-tegra210.c | 50 ++++++++++++++++++++------------ 1 file changed, 31 insertions(+), 19 deletions(-) diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c index 798920ec50e96..f2bb188fbc6a1 100644 --- a/drivers/clk/tegra/clk-tegra210.c +++ b/drivers/clk/tegra/clk-tegra210.c @@ -319,12 +319,6 @@ static unsigned long tegra210_input_freq[] = { [8] = 12000000, }; -static const char *mux_pllmcp_clkm[] = { - "pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud", "pll_mb", "pll_mb", - "pll_p", -}; -#define mux_pllmcp_clkm_idx NULL - #define PLL_ENABLE (1 << 30) #define PLLCX_MISC1_IDDQ (1 << 27) @@ -2336,7 +2330,6 @@ static struct tegra_clk tegra210_clks[tegra_clk_max] __initdata = { [tegra_clk_i2c2] = { .dt_id = TEGRA210_CLK_I2C2, .present = true }, [tegra_clk_uartc_8] = { .dt_id = TEGRA210_CLK_UARTC, .present = true }, [tegra_clk_mipi_cal] = { .dt_id = TEGRA210_CLK_MIPI_CAL, .present = true }, - [tegra_clk_emc] = { .dt_id = TEGRA210_CLK_EMC, .present = true }, [tegra_clk_usb2] = { .dt_id = TEGRA210_CLK_USB2, .present = true }, [tegra_clk_bsev] = { .dt_id = TEGRA210_CLK_BSEV, .present = true }, [tegra_clk_uartd_8] = { .dt_id = TEGRA210_CLK_UARTD, .present = true }, @@ -2979,6 +2972,27 @@ static const char * const sor1_parents[] = { static u32 sor1_parents_idx[] = { 0, 2, 5, 6 }; +static const struct clk_div_table mc_div_table_tegra210[] = { + { .val = 0, .div = 2 }, + { .val = 1, .div = 4 }, + { .val = 2, .div = 1 }, + { .val = 3, .div = 2 }, + { .val = 0, .div = 0 }, +}; + +static void tegra210_clk_register_mc(const char *name, + const char *parent_name) +{ + struct clk *clk; + + clk = clk_register_divider_table(NULL, name, parent_name, + CLK_IS_CRITICAL, + clk_base + CLK_SOURCE_EMC, + 15, 2, CLK_DIVIDER_READ_ONLY, + mc_div_table_tegra210, &emc_lock); + clks[TEGRA210_CLK_MC] = clk; +} + static const char * const sor1_out_parents[] = { /* * Bit 0 of the mux selects sor1_pad_clkout, irrespective of bit 1, so @@ -3021,7 +3035,8 @@ static const char * const la_parents[] = { static struct tegra_clk_periph tegra210_la = 
TEGRA_CLK_PERIPH(29, 7, 9, 0, 8, 1, TEGRA_DIVIDER_ROUND_UP, 76, 0, NULL, NULL); -static __init void tegra210_periph_clk_init(void __iomem *clk_base, +static __init void tegra210_periph_clk_init(struct device_node *np, + void __iomem *clk_base, void __iomem *pmc_base) { struct clk *clk; @@ -3067,16 +3082,6 @@ static __init void tegra210_periph_clk_init(void __iomem *clk_base, CLK_SOURCE_LA, 0); clks[TEGRA210_CLK_LA] = clk; - /* emc mux */ - clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm, - ARRAY_SIZE(mux_pllmcp_clkm), 0, - clk_base + CLK_SOURCE_EMC, - 29, 3, 0, &emc_lock); - - clk = tegra_clk_register_mc("mc", "emc_mux", clk_base + CLK_SOURCE_EMC, - &emc_lock); - clks[TEGRA210_CLK_MC] = clk; - /* cml0 */ clk = clk_register_gate(NULL, "cml0", "pll_e", 0, clk_base + PLLE_AUX, 0, 0, &pll_e_lock); @@ -3119,6 +3124,13 @@ static __init void tegra210_periph_clk_init(void __iomem *clk_base, } tegra_periph_clk_init(clk_base, pmc_base, tegra210_clks, &pll_p_params); + + /* emc */ + clk = tegra210_clk_register_emc(np, clk_base); + clks[TEGRA210_CLK_EMC] = clk; + + /* mc */ + tegra210_clk_register_mc("mc", "emc"); } static void __init tegra210_pll_init(void __iomem *clk_base, @@ -3717,7 +3729,7 @@ static void __init tegra210_clock_init(struct device_node *np) tegra_fixed_clk_init(tegra210_clks); tegra210_pll_init(clk_base, pmc_base); - tegra210_periph_clk_init(clk_base, pmc_base); + tegra210_periph_clk_init(np, clk_base, pmc_base); tegra_audio_clk_init(clk_base, pmc_base, tegra210_clks, tegra210_audio_plls, ARRAY_SIZE(tegra210_audio_plls), 24576000); -- GitLab From 1641567920fc363be971f9059f3e7afc58a0dda6 Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Thu, 19 Mar 2020 22:02:18 +0300 Subject: [PATCH 0452/1971] clk: tegra: Add custom CCLK implementation CCLK stands for "CPU Clock"; the CPU core runs off CCLK. CCLK supports multiple parents and has an internal clock divider and a clock skipper. PLLX is the main CCLK parent that provides clock rates above 1 GHz, and it has the special property that CCLK's internal divider is put into bypass mode whenever PLLX is selected as CCLK's parent. This patch forks the generic Super Clock code into a CCLK implementation that takes all of these CCLK specifics into account. The proper CCLK implementation is needed by the upcoming Tegra20 CPUFreq driver update, which will allow the generic cpufreq-dt driver to be used by moving the intermediate clock selection into the clock driver. Note that technically this patch could be squashed into clk-super.c, but it is cleaner to have a separate source file. Also note that currently all the CCLKLP bits are left in clk-super.c and only CCLKG is supported by clk-tegra-super-cclk. It shouldn't be difficult to move the CCLKLP bits, but CCLKLP is not used by anything in the kernel, so it is better not to touch it for now.
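Put differently, the parent-selection policy the new implementation follows is: any rate that PLLP can satisfy keeps CCLK on PLLP (optionally divided, so PLLX can be powered down), while anything faster selects PLLX, whose selection also forces CCLK's internal divider into bypass. A stand-alone toy model of that decision, with an assumed PLLP rate purely for illustration:

#include <stdio.h>

/* Illustrative PLLP rate only; the real value is read from the clock tree. */
static const unsigned long pllp_rate = 408000000UL;

/*
 * Toy model of the CCLK parent policy: requests at or below the PLLP rate
 * stay on PLLP, faster requests move to PLLX (divider bypassed).
 */
static const char *cclk_pick_parent(unsigned long rate)
{
	return (rate <= pllp_rate) ? "pll_p" : "pll_x";
}

int main(void)
{
	unsigned long requests[] = { 216000000UL, 408000000UL, 1200000000UL };
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("%10lu Hz -> %s\n", requests[i],
		       cclk_pick_parent(requests[i]));

	return 0;
}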
Acked-by: Peter De Schrijver Tested-by: Peter Geis Tested-by: Marcel Ziswiler Tested-by: Jasper Korten Tested-by: David Heidelberg Tested-by: Nicolas Chauvet Signed-off-by: Dmitry Osipenko Signed-off-by: Thierry Reding --- drivers/clk/tegra/Makefile | 1 + drivers/clk/tegra/clk-tegra-super-cclk.c | 178 +++++++++++++++++++++++ drivers/clk/tegra/clk.h | 11 +- 3 files changed, 188 insertions(+), 2 deletions(-) create mode 100644 drivers/clk/tegra/clk-tegra-super-cclk.c diff --git a/drivers/clk/tegra/Makefile b/drivers/clk/tegra/Makefile index 6e1200b3bb682..eec2313fd37e0 100644 --- a/drivers/clk/tegra/Makefile +++ b/drivers/clk/tegra/Makefile @@ -13,6 +13,7 @@ obj-y += clk-super.o obj-y += clk-tegra-audio.o obj-y += clk-tegra-periph.o obj-y += clk-tegra-fixed.o +obj-y += clk-tegra-super-cclk.o obj-y += clk-tegra-super-gen4.o obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += clk-tegra20.o obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += clk-tegra20-emc.o diff --git a/drivers/clk/tegra/clk-tegra-super-cclk.c b/drivers/clk/tegra/clk-tegra-super-cclk.c new file mode 100644 index 0000000000000..7bcb9e8d0860a --- /dev/null +++ b/drivers/clk/tegra/clk-tegra-super-cclk.c @@ -0,0 +1,178 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Based on clk-super.c + * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. + * + * Based on older tegra20-cpufreq driver by Colin Cross + * Copyright (C) 2010 Google, Inc. + * + * Author: Dmitry Osipenko + * Copyright (C) 2019 GRATE-DRIVER project + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "clk.h" + +#define PLLP_INDEX 4 +#define PLLX_INDEX 8 + +#define SUPER_CDIV_ENB BIT(31) + +static u8 cclk_super_get_parent(struct clk_hw *hw) +{ + return tegra_clk_super_ops.get_parent(hw); +} + +static int cclk_super_set_parent(struct clk_hw *hw, u8 index) +{ + return tegra_clk_super_ops.set_parent(hw, index); +} + +static int cclk_super_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + return tegra_clk_super_ops.set_rate(hw, rate, parent_rate); +} + +static unsigned long cclk_super_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + if (cclk_super_get_parent(hw) == PLLX_INDEX) + return parent_rate; + + return tegra_clk_super_ops.recalc_rate(hw, parent_rate); +} + +static int cclk_super_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct clk_hw *pllp_hw = clk_hw_get_parent_by_index(hw, PLLP_INDEX); + struct clk_hw *pllx_hw = clk_hw_get_parent_by_index(hw, PLLX_INDEX); + struct tegra_clk_super_mux *super = to_clk_super_mux(hw); + unsigned long pllp_rate; + long rate = req->rate; + + if (WARN_ON_ONCE(!pllp_hw || !pllx_hw)) + return -EINVAL; + + /* + * Switch parent to PLLP for all CCLK rates that are suitable for PLLP. + * PLLX will be disabled in this case, saving some power. 
+ */ + pllp_rate = clk_hw_get_rate(pllp_hw); + + if (rate <= pllp_rate) { + if (super->flags & TEGRA20_SUPER_CLK) + rate = pllp_rate; + else + rate = tegra_clk_super_ops.round_rate(hw, rate, + &pllp_rate); + + req->best_parent_rate = pllp_rate; + req->best_parent_hw = pllp_hw; + req->rate = rate; + } else { + rate = clk_hw_round_rate(pllx_hw, rate); + req->best_parent_rate = rate; + req->best_parent_hw = pllx_hw; + req->rate = rate; + } + + if (WARN_ON_ONCE(rate <= 0)) + return -EINVAL; + + return 0; +} + +static const struct clk_ops tegra_cclk_super_ops = { + .get_parent = cclk_super_get_parent, + .set_parent = cclk_super_set_parent, + .set_rate = cclk_super_set_rate, + .recalc_rate = cclk_super_recalc_rate, + .determine_rate = cclk_super_determine_rate, +}; + +static const struct clk_ops tegra_cclk_super_mux_ops = { + .get_parent = cclk_super_get_parent, + .set_parent = cclk_super_set_parent, + .determine_rate = cclk_super_determine_rate, +}; + +struct clk *tegra_clk_register_super_cclk(const char *name, + const char * const *parent_names, u8 num_parents, + unsigned long flags, void __iomem *reg, u8 clk_super_flags, + spinlock_t *lock) +{ + struct tegra_clk_super_mux *super; + struct clk *clk; + struct clk_init_data init; + u32 val; + + super = kzalloc(sizeof(*super), GFP_KERNEL); + if (!super) + return ERR_PTR(-ENOMEM); + + init.name = name; + init.flags = flags; + init.parent_names = parent_names; + init.num_parents = num_parents; + + super->reg = reg; + super->lock = lock; + super->width = 4; + super->flags = clk_super_flags; + super->hw.init = &init; + + if (super->flags & TEGRA20_SUPER_CLK) { + init.ops = &tegra_cclk_super_mux_ops; + } else { + init.ops = &tegra_cclk_super_ops; + + super->frac_div.reg = reg + 4; + super->frac_div.shift = 16; + super->frac_div.width = 8; + super->frac_div.frac_width = 1; + super->frac_div.lock = lock; + super->div_ops = &tegra_clk_frac_div_ops; + } + + /* + * Tegra30+ has the following CPUG clock topology: + * + * +---+ +-------+ +-+ +-+ +-+ + * PLLP+->+ +->+DIVIDER+->+0| +-------->+0| ------------->+0| + * | | +-------+ | | | +---+ | | | | | + * PLLC+->+MUX| | +->+ | S | | +->+ | +->+CPU + * ... | | | | | | K | | | | +-------+ | | + * PLLX+->+-->+------------>+1| +->+ I +->+1| +->+ DIV2 +->+1| + * +---+ +++ | P | +++ |SKIPPER| +++ + * ^ | P | ^ +-------+ ^ + * | | E | | | + * PLLX_SEL+--+ | R | | OVERHEAT+--+ + * +---+ | + * | + * SUPER_CDIV_ENB+--+ + * + * Tegra20 is similar, but simpler. It doesn't have the divider and + * thermal DIV2 skipper. + * + * At least for now we're not going to use clock-skipper, hence let's + * ensure that it is disabled. + */ + val = readl_relaxed(reg + 4); + val &= ~SUPER_CDIV_ENB; + writel_relaxed(val, reg + 4); + + clk = clk_register(NULL, &super->hw); + if (IS_ERR(clk)) + kfree(super); + + return clk; +} diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h index 09cc700bab415..659c0be03b2e7 100644 --- a/drivers/clk/tegra/clk.h +++ b/drivers/clk/tegra/clk.h @@ -729,8 +729,10 @@ struct clk *tegra_clk_register_periph_data(void __iomem *clk_base, * TEGRA_DIVIDER_2 - LP cluster has additional divider. This flag indicates * that this is LP cluster clock. * TEGRA210_CPU_CLK - This flag is used to identify CPU cluster for gen5 - * super mux parent using PLLP branches. To use PLLP branches to CPU, need - * to configure additional bit PLLP_OUT_CPU in the clock registers. + * super mux parent using PLLP branches. To use PLLP branches to CPU, need + * to configure additional bit PLLP_OUT_CPU in the clock registers. 
+ * TEGRA20_SUPER_CLK - Tegra20 doesn't have a dedicated divider for Super + * clocks, it only has a clock-skipper. */ struct tegra_clk_super_mux { struct clk_hw hw; @@ -748,6 +750,7 @@ struct tegra_clk_super_mux { #define TEGRA_DIVIDER_2 BIT(0) #define TEGRA210_CPU_CLK BIT(1) +#define TEGRA20_SUPER_CLK BIT(2) extern const struct clk_ops tegra_clk_super_ops; struct clk *tegra_clk_register_super_mux(const char *name, @@ -758,6 +761,10 @@ struct clk *tegra_clk_register_super_clk(const char *name, const char * const *parent_names, u8 num_parents, unsigned long flags, void __iomem *reg, u8 clk_super_flags, spinlock_t *lock); +struct clk *tegra_clk_register_super_cclk(const char *name, + const char * const *parent_names, u8 num_parents, + unsigned long flags, void __iomem *reg, u8 clk_super_flags, + spinlock_t *lock); /** * struct tegra_sdmmc_mux - switch divider with Low Jitter inputs for SDMMC -- GitLab From 9157abe74b05b9c2ede8f07ad4c7f89b717ff303 Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Thu, 19 Mar 2020 22:02:19 +0300 Subject: [PATCH 0453/1971] clk: tegra: pll: Add pre/post rate-change hooks There is a need to temporarily re-parent CCLK away from PLLX if PLLX's rate is about to change. The newly introduced PLL pre/post rate-change hooks allow to handle such case. Acked-by: Peter De Schrijver Tested-by: Peter Geis Tested-by: Marcel Ziswiler Tested-by: Jasper Korten Tested-by: David Heidelberg Tested-by: Nicolas Chauvet Signed-off-by: Dmitry Osipenko Signed-off-by: Thierry Reding --- drivers/clk/tegra/clk-pll.c | 12 +++++++++++- drivers/clk/tegra/clk.h | 6 ++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c index 531c2b3d814eb..0b212cf2e7942 100644 --- a/drivers/clk/tegra/clk-pll.c +++ b/drivers/clk/tegra/clk-pll.c @@ -744,13 +744,19 @@ static int _program_pll(struct clk_hw *hw, struct tegra_clk_pll_freq_table *cfg, state = clk_pll_is_enabled(hw); + if (state && pll->params->pre_rate_change) { + ret = pll->params->pre_rate_change(); + if (WARN_ON(ret)) + return ret; + } + _get_pll_mnp(pll, &old_cfg); if (state && pll->params->defaults_set && pll->params->dyn_ramp && (cfg->m == old_cfg.m) && (cfg->p == old_cfg.p)) { ret = pll->params->dyn_ramp(pll, cfg); if (!ret) - return 0; + goto done; } if (state) { @@ -772,6 +778,10 @@ static int _program_pll(struct clk_hw *hw, struct tegra_clk_pll_freq_table *cfg, pll_clk_start_ss(pll); } +done: + if (state && pll->params->post_rate_change) + pll->params->post_rate_change(); + return ret; } diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h index 659c0be03b2e7..87a6fb21aafaf 100644 --- a/drivers/clk/tegra/clk.h +++ b/drivers/clk/tegra/clk.h @@ -266,6 +266,10 @@ struct tegra_clk_pll; * disabled. * @dyn_ramp: Callback which can be used to define a custom * dynamic ramp function for a given PLL. + * @pre_rate_change: Callback which is invoked just before changing + * PLL's rate. + * @post_rate_change: Callback which is invoked right after changing + * PLL's rate. 
* * Flags: * TEGRA_PLL_USE_LOCK - This flag indicated to use lock bits for @@ -342,6 +346,8 @@ struct tegra_clk_pll_params { void (*set_defaults)(struct tegra_clk_pll *pll); int (*dyn_ramp)(struct tegra_clk_pll *pll, struct tegra_clk_pll_freq_table *cfg); + int (*pre_rate_change)(void); + void (*post_rate_change)(void); }; #define TEGRA_PLL_USE_LOCK BIT(0) -- GitLab From dec15c9901382f9a2ec548ff0a7ed639d4be0a38 Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Thu, 19 Mar 2020 22:02:20 +0300 Subject: [PATCH 0454/1971] clk: tegra: cclk: Add helpers for handling PLLX rate changes CCLK should be re-parented away from PLLX if PLLX's rate is changing. The PLLP parent is a common safe CPU parent for all Tegra SoCs, thus CCLK will be re-parented to PLLP before PLLX rate-change begins and then switched back to PLLX after the rate-change completion. This patch adds helper functions which perform CCLK re-parenting, these helpers will be utilized by further patches. Acked-by: Peter De Schrijver Tested-by: Peter Geis Tested-by: Marcel Ziswiler Tested-by: Jasper Korten Tested-by: David Heidelberg Tested-by: Nicolas Chauvet Signed-off-by: Dmitry Osipenko Signed-off-by: Thierry Reding --- drivers/clk/tegra/clk-tegra-super-cclk.c | 34 ++++++++++++++++++++++++ drivers/clk/tegra/clk.h | 2 ++ 2 files changed, 36 insertions(+) diff --git a/drivers/clk/tegra/clk-tegra-super-cclk.c b/drivers/clk/tegra/clk-tegra-super-cclk.c index 7bcb9e8d0860a..a03119c30456b 100644 --- a/drivers/clk/tegra/clk-tegra-super-cclk.c +++ b/drivers/clk/tegra/clk-tegra-super-cclk.c @@ -25,6 +25,9 @@ #define SUPER_CDIV_ENB BIT(31) +static struct tegra_clk_super_mux *cclk_super; +static bool cclk_on_pllx; + static u8 cclk_super_get_parent(struct clk_hw *hw) { return tegra_clk_super_ops.get_parent(hw); @@ -115,6 +118,9 @@ struct clk *tegra_clk_register_super_cclk(const char *name, struct clk_init_data init; u32 val; + if (WARN_ON(cclk_super)) + return ERR_PTR(-EBUSY); + super = kzalloc(sizeof(*super), GFP_KERNEL); if (!super) return ERR_PTR(-ENOMEM); @@ -173,6 +179,34 @@ struct clk *tegra_clk_register_super_cclk(const char *name, clk = clk_register(NULL, &super->hw); if (IS_ERR(clk)) kfree(super); + else + cclk_super = super; return clk; } + +int tegra_cclk_pre_pllx_rate_change(void) +{ + if (IS_ERR_OR_NULL(cclk_super)) + return -EINVAL; + + if (cclk_super_get_parent(&cclk_super->hw) == PLLX_INDEX) + cclk_on_pllx = true; + else + cclk_on_pllx = false; + + /* + * CPU needs to be temporarily re-parented away from PLLX if PLLX + * changes its rate. PLLP is a safe parent for CPU on all Tegra SoCs. 
+ */ + if (cclk_on_pllx) + cclk_super_set_parent(&cclk_super->hw, PLLP_INDEX); + + return 0; +} + +void tegra_cclk_post_pllx_rate_change(void) +{ + if (cclk_on_pllx) + cclk_super_set_parent(&cclk_super->hw, PLLX_INDEX); +} diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h index 87a6fb21aafaf..6b565f6b5f664 100644 --- a/drivers/clk/tegra/clk.h +++ b/drivers/clk/tegra/clk.h @@ -771,6 +771,8 @@ struct clk *tegra_clk_register_super_cclk(const char *name, const char * const *parent_names, u8 num_parents, unsigned long flags, void __iomem *reg, u8 clk_super_flags, spinlock_t *lock); +int tegra_cclk_pre_pllx_rate_change(void); +void tegra_cclk_post_pllx_rate_change(void); /** * struct tegra_sdmmc_mux - switch divider with Low Jitter inputs for SDMMC -- GitLab From 2db2fcd7a2e3036bc4c9937c686abadb9d32524c Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Thu, 19 Mar 2020 22:02:21 +0300 Subject: [PATCH 0455/1971] clk: tegra20: Use custom CCLK implementation We're going to use the generic cpufreq-dt driver on Tegra20, so the intermediate CCLK re-parenting will be performed by the clock driver. There is now a special CCLK implementation that supports all of the CCLK quirks; make Tegra20 SoCs use that implementation.
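For context, the consumer-visible effect of this change (together with the pre/post rate-change hooks) is that a CPU frequency driver no longer has to re-parent CCLK by hand around a PLLX rate change. A hypothetical consumer-side sketch, not part of this patch:

#include <linux/clk.h>

/*
 * With the CCLK implementation above, the clock driver itself parks CCLK on
 * PLLP while PLLX is reprogrammed and switches back afterwards, so a cpufreq
 * driver only needs a single clk_set_rate() call on the CPU clock.
 */
static int set_cpu_rate(struct clk *cclk, unsigned long target_hz)
{
	return clk_set_rate(cclk, target_hz);
}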
Acked-by: Peter De Schrijver Tested-by: Peter Geis Tested-by: Marcel Ziswiler Tested-by: Jasper Korten Tested-by: David Heidelberg Tested-by: Nicolas Chauvet Signed-off-by: Dmitry Osipenko Signed-off-by: Thierry Reding --- drivers/clk/tegra/clk-tegra20.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c index 085feb04e9132..3efc651b42e3a 100644 --- a/drivers/clk/tegra/clk-tegra20.c +++ b/drivers/clk/tegra/clk-tegra20.c @@ -391,6 +391,8 @@ static struct tegra_clk_pll_params pll_x_params = { .lock_delay = 300, .freq_table = pll_x_freq_table, .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_HAS_LOCK_ENABLE, + .pre_rate_change = tegra_cclk_pre_pllx_rate_change, + .post_rate_change = tegra_cclk_post_pllx_rate_change, }; static struct tegra_clk_pll_params pll_e_params = { @@ -702,9 +704,10 @@ static void tegra20_super_clk_init(void) struct clk *clk; /* CCLK */ - clk = tegra_clk_register_super_mux("cclk", cclk_parents, + clk = tegra_clk_register_super_cclk("cclk", cclk_parents, ARRAY_SIZE(cclk_parents), CLK_SET_RATE_PARENT, - clk_base + CCLK_BURST_POLICY, 0, 4, 0, 0, NULL); + clk_base + CCLK_BURST_POLICY, TEGRA20_SUPER_CLK, + NULL); clks[TEGRA20_CLK_CCLK] = clk; /* SCLK */ -- GitLab From 42329854410e672b7ffeb391d284ad719efcc465 Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Thu, 19 Mar 2020 22:02:22 +0300 Subject: [PATCH 0456/1971] clk: tegra30: Use custom CCLK implementation We're going to use the generic cpufreq-dt driver on Tegra30, so the intermediate CCLK re-parenting will be performed by the clock driver. There is now a special CCLK implementation that supports all of the CCLK quirks; make Tegra30 SoCs use that implementation.
Signed-off-by: Johnny Chuang Link: https://lore.kernel.org/r/1588754932-5902-1-git-send-email-johnny.chuang.emc@gmail.com Signed-off-by: Dmitry Torokhov --- drivers/input/touchscreen/elants_i2c.c | 28 +++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c index 14c577c16b169..6e962b54fea34 100644 --- a/drivers/input/touchscreen/elants_i2c.c +++ b/drivers/input/touchscreen/elants_i2c.c @@ -87,6 +87,7 @@ /* FW read command, 0x53 0x?? 0x0, 0x01 */ #define E_ELAN_INFO_FW_VER 0x00 #define E_ELAN_INFO_BC_VER 0x10 +#define E_ELAN_INFO_REK 0xE0 #define E_ELAN_INFO_TEST_VER 0xE0 #define E_ELAN_INFO_FW_ID 0xF0 #define E_INFO_OSR 0xD6 @@ -1010,7 +1011,7 @@ out: */ static ssize_t calibrate_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) + const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct elants_data *ts = i2c_get_clientdata(client); @@ -1056,8 +1057,32 @@ static ssize_t show_iap_mode(struct device *dev, "Normal" : "Recovery"); } +static ssize_t show_calibration_count(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + const u8 cmd[] = { CMD_HEADER_READ, E_ELAN_INFO_REK, 0x00, 0x01 }; + u8 resp[HEADER_SIZE]; + u16 rek_count; + int error; + + error = elants_i2c_execute_command(client, cmd, sizeof(cmd), + resp, sizeof(resp)); + if (error) { + dev_err(&client->dev, + "read ReK status error=%d, buf=%*phC\n", + error, (int)sizeof(resp), resp); + return sprintf(buf, "%d\n", error); + } + + rek_count = get_unaligned_be16(&resp[2]); + + return sprintf(buf, "0x%04x\n", rek_count); +} + static DEVICE_ATTR_WO(calibrate); static DEVICE_ATTR(iap_mode, S_IRUGO, show_iap_mode, NULL); +static DEVICE_ATTR(calibration_count, S_IRUGO, show_calibration_count, NULL); static DEVICE_ATTR(update_fw, S_IWUSR, NULL, write_update_fw); struct elants_version_attribute { @@ -1113,6 +1138,7 @@ static struct attribute *elants_attributes[] = { &dev_attr_calibrate.attr, &dev_attr_update_fw.attr, &dev_attr_iap_mode.attr, + &dev_attr_calibration_count.attr, &elants_ver_attr_fw_version.dattr.attr, &elants_ver_attr_hw_version.dattr.attr, -- GitLab From 3819ad44025fbc03b32d41070b941acea86e6686 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Wed, 8 Apr 2020 23:44:14 -0700 Subject: [PATCH 0459/1971] ARM: mmp: Remove legacy clk code Remove all the legacy clk code that supports a non-common clk framework implementation of 'struct clk' in mach-mmp. This code doesn't look to be compiled anymore given that the MMP is fully supported in the multi-platform config via ARCH_MULTIPLATFORM as of commit 377524dc4d77 ("ARM: mmp: move into ARCH_MULTIPLATFORM"). The ARCH_MULTIPLATFORM config selects COMMON_CLK and therefore the Makefile rule can never actually compile the code in these files. 
Cc: Lubomir Rintel Cc: Russell King Cc: Cc: Arnd Bergmann Signed-off-by: Stephen Boyd Link: https://lkml.kernel.org/r/20200409064416.83340-9-sboyd@kernel.org Reviewed-by: Arnd Bergmann [sboyd@kernel.org: Squash in a clock.h include removal found by Stephen Rothwell ] --- arch/arm/mach-mmp/Makefile | 6 -- arch/arm/mach-mmp/clock-mmp2.c | 114 ------------------------------- arch/arm/mach-mmp/clock-pxa168.c | 94 ------------------------- arch/arm/mach-mmp/clock-pxa910.c | 70 ------------------- arch/arm/mach-mmp/clock.c | 105 ---------------------------- arch/arm/mach-mmp/clock.h | 65 ------------------ arch/arm/mach-mmp/pxa168.c | 1 - arch/arm/mach-mmp/time.c | 1 - 8 files changed, 456 deletions(-) delete mode 100644 arch/arm/mach-mmp/clock-mmp2.c delete mode 100644 arch/arm/mach-mmp/clock-pxa168.c delete mode 100644 arch/arm/mach-mmp/clock-pxa910.c delete mode 100644 arch/arm/mach-mmp/clock.c delete mode 100644 arch/arm/mach-mmp/clock.h diff --git a/arch/arm/mach-mmp/Makefile b/arch/arm/mach-mmp/Makefile index 7b3a7f979eece..e3758f7e1fe7f 100644 --- a/arch/arm/mach-mmp/Makefile +++ b/arch/arm/mach-mmp/Makefile @@ -12,12 +12,6 @@ obj-$(CONFIG_CPU_PXA910) += pxa910.o obj-$(CONFIG_CPU_MMP2) += mmp2.o obj-$(CONFIG_MMP_SRAM) += sram.o -ifeq ($(CONFIG_COMMON_CLK), ) -obj-y += clock.o -obj-$(CONFIG_CPU_PXA168) += clock-pxa168.o -obj-$(CONFIG_CPU_PXA910) += clock-pxa910.o -obj-$(CONFIG_CPU_MMP2) += clock-mmp2.o -endif ifeq ($(CONFIG_PM),y) obj-$(CONFIG_CPU_PXA910) += pm-pxa910.o obj-$(CONFIG_CPU_MMP2) += pm-mmp2.o diff --git a/arch/arm/mach-mmp/clock-mmp2.c b/arch/arm/mach-mmp/clock-mmp2.c deleted file mode 100644 index 7536398bf1c16..0000000000000 --- a/arch/arm/mach-mmp/clock-mmp2.c +++ /dev/null @@ -1,114 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include -#include -#include -#include -#include -#include -#include - -#include "addr-map.h" - -#include "common.h" -#include "clock.h" - -/* - * APB Clock register offsets for MMP2 - */ -#define APBC_RTC APBC_REG(0x000) -#define APBC_TWSI1 APBC_REG(0x004) -#define APBC_TWSI2 APBC_REG(0x008) -#define APBC_TWSI3 APBC_REG(0x00c) -#define APBC_TWSI4 APBC_REG(0x010) -#define APBC_KPC APBC_REG(0x018) -#define APBC_UART1 APBC_REG(0x02c) -#define APBC_UART2 APBC_REG(0x030) -#define APBC_UART3 APBC_REG(0x034) -#define APBC_GPIO APBC_REG(0x038) -#define APBC_PWM0 APBC_REG(0x03c) -#define APBC_PWM1 APBC_REG(0x040) -#define APBC_PWM2 APBC_REG(0x044) -#define APBC_PWM3 APBC_REG(0x048) -#define APBC_SSP0 APBC_REG(0x04c) -#define APBC_SSP1 APBC_REG(0x050) -#define APBC_SSP2 APBC_REG(0x054) -#define APBC_SSP3 APBC_REG(0x058) -#define APBC_SSP4 APBC_REG(0x05c) -#define APBC_SSP5 APBC_REG(0x060) -#define APBC_TWSI5 APBC_REG(0x07c) -#define APBC_TWSI6 APBC_REG(0x080) -#define APBC_UART4 APBC_REG(0x088) - -#define APMU_USB APMU_REG(0x05c) -#define APMU_NAND APMU_REG(0x060) -#define APMU_SDH0 APMU_REG(0x054) -#define APMU_SDH1 APMU_REG(0x058) -#define APMU_SDH2 APMU_REG(0x0e8) -#define APMU_SDH3 APMU_REG(0x0ec) - -static void sdhc_clk_enable(struct clk *clk) -{ - uint32_t clk_rst; - - clk_rst = __raw_readl(clk->clk_rst); - clk_rst |= clk->enable_val; - __raw_writel(clk_rst, clk->clk_rst); -} - -static void sdhc_clk_disable(struct clk *clk) -{ - uint32_t clk_rst; - - clk_rst = __raw_readl(clk->clk_rst); - clk_rst &= ~clk->enable_val; - __raw_writel(clk_rst, clk->clk_rst); -} - -struct clkops sdhc_clk_ops = { - .enable = sdhc_clk_enable, - .disable = sdhc_clk_disable, -}; - -/* APB peripheral clocks */ -static APBC_CLK(uart1, UART1, 1, 26000000); -static APBC_CLK(uart2, 
UART2, 1, 26000000); -static APBC_CLK(uart3, UART3, 1, 26000000); -static APBC_CLK(uart4, UART4, 1, 26000000); -static APBC_CLK(twsi1, TWSI1, 0, 26000000); -static APBC_CLK(twsi2, TWSI2, 0, 26000000); -static APBC_CLK(twsi3, TWSI3, 0, 26000000); -static APBC_CLK(twsi4, TWSI4, 0, 26000000); -static APBC_CLK(twsi5, TWSI5, 0, 26000000); -static APBC_CLK(twsi6, TWSI6, 0, 26000000); -static APBC_CLK(gpio, GPIO, 0, 26000000); - -static APMU_CLK(nand, NAND, 0xbf, 100000000); -static APMU_CLK_OPS(sdh0, SDH0, 0x1b, 200000000, &sdhc_clk_ops); -static APMU_CLK_OPS(sdh1, SDH1, 0x1b, 200000000, &sdhc_clk_ops); -static APMU_CLK_OPS(sdh2, SDH2, 0x1b, 200000000, &sdhc_clk_ops); -static APMU_CLK_OPS(sdh3, SDH3, 0x1b, 200000000, &sdhc_clk_ops); - -static struct clk_lookup mmp2_clkregs[] = { - INIT_CLKREG(&clk_uart1, "pxa2xx-uart.0", NULL), - INIT_CLKREG(&clk_uart2, "pxa2xx-uart.1", NULL), - INIT_CLKREG(&clk_uart3, "pxa2xx-uart.2", NULL), - INIT_CLKREG(&clk_uart4, "pxa2xx-uart.3", NULL), - INIT_CLKREG(&clk_twsi1, "pxa2xx-i2c.0", NULL), - INIT_CLKREG(&clk_twsi2, "pxa2xx-i2c.1", NULL), - INIT_CLKREG(&clk_twsi3, "pxa2xx-i2c.2", NULL), - INIT_CLKREG(&clk_twsi4, "pxa2xx-i2c.3", NULL), - INIT_CLKREG(&clk_twsi5, "pxa2xx-i2c.4", NULL), - INIT_CLKREG(&clk_twsi6, "pxa2xx-i2c.5", NULL), - INIT_CLKREG(&clk_nand, "pxa3xx-nand", NULL), - INIT_CLKREG(&clk_gpio, "mmp2-gpio", NULL), - INIT_CLKREG(&clk_sdh0, "sdhci-pxav3.0", "PXA-SDHCLK"), - INIT_CLKREG(&clk_sdh1, "sdhci-pxav3.1", "PXA-SDHCLK"), - INIT_CLKREG(&clk_sdh2, "sdhci-pxav3.2", "PXA-SDHCLK"), - INIT_CLKREG(&clk_sdh3, "sdhci-pxav3.3", "PXA-SDHCLK"), -}; - -void __init mmp2_clk_init(phys_addr_t mpmu_phys, phys_addr_t apmu_phys, - phys_addr_t apbc_phys) -{ - clkdev_add_table(ARRAY_AND_SIZE(mmp2_clkregs)); -} diff --git a/arch/arm/mach-mmp/clock-pxa168.c b/arch/arm/mach-mmp/clock-pxa168.c deleted file mode 100644 index 2d4a5d96a1ff4..0000000000000 --- a/arch/arm/mach-mmp/clock-pxa168.c +++ /dev/null @@ -1,94 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include -#include -#include -#include -#include -#include -#include - -#include "addr-map.h" - -#include "common.h" -#include "clock.h" - -/* - * APB clock register offsets for PXA168 - */ -#define APBC_UART1 APBC_REG(0x000) -#define APBC_UART2 APBC_REG(0x004) -#define APBC_GPIO APBC_REG(0x008) -#define APBC_PWM1 APBC_REG(0x00c) -#define APBC_PWM2 APBC_REG(0x010) -#define APBC_PWM3 APBC_REG(0x014) -#define APBC_PWM4 APBC_REG(0x018) -#define APBC_RTC APBC_REG(0x028) -#define APBC_TWSI0 APBC_REG(0x02c) -#define APBC_KPC APBC_REG(0x030) -#define APBC_TWSI1 APBC_REG(0x06c) -#define APBC_UART3 APBC_REG(0x070) -#define APBC_SSP1 APBC_REG(0x81c) -#define APBC_SSP2 APBC_REG(0x820) -#define APBC_SSP3 APBC_REG(0x84c) -#define APBC_SSP4 APBC_REG(0x858) -#define APBC_SSP5 APBC_REG(0x85c) - -#define APMU_NAND APMU_REG(0x060) -#define APMU_LCD APMU_REG(0x04c) -#define APMU_ETH APMU_REG(0x0fc) -#define APMU_USB APMU_REG(0x05c) - -/* APB peripheral clocks */ -static APBC_CLK(uart1, UART1, 1, 14745600); -static APBC_CLK(uart2, UART2, 1, 14745600); -static APBC_CLK(uart3, UART3, 1, 14745600); -static APBC_CLK(twsi0, TWSI0, 1, 33000000); -static APBC_CLK(twsi1, TWSI1, 1, 33000000); -static APBC_CLK(pwm1, PWM1, 1, 13000000); -static APBC_CLK(pwm2, PWM2, 1, 13000000); -static APBC_CLK(pwm3, PWM3, 1, 13000000); -static APBC_CLK(pwm4, PWM4, 1, 13000000); -static APBC_CLK(ssp1, SSP1, 4, 0); -static APBC_CLK(ssp2, SSP2, 4, 0); -static APBC_CLK(ssp3, SSP3, 4, 0); -static APBC_CLK(ssp4, SSP4, 4, 0); -static APBC_CLK(ssp5, SSP5, 4, 0); -static 
APBC_CLK(gpio, GPIO, 0, 13000000); -static APBC_CLK(keypad, KPC, 0, 32000); -static APBC_CLK(rtc, RTC, 8, 32768); - -static APMU_CLK(nand, NAND, 0x19b, 156000000); -static APMU_CLK(lcd, LCD, 0x7f, 312000000); -static APMU_CLK(eth, ETH, 0x09, 0); -static APMU_CLK(usb, USB, 0x12, 0); - -/* device and clock bindings */ -static struct clk_lookup pxa168_clkregs[] = { - INIT_CLKREG(&clk_uart1, "pxa2xx-uart.0", NULL), - INIT_CLKREG(&clk_uart2, "pxa2xx-uart.1", NULL), - INIT_CLKREG(&clk_uart3, "pxa2xx-uart.2", NULL), - INIT_CLKREG(&clk_twsi0, "pxa2xx-i2c.0", NULL), - INIT_CLKREG(&clk_twsi1, "pxa2xx-i2c.1", NULL), - INIT_CLKREG(&clk_pwm1, "pxa168-pwm.0", NULL), - INIT_CLKREG(&clk_pwm2, "pxa168-pwm.1", NULL), - INIT_CLKREG(&clk_pwm3, "pxa168-pwm.2", NULL), - INIT_CLKREG(&clk_pwm4, "pxa168-pwm.3", NULL), - INIT_CLKREG(&clk_ssp1, "pxa168-ssp.0", NULL), - INIT_CLKREG(&clk_ssp2, "pxa168-ssp.1", NULL), - INIT_CLKREG(&clk_ssp3, "pxa168-ssp.2", NULL), - INIT_CLKREG(&clk_ssp4, "pxa168-ssp.3", NULL), - INIT_CLKREG(&clk_ssp5, "pxa168-ssp.4", NULL), - INIT_CLKREG(&clk_nand, "pxa3xx-nand", NULL), - INIT_CLKREG(&clk_lcd, "pxa168-fb", NULL), - INIT_CLKREG(&clk_gpio, "mmp-gpio", NULL), - INIT_CLKREG(&clk_keypad, "pxa27x-keypad", NULL), - INIT_CLKREG(&clk_eth, "pxa168-eth", "MFUCLK"), - INIT_CLKREG(&clk_usb, NULL, "PXA168-USBCLK"), - INIT_CLKREG(&clk_rtc, "sa1100-rtc", NULL), -}; - -void __init pxa168_clk_init(phys_addr_t mpmu_phys, phys_addr_t apmu_phys, - phys_addr_t apbc_phys) -{ - clkdev_add_table(ARRAY_AND_SIZE(pxa168_clkregs)); -} diff --git a/arch/arm/mach-mmp/clock-pxa910.c b/arch/arm/mach-mmp/clock-pxa910.c deleted file mode 100644 index 3cd83ff91bb08..0000000000000 --- a/arch/arm/mach-mmp/clock-pxa910.c +++ /dev/null @@ -1,70 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include -#include -#include -#include -#include -#include -#include - -#include "addr-map.h" - -#include "common.h" -#include "clock.h" - -/* - * APB Clock register offsets for PXA910 - */ -#define APBC_UART0 APBC_REG(0x000) -#define APBC_UART1 APBC_REG(0x004) -#define APBC_GPIO APBC_REG(0x008) -#define APBC_PWM1 APBC_REG(0x00c) -#define APBC_PWM2 APBC_REG(0x010) -#define APBC_PWM3 APBC_REG(0x014) -#define APBC_PWM4 APBC_REG(0x018) -#define APBC_SSP1 APBC_REG(0x01c) -#define APBC_SSP2 APBC_REG(0x020) -#define APBC_RTC APBC_REG(0x028) -#define APBC_TWSI0 APBC_REG(0x02c) -#define APBC_KPC APBC_REG(0x030) -#define APBC_SSP3 APBC_REG(0x04c) -#define APBC_TWSI1 APBC_REG(0x06c) - -#define APMU_NAND APMU_REG(0x060) -#define APMU_USB APMU_REG(0x05c) - -static APBC_CLK(uart1, UART0, 1, 14745600); -static APBC_CLK(uart2, UART1, 1, 14745600); -static APBC_CLK(twsi0, TWSI0, 1, 33000000); -static APBC_CLK(twsi1, TWSI1, 1, 33000000); -static APBC_CLK(pwm1, PWM1, 1, 13000000); -static APBC_CLK(pwm2, PWM2, 1, 13000000); -static APBC_CLK(pwm3, PWM3, 1, 13000000); -static APBC_CLK(pwm4, PWM4, 1, 13000000); -static APBC_CLK(gpio, GPIO, 0, 13000000); -static APBC_CLK(rtc, RTC, 8, 32768); - -static APMU_CLK(nand, NAND, 0x19b, 156000000); -static APMU_CLK(u2o, USB, 0x1b, 480000000); - -/* device and clock bindings */ -static struct clk_lookup pxa910_clkregs[] = { - INIT_CLKREG(&clk_uart1, "pxa2xx-uart.0", NULL), - INIT_CLKREG(&clk_uart2, "pxa2xx-uart.1", NULL), - INIT_CLKREG(&clk_twsi0, "pxa2xx-i2c.0", NULL), - INIT_CLKREG(&clk_twsi1, "pxa2xx-i2c.1", NULL), - INIT_CLKREG(&clk_pwm1, "pxa910-pwm.0", NULL), - INIT_CLKREG(&clk_pwm2, "pxa910-pwm.1", NULL), - INIT_CLKREG(&clk_pwm3, "pxa910-pwm.2", NULL), - INIT_CLKREG(&clk_pwm4, "pxa910-pwm.3", NULL), - 
INIT_CLKREG(&clk_nand, "pxa3xx-nand", NULL), - INIT_CLKREG(&clk_gpio, "mmp-gpio", NULL), - INIT_CLKREG(&clk_u2o, NULL, "U2OCLK"), - INIT_CLKREG(&clk_rtc, "sa1100-rtc", NULL), -}; - -void __init pxa910_clk_init(phys_addr_t mpmu_phys, phys_addr_t apmu_phys, - phys_addr_t apbc_phys, phys_addr_t apbcp_phys) -{ - clkdev_add_table(ARRAY_AND_SIZE(pxa910_clkregs)); -} diff --git a/arch/arm/mach-mmp/clock.c b/arch/arm/mach-mmp/clock.c deleted file mode 100644 index 291fe41e3547b..0000000000000 --- a/arch/arm/mach-mmp/clock.c +++ /dev/null @@ -1,105 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * linux/arch/arm/mach-mmp/clock.c - */ - -#include -#include -#include -#include -#include -#include - -#include "regs-apbc.h" -#include "clock.h" - -static void apbc_clk_enable(struct clk *clk) -{ - uint32_t clk_rst; - - clk_rst = APBC_APBCLK | APBC_FNCLK | APBC_FNCLKSEL(clk->fnclksel); - __raw_writel(clk_rst, clk->clk_rst); -} - -static void apbc_clk_disable(struct clk *clk) -{ - __raw_writel(0, clk->clk_rst); -} - -struct clkops apbc_clk_ops = { - .enable = apbc_clk_enable, - .disable = apbc_clk_disable, -}; - -static void apmu_clk_enable(struct clk *clk) -{ - __raw_writel(clk->enable_val, clk->clk_rst); -} - -static void apmu_clk_disable(struct clk *clk) -{ - __raw_writel(0, clk->clk_rst); -} - -struct clkops apmu_clk_ops = { - .enable = apmu_clk_enable, - .disable = apmu_clk_disable, -}; - -static DEFINE_SPINLOCK(clocks_lock); - -int clk_enable(struct clk *clk) -{ - unsigned long flags; - - spin_lock_irqsave(&clocks_lock, flags); - if (clk->enabled++ == 0) - clk->ops->enable(clk); - spin_unlock_irqrestore(&clocks_lock, flags); - return 0; -} -EXPORT_SYMBOL(clk_enable); - -void clk_disable(struct clk *clk) -{ - unsigned long flags; - - if (!clk) - return; - - WARN_ON(clk->enabled == 0); - - spin_lock_irqsave(&clocks_lock, flags); - if (--clk->enabled == 0) - clk->ops->disable(clk); - spin_unlock_irqrestore(&clocks_lock, flags); -} -EXPORT_SYMBOL(clk_disable); - -unsigned long clk_get_rate(struct clk *clk) -{ - unsigned long rate; - - if (clk->ops->getrate) - rate = clk->ops->getrate(clk); - else - rate = clk->rate; - - return rate; -} -EXPORT_SYMBOL(clk_get_rate); - -int clk_set_rate(struct clk *clk, unsigned long rate) -{ - unsigned long flags; - int ret = -EINVAL; - - if (clk->ops->setrate) { - spin_lock_irqsave(&clocks_lock, flags); - ret = clk->ops->setrate(clk, rate); - spin_unlock_irqrestore(&clocks_lock, flags); - } - - return ret; -} -EXPORT_SYMBOL(clk_set_rate); diff --git a/arch/arm/mach-mmp/clock.h b/arch/arm/mach-mmp/clock.h deleted file mode 100644 index 0256c894fa115..0000000000000 --- a/arch/arm/mach-mmp/clock.h +++ /dev/null @@ -1,65 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ - -#include - -struct clkops { - void (*enable)(struct clk *); - void (*disable)(struct clk *); - unsigned long (*getrate)(struct clk *); - int (*setrate)(struct clk *, unsigned long); -}; - -struct clk { - const struct clkops *ops; - - void __iomem *clk_rst; /* clock reset control register */ - int fnclksel; /* functional clock select (APBC) */ - uint32_t enable_val; /* value for clock enable (APMU) */ - unsigned long rate; - int enabled; -}; - -extern struct clkops apbc_clk_ops; -extern struct clkops apmu_clk_ops; - -#define APBC_CLK(_name, _reg, _fnclksel, _rate) \ -struct clk clk_##_name = { \ - .clk_rst = APBC_##_reg, \ - .fnclksel = _fnclksel, \ - .rate = _rate, \ - .ops = &apbc_clk_ops, \ -} - -#define APBC_CLK_OPS(_name, _reg, _fnclksel, _rate, _ops) \ -struct clk clk_##_name = { \ - 
.clk_rst = APBC_##_reg, \ - .fnclksel = _fnclksel, \ - .rate = _rate, \ - .ops = _ops, \ -} - -#define APMU_CLK(_name, _reg, _eval, _rate) \ -struct clk clk_##_name = { \ - .clk_rst = APMU_##_reg, \ - .enable_val = _eval, \ - .rate = _rate, \ - .ops = &apmu_clk_ops, \ -} - -#define APMU_CLK_OPS(_name, _reg, _eval, _rate, _ops) \ -struct clk clk_##_name = { \ - .clk_rst = APMU_##_reg, \ - .enable_val = _eval, \ - .rate = _rate, \ - .ops = _ops, \ -} - -#define INIT_CLKREG(_clk, _devname, _conname) \ - { \ - .clk = _clk, \ - .dev_id = _devname, \ - .con_id = _conname, \ - } - -extern struct clk clk_pxa168_gpio; -extern struct clk clk_pxa168_timers; diff --git a/arch/arm/mach-mmp/pxa168.c b/arch/arm/mach-mmp/pxa168.c index b642e900727a5..1e9389245d0e7 100644 --- a/arch/arm/mach-mmp/pxa168.c +++ b/arch/arm/mach-mmp/pxa168.c @@ -19,7 +19,6 @@ #include #include "addr-map.h" -#include "clock.h" #include "common.h" #include #include "devices.h" diff --git a/arch/arm/mach-mmp/time.c b/arch/arm/mach-mmp/time.c index 049a65f47b428..41b2e8abc9e69 100644 --- a/arch/arm/mach-mmp/time.c +++ b/arch/arm/mach-mmp/time.c @@ -34,7 +34,6 @@ #include "regs-apbc.h" #include "irqs.h" #include -#include "clock.h" #define TIMERS_VIRT_BASE TIMERS1_VIRT_BASE -- GitLab From c7725c9b74b4058a5ec16c88c29ad88947307f5c Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Wed, 8 Apr 2020 23:44:15 -0700 Subject: [PATCH 0460/1971] MIPS: Loongson64: Drop asm/clock.h include This include isn't used by this file, so just remove it. Acked-by: Jiaxun Yang Cc: Paul Burton Cc: Cc: Signed-off-by: Stephen Boyd Link: https://lkml.kernel.org/r/20200409064416.83340-10-sboyd@kernel.org Reviewed-by: Arnd Bergmann --- arch/mips/loongson64/smp.c | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/mips/loongson64/smp.c b/arch/mips/loongson64/smp.c index e1fe8bbb377d5..e744e1bee49e8 100644 --- a/arch/mips/loongson64/smp.c +++ b/arch/mips/loongson64/smp.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include #include -- GitLab From bc8c945e0ad2ff6d725af9b10085c6ee5d35a563 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Wed, 8 Apr 2020 23:44:16 -0700 Subject: [PATCH 0461/1971] clk: Move HAVE_CLK config out of architecture layer The implementation of 'struct clk' is not really an architectual detail anymore now that most architectures have migrated to the common clk framework. To sway new architecture ports away from trying to implement their own 'struct clk', move the config next to the common clk framework config. Cc: Russell King Cc: Arnd Bergmann Signed-off-by: Stephen Boyd Link: https://lkml.kernel.org/r/20200409064416.83340-11-sboyd@kernel.org Reviewed-by: Arnd Bergmann --- arch/Kconfig | 6 ------ drivers/clk/Kconfig | 6 ++++++ 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/Kconfig b/arch/Kconfig index 786a85d4ad40d..84fe54085f63f 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -328,12 +328,6 @@ config HAVE_FUNCTION_ARG_ACCESS_API the API needed to access function arguments from pt_regs, declared in asm/ptrace.h -config HAVE_CLK - bool - help - The calls support software clock gating and - thus are a key power management tool on many systems. 
- config HAVE_HW_BREAKPOINT bool depends on PERF_EVENTS diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index 890bed62196db..6ea0631e39567 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -1,5 +1,11 @@ # SPDX-License-Identifier: GPL-2.0 +config HAVE_CLK + bool + help + The calls support software clock gating and + thus are a key power management tool on many systems. + config CLKDEV_LOOKUP bool select HAVE_CLK -- GitLab From 45a41875fa20836f2839d4dda1f37d119a275542 Mon Sep 17 00:00:00 2001 From: Saravana Kannan Date: Tue, 12 May 2020 15:53:18 +0300 Subject: [PATCH 0462/1971] dt-bindings: opp: Introduce opp-peak-kBps and opp-avg-kBps bindings Interconnects often quantify their performance points in terms of bandwidth. So, add opp-peak-kBps (required) and opp-avg-kBps (optional) to allow specifying Bandwidth OPP tables in DT. opp-peak-kBps is a required property that replaces opp-hz for Bandwidth OPP tables. opp-avg-kBps is an optional property that can be used in Bandwidth OPP tables. Signed-off-by: Saravana Kannan Signed-off-by: Georgi Djakov Reviewed-by: Sibi Sankar Reviewed-by: Rob Herring Signed-off-by: Viresh Kumar --- Documentation/devicetree/bindings/opp/opp.txt | 17 ++++++++++++++--- .../devicetree/bindings/property-units.txt | 4 ++++ 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/Documentation/devicetree/bindings/opp/opp.txt b/Documentation/devicetree/bindings/opp/opp.txt index 68592271461f6..9d16d417e9be4 100644 --- a/Documentation/devicetree/bindings/opp/opp.txt +++ b/Documentation/devicetree/bindings/opp/opp.txt @@ -83,9 +83,14 @@ properties. Required properties: - opp-hz: Frequency in Hz, expressed as a 64-bit big-endian integer. This is a - required property for all device nodes but devices like power domains. The - power domain nodes must have another (implementation dependent) property which - uniquely identifies the OPP nodes. + required property for all device nodes, unless another "required" property to + uniquely identify the OPP nodes exists. Devices like power domains must have + another (implementation dependent) property. + +- opp-peak-kBps: Peak bandwidth in kilobytes per second, expressed as an array + of 32-bit big-endian integers. Each element of the array represents the + peak bandwidth value of each interconnect path. The number of elements should + match the number of interconnect paths. Optional properties: - opp-microvolt: voltage in micro Volts. @@ -132,6 +137,12 @@ Optional properties: - opp-level: A value representing the performance level of the device, expressed as a 32-bit integer. +- opp-avg-kBps: Average bandwidth in kilobytes per second, expressed as an array + of 32-bit big-endian integers. Each element of the array represents the + average bandwidth value of each interconnect path. The number of elements + should match the number of interconnect paths. This property is only + meaningful in OPP tables where opp-peak-kBps is present. + - clock-latency-ns: Specifies the maximum possible transition latency (in nanoseconds) for switching to this OPP from any other OPP. 
diff --git a/Documentation/devicetree/bindings/property-units.txt b/Documentation/devicetree/bindings/property-units.txt index e9b8360b32880..c80a110c1e266 100644 --- a/Documentation/devicetree/bindings/property-units.txt +++ b/Documentation/devicetree/bindings/property-units.txt @@ -41,3 +41,7 @@ Temperature Pressure ---------------------------------------- -kpascal : kilopascal + +Throughput +---------------------------------------- +-kBps : kilobytes per second -- GitLab From 6c591eec67cbb4db988ab35b944f5cf9013c0714 Mon Sep 17 00:00:00 2001 From: Saravana Kannan Date: Tue, 12 May 2020 15:53:19 +0300 Subject: [PATCH 0463/1971] OPP: Add helpers for reading the binding properties The opp-hz DT property is not mandatory and we may use another property as a key in the OPP table. Add helper functions to simplify the reading and comparing the keys. Signed-off-by: Saravana Kannan Signed-off-by: Georgi Djakov Reviewed-by: Matthias Kaehlcke Reviewed-by: Sibi Sankar [ Viresh: Removed an unnecessary comment ] Signed-off-by: Viresh Kumar --- drivers/opp/core.c | 15 +++++++++++++-- drivers/opp/of.c | 44 ++++++++++++++++++++++++++------------------ drivers/opp/opp.h | 1 + 3 files changed, 40 insertions(+), 20 deletions(-) diff --git a/drivers/opp/core.c b/drivers/opp/core.c index e4f01e7771a22..ce7e4103ec092 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -1286,11 +1286,21 @@ static bool _opp_supported_by_regulators(struct dev_pm_opp *opp, return true; } +int _opp_compare_key(struct dev_pm_opp *opp1, struct dev_pm_opp *opp2) +{ + if (opp1->rate != opp2->rate) + return opp1->rate < opp2->rate ? -1 : 1; + if (opp1->level != opp2->level) + return opp1->level < opp2->level ? -1 : 1; + return 0; +} + static int _opp_is_duplicate(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table, struct list_head **head) { struct dev_pm_opp *opp; + int opp_cmp; /* * Insert new OPP in order of increasing frequency and discard if @@ -1301,12 +1311,13 @@ static int _opp_is_duplicate(struct device *dev, struct dev_pm_opp *new_opp, * loop. */ list_for_each_entry(opp, &opp_table->opp_list, node) { - if (new_opp->rate > opp->rate) { + opp_cmp = _opp_compare_key(new_opp, opp); + if (opp_cmp > 0) { *head = &opp->node; continue; } - if (new_opp->rate < opp->rate) + if (opp_cmp < 0) return 0; /* Duplicate OPPs */ diff --git a/drivers/opp/of.c b/drivers/opp/of.c index 9cd8f0adacae4..303d2207e0ff5 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -521,6 +521,28 @@ void dev_pm_opp_of_remove_table(struct device *dev) } EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table); +static int _read_opp_key(struct dev_pm_opp *new_opp, struct device_node *np, + bool *rate_not_available) +{ + u64 rate; + int ret; + + ret = of_property_read_u64(np, "opp-hz", &rate); + if (!ret) { + /* + * Rate is defined as an unsigned long in clk API, and so + * casting explicitly to its type. Must be fixed once rate is 64 + * bit guaranteed in clk API. + */ + new_opp->rate = (unsigned long)rate; + } + *rate_not_available = !!ret; + + of_property_read_u32(np, "opp-level", &new_opp->level); + + return ret; +} + /** * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings) * @opp_table: OPP table @@ -558,26 +580,12 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table, if (!new_opp) return ERR_PTR(-ENOMEM); - ret = of_property_read_u64(np, "opp-hz", &rate); - if (ret < 0) { - /* "opp-hz" is optional for devices like power domains. 
*/ - if (!opp_table->is_genpd) { - dev_err(dev, "%s: opp-hz not found\n", __func__); - goto free_opp; - } - - rate_not_available = true; - } else { - /* - * Rate is defined as an unsigned long in clk API, and so - * casting explicitly to its type. Must be fixed once rate is 64 - * bit guaranteed in clk API. - */ - new_opp->rate = (unsigned long)rate; + ret = _read_opp_key(new_opp, np, &rate_not_available); + if (ret < 0 && !opp_table->is_genpd) { + dev_err(dev, "%s: opp key field not found\n", __func__); + goto free_opp; } - of_property_read_u32(np, "opp-level", &new_opp->level); - /* Check if the OPP supports hardware's hierarchy of versions or not */ if (!_opp_is_supported(dev, opp_table, np)) { dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate); diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h index d14e27102730c..bcadb1e328a4c 100644 --- a/drivers/opp/opp.h +++ b/drivers/opp/opp.h @@ -211,6 +211,7 @@ struct opp_device *_add_opp_dev(const struct device *dev, struct opp_table *opp_ void _dev_pm_opp_find_and_remove_table(struct device *dev); struct dev_pm_opp *_opp_allocate(struct opp_table *opp_table); void _opp_free(struct dev_pm_opp *opp); +int _opp_compare_key(struct dev_pm_opp *opp1, struct dev_pm_opp *opp2); int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table, bool rate_not_available); int _opp_add_v1(struct opp_table *opp_table, struct device *dev, unsigned long freq, long u_volt, bool dynamic); void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, int last_cpu); -- GitLab From b0def88d807f6db6076aab2aeb11f542036e81a6 Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Thu, 9 Apr 2020 18:58:34 +0300 Subject: [PATCH 0464/1971] ovl: resolve more conflicting mount options Similar to the way that a conflict between metacopy=on,redirect_dir=off is resolved, also resolve conflicts between nfs_export=on,index=off and nfs_export=on,metacopy=on. An explicit mount option wins over a default config value. Both explicit mount options result in an error. Without this change the xfstests group overlay/exportfs are skipped if metacopy is enabled by default. Reported-by: Chengguang Xu Signed-off-by: Amir Goldstein Signed-off-by: Miklos Szeredi --- Documentation/filesystems/overlayfs.rst | 7 ++-- fs/overlayfs/super.c | 48 +++++++++++++++++++++++++ 2 files changed, 53 insertions(+), 2 deletions(-) diff --git a/Documentation/filesystems/overlayfs.rst b/Documentation/filesystems/overlayfs.rst index c9d2bf96b02d6..660dbaf0b9b8c 100644 --- a/Documentation/filesystems/overlayfs.rst +++ b/Documentation/filesystems/overlayfs.rst @@ -365,8 +365,8 @@ pointed by REDIRECT. This should not be possible on local system as setting "trusted." xattrs will require CAP_SYS_ADMIN. But it should be possible for untrusted layers like from a pen drive. -Note: redirect_dir={off|nofollow|follow[*]} conflicts with metacopy=on, and -results in an error. +Note: redirect_dir={off|nofollow|follow[*]} and nfs_export=on mount options +conflict with metacopy=on, and will result in an error. [*] redirect_dir=follow only conflicts with metacopy=on if upperdir=... is given. @@ -560,6 +560,9 @@ When the NFS export feature is enabled, all directory index entries are verified on mount time to check that upper file handles are not stale. This verification may cause significant overhead in some cases. +Note: the mount options index=off,nfs_export=on are conflicting and will +result in an error. 
+ Testsuite --------- diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 732ad5495c921..fbd6207acdbfa 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -470,6 +470,7 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config) char *p; int err; bool metacopy_opt = false, redirect_opt = false; + bool nfs_export_opt = false, index_opt = false; config->redirect_mode = kstrdup(ovl_redirect_mode_def(), GFP_KERNEL); if (!config->redirect_mode) @@ -519,18 +520,22 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config) case OPT_INDEX_ON: config->index = true; + index_opt = true; break; case OPT_INDEX_OFF: config->index = false; + index_opt = true; break; case OPT_NFS_EXPORT_ON: config->nfs_export = true; + nfs_export_opt = true; break; case OPT_NFS_EXPORT_OFF: config->nfs_export = false; + nfs_export_opt = true; break; case OPT_XINO_ON: @@ -552,6 +557,7 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config) case OPT_METACOPY_OFF: config->metacopy = false; + metacopy_opt = true; break; default: @@ -601,6 +607,48 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config) } } + /* Resolve nfs_export -> index dependency */ + if (config->nfs_export && !config->index) { + if (nfs_export_opt && index_opt) { + pr_err("conflicting options: nfs_export=on,index=off\n"); + return -EINVAL; + } + if (index_opt) { + /* + * There was an explicit index=off that resulted + * in this conflict. + */ + pr_info("disabling nfs_export due to index=off\n"); + config->nfs_export = false; + } else { + /* Automatically enable index otherwise. */ + config->index = true; + } + } + + /* Resolve nfs_export -> !metacopy dependency */ + if (config->nfs_export && config->metacopy) { + if (nfs_export_opt && metacopy_opt) { + pr_err("conflicting options: nfs_export=on,metacopy=on\n"); + return -EINVAL; + } + if (metacopy_opt) { + /* + * There was an explicit metacopy=on that resulted + * in this conflict. + */ + pr_info("disabling nfs_export due to metacopy=on\n"); + config->nfs_export = false; + } else { + /* + * There was an explicit nfs_export=on that resulted + * in this conflict. + */ + pr_info("disabling metacopy due to nfs_export=on\n"); + config->metacopy = false; + } + } + return 0; } -- GitLab From 3011645b5b061e99cf0f024b3260ec506f91b27c Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Fri, 3 Apr 2020 07:58:49 +0300 Subject: [PATCH 0465/1971] ovl: cleanup non-empty directories in ovl_indexdir_cleanup() Teach ovl_indexdir_cleanup() to remove temp directories containing whiteouts to prepare for using index dir instead of work dir for removing merge directories. Signed-off-by: Amir Goldstein Signed-off-by: Miklos Szeredi --- fs/overlayfs/namei.c | 11 ----------- fs/overlayfs/overlayfs.h | 4 ++-- fs/overlayfs/readdir.c | 16 ++++++++++++---- 3 files changed, 14 insertions(+), 17 deletions(-) diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c index 0db23baf98e7c..723d177447580 100644 --- a/fs/overlayfs/namei.c +++ b/fs/overlayfs/namei.c @@ -484,12 +484,6 @@ struct dentry *ovl_index_upper(struct ovl_fs *ofs, struct dentry *index) return upper; } -/* Is this a leftover from create/whiteout of directory index entry? */ -static bool ovl_is_temp_index(struct dentry *index) -{ - return index->d_name.name[0] == '#'; -} - /* * Verify that an index entry name matches the origin file handle stored in * OVL_XATTR_ORIGIN and that origin file handle can be decoded to lower path. 
@@ -507,11 +501,6 @@ int ovl_verify_index(struct ovl_fs *ofs, struct dentry *index) if (!d_inode(index)) return 0; - /* Cleanup leftover from index create/cleanup attempt */ - err = -ESTALE; - if (ovl_is_temp_index(index)) - goto fail; - err = -EINVAL; if (index->d_name.len < sizeof(struct ovl_fb)*2) goto fail; diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h index e6f3670146ed1..e00b1ff6dea94 100644 --- a/fs/overlayfs/overlayfs.h +++ b/fs/overlayfs/overlayfs.h @@ -394,8 +394,8 @@ void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list); void ovl_cache_free(struct list_head *list); void ovl_dir_cache_free(struct inode *inode); int ovl_check_d_type_supported(struct path *realpath); -void ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt, - struct dentry *dentry, int level); +int ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt, + struct dentry *dentry, int level); int ovl_indexdir_cleanup(struct ovl_fs *ofs); /* inode.c */ diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c index e452ff7d583d2..20f5310d3ee42 100644 --- a/fs/overlayfs/readdir.c +++ b/fs/overlayfs/readdir.c @@ -1071,14 +1071,13 @@ out: ovl_cache_free(&list); } -void ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt, +int ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt, struct dentry *dentry, int level) { int err; if (!d_is_dir(dentry) || level > 1) { - ovl_cleanup(dir, dentry); - return; + return ovl_cleanup(dir, dentry); } err = ovl_do_rmdir(dir, dentry); @@ -1088,8 +1087,10 @@ void ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt, inode_unlock(dir); ovl_workdir_cleanup_recurse(&path, level + 1); inode_lock_nested(dir, I_MUTEX_PARENT); - ovl_cleanup(dir, dentry); + err = ovl_cleanup(dir, dentry); } + + return err; } int ovl_indexdir_cleanup(struct ovl_fs *ofs) @@ -1128,6 +1129,13 @@ int ovl_indexdir_cleanup(struct ovl_fs *ofs) index = NULL; break; } + /* Cleanup leftover from index create/cleanup attempt */ + if (index->d_name.name[0] == '#') { + err = ovl_workdir_cleanup(dir, path.mnt, index, 1); + if (err) + break; + goto next; + } err = ovl_verify_index(ofs, index); if (!err) { goto next; -- GitLab From 773cb4c56b1bedeb5644f5bd06b76e348bb21634 Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Fri, 3 Apr 2020 08:43:12 +0300 Subject: [PATCH 0466/1971] ovl: prepare to copy up without workdir With index=on, we copy up lower hardlinks to work dir and move them into index dir. Fix locking to allow work dir and index dir to be the same directory. Signed-off-by: Amir Goldstein Signed-off-by: Miklos Szeredi --- fs/overlayfs/copy_up.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c index 9709cf22cab3c..66004534bd40c 100644 --- a/fs/overlayfs/copy_up.c +++ b/fs/overlayfs/copy_up.c @@ -584,9 +584,10 @@ static int ovl_copy_up_workdir(struct ovl_copy_up_ctx *c) .link = c->link }; - err = ovl_lock_rename_workdir(c->workdir, c->destdir); - if (err) - return err; + /* workdir and destdir could be the same when copying up to indexdir */ + err = -EIO; + if (lock_rename(c->workdir, c->destdir) != NULL) + goto unlock; err = ovl_prep_cu_creds(c->dentry, &cc); if (err) -- GitLab From 62a8a85be8355b01330667c2b4676fb60c184380 Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Fri, 3 Apr 2020 10:03:38 +0300 Subject: [PATCH 0467/1971] ovl: index dir act as work dir With index=on, let index dir act as the work dir for copy up and cleanups. 
This will help implementing whiteout inode sharing. We still create the "work" dir on mount regardless of index=on and it is used to test the features supported by upper fs. One reason is that before the feature tests, we do not know if index could be enabled or not. The reason we do not use "index" directory also as workdir with index=off is because the existence of the "index" directory acts as a simple persistent signal that index was enabled on this filesystem and tools may want to use that signal. Signed-off-by: Amir Goldstein Signed-off-by: Miklos Szeredi --- fs/overlayfs/super.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index fbd6207acdbfa..6e3fa96613f50 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -1825,17 +1825,21 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) sb->s_flags |= SB_RDONLY; if (!(ovl_force_readonly(ofs)) && ofs->config.index) { + /* index dir will act also as workdir */ + dput(ofs->workdir); + ofs->workdir = NULL; + iput(ofs->workdir_trap); + ofs->workdir_trap = NULL; + err = ovl_get_indexdir(sb, ofs, oe, &upperpath); if (err) goto out_free_oe; /* Force r/o mount with no index dir */ - if (!ofs->indexdir) { - dput(ofs->workdir); - ofs->workdir = NULL; + if (ofs->indexdir) + ofs->workdir = dget(ofs->indexdir); + else sb->s_flags |= SB_RDONLY; - } - } err = ovl_check_overlapping_layers(sb, ofs); -- GitLab From 32b1924b210a70dcacdf65abd687c5ef86a67541 Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Thu, 9 Apr 2020 11:29:47 +0300 Subject: [PATCH 0468/1971] ovl: skip overlayfs superblocks at global sync Stacked filesystems like overlayfs has no own writeback, but they have to forward syncfs() requests to backend for keeping data integrity. During global sync() each overlayfs instance calls method ->sync_fs() for backend although it itself is in global list of superblocks too. As a result one syscall sync() could write one superblock several times and send multiple disk barriers. This patch adds flag SB_I_SKIP_SYNC into sb->sb_iflags to avoid that. Reported-by: Dmitry Monakhov Signed-off-by: Konstantin Khlebnikov Reviewed-by: Amir Goldstein Signed-off-by: Miklos Szeredi --- fs/overlayfs/super.c | 5 +++-- fs/sync.c | 3 ++- include/linux/fs.h | 2 ++ 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 6e3fa96613f50..f57aa348dcd63 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -261,8 +261,8 @@ static int ovl_sync_fs(struct super_block *sb, int wait) return 0; /* - * If this is a sync(2) call or an emergency sync, all the super blocks - * will be iterated, including upper_sb, so no need to do anything. + * Not called for sync(2) call or an emergency sync (SB_I_SKIP_SYNC). + * All the super blocks will be iterated, including upper_sb. 
* * If this is a syncfs(2) call, then we do need to call * sync_filesystem() on upper_sb, but enough if we do it when being @@ -1870,6 +1870,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) sb->s_xattr = ovl_xattr_handlers; sb->s_fs_info = ofs; sb->s_flags |= SB_POSIXACL; + sb->s_iflags |= SB_I_SKIP_SYNC; err = -ENOMEM; root_dentry = ovl_get_root(sb, upperpath.dentry, oe); diff --git a/fs/sync.c b/fs/sync.c index 4d1ff010bc5af..16c2630ee4bf1 100644 --- a/fs/sync.c +++ b/fs/sync.c @@ -76,7 +76,8 @@ static void sync_inodes_one_sb(struct super_block *sb, void *arg) static void sync_fs_one_sb(struct super_block *sb, void *arg) { - if (!sb_rdonly(sb) && sb->s_op->sync_fs) + if (!sb_rdonly(sb) && !(sb->s_iflags & SB_I_SKIP_SYNC) && + sb->s_op->sync_fs) sb->s_op->sync_fs(sb, *(int *)arg); } diff --git a/include/linux/fs.h b/include/linux/fs.h index 4f6f59b4f22a8..f186a966a36cb 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1409,6 +1409,8 @@ extern int send_sigurg(struct fown_struct *fown); #define SB_I_IMA_UNVERIFIABLE_SIGNATURE 0x00000020 #define SB_I_UNTRUSTED_MOUNTER 0x00000040 +#define SB_I_SKIP_SYNC 0x00000100 /* Skip superblock at global sync */ + /* Possible states of 'frozen' field */ enum { SB_UNFROZEN = 0, /* FS is unfrozen */ -- GitLab From 654255fa205cb2b010e9abb34b0c8afcca9c78c7 Mon Sep 17 00:00:00 2001 From: Jeffle Xu Date: Thu, 23 Apr 2020 19:06:55 +0800 Subject: [PATCH 0469/1971] ovl: inherit SB_NOSEC flag from upperdir Since the stacking of regular file operations [1], the overlayfs edition of write_iter() is called when writing regular files. Since then, xattr lookup is needed on every write since file_remove_privs() is called from ovl_write_iter(), which would become the performance bottleneck when writing small chunks of data. In my test case, file_remove_privs() would consume ~15% CPU when running fstime of unixbench (the workload is repeadly writing 1 KB to the same file) [2]. Inherit the SB_NOSEC flag from upperdir. Since then xattr lookup would be done only once on the first write. Unixbench fstime gets a ~20% performance gain with this patch. [1] https://lore.kernel.org/lkml/20180606150905.GC9426@magnolia/T/ [2] https://www.spinics.net/lists/linux-unionfs/msg07153.html Signed-off-by: Jeffle Xu Signed-off-by: Miklos Szeredi --- fs/overlayfs/super.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index f57aa348dcd63..af69f41f564db 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -1100,6 +1100,18 @@ static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs, upper_mnt->mnt_flags &= ~(MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME); ofs->upper_mnt = upper_mnt; + /* + * Inherit SB_NOSEC flag from upperdir. + * + * This optimization changes behavior when a security related attribute + * (suid/sgid/security.*) is changed on an underlying layer. This is + * okay because we don't yet have guarantees in that case, but it will + * need careful treatment once we want to honour changes to underlying + * filesystems. + */ + if (upper_mnt->mnt_sb->s_flags & SB_NOSEC) + sb->s_flags |= SB_NOSEC; + if (ovl_inuse_trylock(ofs->upper_mnt->mnt_root)) { ofs->upperdir_locked = true; } else { -- GitLab From c21c839b8448dd4b1e37ffc1bde928f57d34c0db Mon Sep 17 00:00:00 2001 From: Chengguang Xu Date: Fri, 24 Apr 2020 10:55:17 +0800 Subject: [PATCH 0470/1971] ovl: whiteout inode sharing Share inode with different whiteout files for saving inode and speeding up delete operation. 
If EMLINK is encountered when linking a shared whiteout, create a new one. In case of any other error, disable sharing for this super block. Note: ofs->whiteout is protected by inode lock on workdir. Signed-off-by: Chengguang Xu Reviewed-by: Amir Goldstein Signed-off-by: Miklos Szeredi --- fs/overlayfs/dir.c | 49 ++++++++++++++++++++++++++++++---------- fs/overlayfs/overlayfs.h | 2 +- fs/overlayfs/ovl_entry.h | 3 +++ fs/overlayfs/readdir.c | 2 +- fs/overlayfs/super.c | 4 ++++ fs/overlayfs/util.c | 3 ++- 6 files changed, 48 insertions(+), 15 deletions(-) diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c index 279009dee3669..09faa63cf24dd 100644 --- a/fs/overlayfs/dir.c +++ b/fs/overlayfs/dir.c @@ -62,35 +62,59 @@ struct dentry *ovl_lookup_temp(struct dentry *workdir) } /* caller holds i_mutex on workdir */ -static struct dentry *ovl_whiteout(struct dentry *workdir) +static struct dentry *ovl_whiteout(struct ovl_fs *ofs) { int err; struct dentry *whiteout; + struct dentry *workdir = ofs->workdir; struct inode *wdir = workdir->d_inode; - whiteout = ovl_lookup_temp(workdir); - if (IS_ERR(whiteout)) - return whiteout; + if (!ofs->whiteout) { + whiteout = ovl_lookup_temp(workdir); + if (IS_ERR(whiteout)) + goto out; - err = ovl_do_whiteout(wdir, whiteout); - if (err) { - dput(whiteout); - whiteout = ERR_PTR(err); + err = ovl_do_whiteout(wdir, whiteout); + if (err) { + dput(whiteout); + whiteout = ERR_PTR(err); + goto out; + } + ofs->whiteout = whiteout; } + if (ofs->share_whiteout) { + whiteout = ovl_lookup_temp(workdir); + if (IS_ERR(whiteout)) + goto out; + + err = ovl_do_link(ofs->whiteout, wdir, whiteout); + if (!err) + goto out; + + if (err != -EMLINK) { + pr_warn("Failed to link whiteout - disabling whiteout inode sharing(nlink=%u, err=%i)\n", + ofs->whiteout->d_inode->i_nlink, err); + ofs->share_whiteout = false; + } + dput(whiteout); + } + whiteout = ofs->whiteout; + ofs->whiteout = NULL; +out: return whiteout; } /* Caller must hold i_mutex on both workdir and dir */ -int ovl_cleanup_and_whiteout(struct dentry *workdir, struct inode *dir, +int ovl_cleanup_and_whiteout(struct ovl_fs *ofs, struct inode *dir, struct dentry *dentry) { - struct inode *wdir = workdir->d_inode; + struct inode *wdir = ofs->workdir->d_inode; struct dentry *whiteout; int err; int flags = 0; - whiteout = ovl_whiteout(workdir); + whiteout = ovl_whiteout(ofs); err = PTR_ERR(whiteout); if (IS_ERR(whiteout)) return err; @@ -715,6 +739,7 @@ static bool ovl_matches_upper(struct dentry *dentry, struct dentry *upper) static int ovl_remove_and_whiteout(struct dentry *dentry, struct list_head *list) { + struct ovl_fs *ofs = OVL_FS(dentry->d_sb); struct dentry *workdir = ovl_workdir(dentry); struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent); struct dentry *upper; @@ -748,7 +773,7 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, goto out_dput_upper; } - err = ovl_cleanup_and_whiteout(workdir, d_inode(upperdir), upper); + err = ovl_cleanup_and_whiteout(ofs, d_inode(upperdir), upper); if (err) goto out_d_drop; diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h index e00b1ff6dea94..76747f5b05175 100644 --- a/fs/overlayfs/overlayfs.h +++ b/fs/overlayfs/overlayfs.h @@ -455,7 +455,7 @@ static inline void ovl_copyflags(struct inode *from, struct inode *to) /* dir.c */ extern const struct inode_operations ovl_dir_inode_operations; -int ovl_cleanup_and_whiteout(struct dentry *workdir, struct inode *dir, +int ovl_cleanup_and_whiteout(struct ovl_fs *ofs, struct inode *dir, struct dentry *dentry); 
struct ovl_cattr { dev_t rdev; diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h index 5762d802fe016..a8f82fb7ffb49 100644 --- a/fs/overlayfs/ovl_entry.h +++ b/fs/overlayfs/ovl_entry.h @@ -68,6 +68,7 @@ struct ovl_fs { /* Did we take the inuse lock? */ bool upperdir_locked; bool workdir_locked; + bool share_whiteout; /* Traps in ovl inode cache */ struct inode *upperdir_trap; struct inode *workbasedir_trap; @@ -77,6 +78,8 @@ struct ovl_fs { int xino_mode; /* For allocation of non-persistent inode numbers */ atomic_long_t last_ino; + /* Whiteout dentry cache */ + struct dentry *whiteout; }; static inline struct ovl_fs *OVL_FS(struct super_block *sb) diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c index 20f5310d3ee42..a10b21b562ad3 100644 --- a/fs/overlayfs/readdir.c +++ b/fs/overlayfs/readdir.c @@ -1154,7 +1154,7 @@ int ovl_indexdir_cleanup(struct ovl_fs *ofs) * Whiteout orphan index to block future open by * handle after overlay nlink dropped to zero. */ - err = ovl_cleanup_and_whiteout(indexdir, dir, index); + err = ovl_cleanup_and_whiteout(ofs, dir, index); } else { /* Cleanup orphan index entries */ err = ovl_cleanup(dir, index); diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index af69f41f564db..a88a7badf4441 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -217,6 +217,7 @@ static void ovl_free_fs(struct ovl_fs *ofs) iput(ofs->indexdir_trap); iput(ofs->workdir_trap); iput(ofs->upperdir_trap); + dput(ofs->whiteout); dput(ofs->indexdir); dput(ofs->workdir); if (ofs->workdir_locked) @@ -1776,6 +1777,9 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) if (!cred) goto out_err; + /* Is there a reason anyone would want not to share whiteouts? */ + ofs->share_whiteout = true; + ofs->config.index = ovl_index_def; ofs->config.nfs_export = ovl_nfs_export_def; ofs->config.xino = ovl_xino_def(); diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c index 36b60788ee473..01755bc186c91 100644 --- a/fs/overlayfs/util.c +++ b/fs/overlayfs/util.c @@ -707,7 +707,8 @@ static void ovl_cleanup_index(struct dentry *dentry) index = NULL; } else if (ovl_index_all(dentry->d_sb)) { /* Whiteout orphan index to block future open by handle */ - err = ovl_cleanup_and_whiteout(indexdir, dir, index); + err = ovl_cleanup_and_whiteout(OVL_FS(dentry->d_sb), + dir, index); } else { /* Cleanup orphan index entries */ err = ovl_cleanup(dir, index); -- GitLab From 399c109d357a7e217cf7ef551e7e234439c68c15 Mon Sep 17 00:00:00 2001 From: Chengguang Xu Date: Wed, 22 Apr 2020 12:28:43 +0800 Subject: [PATCH 0471/1971] ovl: sync dirty data when remounting to ro mode sync_filesystem() does not sync dirty data for readonly filesystem during umount, so before changing to readonly filesystem we should sync dirty data for data integrity. 
Signed-off-by: Chengguang Xu Signed-off-by: Miklos Szeredi --- fs/overlayfs/super.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index a88a7badf4441..60dfb27bc12bc 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -365,11 +365,20 @@ static int ovl_show_options(struct seq_file *m, struct dentry *dentry) static int ovl_remount(struct super_block *sb, int *flags, char *data) { struct ovl_fs *ofs = sb->s_fs_info; + struct super_block *upper_sb; + int ret = 0; if (!(*flags & SB_RDONLY) && ovl_force_readonly(ofs)) return -EROFS; - return 0; + if (*flags & SB_RDONLY && !sb_rdonly(sb)) { + upper_sb = ofs->upper_mnt->mnt_sb; + down_read(&upper_sb->s_umount); + ret = sync_filesystem(upper_sb); + up_read(&upper_sb->s_umount); + } + + return ret; } static const struct super_operations ovl_super_operations = { -- GitLab From 144da23beab87b27992e5e1b41bd954de0bf2581 Mon Sep 17 00:00:00 2001 From: Lubos Dolezel Date: Mon, 4 May 2020 21:35:09 +0200 Subject: [PATCH 0472/1971] ovl: return required buffer size for file handles Overlayfs doesn't work well with the fanotify mechanism. Fanotify first probes for the required buffer size for the file handle, but overlayfs currently bails out without passing the size back. That results in errors in the kernel log, such as: [527944.485384] overlayfs: failed to encode file handle (/, err=-75, buflen=0, len=29, type=1) [527944.485386] fanotify: failed to encode fid (fsid=ae521e68.a434d95f, type=255, bytes=0, err=-2) Signed-off-by: Lubos Dolezel Signed-off-by: Amir Goldstein Signed-off-by: Miklos Szeredi --- fs/overlayfs/export.c | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c index ed5c1078919cc..321e24bf5c60e 100644 --- a/fs/overlayfs/export.c +++ b/fs/overlayfs/export.c @@ -231,12 +231,9 @@ static int ovl_dentry_to_fid(struct dentry *dentry, u32 *fid, int buflen) if (IS_ERR(fh)) return PTR_ERR(fh); - err = -EOVERFLOW; len = OVL_FH_LEN(fh); - if (len > buflen) - goto fail; - - memcpy(fid, fh, len); + if (len <= buflen) + memcpy(fid, fh, len); err = len; out: @@ -244,9 +241,8 @@ out: return err; fail: - pr_warn_ratelimited("failed to encode file handle (%pd2, err=%i, buflen=%d, len=%d, type=%d)\n", - dentry, err, buflen, fh ? (int)fh->fb.len : 0, - fh ? fh->fb.type : 0); + pr_warn_ratelimited("failed to encode file handle (%pd2, err=%i)\n", + dentry, err); goto out; } @@ -254,7 +250,7 @@ static int ovl_encode_fh(struct inode *inode, u32 *fid, int *max_len, struct inode *parent) { struct dentry *dentry; - int bytes = *max_len << 2; + int bytes, buflen = *max_len << 2; /* TODO: encode connectable file handles */ if (parent) @@ -264,12 +260,14 @@ static int ovl_encode_fh(struct inode *inode, u32 *fid, int *max_len, if (WARN_ON(!dentry)) return FILEID_INVALID; - bytes = ovl_dentry_to_fid(dentry, fid, bytes); + bytes = ovl_dentry_to_fid(dentry, fid, buflen); dput(dentry); if (bytes <= 0) return FILEID_INVALID; *max_len = bytes >> 2; + if (bytes > buflen) + return FILEID_INVALID; return OVL_FILEID_V1; } -- GitLab From 188fe480cd65c53dca02a464d62be02ca3158290 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Sat, 25 Apr 2020 16:44:44 +0300 Subject: [PATCH 0473/1971] i2c: designware: Use devm_platform_ioremap_resource() to simplify code Use devm_platform_ioremap_resource() instead of platform_get_resource() + devm_ioremap_resource(). 
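For reference, the shape of this conversion in a generic platform driver probe (hypothetical "foo" driver, sketch only, not designware-specific code) is:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

/* Before: explicit resource lookup followed by the mapping call. */
static int foo_probe_old(struct platform_device *pdev)
{
	struct resource *mem;
	void __iomem *base;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(base))
		return PTR_ERR(base);
	return 0;
}

/* After: one helper looks up and maps MEM resource index 0. */
static int foo_probe_new(struct platform_device *pdev)
{
	void __iomem *base = devm_platform_ioremap_resource(pdev, 0);

	if (IS_ERR(base))
		return PTR_ERR(base);
	return 0;
}
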
Signed-off-by: Andy Shevchenko Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-designware-platdrv.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index c98befe2a92e0..f5d57b8865ef7 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -157,12 +157,10 @@ static int mscc_twi_set_sda_hold_time(struct dw_i2c_dev *dev) static int dw_i2c_of_configure(struct platform_device *pdev) { struct dw_i2c_dev *dev = platform_get_drvdata(pdev); - struct resource *mem; switch (dev->flags & MODEL_MASK) { case MODEL_MSCC_OCELOT: - mem = platform_get_resource(pdev, IORESOURCE_MEM, 1); - dev->ext = devm_ioremap_resource(&pdev->dev, mem); + dev->ext = devm_platform_ioremap_resource(pdev, 1); if (!IS_ERR(dev->ext)) dev->set_sda_hold_time = mscc_twi_set_sda_hold_time; break; @@ -241,7 +239,6 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) struct dw_i2c_dev *dev; struct i2c_timings *t; u32 acpi_speed; - struct resource *mem; int i, irq, ret; irq = platform_get_irq(pdev, 0); @@ -252,8 +249,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) if (!dev) return -ENOMEM; - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - dev->base = devm_ioremap_resource(&pdev->dev, mem); + dev->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(dev->base)) return PTR_ERR(dev->base); -- GitLab From 3ebe40ed1c39016eeae947acc9fd57d6b10d43b2 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Sat, 25 Apr 2020 16:44:45 +0300 Subject: [PATCH 0474/1971] i2c: designware: Move configuration routines to respective modules Move configuration routines to respective modules, i.e. master and slave. 
Signed-off-by: Andy Shevchenko Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-designware-core.h | 12 +++++++ drivers/i2c/busses/i2c-designware-master.c | 24 +++++++++++++ drivers/i2c/busses/i2c-designware-platdrv.c | 38 +-------------------- drivers/i2c/busses/i2c-designware-slave.c | 11 ++++++ 4 files changed, 48 insertions(+), 37 deletions(-) diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h index b220ad64c38d7..b2f894f685233 100644 --- a/drivers/i2c/busses/i2c-designware-core.h +++ b/drivers/i2c/busses/i2c-designware-core.h @@ -314,13 +314,25 @@ static inline void __i2c_dw_disable_nowait(struct dw_i2c_dev *dev) void __i2c_dw_disable(struct dw_i2c_dev *dev); +extern void i2c_dw_configure_master(struct dw_i2c_dev *dev); extern int i2c_dw_probe(struct dw_i2c_dev *dev); + #if IS_ENABLED(CONFIG_I2C_DESIGNWARE_SLAVE) +extern void i2c_dw_configure_slave(struct dw_i2c_dev *dev); extern int i2c_dw_probe_slave(struct dw_i2c_dev *dev); #else +static inline void i2c_dw_configure_slave(struct dw_i2c_dev *dev) { } static inline int i2c_dw_probe_slave(struct dw_i2c_dev *dev) { return -EINVAL; } #endif +static inline void i2c_dw_configure(struct dw_i2c_dev *dev) +{ + if (i2c_detect_slave_mode(dev->dev)) + i2c_dw_configure_slave(dev); + else + i2c_dw_configure_master(dev); +} + #if IS_ENABLED(CONFIG_I2C_DESIGNWARE_BAYTRAIL) extern int i2c_dw_probe_lock_support(struct dw_i2c_dev *dev); #else diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index b6c17b550d31e..139ba98c40331 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c @@ -657,6 +657,30 @@ static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id) return IRQ_HANDLED; } +void i2c_dw_configure_master(struct dw_i2c_dev *dev) +{ + struct i2c_timings *t = &dev->timings; + + dev->functionality = I2C_FUNC_10BIT_ADDR | DW_IC_DEFAULT_FUNCTIONALITY; + + dev->master_cfg = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE | + DW_IC_CON_RESTART_EN; + + dev->mode = DW_IC_MASTER; + + switch (t->bus_freq_hz) { + case I2C_MAX_STANDARD_MODE_FREQ: + dev->master_cfg |= DW_IC_CON_SPEED_STD; + break; + case I2C_MAX_HIGH_SPEED_MODE_FREQ: + dev->master_cfg |= DW_IC_CON_SPEED_HIGH; + break; + default: + dev->master_cfg |= DW_IC_CON_SPEED_FAST; + } +} +EXPORT_SYMBOL_GPL(i2c_dw_configure_master); + static void i2c_dw_prepare_recovery(struct i2c_adapter *adap) { struct dw_i2c_dev *dev = i2c_get_adapdata(adap); diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index f5d57b8865ef7..8ca5d0f7672db 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -184,39 +184,6 @@ static inline int dw_i2c_of_configure(struct platform_device *pdev) } #endif -static void i2c_dw_configure_master(struct dw_i2c_dev *dev) -{ - struct i2c_timings *t = &dev->timings; - - dev->functionality = I2C_FUNC_10BIT_ADDR | DW_IC_DEFAULT_FUNCTIONALITY; - - dev->master_cfg = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE | - DW_IC_CON_RESTART_EN; - - dev->mode = DW_IC_MASTER; - - switch (t->bus_freq_hz) { - case I2C_MAX_STANDARD_MODE_FREQ: - dev->master_cfg |= DW_IC_CON_SPEED_STD; - break; - case I2C_MAX_HIGH_SPEED_MODE_FREQ: - dev->master_cfg |= DW_IC_CON_SPEED_HIGH; - break; - default: - dev->master_cfg |= DW_IC_CON_SPEED_FAST; - } -} - -static void i2c_dw_configure_slave(struct dw_i2c_dev *dev) -{ - dev->functionality = I2C_FUNC_SLAVE | 
DW_IC_DEFAULT_FUNCTIONALITY; - - dev->slave_cfg = DW_IC_CON_RX_FIFO_FULL_HLD_CTRL | - DW_IC_CON_RESTART_EN | DW_IC_CON_STOP_DET_IFADDRESSED; - - dev->mode = DW_IC_SLAVE; -} - static void dw_i2c_plat_pm_cleanup(struct dw_i2c_dev *dev) { pm_runtime_disable(dev->dev); @@ -319,10 +286,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) if (ret) goto exit_reset; - if (i2c_detect_slave_mode(&pdev->dev)) - i2c_dw_configure_slave(dev); - else - i2c_dw_configure_master(dev); + i2c_dw_configure(dev); /* Optional interface clock */ dev->pclk = devm_clk_get_optional(&pdev->dev, "pclk"); diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c index f5ecf76c0d024..576e7af4e94be 100644 --- a/drivers/i2c/busses/i2c-designware-slave.c +++ b/drivers/i2c/busses/i2c-designware-slave.c @@ -241,6 +241,17 @@ static const struct i2c_algorithm i2c_dw_algo = { .unreg_slave = i2c_dw_unreg_slave, }; +void i2c_dw_configure_slave(struct dw_i2c_dev *dev) +{ + dev->functionality = I2C_FUNC_SLAVE | DW_IC_DEFAULT_FUNCTIONALITY; + + dev->slave_cfg = DW_IC_CON_RX_FIFO_FULL_HLD_CTRL | + DW_IC_CON_RESTART_EN | DW_IC_CON_STOP_DET_IFADDRESSED; + + dev->mode = DW_IC_SLAVE; +} +EXPORT_SYMBOL_GPL(i2c_dw_configure_slave); + int i2c_dw_probe_slave(struct dw_i2c_dev *dev) { struct i2c_adapter *adap = &dev->adapter; -- GitLab From 42ab0012340d35ca1bce466285a873613c8109e0 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Sat, 25 Apr 2020 16:44:46 +0300 Subject: [PATCH 0475/1971] i2c: designware: Switch PCI driver to use i2c_dw_configure_master() Since we have available helper to configure master mode, let's use it in the PCI driver instead of spread open-coded variant. Signed-off-by: Andy Shevchenko Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-designware-pcidrv.c | 33 +++++----------------- 1 file changed, 7 insertions(+), 26 deletions(-) diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c index 7a0b65b5b5b59..d5e5abc03683f 100644 --- a/drivers/i2c/busses/i2c-designware-pcidrv.c +++ b/drivers/i2c/busses/i2c-designware-pcidrv.c @@ -46,20 +46,14 @@ struct dw_scl_sda_cfg { struct dw_pci_controller { u32 bus_num; - u32 bus_cfg; u32 tx_fifo_depth; u32 rx_fifo_depth; u32 clk_khz; - u32 functionality; u32 flags; struct dw_scl_sda_cfg *scl_sda_cfg; int (*setup)(struct pci_dev *pdev, struct dw_pci_controller *c); }; -#define INTEL_MID_STD_CFG (DW_IC_CON_MASTER | \ - DW_IC_CON_SLAVE_DISABLE | \ - DW_IC_CON_RESTART_EN) - /* Merrifield HCNT/LCNT/SDA hold time */ static struct dw_scl_sda_cfg mrfld_config = { .ss_hcnt = 0x2f8, @@ -88,10 +82,11 @@ static struct dw_scl_sda_cfg hsw_config = { static int mfld_setup(struct pci_dev *pdev, struct dw_pci_controller *c) { + struct dw_i2c_dev *dev = dev_get_drvdata(&pdev->dev); + switch (pdev->device) { case 0x0817: - c->bus_cfg &= ~DW_IC_CON_SPEED_MASK; - c->bus_cfg |= DW_IC_CON_SPEED_STD; + dev->timings.bus_freq_hz = I2C_MAX_STANDARD_MODE_FREQ; /* fall through */ case 0x0818: case 0x0819: @@ -128,53 +123,41 @@ static int mrfld_setup(struct pci_dev *pdev, struct dw_pci_controller *c) static struct dw_pci_controller dw_pci_controllers[] = { [medfield] = { .bus_num = -1, - .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST, .tx_fifo_depth = 32, .rx_fifo_depth = 32, - .functionality = I2C_FUNC_10BIT_ADDR, .clk_khz = 25000, .setup = mfld_setup, }, [merrifield] = { .bus_num = -1, - .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST, .tx_fifo_depth = 64, .rx_fifo_depth = 64, - .functionality = 
I2C_FUNC_10BIT_ADDR, .scl_sda_cfg = &mrfld_config, .setup = mrfld_setup, }, [baytrail] = { .bus_num = -1, - .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST, .tx_fifo_depth = 32, .rx_fifo_depth = 32, - .functionality = I2C_FUNC_10BIT_ADDR, .scl_sda_cfg = &byt_config, }, [haswell] = { .bus_num = -1, - .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST, .tx_fifo_depth = 32, .rx_fifo_depth = 32, - .functionality = I2C_FUNC_10BIT_ADDR, .scl_sda_cfg = &hsw_config, }, [cherrytrail] = { .bus_num = -1, - .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST, .tx_fifo_depth = 32, .rx_fifo_depth = 32, - .functionality = I2C_FUNC_10BIT_ADDR, .flags = MODEL_CHERRYTRAIL, .scl_sda_cfg = &byt_config, }, [elkhartlake] = { .bus_num = -1, - .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST, .tx_fifo_depth = 32, .rx_fifo_depth = 32, - .functionality = I2C_FUNC_10BIT_ADDR, .clk_khz = 100000, }, }; @@ -250,14 +233,16 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev, if (r < 0) return r; - dev->clk = NULL; dev->controller = controller; dev->get_clk_rate_khz = i2c_dw_get_clk_rate_khz; + dev->timings.bus_freq_hz = I2C_MAX_FAST_MODE_FREQ; dev->base = pcim_iomap_table(pdev)[0]; dev->dev = &pdev->dev; dev->irq = pci_irq_vector(pdev, 0); dev->flags |= controller->flags; + pci_set_drvdata(pdev, dev); + if (controller->setup) { r = controller->setup(pdev, controller); if (r) { @@ -266,10 +251,8 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev, } } - dev->functionality = controller->functionality | - DW_IC_DEFAULT_FUNCTIONALITY; + i2c_dw_configure_master(dev); - dev->master_cfg = controller->bus_cfg; if (controller->scl_sda_cfg) { cfg = controller->scl_sda_cfg; dev->ss_hcnt = cfg->ss_hcnt; @@ -279,8 +262,6 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev, dev->sda_hold_time = cfg->sda_hold; } - pci_set_drvdata(pdev, dev); - dev->tx_fifo_depth = controller->tx_fifo_depth; dev->rx_fifo_depth = controller->rx_fifo_depth; -- GitLab From bed20c84021eb6dfc9df1590d8e231895b0392aa Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Sat, 25 Apr 2020 16:44:47 +0300 Subject: [PATCH 0476/1971] i2c: designware: Rename i2c_dw_probe() to i2c_dw_probe_master() As a preparatory patch to support slave mode for PCI enumerated devices rename i2c_dw_probe() to i2c_dw_probe_master() and split common i2c_dw_probe() as inline helper. 
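As a usage sketch only (the caller fragment below is illustrative and not taken from this patch), bus-glue drivers end up configuring and probing through the mode-agnostic helpers:

	/* dev is the driver's struct dw_i2c_dev instance */
	i2c_dw_configure(dev);		/* selects DW_IC_MASTER or DW_IC_SLAVE from firmware */

	ret = i2c_dw_probe(dev);	/* dispatches to i2c_dw_probe_master()/_slave() */
	if (ret)
		return ret;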
Signed-off-by: Andy Shevchenko Signed-off-by: Wolfram Sang --- drivers/i2c/busses/Makefile | 3 ++- drivers/i2c/busses/i2c-designware-core.h | 15 ++++++++++++++- drivers/i2c/busses/i2c-designware-master.c | 4 ++-- drivers/i2c/busses/i2c-designware-pcidrv.c | 2 +- drivers/i2c/busses/i2c-designware-platdrv.c | 6 +----- 5 files changed, 20 insertions(+), 10 deletions(-) diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index c852d27370377..d5f56e6179a4b 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -49,7 +49,8 @@ obj-$(CONFIG_I2C_CBUS_GPIO) += i2c-cbus-gpio.o obj-$(CONFIG_I2C_CPM) += i2c-cpm.o obj-$(CONFIG_I2C_DAVINCI) += i2c-davinci.o obj-$(CONFIG_I2C_DESIGNWARE_CORE) += i2c-designware-core.o -i2c-designware-core-objs := i2c-designware-common.o i2c-designware-master.o +i2c-designware-core-objs := i2c-designware-common.o +i2c-designware-core-objs += i2c-designware-master.o ifeq ($(CONFIG_I2C_DESIGNWARE_SLAVE),y) i2c-designware-core-objs += i2c-designware-slave.o endif diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h index b2f894f685233..1674caf277451 100644 --- a/drivers/i2c/busses/i2c-designware-core.h +++ b/drivers/i2c/busses/i2c-designware-core.h @@ -315,7 +315,7 @@ static inline void __i2c_dw_disable_nowait(struct dw_i2c_dev *dev) void __i2c_dw_disable(struct dw_i2c_dev *dev); extern void i2c_dw_configure_master(struct dw_i2c_dev *dev); -extern int i2c_dw_probe(struct dw_i2c_dev *dev); +extern int i2c_dw_probe_master(struct dw_i2c_dev *dev); #if IS_ENABLED(CONFIG_I2C_DESIGNWARE_SLAVE) extern void i2c_dw_configure_slave(struct dw_i2c_dev *dev); @@ -325,6 +325,19 @@ static inline void i2c_dw_configure_slave(struct dw_i2c_dev *dev) { } static inline int i2c_dw_probe_slave(struct dw_i2c_dev *dev) { return -EINVAL; } #endif +static inline int i2c_dw_probe(struct dw_i2c_dev *dev) +{ + switch (dev->mode) { + case DW_IC_SLAVE: + return i2c_dw_probe_slave(dev); + case DW_IC_MASTER: + return i2c_dw_probe_master(dev); + default: + dev_err(dev->dev, "Wrong operation mode: %d\n", dev->mode); + return -EINVAL; + } +} + static inline void i2c_dw_configure(struct dw_i2c_dev *dev) { if (i2c_detect_slave_mode(dev->dev)) diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index 139ba98c40331..95eeec53c744f 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c @@ -727,7 +727,7 @@ static int i2c_dw_init_recovery_info(struct dw_i2c_dev *dev) return 0; } -int i2c_dw_probe(struct dw_i2c_dev *dev) +int i2c_dw_probe_master(struct dw_i2c_dev *dev) { struct i2c_adapter *adap = &dev->adapter; unsigned long irq_flags; @@ -794,7 +794,7 @@ int i2c_dw_probe(struct dw_i2c_dev *dev) return ret; } -EXPORT_SYMBOL_GPL(i2c_dw_probe); +EXPORT_SYMBOL_GPL(i2c_dw_probe_master); MODULE_DESCRIPTION("Synopsys DesignWare I2C bus master adapter"); MODULE_LICENSE("GPL"); diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c index d5e5abc03683f..7e994f366a5e8 100644 --- a/drivers/i2c/busses/i2c-designware-pcidrv.c +++ b/drivers/i2c/busses/i2c-designware-pcidrv.c @@ -271,7 +271,7 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev, ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev)); adap->nr = controller->bus_num; - r = i2c_dw_probe(dev); + r = i2c_dw_probe_master(dev); if (r) { pci_free_irq_vectors(pdev); return r; diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c 
b/drivers/i2c/busses/i2c-designware-platdrv.c index 8ca5d0f7672db..c6f04449036a8 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -331,11 +331,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); - if (dev->mode == DW_IC_SLAVE) - ret = i2c_dw_probe_slave(dev); - else - ret = i2c_dw_probe(dev); - + ret = i2c_dw_probe(dev); if (ret) goto exit_probe; -- GitLab From 7943f1d178833518aa5d25249f8e88068517bac8 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Sat, 25 Apr 2020 16:44:48 +0300 Subject: [PATCH 0477/1971] i2c: designware: Allow slave mode for PCI enumerated devices Allow slave mode for PCI enumerated devices by calling a common i2c_dw_probe() instead of i2c_dw_probe_master(). While dropping dependency to platform driver in slave module, move its configuration section above, closer to core. Signed-off-by: Andy Shevchenko Acked-by: Jarkko Nikula Signed-off-by: Wolfram Sang --- drivers/i2c/busses/Kconfig | 21 ++++++++++----------- drivers/i2c/busses/i2c-designware-pcidrv.c | 4 ++-- 2 files changed, 12 insertions(+), 13 deletions(-) diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 2f6e39b41e6c0..6bf68d52a65a9 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -527,6 +527,16 @@ config I2C_DAVINCI config I2C_DESIGNWARE_CORE tristate +config I2C_DESIGNWARE_SLAVE + bool "Synopsys DesignWare Slave" + select I2C_SLAVE + help + If you say yes to this option, support will be included for the + Synopsys DesignWare I2C slave adapter. + + This is not a standalone module, this module compiles together with + i2c-designware-core. + config I2C_DESIGNWARE_PLATFORM tristate "Synopsys DesignWare Platform" select I2C_DESIGNWARE_CORE @@ -538,17 +548,6 @@ config I2C_DESIGNWARE_PLATFORM This driver can also be built as a module. If so, the module will be called i2c-designware-platform. -config I2C_DESIGNWARE_SLAVE - bool "Synopsys DesignWare Slave" - select I2C_SLAVE - depends on I2C_DESIGNWARE_PLATFORM - help - If you say yes to this option, support will be included for the - Synopsys DesignWare I2C slave adapter. - - This is not a standalone module, this module compiles together with - i2c-designware-core. - config I2C_DESIGNWARE_PCI tristate "Synopsys DesignWare PCI" depends on PCI diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c index 7e994f366a5e8..c762e5a11e44e 100644 --- a/drivers/i2c/busses/i2c-designware-pcidrv.c +++ b/drivers/i2c/busses/i2c-designware-pcidrv.c @@ -251,7 +251,7 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev, } } - i2c_dw_configure_master(dev); + i2c_dw_configure(dev); if (controller->scl_sda_cfg) { cfg = controller->scl_sda_cfg; @@ -271,7 +271,7 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev, ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev)); adap->nr = controller->bus_num; - r = i2c_dw_probe_master(dev); + r = i2c_dw_probe(dev); if (r) { pci_free_irq_vectors(pdev); return r; -- GitLab From e47bd937e602bb4379546095d1bd0b9871fa60c2 Mon Sep 17 00:00:00 2001 From: Marek Szyprowski Date: Wed, 6 May 2020 15:26:58 +0200 Subject: [PATCH 0478/1971] clk: samsung: Mark top ISP and CAM clocks on Exynos542x as critical The TOP 'aclk*_isp', 'aclk550_cam', 'gscl_wa' and 'gscl_wb' clocks must be kept enabled all the time to allow proper access to power management control for the ISP and CAM power domains. 
The last two clocks, although related to GScaler device and GSCL power domain, provides also the I_WRAP_CLK signal to MIPI CSIS0/1 devices, which are a part of CAM power domain and are needed for proper power on/off sequence. Currently there are no drivers for the devices, which are part of CAM and ISP power domains yet. This patch only fixes the race between disabling the unused power domains and disabling unused clocks, which randomly resulted in the following error during boot: Power domain CAM disable failed Power domain ISP disable failed Fixes: 318fa46cc60d ("clk/samsung: exynos542x: mark some clocks as critical") Signed-off-by: Marek Szyprowski Acked-by: Chanwoo Choi Signed-off-by: Sylwester Nawrocki --- drivers/clk/samsung/clk-exynos5420.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c index c9e5a1fb66539..edb2363c735af 100644 --- a/drivers/clk/samsung/clk-exynos5420.c +++ b/drivers/clk/samsung/clk-exynos5420.c @@ -540,7 +540,7 @@ static const struct samsung_div_clock exynos5800_div_clks[] __initconst = { static const struct samsung_gate_clock exynos5800_gate_clks[] __initconst = { GATE(CLK_ACLK550_CAM, "aclk550_cam", "mout_user_aclk550_cam", - GATE_BUS_TOP, 24, 0, 0), + GATE_BUS_TOP, 24, CLK_IS_CRITICAL, 0), GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler", GATE_BUS_TOP, 27, CLK_IS_CRITICAL, 0), }; @@ -943,25 +943,25 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = { GATE(0, "aclk300_jpeg", "mout_user_aclk300_jpeg", GATE_BUS_TOP, 4, CLK_IGNORE_UNUSED, 0), GATE(0, "aclk333_432_isp0", "mout_user_aclk333_432_isp0", - GATE_BUS_TOP, 5, 0, 0), + GATE_BUS_TOP, 5, CLK_IS_CRITICAL, 0), GATE(0, "aclk300_gscl", "mout_user_aclk300_gscl", GATE_BUS_TOP, 6, CLK_IS_CRITICAL, 0), GATE(0, "aclk333_432_gscl", "mout_user_aclk333_432_gscl", GATE_BUS_TOP, 7, CLK_IGNORE_UNUSED, 0), GATE(0, "aclk333_432_isp", "mout_user_aclk333_432_isp", - GATE_BUS_TOP, 8, 0, 0), + GATE_BUS_TOP, 8, CLK_IS_CRITICAL, 0), GATE(CLK_PCLK66_GPIO, "pclk66_gpio", "mout_user_pclk66_gpio", GATE_BUS_TOP, 9, CLK_IGNORE_UNUSED, 0), GATE(0, "aclk66_psgen", "mout_user_aclk66_psgen", GATE_BUS_TOP, 10, CLK_IGNORE_UNUSED, 0), GATE(0, "aclk266_isp", "mout_user_aclk266_isp", - GATE_BUS_TOP, 13, 0, 0), + GATE_BUS_TOP, 13, CLK_IS_CRITICAL, 0), GATE(0, "aclk166", "mout_user_aclk166", GATE_BUS_TOP, 14, CLK_IGNORE_UNUSED, 0), GATE(CLK_ACLK333, "aclk333", "mout_user_aclk333", GATE_BUS_TOP, 15, CLK_IS_CRITICAL, 0), GATE(0, "aclk400_isp", "mout_user_aclk400_isp", - GATE_BUS_TOP, 16, 0, 0), + GATE_BUS_TOP, 16, CLK_IS_CRITICAL, 0), GATE(0, "aclk400_mscl", "mout_user_aclk400_mscl", GATE_BUS_TOP, 17, CLK_IS_CRITICAL, 0), GATE(0, "aclk200_disp1", "mout_user_aclk200_disp1", @@ -1161,8 +1161,10 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = { GATE_IP_GSCL1, 3, 0, 0), GATE(CLK_SMMU_FIMCL1, "smmu_fimcl1", "dout_gscl_blk_333", GATE_IP_GSCL1, 4, 0, 0), - GATE(CLK_GSCL_WA, "gscl_wa", "sclk_gscl_wa", GATE_IP_GSCL1, 12, 0, 0), - GATE(CLK_GSCL_WB, "gscl_wb", "sclk_gscl_wb", GATE_IP_GSCL1, 13, 0, 0), + GATE(CLK_GSCL_WA, "gscl_wa", "sclk_gscl_wa", GATE_IP_GSCL1, 12, + CLK_IS_CRITICAL, 0), + GATE(CLK_GSCL_WB, "gscl_wb", "sclk_gscl_wb", GATE_IP_GSCL1, 13, + CLK_IS_CRITICAL, 0), GATE(CLK_SMMU_FIMCL3, "smmu_fimcl3,", "dout_gscl_blk_333", GATE_IP_GSCL1, 16, 0, 0), GATE(CLK_FIMC_LITE3, "fimc_lite3", "aclk333_432_gscl", -- GitLab From 4b159cf13a8a504c1809890121755d04d15b1c22 Mon Sep 
17 00:00:00 2001 From: Marek Szyprowski Date: Wed, 6 May 2020 15:26:59 +0200 Subject: [PATCH 0479/1971] clk: samsung: Fix CLK_SMMU_FIMCL3 clock name on Exynos542x The proper name for CLK_SMMU_FIMCL3 is "smmu_fimcl3". Remove obvious typo. Signed-off-by: Marek Szyprowski Acked-by: Chanwoo Choi Signed-off-by: Sylwester Nawrocki --- drivers/clk/samsung/clk-exynos5420.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c index edb2363c735af..fea33399a632d 100644 --- a/drivers/clk/samsung/clk-exynos5420.c +++ b/drivers/clk/samsung/clk-exynos5420.c @@ -1165,7 +1165,7 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = { CLK_IS_CRITICAL, 0), GATE(CLK_GSCL_WB, "gscl_wb", "sclk_gscl_wb", GATE_IP_GSCL1, 13, CLK_IS_CRITICAL, 0), - GATE(CLK_SMMU_FIMCL3, "smmu_fimcl3,", "dout_gscl_blk_333", + GATE(CLK_SMMU_FIMCL3, "smmu_fimcl3", "dout_gscl_blk_333", GATE_IP_GSCL1, 16, 0, 0), GATE(CLK_FIMC_LITE3, "fimc_lite3", "aclk333_432_gscl", GATE_IP_GSCL1, 17, 0, 0), -- GitLab From d24224dea57108f96d13579f20206c339bb8e52f Mon Sep 17 00:00:00 2001 From: Jason Yan Date: Mon, 4 May 2020 19:34:06 +0800 Subject: [PATCH 0480/1971] dmaengine: qcom_hidma: use true,false for bool variable Fix the following coccicheck warning: drivers/dma/qcom/hidma.c:553:1-17: WARNING: Assignment of 0/1 to bool variable Signed-off-by: Jason Yan Acked By: Sinan Kaya Link: https://lore.kernel.org/r/20200504113406.41530-1-yanaijie@huawei.com Signed-off-by: Vinod Koul --- drivers/dma/qcom/hidma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c index 87490e125bc3c..0a6d3ea08c782 100644 --- a/drivers/dma/qcom/hidma.c +++ b/drivers/dma/qcom/hidma.c @@ -550,7 +550,7 @@ static void hidma_free_chan_resources(struct dma_chan *dmach) kfree(mdesc); } - mchan->allocated = 0; + mchan->allocated = false; spin_unlock_irqrestore(&mchan->lock, irqflags); } -- GitLab From f2b5d503c04a28828af8e41fcd4a5d9fd978a78e Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Fri, 1 May 2020 12:08:24 +0200 Subject: [PATCH 0481/1971] dmaengine: sf-pdma: Simplify the error handling path in 'sf_pdma_probe()' There is no need to explicitly free memory that have been 'devm_kzalloc'ed. Simplify the probe function accordingly. Signed-off-by: Christophe JAILLET Tested-by: Green Wan Reviewed-by: Green Wan Link: https://lore.kernel.org/r/20200501100824.126534-1-christophe.jaillet@wanadoo.fr Signed-off-by: Vinod Koul --- drivers/dma/sf-pdma/sf-pdma.c | 25 +++++++------------------ 1 file changed, 7 insertions(+), 18 deletions(-) diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c index 6d0bec9476365..5c118c7e02bdc 100644 --- a/drivers/dma/sf-pdma/sf-pdma.c +++ b/drivers/dma/sf-pdma/sf-pdma.c @@ -506,11 +506,11 @@ static int sf_pdma_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); pdma->membase = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(pdma->membase)) - goto ERR_MEMBASE; + return PTR_ERR(pdma->membase); ret = sf_pdma_irq_init(pdev, pdma); if (ret) - goto ERR_INITIRQ; + return ret; sf_pdma_setup_chans(pdma); @@ -544,24 +544,13 @@ static int sf_pdma_probe(struct platform_device *pdev) "Failed to set DMA mask. Fall back to default.\n"); ret = dma_async_device_register(&pdma->dma_dev); - if (ret) - goto ERR_REG_DMADEVICE; + if (ret) { + dev_err(&pdev->dev, + "Can't register SiFive Platform DMA. 
(%d)\n", ret); + return ret; + } return 0; - -ERR_MEMBASE: - devm_kfree(&pdev->dev, pdma); - return PTR_ERR(pdma->membase); - -ERR_INITIRQ: - devm_kfree(&pdev->dev, pdma); - return ret; - -ERR_REG_DMADEVICE: - devm_kfree(&pdev->dev, pdma); - dev_err(&pdev->dev, - "Can't register SiFive Platform DMA. (%d)\n", ret); - return ret; } static int sf_pdma_remove(struct platform_device *pdev) -- GitLab From c794f7edbcd7069d888d3d49866e6adacccf7766 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Tue, 5 May 2020 10:13:53 +0000 Subject: [PATCH 0482/1971] dmaengine: moxart-dma: Drop pointless static qualifier in moxart_probe() There is no need to have the 'void __iomem *dma_base_addr' variable static since new value always be assigned before use it. Signed-off-by: YueHaibing Link: https://lore.kernel.org/r/20200505101353.195446-1-yuehaibing@huawei.com Signed-off-by: Vinod Koul --- drivers/dma/moxart-dma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c index e04499c1f27f4..4ab493d463759 100644 --- a/drivers/dma/moxart-dma.c +++ b/drivers/dma/moxart-dma.c @@ -568,7 +568,7 @@ static int moxart_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct device_node *node = dev->of_node; struct resource *res; - static void __iomem *dma_base_addr; + void __iomem *dma_base_addr; int ret, i; unsigned int irq; struct moxart_chan *ch; -- GitLab From 214a0006b2c83ffa41794a424a9dd6758ca9eb4d Mon Sep 17 00:00:00 2001 From: Samuel Zou Date: Wed, 6 May 2020 17:25:46 +0800 Subject: [PATCH 0483/1971] dmaengine: ti: k3-udma: Use PTR_ERR_OR_ZERO() to simplify code Fixes coccicheck warnings: drivers/dma/ti/k3-udma.c:1294:1-3: WARNING: PTR_ERR_OR_ZERO can be used drivers/dma/ti/k3-udma.c:1311:1-3: WARNING: PTR_ERR_OR_ZERO can be used drivers/dma/ti/k3-udma.c:1376:1-3: WARNING: PTR_ERR_OR_ZERO can be used Reported-by: Hulk Robot Signed-off-by: Samuel Zou Link: https://lore.kernel.org/r/1588757146-38858-1-git-send-email-zou_wei@huawei.com Signed-off-by: Vinod Koul --- drivers/dma/ti/k3-udma.c | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c index fa53234d8f097..80d64f71a7cb4 100644 --- a/drivers/dma/ti/k3-udma.c +++ b/drivers/dma/ti/k3-udma.c @@ -1291,10 +1291,8 @@ static int udma_get_tchan(struct udma_chan *uc) } uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl, -1); - if (IS_ERR(uc->tchan)) - return PTR_ERR(uc->tchan); - return 0; + return PTR_ERR_OR_ZERO(uc->tchan); } static int udma_get_rchan(struct udma_chan *uc) @@ -1308,10 +1306,8 @@ static int udma_get_rchan(struct udma_chan *uc) } uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl, -1); - if (IS_ERR(uc->rchan)) - return PTR_ERR(uc->rchan); - return 0; + return PTR_ERR_OR_ZERO(uc->rchan); } static int udma_get_chan_pair(struct udma_chan *uc) @@ -1373,10 +1369,8 @@ static int udma_get_rflow(struct udma_chan *uc, int flow_id) } uc->rflow = __udma_get_rflow(ud, flow_id); - if (IS_ERR(uc->rflow)) - return PTR_ERR(uc->rflow); - return 0; + return PTR_ERR_OR_ZERO(uc->rflow); } static void udma_put_rchan(struct udma_chan *uc) -- GitLab From c18b5bdebd67112314fe73762d269d381ce3073c Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. 
Silva" Date: Fri, 8 May 2020 16:07:07 -0500 Subject: [PATCH 0484/1971] dmaengine: qcom: bam_dma: Replace zero-length array with flexible-array The current codebase makes use of the zero-length array language extension to the C90 standard, but the preferred mechanism to declare variable-length types such as these ones is a flexible array member[1][2], introduced in C99: struct foo { int stuff; struct boo array[]; }; By making use of the mechanism above, we will get a compiler warning in case the flexible array does not occur last in the structure, which will help us prevent some kind of undefined behavior bugs from being inadvertently introduced[3] to the codebase from now on. Also, notice that, dynamic memory allocations won't be affected by this change: "Flexible array members have incomplete type, and so the sizeof operator may not be applied. As a quirk of the original implementation of zero-length arrays, sizeof evaluates to zero."[1] sizeof(flexible-array-member) triggers a warning because flexible array members have incomplete type[1]. There are some instances of code in which the sizeof operator is being incorrectly/erroneously applied to zero-length arrays and the result is zero. Such instances may be hiding some bugs. So, this work (flexible-array member conversions) will also help to get completely rid of those sorts of issues. This issue was found with the help of Coccinelle. [1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html [2] https://github.com/KSPP/linux/issues/21 [3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour") Reviewed-by: Jeffrey Hugo Signed-off-by: Gustavo A. R. Silva Link: https://lore.kernel.org/r/20200508210707.GA24136@embeddedor Signed-off-by: Vinod Koul --- drivers/dma/qcom/bam_dma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c index ef73f65224b1b..5a08dd0d33887 100644 --- a/drivers/dma/qcom/bam_dma.c +++ b/drivers/dma/qcom/bam_dma.c @@ -74,7 +74,7 @@ struct bam_async_desc { struct list_head desc_node; enum dma_transfer_direction dir; size_t length; - struct bam_desc_hw desc[0]; + struct bam_desc_hw desc[]; }; enum bam_reg { -- GitLab From e05a0b78f39f39488c3ea8866aee2b953e206a02 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Thu, 7 May 2020 14:00:38 -0500 Subject: [PATCH 0485/1971] dmaengine: at_hdmac: Replace zero-length array with flexible-array The current codebase makes use of the zero-length array language extension to the C90 standard, but the preferred mechanism to declare variable-length types such as these ones is a flexible array member[1][2], introduced in C99: struct foo { int stuff; struct boo array[]; }; By making use of the mechanism above, we will get a compiler warning in case the flexible array does not occur last in the structure, which will help us prevent some kind of undefined behavior bugs from being inadvertently introduced[3] to the codebase from now on. Also, notice that, dynamic memory allocations won't be affected by this change: "Flexible array members have incomplete type, and so the sizeof operator may not be applied. As a quirk of the original implementation of zero-length arrays, sizeof evaluates to zero."[1] sizeof(flexible-array-member) triggers a warning because flexible array members have incomplete type[1]. There are some instances of code in which the sizeof operator is being incorrectly/erroneously applied to zero-length arrays and the result is zero. Such instances may be hiding some bugs. 
So, this work (flexible-array member conversions) will also help to get completely rid of those sorts of issues. This issue was found with the help of Coccinelle. [1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html [2] https://github.com/KSPP/linux/issues/21 [3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour") Signed-off-by: Gustavo A. R. Silva Acked-by: Ludovic Desroches Link: https://lore.kernel.org/r/20200507190038.GA15272@embeddedor Signed-off-by: Vinod Koul --- drivers/dma/at_hdmac_regs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index 397692e937b31..80fc2fe8c77ea 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h @@ -331,7 +331,7 @@ struct at_dma { struct dma_pool *dma_desc_pool; struct dma_pool *memset_pool; /* AT THE END channels table */ - struct at_dma_chan chan[0]; + struct at_dma_chan chan[]; }; #define dma_readl(atdma, name) \ -- GitLab From d9fd428a304f2e33caf609011cdc9f17a8d3fb9e Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Thu, 7 May 2020 14:00:46 -0500 Subject: [PATCH 0486/1971] dmaengine: at_xdmac: Replace zero-length array with flexible-array The current codebase makes use of the zero-length array language extension to the C90 standard, but the preferred mechanism to declare variable-length types such as these ones is a flexible array member[1][2], introduced in C99: struct foo { int stuff; struct boo array[]; }; By making use of the mechanism above, we will get a compiler warning in case the flexible array does not occur last in the structure, which will help us prevent some kind of undefined behavior bugs from being inadvertently introduced[3] to the codebase from now on. Also, notice that, dynamic memory allocations won't be affected by this change: "Flexible array members have incomplete type, and so the sizeof operator may not be applied. As a quirk of the original implementation of zero-length arrays, sizeof evaluates to zero."[1] sizeof(flexible-array-member) triggers a warning because flexible array members have incomplete type[1]. There are some instances of code in which the sizeof operator is being incorrectly/erroneously applied to zero-length arrays and the result is zero. Such instances may be hiding some bugs. So, this work (flexible-array member conversions) will also help to get completely rid of those sorts of issues. This issue was found with the help of Coccinelle. [1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html [2] https://github.com/KSPP/linux/issues/21 [3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour") Signed-off-by: Gustavo A. R. 
Silva Acked-by: Ludovic Desroches Link: https://lore.kernel.org/r/20200507190046.GA15298@embeddedor Signed-off-by: Vinod Koul --- drivers/dma/at_xdmac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index bb0eaf38b5945..fd92f048c4918 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -212,7 +212,7 @@ struct at_xdmac { struct clk *clk; u32 save_gim; struct dma_pool *at_xdmac_desc_pool; - struct at_xdmac_chan chan[0]; + struct at_xdmac_chan chan[]; }; -- GitLab From d364436337007f58356bbe272a0a2adc0fc80d4e Mon Sep 17 00:00:00 2001 From: Brian Masney Date: Tue, 12 May 2020 20:03:10 -0700 Subject: [PATCH 0487/1971] dt-bindings: Input: remove msm-vibrator The address referenced in this binding is within the Qualcomm Clock namespace so let's drop the msm-vibrator bindings so that a more generic solution can be used instead. No one is currently using these bindings so this won't affect any users. Signed-off-by: Brian Masney Acked-by: Rob Herring Link: https://lore.kernel.org/r/20200513013140.69935-2-masneyb@onstation.org Signed-off-by: Dmitry Torokhov --- .../bindings/input/msm-vibrator.txt | 36 ------------------- 1 file changed, 36 deletions(-) delete mode 100644 Documentation/devicetree/bindings/input/msm-vibrator.txt diff --git a/Documentation/devicetree/bindings/input/msm-vibrator.txt b/Documentation/devicetree/bindings/input/msm-vibrator.txt deleted file mode 100644 index 8dcf014ef2e57..0000000000000 --- a/Documentation/devicetree/bindings/input/msm-vibrator.txt +++ /dev/null @@ -1,36 +0,0 @@ -* Device tree bindings for the Qualcomm MSM vibrator - -Required properties: - - - compatible: Should be one of - "qcom,msm8226-vibrator" - "qcom,msm8974-vibrator" - - reg: the base address and length of the IO memory for the registers. - - pinctrl-names: set to default. - - pinctrl-0: phandles pointing to pin configuration nodes. See - Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt - - clock-names: set to pwm - - clocks: phandle of the clock. See - Documentation/devicetree/bindings/clock/clock-bindings.txt - - enable-gpios: GPIO that enables the vibrator. - -Optional properties: - - - vcc-supply: phandle to the regulator that provides power to the sensor. - -Example from a LG Nexus 5 (hammerhead) phone: - -vibrator@fd8c3450 { - reg = <0xfd8c3450 0x400>; - compatible = "qcom,msm8974-vibrator"; - - vcc-supply = <&pm8941_l19>; - - clocks = <&mmcc CAMSS_GP1_CLK>; - clock-names = "pwm"; - - enable-gpios = <&msmgpio 60 GPIO_ACTIVE_HIGH>; - - pinctrl-names = "default"; - pinctrl-0 = <&vibrator_pin>; -}; -- GitLab From 2ecf9487a7c86e033bab007d63d61f2ec2ca093f Mon Sep 17 00:00:00 2001 From: Brian Masney Date: Tue, 12 May 2020 20:03:31 -0700 Subject: [PATCH 0488/1971] Input: remove msm-vibrator driver The address referenced by this driver is within the Qualcomm Clock namespace so let's drop the msm-vibrator bindings so that a more generic solution can be used instead. No one is currently using driver so this won't affect any users. 
Signed-off-by: Brian Masney Link: https://lore.kernel.org/r/20200513013140.69935-3-masneyb@onstation.org Signed-off-by: Dmitry Torokhov --- drivers/input/misc/Kconfig | 10 -- drivers/input/misc/Makefile | 1 - drivers/input/misc/msm-vibrator.c | 281 ------------------------------ 3 files changed, 292 deletions(-) delete mode 100644 drivers/input/misc/msm-vibrator.c diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index 293e55fb7a4e4..315b14d59ad7d 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig @@ -117,16 +117,6 @@ config INPUT_E3X0_BUTTON To compile this driver as a module, choose M here: the module will be called e3x0_button. -config INPUT_MSM_VIBRATOR - tristate "Qualcomm MSM vibrator driver" - select INPUT_FF_MEMLESS - help - Support for the vibrator that is found on various Qualcomm MSM - SOCs. - - To compile this driver as a module, choose M here: the module - will be called msm_vibrator. - config INPUT_PCSPKR tristate "PC Speaker support" depends on PCSPKR_PLATFORM diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile index 1db1815ce8032..546766f29acae 100644 --- a/drivers/input/misc/Makefile +++ b/drivers/input/misc/Makefile @@ -49,7 +49,6 @@ obj-$(CONFIG_INPUT_MAX8925_ONKEY) += max8925_onkey.o obj-$(CONFIG_INPUT_MAX8997_HAPTIC) += max8997_haptic.o obj-$(CONFIG_INPUT_MC13783_PWRBUTTON) += mc13783-pwrbutton.o obj-$(CONFIG_INPUT_MMA8450) += mma8450.o -obj-$(CONFIG_INPUT_MSM_VIBRATOR) += msm-vibrator.o obj-$(CONFIG_INPUT_PALMAS_PWRBUTTON) += palmas-pwrbutton.o obj-$(CONFIG_INPUT_PCAP) += pcap_keys.o obj-$(CONFIG_INPUT_PCF50633_PMU) += pcf50633-input.o diff --git a/drivers/input/misc/msm-vibrator.c b/drivers/input/misc/msm-vibrator.c deleted file mode 100644 index b60f1aaee7053..0000000000000 --- a/drivers/input/misc/msm-vibrator.c +++ /dev/null @@ -1,281 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * Qualcomm MSM vibrator driver - * - * Copyright (c) 2018 Brian Masney - * - * Based on qcom,pwm-vibrator.c from: - * Copyright (c) 2018 Jonathan Marek - * - * Based on msm_pwm_vibrator.c from downstream Android sources: - * Copyright (C) 2009-2014 LGE, Inc. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define REG_CMD_RCGR 0x00 -#define REG_CFG_RCGR 0x04 -#define REG_M 0x08 -#define REG_N 0x0C -#define REG_D 0x10 -#define REG_CBCR 0x24 -#define MMSS_CC_M_DEFAULT 1 - -struct msm_vibrator { - struct input_dev *input; - struct mutex mutex; - struct work_struct worker; - void __iomem *base; - struct regulator *vcc; - struct clk *clk; - struct gpio_desc *enable_gpio; - u16 magnitude; - bool enabled; -}; - -static void msm_vibrator_write(struct msm_vibrator *vibrator, int offset, - u32 value) -{ - writel(value, vibrator->base + offset); -} - -static int msm_vibrator_start(struct msm_vibrator *vibrator) -{ - int d_reg_val, ret = 0; - - mutex_lock(&vibrator->mutex); - - if (!vibrator->enabled) { - ret = clk_set_rate(vibrator->clk, 24000); - if (ret) { - dev_err(&vibrator->input->dev, - "Failed to set clock rate: %d\n", ret); - goto unlock; - } - - ret = clk_prepare_enable(vibrator->clk); - if (ret) { - dev_err(&vibrator->input->dev, - "Failed to enable clock: %d\n", ret); - goto unlock; - } - - ret = regulator_enable(vibrator->vcc); - if (ret) { - dev_err(&vibrator->input->dev, - "Failed to enable regulator: %d\n", ret); - clk_disable(vibrator->clk); - goto unlock; - } - - gpiod_set_value_cansleep(vibrator->enable_gpio, 1); - - vibrator->enabled = true; - } - - d_reg_val = 127 - ((126 * vibrator->magnitude) / 0xffff); - msm_vibrator_write(vibrator, REG_CFG_RCGR, - (2 << 12) | /* dual edge mode */ - (0 << 8) | /* cxo */ - (7 << 0)); - msm_vibrator_write(vibrator, REG_M, 1); - msm_vibrator_write(vibrator, REG_N, 128); - msm_vibrator_write(vibrator, REG_D, d_reg_val); - msm_vibrator_write(vibrator, REG_CMD_RCGR, 1); - msm_vibrator_write(vibrator, REG_CBCR, 1); - -unlock: - mutex_unlock(&vibrator->mutex); - - return ret; -} - -static void msm_vibrator_stop(struct msm_vibrator *vibrator) -{ - mutex_lock(&vibrator->mutex); - - if (vibrator->enabled) { - gpiod_set_value_cansleep(vibrator->enable_gpio, 0); - regulator_disable(vibrator->vcc); - clk_disable(vibrator->clk); - vibrator->enabled = false; - } - - mutex_unlock(&vibrator->mutex); -} - -static void msm_vibrator_worker(struct work_struct *work) -{ - struct msm_vibrator *vibrator = container_of(work, - struct msm_vibrator, - worker); - - if (vibrator->magnitude) - msm_vibrator_start(vibrator); - else - msm_vibrator_stop(vibrator); -} - -static int msm_vibrator_play_effect(struct input_dev *dev, void *data, - struct ff_effect *effect) -{ - struct msm_vibrator *vibrator = input_get_drvdata(dev); - - mutex_lock(&vibrator->mutex); - - if (effect->u.rumble.strong_magnitude > 0) - vibrator->magnitude = effect->u.rumble.strong_magnitude; - else - vibrator->magnitude = effect->u.rumble.weak_magnitude; - - mutex_unlock(&vibrator->mutex); - - schedule_work(&vibrator->worker); - - return 0; -} - -static void msm_vibrator_close(struct input_dev *input) -{ - struct msm_vibrator *vibrator = input_get_drvdata(input); - - cancel_work_sync(&vibrator->worker); - msm_vibrator_stop(vibrator); -} - -static int msm_vibrator_probe(struct platform_device *pdev) -{ - struct msm_vibrator *vibrator; - struct resource *res; - int ret; - - vibrator = devm_kzalloc(&pdev->dev, sizeof(*vibrator), GFP_KERNEL); - if (!vibrator) - return -ENOMEM; - - vibrator->input = devm_input_allocate_device(&pdev->dev); - if (!vibrator->input) - return -ENOMEM; - - vibrator->vcc = devm_regulator_get(&pdev->dev, "vcc"); - if (IS_ERR(vibrator->vcc)) { - if (PTR_ERR(vibrator->vcc) != -EPROBE_DEFER) 
- dev_err(&pdev->dev, "Failed to get regulator: %ld\n", - PTR_ERR(vibrator->vcc)); - return PTR_ERR(vibrator->vcc); - } - - vibrator->enable_gpio = devm_gpiod_get(&pdev->dev, "enable", - GPIOD_OUT_LOW); - if (IS_ERR(vibrator->enable_gpio)) { - if (PTR_ERR(vibrator->enable_gpio) != -EPROBE_DEFER) - dev_err(&pdev->dev, "Failed to get enable gpio: %ld\n", - PTR_ERR(vibrator->enable_gpio)); - return PTR_ERR(vibrator->enable_gpio); - } - - vibrator->clk = devm_clk_get(&pdev->dev, "pwm"); - if (IS_ERR(vibrator->clk)) { - if (PTR_ERR(vibrator->clk) != -EPROBE_DEFER) - dev_err(&pdev->dev, "Failed to lookup pwm clock: %ld\n", - PTR_ERR(vibrator->clk)); - return PTR_ERR(vibrator->clk); - } - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "Failed to get platform resource\n"); - return -ENODEV; - } - - vibrator->base = devm_ioremap(&pdev->dev, res->start, - resource_size(res)); - if (!vibrator->base) { - dev_err(&pdev->dev, "Failed to iomap resource.\n"); - return -ENOMEM; - } - - vibrator->enabled = false; - mutex_init(&vibrator->mutex); - INIT_WORK(&vibrator->worker, msm_vibrator_worker); - - vibrator->input->name = "msm-vibrator"; - vibrator->input->id.bustype = BUS_HOST; - vibrator->input->close = msm_vibrator_close; - - input_set_drvdata(vibrator->input, vibrator); - input_set_capability(vibrator->input, EV_FF, FF_RUMBLE); - - ret = input_ff_create_memless(vibrator->input, NULL, - msm_vibrator_play_effect); - if (ret) { - dev_err(&pdev->dev, "Failed to create ff memless: %d", ret); - return ret; - } - - ret = input_register_device(vibrator->input); - if (ret) { - dev_err(&pdev->dev, "Failed to register input device: %d", ret); - return ret; - } - - platform_set_drvdata(pdev, vibrator); - - return 0; -} - -static int __maybe_unused msm_vibrator_suspend(struct device *dev) -{ - struct platform_device *pdev = to_platform_device(dev); - struct msm_vibrator *vibrator = platform_get_drvdata(pdev); - - cancel_work_sync(&vibrator->worker); - - if (vibrator->enabled) - msm_vibrator_stop(vibrator); - - return 0; -} - -static int __maybe_unused msm_vibrator_resume(struct device *dev) -{ - struct platform_device *pdev = to_platform_device(dev); - struct msm_vibrator *vibrator = platform_get_drvdata(pdev); - - if (vibrator->enabled) - msm_vibrator_start(vibrator); - - return 0; -} - -static SIMPLE_DEV_PM_OPS(msm_vibrator_pm_ops, msm_vibrator_suspend, - msm_vibrator_resume); - -static const struct of_device_id msm_vibrator_of_match[] = { - { .compatible = "qcom,msm8226-vibrator" }, - { .compatible = "qcom,msm8974-vibrator" }, - {}, -}; -MODULE_DEVICE_TABLE(of, msm_vibrator_of_match); - -static struct platform_driver msm_vibrator_driver = { - .probe = msm_vibrator_probe, - .driver = { - .name = "msm-vibrator", - .pm = &msm_vibrator_pm_ops, - .of_match_table = of_match_ptr(msm_vibrator_of_match), - }, -}; -module_platform_driver(msm_vibrator_driver); - -MODULE_AUTHOR("Brian Masney "); -MODULE_DESCRIPTION("Qualcomm MSM vibrator driver"); -MODULE_LICENSE("GPL"); -- GitLab From 768062fd1284529212daffd360314e9aa93abb62 Mon Sep 17 00:00:00 2001 From: Anson Huang Date: Wed, 13 May 2020 10:15:49 -0700 Subject: [PATCH 0489/1971] Input: imx_sc_key - use devm_add_action_or_reset() to handle all cleanups Use devm_add_action_or_reset() to handle all cleanups of failure in .probe and .remove, then .remove callback can be dropped. 
Signed-off-by: Anson Huang Link: https://lore.kernel.org/r/1584082751-17047-1-git-send-email-Anson.Huang@nxp.com Signed-off-by: Dmitry Torokhov --- drivers/input/keyboard/imx_sc_key.c | 33 +++++++++++++---------------- 1 file changed, 15 insertions(+), 18 deletions(-) diff --git a/drivers/input/keyboard/imx_sc_key.c b/drivers/input/keyboard/imx_sc_key.c index 9f809aeb785c4..d18839f1f4f60 100644 --- a/drivers/input/keyboard/imx_sc_key.c +++ b/drivers/input/keyboard/imx_sc_key.c @@ -99,6 +99,15 @@ static void imx_sc_check_for_events(struct work_struct *work) msecs_to_jiffies(REPEAT_INTERVAL)); } +static void imx_sc_key_action(void *data) +{ + struct imx_key_drv_data *priv = data; + + imx_scu_irq_group_enable(SC_IRQ_GROUP_WAKE, SC_IRQ_BUTTON, false); + imx_scu_irq_unregister_notifier(&priv->key_notifier); + cancel_delayed_work_sync(&priv->check_work); +} + static int imx_sc_key_probe(struct platform_device *pdev) { struct imx_key_drv_data *priv; @@ -149,27 +158,16 @@ static int imx_sc_key_probe(struct platform_device *pdev) return error; } + error = devm_add_action_or_reset(&pdev->dev, imx_sc_key_action, &priv); + if (error) + return error; + priv->key_notifier.notifier_call = imx_sc_key_notify; error = imx_scu_irq_register_notifier(&priv->key_notifier); - if (error) { - imx_scu_irq_group_enable(SC_IRQ_GROUP_WAKE, SC_IRQ_BUTTON, - false); + if (error) dev_err(&pdev->dev, "failed to register scu notifier\n"); - return error; - } - - return 0; -} - -static int imx_sc_key_remove(struct platform_device *pdev) -{ - struct imx_key_drv_data *priv = platform_get_drvdata(pdev); - - imx_scu_irq_group_enable(SC_IRQ_GROUP_WAKE, SC_IRQ_BUTTON, false); - imx_scu_irq_unregister_notifier(&priv->key_notifier); - cancel_delayed_work_sync(&priv->check_work); - return 0; + return error; } static const struct of_device_id imx_sc_key_ids[] = { @@ -184,7 +182,6 @@ static struct platform_driver imx_sc_key_driver = { .of_match_table = imx_sc_key_ids, }, .probe = imx_sc_key_probe, - .remove = imx_sc_key_remove, }; module_platform_driver(imx_sc_key_driver); -- GitLab From daa28975dd4971b4352f5e7df75728096048da52 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 8 May 2020 18:15:15 +0200 Subject: [PATCH 0490/1971] nvdimm/blk: stop using ->queuedata In preparation for removing queuedata as an argument to make_request_fn() drop the dependency ->queuedata. 
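The substitution follows one pattern across these drivers; a rough sketch with a made-up foo driver (the real hunks follow below):

	static blk_qc_t foo_make_request(struct request_queue *q, struct bio *bio)
	{
		/* was: struct foo_dev *foo = q->queuedata; */
		struct foo_dev *foo = bio->bi_disk->private_data;

		/* ... carry out the I/O against foo ... */
		return BLK_QC_T_NONE;
	}

	static void foo_attach_disk(struct foo_dev *foo, struct gendisk *disk)
	{
		/* was: q->queuedata = foo; */
		disk->private_data = foo;
	}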
Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20200508161517.252308-14-hch@lst.de Signed-off-by: Dan Williams --- drivers/nvdimm/blk.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c index 43751fab9d36a..ffe4728bad8b1 100644 --- a/drivers/nvdimm/blk.c +++ b/drivers/nvdimm/blk.c @@ -165,7 +165,7 @@ static int nsblk_do_bvec(struct nd_namespace_blk *nsblk, static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio) { struct bio_integrity_payload *bip; - struct nd_namespace_blk *nsblk; + struct nd_namespace_blk *nsblk = bio->bi_disk->private_data; struct bvec_iter iter; unsigned long start; struct bio_vec bvec; @@ -176,7 +176,6 @@ static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio) return BLK_QC_T_NONE; bip = bio_integrity(bio); - nsblk = q->queuedata; rw = bio_data_dir(bio); do_acct = nd_iostat_start(bio, &start); bio_for_each_segment(bvec, bio, iter) { @@ -258,7 +257,6 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk) blk_queue_max_hw_sectors(q, UINT_MAX); blk_queue_logical_block_size(q, nsblk_sector_size(nsblk)); blk_queue_flag_set(QUEUE_FLAG_NONROT, q); - q->queuedata = nsblk; disk = alloc_disk(0); if (!disk) @@ -268,6 +266,7 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk) disk->fops = &nd_blk_fops; disk->queue = q; disk->flags = GENHD_FL_EXT_DEVT; + disk->private_data = nsblk; nvdimm_namespace_disk_name(&nsblk->common, disk->disk_name); if (devm_add_action_or_reset(dev, nd_blk_release_disk, disk)) -- GitLab From 5713bcc3fd2b0780418b20a7877195758d498b81 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 8 May 2020 18:15:16 +0200 Subject: [PATCH 0491/1971] nvdimm/btt: stop using ->queuedata In preparation for removing queuedata as an argument to make_request_fn() drop the dependency ->queuedata. Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20200508161517.252308-15-hch@lst.de Signed-off-by: Dan Williams --- drivers/nvdimm/btt.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index 3b09419218d6f..eca3e48fefda1 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c @@ -1442,7 +1442,7 @@ static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip, static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio) { struct bio_integrity_payload *bip = bio_integrity(bio); - struct btt *btt = q->queuedata; + struct btt *btt = bio->bi_disk->private_data; struct bvec_iter iter; unsigned long start; struct bio_vec bvec; @@ -1543,7 +1543,6 @@ static int btt_blk_init(struct btt *btt) blk_queue_logical_block_size(btt->btt_queue, btt->sector_size); blk_queue_max_hw_sectors(btt->btt_queue, UINT_MAX); blk_queue_flag_set(QUEUE_FLAG_NONROT, btt->btt_queue); - btt->btt_queue->queuedata = btt; if (btt_meta_size(btt)) { int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt)); -- GitLab From 6ec26b8b2d70b41d7c2affd8660d94ce78b3823c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 8 May 2020 18:15:17 +0200 Subject: [PATCH 0492/1971] nvdimm/pmem: stop using ->queuedata In preparation for removing queuedata as an argument to make_request_fn() drop the dependency ->queuedata. 
Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20200508161517.252308-16-hch@lst.de Signed-off-by: Dan Williams --- drivers/nvdimm/pmem.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 2df6994acf836..f8dc5941215bf 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -196,7 +196,7 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio) unsigned long start; struct bio_vec bvec; struct bvec_iter iter; - struct pmem_device *pmem = q->queuedata; + struct pmem_device *pmem = bio->bi_disk->private_data; struct nd_region *nd_region = to_region(pmem); if (bio->bi_opf & REQ_PREFLUSH) @@ -231,7 +231,7 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio) static int pmem_rw_page(struct block_device *bdev, sector_t sector, struct page *page, unsigned int op) { - struct pmem_device *pmem = bdev->bd_queue->queuedata; + struct pmem_device *pmem = bdev->bd_disk->private_data; blk_status_t rc; if (op_is_write(op)) @@ -464,7 +464,6 @@ static int pmem_attach_disk(struct device *dev, blk_queue_flag_set(QUEUE_FLAG_NONROT, q); if (pmem->pfn_flags & PFN_MAP) blk_queue_flag_set(QUEUE_FLAG_DAX, q); - q->queuedata = pmem; disk = alloc_disk_node(0, nid); if (!disk) @@ -474,6 +473,7 @@ static int pmem_attach_disk(struct device *dev, disk->fops = &pmem_fops; disk->queue = q; disk->flags = GENHD_FL_EXT_DEVT; + disk->private_data = pmem; disk->queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO; nvdimm_namespace_disk_name(ndns, disk->disk_name); set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset) -- GitLab From c7c1cbbc9217ebb5601b88d138d4a5358548de9d Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Wed, 29 Apr 2020 16:13:39 +0300 Subject: [PATCH 0493/1971] clk: ti: composite: fix memory leak The parent_names is never released for a component clock definition, causing some memory leak. Fix by releasing it once it is no longer needed. Reported-by: Tomi Valkeinen Signed-off-by: Tero Kristo Link: https://lkml.kernel.org/r/20200429131341.4697-2-t-kristo@ti.com Acked-by: Tony Lindgren Signed-off-by: Stephen Boyd --- drivers/clk/ti/composite.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/clk/ti/composite.c b/drivers/clk/ti/composite.c index 6a89936ba03af..eaa43575cfa5e 100644 --- a/drivers/clk/ti/composite.c +++ b/drivers/clk/ti/composite.c @@ -196,6 +196,7 @@ cleanup: if (!cclk->comp_clks[i]) continue; list_del(&cclk->comp_clks[i]->link); + kfree(cclk->comp_clks[i]->parent_names); kfree(cclk->comp_clks[i]); } -- GitLab From 74c0ac108d42bf24d37aaea68b6d39ba52d5f0e2 Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Wed, 29 Apr 2020 16:13:40 +0300 Subject: [PATCH 0494/1971] clk: ti: omap4: Add proper parent clocks for l4-secure clocks L4 secure clocks do not have their parents set currently, which ends them up to the orphan clock list. Fix this by adding either l3 or l4 clock as their parent. 
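An empty parent string in these clkctrl tables leaves the clock without a resolvable parent, so the common clock framework parks it on its orphan list until one appears. One entry from the change, as a sketch (the full hunk follows):

	/* before: no parent, so the clock stays orphaned */
	{ OMAP4_AES1_CLKCTRL, NULL, CLKF_SW_SUP, "" },

	/* after: explicitly parented to the L3 interconnect clock */
	{ OMAP4_AES1_CLKCTRL, NULL, CLKF_SW_SUP, "l3_div_ck" },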
Signed-off-by: Tero Kristo Link: https://lkml.kernel.org/r/20200429131341.4697-3-t-kristo@ti.com Acked-by: Tony Lindgren Signed-off-by: Stephen Boyd --- drivers/clk/ti/clk-44xx.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/clk/ti/clk-44xx.c b/drivers/clk/ti/clk-44xx.c index 312a20f8ec0ec..a38c921539793 100644 --- a/drivers/clk/ti/clk-44xx.c +++ b/drivers/clk/ti/clk-44xx.c @@ -606,13 +606,13 @@ static const struct omap_clkctrl_reg_data omap4_l4_per_clkctrl_regs[] __initcons static const struct omap_clkctrl_reg_data omap4_l4_secure_clkctrl_regs[] __initconst = { - { OMAP4_AES1_CLKCTRL, NULL, CLKF_SW_SUP, "" }, - { OMAP4_AES2_CLKCTRL, NULL, CLKF_SW_SUP, "" }, - { OMAP4_DES3DES_CLKCTRL, NULL, CLKF_SW_SUP, "" }, - { OMAP4_PKA_CLKCTRL, NULL, CLKF_SW_SUP, "" }, - { OMAP4_RNG_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "" }, - { OMAP4_SHA2MD5_CLKCTRL, NULL, CLKF_SW_SUP, "" }, - { OMAP4_CRYPTODMA_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "" }, + { OMAP4_AES1_CLKCTRL, NULL, CLKF_SW_SUP, "l3_div_ck" }, + { OMAP4_AES2_CLKCTRL, NULL, CLKF_SW_SUP, "l3_div_ck" }, + { OMAP4_DES3DES_CLKCTRL, NULL, CLKF_SW_SUP, "l4_div_ck" }, + { OMAP4_PKA_CLKCTRL, NULL, CLKF_SW_SUP, "l4_div_ck" }, + { OMAP4_RNG_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "l4_div_ck" }, + { OMAP4_SHA2MD5_CLKCTRL, NULL, CLKF_SW_SUP, "l3_div_ck" }, + { OMAP4_CRYPTODMA_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "l3_div_ck" }, { 0 }, }; -- GitLab From f968045fb92e5408c25f1b4eae21bab4882e88e8 Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Wed, 29 Apr 2020 16:13:41 +0300 Subject: [PATCH 0495/1971] clk: ti: omap5: Add proper parent clocks for l4-secure clocks L4 secure clocks do not have their parents set currently, which ends them up to the orphan clock list. Fix this by adding either l3 or l4 clock as their parent. 
Signed-off-by: Tero Kristo Link: https://lkml.kernel.org/r/20200429131341.4697-4-t-kristo@ti.com Acked-by: Tony Lindgren Signed-off-by: Stephen Boyd --- drivers/clk/ti/clk-54xx.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/clk/ti/clk-54xx.c b/drivers/clk/ti/clk-54xx.c index 92bf2dda95b9b..8694bc9f5fc7f 100644 --- a/drivers/clk/ti/clk-54xx.c +++ b/drivers/clk/ti/clk-54xx.c @@ -303,13 +303,13 @@ static const struct omap_clkctrl_reg_data omap5_l4per_clkctrl_regs[] __initconst static const struct omap_clkctrl_reg_data omap5_l4_secure_clkctrl_regs[] __initconst = { - { OMAP5_AES1_CLKCTRL, NULL, CLKF_HW_SUP, "" }, - { OMAP5_AES2_CLKCTRL, NULL, CLKF_HW_SUP, "" }, - { OMAP5_DES3DES_CLKCTRL, NULL, CLKF_HW_SUP, "" }, - { OMAP5_FPKA_CLKCTRL, NULL, CLKF_SW_SUP, "" }, - { OMAP5_RNG_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "" }, - { OMAP5_SHA2MD5_CLKCTRL, NULL, CLKF_HW_SUP, "" }, - { OMAP5_DMA_CRYPTO_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "" }, + { OMAP5_AES1_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" }, + { OMAP5_AES2_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" }, + { OMAP5_DES3DES_CLKCTRL, NULL, CLKF_HW_SUP, "l4_root_clk_div" }, + { OMAP5_FPKA_CLKCTRL, NULL, CLKF_SW_SUP, "l4_root_clk_div" }, + { OMAP5_RNG_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "l4_root_clk_div" }, + { OMAP5_SHA2MD5_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" }, + { OMAP5_DMA_CRYPTO_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "l3_iclk_div" }, { 0 }, }; -- GitLab From f45c8a501d1abbb74e9fac23c3fdd6c9ab3673d9 Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Thu, 30 Apr 2020 11:36:38 +0300 Subject: [PATCH 0496/1971] clk: ti: dra7xx: fix gpu clkctrl parent gpu_cm:* parent clock name is wrong, replace this with correct gpu-clkctrl:* clock. Otherwise the clock ends up in the orphaned list. Signed-off-by: Tero Kristo Link: https://lkml.kernel.org/r/20200430083640.8621-2-t-kristo@ti.com Signed-off-by: Stephen Boyd --- drivers/clk/ti/clk-7xx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c index 14b6450931078..366cb75378cbf 100644 --- a/drivers/clk/ti/clk-7xx.c +++ b/drivers/clk/ti/clk-7xx.c @@ -328,7 +328,7 @@ static const struct omap_clkctrl_bit_data dra7_gpu_core_bit_data[] __initconst = }; static const struct omap_clkctrl_reg_data dra7_gpu_clkctrl_regs[] __initconst = { - { DRA7_GPU_CLKCTRL, dra7_gpu_core_bit_data, CLKF_SW_SUP, "gpu_cm:clk:0000:24", }, + { DRA7_GPU_CLKCTRL, dra7_gpu_core_bit_data, CLKF_SW_SUP, "gpu-clkctrl:0000:24", }, { 0 }, }; -- GitLab From c752424b55c944cfd9aaa580045227df57764c28 Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Thu, 30 Apr 2020 11:36:39 +0300 Subject: [PATCH 0497/1971] clk: ti: dra7xx: mark MCAN clock as DRA76x only This clock entry does not exist on any other devices except DRA76, so mark it as specific to that SoC only. 
Signed-off-by: Tero Kristo Link: https://lkml.kernel.org/r/20200430083640.8621-3-t-kristo@ti.com Signed-off-by: Stephen Boyd --- drivers/clk/ti/clk-7xx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c index 366cb75378cbf..146d1d67c7321 100644 --- a/drivers/clk/ti/clk-7xx.c +++ b/drivers/clk/ti/clk-7xx.c @@ -815,7 +815,7 @@ static const struct omap_clkctrl_reg_data dra7_wkupaon_clkctrl_regs[] __initcons { DRA7_WKUPAON_COUNTER_32K_CLKCTRL, NULL, 0, "wkupaon_iclk_mux" }, { DRA7_WKUPAON_UART10_CLKCTRL, dra7_uart10_bit_data, CLKF_SW_SUP, "wkupaon-clkctrl:0060:24" }, { DRA7_WKUPAON_DCAN1_CLKCTRL, dra7_dcan1_bit_data, CLKF_SW_SUP, "wkupaon-clkctrl:0068:24" }, - { DRA7_WKUPAON_ADC_CLKCTRL, NULL, CLKF_SW_SUP, "mcan_clk" }, + { DRA7_WKUPAON_ADC_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_SOC_DRA76, "mcan_clk" }, { 0 }, }; -- GitLab From 4f74251df0ceb3b9fbddf8b5c6aef31c0092b617 Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Thu, 30 Apr 2020 11:36:40 +0300 Subject: [PATCH 0498/1971] clk: ti: dra7xx: fix RNG clock parent RNG is sourced from L4 clock. Add info for this for proper parenting of the clock. Signed-off-by: Tero Kristo Link: https://lkml.kernel.org/r/20200430083640.8621-4-t-kristo@ti.com Signed-off-by: Stephen Boyd --- drivers/clk/ti/clk-7xx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c index 146d1d67c7321..bf8fced40e426 100644 --- a/drivers/clk/ti/clk-7xx.c +++ b/drivers/clk/ti/clk-7xx.c @@ -644,7 +644,7 @@ static const struct omap_clkctrl_reg_data dra7_l4sec_clkctrl_regs[] __initconst { DRA7_L4SEC_AES1_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" }, { DRA7_L4SEC_AES2_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" }, { DRA7_L4SEC_DES_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" }, - { DRA7_L4SEC_RNG_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "" }, + { DRA7_L4SEC_RNG_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "l4_root_clk_div" }, { DRA7_L4SEC_SHAM_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" }, { 0 }, }; -- GitLab From 37416e554961b34451f3a160acd1e27656103e9f Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Fri, 17 Apr 2020 00:00:41 -0700 Subject: [PATCH 0499/1971] clk: qcom: gdsc: Handle GDSC regulator supplies Certain GDSCs, such as the GPU_GX on MSM8996, requires that the upstream regulator supply is powered in order to be turned on. It's not guaranteed that the bootloader will leave these supplies on and the driver core will attempt to enable any GDSCs before allowing the individual drivers to probe defer on the PMIC regulator driver not yet being present. So the gdsc driver needs to be made aware of supplying regulators and probe defer on their absence, and it needs to enable and disable the regulator accordingly. Voltage adjustments of the supplying regulator are deferred to the client drivers themselves. 
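As a sketch of what a provider then declares (the "foo_gx" name and register offset are invented; "vdd-gfx" and PWRSTS_OFF_ON follow the MSM8996 user added in the next patch):

	static struct gdsc foo_gx_gdsc = {
		.gdscr = 0x1024,		/* made-up register offset */
		.pd = {
			.name = "foo_gx",
		},
		.pwrsts = PWRSTS_OFF_ON,
		/*
		 * Looked up with devm_regulator_get() at registration time, so
		 * the clock controller can probe-defer until the supplying
		 * regulator driver is present; gdsc_toggle_logic() then enables
		 * and disables it around GDSC power-up and power-down.
		 */
		.supply = "vdd-gfx",
	};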
Signed-off-by: Bjorn Andersson Link: https://lkml.kernel.org/r/20200417070044.1376212-2-bjorn.andersson@linaro.org Reviewed-by: Vinod Koul Signed-off-by: Stephen Boyd --- drivers/clk/qcom/gdsc.c | 23 +++++++++++++++++++++++ drivers/clk/qcom/gdsc.h | 4 ++++ 2 files changed, 27 insertions(+) diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c index a250f59708d85..04944f11659b6 100644 --- a/drivers/clk/qcom/gdsc.c +++ b/drivers/clk/qcom/gdsc.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include "gdsc.h" @@ -112,6 +113,12 @@ static int gdsc_toggle_logic(struct gdsc *sc, enum gdsc_status status) int ret; u32 val = (status == GDSC_ON) ? 0 : SW_COLLAPSE_MASK; + if (status == GDSC_ON && sc->rsupply) { + ret = regulator_enable(sc->rsupply); + if (ret < 0) + return ret; + } + ret = regmap_update_bits(sc->regmap, sc->gdscr, SW_COLLAPSE_MASK, val); if (ret) return ret; @@ -143,6 +150,13 @@ static int gdsc_toggle_logic(struct gdsc *sc, enum gdsc_status status) ret = gdsc_poll_status(sc, status); WARN(ret, "%s status stuck at 'o%s'", sc->pd.name, status ? "ff" : "n"); + + if (!ret && status == GDSC_OFF && sc->rsupply) { + ret = regulator_disable(sc->rsupply); + if (ret < 0) + return ret; + } + return ret; } @@ -371,6 +385,15 @@ int gdsc_register(struct gdsc_desc *desc, if (!data->domains) return -ENOMEM; + for (i = 0; i < num; i++) { + if (!scs[i] || !scs[i]->supply) + continue; + + scs[i]->rsupply = devm_regulator_get(dev, scs[i]->supply); + if (IS_ERR(scs[i]->rsupply)) + return PTR_ERR(scs[i]->rsupply); + } + data->num_domains = num; for (i = 0; i < num; i++) { if (!scs[i]) diff --git a/drivers/clk/qcom/gdsc.h b/drivers/clk/qcom/gdsc.h index 64cdc8cf0d4d2..c36fc26dcdffe 100644 --- a/drivers/clk/qcom/gdsc.h +++ b/drivers/clk/qcom/gdsc.h @@ -10,6 +10,7 @@ #include struct regmap; +struct regulator; struct reset_controller_dev; /** @@ -52,6 +53,9 @@ struct gdsc { struct reset_controller_dev *rcdev; unsigned int *resets; unsigned int reset_count; + + const char *supply; + struct regulator *rsupply; }; struct gdsc_desc { -- GitLab From 90a3691e0bd907daae23bb22850d4f4f4bfefa50 Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Fri, 17 Apr 2020 00:00:42 -0700 Subject: [PATCH 0500/1971] clk: qcom: mmcc-msm8996: Properly describe GPU_GX gdsc The GPU_GX GDSC depends on both GPU GDSC being enabled and that the VDD_GX rail is powered, so update the description of the node to cover these requirements. 
Signed-off-by: Bjorn Andersson Link: https://lkml.kernel.org/r/20200417070044.1376212-3-bjorn.andersson@linaro.org Acked-by: Rob Herring Reviewed-by: Vinod Koul Signed-off-by: Stephen Boyd --- Documentation/devicetree/bindings/clock/qcom,mmcc.yaml | 4 ++++ drivers/clk/qcom/mmcc-msm8996.c | 2 ++ 2 files changed, 6 insertions(+) diff --git a/Documentation/devicetree/bindings/clock/qcom,mmcc.yaml b/Documentation/devicetree/bindings/clock/qcom,mmcc.yaml index f684fe67db84e..244db36cc920f 100644 --- a/Documentation/devicetree/bindings/clock/qcom,mmcc.yaml +++ b/Documentation/devicetree/bindings/clock/qcom,mmcc.yaml @@ -67,6 +67,10 @@ properties: description: Protected clock specifier list as per common clock binding + vdd-gfx-supply: + description: + Regulator supply for the GPU_GX GDSC + required: - compatible - reg diff --git a/drivers/clk/qcom/mmcc-msm8996.c b/drivers/clk/qcom/mmcc-msm8996.c index 6c7592ddf8bb3..3b3aac07fb2d5 100644 --- a/drivers/clk/qcom/mmcc-msm8996.c +++ b/drivers/clk/qcom/mmcc-msm8996.c @@ -3064,7 +3064,9 @@ static struct gdsc gpu_gx_gdsc = { .name = "gpu_gx", }, .pwrsts = PWRSTS_OFF_ON, + .parent = &gpu_gdsc.pd, .flags = CLAMP_IO, + .supply = "vdd-gfx", }; static struct clk_regmap *mmcc_msm8996_clocks[] = { -- GitLab From f73a4230d5bbc8fc7e1a2479ac997f786111c7bb Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Wed, 13 May 2020 12:24:19 +0530 Subject: [PATCH 0501/1971] clk: qcom: gcc: Add GPU and NPU clocks for SM8150 Add the GPU and NPU clocks for SM8150. They were missed in earlier addition of clock driver. Fixes: 2a1d7eb854bb ("clk: qcom: gcc: Add global clock controller driver for SM8150") Signed-off-by: Vinod Koul Link: https://lkml.kernel.org/r/20200513065420.32735-1-vkoul@kernel.org Signed-off-by: Stephen Boyd --- drivers/clk/qcom/gcc-sm8150.c | 64 +++++++++++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) diff --git a/drivers/clk/qcom/gcc-sm8150.c b/drivers/clk/qcom/gcc-sm8150.c index ef98fdc51755c..7c82dd85deaf5 100644 --- a/drivers/clk/qcom/gcc-sm8150.c +++ b/drivers/clk/qcom/gcc-sm8150.c @@ -1617,6 +1617,36 @@ static struct clk_branch gcc_gpu_cfg_ahb_clk = { }, }; +static struct clk_branch gcc_gpu_gpll0_clk_src = { + .clkr = { + .enable_reg = 0x52004, + .enable_mask = BIT(15), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gpu_gpll0_clk_src", + .parent_hws = (const struct clk_hw *[]){ + &gpll0.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_gpll0_div_clk_src = { + .clkr = { + .enable_reg = 0x52004, + .enable_mask = BIT(16), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gpu_gpll0_div_clk_src", + .parent_hws = (const struct clk_hw *[]){ + &gcc_gpu_gpll0_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + static struct clk_branch gcc_gpu_iref_clk = { .halt_reg = 0x8c010, .halt_check = BRANCH_HALT, @@ -1699,6 +1729,36 @@ static struct clk_branch gcc_npu_cfg_ahb_clk = { }, }; +static struct clk_branch gcc_npu_gpll0_clk_src = { + .clkr = { + .enable_reg = 0x52004, + .enable_mask = BIT(18), + .hw.init = &(struct clk_init_data){ + .name = "gcc_npu_gpll0_clk_src", + .parent_hws = (const struct clk_hw *[]){ + &gpll0.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_npu_gpll0_div_clk_src = { + .clkr = { + .enable_reg = 0x52004, + .enable_mask = BIT(19), + .hw.init = &(struct clk_init_data){ + .name = 
"gcc_npu_gpll0_div_clk_src", + .parent_hws = (const struct clk_hw *[]){ + &gcc_npu_gpll0_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + static struct clk_branch gcc_npu_trig_clk = { .halt_reg = 0x4d00c, .halt_check = BRANCH_VOTED, @@ -3375,12 +3435,16 @@ static struct clk_regmap *gcc_sm8150_clocks[] = { [GCC_GP3_CLK] = &gcc_gp3_clk.clkr, [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr, [GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr, + [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr, + [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr, [GCC_GPU_IREF_CLK] = &gcc_gpu_iref_clk.clkr, [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr, [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr, [GCC_NPU_AT_CLK] = &gcc_npu_at_clk.clkr, [GCC_NPU_AXI_CLK] = &gcc_npu_axi_clk.clkr, [GCC_NPU_CFG_AHB_CLK] = &gcc_npu_cfg_ahb_clk.clkr, + [GCC_NPU_GPLL0_CLK_SRC] = &gcc_npu_gpll0_clk_src.clkr, + [GCC_NPU_GPLL0_DIV_CLK_SRC] = &gcc_npu_gpll0_div_clk_src.clkr, [GCC_NPU_TRIG_CLK] = &gcc_npu_trig_clk.clkr, [GCC_PCIE0_PHY_REFGEN_CLK] = &gcc_pcie0_phy_refgen_clk.clkr, [GCC_PCIE1_PHY_REFGEN_CLK] = &gcc_pcie1_phy_refgen_clk.clkr, -- GitLab From 37c72e4cae37f0dace1abb3711ede7fbc6d0862a Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Wed, 13 May 2020 12:24:20 +0530 Subject: [PATCH 0502/1971] clk: qcom: gcc: Add missing UFS clocks for SM8150 Add the missing ufs card and ufs phy clocks for SM8150. They were missed in earlier addition of clock driver. Fixes: 2a1d7eb854bb ("clk: qcom: gcc: Add global clock controller driver for SM8150") Signed-off-by: Vinod Koul Link: https://lkml.kernel.org/r/20200513065420.32735-2-vkoul@kernel.org Signed-off-by: Stephen Boyd --- drivers/clk/qcom/gcc-sm8150.c | 84 +++++++++++++++++++++++++++++++++++ 1 file changed, 84 insertions(+) diff --git a/drivers/clk/qcom/gcc-sm8150.c b/drivers/clk/qcom/gcc-sm8150.c index 7c82dd85deaf5..2bc08e7125bf5 100644 --- a/drivers/clk/qcom/gcc-sm8150.c +++ b/drivers/clk/qcom/gcc-sm8150.c @@ -2873,6 +2873,45 @@ static struct clk_branch gcc_ufs_card_phy_aux_hw_ctl_clk = { }, }; +/* external clocks so add BRANCH_HALT_SKIP */ +static struct clk_branch gcc_ufs_card_rx_symbol_0_clk = { + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x7501c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_card_rx_symbol_0_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +/* external clocks so add BRANCH_HALT_SKIP */ +static struct clk_branch gcc_ufs_card_rx_symbol_1_clk = { + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x750ac, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_card_rx_symbol_1_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +/* external clocks so add BRANCH_HALT_SKIP */ +static struct clk_branch gcc_ufs_card_tx_symbol_0_clk = { + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x75018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_card_tx_symbol_0_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + static struct clk_branch gcc_ufs_card_unipro_core_clk = { .halt_reg = 0x75058, .halt_check = BRANCH_HALT, @@ -3053,6 +3092,45 @@ static struct clk_branch gcc_ufs_phy_phy_aux_hw_ctl_clk = { }, }; +/* external clocks so add BRANCH_HALT_SKIP */ +static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = { + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x7701c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = 
"gcc_ufs_phy_rx_symbol_0_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +/* external clocks so add BRANCH_HALT_SKIP */ +static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = { + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x770ac, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_rx_symbol_1_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +/* external clocks so add BRANCH_HALT_SKIP */ +static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = { + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x77018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_tx_symbol_0_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + static struct clk_branch gcc_ufs_phy_unipro_core_clk = { .halt_reg = 0x77058, .halt_check = BRANCH_HALT, @@ -3549,6 +3627,9 @@ static struct clk_regmap *gcc_sm8150_clocks[] = { [GCC_UFS_CARD_PHY_AUX_CLK_SRC] = &gcc_ufs_card_phy_aux_clk_src.clkr, [GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK] = &gcc_ufs_card_phy_aux_hw_ctl_clk.clkr, + [GCC_UFS_CARD_RX_SYMBOL_0_CLK] = &gcc_ufs_card_rx_symbol_0_clk.clkr, + [GCC_UFS_CARD_RX_SYMBOL_1_CLK] = &gcc_ufs_card_rx_symbol_1_clk.clkr, + [GCC_UFS_CARD_TX_SYMBOL_0_CLK] = &gcc_ufs_card_tx_symbol_0_clk.clkr, [GCC_UFS_CARD_UNIPRO_CORE_CLK] = &gcc_ufs_card_unipro_core_clk.clkr, [GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_card_unipro_core_clk_src.clkr, @@ -3566,6 +3647,9 @@ static struct clk_regmap *gcc_sm8150_clocks[] = { [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr, [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr, [GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK] = &gcc_ufs_phy_phy_aux_hw_ctl_clk.clkr, + [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr, + [GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr, + [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr, [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr, [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_phy_unipro_core_clk_src.clkr, -- GitLab From 4c71d6abc4fc0e1171ac078687a98cca1537bbaa Mon Sep 17 00:00:00 2001 From: Bryan O'Donoghue Date: Tue, 12 May 2020 12:50:22 +0100 Subject: [PATCH 0503/1971] clk: qcom: Add DT bindings for MSM8939 GCC Add compatible strings and the include files for the MSM8939 GCC. 
Cc: Andy Gross Cc: Bjorn Andersson Cc: Michael Turquette Cc: Stephen Boyd Cc: Rob Herring Cc: linux-arm-msm@vger.kernel.org Cc: linux-clk@vger.kernel.org Cc: devicetree@vger.kernel.org Cc: linux-kernel@vger.kernel.org Tested-by: Vincent Knecht Signed-off-by: Bryan O'Donoghue Link: https://lkml.kernel.org/r/20200512115023.2856617-2-bryan.odonoghue@linaro.org Reviewed-by: Rob Herring Signed-off-by: Stephen Boyd --- .../devicetree/bindings/clock/qcom,gcc.yaml | 3 + include/dt-bindings/clock/qcom,gcc-msm8939.h | 206 ++++++++++++++++++ include/dt-bindings/reset/qcom,gcc-msm8939.h | 110 ++++++++++ 3 files changed, 319 insertions(+) create mode 100644 include/dt-bindings/clock/qcom,gcc-msm8939.h create mode 100644 include/dt-bindings/reset/qcom,gcc-msm8939.h diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc.yaml index e533bb0cfd2b4..ee0467fb5e31c 100644 --- a/Documentation/devicetree/bindings/clock/qcom,gcc.yaml +++ b/Documentation/devicetree/bindings/clock/qcom,gcc.yaml @@ -22,6 +22,8 @@ description: | - dt-bindings/reset/qcom,gcc-ipq6018.h - dt-bindings/clock/qcom,gcc-ipq806x.h (qcom,gcc-ipq8064) - dt-bindings/reset/qcom,gcc-ipq806x.h (qcom,gcc-ipq8064) + - dt-bindings/clock/qcom,gcc-msm8939.h + - dt-bindings/reset/qcom,gcc-msm8939.h - dt-bindings/clock/qcom,gcc-msm8660.h - dt-bindings/reset/qcom,gcc-msm8660.h - dt-bindings/clock/qcom,gcc-msm8974.h @@ -41,6 +43,7 @@ properties: - qcom,gcc-ipq8064 - qcom,gcc-msm8660 - qcom,gcc-msm8916 + - qcom,gcc-msm8939 - qcom,gcc-msm8960 - qcom,gcc-msm8974 - qcom,gcc-msm8974pro diff --git a/include/dt-bindings/clock/qcom,gcc-msm8939.h b/include/dt-bindings/clock/qcom,gcc-msm8939.h new file mode 100644 index 0000000000000..0634467c4ce5a --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-msm8939.h @@ -0,0 +1,206 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2020 Linaro Limited + */ + +#ifndef _DT_BINDINGS_CLK_MSM_GCC_8939_H +#define _DT_BINDINGS_CLK_MSM_GCC_8939_H + +#define GPLL0 0 +#define GPLL0_VOTE 1 +#define BIMC_PLL 2 +#define BIMC_PLL_VOTE 3 +#define GPLL1 4 +#define GPLL1_VOTE 5 +#define GPLL2 6 +#define GPLL2_VOTE 7 +#define PCNOC_BFDCD_CLK_SRC 8 +#define SYSTEM_NOC_BFDCD_CLK_SRC 9 +#define CAMSS_AHB_CLK_SRC 10 +#define APSS_AHB_CLK_SRC 11 +#define CSI0_CLK_SRC 12 +#define CSI1_CLK_SRC 13 +#define GFX3D_CLK_SRC 14 +#define VFE0_CLK_SRC 15 +#define BLSP1_QUP1_I2C_APPS_CLK_SRC 16 +#define BLSP1_QUP1_SPI_APPS_CLK_SRC 17 +#define BLSP1_QUP2_I2C_APPS_CLK_SRC 18 +#define BLSP1_QUP2_SPI_APPS_CLK_SRC 19 +#define BLSP1_QUP3_I2C_APPS_CLK_SRC 20 +#define BLSP1_QUP3_SPI_APPS_CLK_SRC 21 +#define BLSP1_QUP4_I2C_APPS_CLK_SRC 22 +#define BLSP1_QUP4_SPI_APPS_CLK_SRC 23 +#define BLSP1_QUP5_I2C_APPS_CLK_SRC 24 +#define BLSP1_QUP5_SPI_APPS_CLK_SRC 25 +#define BLSP1_QUP6_I2C_APPS_CLK_SRC 26 +#define BLSP1_QUP6_SPI_APPS_CLK_SRC 27 +#define BLSP1_UART1_APPS_CLK_SRC 28 +#define BLSP1_UART2_APPS_CLK_SRC 29 +#define CCI_CLK_SRC 30 +#define CAMSS_GP0_CLK_SRC 31 +#define CAMSS_GP1_CLK_SRC 32 +#define JPEG0_CLK_SRC 33 +#define MCLK0_CLK_SRC 34 +#define MCLK1_CLK_SRC 35 +#define CSI0PHYTIMER_CLK_SRC 36 +#define CSI1PHYTIMER_CLK_SRC 37 +#define CPP_CLK_SRC 38 +#define CRYPTO_CLK_SRC 39 +#define GP1_CLK_SRC 40 +#define GP2_CLK_SRC 41 +#define GP3_CLK_SRC 42 +#define BYTE0_CLK_SRC 43 +#define ESC0_CLK_SRC 44 +#define MDP_CLK_SRC 45 +#define PCLK0_CLK_SRC 46 +#define VSYNC_CLK_SRC 47 +#define PDM2_CLK_SRC 48 +#define SDCC1_APPS_CLK_SRC 49 +#define SDCC2_APPS_CLK_SRC 50 +#define APSS_TCU_CLK_SRC 
51 +#define USB_HS_SYSTEM_CLK_SRC 52 +#define VCODEC0_CLK_SRC 53 +#define GCC_BLSP1_AHB_CLK 54 +#define GCC_BLSP1_SLEEP_CLK 55 +#define GCC_BLSP1_QUP1_I2C_APPS_CLK 56 +#define GCC_BLSP1_QUP1_SPI_APPS_CLK 57 +#define GCC_BLSP1_QUP2_I2C_APPS_CLK 58 +#define GCC_BLSP1_QUP2_SPI_APPS_CLK 59 +#define GCC_BLSP1_QUP3_I2C_APPS_CLK 60 +#define GCC_BLSP1_QUP3_SPI_APPS_CLK 61 +#define GCC_BLSP1_QUP4_I2C_APPS_CLK 62 +#define GCC_BLSP1_QUP4_SPI_APPS_CLK 63 +#define GCC_BLSP1_QUP5_I2C_APPS_CLK 64 +#define GCC_BLSP1_QUP5_SPI_APPS_CLK 65 +#define GCC_BLSP1_QUP6_I2C_APPS_CLK 66 +#define GCC_BLSP1_QUP6_SPI_APPS_CLK 67 +#define GCC_BLSP1_UART1_APPS_CLK 68 +#define GCC_BLSP1_UART2_APPS_CLK 69 +#define GCC_BOOT_ROM_AHB_CLK 70 +#define GCC_CAMSS_CCI_AHB_CLK 71 +#define GCC_CAMSS_CCI_CLK 72 +#define GCC_CAMSS_CSI0_AHB_CLK 73 +#define GCC_CAMSS_CSI0_CLK 74 +#define GCC_CAMSS_CSI0PHY_CLK 75 +#define GCC_CAMSS_CSI0PIX_CLK 76 +#define GCC_CAMSS_CSI0RDI_CLK 77 +#define GCC_CAMSS_CSI1_AHB_CLK 78 +#define GCC_CAMSS_CSI1_CLK 79 +#define GCC_CAMSS_CSI1PHY_CLK 80 +#define GCC_CAMSS_CSI1PIX_CLK 81 +#define GCC_CAMSS_CSI1RDI_CLK 82 +#define GCC_CAMSS_CSI_VFE0_CLK 83 +#define GCC_CAMSS_GP0_CLK 84 +#define GCC_CAMSS_GP1_CLK 85 +#define GCC_CAMSS_ISPIF_AHB_CLK 86 +#define GCC_CAMSS_JPEG0_CLK 87 +#define GCC_CAMSS_JPEG_AHB_CLK 88 +#define GCC_CAMSS_JPEG_AXI_CLK 89 +#define GCC_CAMSS_MCLK0_CLK 90 +#define GCC_CAMSS_MCLK1_CLK 91 +#define GCC_CAMSS_MICRO_AHB_CLK 92 +#define GCC_CAMSS_CSI0PHYTIMER_CLK 93 +#define GCC_CAMSS_CSI1PHYTIMER_CLK 94 +#define GCC_CAMSS_AHB_CLK 95 +#define GCC_CAMSS_TOP_AHB_CLK 96 +#define GCC_CAMSS_CPP_AHB_CLK 97 +#define GCC_CAMSS_CPP_CLK 98 +#define GCC_CAMSS_VFE0_CLK 99 +#define GCC_CAMSS_VFE_AHB_CLK 100 +#define GCC_CAMSS_VFE_AXI_CLK 101 +#define GCC_CRYPTO_AHB_CLK 102 +#define GCC_CRYPTO_AXI_CLK 103 +#define GCC_CRYPTO_CLK 104 +#define GCC_OXILI_GMEM_CLK 105 +#define GCC_GP1_CLK 106 +#define GCC_GP2_CLK 107 +#define GCC_GP3_CLK 108 +#define GCC_MDSS_AHB_CLK 109 +#define GCC_MDSS_AXI_CLK 110 +#define GCC_MDSS_BYTE0_CLK 111 +#define GCC_MDSS_ESC0_CLK 112 +#define GCC_MDSS_MDP_CLK 113 +#define GCC_MDSS_PCLK0_CLK 114 +#define GCC_MDSS_VSYNC_CLK 115 +#define GCC_MSS_CFG_AHB_CLK 116 +#define GCC_OXILI_AHB_CLK 117 +#define GCC_OXILI_GFX3D_CLK 118 +#define GCC_PDM2_CLK 119 +#define GCC_PDM_AHB_CLK 120 +#define GCC_PRNG_AHB_CLK 121 +#define GCC_SDCC1_AHB_CLK 122 +#define GCC_SDCC1_APPS_CLK 123 +#define GCC_SDCC2_AHB_CLK 124 +#define GCC_SDCC2_APPS_CLK 125 +#define GCC_GTCU_AHB_CLK 126 +#define GCC_JPEG_TBU_CLK 127 +#define GCC_MDP_TBU_CLK 128 +#define GCC_SMMU_CFG_CLK 129 +#define GCC_VENUS_TBU_CLK 130 +#define GCC_VFE_TBU_CLK 131 +#define GCC_USB2A_PHY_SLEEP_CLK 132 +#define GCC_USB_HS_AHB_CLK 133 +#define GCC_USB_HS_SYSTEM_CLK 134 +#define GCC_VENUS0_AHB_CLK 135 +#define GCC_VENUS0_AXI_CLK 136 +#define GCC_VENUS0_VCODEC0_CLK 137 +#define BIMC_DDR_CLK_SRC 138 +#define GCC_APSS_TCU_CLK 139 +#define GCC_GFX_TCU_CLK 140 +#define BIMC_GPU_CLK_SRC 141 +#define GCC_BIMC_GFX_CLK 142 +#define GCC_BIMC_GPU_CLK 143 +#define ULTAUDIO_LPAIF_PRI_I2S_CLK_SRC 144 +#define ULTAUDIO_LPAIF_SEC_I2S_CLK_SRC 145 +#define ULTAUDIO_LPAIF_AUX_I2S_CLK_SRC 146 +#define ULTAUDIO_XO_CLK_SRC 147 +#define ULTAUDIO_AHBFABRIC_CLK_SRC 148 +#define CODEC_DIGCODEC_CLK_SRC 149 +#define GCC_ULTAUDIO_PCNOC_MPORT_CLK 150 +#define GCC_ULTAUDIO_PCNOC_SWAY_CLK 151 +#define GCC_ULTAUDIO_AVSYNC_XO_CLK 152 +#define GCC_ULTAUDIO_STC_XO_CLK 153 +#define GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_CLK 154 +#define GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_LPM_CLK 155 +#define 
GCC_ULTAUDIO_LPAIF_PRI_I2S_CLK 156 +#define GCC_ULTAUDIO_LPAIF_SEC_I2S_CLK 157 +#define GCC_ULTAUDIO_LPAIF_AUX_I2S_CLK 158 +#define GCC_CODEC_DIGCODEC_CLK 159 +#define GCC_MSS_Q6_BIMC_AXI_CLK 160 +#define GPLL3 161 +#define GPLL3_VOTE 162 +#define GPLL4 163 +#define GPLL4_VOTE 164 +#define GPLL5 165 +#define GPLL5_VOTE 166 +#define GPLL6 167 +#define GPLL6_VOTE 168 +#define BYTE1_CLK_SRC 169 +#define GCC_MDSS_BYTE1_CLK 170 +#define ESC1_CLK_SRC 171 +#define GCC_MDSS_ESC1_CLK 172 +#define PCLK1_CLK_SRC 173 +#define GCC_MDSS_PCLK1_CLK 174 +#define GCC_GFX_TBU_CLK 175 +#define GCC_CPP_TBU_CLK 176 +#define GCC_MDP_RT_TBU_CLK 177 +#define USB_FS_SYSTEM_CLK_SRC 178 +#define USB_FS_IC_CLK_SRC 179 +#define GCC_USB_FS_AHB_CLK 180 +#define GCC_USB_FS_IC_CLK 181 +#define GCC_USB_FS_SYSTEM_CLK 182 +#define GCC_VENUS0_CORE0_VCODEC0_CLK 183 +#define GCC_VENUS0_CORE1_VCODEC0_CLK 184 +#define GCC_OXILI_TIMER_CLK 185 + +/* Indexes for GDSCs */ +#define BIMC_GDSC 0 +#define VENUS_GDSC 1 +#define MDSS_GDSC 2 +#define JPEG_GDSC 3 +#define VFE_GDSC 4 +#define OXILI_GDSC 5 +#define VENUS_CORE0_GDSC 6 +#define VENUS_CORE1_GDSC 7 + +#endif diff --git a/include/dt-bindings/reset/qcom,gcc-msm8939.h b/include/dt-bindings/reset/qcom,gcc-msm8939.h new file mode 100644 index 0000000000000..fa41ffeae7a24 --- /dev/null +++ b/include/dt-bindings/reset/qcom,gcc-msm8939.h @@ -0,0 +1,110 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2020 Linaro Limited + */ + +#ifndef _DT_BINDINGS_RESET_MSM_GCC_8939_H +#define _DT_BINDINGS_RESET_MSM_GCC_8939_H + +#define GCC_BLSP1_BCR 0 +#define GCC_BLSP1_QUP1_BCR 1 +#define GCC_BLSP1_UART1_BCR 2 +#define GCC_BLSP1_QUP2_BCR 3 +#define GCC_BLSP1_UART2_BCR 4 +#define GCC_BLSP1_QUP3_BCR 5 +#define GCC_BLSP1_QUP4_BCR 6 +#define GCC_BLSP1_QUP5_BCR 7 +#define GCC_BLSP1_QUP6_BCR 8 +#define GCC_IMEM_BCR 9 +#define GCC_SMMU_BCR 10 +#define GCC_APSS_TCU_BCR 11 +#define GCC_SMMU_XPU_BCR 12 +#define GCC_PCNOC_TBU_BCR 13 +#define GCC_PRNG_BCR 14 +#define GCC_BOOT_ROM_BCR 15 +#define GCC_CRYPTO_BCR 16 +#define GCC_SEC_CTRL_BCR 17 +#define GCC_AUDIO_CORE_BCR 18 +#define GCC_ULT_AUDIO_BCR 19 +#define GCC_DEHR_BCR 20 +#define GCC_SYSTEM_NOC_BCR 21 +#define GCC_PCNOC_BCR 22 +#define GCC_TCSR_BCR 23 +#define GCC_QDSS_BCR 24 +#define GCC_DCD_BCR 25 +#define GCC_MSG_RAM_BCR 26 +#define GCC_MPM_BCR 27 +#define GCC_SPMI_BCR 28 +#define GCC_SPDM_BCR 29 +#define GCC_MM_SPDM_BCR 30 +#define GCC_BIMC_BCR 31 +#define GCC_RBCPR_BCR 32 +#define GCC_TLMM_BCR 33 +#define GCC_USB_HS_BCR 34 +#define GCC_USB2A_PHY_BCR 35 +#define GCC_SDCC1_BCR 36 +#define GCC_SDCC2_BCR 37 +#define GCC_PDM_BCR 38 +#define GCC_SNOC_BUS_TIMEOUT0_BCR 39 +#define GCC_PCNOC_BUS_TIMEOUT0_BCR 40 +#define GCC_PCNOC_BUS_TIMEOUT1_BCR 41 +#define GCC_PCNOC_BUS_TIMEOUT2_BCR 42 +#define GCC_PCNOC_BUS_TIMEOUT3_BCR 43 +#define GCC_PCNOC_BUS_TIMEOUT4_BCR 44 +#define GCC_PCNOC_BUS_TIMEOUT5_BCR 45 +#define GCC_PCNOC_BUS_TIMEOUT6_BCR 46 +#define GCC_PCNOC_BUS_TIMEOUT7_BCR 47 +#define GCC_PCNOC_BUS_TIMEOUT8_BCR 48 +#define GCC_PCNOC_BUS_TIMEOUT9_BCR 49 +#define GCC_MMSS_BCR 50 +#define GCC_VENUS0_BCR 51 +#define GCC_MDSS_BCR 52 +#define GCC_CAMSS_PHY0_BCR 53 +#define GCC_CAMSS_CSI0_BCR 54 +#define GCC_CAMSS_CSI0PHY_BCR 55 +#define GCC_CAMSS_CSI0RDI_BCR 56 +#define GCC_CAMSS_CSI0PIX_BCR 57 +#define GCC_CAMSS_PHY1_BCR 58 +#define GCC_CAMSS_CSI1_BCR 59 +#define GCC_CAMSS_CSI1PHY_BCR 60 +#define GCC_CAMSS_CSI1RDI_BCR 61 +#define GCC_CAMSS_CSI1PIX_BCR 62 +#define GCC_CAMSS_ISPIF_BCR 63 +#define GCC_CAMSS_CCI_BCR 64 +#define GCC_CAMSS_MCLK0_BCR 65 
+#define GCC_CAMSS_MCLK1_BCR 66 +#define GCC_CAMSS_GP0_BCR 67 +#define GCC_CAMSS_GP1_BCR 68 +#define GCC_CAMSS_TOP_BCR 69 +#define GCC_CAMSS_MICRO_BCR 70 +#define GCC_CAMSS_JPEG_BCR 71 +#define GCC_CAMSS_VFE_BCR 72 +#define GCC_CAMSS_CSI_VFE0_BCR 73 +#define GCC_OXILI_BCR 74 +#define GCC_GMEM_BCR 75 +#define GCC_CAMSS_AHB_BCR 76 +#define GCC_MDP_TBU_BCR 77 +#define GCC_GFX_TBU_BCR 78 +#define GCC_GFX_TCU_BCR 79 +#define GCC_MSS_TBU_AXI_BCR 80 +#define GCC_MSS_TBU_GSS_AXI_BCR 81 +#define GCC_MSS_TBU_Q6_AXI_BCR 82 +#define GCC_GTCU_AHB_BCR 83 +#define GCC_SMMU_CFG_BCR 84 +#define GCC_VFE_TBU_BCR 85 +#define GCC_VENUS_TBU_BCR 86 +#define GCC_JPEG_TBU_BCR 87 +#define GCC_PRONTO_TBU_BCR 88 +#define GCC_SMMU_CATS_BCR 89 +#define GCC_BLSP1_UART3_BCR 90 +#define GCC_CAMSS_CSI2_BCR 91 +#define GCC_CAMSS_CSI2PHY_BCR 92 +#define GCC_CAMSS_CSI2RDI_BCR 93 +#define GCC_CAMSS_CSI2PIX_BCR 94 +#define GCC_USB_FS_BCR 95 +#define GCC_BLSP1_QUP4_SPI_APPS_CBCR 96 +#define GCC_CAMSS_MCLK2_BCR 97 +#define GCC_CPP_TBU_BCR 98 +#define GCC_MDP_RT_TBU_BCR 99 + +#endif -- GitLab From 5bbeea34bc7ab579516486fc387da8bde94b09a4 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Tue, 12 May 2020 16:45:44 +0300 Subject: [PATCH 0504/1971] dmaengine: ti: k3-udma: Add missing dma_sync call for rx flush descriptor The TR mode rx flush descriptor did not have a dma_sync_single_for_device() call to make sure that the DMA sees the correct information. Signed-off-by: Peter Ujfalusi Link: https://lore.kernel.org/r/20200512134544.5839-1-peter.ujfalusi@ti.com Signed-off-by: Vinod Koul --- drivers/dma/ti/k3-udma.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c index 80d64f71a7cb4..58af578d8dc1d 100644 --- a/drivers/dma/ti/k3-udma.c +++ b/drivers/dma/ti/k3-udma.c @@ -3463,6 +3463,9 @@ static int udma_setup_rx_flush(struct udma_dev *ud) tr_req->icnt0 = rx_flush->buffer_size; tr_req->icnt1 = 1; + dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr, + hwdesc->cppi5_desc_size, DMA_TO_DEVICE); + /* Set up descriptor to be used for packet mode */ hwdesc = &rx_flush->hwdescs[1]; hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) + -- GitLab From 6fea8735fd96f39c1a0ba52961069ae66e549595 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Tue, 12 May 2020 16:46:11 +0300 Subject: [PATCH 0505/1971] dmaengine: ti: k3-udma: Remove udma_chan.in_ring_cnt The in_ring_cnt is not used for anything, so it can be removed.
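Since nothing ever reads the counter back, dropping it also shortens the push path; the tail of udma_push_to_ring() reduces to a single call (sketch matching the hunk below):

	/* hand the descriptor address to the ring accelerator; no local
	 * occupancy accounting is needed
	 */
	return k3_ringacc_ring_push(ring, &paddr);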
Signed-off-by: Peter Ujfalusi Link: https://lore.kernel.org/r/20200512134611.6015-1-peter.ujfalusi@ti.com Signed-off-by: Vinod Koul --- drivers/dma/ti/k3-udma.c | 16 +--------------- 1 file changed, 1 insertion(+), 15 deletions(-) diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c index 58af578d8dc1d..653aea3f6de22 100644 --- a/drivers/dma/ti/k3-udma.c +++ b/drivers/dma/ti/k3-udma.c @@ -231,7 +231,6 @@ struct udma_chan { struct udma_tx_drain tx_drain; u32 bcnt; /* number of bytes completed since the start of the channel */ - u32 in_ring_cnt; /* number of descriptors in flight */ /* Channel configuration parameters */ struct udma_chan_config config; @@ -574,7 +573,6 @@ static int udma_push_to_ring(struct udma_chan *uc, int idx) struct udma_desc *d = uc->desc; struct k3_ring *ring = NULL; dma_addr_t paddr; - int ret; switch (uc->config.dir) { case DMA_DEV_TO_MEM: @@ -598,11 +596,7 @@ static int udma_push_to_ring(struct udma_chan *uc, int idx) udma_sync_for_device(uc, idx); } - ret = k3_ringacc_ring_push(ring, &paddr); - if (!ret) - uc->in_ring_cnt++; - - return ret; + return k3_ringacc_ring_push(ring, &paddr); } static bool udma_desc_is_rx_flush(struct udma_chan *uc, dma_addr_t addr) @@ -655,9 +649,6 @@ static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr) d->hwdesc[0].cppi5_desc_size, DMA_FROM_DEVICE); rmb(); /* Ensure that reads are not moved before this point */ - - if (!ret) - uc->in_ring_cnt--; } return ret; @@ -697,8 +688,6 @@ static void udma_reset_rings(struct udma_chan *uc) udma_desc_free(&uc->terminated_desc->vd); uc->terminated_desc = NULL; } - - uc->in_ring_cnt = 0; } static void udma_reset_counters(struct udma_chan *uc) @@ -1073,9 +1062,6 @@ static irqreturn_t udma_ring_irq_handler(int irq, void *data) /* Teardown completion message */ if (cppi5_desc_is_tdcm(paddr)) { - /* Compensate our internal pop/push counter */ - uc->in_ring_cnt++; - complete_all(&uc->teardown_completed); if (uc->terminated_desc) { -- GitLab From 7ae6d7bd7397e46bd72cb85ab573a669c4dac925 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Tue, 12 May 2020 16:45:19 +0300 Subject: [PATCH 0506/1971] dmaengine: ti: k3-udma: Use proper return code in alloc_chan_resources In udma_alloc_chan_resources(), if the channel is not willing to stop, the function should return with an error code. Signed-off-by: Peter Ujfalusi Link: https://lore.kernel.org/r/20200512134519.5642-1-peter.ujfalusi@ti.com Signed-off-by: Vinod Koul --- drivers/dma/ti/k3-udma.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c index 653aea3f6de22..9769a4699cac8 100644 --- a/drivers/dma/ti/k3-udma.c +++ b/drivers/dma/ti/k3-udma.c @@ -1850,6 +1850,7 @@ static int udma_alloc_chan_resources(struct dma_chan *chan) udma_stop(uc); if (udma_is_chan_running(uc)) { dev_err(ud->dev, "chan%d: won't stop!\n", uc->id); + ret = -EBUSY; goto err_res_free; } } -- GitLab From be4cf718cd9929e867ed1ff06d23fb4d08cc2d36 Mon Sep 17 00:00:00 2001 From: Sascha Hauer Date: Wed, 13 May 2020 08:04:05 +0200 Subject: [PATCH 0507/1971] dmaengine: imx-sdma: initialize all script addresses The script addresses array increases with each new version. The driver initializes the array to -EINVAL initially, but only up to the size of the v1 array. Initialize the additional addresses for the newer versions as well. Without this, uninitialized values of the newer arrays are treated as valid.
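The fix is to derive the loop bound from the full script_addrs structure rather than from the v1 count, so every slot starts out invalid; schematically (matching the hunk below):

	s32 *saddr_arr = (s32 *)sdma->script_addrs;
	int i;

	/* mark every script address invalid, not just the v1 subset */
	for (i = 0; i < sizeof(*sdma->script_addrs) / sizeof(s32); i++)
		saddr_arr[i] = -EINVAL;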
Signed-off-by: Sascha Hauer Reviewed-by: Robin Gong Link: https://lore.kernel.org/r/20200513060405.18685-1-s.hauer@pengutronix.de Signed-off-by: Vinod Koul --- drivers/dma/imx-sdma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 4d4477df4ede7..91774039ae5d6 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -2063,7 +2063,7 @@ static int sdma_probe(struct platform_device *pdev) /* initially no scripts available */ saddr_arr = (s32 *)sdma->script_addrs; - for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++) + for (i = 0; i < sizeof(*sdma->script_addrs) / sizeof(s32); i++) saddr_arr[i] = -EINVAL; dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask); -- GitLab From eda8ffcc5edf17a5b895aa1c77633dc55955c040 Mon Sep 17 00:00:00 2001 From: Dan Murphy Date: Tue, 12 May 2020 14:19:00 -0500 Subject: [PATCH 0508/1971] dt-bindings: power: Convert bq27xxx dt to yaml MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Convert the bq27xxx.txt to yaml format CC: Pali Rohár CC: Andrew F. Davis Signed-off-by: Dan Murphy Reviewed-by: Rob Herring Signed-off-by: Sebastian Reichel --- .../bindings/power/supply/bq27xxx.txt | 56 ------------ .../bindings/power/supply/bq27xxx.yaml | 91 +++++++++++++++++++ 2 files changed, 91 insertions(+), 56 deletions(-) delete mode 100644 Documentation/devicetree/bindings/power/supply/bq27xxx.txt create mode 100644 Documentation/devicetree/bindings/power/supply/bq27xxx.yaml diff --git a/Documentation/devicetree/bindings/power/supply/bq27xxx.txt b/Documentation/devicetree/bindings/power/supply/bq27xxx.txt deleted file mode 100644 index 4fa8e08df2b6c..0000000000000 --- a/Documentation/devicetree/bindings/power/supply/bq27xxx.txt +++ /dev/null @@ -1,56 +0,0 @@ -TI BQ27XXX fuel gauge family - -Required properties: -- compatible: contains one of the following: - * "ti,bq27200" - BQ27200 - * "ti,bq27210" - BQ27210 - * "ti,bq27500" - deprecated, use revision specific property below - * "ti,bq27510" - deprecated, use revision specific property below - * "ti,bq27520" - deprecated, use revision specific property below - * "ti,bq27500-1" - BQ27500/1 - * "ti,bq27510g1" - BQ27510-g1 - * "ti,bq27510g2" - BQ27510-g2 - * "ti,bq27510g3" - BQ27510-g3 - * "ti,bq27520g1" - BQ27520-g1 - * "ti,bq27520g2" - BQ27520-g2 - * "ti,bq27520g3" - BQ27520-g3 - * "ti,bq27520g4" - BQ27520-g4 - * "ti,bq27521" - BQ27521 - * "ti,bq27530" - BQ27530 - * "ti,bq27531" - BQ27531 - * "ti,bq27541" - BQ27541 - * "ti,bq27542" - BQ27542 - * "ti,bq27546" - BQ27546 - * "ti,bq27742" - BQ27742 - * "ti,bq27545" - BQ27545 - * "ti,bq27411" - BQ27411 - * "ti,bq27421" - BQ27421 - * "ti,bq27425" - BQ27425 - * "ti,bq27426" - BQ27426 - * "ti,bq27441" - BQ27441 - * "ti,bq27621" - BQ27621 -- reg: integer, I2C address of the fuel gauge. - -Optional properties: -- monitored-battery: phandle of battery characteristics node - The fuel gauge uses the following battery properties: - + energy-full-design-microwatt-hours - + charge-full-design-microamp-hours - + voltage-min-design-microvolt - Both or neither of the *-full-design-*-hours properties must be set. 
- See Documentation/devicetree/bindings/power/supply/battery.txt - -Example: - - bat: battery { - compatible = "simple-battery"; - voltage-min-design-microvolt = <3200000>; - energy-full-design-microwatt-hours = <5290000>; - charge-full-design-microamp-hours = <1430000>; - }; - - bq27510g3: fuel-gauge@55 { - compatible = "ti,bq27510g3"; - reg = <0x55>; - monitored-battery = <&bat>; - }; diff --git a/Documentation/devicetree/bindings/power/supply/bq27xxx.yaml b/Documentation/devicetree/bindings/power/supply/bq27xxx.yaml new file mode 100644 index 0000000000000..03d1020a2e470 --- /dev/null +++ b/Documentation/devicetree/bindings/power/supply/bq27xxx.yaml @@ -0,0 +1,91 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2020 Texas Instruments Incorporated +%YAML 1.2 +--- +$id: "http://devicetree.org/schemas/power/supply/bq27xxx.yaml#" +$schema: "http://devicetree.org/meta-schemas/core.yaml#" + +title: TI BQ27XXX fuel gauge family + +maintainers: + - Pali Rohár + - Andrew F. Davis + - Sebastian Reichel + +description: | + Support various Texas Instruments fuel gauge devices that share similar + register maps and power supply properties + +allOf: + - $ref: power-supply.yaml# + +properties: + compatible: + enum: + - ti,bq27200 + - ti,bq27210 + - ti,bq27500 # deprecated, use revision specific property below + - ti,bq27510 # deprecated, use revision specific property below + - ti,bq27520 # deprecated, use revision specific property below + - ti,bq27500-1 + - ti,bq27510g1 + - ti,bq27510g2 + - ti,bq27510g3 + - ti,bq27520g1 + - ti,bq27520g2 + - ti,bq27520g3 + - ti,bq27520g4 + - ti,bq27521 + - ti,bq27530 + - ti,bq27531 + - ti,bq27541 + - ti,bq27542 + - ti,bq27546 + - ti,bq27742 + - ti,bq27545 + - ti,bq27411 + - ti,bq27421 + - ti,bq27425 + - ti,bq27426 + - ti,bq27441 + - ti,bq27621 + + reg: + maxItems: 1 + description: integer, I2C address of the fuel gauge. + + monitored-battery: + description: | + phandle of battery characteristics node. + The fuel gauge uses the following battery properties: + - energy-full-design-microwatt-hours + - charge-full-design-microamp-hours + - voltage-min-design-microvolt + Both or neither of the *-full-design-*-hours properties must be set. + See Documentation/devicetree/bindings/power/supply/battery.txt + + power-supplies: true + +required: + - compatible + - reg +additionalProperties: false + +examples: + - | + i2c0 { + #address-cells = <1>; + #size-cells = <0>; + bat: battery { + compatible = "simple-battery"; + voltage-min-design-microvolt = <3200000>; + energy-full-design-microwatt-hours = <5290000>; + charge-full-design-microamp-hours = <1430000>; + }; + + bq27510g3: fuel-gauge@55 { + compatible = "ti,bq27510g3"; + reg = <0x55>; + monitored-battery = <&bat>; + }; + }; -- GitLab From 430ee40d007d5eace32a2e70aab8c54a70f0b3ec Mon Sep 17 00:00:00 2001 From: Jeff LaBundy Date: Sun, 17 May 2020 17:42:00 -0700 Subject: [PATCH 0509/1971] dt-bindings: input: Add bindings for Azoteq IQS269A This patch adds device tree bindings for the Azoteq IQS269A capacitive touch controller. 
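Each active channel is a child node that may declare event-prox, event-touch and event-deep sub-nodes carrying a linux,code value. The matching driver (added in the next patch) walks these nodes with the fwnode API; a simplified sketch (error handling omitted, and the keycode indexing shown here is illustrative rather than the driver's exact scheme):

	struct fwnode_handle *ev_node;
	unsigned int val;
	int i;

	for (i = 0; i < ARRAY_SIZE(iqs269_events); i++) {
		ev_node = fwnode_get_named_child_node(ch_node,
						      iqs269_events[i].name);
		if (!ev_node)
			continue;

		if (!fwnode_property_read_u32(ev_node, "linux,code", &val))
			iqs269->keycode[i * IQS269_NUM_CH + reg] = val;	/* illustrative indexing */

		fwnode_handle_put(ev_node);
	}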
Signed-off-by: Jeff LaBundy Reviewed-by: Rob Herring Link: https://lore.kernel.org/r/1588352982-5117-1-git-send-email-jeff@labundy.com Signed-off-by: Dmitry Torokhov --- .../devicetree/bindings/input/iqs269a.yaml | 581 ++++++++++++++++++ 1 file changed, 581 insertions(+) create mode 100644 Documentation/devicetree/bindings/input/iqs269a.yaml diff --git a/Documentation/devicetree/bindings/input/iqs269a.yaml b/Documentation/devicetree/bindings/input/iqs269a.yaml new file mode 100644 index 0000000000000..f0242bb4be81d --- /dev/null +++ b/Documentation/devicetree/bindings/input/iqs269a.yaml @@ -0,0 +1,581 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/input/iqs269a.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Azoteq IQS269A Capacitive Touch Controller + +maintainers: + - Jeff LaBundy + +description: | + The Azoteq IQS269A is an 8-channel capacitive touch controller that features + additional Hall-effect and inductive sensing capabilities. + + Link to datasheet: https://www.azoteq.com/ + +properties: + compatible: + const: azoteq,iqs269a + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + "#address-cells": + const: 1 + + "#size-cells": + const: 0 + + azoteq,hall-enable: + type: boolean + description: + Enables Hall-effect sensing on channels 6 and 7. In this case, keycodes + assigned to channel 6 are ignored and keycodes assigned to channel 7 are + interpreted as switch codes. Refer to the datasheet for requirements im- + posed on channels 6 and 7 by Hall-effect sensing. + + azoteq,suspend-mode: + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32 + - enum: [0, 1, 2, 3] + default: 0 + description: | + Specifies the power mode during suspend as follows: + 0: Automatic (same as normal runtime, i.e. suspend/resume disabled) + 1: Low power (all sensing at a reduced reporting rate) + 2: Ultra-low power (channel 0 proximity sensing) + 3: Halt (no sensing) + + azoteq,clk-div: + type: boolean + description: Divides the device's core clock by a factor of 4. + + azoteq,ulp-update: + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32 + - minimum: 0 + maximum: 7 + default: 3 + description: Specifies the ultra-low-power mode update rate. + + azoteq,reseed-offset: + type: boolean + description: + Applies an 8-count offset to all long-term averages upon either ATI or + reseed events. + + azoteq,filt-str-lp-lta: + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32 + - enum: [0, 1, 2, 3] + default: 0 + description: + Specifies the long-term average filter strength during low-power mode. + + azoteq,filt-str-lp-cnt: + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32 + - enum: [0, 1, 2, 3] + default: 0 + description: + Specifies the raw count filter strength during low-power mode. + + azoteq,filt-str-np-lta: + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32 + - enum: [0, 1, 2, 3] + default: 0 + description: + Specifies the long-term average filter strength during normal-power mode. + + azoteq,filt-str-np-cnt: + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32 + - enum: [0, 1, 2, 3] + default: 0 + description: + Specifies the raw count filter strength during normal-power mode. + + azoteq,rate-np-ms: + minimum: 0 + maximum: 255 + default: 16 + description: Specifies the report rate (in ms) during normal-power mode. + + azoteq,rate-lp-ms: + minimum: 0 + maximum: 255 + default: 160 + description: Specifies the report rate (in ms) during low-power mode. 
+ + azoteq,rate-ulp-ms: + multipleOf: 16 + minimum: 0 + maximum: 4080 + default: 160 + description: Specifies the report rate (in ms) during ultra-low-power mode. + + azoteq,timeout-pwr-ms: + multipleOf: 512 + minimum: 0 + maximum: 130560 + default: 2560 + description: + Specifies the length of time (in ms) to wait for an event during normal- + power mode before transitioning to low-power mode. + + azoteq,timeout-lta-ms: + multipleOf: 512 + minimum: 0 + maximum: 130560 + default: 32768 + description: + Specifies the length of time (in ms) to wait before resetting the long- + term average of all channels. Specify the maximum timeout to disable it + altogether. + + azoteq,ati-band-disable: + type: boolean + description: Disables the ATI band check. + + azoteq,ati-lp-only: + type: boolean + description: Limits automatic ATI to low-power mode. + + azoteq,ati-band-tighten: + type: boolean + description: Tightens the ATI band from 1/8 to 1/16 of the desired target. + + azoteq,filt-disable: + type: boolean + description: Disables all raw count filtering. + + azoteq,gpio3-select: + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32 + - minimum: 0 + maximum: 7 + default: 0 + description: + Selects the channel for which the GPIO3 pin represents touch state. + + azoteq,dual-direction: + type: boolean + description: + Specifies that long-term averages are to freeze in the presence of either + increasing or decreasing counts, thereby permitting events to be reported + in either direction. + + azoteq,tx-freq: + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32 + - enum: [0, 1, 2, 3] + default: 0 + description: | + Specifies the inductive sensing excitation frequency as follows (paren- + thesized numbers represent the frequency if 'azoteq,clk-div' is present): + 0: 16 MHz (4 MHz) + 1: 8 MHz (2 MHz) + 2: 4 MHz (1 MHz) + 3: 2 MHz (500 kHz) + + azoteq,global-cap-increase: + type: boolean + description: Increases the global capacitance adder from 0.5 pF to 1.5 pF. + + azoteq,reseed-select: + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32 + - enum: [0, 1, 2, 3] + default: 0 + description: | + Specifies the event(s) that prompt the device to reseed (i.e. reset the + long-term average) of an associated channel as follows: + 0: None + 1: Proximity + 2: Proximity or touch + 3: Proximity, touch or deep touch + + azoteq,tracking-enable: + type: boolean + description: + Enables all associated channels to track their respective reference + channels. + + azoteq,filt-str-slider: + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32 + - enum: [0, 1, 2, 3] + default: 1 + description: Specifies the slider coordinate filter strength. + +patternProperties: + "^channel@[0-7]$": + type: object + description: + Represents a single sensing channel. A channel is active if defined and + inactive otherwise. + + properties: + reg: + minimum: 0 + maximum: 7 + description: Index of the channel. + + azoteq,reseed-disable: + type: boolean + description: + Prevents the channel from being reseeded if the long-term average + timeout (defined in 'azoteq,timeout-lta') expires. + + azoteq,blocking-enable: + type: boolean + description: Specifies that the channel is a blocking channel. + + azoteq,slider0-select: + type: boolean + description: Specifies that the channel participates in slider 0. + + azoteq,slider1-select: + type: boolean + description: Specifies that the channel participates in slider 1. 
+ + azoteq,rx-enable: + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32-array + - minItems: 1 + maxItems: 8 + items: + minimum: 0 + maximum: 7 + description: + Specifies the CRX pin(s) associated with the channel. By default, only + the CRX pin corresponding to the channel's index is enabled (e.g. CRX0 + for channel 0). + + azoteq,tx-enable: + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32-array + - minItems: 1 + maxItems: 8 + items: + minimum: 0 + maximum: 7 + default: [0, 1, 2, 3, 4, 5, 6, 7] + description: Specifies the TX pin(s) associated with the channel. + + azoteq,meas-cap-decrease: + type: boolean + description: + Decreases the internal measurement capacitance from 60 pF to 15 pF. + + azoteq,rx-float-inactive: + type: boolean + description: Floats any inactive CRX pins instead of grounding them. + + azoteq,local-cap-size: + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32 + - enum: [0, 1, 2] + default: 0 + description: | + Specifies the capacitance to be added to the channel as follows: + 0: None + 1: Global adder (based on 'azoteq,global-cap-increase') + 2: Global adder + 0.5 pF + + azoteq,invert-enable: + type: boolean + description: + Inverts the polarity of the states reported for proximity, touch and + deep-touch events relative to their respective thresholds. + + azoteq,proj-bias: + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32 + - enum: [0, 1, 2, 3] + default: 2 + description: | + Specifies the bias current applied during projected-capacitance + sensing as follows: + 0: 2.5 uA + 1: 5 uA + 2: 10 uA + 3: 20 uA + + azoteq,sense-mode: + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32 + - enum: [0, 1, 9, 14, 15] + default: 0 + description: | + Specifies the channel's sensing mode as follows: + 0: Self capacitance + 1: Projected capacitance + 9: Self or mutual inductance + 14: Hall effect + 15: Temperature + + azoteq,sense-freq: + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32 + - enum: [0, 1, 2, 3] + default: 1 + description: | + Specifies the channel's sensing frequency as follows (parenthesized + numbers represent the frequency if 'azoteq,clk-div' is present): + 0: 4 MHz (1 MHz) + 1: 2 MHz (500 kHz) + 2: 1 MHz (250 kHz) + 3: 500 kHz (125 kHz) + + azoteq,static-enable: + type: boolean + description: Enables the static front-end for the channel. + + azoteq,ati-mode: + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32 + - enum: [0, 1, 2, 3] + default: 3 + description: | + Specifies the channel's ATI mode as follows: + 0: Disabled + 1: Semi-partial + 2: Partial + 3: Full + + azoteq,ati-base: + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32 + - enum: [75, 100, 150, 200] + default: 100 + description: Specifies the channel's ATI base. + + azoteq,ati-target: + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32 + - multipleOf: 32 + minimum: 0 + maximum: 2016 + default: 512 + description: Specifies the channel's ATI target. + + azoteq,assoc-select: + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32-array + - minItems: 1 + maxItems: 8 + items: + minimum: 0 + maximum: 7 + description: + Specifies the associated channels for which the channel serves as a + reference channel. By default, no channels are selected. + + azoteq,assoc-weight: + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32 + - minimum: 0 + maximum: 255 + default: 0 + description: + Specifies the channel's impact weight if it acts as an associated + channel (0 = 0% impact, 255 = 200% impact). 
+ + patternProperties: + "^event-prox(-alt)?$": + type: object + description: + Represents a proximity event reported by the channel in response to + a decrease in counts. Node names suffixed with '-alt' instead corre- + spond to an increase in counts. + + By default, the long-term average tracks an increase in counts such + that only events corresponding to a decrease in counts are reported + (refer to the datasheet for more information). + + Specify 'azoteq,dual-direction' to freeze the long-term average when + the counts increase or decrease such that events of either direction + can be reported. Alternatively, specify 'azoteq,invert-enable' to in- + vert the polarity of the states reported by the channel. + + Complementary events (e.g. event-touch and event-touch-alt) can both + be present and specify different key or switch codes, but not differ- + ent thresholds or hysteresis (if applicable). + + properties: + azoteq,thresh: + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32 + - minimum: 0 + maximum: 255 + default: 10 + description: Specifies the threshold for the event. + + linux,code: + $ref: /schemas/types.yaml#/definitions/uint32 + description: Numeric key or switch code associated with the event. + + additionalProperties: false + + "^event-touch(-alt)?$": + type: object + description: Represents a touch event reported by the channel. + + properties: + azoteq,thresh: + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32 + - minimum: 0 + maximum: 255 + default: 8 + description: Specifies the threshold for the event. + + azoteq,hyst: + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32 + - minimum: 0 + maximum: 15 + default: 4 + description: Specifies the hysteresis for the event. + + linux,code: + $ref: /schemas/types.yaml#/definitions/uint32 + description: Numeric key or switch code associated with the event. + + additionalProperties: false + + "^event-deep(-alt)?$": + type: object + description: Represents a deep-touch event reported by the channel. + + properties: + azoteq,thresh: + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32 + - minimum: 0 + maximum: 255 + default: 26 + description: Specifies the threshold for the event. + + azoteq,hyst: + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32 + - minimum: 0 + maximum: 15 + default: 0 + description: Specifies the hysteresis for the event. + + linux,code: + $ref: /schemas/types.yaml#/definitions/uint32 + description: Numeric key or switch code associated with the event. 
+ + additionalProperties: false + + required: + - reg + + additionalProperties: false + +required: + - compatible + - reg + - interrupts + - "#address-cells" + - "#size-cells" + +additionalProperties: false + +examples: + - | + #include + #include + + i2c { + #address-cells = <1>; + #size-cells = <0>; + + iqs269a@44 { + #address-cells = <1>; + #size-cells = <0>; + + compatible = "azoteq,iqs269a"; + reg = <0x44>; + interrupt-parent = <&gpio>; + interrupts = <17 IRQ_TYPE_LEVEL_LOW>; + + azoteq,hall-enable; + azoteq,suspend-mode = <2>; + + channel@0 { + reg = <0x0>; + + event-prox { + linux,code = ; + }; + }; + + channel@1 { + reg = <0x1>; + azoteq,slider0-select; + }; + + channel@2 { + reg = <0x2>; + azoteq,slider0-select; + }; + + channel@3 { + reg = <0x3>; + azoteq,slider0-select; + }; + + channel@4 { + reg = <0x4>; + azoteq,slider0-select; + }; + + channel@5 { + reg = <0x5>; + azoteq,slider0-select; + }; + + channel@6 { + reg = <0x6>; + azoteq,invert-enable; + azoteq,static-enable; + azoteq,reseed-disable; + azoteq,rx-enable = <0>; + azoteq,sense-freq = <0x0>; + azoteq,sense-mode = <0xE>; + azoteq,ati-mode = <0x0>; + azoteq,ati-base = <200>; + azoteq,ati-target = <320>; + }; + + channel@7 { + reg = <0x7>; + azoteq,invert-enable; + azoteq,static-enable; + azoteq,reseed-disable; + azoteq,rx-enable = <0>, <6>; + azoteq,sense-freq = <0x0>; + azoteq,sense-mode = <0xE>; + azoteq,ati-mode = <0x3>; + azoteq,ati-base = <200>; + azoteq,ati-target = <320>; + + event-touch { + linux,code = ; + }; + }; + }; + }; + +... -- GitLab From 04e49867fad1f4a0739862bb15133f96ace1d190 Mon Sep 17 00:00:00 2001 From: Jeff LaBundy Date: Sun, 17 May 2020 17:42:22 -0700 Subject: [PATCH 0510/1971] Input: add support for Azoteq IQS269A This patch adds support for the Azoteq IQS269A capacitive touch controller. Signed-off-by: Jeff LaBundy Link: https://lore.kernel.org/r/1588352982-5117-2-git-send-email-jeff@labundy.com Signed-off-by: Dmitry Torokhov --- drivers/input/misc/Kconfig | 10 + drivers/input/misc/Makefile | 1 + drivers/input/misc/iqs269a.c | 1833 ++++++++++++++++++++++++++++++++++ 3 files changed, 1844 insertions(+) create mode 100644 drivers/input/misc/iqs269a.c diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index 315b14d59ad7d..943cf69b5b733 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig @@ -718,6 +718,16 @@ config INPUT_IMS_PCU To compile this driver as a module, choose M here: the module will be called ims_pcu. +config INPUT_IQS269A + tristate "Azoteq IQS269A capacitive touch controller" + select REGMAP_I2C + help + Say Y to enable support for the Azoteq IQS269A capacitive + touch controller. + + To compile this driver as a module, choose M here: the + module will be called iqs269a. 
+ config INPUT_CMA3000 tristate "VTI CMA3000 Tri-axis accelerometer" help diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile index 546766f29acae..a48e5f2d859d4 100644 --- a/drivers/input/misc/Makefile +++ b/drivers/input/misc/Makefile @@ -39,6 +39,7 @@ obj-$(CONFIG_INPUT_GPIO_VIBRA) += gpio-vibra.o obj-$(CONFIG_INPUT_HISI_POWERKEY) += hisi_powerkey.o obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o obj-$(CONFIG_INPUT_IMS_PCU) += ims-pcu.o +obj-$(CONFIG_INPUT_IQS269A) += iqs269a.o obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o obj-$(CONFIG_INPUT_KXTJ9) += kxtj9.o diff --git a/drivers/input/misc/iqs269a.c b/drivers/input/misc/iqs269a.c new file mode 100644 index 0000000000000..6699eb160a0f4 --- /dev/null +++ b/drivers/input/misc/iqs269a.c @@ -0,0 +1,1833 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Azoteq IQS269A Capacitive Touch Controller + * + * Copyright (C) 2020 Jeff LaBundy + * + * This driver registers up to 3 input devices: one representing capacitive or + * inductive keys as well as Hall-effect switches, and one for each of the two + * axial sliders presented by the device. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define IQS269_VER_INFO 0x00 +#define IQS269_VER_INFO_PROD_NUM 0x4F + +#define IQS269_SYS_FLAGS 0x02 +#define IQS269_SYS_FLAGS_SHOW_RESET BIT(15) +#define IQS269_SYS_FLAGS_PWR_MODE_MASK GENMASK(12, 11) +#define IQS269_SYS_FLAGS_PWR_MODE_SHIFT 11 +#define IQS269_SYS_FLAGS_IN_ATI BIT(10) + +#define IQS269_CHx_COUNTS 0x08 + +#define IQS269_SLIDER_X 0x30 + +#define IQS269_CAL_DATA_A 0x35 +#define IQS269_CAL_DATA_A_HALL_BIN_L_MASK GENMASK(15, 12) +#define IQS269_CAL_DATA_A_HALL_BIN_L_SHIFT 12 +#define IQS269_CAL_DATA_A_HALL_BIN_R_MASK GENMASK(11, 8) +#define IQS269_CAL_DATA_A_HALL_BIN_R_SHIFT 8 + +#define IQS269_SYS_SETTINGS 0x80 +#define IQS269_SYS_SETTINGS_CLK_DIV BIT(15) +#define IQS269_SYS_SETTINGS_ULP_AUTO BIT(14) +#define IQS269_SYS_SETTINGS_DIS_AUTO BIT(13) +#define IQS269_SYS_SETTINGS_PWR_MODE_MASK GENMASK(12, 11) +#define IQS269_SYS_SETTINGS_PWR_MODE_SHIFT 11 +#define IQS269_SYS_SETTINGS_PWR_MODE_MAX 3 +#define IQS269_SYS_SETTINGS_ULP_UPDATE_MASK GENMASK(10, 8) +#define IQS269_SYS_SETTINGS_ULP_UPDATE_SHIFT 8 +#define IQS269_SYS_SETTINGS_ULP_UPDATE_MAX 7 +#define IQS269_SYS_SETTINGS_RESEED_OFFSET BIT(6) +#define IQS269_SYS_SETTINGS_EVENT_MODE BIT(5) +#define IQS269_SYS_SETTINGS_EVENT_MODE_LP BIT(4) +#define IQS269_SYS_SETTINGS_REDO_ATI BIT(2) +#define IQS269_SYS_SETTINGS_ACK_RESET BIT(0) + +#define IQS269_FILT_STR_LP_LTA_MASK GENMASK(7, 6) +#define IQS269_FILT_STR_LP_LTA_SHIFT 6 +#define IQS269_FILT_STR_LP_CNT_MASK GENMASK(5, 4) +#define IQS269_FILT_STR_LP_CNT_SHIFT 4 +#define IQS269_FILT_STR_NP_LTA_MASK GENMASK(3, 2) +#define IQS269_FILT_STR_NP_LTA_SHIFT 2 +#define IQS269_FILT_STR_NP_CNT_MASK GENMASK(1, 0) +#define IQS269_FILT_STR_MAX 3 + +#define IQS269_EVENT_MASK_SYS BIT(6) +#define IQS269_EVENT_MASK_DEEP BIT(2) +#define IQS269_EVENT_MASK_TOUCH BIT(1) +#define IQS269_EVENT_MASK_PROX BIT(0) + +#define IQS269_RATE_NP_MS_MAX 255 +#define IQS269_RATE_LP_MS_MAX 255 +#define IQS269_RATE_ULP_MS_MAX 4080 +#define IQS269_TIMEOUT_PWR_MS_MAX 130560 +#define IQS269_TIMEOUT_LTA_MS_MAX 130560 + +#define IQS269_MISC_A_ATI_BAND_DISABLE BIT(15) +#define IQS269_MISC_A_ATI_LP_ONLY BIT(14) +#define IQS269_MISC_A_ATI_BAND_TIGHTEN BIT(13) +#define IQS269_MISC_A_FILT_DISABLE BIT(12) +#define 
IQS269_MISC_A_GPIO3_SELECT_MASK GENMASK(10, 8) +#define IQS269_MISC_A_GPIO3_SELECT_SHIFT 8 +#define IQS269_MISC_A_DUAL_DIR BIT(6) +#define IQS269_MISC_A_TX_FREQ_MASK GENMASK(5, 4) +#define IQS269_MISC_A_TX_FREQ_SHIFT 4 +#define IQS269_MISC_A_TX_FREQ_MAX 3 +#define IQS269_MISC_A_GLOBAL_CAP_SIZE BIT(0) + +#define IQS269_MISC_B_RESEED_UI_SEL_MASK GENMASK(7, 6) +#define IQS269_MISC_B_RESEED_UI_SEL_SHIFT 6 +#define IQS269_MISC_B_RESEED_UI_SEL_MAX 3 +#define IQS269_MISC_B_TRACKING_UI_ENABLE BIT(4) +#define IQS269_MISC_B_FILT_STR_SLIDER GENMASK(1, 0) + +#define IQS269_CHx_SETTINGS 0x8C + +#define IQS269_CHx_ENG_A_MEAS_CAP_SIZE BIT(15) +#define IQS269_CHx_ENG_A_RX_GND_INACTIVE BIT(13) +#define IQS269_CHx_ENG_A_LOCAL_CAP_SIZE BIT(12) +#define IQS269_CHx_ENG_A_ATI_MODE_MASK GENMASK(9, 8) +#define IQS269_CHx_ENG_A_ATI_MODE_SHIFT 8 +#define IQS269_CHx_ENG_A_ATI_MODE_MAX 3 +#define IQS269_CHx_ENG_A_INV_LOGIC BIT(7) +#define IQS269_CHx_ENG_A_PROJ_BIAS_MASK GENMASK(6, 5) +#define IQS269_CHx_ENG_A_PROJ_BIAS_SHIFT 5 +#define IQS269_CHx_ENG_A_PROJ_BIAS_MAX 3 +#define IQS269_CHx_ENG_A_SENSE_MODE_MASK GENMASK(3, 0) +#define IQS269_CHx_ENG_A_SENSE_MODE_MAX 15 + +#define IQS269_CHx_ENG_B_LOCAL_CAP_ENABLE BIT(13) +#define IQS269_CHx_ENG_B_SENSE_FREQ_MASK GENMASK(10, 9) +#define IQS269_CHx_ENG_B_SENSE_FREQ_SHIFT 9 +#define IQS269_CHx_ENG_B_SENSE_FREQ_MAX 3 +#define IQS269_CHx_ENG_B_STATIC_ENABLE BIT(8) +#define IQS269_CHx_ENG_B_ATI_BASE_MASK GENMASK(7, 6) +#define IQS269_CHx_ENG_B_ATI_BASE_75 0x00 +#define IQS269_CHx_ENG_B_ATI_BASE_100 0x40 +#define IQS269_CHx_ENG_B_ATI_BASE_150 0x80 +#define IQS269_CHx_ENG_B_ATI_BASE_200 0xC0 +#define IQS269_CHx_ENG_B_ATI_TARGET_MASK GENMASK(5, 0) +#define IQS269_CHx_ENG_B_ATI_TARGET_MAX 2016 + +#define IQS269_CHx_WEIGHT_MAX 255 +#define IQS269_CHx_THRESH_MAX 255 +#define IQS269_CHx_HYST_DEEP_MASK GENMASK(7, 4) +#define IQS269_CHx_HYST_DEEP_SHIFT 4 +#define IQS269_CHx_HYST_TOUCH_MASK GENMASK(3, 0) +#define IQS269_CHx_HYST_MAX 15 + +#define IQS269_CHx_HALL_INACTIVE 6 +#define IQS269_CHx_HALL_ACTIVE 7 + +#define IQS269_HALL_PAD_R BIT(0) +#define IQS269_HALL_PAD_L BIT(1) +#define IQS269_HALL_PAD_INV BIT(6) + +#define IQS269_HALL_UI 0xF5 +#define IQS269_HALL_UI_ENABLE BIT(15) + +#define IQS269_MAX_REG 0xFF + +#define IQS269_NUM_CH 8 +#define IQS269_NUM_SL 2 + +#define IQS269_ATI_POLL_SLEEP_US (iqs269->delay_mult * 10000) +#define IQS269_ATI_POLL_TIMEOUT_US (iqs269->delay_mult * 500000) +#define IQS269_ATI_STABLE_DELAY_MS (iqs269->delay_mult * 150) + +#define IQS269_PWR_MODE_POLL_SLEEP_US IQS269_ATI_POLL_SLEEP_US +#define IQS269_PWR_MODE_POLL_TIMEOUT_US IQS269_ATI_POLL_TIMEOUT_US + +#define iqs269_irq_wait() usleep_range(100, 150) + +enum iqs269_local_cap_size { + IQS269_LOCAL_CAP_SIZE_0, + IQS269_LOCAL_CAP_SIZE_GLOBAL_ONLY, + IQS269_LOCAL_CAP_SIZE_GLOBAL_0pF5, +}; + +enum iqs269_st_offs { + IQS269_ST_OFFS_PROX, + IQS269_ST_OFFS_DIR, + IQS269_ST_OFFS_TOUCH, + IQS269_ST_OFFS_DEEP, +}; + +enum iqs269_th_offs { + IQS269_TH_OFFS_PROX, + IQS269_TH_OFFS_TOUCH, + IQS269_TH_OFFS_DEEP, +}; + +enum iqs269_event_id { + IQS269_EVENT_PROX_DN, + IQS269_EVENT_PROX_UP, + IQS269_EVENT_TOUCH_DN, + IQS269_EVENT_TOUCH_UP, + IQS269_EVENT_DEEP_DN, + IQS269_EVENT_DEEP_UP, +}; + +struct iqs269_switch_desc { + unsigned int code; + bool enabled; +}; + +struct iqs269_event_desc { + const char *name; + enum iqs269_st_offs st_offs; + enum iqs269_th_offs th_offs; + bool dir_up; + u8 mask; +}; + +static const struct iqs269_event_desc iqs269_events[] = { + [IQS269_EVENT_PROX_DN] = { + .name = "event-prox", + 
.st_offs = IQS269_ST_OFFS_PROX, + .th_offs = IQS269_TH_OFFS_PROX, + .mask = IQS269_EVENT_MASK_PROX, + }, + [IQS269_EVENT_PROX_UP] = { + .name = "event-prox-alt", + .st_offs = IQS269_ST_OFFS_PROX, + .th_offs = IQS269_TH_OFFS_PROX, + .dir_up = true, + .mask = IQS269_EVENT_MASK_PROX, + }, + [IQS269_EVENT_TOUCH_DN] = { + .name = "event-touch", + .st_offs = IQS269_ST_OFFS_TOUCH, + .th_offs = IQS269_TH_OFFS_TOUCH, + .mask = IQS269_EVENT_MASK_TOUCH, + }, + [IQS269_EVENT_TOUCH_UP] = { + .name = "event-touch-alt", + .st_offs = IQS269_ST_OFFS_TOUCH, + .th_offs = IQS269_TH_OFFS_TOUCH, + .dir_up = true, + .mask = IQS269_EVENT_MASK_TOUCH, + }, + [IQS269_EVENT_DEEP_DN] = { + .name = "event-deep", + .st_offs = IQS269_ST_OFFS_DEEP, + .th_offs = IQS269_TH_OFFS_DEEP, + .mask = IQS269_EVENT_MASK_DEEP, + }, + [IQS269_EVENT_DEEP_UP] = { + .name = "event-deep-alt", + .st_offs = IQS269_ST_OFFS_DEEP, + .th_offs = IQS269_TH_OFFS_DEEP, + .dir_up = true, + .mask = IQS269_EVENT_MASK_DEEP, + }, +}; + +struct iqs269_ver_info { + u8 prod_num; + u8 sw_num; + u8 hw_num; + u8 padding; +} __packed; + +struct iqs269_sys_reg { + __be16 general; + u8 active; + u8 filter; + u8 reseed; + u8 event_mask; + u8 rate_np; + u8 rate_lp; + u8 rate_ulp; + u8 timeout_pwr; + u8 timeout_rdy; + u8 timeout_lta; + __be16 misc_a; + __be16 misc_b; + u8 blocking; + u8 padding; + u8 slider_select[IQS269_NUM_SL]; + u8 timeout_tap; + u8 timeout_swipe; + u8 thresh_swipe; + u8 redo_ati; +} __packed; + +struct iqs269_ch_reg { + u8 rx_enable; + u8 tx_enable; + __be16 engine_a; + __be16 engine_b; + __be16 ati_comp; + u8 thresh[3]; + u8 hyst; + u8 assoc_select; + u8 assoc_weight; +} __packed; + +struct iqs269_flags { + __be16 system; + u8 gesture; + u8 padding; + u8 states[4]; +} __packed; + +struct iqs269_private { + struct i2c_client *client; + struct regmap *regmap; + struct mutex lock; + struct iqs269_switch_desc switches[ARRAY_SIZE(iqs269_events)]; + struct iqs269_ch_reg ch_reg[IQS269_NUM_CH]; + struct iqs269_sys_reg sys_reg; + struct input_dev *keypad; + struct input_dev *slider[IQS269_NUM_SL]; + unsigned int keycode[ARRAY_SIZE(iqs269_events) * IQS269_NUM_CH]; + unsigned int suspend_mode; + unsigned int delay_mult; + unsigned int ch_num; + bool hall_enable; + bool ati_current; +}; + +static int iqs269_ati_mode_set(struct iqs269_private *iqs269, + unsigned int ch_num, unsigned int mode) +{ + u16 engine_a; + + if (ch_num >= IQS269_NUM_CH) + return -EINVAL; + + if (mode > IQS269_CHx_ENG_A_ATI_MODE_MAX) + return -EINVAL; + + mutex_lock(&iqs269->lock); + + engine_a = be16_to_cpu(iqs269->ch_reg[ch_num].engine_a); + + engine_a &= ~IQS269_CHx_ENG_A_ATI_MODE_MASK; + engine_a |= (mode << IQS269_CHx_ENG_A_ATI_MODE_SHIFT); + + iqs269->ch_reg[ch_num].engine_a = cpu_to_be16(engine_a); + iqs269->ati_current = false; + + mutex_unlock(&iqs269->lock); + + return 0; +} + +static int iqs269_ati_mode_get(struct iqs269_private *iqs269, + unsigned int ch_num, unsigned int *mode) +{ + u16 engine_a; + + if (ch_num >= IQS269_NUM_CH) + return -EINVAL; + + mutex_lock(&iqs269->lock); + engine_a = be16_to_cpu(iqs269->ch_reg[ch_num].engine_a); + mutex_unlock(&iqs269->lock); + + engine_a &= IQS269_CHx_ENG_A_ATI_MODE_MASK; + *mode = (engine_a >> IQS269_CHx_ENG_A_ATI_MODE_SHIFT); + + return 0; +} + +static int iqs269_ati_base_set(struct iqs269_private *iqs269, + unsigned int ch_num, unsigned int base) +{ + u16 engine_b; + + if (ch_num >= IQS269_NUM_CH) + return -EINVAL; + + switch (base) { + case 75: + base = IQS269_CHx_ENG_B_ATI_BASE_75; + break; + + case 100: + base = 
IQS269_CHx_ENG_B_ATI_BASE_100; + break; + + case 150: + base = IQS269_CHx_ENG_B_ATI_BASE_150; + break; + + case 200: + base = IQS269_CHx_ENG_B_ATI_BASE_200; + break; + + default: + return -EINVAL; + } + + mutex_lock(&iqs269->lock); + + engine_b = be16_to_cpu(iqs269->ch_reg[ch_num].engine_b); + + engine_b &= ~IQS269_CHx_ENG_B_ATI_BASE_MASK; + engine_b |= base; + + iqs269->ch_reg[ch_num].engine_b = cpu_to_be16(engine_b); + iqs269->ati_current = false; + + mutex_unlock(&iqs269->lock); + + return 0; +} + +static int iqs269_ati_base_get(struct iqs269_private *iqs269, + unsigned int ch_num, unsigned int *base) +{ + u16 engine_b; + + if (ch_num >= IQS269_NUM_CH) + return -EINVAL; + + mutex_lock(&iqs269->lock); + engine_b = be16_to_cpu(iqs269->ch_reg[ch_num].engine_b); + mutex_unlock(&iqs269->lock); + + switch (engine_b & IQS269_CHx_ENG_B_ATI_BASE_MASK) { + case IQS269_CHx_ENG_B_ATI_BASE_75: + *base = 75; + return 0; + + case IQS269_CHx_ENG_B_ATI_BASE_100: + *base = 100; + return 0; + + case IQS269_CHx_ENG_B_ATI_BASE_150: + *base = 150; + return 0; + + case IQS269_CHx_ENG_B_ATI_BASE_200: + *base = 200; + return 0; + + default: + return -EINVAL; + } +} + +static int iqs269_ati_target_set(struct iqs269_private *iqs269, + unsigned int ch_num, unsigned int target) +{ + u16 engine_b; + + if (ch_num >= IQS269_NUM_CH) + return -EINVAL; + + if (target > IQS269_CHx_ENG_B_ATI_TARGET_MAX) + return -EINVAL; + + mutex_lock(&iqs269->lock); + + engine_b = be16_to_cpu(iqs269->ch_reg[ch_num].engine_b); + + engine_b &= ~IQS269_CHx_ENG_B_ATI_TARGET_MASK; + engine_b |= target / 32; + + iqs269->ch_reg[ch_num].engine_b = cpu_to_be16(engine_b); + iqs269->ati_current = false; + + mutex_unlock(&iqs269->lock); + + return 0; +} + +static int iqs269_ati_target_get(struct iqs269_private *iqs269, + unsigned int ch_num, unsigned int *target) +{ + u16 engine_b; + + if (ch_num >= IQS269_NUM_CH) + return -EINVAL; + + mutex_lock(&iqs269->lock); + engine_b = be16_to_cpu(iqs269->ch_reg[ch_num].engine_b); + mutex_unlock(&iqs269->lock); + + *target = (engine_b & IQS269_CHx_ENG_B_ATI_TARGET_MASK) * 32; + + return 0; +} + +static int iqs269_parse_mask(const struct fwnode_handle *fwnode, + const char *propname, u8 *mask) +{ + unsigned int val[IQS269_NUM_CH]; + int count, error, i; + + count = fwnode_property_count_u32(fwnode, propname); + if (count < 0) + return 0; + + if (count > IQS269_NUM_CH) + return -EINVAL; + + error = fwnode_property_read_u32_array(fwnode, propname, val, count); + if (error) + return error; + + *mask = 0; + + for (i = 0; i < count; i++) { + if (val[i] >= IQS269_NUM_CH) + return -EINVAL; + + *mask |= BIT(val[i]); + } + + return 0; +} + +static int iqs269_parse_chan(struct iqs269_private *iqs269, + const struct fwnode_handle *ch_node) +{ + struct i2c_client *client = iqs269->client; + struct fwnode_handle *ev_node; + struct iqs269_ch_reg *ch_reg; + u16 engine_a, engine_b; + unsigned int reg, val; + int error, i; + + error = fwnode_property_read_u32(ch_node, "reg", ®); + if (error) { + dev_err(&client->dev, "Failed to read channel number: %d\n", + error); + return error; + } else if (reg >= IQS269_NUM_CH) { + dev_err(&client->dev, "Invalid channel number: %u\n", reg); + return -EINVAL; + } + + iqs269->sys_reg.active |= BIT(reg); + if (!fwnode_property_present(ch_node, "azoteq,reseed-disable")) + iqs269->sys_reg.reseed |= BIT(reg); + + if (fwnode_property_present(ch_node, "azoteq,blocking-enable")) + iqs269->sys_reg.blocking |= BIT(reg); + + if (fwnode_property_present(ch_node, "azoteq,slider0-select")) + 
iqs269->sys_reg.slider_select[0] |= BIT(reg); + + if (fwnode_property_present(ch_node, "azoteq,slider1-select")) + iqs269->sys_reg.slider_select[1] |= BIT(reg); + + ch_reg = &iqs269->ch_reg[reg]; + + error = regmap_raw_read(iqs269->regmap, + IQS269_CHx_SETTINGS + reg * sizeof(*ch_reg) / 2, + ch_reg, sizeof(*ch_reg)); + if (error) + return error; + + error = iqs269_parse_mask(ch_node, "azoteq,rx-enable", + &ch_reg->rx_enable); + if (error) { + dev_err(&client->dev, "Invalid channel %u RX enable mask: %d\n", + reg, error); + return error; + } + + error = iqs269_parse_mask(ch_node, "azoteq,tx-enable", + &ch_reg->tx_enable); + if (error) { + dev_err(&client->dev, "Invalid channel %u TX enable mask: %d\n", + reg, error); + return error; + } + + engine_a = be16_to_cpu(ch_reg->engine_a); + engine_b = be16_to_cpu(ch_reg->engine_b); + + engine_a |= IQS269_CHx_ENG_A_MEAS_CAP_SIZE; + if (fwnode_property_present(ch_node, "azoteq,meas-cap-decrease")) + engine_a &= ~IQS269_CHx_ENG_A_MEAS_CAP_SIZE; + + engine_a |= IQS269_CHx_ENG_A_RX_GND_INACTIVE; + if (fwnode_property_present(ch_node, "azoteq,rx-float-inactive")) + engine_a &= ~IQS269_CHx_ENG_A_RX_GND_INACTIVE; + + engine_a &= ~IQS269_CHx_ENG_A_LOCAL_CAP_SIZE; + engine_b &= ~IQS269_CHx_ENG_B_LOCAL_CAP_ENABLE; + if (!fwnode_property_read_u32(ch_node, "azoteq,local-cap-size", &val)) { + switch (val) { + case IQS269_LOCAL_CAP_SIZE_0: + break; + + case IQS269_LOCAL_CAP_SIZE_GLOBAL_0pF5: + engine_a |= IQS269_CHx_ENG_A_LOCAL_CAP_SIZE; + + /* fall through */ + + case IQS269_LOCAL_CAP_SIZE_GLOBAL_ONLY: + engine_b |= IQS269_CHx_ENG_B_LOCAL_CAP_ENABLE; + break; + + default: + dev_err(&client->dev, + "Invalid channel %u local cap. size: %u\n", reg, + val); + return -EINVAL; + } + } + + engine_a &= ~IQS269_CHx_ENG_A_INV_LOGIC; + if (fwnode_property_present(ch_node, "azoteq,invert-enable")) + engine_a |= IQS269_CHx_ENG_A_INV_LOGIC; + + if (!fwnode_property_read_u32(ch_node, "azoteq,proj-bias", &val)) { + if (val > IQS269_CHx_ENG_A_PROJ_BIAS_MAX) { + dev_err(&client->dev, + "Invalid channel %u bias current: %u\n", reg, + val); + return -EINVAL; + } + + engine_a &= ~IQS269_CHx_ENG_A_PROJ_BIAS_MASK; + engine_a |= (val << IQS269_CHx_ENG_A_PROJ_BIAS_SHIFT); + } + + if (!fwnode_property_read_u32(ch_node, "azoteq,sense-mode", &val)) { + if (val > IQS269_CHx_ENG_A_SENSE_MODE_MAX) { + dev_err(&client->dev, + "Invalid channel %u sensing mode: %u\n", reg, + val); + return -EINVAL; + } + + engine_a &= ~IQS269_CHx_ENG_A_SENSE_MODE_MASK; + engine_a |= val; + } + + if (!fwnode_property_read_u32(ch_node, "azoteq,sense-freq", &val)) { + if (val > IQS269_CHx_ENG_B_SENSE_FREQ_MAX) { + dev_err(&client->dev, + "Invalid channel %u sensing frequency: %u\n", + reg, val); + return -EINVAL; + } + + engine_b &= ~IQS269_CHx_ENG_B_SENSE_FREQ_MASK; + engine_b |= (val << IQS269_CHx_ENG_B_SENSE_FREQ_SHIFT); + } + + engine_b &= ~IQS269_CHx_ENG_B_STATIC_ENABLE; + if (fwnode_property_present(ch_node, "azoteq,static-enable")) + engine_b |= IQS269_CHx_ENG_B_STATIC_ENABLE; + + ch_reg->engine_a = cpu_to_be16(engine_a); + ch_reg->engine_b = cpu_to_be16(engine_b); + + if (!fwnode_property_read_u32(ch_node, "azoteq,ati-mode", &val)) { + error = iqs269_ati_mode_set(iqs269, reg, val); + if (error) { + dev_err(&client->dev, + "Invalid channel %u ATI mode: %u\n", reg, val); + return error; + } + } + + if (!fwnode_property_read_u32(ch_node, "azoteq,ati-base", &val)) { + error = iqs269_ati_base_set(iqs269, reg, val); + if (error) { + dev_err(&client->dev, + "Invalid channel %u ATI base: %u\n", reg, val); + return 
error; + } + } + + if (!fwnode_property_read_u32(ch_node, "azoteq,ati-target", &val)) { + error = iqs269_ati_target_set(iqs269, reg, val); + if (error) { + dev_err(&client->dev, + "Invalid channel %u ATI target: %u\n", reg, + val); + return error; + } + } + + error = iqs269_parse_mask(ch_node, "azoteq,assoc-select", + &ch_reg->assoc_select); + if (error) { + dev_err(&client->dev, "Invalid channel %u association: %d\n", + reg, error); + return error; + } + + if (!fwnode_property_read_u32(ch_node, "azoteq,assoc-weight", &val)) { + if (val > IQS269_CHx_WEIGHT_MAX) { + dev_err(&client->dev, + "Invalid channel %u associated weight: %u\n", + reg, val); + return -EINVAL; + } + + ch_reg->assoc_weight = val; + } + + for (i = 0; i < ARRAY_SIZE(iqs269_events); i++) { + ev_node = fwnode_get_named_child_node(ch_node, + iqs269_events[i].name); + if (!ev_node) + continue; + + if (!fwnode_property_read_u32(ev_node, "azoteq,thresh", &val)) { + if (val > IQS269_CHx_THRESH_MAX) { + dev_err(&client->dev, + "Invalid channel %u threshold: %u\n", + reg, val); + return -EINVAL; + } + + ch_reg->thresh[iqs269_events[i].th_offs] = val; + } + + if (!fwnode_property_read_u32(ev_node, "azoteq,hyst", &val)) { + u8 *hyst = &ch_reg->hyst; + + if (val > IQS269_CHx_HYST_MAX) { + dev_err(&client->dev, + "Invalid channel %u hysteresis: %u\n", + reg, val); + return -EINVAL; + } + + if (i == IQS269_EVENT_DEEP_DN || + i == IQS269_EVENT_DEEP_UP) { + *hyst &= ~IQS269_CHx_HYST_DEEP_MASK; + *hyst |= (val << IQS269_CHx_HYST_DEEP_SHIFT); + } else if (i == IQS269_EVENT_TOUCH_DN || + i == IQS269_EVENT_TOUCH_UP) { + *hyst &= ~IQS269_CHx_HYST_TOUCH_MASK; + *hyst |= val; + } + } + + if (fwnode_property_read_u32(ev_node, "linux,code", &val)) + continue; + + switch (reg) { + case IQS269_CHx_HALL_ACTIVE: + if (iqs269->hall_enable) { + iqs269->switches[i].code = val; + iqs269->switches[i].enabled = true; + } + + /* fall through */ + + case IQS269_CHx_HALL_INACTIVE: + if (iqs269->hall_enable) + break; + + /* fall through */ + + default: + iqs269->keycode[i * IQS269_NUM_CH + reg] = val; + } + + iqs269->sys_reg.event_mask &= ~iqs269_events[i].mask; + } + + return 0; +} + +static int iqs269_parse_prop(struct iqs269_private *iqs269) +{ + struct iqs269_sys_reg *sys_reg = &iqs269->sys_reg; + struct i2c_client *client = iqs269->client; + struct fwnode_handle *ch_node; + u16 general, misc_a, misc_b; + unsigned int val; + int error; + + iqs269->hall_enable = device_property_present(&client->dev, + "azoteq,hall-enable"); + + if (!device_property_read_u32(&client->dev, "azoteq,suspend-mode", + &val)) { + if (val > IQS269_SYS_SETTINGS_PWR_MODE_MAX) { + dev_err(&client->dev, "Invalid suspend mode: %u\n", + val); + return -EINVAL; + } + + iqs269->suspend_mode = val; + } + + error = regmap_raw_read(iqs269->regmap, IQS269_SYS_SETTINGS, sys_reg, + sizeof(*sys_reg)); + if (error) + return error; + + if (!device_property_read_u32(&client->dev, "azoteq,filt-str-lp-lta", + &val)) { + if (val > IQS269_FILT_STR_MAX) { + dev_err(&client->dev, "Invalid filter strength: %u\n", + val); + return -EINVAL; + } + + sys_reg->filter &= ~IQS269_FILT_STR_LP_LTA_MASK; + sys_reg->filter |= (val << IQS269_FILT_STR_LP_LTA_SHIFT); + } + + if (!device_property_read_u32(&client->dev, "azoteq,filt-str-lp-cnt", + &val)) { + if (val > IQS269_FILT_STR_MAX) { + dev_err(&client->dev, "Invalid filter strength: %u\n", + val); + return -EINVAL; + } + + sys_reg->filter &= ~IQS269_FILT_STR_LP_CNT_MASK; + sys_reg->filter |= (val << IQS269_FILT_STR_LP_CNT_SHIFT); + } + + if 
(!device_property_read_u32(&client->dev, "azoteq,filt-str-np-lta", + &val)) { + if (val > IQS269_FILT_STR_MAX) { + dev_err(&client->dev, "Invalid filter strength: %u\n", + val); + return -EINVAL; + } + + sys_reg->filter &= ~IQS269_FILT_STR_NP_LTA_MASK; + sys_reg->filter |= (val << IQS269_FILT_STR_NP_LTA_SHIFT); + } + + if (!device_property_read_u32(&client->dev, "azoteq,filt-str-np-cnt", + &val)) { + if (val > IQS269_FILT_STR_MAX) { + dev_err(&client->dev, "Invalid filter strength: %u\n", + val); + return -EINVAL; + } + + sys_reg->filter &= ~IQS269_FILT_STR_NP_CNT_MASK; + sys_reg->filter |= val; + } + + if (!device_property_read_u32(&client->dev, "azoteq,rate-np-ms", + &val)) { + if (val > IQS269_RATE_NP_MS_MAX) { + dev_err(&client->dev, "Invalid report rate: %u\n", val); + return -EINVAL; + } + + sys_reg->rate_np = val; + } + + if (!device_property_read_u32(&client->dev, "azoteq,rate-lp-ms", + &val)) { + if (val > IQS269_RATE_LP_MS_MAX) { + dev_err(&client->dev, "Invalid report rate: %u\n", val); + return -EINVAL; + } + + sys_reg->rate_lp = val; + } + + if (!device_property_read_u32(&client->dev, "azoteq,rate-ulp-ms", + &val)) { + if (val > IQS269_RATE_ULP_MS_MAX) { + dev_err(&client->dev, "Invalid report rate: %u\n", val); + return -EINVAL; + } + + sys_reg->rate_ulp = val / 16; + } + + if (!device_property_read_u32(&client->dev, "azoteq,timeout-pwr-ms", + &val)) { + if (val > IQS269_TIMEOUT_PWR_MS_MAX) { + dev_err(&client->dev, "Invalid timeout: %u\n", val); + return -EINVAL; + } + + sys_reg->timeout_pwr = val / 512; + } + + if (!device_property_read_u32(&client->dev, "azoteq,timeout-lta-ms", + &val)) { + if (val > IQS269_TIMEOUT_LTA_MS_MAX) { + dev_err(&client->dev, "Invalid timeout: %u\n", val); + return -EINVAL; + } + + sys_reg->timeout_lta = val / 512; + } + + misc_a = be16_to_cpu(sys_reg->misc_a); + misc_b = be16_to_cpu(sys_reg->misc_b); + + misc_a &= ~IQS269_MISC_A_ATI_BAND_DISABLE; + if (device_property_present(&client->dev, "azoteq,ati-band-disable")) + misc_a |= IQS269_MISC_A_ATI_BAND_DISABLE; + + misc_a &= ~IQS269_MISC_A_ATI_LP_ONLY; + if (device_property_present(&client->dev, "azoteq,ati-lp-only")) + misc_a |= IQS269_MISC_A_ATI_LP_ONLY; + + misc_a &= ~IQS269_MISC_A_ATI_BAND_TIGHTEN; + if (device_property_present(&client->dev, "azoteq,ati-band-tighten")) + misc_a |= IQS269_MISC_A_ATI_BAND_TIGHTEN; + + misc_a &= ~IQS269_MISC_A_FILT_DISABLE; + if (device_property_present(&client->dev, "azoteq,filt-disable")) + misc_a |= IQS269_MISC_A_FILT_DISABLE; + + if (!device_property_read_u32(&client->dev, "azoteq,gpio3-select", + &val)) { + if (val >= IQS269_NUM_CH) { + dev_err(&client->dev, "Invalid GPIO3 selection: %u\n", + val); + return -EINVAL; + } + + misc_a &= ~IQS269_MISC_A_GPIO3_SELECT_MASK; + misc_a |= (val << IQS269_MISC_A_GPIO3_SELECT_SHIFT); + } + + misc_a &= ~IQS269_MISC_A_DUAL_DIR; + if (device_property_present(&client->dev, "azoteq,dual-direction")) + misc_a |= IQS269_MISC_A_DUAL_DIR; + + if (!device_property_read_u32(&client->dev, "azoteq,tx-freq", &val)) { + if (val > IQS269_MISC_A_TX_FREQ_MAX) { + dev_err(&client->dev, + "Invalid excitation frequency: %u\n", val); + return -EINVAL; + } + + misc_a &= ~IQS269_MISC_A_TX_FREQ_MASK; + misc_a |= (val << IQS269_MISC_A_TX_FREQ_SHIFT); + } + + misc_a &= ~IQS269_MISC_A_GLOBAL_CAP_SIZE; + if (device_property_present(&client->dev, "azoteq,global-cap-increase")) + misc_a |= IQS269_MISC_A_GLOBAL_CAP_SIZE; + + if (!device_property_read_u32(&client->dev, "azoteq,reseed-select", + &val)) { + if (val > IQS269_MISC_B_RESEED_UI_SEL_MAX) { + 
dev_err(&client->dev, "Invalid reseed selection: %u\n", + val); + return -EINVAL; + } + + misc_b &= ~IQS269_MISC_B_RESEED_UI_SEL_MASK; + misc_b |= (val << IQS269_MISC_B_RESEED_UI_SEL_SHIFT); + } + + misc_b &= ~IQS269_MISC_B_TRACKING_UI_ENABLE; + if (device_property_present(&client->dev, "azoteq,tracking-enable")) + misc_b |= IQS269_MISC_B_TRACKING_UI_ENABLE; + + if (!device_property_read_u32(&client->dev, "azoteq,filt-str-slider", + &val)) { + if (val > IQS269_FILT_STR_MAX) { + dev_err(&client->dev, "Invalid filter strength: %u\n", + val); + return -EINVAL; + } + + misc_b &= ~IQS269_MISC_B_FILT_STR_SLIDER; + misc_b |= val; + } + + sys_reg->misc_a = cpu_to_be16(misc_a); + sys_reg->misc_b = cpu_to_be16(misc_b); + + sys_reg->active = 0; + sys_reg->reseed = 0; + + sys_reg->blocking = 0; + + sys_reg->slider_select[0] = 0; + sys_reg->slider_select[1] = 0; + + sys_reg->event_mask = ~((u8)IQS269_EVENT_MASK_SYS); + + device_for_each_child_node(&client->dev, ch_node) { + error = iqs269_parse_chan(iqs269, ch_node); + if (error) { + fwnode_handle_put(ch_node); + return error; + } + } + + /* + * Volunteer all active channels to participate in ATI when REDO-ATI is + * manually triggered. + */ + sys_reg->redo_ati = sys_reg->active; + + general = be16_to_cpu(sys_reg->general); + + if (device_property_present(&client->dev, "azoteq,clk-div")) { + general |= IQS269_SYS_SETTINGS_CLK_DIV; + iqs269->delay_mult = 4; + } else { + general &= ~IQS269_SYS_SETTINGS_CLK_DIV; + iqs269->delay_mult = 1; + } + + /* + * Configure the device to automatically switch between normal and low- + * power modes as a function of sensing activity. Ultra-low-power mode, + * if enabled, is reserved for suspend. + */ + general &= ~IQS269_SYS_SETTINGS_ULP_AUTO; + general &= ~IQS269_SYS_SETTINGS_DIS_AUTO; + general &= ~IQS269_SYS_SETTINGS_PWR_MODE_MASK; + + if (!device_property_read_u32(&client->dev, "azoteq,ulp-update", + &val)) { + if (val > IQS269_SYS_SETTINGS_ULP_UPDATE_MAX) { + dev_err(&client->dev, "Invalid update rate: %u\n", val); + return -EINVAL; + } + + general &= ~IQS269_SYS_SETTINGS_ULP_UPDATE_MASK; + general |= (val << IQS269_SYS_SETTINGS_ULP_UPDATE_SHIFT); + } + + general &= ~IQS269_SYS_SETTINGS_RESEED_OFFSET; + if (device_property_present(&client->dev, "azoteq,reseed-offset")) + general |= IQS269_SYS_SETTINGS_RESEED_OFFSET; + + general |= IQS269_SYS_SETTINGS_EVENT_MODE; + + /* + * As per the datasheet, enable streaming during normal-power mode if + * either slider is in use. In that case, the device returns to event + * mode during low-power mode. + */ + if (sys_reg->slider_select[0] || sys_reg->slider_select[1]) + general |= IQS269_SYS_SETTINGS_EVENT_MODE_LP; + + general |= IQS269_SYS_SETTINGS_REDO_ATI; + general |= IQS269_SYS_SETTINGS_ACK_RESET; + + sys_reg->general = cpu_to_be16(general); + + return 0; +} + +static int iqs269_dev_init(struct iqs269_private *iqs269) +{ + struct iqs269_sys_reg *sys_reg = &iqs269->sys_reg; + struct iqs269_ch_reg *ch_reg; + unsigned int val; + int error, i; + + mutex_lock(&iqs269->lock); + + error = regmap_update_bits(iqs269->regmap, IQS269_HALL_UI, + IQS269_HALL_UI_ENABLE, + iqs269->hall_enable ? 
~0 : 0); + if (error) + goto err_mutex; + + for (i = 0; i < IQS269_NUM_CH; i++) { + if (!(sys_reg->active & BIT(i))) + continue; + + ch_reg = &iqs269->ch_reg[i]; + + error = regmap_raw_write(iqs269->regmap, + IQS269_CHx_SETTINGS + i * + sizeof(*ch_reg) / 2, ch_reg, + sizeof(*ch_reg)); + if (error) + goto err_mutex; + } + + /* + * The REDO-ATI and ATI channel selection fields must be written in the + * same block write, so every field between registers 0x80 through 0x8B + * (inclusive) must be written as well. + */ + error = regmap_raw_write(iqs269->regmap, IQS269_SYS_SETTINGS, sys_reg, + sizeof(*sys_reg)); + if (error) + goto err_mutex; + + error = regmap_read_poll_timeout(iqs269->regmap, IQS269_SYS_FLAGS, val, + !(val & IQS269_SYS_FLAGS_IN_ATI), + IQS269_ATI_POLL_SLEEP_US, + IQS269_ATI_POLL_TIMEOUT_US); + if (error) + goto err_mutex; + + msleep(IQS269_ATI_STABLE_DELAY_MS); + iqs269->ati_current = true; + +err_mutex: + mutex_unlock(&iqs269->lock); + + return error; +} + +static int iqs269_input_init(struct iqs269_private *iqs269) +{ + struct i2c_client *client = iqs269->client; + struct iqs269_flags flags; + unsigned int sw_code, keycode; + int error, i, j; + u8 dir_mask, state; + + iqs269->keypad = devm_input_allocate_device(&client->dev); + if (!iqs269->keypad) + return -ENOMEM; + + iqs269->keypad->keycodemax = ARRAY_SIZE(iqs269->keycode); + iqs269->keypad->keycode = iqs269->keycode; + iqs269->keypad->keycodesize = sizeof(*iqs269->keycode); + + iqs269->keypad->name = "iqs269a_keypad"; + iqs269->keypad->id.bustype = BUS_I2C; + + if (iqs269->hall_enable) { + error = regmap_raw_read(iqs269->regmap, IQS269_SYS_FLAGS, + &flags, sizeof(flags)); + if (error) { + dev_err(&client->dev, + "Failed to read initial status: %d\n", error); + return error; + } + } + + for (i = 0; i < ARRAY_SIZE(iqs269_events); i++) { + dir_mask = flags.states[IQS269_ST_OFFS_DIR]; + if (!iqs269_events[i].dir_up) + dir_mask = ~dir_mask; + + state = flags.states[iqs269_events[i].st_offs] & dir_mask; + + sw_code = iqs269->switches[i].code; + + for (j = 0; j < IQS269_NUM_CH; j++) { + keycode = iqs269->keycode[i * IQS269_NUM_CH + j]; + + /* + * Hall-effect sensing repurposes a pair of dedicated + * channels, only one of which reports events. + */ + switch (j) { + case IQS269_CHx_HALL_ACTIVE: + if (iqs269->hall_enable && + iqs269->switches[i].enabled) { + input_set_capability(iqs269->keypad, + EV_SW, sw_code); + input_report_switch(iqs269->keypad, + sw_code, + state & BIT(j)); + } + + /* fall through */ + + case IQS269_CHx_HALL_INACTIVE: + if (iqs269->hall_enable) + continue; + + /* fall through */ + + default: + if (keycode != KEY_RESERVED) + input_set_capability(iqs269->keypad, + EV_KEY, keycode); + } + } + } + + input_sync(iqs269->keypad); + + error = input_register_device(iqs269->keypad); + if (error) { + dev_err(&client->dev, "Failed to register keypad: %d\n", error); + return error; + } + + for (i = 0; i < IQS269_NUM_SL; i++) { + if (!iqs269->sys_reg.slider_select[i]) + continue; + + iqs269->slider[i] = devm_input_allocate_device(&client->dev); + if (!iqs269->slider[i]) + return -ENOMEM; + + iqs269->slider[i]->name = i ? 
"iqs269a_slider_1" + : "iqs269a_slider_0"; + iqs269->slider[i]->id.bustype = BUS_I2C; + + input_set_capability(iqs269->slider[i], EV_KEY, BTN_TOUCH); + input_set_abs_params(iqs269->slider[i], ABS_X, 0, 255, 0, 0); + + error = input_register_device(iqs269->slider[i]); + if (error) { + dev_err(&client->dev, + "Failed to register slider %d: %d\n", i, error); + return error; + } + } + + return 0; +} + +static int iqs269_report(struct iqs269_private *iqs269) +{ + struct i2c_client *client = iqs269->client; + struct iqs269_flags flags; + unsigned int sw_code, keycode; + int error, i, j; + u8 slider_x[IQS269_NUM_SL]; + u8 dir_mask, state; + + error = regmap_raw_read(iqs269->regmap, IQS269_SYS_FLAGS, &flags, + sizeof(flags)); + if (error) { + dev_err(&client->dev, "Failed to read device status: %d\n", + error); + return error; + } + + /* + * The device resets itself if its own watchdog bites, which can happen + * in the event of an I2C communication error. In this case, the device + * asserts a SHOW_RESET interrupt and all registers must be restored. + */ + if (be16_to_cpu(flags.system) & IQS269_SYS_FLAGS_SHOW_RESET) { + dev_err(&client->dev, "Unexpected device reset\n"); + + error = iqs269_dev_init(iqs269); + if (error) + dev_err(&client->dev, + "Failed to re-initialize device: %d\n", error); + + return error; + } + + error = regmap_raw_read(iqs269->regmap, IQS269_SLIDER_X, slider_x, + sizeof(slider_x)); + if (error) { + dev_err(&client->dev, "Failed to read slider position: %d\n", + error); + return error; + } + + for (i = 0; i < IQS269_NUM_SL; i++) { + if (!iqs269->sys_reg.slider_select[i]) + continue; + + /* + * Report BTN_TOUCH if any channel that participates in the + * slider is in a state of touch. + */ + if (flags.states[IQS269_ST_OFFS_TOUCH] & + iqs269->sys_reg.slider_select[i]) { + input_report_key(iqs269->slider[i], BTN_TOUCH, 1); + input_report_abs(iqs269->slider[i], ABS_X, slider_x[i]); + } else { + input_report_key(iqs269->slider[i], BTN_TOUCH, 0); + } + + input_sync(iqs269->slider[i]); + } + + for (i = 0; i < ARRAY_SIZE(iqs269_events); i++) { + dir_mask = flags.states[IQS269_ST_OFFS_DIR]; + if (!iqs269_events[i].dir_up) + dir_mask = ~dir_mask; + + state = flags.states[iqs269_events[i].st_offs] & dir_mask; + + sw_code = iqs269->switches[i].code; + + for (j = 0; j < IQS269_NUM_CH; j++) { + keycode = iqs269->keycode[i * IQS269_NUM_CH + j]; + + switch (j) { + case IQS269_CHx_HALL_ACTIVE: + if (iqs269->hall_enable && + iqs269->switches[i].enabled) + input_report_switch(iqs269->keypad, + sw_code, + state & BIT(j)); + + /* fall through */ + + case IQS269_CHx_HALL_INACTIVE: + if (iqs269->hall_enable) + continue; + + /* fall through */ + + default: + input_report_key(iqs269->keypad, keycode, + state & BIT(j)); + } + } + } + + input_sync(iqs269->keypad); + + return 0; +} + +static irqreturn_t iqs269_irq(int irq, void *context) +{ + struct iqs269_private *iqs269 = context; + + if (iqs269_report(iqs269)) + return IRQ_NONE; + + /* + * The device does not deassert its interrupt (RDY) pin until shortly + * after receiving an I2C stop condition; the following delay ensures + * the interrupt handler does not return before this time. 
+ */ + iqs269_irq_wait(); + + return IRQ_HANDLED; +} + +static ssize_t counts_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct iqs269_private *iqs269 = dev_get_drvdata(dev); + struct i2c_client *client = iqs269->client; + __le16 counts; + int error; + + if (!iqs269->ati_current || iqs269->hall_enable) + return -EPERM; + + /* + * Unsolicited I2C communication prompts the device to assert its RDY + * pin, so disable the interrupt line until the operation is finished + * and RDY has been deasserted. + */ + disable_irq(client->irq); + + error = regmap_raw_read(iqs269->regmap, + IQS269_CHx_COUNTS + iqs269->ch_num * 2, + &counts, sizeof(counts)); + + iqs269_irq_wait(); + enable_irq(client->irq); + + if (error) + return error; + + return scnprintf(buf, PAGE_SIZE, "%u\n", le16_to_cpu(counts)); +} + +static ssize_t hall_bin_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct iqs269_private *iqs269 = dev_get_drvdata(dev); + struct i2c_client *client = iqs269->client; + unsigned int val; + int error; + + disable_irq(client->irq); + + error = regmap_read(iqs269->regmap, IQS269_CAL_DATA_A, &val); + + iqs269_irq_wait(); + enable_irq(client->irq); + + if (error) + return error; + + switch (iqs269->ch_reg[IQS269_CHx_HALL_ACTIVE].rx_enable & + iqs269->ch_reg[IQS269_CHx_HALL_INACTIVE].rx_enable) { + case IQS269_HALL_PAD_R: + val &= IQS269_CAL_DATA_A_HALL_BIN_R_MASK; + val >>= IQS269_CAL_DATA_A_HALL_BIN_R_SHIFT; + break; + + case IQS269_HALL_PAD_L: + val &= IQS269_CAL_DATA_A_HALL_BIN_L_MASK; + val >>= IQS269_CAL_DATA_A_HALL_BIN_L_SHIFT; + break; + + default: + return -EINVAL; + } + + return scnprintf(buf, PAGE_SIZE, "%u\n", val); +} + +static ssize_t hall_enable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct iqs269_private *iqs269 = dev_get_drvdata(dev); + + return scnprintf(buf, PAGE_SIZE, "%u\n", iqs269->hall_enable); +} + +static ssize_t hall_enable_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct iqs269_private *iqs269 = dev_get_drvdata(dev); + unsigned int val; + int error; + + error = kstrtouint(buf, 10, &val); + if (error) + return error; + + mutex_lock(&iqs269->lock); + + iqs269->hall_enable = val; + iqs269->ati_current = false; + + mutex_unlock(&iqs269->lock); + + return count; +} + +static ssize_t ch_number_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct iqs269_private *iqs269 = dev_get_drvdata(dev); + + return scnprintf(buf, PAGE_SIZE, "%u\n", iqs269->ch_num); +} + +static ssize_t ch_number_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct iqs269_private *iqs269 = dev_get_drvdata(dev); + unsigned int val; + int error; + + error = kstrtouint(buf, 10, &val); + if (error) + return error; + + if (val >= IQS269_NUM_CH) + return -EINVAL; + + iqs269->ch_num = val; + + return count; +} + +static ssize_t rx_enable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct iqs269_private *iqs269 = dev_get_drvdata(dev); + + return scnprintf(buf, PAGE_SIZE, "%u\n", + iqs269->ch_reg[iqs269->ch_num].rx_enable); +} + +static ssize_t rx_enable_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct iqs269_private *iqs269 = dev_get_drvdata(dev); + unsigned int val; + int error; + + error = kstrtouint(buf, 10, &val); + if (error) + return error; + + if (val > 0xFF) + return -EINVAL; + + mutex_lock(&iqs269->lock); 
+ + iqs269->ch_reg[iqs269->ch_num].rx_enable = val; + iqs269->ati_current = false; + + mutex_unlock(&iqs269->lock); + + return count; +} + +static ssize_t ati_mode_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct iqs269_private *iqs269 = dev_get_drvdata(dev); + unsigned int val; + int error; + + error = iqs269_ati_mode_get(iqs269, iqs269->ch_num, &val); + if (error) + return error; + + return scnprintf(buf, PAGE_SIZE, "%u\n", val); +} + +static ssize_t ati_mode_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct iqs269_private *iqs269 = dev_get_drvdata(dev); + unsigned int val; + int error; + + error = kstrtouint(buf, 10, &val); + if (error) + return error; + + error = iqs269_ati_mode_set(iqs269, iqs269->ch_num, val); + if (error) + return error; + + return count; +} + +static ssize_t ati_base_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct iqs269_private *iqs269 = dev_get_drvdata(dev); + unsigned int val; + int error; + + error = iqs269_ati_base_get(iqs269, iqs269->ch_num, &val); + if (error) + return error; + + return scnprintf(buf, PAGE_SIZE, "%u\n", val); +} + +static ssize_t ati_base_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct iqs269_private *iqs269 = dev_get_drvdata(dev); + unsigned int val; + int error; + + error = kstrtouint(buf, 10, &val); + if (error) + return error; + + error = iqs269_ati_base_set(iqs269, iqs269->ch_num, val); + if (error) + return error; + + return count; +} + +static ssize_t ati_target_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct iqs269_private *iqs269 = dev_get_drvdata(dev); + unsigned int val; + int error; + + error = iqs269_ati_target_get(iqs269, iqs269->ch_num, &val); + if (error) + return error; + + return scnprintf(buf, PAGE_SIZE, "%u\n", val); +} + +static ssize_t ati_target_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct iqs269_private *iqs269 = dev_get_drvdata(dev); + unsigned int val; + int error; + + error = kstrtouint(buf, 10, &val); + if (error) + return error; + + error = iqs269_ati_target_set(iqs269, iqs269->ch_num, val); + if (error) + return error; + + return count; +} + +static ssize_t ati_trigger_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct iqs269_private *iqs269 = dev_get_drvdata(dev); + + return scnprintf(buf, PAGE_SIZE, "%u\n", iqs269->ati_current); +} + +static ssize_t ati_trigger_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct iqs269_private *iqs269 = dev_get_drvdata(dev); + struct i2c_client *client = iqs269->client; + unsigned int val; + int error; + + error = kstrtouint(buf, 10, &val); + if (error) + return error; + + if (!val) + return count; + + disable_irq(client->irq); + + error = iqs269_dev_init(iqs269); + + iqs269_irq_wait(); + enable_irq(client->irq); + + if (error) + return error; + + return count; +} + +static DEVICE_ATTR_RO(counts); +static DEVICE_ATTR_RO(hall_bin); +static DEVICE_ATTR_RW(hall_enable); +static DEVICE_ATTR_RW(ch_number); +static DEVICE_ATTR_RW(rx_enable); +static DEVICE_ATTR_RW(ati_mode); +static DEVICE_ATTR_RW(ati_base); +static DEVICE_ATTR_RW(ati_target); +static DEVICE_ATTR_RW(ati_trigger); + +static struct attribute *iqs269_attrs[] = { + &dev_attr_counts.attr, + &dev_attr_hall_bin.attr, + &dev_attr_hall_enable.attr, + &dev_attr_ch_number.attr, + 
&dev_attr_rx_enable.attr, + &dev_attr_ati_mode.attr, + &dev_attr_ati_base.attr, + &dev_attr_ati_target.attr, + &dev_attr_ati_trigger.attr, + NULL, +}; + +static const struct attribute_group iqs269_attr_group = { + .attrs = iqs269_attrs, +}; + +static const struct regmap_config iqs269_regmap_config = { + .reg_bits = 8, + .val_bits = 16, + .max_register = IQS269_MAX_REG, +}; + +static int iqs269_probe(struct i2c_client *client) +{ + struct iqs269_ver_info ver_info; + struct iqs269_private *iqs269; + int error; + + iqs269 = devm_kzalloc(&client->dev, sizeof(*iqs269), GFP_KERNEL); + if (!iqs269) + return -ENOMEM; + + i2c_set_clientdata(client, iqs269); + iqs269->client = client; + + iqs269->regmap = devm_regmap_init_i2c(client, &iqs269_regmap_config); + if (IS_ERR(iqs269->regmap)) { + error = PTR_ERR(iqs269->regmap); + dev_err(&client->dev, "Failed to initialize register map: %d\n", + error); + return error; + } + + mutex_init(&iqs269->lock); + + error = regmap_raw_read(iqs269->regmap, IQS269_VER_INFO, &ver_info, + sizeof(ver_info)); + if (error) + return error; + + if (ver_info.prod_num != IQS269_VER_INFO_PROD_NUM) { + dev_err(&client->dev, "Unrecognized product number: 0x%02X\n", + ver_info.prod_num); + return -EINVAL; + } + + error = iqs269_parse_prop(iqs269); + if (error) + return error; + + error = iqs269_dev_init(iqs269); + if (error) { + dev_err(&client->dev, "Failed to initialize device: %d\n", + error); + return error; + } + + error = iqs269_input_init(iqs269); + if (error) + return error; + + error = devm_request_threaded_irq(&client->dev, client->irq, + NULL, iqs269_irq, IRQF_ONESHOT, + client->name, iqs269); + if (error) { + dev_err(&client->dev, "Failed to request IRQ: %d\n", error); + return error; + } + + error = devm_device_add_group(&client->dev, &iqs269_attr_group); + if (error) + dev_err(&client->dev, "Failed to add attributes: %d\n", error); + + return error; +} + +static int __maybe_unused iqs269_suspend(struct device *dev) +{ + struct iqs269_private *iqs269 = dev_get_drvdata(dev); + struct i2c_client *client = iqs269->client; + unsigned int val; + int error; + + if (!iqs269->suspend_mode) + return 0; + + disable_irq(client->irq); + + /* + * Automatic power mode switching must be disabled before the device is + * forced into any particular power mode. In this case, the device will + * transition into normal-power mode. + */ + error = regmap_update_bits(iqs269->regmap, IQS269_SYS_SETTINGS, + IQS269_SYS_SETTINGS_DIS_AUTO, ~0); + if (error) + goto err_irq; + + /* + * The following check ensures the device has completed its transition + * into normal-power mode before a manual mode switch is performed. + */ + error = regmap_read_poll_timeout(iqs269->regmap, IQS269_SYS_FLAGS, val, + !(val & IQS269_SYS_FLAGS_PWR_MODE_MASK), + IQS269_PWR_MODE_POLL_SLEEP_US, + IQS269_PWR_MODE_POLL_TIMEOUT_US); + if (error) + goto err_irq; + + error = regmap_update_bits(iqs269->regmap, IQS269_SYS_SETTINGS, + IQS269_SYS_SETTINGS_PWR_MODE_MASK, + iqs269->suspend_mode << + IQS269_SYS_SETTINGS_PWR_MODE_SHIFT); + if (error) + goto err_irq; + + /* + * This last check ensures the device has completed its transition into + * the desired power mode to prevent any spurious interrupts from being + * triggered after iqs269_suspend has already returned. 
+ */ + error = regmap_read_poll_timeout(iqs269->regmap, IQS269_SYS_FLAGS, val, + (val & IQS269_SYS_FLAGS_PWR_MODE_MASK) + == (iqs269->suspend_mode << + IQS269_SYS_FLAGS_PWR_MODE_SHIFT), + IQS269_PWR_MODE_POLL_SLEEP_US, + IQS269_PWR_MODE_POLL_TIMEOUT_US); + +err_irq: + iqs269_irq_wait(); + enable_irq(client->irq); + + return error; +} + +static int __maybe_unused iqs269_resume(struct device *dev) +{ + struct iqs269_private *iqs269 = dev_get_drvdata(dev); + struct i2c_client *client = iqs269->client; + unsigned int val; + int error; + + if (!iqs269->suspend_mode) + return 0; + + disable_irq(client->irq); + + error = regmap_update_bits(iqs269->regmap, IQS269_SYS_SETTINGS, + IQS269_SYS_SETTINGS_PWR_MODE_MASK, 0); + if (error) + goto err_irq; + + /* + * This check ensures the device has returned to normal-power mode + * before automatic power mode switching is re-enabled. + */ + error = regmap_read_poll_timeout(iqs269->regmap, IQS269_SYS_FLAGS, val, + !(val & IQS269_SYS_FLAGS_PWR_MODE_MASK), + IQS269_PWR_MODE_POLL_SLEEP_US, + IQS269_PWR_MODE_POLL_TIMEOUT_US); + if (error) + goto err_irq; + + error = regmap_update_bits(iqs269->regmap, IQS269_SYS_SETTINGS, + IQS269_SYS_SETTINGS_DIS_AUTO, 0); + if (error) + goto err_irq; + + /* + * This step reports any events that may have been "swallowed" as a + * result of polling PWR_MODE (which automatically acknowledges any + * pending interrupts). + */ + error = iqs269_report(iqs269); + +err_irq: + iqs269_irq_wait(); + enable_irq(client->irq); + + return error; +} + +static SIMPLE_DEV_PM_OPS(iqs269_pm, iqs269_suspend, iqs269_resume); + +static const struct of_device_id iqs269_of_match[] = { + { .compatible = "azoteq,iqs269a" }, + { } +}; +MODULE_DEVICE_TABLE(of, iqs269_of_match); + +static struct i2c_driver iqs269_i2c_driver = { + .driver = { + .name = "iqs269a", + .of_match_table = iqs269_of_match, + .pm = &iqs269_pm, + }, + .probe_new = iqs269_probe, +}; +module_i2c_driver(iqs269_i2c_driver); + +MODULE_AUTHOR("Jeff LaBundy "); +MODULE_DESCRIPTION("Azoteq IQS269A Capacitive Touch Controller"); +MODULE_LICENSE("GPL"); -- GitLab From 6def17b12ba3d30196d4f35a5cf1edf38f791d1d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Miros=C5=82aw?= Date: Sun, 17 May 2020 20:38:43 -0700 Subject: [PATCH 0511/1971] Input: elants - remove unused axes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Driver only ever reports MT events and input_mt_init_slots() sets up emulated axes already. Clear the capabilities not generated directly and move MT axes setup, so they are visible by input_mt_init_slots(). 
Signed-off-by: Michał Mirosław Reviewed-by: Dmitry Osipenko Tested-by: Dmitry Osipenko Link: https://lore.kernel.org/r/d5eee8cd305adb144a11264d70da94f7b6570366.1587923061.git.mirq-linux@rere.qmqm.pl Signed-off-by: Dmitry Torokhov --- drivers/input/touchscreen/elants_i2c.c | 26 ++++++++------------------ 1 file changed, 8 insertions(+), 18 deletions(-) diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c index 0c4bbd2eb3a2f..3abfa3f7db38e 100644 --- a/drivers/input/touchscreen/elants_i2c.c +++ b/drivers/input/touchscreen/elants_i2c.c @@ -1316,25 +1316,7 @@ static int elants_i2c_probe(struct i2c_client *client, ts->input->name = "Elan Touchscreen"; ts->input->id.bustype = BUS_I2C; - __set_bit(BTN_TOUCH, ts->input->keybit); - __set_bit(EV_ABS, ts->input->evbit); - __set_bit(EV_KEY, ts->input->evbit); - - /* Single touch input params setup */ - input_set_abs_params(ts->input, ABS_X, 0, ts->x_max, 0, 0); - input_set_abs_params(ts->input, ABS_Y, 0, ts->y_max, 0, 0); - input_set_abs_params(ts->input, ABS_PRESSURE, 0, 255, 0, 0); - input_abs_set_res(ts->input, ABS_X, ts->x_res); - input_abs_set_res(ts->input, ABS_Y, ts->y_res); - /* Multitouch input params setup */ - error = input_mt_init_slots(ts->input, MAX_CONTACT_NUM, - INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED); - if (error) { - dev_err(&client->dev, - "failed to initialize MT slots: %d\n", error); - return error; - } input_set_abs_params(ts->input, ABS_MT_POSITION_X, 0, ts->x_max, 0, 0); input_set_abs_params(ts->input, ABS_MT_POSITION_Y, 0, ts->y_max, 0, 0); @@ -1346,6 +1328,14 @@ static int elants_i2c_probe(struct i2c_client *client, input_abs_set_res(ts->input, ABS_MT_POSITION_Y, ts->y_res); input_abs_set_res(ts->input, ABS_MT_TOUCH_MAJOR, 1); + error = input_mt_init_slots(ts->input, MAX_CONTACT_NUM, + INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED); + if (error) { + dev_err(&client->dev, + "failed to initialize MT slots: %d\n", error); + return error; + } + error = input_register_device(ts->input); if (error) { dev_err(&client->dev, -- GitLab From 68334dbab13bc5d637fdf1aa2ccfe7e1932b2af1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Miros=C5=82aw?= Date: Sun, 17 May 2020 20:39:40 -0700 Subject: [PATCH 0512/1971] Input: elants - override touchscreen info with DT properties MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Allow overriding of information from hardware and support additional common DT properties like axis inversion. This is required for eg. Nexus 7 and TF300T where the programmed values in firmware differ from reality. 
Signed-off-by: Dmitry Osipenko [moved "prop" before DMA buffer] Signed-off-by: Michał Mirosław Link: https://lore.kernel.org/r/49ea996878264f7c8bde25204e4ddf4b1e85ae71.1587923061.git.mirq-linux@rere.qmqm.pl Signed-off-by: Dmitry Torokhov --- drivers/input/touchscreen/elants_i2c.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c index 3abfa3f7db38e..ccaef40315560 100644 --- a/drivers/input/touchscreen/elants_i2c.c +++ b/drivers/input/touchscreen/elants_i2c.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -137,6 +138,7 @@ struct elants_data { unsigned int y_res; unsigned int x_max; unsigned int y_max; + struct touchscreen_properties prop; enum elants_state state; enum elants_iap_mode iap_mode; @@ -876,8 +878,7 @@ static void elants_i2c_mt_event(struct elants_data *ts, u8 *buf) input_mt_slot(input, i); input_mt_report_slot_state(input, tool_type, true); - input_event(input, EV_ABS, ABS_MT_POSITION_X, x); - input_event(input, EV_ABS, ABS_MT_POSITION_Y, y); + touchscreen_report_pos(input, &ts->prop, x, y, true); input_event(input, EV_ABS, ABS_MT_PRESSURE, p); input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR, w); @@ -1328,6 +1329,8 @@ static int elants_i2c_probe(struct i2c_client *client, input_abs_set_res(ts->input, ABS_MT_POSITION_Y, ts->y_res); input_abs_set_res(ts->input, ABS_MT_TOUCH_MAJOR, 1); + touchscreen_parse_properties(ts->input, true, &ts->prop); + error = input_mt_init_slots(ts->input, MAX_CONTACT_NUM, INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED); if (error) { -- GitLab From 918e2844d940da7c624262a7aa327615d3eb5abd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Miros=C5=82aw?= Date: Sun, 17 May 2020 20:41:05 -0700 Subject: [PATCH 0513/1971] Input: elants - refactor elants_i2c_execute_command() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Apply some DRY-ing to elants_i2c_execute_command() callers. This pulls polling and error printk()s into a single function. 
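The consolidated shape is roughly the following (a generic sketch with a hypothetical example_* name, not the driver's exact code): the helper owns the i2c_transfer() retry loop and the error reporting, so each call site shrinks to a single error check.

#include <linux/i2c.h>
#include <linux/kernel.h>

static int example_execute_command(struct i2c_client *client,
                                   const u8 *cmd, size_t cmd_size,
                                   u8 *resp, size_t resp_size,
                                   int retries, const char *cmd_name)
{
        struct i2c_msg msgs[2] = {
                {
                        .addr  = client->addr,
                        .flags = client->flags & I2C_M_TEN,
                        .len   = cmd_size,
                        .buf   = (u8 *)cmd,
                },
                {
                        .addr  = client->addr,
                        .flags = (client->flags & I2C_M_TEN) | I2C_M_RD,
                        .len   = resp_size,
                        .buf   = resp,
                },
        };
        int ret;

        for (;;) {
                ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
                if (ret == ARRAY_SIZE(msgs))
                        return 0;       /* both messages went through */

                ret = ret < 0 ? ret : -EIO;
                if (--retries > 0) {
                        dev_dbg(&client->dev,
                                "(%s) I2C transfer failed: %d (retrying)\n",
                                cmd_name, ret);
                        continue;
                }

                dev_err(&client->dev, "(%s) I2C transfer failed: %d\n",
                        cmd_name, ret);
                return ret;
        }
}

Call sites then reduce to a single "error = example_execute_command(...); if (error) return error;" with no per-caller retry loops or printk()s.
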
Signed-off-by: Michał Mirosław Link: https://lore.kernel.org/r/6c576f688b385235c65b461410a917080d27e825.1587923061.git.mirq-linux@rere.qmqm.pl Signed-off-by: Dmitry Torokhov --- drivers/input/touchscreen/elants_i2c.c | 200 ++++++++++++------------- 1 file changed, 99 insertions(+), 101 deletions(-) diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c index ccaef40315560..233cb1085bbdd 100644 --- a/drivers/input/touchscreen/elants_i2c.c +++ b/drivers/input/touchscreen/elants_i2c.c @@ -192,7 +192,8 @@ static int elants_i2c_read(struct i2c_client *client, void *data, size_t size) static int elants_i2c_execute_command(struct i2c_client *client, const u8 *cmd, size_t cmd_size, - u8 *resp, size_t resp_size) + u8 *resp, size_t resp_size, + int retries, const char *cmd_name) { struct i2c_msg msgs[2]; int ret; @@ -212,30 +213,55 @@ static int elants_i2c_execute_command(struct i2c_client *client, break; default: - dev_err(&client->dev, "%s: invalid command %*ph\n", - __func__, (int)cmd_size, cmd); + dev_err(&client->dev, "(%s): invalid command: %*ph\n", + cmd_name, (int)cmd_size, cmd); return -EINVAL; } - msgs[0].addr = client->addr; - msgs[0].flags = client->flags & I2C_M_TEN; - msgs[0].len = cmd_size; - msgs[0].buf = (u8 *)cmd; + for (;;) { + msgs[0].addr = client->addr; + msgs[0].flags = client->flags & I2C_M_TEN; + msgs[0].len = cmd_size; + msgs[0].buf = (u8 *)cmd; + + msgs[1].addr = client->addr; + msgs[1].flags = (client->flags & I2C_M_TEN) | I2C_M_RD; + msgs[1].flags |= I2C_M_RD; + msgs[1].len = resp_size; + msgs[1].buf = resp; + + ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); + if (ret < 0) { + if (--retries > 0) { + dev_dbg(&client->dev, + "(%s) I2C transfer failed: %pe (retrying)\n", + cmd_name, ERR_PTR(ret)); + continue; + } - msgs[1].addr = client->addr; - msgs[1].flags = client->flags & I2C_M_TEN; - msgs[1].flags |= I2C_M_RD; - msgs[1].len = resp_size; - msgs[1].buf = resp; + dev_err(&client->dev, + "(%s) I2C transfer failed: %pe\n", + cmd_name, ERR_PTR(ret)); + return ret; + } - ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); - if (ret < 0) - return ret; + if (ret != ARRAY_SIZE(msgs) || + resp[FW_HDR_TYPE] != expected_response) { + if (--retries > 0) { + dev_dbg(&client->dev, + "(%s) unexpected response: %*ph (retrying)\n", + cmd_name, ret, resp); + continue; + } - if (ret != ARRAY_SIZE(msgs) || resp[FW_HDR_TYPE] != expected_response) - return -EIO; + dev_err(&client->dev, + "(%s) unexpected response: %*ph\n", + cmd_name, ret, resp); + return -EIO; + } - return 0; + return 0; + } } static int elants_i2c_calibrate(struct elants_data *ts) @@ -308,27 +334,21 @@ static u16 elants_i2c_parse_version(u8 *buf) static int elants_i2c_query_hw_version(struct elants_data *ts) { struct i2c_client *client = ts->client; - int error, retry_cnt; + int retry_cnt = MAX_RETRIES; const u8 cmd[] = { CMD_HEADER_READ, E_ELAN_INFO_FW_ID, 0x00, 0x01 }; u8 resp[HEADER_SIZE]; + int error; - for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) { + while (retry_cnt--) { error = elants_i2c_execute_command(client, cmd, sizeof(cmd), - resp, sizeof(resp)); - if (!error) { - ts->hw_version = elants_i2c_parse_version(resp); - if (ts->hw_version != 0xffff) - return 0; - } - - dev_dbg(&client->dev, "read fw id error=%d, buf=%*phC\n", - error, (int)sizeof(resp), resp); - } + resp, sizeof(resp), 1, + "read fw id"); + if (error) + return error; - if (error) { - dev_err(&client->dev, - "Failed to read fw id: %d\n", error); - return error; + ts->hw_version 
= elants_i2c_parse_version(resp); + if (ts->hw_version != 0xffff) + return 0; } dev_err(&client->dev, "Invalid fw id: %#04x\n", ts->hw_version); @@ -339,26 +359,27 @@ static int elants_i2c_query_hw_version(struct elants_data *ts) static int elants_i2c_query_fw_version(struct elants_data *ts) { struct i2c_client *client = ts->client; - int error, retry_cnt; + int retry_cnt = MAX_RETRIES; const u8 cmd[] = { CMD_HEADER_READ, E_ELAN_INFO_FW_VER, 0x00, 0x01 }; u8 resp[HEADER_SIZE]; + int error; - for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) { + while (retry_cnt--) { error = elants_i2c_execute_command(client, cmd, sizeof(cmd), - resp, sizeof(resp)); - if (!error) { - ts->fw_version = elants_i2c_parse_version(resp); - if (ts->fw_version != 0x0000 && - ts->fw_version != 0xffff) - return 0; - } + resp, sizeof(resp), 1, + "read fw version"); + if (error) + return error; + + ts->fw_version = elants_i2c_parse_version(resp); + if (ts->fw_version != 0x0000 && ts->fw_version != 0xffff) + return 0; - dev_dbg(&client->dev, "read fw version error=%d, buf=%*phC\n", - error, (int)sizeof(resp), resp); + dev_dbg(&client->dev, "(read fw version) resp %*phC\n", + (int)sizeof(resp), resp); } - dev_err(&client->dev, - "Failed to read fw version or fw version is invalid\n"); + dev_err(&client->dev, "Invalid fw ver: %#04x\n", ts->fw_version); return -EINVAL; } @@ -366,30 +387,24 @@ static int elants_i2c_query_fw_version(struct elants_data *ts) static int elants_i2c_query_test_version(struct elants_data *ts) { struct i2c_client *client = ts->client; - int error, retry_cnt; + int error; u16 version; const u8 cmd[] = { CMD_HEADER_READ, E_ELAN_INFO_TEST_VER, 0x00, 0x01 }; u8 resp[HEADER_SIZE]; - for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) { - error = elants_i2c_execute_command(client, cmd, sizeof(cmd), - resp, sizeof(resp)); - if (!error) { - version = elants_i2c_parse_version(resp); - ts->test_version = version >> 8; - ts->solution_version = version & 0xff; - - return 0; - } - - dev_dbg(&client->dev, - "read test version error rc=%d, buf=%*phC\n", - error, (int)sizeof(resp), resp); + error = elants_i2c_execute_command(client, cmd, sizeof(cmd), + resp, sizeof(resp), MAX_RETRIES, + "read test version"); + if (error) { + dev_err(&client->dev, "Failed to read test version\n"); + return error; } - dev_err(&client->dev, "Failed to read test version\n"); + version = elants_i2c_parse_version(resp); + ts->test_version = version >> 8; + ts->solution_version = version & 0xff; - return -EINVAL; + return 0; } static int elants_i2c_query_bc_version(struct elants_data *ts) @@ -401,13 +416,10 @@ static int elants_i2c_query_bc_version(struct elants_data *ts) int error; error = elants_i2c_execute_command(client, cmd, sizeof(cmd), - resp, sizeof(resp)); - if (error) { - dev_err(&client->dev, - "read BC version error=%d, buf=%*phC\n", - error, (int)sizeof(resp), resp); + resp, sizeof(resp), 1, + "read BC version"); + if (error) return error; - } version = elants_i2c_parse_version(resp); ts->bc_version = version >> 8; @@ -439,12 +451,10 @@ static int elants_i2c_query_ts_info(struct elants_data *ts) error = elants_i2c_execute_command(client, get_resolution_cmd, sizeof(get_resolution_cmd), - resp, sizeof(resp)); - if (error) { - dev_err(&client->dev, "get resolution command failed: %d\n", - error); + resp, sizeof(resp), 1, + "get resolution"); + if (error) return error; - } rows = resp[2] + resp[6] + resp[10]; cols = resp[3] + resp[7] + resp[11]; @@ -452,36 +462,29 @@ static int elants_i2c_query_ts_info(struct 
elants_data *ts) /* Process mm_to_pixel information */ error = elants_i2c_execute_command(client, get_osr_cmd, sizeof(get_osr_cmd), - resp, sizeof(resp)); - if (error) { - dev_err(&client->dev, "get osr command failed: %d\n", - error); + resp, sizeof(resp), 1, "get osr"); + if (error) return error; - } osr = resp[3]; error = elants_i2c_execute_command(client, get_physical_scan_cmd, sizeof(get_physical_scan_cmd), - resp, sizeof(resp)); - if (error) { - dev_err(&client->dev, "get physical scan command failed: %d\n", - error); + resp, sizeof(resp), 1, + "get physical scan"); + if (error) return error; - } phy_x = get_unaligned_be16(&resp[2]); error = elants_i2c_execute_command(client, get_physical_drive_cmd, sizeof(get_physical_drive_cmd), - resp, sizeof(resp)); - if (error) { - dev_err(&client->dev, "get physical drive command failed: %d\n", - error); + resp, sizeof(resp), 1, + "get physical drive"); + if (error) return error; - } phy_y = get_unaligned_be16(&resp[2]); @@ -636,11 +639,10 @@ static int elants_i2c_validate_remark_id(struct elants_data *ts, /* Compare TS Remark ID and FW Remark ID */ error = elants_i2c_execute_command(client, cmd, sizeof(cmd), - resp, sizeof(resp)); - if (error) { - dev_err(&client->dev, "failed to query Remark ID: %d\n", error); + resp, sizeof(resp), + 1, "read Remark ID"); + if (error) return error; - } ts_remark_id = get_unaligned_be16(&resp[3]); @@ -1075,16 +1077,12 @@ static ssize_t show_calibration_count(struct device *dev, int error; error = elants_i2c_execute_command(client, cmd, sizeof(cmd), - resp, sizeof(resp)); - if (error) { - dev_err(&client->dev, - "read ReK status error=%d, buf=%*phC\n", - error, (int)sizeof(resp), resp); + resp, sizeof(resp), 1, + "read ReK status"); + if (error) return sprintf(buf, "%d\n", error); - } rek_count = get_unaligned_be16(&resp[2]); - return sprintf(buf, "0x%04x\n", rek_count); } -- GitLab From 6cc3d0e9a097981c8a0317c65b8a2278593bd2b0 Mon Sep 17 00:00:00 2001 From: Mian Yousaf Kaukab Date: Wed, 23 Oct 2019 11:27:03 +0200 Subject: [PATCH 0514/1971] cpufreq: tegra186: add CPUFREQ_NEED_INITIAL_FREQ_CHECK flag The driver doesn't provide ->get() method to read current frequency and the frequency is set to 0 at initialization which makes the driver fail at initialization time. Set the CPUFREQ_NEED_INITIAL_FREQ_CHECK flag for the driver, so the cpufreq core checks for the unlisted frequency and sets the CPU to a valid frequency from the frequency table. 
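For illustration, a generic skeleton (a hypothetical "example" driver, not the tegra186 one) showing where the flag sits; the alternative fix would be to implement a .get() callback so policy->cur reflects the real hardware frequency at initialization.

#include <linux/cpufreq.h>

static int example_cpufreq_init(struct cpufreq_policy *policy)
{
        /* Attach a valid frequency table to the policy here. */
        return 0;
}

static int example_cpufreq_target_index(struct cpufreq_policy *policy,
                                        unsigned int index)
{
        /* Program policy->freq_table[index].frequency into the hardware. */
        return 0;
}

static struct cpufreq_driver example_cpufreq_driver = {
        .name         = "example",
        /*
         * No .get() callback, so policy->cur starts out as 0; the
         * NEED_INITIAL_FREQ_CHECK flag makes the core treat that as an
         * unlisted frequency and switch to a valid table entry instead
         * of failing initialization.
         */
        .flags        = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
                        CPUFREQ_NEED_INITIAL_FREQ_CHECK,
        .verify       = cpufreq_generic_frequency_table_verify,
        .target_index = example_cpufreq_target_index,
        .init         = example_cpufreq_init,
};
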
Signed-off-by: Mian Yousaf Kaukab [ Viresh: Massaged change log ] Signed-off-by: Viresh Kumar --- drivers/cpufreq/tegra186-cpufreq.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c index 2e233ad727589..3d2f143748ef8 100644 --- a/drivers/cpufreq/tegra186-cpufreq.c +++ b/drivers/cpufreq/tegra186-cpufreq.c @@ -93,7 +93,8 @@ static int tegra186_cpufreq_set_target(struct cpufreq_policy *policy, static struct cpufreq_driver tegra186_cpufreq_driver = { .name = "tegra186", - .flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY, + .flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY | + CPUFREQ_NEED_INITIAL_FREQ_CHECK, .verify = cpufreq_generic_frequency_table_verify, .target_index = tegra186_cpufreq_set_target, .init = tegra186_cpufreq_init, -- GitLab From ace342097768e35fd41934285604fa97da1e235a Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 7 May 2020 09:47:13 +0200 Subject: [PATCH 0515/1971] clk: renesas: cpg-mssr: Fix STBCR suspend/resume handling On SoCs with Standby Control Registers (STBCRs) instead of Module Stop Control Registers (MSTPCRs), the suspend handler saves the wrong registers, and the resume handler prints the wrong register in an error message. Fortunately this cannot happen yet, as the suspend/resume code is used on PSCI systems only, and systems with STBCRs (RZ/A1 and RZ/A2) do not use PSCI. Still, it is better to fix this, to avoid this becoming a problem in the future. Distinguish between STBCRs and MSTPCRs where needed. Replace the useless printing of the virtual register address in the resume error message by printing the register index. Fixes: fde35c9c7db5732c ("clk: renesas: cpg-mssr: Add R7S9210 support") Signed-off-by: Geert Uytterhoeven Link: https://lore.kernel.org/r/20200507074713.30113-1-geert+renesas@glider.be --- drivers/clk/renesas/renesas-cpg-mssr.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c index 8f6dff362869b..dcb6e2706d372 100644 --- a/drivers/clk/renesas/renesas-cpg-mssr.c +++ b/drivers/clk/renesas/renesas-cpg-mssr.c @@ -818,7 +818,8 @@ static int cpg_mssr_suspend_noirq(struct device *dev) /* Save module registers with bits under our control */ for (reg = 0; reg < ARRAY_SIZE(priv->smstpcr_saved); reg++) { if (priv->smstpcr_saved[reg].mask) - priv->smstpcr_saved[reg].val = + priv->smstpcr_saved[reg].val = priv->stbyctrl ? + readb(priv->base + STBCR(reg)) : readl(priv->base + SMSTPCR(reg)); } @@ -878,8 +879,9 @@ static int cpg_mssr_resume_noirq(struct device *dev) } if (!i) - dev_warn(dev, "Failed to enable SMSTP %p[0x%x]\n", - priv->base + SMSTPCR(reg), oldval & mask); + dev_warn(dev, "Failed to enable %s%u[0x%x]\n", + priv->stbyctrl ? "STB" : "SMSTP", reg, + oldval & mask); } return 0; -- GitLab From 59e7166fe0e2cb210252c3454a9241e586a2ba42 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 7 May 2020 09:50:26 +0200 Subject: [PATCH 0516/1971] dt-bindings: clock: renesas: div6: Convert to json-schema MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Convert the Renesas CPG DIV6 Clock Device Tree binding documentation to json-schema. Drop R-Car Gen2 compatible values, which were obsoleted by the unified "Renesas Clock Pulse Generator / Module Standby and Software Reset" DT bindings. Update the example to match reality. 
Signed-off-by: Geert Uytterhoeven Reviewed-by: Stephen Boyd Reviewed-by: Rob Herring Reviewed-by: Niklas Söderlund Link: https://lore.kernel.org/r/20200507075026.31941-1-geert+renesas@glider.be --- .../clock/renesas,cpg-div6-clock.yaml | 60 +++++++++++++++++++ .../clock/renesas,cpg-div6-clocks.txt | 40 ------------- 2 files changed, 60 insertions(+), 40 deletions(-) create mode 100644 Documentation/devicetree/bindings/clock/renesas,cpg-div6-clock.yaml delete mode 100644 Documentation/devicetree/bindings/clock/renesas,cpg-div6-clocks.txt diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-div6-clock.yaml b/Documentation/devicetree/bindings/clock/renesas,cpg-div6-clock.yaml new file mode 100644 index 0000000000000..c55a7c494e013 --- /dev/null +++ b/Documentation/devicetree/bindings/clock/renesas,cpg-div6-clock.yaml @@ -0,0 +1,60 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/clock/renesas,cpg-div6-clock.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Renesas CPG DIV6 Clock + +maintainers: + - Geert Uytterhoeven + +description: + The CPG DIV6 clocks are variable factor clocks provided by the Clock Pulse + Generator (CPG). Their clock input is divided by a configurable factor from 1 + to 64. + +properties: + compatible: + items: + - enum: + - renesas,r8a73a4-div6-clock # R-Mobile APE6 + - renesas,r8a7740-div6-clock # R-Mobile A1 + - renesas,sh73a0-div6-clock # SH-Mobile AG5 + - const: renesas,cpg-div6-clock + + reg: + maxItems: 1 + + clocks: + oneOf: + - maxItems: 1 + - maxItems: 4 + - maxItems: 8 + description: + For clocks with multiple parents, invalid settings must be specified as + "<0>". + + '#clock-cells': + const: 0 + + clock-output-names: true + +required: + - compatible + - reg + - clocks + - '#clock-cells' + +additionalProperties: false + +examples: + - | + #include + sdhi2_clk: sdhi2_clk@e615007c { + compatible = "renesas,r8a73a4-div6-clock", "renesas,cpg-div6-clock"; + reg = <0xe615007c 4>; + clocks = <&pll1_div2_clk>, <&cpg_clocks R8A73A4_CLK_PLL2S>, <0>, + <&extal2_clk>; + #clock-cells = <0>; + }; diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-div6-clocks.txt b/Documentation/devicetree/bindings/clock/renesas,cpg-div6-clocks.txt deleted file mode 100644 index ae36ab8429198..0000000000000 --- a/Documentation/devicetree/bindings/clock/renesas,cpg-div6-clocks.txt +++ /dev/null @@ -1,40 +0,0 @@ -* Renesas CPG DIV6 Clock - -The CPG DIV6 clocks are variable factor clocks provided by the Clock Pulse -Generator (CPG). Their clock input is divided by a configurable factor from 1 -to 64. - -Required Properties: - - - compatible: Must be one of the following - - "renesas,r8a73a4-div6-clock" for R8A73A4 (R-Mobile APE6) DIV6 clocks - - "renesas,r8a7740-div6-clock" for R8A7740 (R-Mobile A1) DIV6 clocks - - "renesas,r8a7790-div6-clock" for R8A7790 (R-Car H2) DIV6 clocks - - "renesas,r8a7791-div6-clock" for R8A7791 (R-Car M2-W) DIV6 clocks - - "renesas,r8a7793-div6-clock" for R8A7793 (R-Car M2-N) DIV6 clocks - - "renesas,r8a7794-div6-clock" for R8A7794 (R-Car E2) DIV6 clocks - - "renesas,sh73a0-div6-clock" for SH73A0 (SH-Mobile AG5) DIV6 clocks - and "renesas,cpg-div6-clock" as a fallback. - - reg: Base address and length of the memory resource used by the DIV6 clock - - clocks: Reference to the parent clock(s); either one, four, or eight - clocks must be specified. For clocks with multiple parents, invalid - settings must be specified as "<0>". 
- - #clock-cells: Must be 0 - - -Optional Properties: - - - clock-output-names: The name of the clock as a free-form string - - -Example -------- - - sdhi2_clk: sdhi2_clk@e615007c { - compatible = "renesas,r8a73a4-div6-clock", "renesas,cpg-div6-clock"; - reg = <0 0xe615007c 0 4>; - clocks = <&pll1_div2_clk>, <&cpg_clocks R8A73A4_CLK_PLL2S>, - <0>, <&extal2_clk>; - #clock-cells = <0>; - clock-output-names = "sdhi2ck"; - }; -- GitLab From ca4faf543a33373bed3650812d5f0cd0bd295b1a Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Sat, 2 May 2020 10:37:44 -0400 Subject: [PATCH 0517/1971] SUNRPC: Move xpt_mutex into socket xpo_sendto methods It appears that the RPC/RDMA transport does not need serialization of calls to its xpo_sendto method. Move the mutex into the socket methods that still need that serialization. Tail latencies are unambiguously better with this patch applied. fio randrw 8KB 70/30 on NFSv3, smaller numbers are better: clat percentiles (usec): With xpt_mutex: r | 99.99th=[ 8848] w | 99.99th=[ 9634] Without xpt_mutex: r | 99.99th=[ 8586] w | 99.99th=[ 8979] Serializing the construction of RPC/RDMA transport headers is not really necessary at this point, because the Linux NFS server implementation never changes its credit grant on a connection. If that should change, then svc_rdma_sendto will need to serialize access to the transport's credit grant fields. Reported-by: kbuild test robot [ cel: fix uninitialized variable warning ] Signed-off-by: Chuck Lever --- include/linux/sunrpc/svc_xprt.h | 6 ++++ net/sunrpc/svc_xprt.c | 12 ++------ net/sunrpc/svcsock.c | 25 ++++++++++++++++ net/sunrpc/xprtrdma/svc_rdma_backchannel.c | 35 ++++++++++------------ net/sunrpc/xprtrdma/svc_rdma_sendto.c | 10 +++---- net/sunrpc/xprtsock.c | 12 ++++++-- 6 files changed, 64 insertions(+), 36 deletions(-) diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index 9e1e046de1761..aca35ab5cff24 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -117,6 +117,12 @@ static inline int register_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u return 0; } +static inline bool svc_xprt_is_dead(const struct svc_xprt *xprt) +{ + return (test_bit(XPT_DEAD, &xprt->xpt_flags) != 0) || + (test_bit(XPT_CLOSE, &xprt->xpt_flags) != 0); +} + int svc_reg_xprt_class(struct svc_xprt_class *); void svc_unreg_xprt_class(struct svc_xprt_class *); void svc_xprt_init(struct net *, struct svc_xprt_class *, struct svc_xprt *, diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index 2284ff038dadb..07cdbf7d57648 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -914,16 +914,10 @@ int svc_send(struct svc_rqst *rqstp) xb->page_len + xb->tail[0].iov_len; trace_svc_sendto(xb); - - /* Grab mutex to serialize outgoing data. */ - mutex_lock(&xprt->xpt_mutex); trace_svc_stats_latency(rqstp); - if (test_bit(XPT_DEAD, &xprt->xpt_flags) - || test_bit(XPT_CLOSE, &xprt->xpt_flags)) - len = -ENOTCONN; - else - len = xprt->xpt_ops->xpo_sendto(rqstp); - mutex_unlock(&xprt->xpt_mutex); + + len = xprt->xpt_ops->xpo_sendto(rqstp); + trace_svc_send(rqstp, len); svc_xprt_release(rqstp); diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 023514e392b31..3e7b6445e3172 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -506,6 +506,9 @@ out_free: * svc_udp_sendto - Send out a reply on a UDP socket * @rqstp: completed svc_rqst * + * xpt_mutex ensures @rqstp's whole message is written to the socket + * without interruption. 
+ * * Returns the number of bytes sent, or a negative errno. */ static int svc_udp_sendto(struct svc_rqst *rqstp) @@ -531,6 +534,11 @@ static int svc_udp_sendto(struct svc_rqst *rqstp) svc_set_cmsg_data(rqstp, cmh); + mutex_lock(&xprt->xpt_mutex); + + if (svc_xprt_is_dead(xprt)) + goto out_notconn; + err = xprt_sock_sendmsg(svsk->sk_sock, &msg, xdr, 0, 0, &sent); xdr_free_bvec(xdr); if (err == -ECONNREFUSED) { @@ -538,9 +546,15 @@ static int svc_udp_sendto(struct svc_rqst *rqstp) err = xprt_sock_sendmsg(svsk->sk_sock, &msg, xdr, 0, 0, &sent); xdr_free_bvec(xdr); } + + mutex_unlock(&xprt->xpt_mutex); if (err < 0) return err; return sent; + +out_notconn: + mutex_unlock(&xprt->xpt_mutex); + return -ENOTCONN; } static int svc_udp_has_wspace(struct svc_xprt *xprt) @@ -1063,6 +1077,9 @@ err_noclose: * svc_tcp_sendto - Send out a reply on a TCP socket * @rqstp: completed svc_rqst * + * xpt_mutex ensures @rqstp's whole message is written to the socket + * without interruption. + * * Returns the number of bytes sent, or a negative errno. */ static int svc_tcp_sendto(struct svc_rqst *rqstp) @@ -1080,12 +1097,19 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp) svc_release_skb(rqstp); + mutex_lock(&xprt->xpt_mutex); + if (svc_xprt_is_dead(xprt)) + goto out_notconn; err = xprt_sock_sendmsg(svsk->sk_sock, &msg, xdr, 0, marker, &sent); xdr_free_bvec(xdr); if (err < 0 || sent != (xdr->len + sizeof(marker))) goto out_close; + mutex_unlock(&xprt->xpt_mutex); return sent; +out_notconn: + mutex_unlock(&xprt->xpt_mutex); + return -ENOTCONN; out_close: pr_notice("rpc-srv/tcp: %s: %s %d when sending %d bytes - shutting down socket\n", xprt->xpt_server->sv_name, @@ -1093,6 +1117,7 @@ out_close: (err < 0) ? err : sent, xdr->len); set_bit(XPT_CLOSE, &xprt->xpt_flags); svc_xprt_enqueue(xprt); + mutex_unlock(&xprt->xpt_mutex); return -EAGAIN; } diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c index af7eb8d202ae7..d9aab4504f2c5 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c +++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c @@ -210,34 +210,31 @@ drop_connection: return -ENOTCONN; } -/* Send an RPC call on the passive end of a transport - * connection. 
+/** + * xprt_rdma_bc_send_request - Send a reverse-direction Call + * @rqst: rpc_rqst containing Call message to be sent + * + * Return values: + * %0 if the message was sent successfully + * %ENOTCONN if the message was not sent */ -static int -xprt_rdma_bc_send_request(struct rpc_rqst *rqst) +static int xprt_rdma_bc_send_request(struct rpc_rqst *rqst) { struct svc_xprt *sxprt = rqst->rq_xprt->bc_xprt; - struct svcxprt_rdma *rdma; + struct svcxprt_rdma *rdma = + container_of(sxprt, struct svcxprt_rdma, sc_xprt); int ret; dprintk("svcrdma: sending bc call with xid: %08x\n", be32_to_cpu(rqst->rq_xid)); - mutex_lock(&sxprt->xpt_mutex); - - ret = -ENOTCONN; - rdma = container_of(sxprt, struct svcxprt_rdma, sc_xprt); - if (!test_bit(XPT_DEAD, &sxprt->xpt_flags)) { - ret = rpcrdma_bc_send_request(rdma, rqst); - if (ret == -ENOTCONN) - svc_close_xprt(sxprt); - } + if (test_bit(XPT_DEAD, &sxprt->xpt_flags)) + return -ENOTCONN; - mutex_unlock(&sxprt->xpt_mutex); - - if (ret < 0) - return ret; - return 0; + ret = rpcrdma_bc_send_request(rdma, rqst); + if (ret == -ENOTCONN) + svc_close_xprt(sxprt); + return ret; } static void diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index b6c8643867f2c..38e7c3c8c4a9c 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -868,12 +868,10 @@ int svc_rdma_sendto(struct svc_rqst *rqstp) __be32 *p; int ret; - /* Create the RDMA response header. xprt->xpt_mutex, - * acquired in svc_send(), serializes RPC replies. The - * code path below that inserts the credit grant value - * into each transport header runs only inside this - * critical section. - */ + ret = -ENOTCONN; + if (svc_xprt_is_dead(xprt)) + goto err0; + ret = -ENOMEM; sctxt = svc_rdma_send_ctxt_get(rdma); if (!sctxt) diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 845d0be805ece..839c493307854 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -2548,8 +2548,16 @@ static int bc_sendto(struct rpc_rqst *req) return sent; } -/* - * The send routine. Borrows from svc_send +/** + * bc_send_request - Send a backchannel Call on a TCP socket + * @req: rpc_rqst containing Call message to be sent + * + * xpt_mutex ensures @rqstp's whole message is written to the socket + * without interruption. 
+ * + * Return values: + * %0 if the message was sent successfully + * %ENOTCONN if the message was not sent */ static int bc_send_request(struct rpc_rqst *req) { -- GitLab From 2abfbe7e3a725380f22f572c69da41515a0dd43b Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Fri, 20 Mar 2020 14:02:49 -0400 Subject: [PATCH 0518/1971] svcrdma: Clean up the tracing for rw_ctx_init errors - De-duplicate code - Rename the tracepoint with "_err" to allow enabling via glob - Report the sg_cnt for the failing rw_ctx - Fix a dumb signage issue Signed-off-by: Chuck Lever --- include/trace/events/rpcrdma.h | 12 ++++--- net/sunrpc/xprtrdma/svc_rdma_rw.c | 56 +++++++++++++++++++------------ 2 files changed, 43 insertions(+), 25 deletions(-) diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index 132c3c778a432..f231975064cb9 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -1583,28 +1583,32 @@ DECLARE_EVENT_CLASS(svcrdma_dma_map_class, DEFINE_SVC_DMA_EVENT(dma_map_page); DEFINE_SVC_DMA_EVENT(dma_unmap_page); -TRACE_EVENT(svcrdma_dma_map_rwctx, +TRACE_EVENT(svcrdma_dma_map_rw_err, TP_PROTO( const struct svcxprt_rdma *rdma, + unsigned int nents, int status ), - TP_ARGS(rdma, status), + TP_ARGS(rdma, nents, status), TP_STRUCT__entry( __field(int, status) + __field(unsigned int, nents) __string(device, rdma->sc_cm_id->device->name) __string(addr, rdma->sc_xprt.xpt_remotebuf) ), TP_fast_assign( __entry->status = status; + __entry->nents = nents; __assign_str(device, rdma->sc_cm_id->device->name); __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); ), - TP_printk("addr=%s device=%s status=%d", - __get_str(addr), __get_str(device), __entry->status + TP_printk("addr=%s device=%s nents=%u status=%d", + __get_str(addr), __get_str(device), __entry->nents, + __entry->status ) ); diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c index 23c2d3ce0dc9a..db70709e165ac 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_rw.c +++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c @@ -39,7 +39,7 @@ static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc); struct svc_rdma_rw_ctxt { struct list_head rw_list; struct rdma_rw_ctx rw_ctx; - int rw_nents; + unsigned int rw_nents; struct sg_table rw_sg_table; struct scatterlist rw_first_sgl[]; }; @@ -107,6 +107,34 @@ void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma) } } +/** + * svc_rdma_rw_ctx_init - Prepare a R/W context for I/O + * @rdma: controlling transport instance + * @ctxt: R/W context to prepare + * @offset: RDMA offset + * @handle: RDMA tag/handle + * @direction: I/O direction + * + * Returns on success, the number of WQEs that will be needed + * on the workqueue, or a negative errno. + */ +static int svc_rdma_rw_ctx_init(struct svcxprt_rdma *rdma, + struct svc_rdma_rw_ctxt *ctxt, + u64 offset, u32 handle, + enum dma_data_direction direction) +{ + int ret; + + ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp, rdma->sc_port_num, + ctxt->rw_sg_table.sgl, ctxt->rw_nents, + 0, offset, handle, direction); + if (unlikely(ret < 0)) { + svc_rdma_put_rw_ctxt(rdma, ctxt); + trace_svcrdma_dma_map_rw_err(rdma, ctxt->rw_nents, ret); + } + return ret; +} + /* A chunk context tracks all I/O for moving one Read or Write * chunk. This is a a set of rdma_rw's that handle data movement * for all segments of one chunk. 
@@ -431,12 +459,10 @@ svc_rdma_build_writes(struct svc_rdma_write_info *info, goto out_noctx; constructor(info, write_len, ctxt); - ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp, - rdma->sc_port_num, ctxt->rw_sg_table.sgl, - ctxt->rw_nents, 0, seg_offset, - seg_handle, DMA_TO_DEVICE); + ret = svc_rdma_rw_ctx_init(rdma, ctxt, seg_offset, seg_handle, + DMA_TO_DEVICE); if (ret < 0) - goto out_initerr; + return -EIO; trace_svcrdma_send_wseg(seg_handle, write_len, seg_offset); @@ -462,11 +488,6 @@ out_overflow: out_noctx: dprintk("svcrdma: no R/W ctxs available\n"); return -ENOMEM; - -out_initerr: - svc_rdma_put_rw_ctxt(rdma, ctxt); - trace_svcrdma_dma_map_rwctx(rdma, ret); - return -EIO; } /* Send one of an xdr_buf's kvecs by itself. To send a Reply @@ -646,12 +667,10 @@ static int svc_rdma_build_read_segment(struct svc_rdma_read_info *info, goto out_overrun; } - ret = rdma_rw_ctx_init(&ctxt->rw_ctx, cc->cc_rdma->sc_qp, - cc->cc_rdma->sc_port_num, - ctxt->rw_sg_table.sgl, ctxt->rw_nents, - 0, offset, rkey, DMA_FROM_DEVICE); + ret = svc_rdma_rw_ctx_init(cc->cc_rdma, ctxt, offset, rkey, + DMA_FROM_DEVICE); if (ret < 0) - goto out_initerr; + return -EIO; list_add(&ctxt->rw_list, &cc->cc_rwctxts); cc->cc_sqecount += ret; @@ -664,11 +683,6 @@ out_noctx: out_overrun: dprintk("svcrdma: request overruns rq_pages\n"); return -EINVAL; - -out_initerr: - trace_svcrdma_dma_map_rwctx(cc->cc_rdma, ret); - svc_rdma_put_rw_ctxt(cc->cc_rdma, ctxt); - return -EIO; } /* Walk the segments in the Read chunk starting at @p and construct -- GitLab From f4e53e1ce3e56a799c47fef5f4f94cb815b52804 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Fri, 20 Mar 2020 14:23:13 -0400 Subject: [PATCH 0519/1971] svcrdma: Clean up handling of get_rw_ctx errors Clean up: Replace two dprintk call sites with a tracepoint. 
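With the tracepoint moved into svc_rdma_get_rw_ctxt() itself, both call sites can drop their out_noctx labels and simply test for NULL. A condensed sketch of the resulting pattern, abbreviated from the hunks below purely for illustration:

        ctxt = svc_rdma_get_rw_ctxt(rdma, sges);
        if (!ctxt)
                return -ENOMEM; /* allocator has already fired svcrdma_no_rwctx_err */
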
Signed-off-by: Chuck Lever --- include/trace/events/rpcrdma.h | 25 +++++++++++++++++++++++++ net/sunrpc/xprtrdma/svc_rdma_rw.c | 27 +++++++++++---------------- 2 files changed, 36 insertions(+), 16 deletions(-) diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index f231975064cb9..aca9d0f3d7690 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -1612,6 +1612,31 @@ TRACE_EVENT(svcrdma_dma_map_rw_err, ) ); +TRACE_EVENT(svcrdma_no_rwctx_err, + TP_PROTO( + const struct svcxprt_rdma *rdma, + unsigned int num_sges + ), + + TP_ARGS(rdma, num_sges), + + TP_STRUCT__entry( + __field(unsigned int, num_sges) + __string(device, rdma->sc_cm_id->device->name) + __string(addr, rdma->sc_xprt.xpt_remotebuf) + ), + + TP_fast_assign( + __entry->num_sges = num_sges; + __assign_str(device, rdma->sc_cm_id->device->name); + __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); + ), + + TP_printk("addr=%s device=%s num_sges=%d", + __get_str(addr), __get_str(device), __entry->num_sges + ) +); + TRACE_EVENT(svcrdma_send_pullup, TP_PROTO( unsigned int len diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c index db70709e165ac..c2d49f607cfe8 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_rw.c +++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c @@ -67,19 +67,22 @@ svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges) ctxt = kmalloc(struct_size(ctxt, rw_first_sgl, SG_CHUNK_SIZE), GFP_KERNEL); if (!ctxt) - goto out; + goto out_noctx; INIT_LIST_HEAD(&ctxt->rw_list); } ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl; if (sg_alloc_table_chained(&ctxt->rw_sg_table, sges, ctxt->rw_sg_table.sgl, - SG_CHUNK_SIZE)) { - kfree(ctxt); - ctxt = NULL; - } -out: + SG_CHUNK_SIZE)) + goto out_free; return ctxt; + +out_free: + kfree(ctxt); +out_noctx: + trace_svcrdma_no_rwctx_err(rdma, sges); + return NULL; } static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma, @@ -456,7 +459,7 @@ svc_rdma_build_writes(struct svc_rdma_write_info *info, ctxt = svc_rdma_get_rw_ctxt(rdma, (write_len >> PAGE_SHIFT) + 2); if (!ctxt) - goto out_noctx; + return -ENOMEM; constructor(info, write_len, ctxt); ret = svc_rdma_rw_ctx_init(rdma, ctxt, seg_offset, seg_handle, @@ -484,10 +487,6 @@ out_overflow: dprintk("svcrdma: inadequate space in Write chunk (%u)\n", info->wi_nsegs); return -E2BIG; - -out_noctx: - dprintk("svcrdma: no R/W ctxs available\n"); - return -ENOMEM; } /* Send one of an xdr_buf's kvecs by itself. To send a Reply @@ -637,7 +636,7 @@ static int svc_rdma_build_read_segment(struct svc_rdma_read_info *info, sge_no = PAGE_ALIGN(info->ri_pageoff + len) >> PAGE_SHIFT; ctxt = svc_rdma_get_rw_ctxt(cc->cc_rdma, sge_no); if (!ctxt) - goto out_noctx; + return -ENOMEM; ctxt->rw_nents = sge_no; sg = ctxt->rw_sg_table.sgl; @@ -676,10 +675,6 @@ static int svc_rdma_build_read_segment(struct svc_rdma_read_info *info, cc->cc_sqecount += ret; return 0; -out_noctx: - dprintk("svcrdma: no R/W ctxs available\n"); - return -ENOMEM; - out_overrun: dprintk("svcrdma: request overruns rq_pages\n"); return -EINVAL; -- GitLab From 9d20063892624a98bab093725fae6999cfcb328e Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Fri, 20 Mar 2020 14:33:05 -0400 Subject: [PATCH 0520/1971] svcrdma: Trace page overruns when constructing RDMA Reads Clean up: Replace a dprintk call site with a tracepoint. 
Signed-off-by: Chuck Lever --- include/trace/events/rpcrdma.h | 28 ++++++++++++++++++++++++++++ net/sunrpc/xprtrdma/svc_rdma_rw.c | 2 +- 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index aca9d0f3d7690..d6da6b8d521de 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -1637,6 +1637,34 @@ TRACE_EVENT(svcrdma_no_rwctx_err, ) ); +TRACE_EVENT(svcrdma_page_overrun_err, + TP_PROTO( + const struct svcxprt_rdma *rdma, + const struct svc_rqst *rqst, + unsigned int pageno + ), + + TP_ARGS(rdma, rqst, pageno), + + TP_STRUCT__entry( + __field(unsigned int, pageno) + __field(u32, xid) + __string(device, rdma->sc_cm_id->device->name) + __string(addr, rdma->sc_xprt.xpt_remotebuf) + ), + + TP_fast_assign( + __entry->pageno = pageno; + __entry->xid = __be32_to_cpu(rqst->rq_xid); + __assign_str(device, rdma->sc_cm_id->device->name); + __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); + ), + + TP_printk("addr=%s device=%s xid=0x%08x pageno=%u", __get_str(addr), + __get_str(device), __entry->xid, __entry->pageno + ) +); + TRACE_EVENT(svcrdma_send_pullup, TP_PROTO( unsigned int len diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c index c2d49f607cfe8..17098a11d2ade 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_rw.c +++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c @@ -676,7 +676,7 @@ static int svc_rdma_build_read_segment(struct svc_rdma_read_info *info, return 0; out_overrun: - dprintk("svcrdma: request overruns rq_pages\n"); + trace_svcrdma_page_overrun_err(cc->cc_rdma, rqstp, info->ri_pageno); return -EINVAL; } -- GitLab From dbc17acd5d42be457c7311c141f993d9ba5be014 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Fri, 20 Mar 2020 14:36:44 -0400 Subject: [PATCH 0521/1971] svcrdma: trace undersized Write chunks Clean up: Replace a dprintk call site. This is the last remaining dprintk call site in svc_rdma_rw.c, so remove dprintk infrastructure as well. 
Signed-off-by: Chuck Lever --- include/trace/events/rpcrdma.h | 32 +++++++++++++++++++++++++++++++ net/sunrpc/xprtrdma/svc_rdma_rw.c | 7 ++----- 2 files changed, 34 insertions(+), 5 deletions(-) diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index d6da6b8d521de..c046b198072ad 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -1665,6 +1665,38 @@ TRACE_EVENT(svcrdma_page_overrun_err, ) ); +TRACE_EVENT(svcrdma_small_wrch_err, + TP_PROTO( + const struct svcxprt_rdma *rdma, + unsigned int remaining, + unsigned int seg_no, + unsigned int num_segs + ), + + TP_ARGS(rdma, remaining, seg_no, num_segs), + + TP_STRUCT__entry( + __field(unsigned int, remaining) + __field(unsigned int, seg_no) + __field(unsigned int, num_segs) + __string(device, rdma->sc_cm_id->device->name) + __string(addr, rdma->sc_xprt.xpt_remotebuf) + ), + + TP_fast_assign( + __entry->remaining = remaining; + __entry->seg_no = seg_no; + __entry->num_segs = num_segs; + __assign_str(device, rdma->sc_cm_id->device->name); + __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); + ), + + TP_printk("addr=%s device=%s remaining=%u seg_no=%u num_segs=%u", + __get_str(addr), __get_str(device), __entry->remaining, + __entry->seg_no, __entry->num_segs + ) +); + TRACE_EVENT(svcrdma_send_pullup, TP_PROTO( unsigned int len diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c index 17098a11d2ade..5eb35309ecef1 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_rw.c +++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c @@ -9,13 +9,10 @@ #include #include -#include #include "xprt_rdma.h" #include -#define RPCDBG_FACILITY RPCDBG_SVCXPRT - static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc); static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc); @@ -484,8 +481,8 @@ svc_rdma_build_writes(struct svc_rdma_write_info *info, return 0; out_overflow: - dprintk("svcrdma: inadequate space in Write chunk (%u)\n", - info->wi_nsegs); + trace_svcrdma_small_wrch_err(rdma, remaining, info->wi_seg_no, + info->wi_nsegs); return -E2BIG; } -- GitLab From ea740bd5f58e2912e74f401fd01a9d6aa985ca05 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Fri, 20 Mar 2020 17:32:41 -0400 Subject: [PATCH 0522/1971] svcrdma: Fix backchannel return code Way back when I was writing the RPC/RDMA server-side backchannel code, I misread the TCP backchannel reply handler logic. When svc_tcp_recvfrom() successfully receives a backchannel reply, it does not return -EAGAIN. It sets XPT_DATA and returns zero. Update svc_rdma_recvfrom() to return zero. Here, XPT_DATA doesn't need to be set again: it is set whenever a new message is received, behind a spin lock in a single threaded context. Also, if handling the cb reply is not successful, the message is simply dropped. There's no special message framing to deal with as there is in the TCP case. Now that the handle_bc_reply() return value is ignored, I've removed the dprintk call sites in the error exit of handle_bc_reply() in favor of trace points in other areas that already report the error cases. 
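For a backchannel reply, the receive path is therefore equivalent to the following condensed sketch (taken from the svc_rdma_recvfrom() hunk below; this is not the complete function):

        if (svc_rdma_is_backchannel_reply(xprt, p)) {
                svc_rdma_handle_bc_reply(rqstp, ctxt);
                svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
                return 0;       /* XPT_DATA is already set; no special framing to unwind */
        }
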
Signed-off-by: Chuck Lever --- include/linux/sunrpc/svc_rdma.h | 5 ++- net/sunrpc/xprtrdma/svc_rdma_backchannel.c | 38 ++++++---------------- net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 11 +++---- 3 files changed, 17 insertions(+), 37 deletions(-) diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index cbcfbd0521e37..8518c3f37e560 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -160,9 +160,8 @@ struct svc_rdma_send_ctxt { }; /* svc_rdma_backchannel.c */ -extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, - __be32 *rdma_resp, - struct xdr_buf *rcvbuf); +extern void svc_rdma_handle_bc_reply(struct svc_rqst *rqstp, + struct svc_rdma_recv_ctxt *rctxt); /* svc_rdma_recvfrom.c */ extern void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma); diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c index d9aab4504f2c5..b174f3b109a59 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c +++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c @@ -15,26 +15,25 @@ #undef SVCRDMA_BACKCHANNEL_DEBUG /** - * svc_rdma_handle_bc_reply - Process incoming backchannel reply - * @xprt: controlling backchannel transport - * @rdma_resp: pointer to incoming transport header - * @rcvbuf: XDR buffer into which to decode the reply + * svc_rdma_handle_bc_reply - Process incoming backchannel Reply + * @rqstp: resources for handling the Reply + * @rctxt: Received message * - * Returns: - * %0 if @rcvbuf is filled in, xprt_complete_rqst called, - * %-EAGAIN if server should call ->recvfrom again. */ -int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp, - struct xdr_buf *rcvbuf) +void svc_rdma_handle_bc_reply(struct svc_rqst *rqstp, + struct svc_rdma_recv_ctxt *rctxt) { + struct svc_xprt *sxprt = rqstp->rq_xprt; + struct rpc_xprt *xprt = sxprt->xpt_bc_xprt; struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); + struct xdr_buf *rcvbuf = &rqstp->rq_arg; struct kvec *dst, *src = &rcvbuf->head[0]; + __be32 *rdma_resp = rctxt->rc_recv_buf; struct rpc_rqst *req; u32 credits; size_t len; __be32 xid; __be32 *p; - int ret; p = (__be32 *)src->iov_base; len = src->iov_len; @@ -49,14 +48,10 @@ int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp, __func__, (int)len, p); #endif - ret = -EAGAIN; - if (src->iov_len < 24) - goto out_shortreply; - spin_lock(&xprt->queue_lock); req = xprt_lookup_rqst(xprt, xid); if (!req) - goto out_notfound; + goto out_unlock; dst = &req->rq_private_buf.head[0]; memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(struct xdr_buf)); @@ -77,25 +72,12 @@ int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp, spin_unlock(&xprt->transport_lock); spin_lock(&xprt->queue_lock); - ret = 0; xprt_complete_rqst(req->rq_task, rcvbuf->len); xprt_unpin_rqst(req); rcvbuf->len = 0; out_unlock: spin_unlock(&xprt->queue_lock); -out: - return ret; - -out_shortreply: - dprintk("svcrdma: short bc reply: xprt=%p, len=%zu\n", - xprt, src->iov_len); - goto out; - -out_notfound: - dprintk("svcrdma: unrecognized bc reply: xprt=%p, xid=%08x\n", - xprt, be32_to_cpu(xid)); - goto out_unlock; } /* Send a backwards direction RPC call. 
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index efa5fcb5793f7..eee7c6478b30a 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c @@ -878,12 +878,9 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp) goto out_drop; rqstp->rq_xprt_hlen = ret; - if (svc_rdma_is_backchannel_reply(xprt, p)) { - ret = svc_rdma_handle_bc_reply(xprt->xpt_bc_xprt, p, - &rqstp->rq_arg); - svc_rdma_recv_ctxt_put(rdma_xprt, ctxt); - return ret; - } + if (svc_rdma_is_backchannel_reply(xprt, p)) + goto out_backchannel; + svc_rdma_get_inv_rkey(rdma_xprt, ctxt); p += rpcrdma_fixed_maxsz; @@ -913,6 +910,8 @@ out_postfail: svc_rdma_recv_ctxt_put(rdma_xprt, ctxt); return ret; +out_backchannel: + svc_rdma_handle_bc_reply(rqstp, ctxt); out_drop: svc_rdma_recv_ctxt_put(rdma_xprt, ctxt); return 0; -- GitLab From f5046b8f43d180d6f4c9129a750b98f9a503de2d Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Fri, 20 Mar 2020 15:11:56 -0400 Subject: [PATCH 0523/1971] svcrdma: Remove backchannel dprintk call sites Clean up. Signed-off-by: Chuck Lever --- net/sunrpc/xprtrdma/svc_rdma_backchannel.c | 48 +++------------------- 1 file changed, 5 insertions(+), 43 deletions(-) diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c index b174f3b109a59..1ee73f7cf9315 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c +++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c @@ -10,10 +10,6 @@ #include "xprt_rdma.h" #include -#define RPCDBG_FACILITY RPCDBG_SVCXPRT - -#undef SVCRDMA_BACKCHANNEL_DEBUG - /** * svc_rdma_handle_bc_reply - Process incoming backchannel Reply * @rqstp: resources for handling the Reply @@ -31,33 +27,17 @@ void svc_rdma_handle_bc_reply(struct svc_rqst *rqstp, __be32 *rdma_resp = rctxt->rc_recv_buf; struct rpc_rqst *req; u32 credits; - size_t len; - __be32 xid; - __be32 *p; - - p = (__be32 *)src->iov_base; - len = src->iov_len; - xid = *rdma_resp; - -#ifdef SVCRDMA_BACKCHANNEL_DEBUG - pr_info("%s: xid=%08x, length=%zu\n", - __func__, be32_to_cpu(xid), len); - pr_info("%s: RPC/RDMA: %*ph\n", - __func__, (int)RPCRDMA_HDRLEN_MIN, rdma_resp); - pr_info("%s: RPC: %*ph\n", - __func__, (int)len, p); -#endif spin_lock(&xprt->queue_lock); - req = xprt_lookup_rqst(xprt, xid); + req = xprt_lookup_rqst(xprt, *rdma_resp); if (!req) goto out_unlock; dst = &req->rq_private_buf.head[0]; memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(struct xdr_buf)); - if (dst->iov_len < len) + if (dst->iov_len < src->iov_len) goto out_unlock; - memcpy(dst->iov_base, p, len); + memcpy(dst->iov_base, src->iov_base, src->iov_len); xprt_pin_rqst(req); spin_unlock(&xprt->queue_lock); @@ -66,7 +46,6 @@ void svc_rdma_handle_bc_reply(struct svc_rqst *rqstp, credits = 1; /* don't deadlock */ else if (credits > r_xprt->rx_buf.rb_bc_max_requests) credits = r_xprt->rx_buf.rb_bc_max_requests; - spin_lock(&xprt->transport_lock); xprt->cwnd = credits << RPC_CWNDSHIFT; spin_unlock(&xprt->transport_lock); @@ -174,10 +153,6 @@ rpcrdma_bc_send_request(struct svcxprt_rdma *rdma, struct rpc_rqst *rqst) *p++ = xdr_zero; *p = xdr_zero; -#ifdef SVCRDMA_BACKCHANNEL_DEBUG - pr_info("%s: %*ph\n", __func__, 64, rqst->rq_buffer); -#endif - rqst->rq_xtime = ktime_get(); rc = svc_rdma_bc_sendto(rdma, rqst, ctxt); if (rc) @@ -188,7 +163,6 @@ put_ctxt: svc_rdma_send_ctxt_put(rdma, ctxt); drop_connection: - dprintk("svcrdma: failed to send bc call\n"); return -ENOTCONN; } @@ -207,9 +181,6 @@ static int xprt_rdma_bc_send_request(struct rpc_rqst 
*rqst) container_of(sxprt, struct svcxprt_rdma, sc_xprt); int ret; - dprintk("svcrdma: sending bc call with xid: %08x\n", - be32_to_cpu(rqst->rq_xid)); - if (test_bit(XPT_DEAD, &sxprt->xpt_flags)) return -ENOTCONN; @@ -222,8 +193,6 @@ static int xprt_rdma_bc_send_request(struct rpc_rqst *rqst) static void xprt_rdma_bc_close(struct rpc_xprt *xprt) { - dprintk("svcrdma: %s: xprt %p\n", __func__, xprt); - xprt_disconnect_done(xprt); xprt->cwnd = RPC_CWNDSHIFT; } @@ -231,8 +200,6 @@ xprt_rdma_bc_close(struct rpc_xprt *xprt) static void xprt_rdma_bc_put(struct rpc_xprt *xprt) { - dprintk("svcrdma: %s: xprt %p\n", __func__, xprt); - xprt_rdma_free_addresses(xprt); xprt_free(xprt); } @@ -267,19 +234,14 @@ xprt_setup_rdma_bc(struct xprt_create *args) struct rpc_xprt *xprt; struct rpcrdma_xprt *new_xprt; - if (args->addrlen > sizeof(xprt->addr)) { - dprintk("RPC: %s: address too large\n", __func__); + if (args->addrlen > sizeof(xprt->addr)) return ERR_PTR(-EBADF); - } xprt = xprt_alloc(args->net, sizeof(*new_xprt), RPCRDMA_MAX_BC_REQUESTS, RPCRDMA_MAX_BC_REQUESTS); - if (!xprt) { - dprintk("RPC: %s: couldn't allocate rpc_xprt\n", - __func__); + if (!xprt) return ERR_PTR(-ENOMEM); - } xprt->timeout = &xprt_rdma_bc_timeout; xprt_set_bound(xprt); -- GitLab From 27ce6294445aebe314ff3baefea7c720b50fcc8a Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 23 Mar 2020 16:33:16 -0400 Subject: [PATCH 0524/1971] svcrdma: Rename tracepoints that record header decoding errors Clean up: Use a consistent naming convention so that these trace points can be enabled quickly via a glob. Signed-off-by: Chuck Lever --- include/trace/events/rpcrdma.h | 5 +++-- net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 10 +++++----- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index c046b198072ad..53b24c8c78601 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -1355,7 +1355,7 @@ TRACE_EVENT(svcrdma_decode_rqst, show_rpcrdma_proc(__entry->proc), __entry->hdrlen) ); -TRACE_EVENT(svcrdma_decode_short, +TRACE_EVENT(svcrdma_decode_short_err, TP_PROTO( unsigned int hdrlen ), @@ -1399,7 +1399,8 @@ DECLARE_EVENT_CLASS(svcrdma_badreq_event, ); #define DEFINE_BADREQ_EVENT(name) \ - DEFINE_EVENT(svcrdma_badreq_event, svcrdma_decode_##name,\ + DEFINE_EVENT(svcrdma_badreq_event, \ + svcrdma_decode_##name##_err, \ TP_PROTO( \ __be32 *p \ ), \ diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index eee7c6478b30a..e426fedb9524f 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c @@ -665,23 +665,23 @@ static int svc_rdma_xdr_decode_req(struct xdr_buf *rq_arg, return hdr_len; out_short: - trace_svcrdma_decode_short(rq_arg->len); + trace_svcrdma_decode_short_err(rq_arg->len); return -EINVAL; out_version: - trace_svcrdma_decode_badvers(rdma_argp); + trace_svcrdma_decode_badvers_err(rdma_argp); return -EPROTONOSUPPORT; out_drop: - trace_svcrdma_decode_drop(rdma_argp); + trace_svcrdma_decode_drop_err(rdma_argp); return 0; out_proc: - trace_svcrdma_decode_badproc(rdma_argp); + trace_svcrdma_decode_badproc_err(rdma_argp); return -EINVAL; out_inval: - trace_svcrdma_decode_parse(rdma_argp); + trace_svcrdma_decode_parse_err(rdma_argp); return -EINVAL; } -- GitLab From 08e3c9f181bfb78d842c4f432888116d66e07c2f Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Wed, 29 Apr 2020 16:00:12 -0400 Subject: [PATCH 0525/1971] svcrdma: Remove the SVCRDMA_DEBUG macro 
Clean up: Commit d21b05f101ae ("rdma: SVCRMDA Header File") introduced the SVCRDMA_DEBUG macro, but it doesn't seem to have been used. Signed-off-by: Chuck Lever --- include/linux/sunrpc/svc_rdma.h | 1 - 1 file changed, 1 deletion(-) diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 8518c3f37e560..7ed82625dc0be 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -48,7 +48,6 @@ #include #include #include -#define SVCRDMA_DEBUG /* Default and maximum inline threshold sizes */ enum { -- GitLab From decc13f7eb258b8c5c564ec1f11a24f14af275f8 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Sat, 2 May 2020 10:42:47 -0400 Subject: [PATCH 0526/1971] svcrdma: Displayed remote IP address should match stored address Clean up: After commit 1e091c3bbf51 ("svcrdma: Ignore source port when computing DRC hash"), the IP address stored in xpt_remote always has a port number of zero. Thus, there's no need to display the port number when displaying the IP address of a remote NFS/RDMA client. Signed-off-by: Chuck Lever --- net/sunrpc/xprtrdma/svc_rdma_transport.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index ea54785db4f8f..0a1125277a488 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c @@ -211,7 +211,12 @@ static void handle_connect_req(struct rdma_cm_id *new_cma_id, newxprt->sc_ord = param->initiator_depth; sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr; - svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa)); + newxprt->sc_xprt.xpt_remotelen = svc_addr_len(sa); + memcpy(&newxprt->sc_xprt.xpt_remote, sa, + newxprt->sc_xprt.xpt_remotelen); + snprintf(newxprt->sc_xprt.xpt_remotebuf, + sizeof(newxprt->sc_xprt.xpt_remotebuf) - 1, "%pISc", sa); + /* The remote port is arbitrary and not under the control of the * client ULP. Set it to a fixed value so that the DRC continues * to be effective after a reconnect. -- GitLab From e979a173a0b8dc1a41bc3194e34b92d79c049ad3 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 30 Apr 2020 15:25:01 -0400 Subject: [PATCH 0527/1971] svcrdma: Add tracepoints to report ->xpo_accept failures Failure to accept a connection is typically due to a problem specific to a transport type. Also, ->xpo_accept returns NULL on error rather than reporting a specific problem. So, add failure-specific tracepoints in svc_rdma_accept(). 
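Each failure site gets its own event stamped out of one event class, so covering another error path later takes only one more DEFINE_ACCEPT_EVENT() line plus a call at the failing site. A sketch of that pattern follows; the "cq" event is a hypothetical example and is not added by this patch (the CQ allocation failures below still just jump to errout):

        DEFINE_ACCEPT_EVENT(cq);        /* hypothetical: would generate trace_svcrdma_cq_err() */

        newxprt->sc_sq_cq = ib_alloc_cq_any(dev, newxprt, newxprt->sc_sq_depth,
                                            IB_POLL_WORKQUEUE);
        if (IS_ERR(newxprt->sc_sq_cq)) {
                trace_svcrdma_cq_err(newxprt, PTR_ERR(newxprt->sc_sq_cq));
                goto errout;
        }
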
Signed-off-by: Chuck Lever --- include/trace/events/rpcrdma.h | 38 +++++++++++++++++++++++- net/sunrpc/xprtrdma/svc_rdma_transport.c | 27 +++++++---------- 2 files changed, 48 insertions(+), 17 deletions(-) diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index 53b24c8c78601..79ef2ab7743cc 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -1309,9 +1309,45 @@ DECLARE_EVENT_CLASS(svcrdma_xprt_event, TP_ARGS(xprt)) DEFINE_XPRT_EVENT(accept); -DEFINE_XPRT_EVENT(fail); DEFINE_XPRT_EVENT(free); +DECLARE_EVENT_CLASS(svcrdma_accept_class, + TP_PROTO( + const struct svcxprt_rdma *rdma, + long status + ), + + TP_ARGS(rdma, status), + + TP_STRUCT__entry( + __field(long, status) + __string(addr, rdma->sc_xprt.xpt_remotebuf) + ), + + TP_fast_assign( + __entry->status = status; + __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); + ), + + TP_printk("addr=%s status=%ld", + __get_str(addr), __entry->status + ) +); + +#define DEFINE_ACCEPT_EVENT(name) \ + DEFINE_EVENT(svcrdma_accept_class, svcrdma_##name##_err, \ + TP_PROTO( \ + const struct svcxprt_rdma *rdma, \ + long status \ + ), \ + TP_ARGS(rdma, status)) + +DEFINE_ACCEPT_EVENT(pd); +DEFINE_ACCEPT_EVENT(qp); +DEFINE_ACCEPT_EVENT(fabric); +DEFINE_ACCEPT_EVENT(initdepth); +DEFINE_ACCEPT_EVENT(accept); + TRACE_DEFINE_ENUM(RDMA_MSG); TRACE_DEFINE_ENUM(RDMA_NOMSG); TRACE_DEFINE_ENUM(RDMA_MSGP); diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index 0a1125277a488..f3b5ad2bec2f9 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c @@ -410,9 +410,6 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) if (!newxprt) return NULL; - dprintk("svcrdma: newxprt from accept queue = %p, cm_id=%p\n", - newxprt, newxprt->sc_cm_id); - dev = newxprt->sc_cm_id->device; newxprt->sc_port_num = newxprt->sc_cm_id->port_num; @@ -448,21 +445,17 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) newxprt->sc_pd = ib_alloc_pd(dev, 0); if (IS_ERR(newxprt->sc_pd)) { - dprintk("svcrdma: error creating PD for connect request\n"); + trace_svcrdma_pd_err(newxprt, PTR_ERR(newxprt->sc_pd)); goto errout; } newxprt->sc_sq_cq = ib_alloc_cq_any(dev, newxprt, newxprt->sc_sq_depth, IB_POLL_WORKQUEUE); - if (IS_ERR(newxprt->sc_sq_cq)) { - dprintk("svcrdma: error creating SQ CQ for connect request\n"); + if (IS_ERR(newxprt->sc_sq_cq)) goto errout; - } newxprt->sc_rq_cq = ib_alloc_cq_any(dev, newxprt, rq_depth, IB_POLL_WORKQUEUE); - if (IS_ERR(newxprt->sc_rq_cq)) { - dprintk("svcrdma: error creating RQ CQ for connect request\n"); + if (IS_ERR(newxprt->sc_rq_cq)) goto errout; - } memset(&qp_attr, 0, sizeof qp_attr); qp_attr.event_handler = qp_event_handler; @@ -486,7 +479,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr); if (ret) { - dprintk("svcrdma: failed to create QP, ret=%d\n", ret); + trace_svcrdma_qp_err(newxprt, ret); goto errout; } newxprt->sc_qp = newxprt->sc_cm_id->qp; @@ -494,8 +487,10 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) if (!(dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)) newxprt->sc_snd_w_inv = false; if (!rdma_protocol_iwarp(dev, newxprt->sc_port_num) && - !rdma_ib_or_roce(dev, newxprt->sc_port_num)) + !rdma_ib_or_roce(dev, newxprt->sc_port_num)) { + trace_svcrdma_fabric_err(newxprt, -EINVAL); goto errout; + } if (!svc_rdma_post_recvs(newxprt)) goto errout; @@ -517,15 +512,17 
@@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) conn_param.initiator_depth = min_t(int, newxprt->sc_ord, dev->attrs.max_qp_init_rd_atom); if (!conn_param.initiator_depth) { - dprintk("svcrdma: invalid ORD setting\n"); ret = -EINVAL; + trace_svcrdma_initdepth_err(newxprt, ret); goto errout; } conn_param.private_data = &pmsg; conn_param.private_data_len = sizeof(pmsg); ret = rdma_accept(newxprt->sc_cm_id, &conn_param); - if (ret) + if (ret) { + trace_svcrdma_accept_err(newxprt, ret); goto errout; + } #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) dprintk("svcrdma: new connection %p accepted:\n", newxprt); @@ -544,8 +541,6 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) return &newxprt->sc_xprt; errout: - dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret); - trace_svcrdma_xprt_fail(&newxprt->sc_xprt); /* Take a reference in case the DTO handler runs */ svc_xprt_get(&newxprt->sc_xprt); if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp)) -- GitLab From e3e67621e307920c338393d9a33ccb902354ca31 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 28 Apr 2020 17:38:13 -0400 Subject: [PATCH 0528/1971] SUNRPC: Remove kernel memory address from svc_xprt tracepoints Clean up: The xprt=%p was meant to distinguish events from different transports, but the addr=%s does that just as well and does not expose kernel memory addresses. Signed-off-by: Chuck Lever --- include/trace/events/sunrpc.h | 27 +++++++-------------------- 1 file changed, 7 insertions(+), 20 deletions(-) diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index ffd2215950dcb..53f2461cf552b 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -1173,22 +1173,19 @@ TRACE_EVENT(svc_xprt_do_enqueue, TP_ARGS(xprt, rqst), TP_STRUCT__entry( - __field(struct svc_xprt *, xprt) __field(int, pid) __field(unsigned long, flags) __string(addr, xprt->xpt_remotebuf) ), TP_fast_assign( - __entry->xprt = xprt; __entry->pid = rqst? 
rqst->rq_task->pid : 0; __entry->flags = xprt->xpt_flags; __assign_str(addr, xprt->xpt_remotebuf); ), - TP_printk("xprt=%p addr=%s pid=%d flags=%s", - __entry->xprt, __get_str(addr), - __entry->pid, show_svc_xprt_flags(__entry->flags)) + TP_printk("addr=%s pid=%d flags=%s", __get_str(addr), + __entry->pid, show_svc_xprt_flags(__entry->flags)) ); DECLARE_EVENT_CLASS(svc_xprt_event, @@ -1197,20 +1194,17 @@ DECLARE_EVENT_CLASS(svc_xprt_event, TP_ARGS(xprt), TP_STRUCT__entry( - __field(struct svc_xprt *, xprt) __field(unsigned long, flags) __string(addr, xprt->xpt_remotebuf) ), TP_fast_assign( - __entry->xprt = xprt; __entry->flags = xprt->xpt_flags; __assign_str(addr, xprt->xpt_remotebuf); ), - TP_printk("xprt=%p addr=%s flags=%s", - __entry->xprt, __get_str(addr), - show_svc_xprt_flags(__entry->flags)) + TP_printk("addr=%s flags=%s", __get_str(addr), + show_svc_xprt_flags(__entry->flags)) ); DEFINE_EVENT(svc_xprt_event, svc_xprt_no_write_space, @@ -1223,24 +1217,20 @@ TRACE_EVENT(svc_xprt_dequeue, TP_ARGS(rqst), TP_STRUCT__entry( - __field(struct svc_xprt *, xprt) __field(unsigned long, flags) __field(unsigned long, wakeup) __string(addr, rqst->rq_xprt->xpt_remotebuf) ), TP_fast_assign( - __entry->xprt = rqst->rq_xprt; __entry->flags = rqst->rq_xprt->xpt_flags; __entry->wakeup = ktime_to_us(ktime_sub(ktime_get(), rqst->rq_qtime)); __assign_str(addr, rqst->rq_xprt->xpt_remotebuf); ), - TP_printk("xprt=%p addr=%s flags=%s wakeup-us=%lu", - __entry->xprt, __get_str(addr), - show_svc_xprt_flags(__entry->flags), - __entry->wakeup) + TP_printk("addr=%s flags=%s wakeup-us=%lu", __get_str(addr), + show_svc_xprt_flags(__entry->flags), __entry->wakeup) ); TRACE_EVENT(svc_wake_up, @@ -1265,21 +1255,18 @@ TRACE_EVENT(svc_handle_xprt, TP_ARGS(xprt, len), TP_STRUCT__entry( - __field(struct svc_xprt *, xprt) __field(int, len) __field(unsigned long, flags) __string(addr, xprt->xpt_remotebuf) ), TP_fast_assign( - __entry->xprt = xprt; __entry->len = len; __entry->flags = xprt->xpt_flags; __assign_str(addr, xprt->xpt_remotebuf); ), - TP_printk("xprt=%p addr=%s len=%d flags=%s", - __entry->xprt, __get_str(addr), + TP_printk("addr=%s len=%d flags=%s", __get_str(addr), __entry->len, show_svc_xprt_flags(__entry->flags)) ); -- GitLab From 4b8f380e46e4d3f8e9f6f9545fe2b78b872b6070 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 28 Apr 2020 17:13:32 -0400 Subject: [PATCH 0529/1971] SUNRPC: Tracepoint to record errors in svc_xpo_create() Capture transport creation failures. 
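The hook sits in the generic __svc_xpo_create() path, so every registered transport class reports creation failures the same way. Condensed from the hunk below, for illustration only:

        xprt = xcl->xcl_ops->xpo_create(serv, net, sap, len, flags);
        if (IS_ERR(xprt))
                trace_svc_xprt_create_err(serv->sv_program->pg_name,
                                          xcl->xcl_name, sap, xprt);
        return xprt;
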
Signed-off-by: Chuck Lever --- include/trace/events/sunrpc.h | 29 +++++++++++++++++++++++++++++ net/sunrpc/svc_xprt.c | 7 ++++++- 2 files changed, 35 insertions(+), 1 deletion(-) diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index 53f2461cf552b..f3296ed2b7531 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -1167,6 +1167,35 @@ DEFINE_EVENT(svc_rqst_status, svc_send, { (1UL << XPT_KILL_TEMP), "XPT_KILL_TEMP"}, \ { (1UL << XPT_CONG_CTRL), "XPT_CONG_CTRL"}) +TRACE_EVENT(svc_xprt_create_err, + TP_PROTO( + const char *program, + const char *protocol, + struct sockaddr *sap, + const struct svc_xprt *xprt + ), + + TP_ARGS(program, protocol, sap, xprt), + + TP_STRUCT__entry( + __field(long, error) + __string(program, program) + __string(protocol, protocol) + __array(unsigned char, addr, sizeof(struct sockaddr_in6)) + ), + + TP_fast_assign( + __entry->error = PTR_ERR(xprt); + __assign_str(program, program); + __assign_str(protocol, protocol); + memcpy(__entry->addr, sap, sizeof(__entry->addr)); + ), + + TP_printk("addr=%pISpc program=%s protocol=%s error=%ld", + __entry->addr, __get_str(program), __get_str(protocol), + __entry->error) +); + TRACE_EVENT(svc_xprt_do_enqueue, TP_PROTO(struct svc_xprt *xprt, struct svc_rqst *rqst), diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index 07cdbf7d57648..f89e04210a483 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -206,6 +206,7 @@ static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl, .sin6_port = htons(port), }; #endif + struct svc_xprt *xprt; struct sockaddr *sap; size_t len; @@ -224,7 +225,11 @@ static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl, return ERR_PTR(-EAFNOSUPPORT); } - return xcl->xcl_ops->xpo_create(serv, net, sap, len, flags); + xprt = xcl->xcl_ops->xpo_create(serv, net, sap, len, flags); + if (IS_ERR(xprt)) + trace_svc_xprt_create_err(serv->sv_program->pg_name, + xcl->xcl_name, sap, xprt); + return xprt; } /* -- GitLab From 11bbb0f76e995cb617f582e7a4ec6cb8f6daf910 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 17 Mar 2020 17:41:43 -0400 Subject: [PATCH 0530/1971] SUNRPC: Trace a few more generic svc_xprt events In lieu of dprintks or tracepoints in each individual transport implementation, introduce tracepoints in the generic part of the RPC layer. These typically fire for connection lifetime events, so shouldn't contribute a lot of noise. 
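For orientation, these are the generic lifetime events wired up by this patch, collected from the hunks below; the comments name the functions that emit them:

        trace_svc_xprt_accept(newxpt, serv->sv_name);   /* svc_handle_xprt(), new connection */
        trace_svc_xprt_close(xprt);                     /* svc_close_xprt() */
        trace_svc_xprt_detach(xprt);                    /* svc_delete_xprt(), before ->xpo_detach */
        trace_svc_xprt_free(xprt);                      /* svc_xprt_free(), before ->xpo_free */
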
Signed-off-by: Chuck Lever --- include/trace/events/rpcrdma.h | 32 ------------------- include/trace/events/sunrpc.h | 39 ++++++++++++++++++++++-- net/sunrpc/svc_xprt.c | 22 +++---------- net/sunrpc/svcsock.c | 12 -------- net/sunrpc/xprtrdma/svc_rdma_transport.c | 21 +++---------- 5 files changed, 45 insertions(+), 81 deletions(-) diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index 79ef2ab7743cc..bdcde7d33f14c 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -1279,38 +1279,6 @@ TRACE_EVENT(xprtrdma_leaked_rep, ** Server-side RPC/RDMA events **/ -DECLARE_EVENT_CLASS(svcrdma_xprt_event, - TP_PROTO( - const struct svc_xprt *xprt - ), - - TP_ARGS(xprt), - - TP_STRUCT__entry( - __field(const void *, xprt) - __string(addr, xprt->xpt_remotebuf) - ), - - TP_fast_assign( - __entry->xprt = xprt; - __assign_str(addr, xprt->xpt_remotebuf); - ), - - TP_printk("xprt=%p addr=%s", - __entry->xprt, __get_str(addr) - ) -); - -#define DEFINE_XPRT_EVENT(name) \ - DEFINE_EVENT(svcrdma_xprt_event, svcrdma_xprt_##name, \ - TP_PROTO( \ - const struct svc_xprt *xprt \ - ), \ - TP_ARGS(xprt)) - -DEFINE_XPRT_EVENT(accept); -DEFINE_XPRT_EVENT(free); - DECLARE_EVENT_CLASS(svcrdma_accept_class, TP_PROTO( const struct svcxprt_rdma *rdma, diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index f3296ed2b7531..d40ec8f5c220c 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -1236,9 +1236,42 @@ DECLARE_EVENT_CLASS(svc_xprt_event, show_svc_xprt_flags(__entry->flags)) ); -DEFINE_EVENT(svc_xprt_event, svc_xprt_no_write_space, - TP_PROTO(struct svc_xprt *xprt), - TP_ARGS(xprt)); +#define DEFINE_SVC_XPRT_EVENT(name) \ + DEFINE_EVENT(svc_xprt_event, svc_xprt_##name, \ + TP_PROTO( \ + struct svc_xprt *xprt \ + ), \ + TP_ARGS(xprt)) + +DEFINE_SVC_XPRT_EVENT(no_write_space); +DEFINE_SVC_XPRT_EVENT(close); +DEFINE_SVC_XPRT_EVENT(detach); +DEFINE_SVC_XPRT_EVENT(free); + +TRACE_EVENT(svc_xprt_accept, + TP_PROTO( + const struct svc_xprt *xprt, + const char *service + ), + + TP_ARGS(xprt, service), + + TP_STRUCT__entry( + __string(addr, xprt->xpt_remotebuf) + __string(protocol, xprt->xpt_class->xcl_name) + __string(service, service) + ), + + TP_fast_assign( + __assign_str(addr, xprt->xpt_remotebuf); + __assign_str(protocol, xprt->xpt_class->xcl_name) + __assign_str(service, service); + ), + + TP_printk("addr=%s protocol=%s service=%s", + __get_str(addr), __get_str(protocol), __get_str(service) + ) +); TRACE_EVENT(svc_xprt_dequeue, TP_PROTO(struct svc_rqst *rqst), diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index f89e04210a483..0a546ef02ddea 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -153,6 +153,7 @@ static void svc_xprt_free(struct kref *kref) xprt_put(xprt->xpt_bc_xprt); if (xprt->xpt_bc_xps) xprt_switch_put(xprt->xpt_bc_xps); + trace_svc_xprt_free(xprt); xprt->xpt_ops->xpo_free(xprt); module_put(owner); } @@ -309,15 +310,11 @@ int svc_create_xprt(struct svc_serv *serv, const char *xprt_name, { int err; - dprintk("svc: creating transport %s[%d]\n", xprt_name, port); err = _svc_create_xprt(serv, xprt_name, net, family, port, flags, cred); if (err == -EPROTONOSUPPORT) { request_module("svc%s", xprt_name); err = _svc_create_xprt(serv, xprt_name, net, family, port, flags, cred); } - if (err < 0) - dprintk("svc: transport %s not found, err %d\n", - xprt_name, -err); return err; } EXPORT_SYMBOL_GPL(svc_create_xprt); @@ -785,7 +782,6 @@ static int svc_handle_xprt(struct svc_rqst 
*rqstp, struct svc_xprt *xprt) int len = 0; if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) { - dprintk("svc_recv: found XPT_CLOSE\n"); if (test_and_clear_bit(XPT_KILL_TEMP, &xprt->xpt_flags)) xprt->xpt_ops->xpo_kill_temp_xprt(xprt); svc_delete_xprt(xprt); @@ -804,6 +800,7 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt) if (newxpt) { newxpt->xpt_cred = get_cred(xprt->xpt_cred); svc_add_new_temp_xprt(serv, newxpt); + trace_svc_xprt_accept(newxpt, serv->sv_name); } else module_put(xprt->xpt_class->xcl_owner); } else if (svc_xprt_reserve_slot(rqstp, xprt)) { @@ -840,14 +837,6 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) struct svc_serv *serv = rqstp->rq_server; int len, err; - dprintk("svc: server %p waiting for data (to = %ld)\n", - rqstp, timeout); - - if (rqstp->rq_xprt) - printk(KERN_ERR - "svc_recv: service %p, transport not NULL!\n", - rqstp); - err = svc_alloc_arg(rqstp); if (err) goto out; @@ -895,7 +884,6 @@ EXPORT_SYMBOL_GPL(svc_recv); void svc_drop(struct svc_rqst *rqstp) { trace_svc_drop(rqstp); - dprintk("svc: xprt %p dropped request\n", rqstp->rq_xprt); svc_xprt_release(rqstp); } EXPORT_SYMBOL_GPL(svc_drop); @@ -1030,11 +1018,10 @@ static void svc_delete_xprt(struct svc_xprt *xprt) struct svc_serv *serv = xprt->xpt_server; struct svc_deferred_req *dr; - /* Only do this once */ if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags)) - BUG(); + return; - dprintk("svc: svc_delete_xprt(%p)\n", xprt); + trace_svc_xprt_detach(xprt); xprt->xpt_ops->xpo_detach(xprt); if (xprt->xpt_bc_xprt) xprt->xpt_bc_xprt->ops->close(xprt->xpt_bc_xprt); @@ -1055,6 +1042,7 @@ static void svc_delete_xprt(struct svc_xprt *xprt) void svc_close_xprt(struct svc_xprt *xprt) { + trace_svc_xprt_close(xprt); set_bit(XPT_CLOSE, &xprt->xpt_flags); if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) /* someone else will have to effect the close */ diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 3e7b6445e3172..cf4bd198c19d9 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -727,7 +727,6 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt) int err, slen; RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]); - dprintk("svc: tcp_accept %p sock %p\n", svsk, sock); if (!sock) return NULL; @@ -1363,11 +1362,6 @@ static struct svc_xprt *svc_create_socket(struct svc_serv *serv, int newlen; int family; int val; - RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]); - - dprintk("svc: svc_create_socket(%s, %d, %s)\n", - serv->sv_program->pg_name, protocol, - __svc_print_addr(sin, buf, sizeof(buf))); if (protocol != IPPROTO_UDP && protocol != IPPROTO_TCP) { printk(KERN_WARNING "svc: only UDP and TCP " @@ -1427,7 +1421,6 @@ static struct svc_xprt *svc_create_socket(struct svc_serv *serv, svc_xprt_set_local(&svsk->sk_xprt, newsin, newlen); return (struct svc_xprt *)svsk; bummer: - dprintk("svc: svc_create_socket error = %d\n", -error); sock_release(sock); return ERR_PTR(error); } @@ -1441,8 +1434,6 @@ static void svc_sock_detach(struct svc_xprt *xprt) struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt); struct sock *sk = svsk->sk_sk; - dprintk("svc: svc_sock_detach(%p)\n", svsk); - /* put back the old socket callbacks */ lock_sock(sk); sk->sk_state_change = svsk->sk_ostate; @@ -1459,8 +1450,6 @@ static void svc_tcp_sock_detach(struct svc_xprt *xprt) { struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt); - dprintk("svc: svc_tcp_sock_detach(%p)\n", svsk); - svc_sock_detach(xprt); if (!test_bit(XPT_LISTENER, &xprt->xpt_flags)) { @@ -1475,7 +1464,6 @@ 
static void svc_tcp_sock_detach(struct svc_xprt *xprt) static void svc_sock_free(struct svc_xprt *xprt) { struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt); - dprintk("svc: svc_sock_free(%p)\n", svsk); if (svsk->sk_sock->file) sockfd_put(svsk->sk_sock); diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index f3b5ad2bec2f9..d38be57b00edf 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c @@ -314,11 +314,8 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv, struct svcxprt_rdma *cma_xprt; int ret; - dprintk("svcrdma: Creating RDMA listener\n"); - if ((sa->sa_family != AF_INET) && (sa->sa_family != AF_INET6)) { - dprintk("svcrdma: Address family %d is not supported.\n", sa->sa_family); + if (sa->sa_family != AF_INET && sa->sa_family != AF_INET6) return ERR_PTR(-EAFNOSUPPORT); - } cma_xprt = svc_rdma_create_xprt(serv, net); if (!cma_xprt) return ERR_PTR(-ENOMEM); @@ -329,7 +326,6 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv, RDMA_PS_TCP, IB_QPT_RC); if (IS_ERR(listen_id)) { ret = PTR_ERR(listen_id); - dprintk("svcrdma: rdma_create_id failed = %d\n", ret); goto err0; } @@ -338,23 +334,17 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv, */ #if IS_ENABLED(CONFIG_IPV6) ret = rdma_set_afonly(listen_id, 1); - if (ret) { - dprintk("svcrdma: rdma_set_afonly failed = %d\n", ret); + if (ret) goto err1; - } #endif ret = rdma_bind_addr(listen_id, sa); - if (ret) { - dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret); + if (ret) goto err1; - } cma_xprt->sc_cm_id = listen_id; ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG); - if (ret) { - dprintk("svcrdma: rdma_listen failed = %d\n", ret); + if (ret) goto err1; - } /* * We need to use the address from the cm_id in case the @@ -537,7 +527,6 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) dprintk(" ord : %d\n", conn_param.initiator_depth); #endif - trace_svcrdma_xprt_accept(&newxprt->sc_xprt); return &newxprt->sc_xprt; errout: @@ -578,8 +567,6 @@ static void __svc_rdma_free(struct work_struct *work) container_of(work, struct svcxprt_rdma, sc_work); struct svc_xprt *xprt = &rdma->sc_xprt; - trace_svcrdma_xprt_free(xprt); - if (rdma->sc_qp && !IS_ERR(rdma->sc_qp)) ib_drain_qp(rdma->sc_qp); -- GitLab From d998882b4b1b08e72f4414d8eb6647af72bffe2f Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Fri, 1 May 2020 10:40:47 -0400 Subject: [PATCH 0531/1971] SUNRPC: Remove "#include " Clean up: Commit 850cbaddb52d ("udp: use it's own memory accounting schema") removed the last skb-related tracepoint from svcsock.c, so it is no longer necessary to include trace/events/skb.h. Signed-off-by: Chuck Lever --- net/sunrpc/svcsock.c | 1 - 1 file changed, 1 deletion(-) diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index cf4bd198c19d9..1c4d0aacc5318 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -45,7 +45,6 @@ #include #include #include -#include #include #include -- GitLab From 998024dee197944a7018a0bdc85b83b569ddec22 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 17 Mar 2020 15:06:31 -0400 Subject: [PATCH 0532/1971] SUNRPC: Add more svcsock tracepoints In addition to tracing recently-updated socket sendto events, this commit adds a trace event class that can be used for additional svcsock-related tracepoints in subsequent patches. 
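Minting an additional event from the new class is a one-liner plus a call at the site of interest. The sketch below shows the intended pattern; the "marker_recv" name is hypothetical and does not exist in this series:

        DEFINE_SVCSOCK_EVENT(marker_recv);      /* hypothetical: generates trace_svcsock_marker_recv() */

        /* ...then, at the code path being instrumented: */
        trace_svcsock_marker_recv(&svsk->sk_xprt, ret);

Because the class records the remote address, the xpt_flags, and a signed result, a single event can report either a negative errno or a byte count, which is how trace_svcsock_tcp_send() passes "err < 0 ? err : sent" below.
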
Signed-off-by: Chuck Lever --- include/trace/events/sunrpc.h | 97 +++++++++++++++++++++++++++++++++++ net/sunrpc/svcsock.c | 30 ++++------- 2 files changed, 107 insertions(+), 20 deletions(-) diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index d40ec8f5c220c..bf086640b14a7 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -14,6 +14,39 @@ #include #include +TRACE_DEFINE_ENUM(SOCK_STREAM); +TRACE_DEFINE_ENUM(SOCK_DGRAM); +TRACE_DEFINE_ENUM(SOCK_RAW); +TRACE_DEFINE_ENUM(SOCK_RDM); +TRACE_DEFINE_ENUM(SOCK_SEQPACKET); +TRACE_DEFINE_ENUM(SOCK_DCCP); +TRACE_DEFINE_ENUM(SOCK_PACKET); + +#define show_socket_type(type) \ + __print_symbolic(type, \ + { SOCK_STREAM, "STREAM" }, \ + { SOCK_DGRAM, "DGRAM" }, \ + { SOCK_RAW, "RAW" }, \ + { SOCK_RDM, "RDM" }, \ + { SOCK_SEQPACKET, "SEQPACKET" }, \ + { SOCK_DCCP, "DCCP" }, \ + { SOCK_PACKET, "PACKET" }) + +/* This list is known to be incomplete, add new enums as needed. */ +TRACE_DEFINE_ENUM(AF_UNSPEC); +TRACE_DEFINE_ENUM(AF_UNIX); +TRACE_DEFINE_ENUM(AF_LOCAL); +TRACE_DEFINE_ENUM(AF_INET); +TRACE_DEFINE_ENUM(AF_INET6); + +#define rpc_show_address_family(family) \ + __print_symbolic(family, \ + { AF_UNSPEC, "AF_UNSPEC" }, \ + { AF_UNIX, "AF_UNIX" }, \ + { AF_LOCAL, "AF_LOCAL" }, \ + { AF_INET, "AF_INET" }, \ + { AF_INET6, "AF_INET6" }) + DECLARE_EVENT_CLASS(xdr_buf_class, TP_PROTO( const struct xdr_buf *xdr @@ -1384,6 +1417,70 @@ DECLARE_EVENT_CLASS(svc_deferred_event, DEFINE_SVC_DEFERRED_EVENT(drop); DEFINE_SVC_DEFERRED_EVENT(revisit); +TRACE_EVENT(svcsock_new_socket, + TP_PROTO( + const struct socket *socket + ), + + TP_ARGS(socket), + + TP_STRUCT__entry( + __field(unsigned long, type) + __field(unsigned long, family) + __field(bool, listener) + ), + + TP_fast_assign( + __entry->type = socket->type; + __entry->family = socket->sk->sk_family; + __entry->listener = (socket->sk->sk_state == TCP_LISTEN); + ), + + TP_printk("type=%s family=%s%s", + show_socket_type(__entry->type), + rpc_show_address_family(__entry->family), + __entry->listener ? " (listener)" : "" + ) +); + +DECLARE_EVENT_CLASS(svcsock_class, + TP_PROTO( + const struct svc_xprt *xprt, + ssize_t result + ), + + TP_ARGS(xprt, result), + + TP_STRUCT__entry( + __field(ssize_t, result) + __field(unsigned long, flags) + __string(addr, xprt->xpt_remotebuf) + ), + + TP_fast_assign( + __entry->result = result; + __entry->flags = xprt->xpt_flags; + __assign_str(addr, xprt->xpt_remotebuf); + ), + + TP_printk("addr=%s result=%zd flags=%s", __get_str(addr), + __entry->result, show_svc_xprt_flags(__entry->flags) + ) +); + +#define DEFINE_SVCSOCK_EVENT(name) \ + DEFINE_EVENT(svcsock_class, svcsock_##name, \ + TP_PROTO( \ + const struct svc_xprt *xprt, \ + ssize_t result \ + ), \ + TP_ARGS(xprt, result)) + +DEFINE_SVCSOCK_EVENT(udp_send); +DEFINE_SVCSOCK_EVENT(tcp_send); +DEFINE_SVCSOCK_EVENT(data_ready); +DEFINE_SVCSOCK_EVENT(write_space); + DECLARE_EVENT_CLASS(cache_event, TP_PROTO( const struct cache_detail *cd, diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 1c4d0aacc5318..758b835ad4ce4 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -54,6 +54,8 @@ #include #include +#include + #include "socklib.h" #include "sunrpc.h" @@ -281,13 +283,10 @@ static void svc_data_ready(struct sock *sk) struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; if (svsk) { - dprintk("svc: socket %p(inet %p), busy=%d\n", - svsk, sk, - test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags)); - /* Refer to svc_setup_socket() for details. 
*/ rmb(); svsk->sk_odata(sk); + trace_svcsock_data_ready(&svsk->sk_xprt, 0); if (!test_and_set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags)) svc_xprt_enqueue(&svsk->sk_xprt); } @@ -301,11 +300,9 @@ static void svc_write_space(struct sock *sk) struct svc_sock *svsk = (struct svc_sock *)(sk->sk_user_data); if (svsk) { - dprintk("svc: socket %p(inet %p), write_space busy=%d\n", - svsk, sk, test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags)); - /* Refer to svc_setup_socket() for details. */ rmb(); + trace_svcsock_write_space(&svsk->sk_xprt, 0); svsk->sk_owspace(sk); svc_xprt_enqueue(&svsk->sk_xprt); } @@ -545,6 +542,7 @@ static int svc_udp_sendto(struct svc_rqst *rqstp) err = xprt_sock_sendmsg(svsk->sk_sock, &msg, xdr, 0, 0, &sent); xdr_free_bvec(xdr); } + trace_svcsock_udp_send(xprt, err); mutex_unlock(&xprt->xpt_mutex); if (err < 0) @@ -616,7 +614,7 @@ static struct svc_xprt_class svc_udp_class = { static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv) { - int err, level, optname, one = 1; + int level, optname, one = 1; svc_xprt_init(sock_net(svsk->sk_sock->sk), &svc_udp_class, &svsk->sk_xprt, serv); @@ -647,9 +645,8 @@ static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv) default: BUG(); } - err = kernel_setsockopt(svsk->sk_sock, level, optname, - (char *)&one, sizeof(one)); - dprintk("svc: kernel_setsockopt returned %d\n", err); + kernel_setsockopt(svsk->sk_sock, level, optname, (char *)&one, + sizeof(one)); } /* @@ -1100,6 +1097,7 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp) goto out_notconn; err = xprt_sock_sendmsg(svsk->sk_sock, &msg, xdr, 0, marker, &sent); xdr_free_bvec(xdr); + trace_svcsock_tcp_send(xprt, err < 0 ? err : sent); if (err < 0 || sent != (xdr->len + sizeof(marker))) goto out_close; mutex_unlock(&xprt->xpt_mutex); @@ -1170,13 +1168,11 @@ static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv) set_bit(XPT_CACHE_AUTH, &svsk->sk_xprt.xpt_flags); set_bit(XPT_CONG_CTRL, &svsk->sk_xprt.xpt_flags); if (sk->sk_state == TCP_LISTEN) { - dprintk("setting up TCP socket for listening\n"); strcpy(svsk->sk_xprt.xpt_remotebuf, "listener"); set_bit(XPT_LISTENER, &svsk->sk_xprt.xpt_flags); sk->sk_data_ready = svc_tcp_listen_data_ready; set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags); } else { - dprintk("setting up TCP socket for reading\n"); sk->sk_state_change = svc_tcp_state_change; sk->sk_data_ready = svc_data_ready; sk->sk_write_space = svc_write_space; @@ -1226,7 +1222,6 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv, int pmap_register = !(flags & SVC_SOCK_ANONYMOUS); int err = 0; - dprintk("svc: svc_setup_socket %p\n", sock); svsk = kzalloc(sizeof(*svsk), GFP_KERNEL); if (!svsk) return ERR_PTR(-ENOMEM); @@ -1263,12 +1258,7 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv, else svc_tcp_init(svsk, serv); - dprintk("svc: svc_setup_socket created %p (inet %p), " - "listen %d close %d\n", - svsk, svsk->sk_sk, - test_bit(XPT_LISTENER, &svsk->sk_xprt.xpt_flags), - test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags)); - + trace_svcsock_new_socket(sock); return svsk; } -- GitLab From a0469f46faab786e8ec9f8c8526a185357b38772 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Wed, 18 Mar 2020 11:20:50 -0400 Subject: [PATCH 0533/1971] SUNRPC: Replace dprintk call sites in TCP state change callouts Report TCP socket state changes and accept failures via tracepoints, replacing dprintk() call sites. No tracepoint is added in svc_tcp_listen_data_ready. 
There's no information available there that isn't also reported by the svcsock_new_socket and the accept failure tracepoints. Signed-off-by: Chuck Lever --- include/trace/events/sunrpc.h | 67 +++++++++++++++++++++++++++++++++++ net/sunrpc/svcsock.c | 35 ++++-------------- 2 files changed, 73 insertions(+), 29 deletions(-) diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index bf086640b14a7..ed8c991d4f043 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -1481,6 +1481,73 @@ DEFINE_SVCSOCK_EVENT(tcp_send); DEFINE_SVCSOCK_EVENT(data_ready); DEFINE_SVCSOCK_EVENT(write_space); +TRACE_EVENT(svcsock_tcp_state, + TP_PROTO( + const struct svc_xprt *xprt, + const struct socket *socket + ), + + TP_ARGS(xprt, socket), + + TP_STRUCT__entry( + __field(unsigned long, socket_state) + __field(unsigned long, sock_state) + __field(unsigned long, flags) + __string(addr, xprt->xpt_remotebuf) + ), + + TP_fast_assign( + __entry->socket_state = socket->state; + __entry->sock_state = socket->sk->sk_state; + __entry->flags = xprt->xpt_flags; + __assign_str(addr, xprt->xpt_remotebuf); + ), + + TP_printk("addr=%s state=%s sk_state=%s flags=%s", __get_str(addr), + rpc_show_socket_state(__entry->socket_state), + rpc_show_sock_state(__entry->sock_state), + show_svc_xprt_flags(__entry->flags) + ) +); + +DECLARE_EVENT_CLASS(svcsock_accept_class, + TP_PROTO( + const struct svc_xprt *xprt, + const char *service, + long status + ), + + TP_ARGS(xprt, service, status), + + TP_STRUCT__entry( + __field(long, status) + __string(service, service) + __array(unsigned char, addr, sizeof(struct sockaddr_in6)) + ), + + TP_fast_assign( + __entry->status = status; + __assign_str(service, service); + memcpy(__entry->addr, &xprt->xpt_local, sizeof(__entry->addr)); + ), + + TP_printk("listener=%pISpc service=%s status=%ld", + __entry->addr, __get_str(service), __entry->status + ) +); + +#define DEFINE_ACCEPT_EVENT(name) \ + DEFINE_EVENT(svcsock_accept_class, svcsock_##name##_err, \ + TP_PROTO( \ + const struct svc_xprt *xprt, \ + const char *service, \ + long status \ + ), \ + TP_ARGS(xprt, service, status)) + +DEFINE_ACCEPT_EVENT(accept); +DEFINE_ACCEPT_EVENT(getpeername); + DECLARE_EVENT_CLASS(cache_event, TP_PROTO( const struct cache_detail *cd, diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 758b835ad4ce4..4ac1180c63063 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -657,9 +657,6 @@ static void svc_tcp_listen_data_ready(struct sock *sk) { struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; - dprintk("svc: socket %p TCP (listen) state change %d\n", - sk, sk->sk_state); - if (svsk) { /* Refer to svc_setup_socket() for details. */ rmb(); @@ -680,8 +677,7 @@ static void svc_tcp_listen_data_ready(struct sock *sk) if (svsk) { set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags); svc_xprt_enqueue(&svsk->sk_xprt); - } else - printk("svc: socket %p: no user data\n", sk); + } } } @@ -692,15 +688,11 @@ static void svc_tcp_state_change(struct sock *sk) { struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; - dprintk("svc: socket %p TCP (connected) state change %d (svsk %p)\n", - sk, sk->sk_state, sk->sk_user_data); - - if (!svsk) - printk("svc: socket %p: no user data\n", sk); - else { + if (svsk) { /* Refer to svc_setup_socket() for details. 
*/ rmb(); svsk->sk_ostate(sk); + trace_svcsock_tcp_state(&svsk->sk_xprt, svsk->sk_sock); if (sk->sk_state != TCP_ESTABLISHED) { set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); svc_xprt_enqueue(&svsk->sk_xprt); @@ -721,7 +713,6 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt) struct socket *newsock; struct svc_sock *newsvsk; int err, slen; - RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]); if (!sock) return NULL; @@ -735,30 +726,18 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt) else if (err != -EAGAIN) net_warn_ratelimited("%s: accept failed (err %d)!\n", serv->sv_name, -err); + trace_svcsock_accept_err(xprt, serv->sv_name, err); return NULL; } set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags); err = kernel_getpeername(newsock, sin); if (err < 0) { - net_warn_ratelimited("%s: peername failed (err %d)!\n", - serv->sv_name, -err); + trace_svcsock_getpeername_err(xprt, serv->sv_name, err); goto failed; /* aborted connection or whatever */ } slen = err; - /* Ideally, we would want to reject connections from unauthorized - * hosts here, but when we get encryption, the IP of the host won't - * tell us anything. For now just warn about unpriv connections. - */ - if (!svc_port_is_privileged(sin)) { - dprintk("%s: connect from unprivileged port: %s\n", - serv->sv_name, - __svc_print_addr(sin, buf, sizeof(buf))); - } - dprintk("%s: connect from %s\n", serv->sv_name, - __svc_print_addr(sin, buf, sizeof(buf))); - /* Reset the inherited callbacks before calling svc_setup_socket */ newsock->sk->sk_state_change = svsk->sk_ostate; newsock->sk->sk_data_ready = svsk->sk_odata; @@ -776,10 +755,8 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt) svc_xprt_set_remote(&newsvsk->sk_xprt, sin, slen); err = kernel_getsockname(newsock, sin); slen = err; - if (unlikely(err < 0)) { - dprintk("svc_tcp_accept: kernel_getsockname error %d\n", -err); + if (unlikely(err < 0)) slen = offsetof(struct sockaddr, sa_data); - } svc_xprt_set_local(&newsvsk->sk_xprt, sin, slen); if (sock_is_loopback(newsock->sk)) -- GitLab From b4af59328c25bdd585bc1da14f25d8ba4ebc616c Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Sat, 2 May 2020 11:59:37 -0400 Subject: [PATCH 0534/1971] SUNRPC: Trace server-side rpcbind registration events Signed-off-by: Chuck Lever --- include/trace/events/sunrpc.h | 80 +++++++++++++++++++++++++++++++++++ net/sunrpc/svc.c | 19 ++------- 2 files changed, 84 insertions(+), 15 deletions(-) diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index ed8c991d4f043..6d85bbb7b8b12 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -1581,6 +1581,86 @@ DEFINE_CACHE_EVENT(cache_entry_update); DEFINE_CACHE_EVENT(cache_entry_make_negative); DEFINE_CACHE_EVENT(cache_entry_no_listener); +DECLARE_EVENT_CLASS(register_class, + TP_PROTO( + const char *program, + const u32 version, + const int family, + const unsigned short protocol, + const unsigned short port, + int error + ), + + TP_ARGS(program, version, family, protocol, port, error), + + TP_STRUCT__entry( + __field(u32, version) + __field(unsigned long, family) + __field(unsigned short, protocol) + __field(unsigned short, port) + __field(int, error) + __string(program, program) + ), + + TP_fast_assign( + __entry->version = version; + __entry->family = family; + __entry->protocol = protocol; + __entry->port = port; + __entry->error = error; + __assign_str(program, program); + ), + + TP_printk("program=%sv%u proto=%s port=%u family=%s error=%d", + __get_str(program), __entry->version, + 
__entry->protocol == IPPROTO_UDP ? "udp" : "tcp", + __entry->port, rpc_show_address_family(__entry->family), + __entry->error + ) +); + +#define DEFINE_REGISTER_EVENT(name) \ + DEFINE_EVENT(register_class, svc_##name, \ + TP_PROTO( \ + const char *program, \ + const u32 version, \ + const int family, \ + const unsigned short protocol, \ + const unsigned short port, \ + int error \ + ), \ + TP_ARGS(program, version, family, protocol, \ + port, error)) + +DEFINE_REGISTER_EVENT(register); +DEFINE_REGISTER_EVENT(noregister); + +TRACE_EVENT(svc_unregister, + TP_PROTO( + const char *program, + const u32 version, + int error + ), + + TP_ARGS(program, version, error), + + TP_STRUCT__entry( + __field(u32, version) + __field(int, error) + __string(program, program) + ), + + TP_fast_assign( + __entry->version = version; + __entry->error = error; + __assign_str(program, program); + ), + + TP_printk("program=%sv%u error=%d", + __get_str(program), __entry->version, __entry->error + ) +); + #endif /* _TRACE_SUNRPC_H */ #include diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 9ed3126600ce8..3e74d61ca7da8 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -991,6 +991,7 @@ static int __svc_register(struct net *net, const char *progname, #endif } + trace_svc_register(progname, version, protocol, port, family, error); return error; } @@ -1000,11 +1001,6 @@ int svc_rpcbind_set_version(struct net *net, unsigned short proto, unsigned short port) { - dprintk("svc: svc_register(%sv%d, %s, %u, %u)\n", - progp->pg_name, version, - proto == IPPROTO_UDP? "udp" : "tcp", - port, family); - return __svc_register(net, progp->pg_name, progp->pg_prog, version, family, proto, port); @@ -1024,11 +1020,8 @@ int svc_generic_rpcbind_set(struct net *net, return 0; if (vers->vs_hidden) { - dprintk("svc: svc_register(%sv%d, %s, %u, %u)" - " (but not telling portmap)\n", - progp->pg_name, version, - proto == IPPROTO_UDP? "udp" : "tcp", - port, family); + trace_svc_noregister(progp->pg_name, version, proto, + port, family, 0); return 0; } @@ -1106,8 +1099,7 @@ static void __svc_unregister(struct net *net, const u32 program, const u32 versi if (error == -EPROTONOSUPPORT) error = rpcb_register(net, program, version, 0, 0); - dprintk("svc: %s(%sv%u), error %d\n", - __func__, progname, version, error); + trace_svc_unregister(progname, version, error); } /* @@ -1132,9 +1124,6 @@ static void svc_unregister(const struct svc_serv *serv, struct net *net) continue; if (progp->pg_vers[i]->vs_hidden) continue; - - dprintk("svc: attempting to unregister %sv%u\n", - progp->pg_name, i); __svc_unregister(net, progp->pg_prog, i, progp->pg_name); } } -- GitLab From 02648908d19a99532b0839959e38ae53d95d2798 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 17 Mar 2020 14:12:15 -0400 Subject: [PATCH 0535/1971] SUNRPC: Rename svc_sock::sk_reclen Clean up. I find the name of the svc_sock::sk_reclen field confusing, so I've changed it to better reflect its function. This field is not read directly to get the record length. Rather, it is a buffer containing a record marker that needs to be decoded. 
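As an illustration of what decoding the marker involves (not part of the patch itself): the marker is a big-endian 32-bit word whose top bit flags the final fragment of a record and whose low 31 bits carry the fragment length. The userspace sketch below mirrors the svc_sock_reclen()/svc_sock_final_rec() helpers changed below; the two mask values are assumptions based on standard RPC-over-TCP record marking rather than copied from the kernel's sunrpc headers.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>          /* ntohl(), htonl() */

/* Assumed to match the kernel's definitions of these constants. */
#define RPC_LAST_STREAM_FRAGMENT  0x80000000u  /* top bit: last fragment */
#define RPC_FRAGMENT_SIZE_MASK    0x7fffffffu  /* low 31 bits: length */

/* Undecoded, on-the-wire marker, analogous to svc_sock::sk_marker. */
struct example_sock {
        uint32_t sk_marker;
};

static uint32_t example_reclen(const struct example_sock *s)
{
        return ntohl(s->sk_marker) & RPC_FRAGMENT_SIZE_MASK;
}

static bool example_final_rec(const struct example_sock *s)
{
        return ntohl(s->sk_marker) & RPC_LAST_STREAM_FRAGMENT;
}

int main(void)
{
        /* 0x80000200: final fragment carrying 512 bytes of payload. */
        struct example_sock s = { .sk_marker = htonl(0x80000200u) };

        printf("len=%u final=%d\n", example_reclen(&s), example_final_rec(&s));
        return 0;
}

Holding the raw __be32 and decoding it on demand is why "marker" describes the field better than "reclen".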
Signed-off-by: Chuck Lever --- include/linux/sunrpc/svcsock.h | 6 +++--- net/sunrpc/svcsock.c | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h index 771baadaee9d6..b7ac7fe683067 100644 --- a/include/linux/sunrpc/svcsock.h +++ b/include/linux/sunrpc/svcsock.h @@ -28,7 +28,7 @@ struct svc_sock { /* private TCP part */ /* On-the-wire fragment header: */ - __be32 sk_reclen; + __be32 sk_marker; /* As we receive a record, this includes the length received so * far (including the fragment header): */ u32 sk_tcplen; @@ -41,12 +41,12 @@ struct svc_sock { static inline u32 svc_sock_reclen(struct svc_sock *svsk) { - return ntohl(svsk->sk_reclen) & RPC_FRAGMENT_SIZE_MASK; + return be32_to_cpu(svsk->sk_marker) & RPC_FRAGMENT_SIZE_MASK; } static inline u32 svc_sock_final_rec(struct svc_sock *svsk) { - return ntohl(svsk->sk_reclen) & RPC_LAST_STREAM_FRAGMENT; + return be32_to_cpu(svsk->sk_marker) & RPC_LAST_STREAM_FRAGMENT; } /* diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 4ac1180c63063..d63b21f3f207f 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -841,7 +841,7 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp) struct kvec iov; want = sizeof(rpc_fraghdr) - svsk->sk_tcplen; - iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen; + iov.iov_base = ((char *)&svsk->sk_marker) + svsk->sk_tcplen; iov.iov_len = want; len = svc_recvfrom(rqstp, &iov, 1, want, 0); if (len < 0) @@ -938,7 +938,7 @@ static void svc_tcp_fragment_received(struct svc_sock *svsk) svc_sock_final_rec(svsk) ? "final" : "nonfinal", svc_sock_reclen(svsk)); svsk->sk_tcplen = 0; - svsk->sk_reclen = 0; + svsk->sk_marker = xdr_zero; } /* @@ -1154,7 +1154,7 @@ static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv) sk->sk_data_ready = svc_data_ready; sk->sk_write_space = svc_write_space; - svsk->sk_reclen = 0; + svsk->sk_marker = xdr_zero; svsk->sk_tcplen = 0; svsk->sk_datalen = 0; memset(&svsk->sk_pages[0], 0, sizeof(svsk->sk_pages)); -- GitLab From a5cda73e49aaaac58b25750f4b591c0dc9726a44 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 16 Mar 2020 14:53:04 -0400 Subject: [PATCH 0536/1971] SUNRPC: Restructure svc_tcp_recv_record() Refactor: svc_recvfrom() is going to be converted to read into bio_vecs in a moment. Unhook the only other caller, svc_tcp_recv_record(), which just wants to read the 4-byte stream record marker into a kvec. While we're in the area, streamline this helper by straight-lining the hot path, replace dprintk call sites with tracepoints, and reduce the number of atomic bit operations in this path. 
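The hot path here reduces to a common pattern: accumulate a fixed-size header across nonblocking reads and report -EAGAIN until it is complete. A rough userspace analogue follows, as an illustration only; the kernel code below does the same thing with a kvec, iov_iter_kvec() and sock_recvmsg(MSG_DONTWAIT), tracking progress in sk_tcplen.

#include <errno.h>
#include <stdint.h>
#include <unistd.h>

struct marker_state {
        uint32_t marker;        /* big-endian record marker being assembled */
        unsigned int have;      /* bytes received so far, like sk_tcplen */
};

/* One attempt on a nonblocking fd; the caller retries when readable again. */
static ssize_t read_marker_once(int fd, struct marker_state *st)
{
        char *p = (char *)&st->marker + st->have;
        ssize_t n;

        if (st->have >= sizeof(st->marker))
                return sizeof(st->marker);      /* already complete */

        n = read(fd, p, sizeof(st->marker) - st->have);
        if (n < 0)
                return errno == EAGAIN ? -EAGAIN : -errno;
        if (n == 0)
                return -ECONNRESET;             /* peer closed mid-header */

        st->have += n;
        if (st->have < sizeof(st->marker))
                return -EAGAIN;                 /* short read: come back later */

        return sizeof(st->marker);              /* complete; decode with ntohl() */
}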
Signed-off-by: Chuck Lever --- include/trace/events/sunrpc.h | 24 +++++++++++++++++ net/sunrpc/svcsock.c | 51 +++++++++++++++++------------------ 2 files changed, 49 insertions(+), 26 deletions(-) diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index 6d85bbb7b8b12..ec4ae34a1f84c 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -1443,6 +1443,30 @@ TRACE_EVENT(svcsock_new_socket, ) ); +TRACE_EVENT(svcsock_marker, + TP_PROTO( + const struct svc_xprt *xprt, + __be32 marker + ), + + TP_ARGS(xprt, marker), + + TP_STRUCT__entry( + __field(unsigned int, length) + __field(bool, last) + __string(addr, xprt->xpt_remotebuf) + ), + + TP_fast_assign( + __entry->length = be32_to_cpu(marker) & RPC_FRAGMENT_SIZE_MASK; + __entry->last = be32_to_cpu(marker) & RPC_LAST_STREAM_FRAGMENT; + __assign_str(addr, xprt->xpt_remotebuf); + ), + + TP_printk("addr=%s length=%u%s", __get_str(addr), + __entry->length, __entry->last ? " (last)" : "") +); + DECLARE_EVENT_CLASS(svcsock_class, TP_PROTO( const struct svc_xprt *xprt, diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index d63b21f3f207f..9c1eb13aa9b82 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -828,47 +828,45 @@ out: } /* - * Receive fragment record header. - * If we haven't gotten the record length yet, get the next four bytes. + * Receive fragment record header into sk_marker. */ -static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp) +static ssize_t svc_tcp_read_marker(struct svc_sock *svsk, + struct svc_rqst *rqstp) { - struct svc_serv *serv = svsk->sk_xprt.xpt_server; - unsigned int want; - int len; + ssize_t want, len; + /* If we haven't gotten the record length yet, + * get the next four bytes. + */ if (svsk->sk_tcplen < sizeof(rpc_fraghdr)) { + struct msghdr msg = { NULL }; struct kvec iov; want = sizeof(rpc_fraghdr) - svsk->sk_tcplen; iov.iov_base = ((char *)&svsk->sk_marker) + svsk->sk_tcplen; iov.iov_len = want; - len = svc_recvfrom(rqstp, &iov, 1, want, 0); + iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, want); + len = sock_recvmsg(svsk->sk_sock, &msg, MSG_DONTWAIT); if (len < 0) - goto error; + return len; svsk->sk_tcplen += len; - if (len < want) { - dprintk("svc: short recvfrom while reading record " - "length (%d of %d)\n", len, want); - return -EAGAIN; + /* call again to read the remaining bytes */ + goto err_short; } - - dprintk("svc: TCP record, %d bytes\n", svc_sock_reclen(svsk)); + trace_svcsock_marker(&svsk->sk_xprt, svsk->sk_marker); if (svc_sock_reclen(svsk) + svsk->sk_datalen > - serv->sv_max_mesg) { - net_notice_ratelimited("RPC: fragment too large: %d\n", - svc_sock_reclen(svsk)); - goto err_delete; - } + svsk->sk_xprt.xpt_server->sv_max_mesg) + goto err_too_large; } - return svc_sock_reclen(svsk); -error: - dprintk("RPC: TCP recv_record got %d\n", len); - return len; -err_delete: + +err_too_large: + net_notice_ratelimited("svc: %s %s RPC fragment too large: %d\n", + __func__, svsk->sk_xprt.xpt_server->sv_name, + svc_sock_reclen(svsk)); set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); +err_short: return -EAGAIN; } @@ -961,12 +959,13 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp) test_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags), test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags)); - len = svc_tcp_recv_record(svsk, rqstp); + clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); + len = svc_tcp_read_marker(svsk, rqstp); if (len < 0) goto error; base = svc_tcp_restore_pages(svsk, rqstp); - want = svc_sock_reclen(svsk) - (svsk->sk_tcplen - 
sizeof(rpc_fraghdr)); + want = len - (svsk->sk_tcplen - sizeof(rpc_fraghdr)); vec = rqstp->rq_vec; -- GitLab From 1ae18f71cb522684bac1718f5c188fb5e30eb23d Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Fri, 15 May 2020 17:20:50 -0700 Subject: [PATCH 0537/1971] f2fs: fix checkpoint=disable:%u%% When parsing the mount option, we don't have sbi->user_block_count. Should do it after getting it. Cc: Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/f2fs.h | 1 + fs/f2fs/super.c | 25 +++++++++++++++++++------ 2 files changed, 20 insertions(+), 6 deletions(-) diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 51863e4f5d4e0..fb180020e175c 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -139,6 +139,7 @@ struct f2fs_mount_info { int fs_mode; /* fs mode: LFS or ADAPTIVE */ int bggc_mode; /* bggc mode: off, on or sync */ bool test_dummy_encryption; /* test dummy encryption */ + block_t unusable_cap_perc; /* percentage for cap */ block_t unusable_cap; /* Amount of space allowed to be * unusable when disabling checkpoint */ diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 441eaaf9739c4..a71da699cb2d5 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -284,6 +284,22 @@ static inline void limit_reserve_root(struct f2fs_sb_info *sbi) F2FS_OPTION(sbi).s_resgid)); } +static inline void adjust_unusable_cap_perc(struct f2fs_sb_info *sbi) +{ + if (!F2FS_OPTION(sbi).unusable_cap_perc) + return; + + if (F2FS_OPTION(sbi).unusable_cap_perc == 100) + F2FS_OPTION(sbi).unusable_cap = sbi->user_block_count; + else + F2FS_OPTION(sbi).unusable_cap = (sbi->user_block_count / 100) * + F2FS_OPTION(sbi).unusable_cap_perc; + + f2fs_info(sbi, "Adjust unusable cap for checkpoint=disable = %u / %u%%", + F2FS_OPTION(sbi).unusable_cap, + F2FS_OPTION(sbi).unusable_cap_perc); +} + static void init_once(void *foo) { struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo; @@ -785,12 +801,7 @@ static int parse_options(struct super_block *sb, char *options) return -EINVAL; if (arg < 0 || arg > 100) return -EINVAL; - if (arg == 100) - F2FS_OPTION(sbi).unusable_cap = - sbi->user_block_count; - else - F2FS_OPTION(sbi).unusable_cap = - (sbi->user_block_count / 100) * arg; + F2FS_OPTION(sbi).unusable_cap_perc = arg; set_opt(sbi, DISABLE_CHECKPOINT); break; case Opt_checkpoint_disable_cap: @@ -1840,6 +1851,7 @@ skip: (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0); limit_reserve_root(sbi); + adjust_unusable_cap_perc(sbi); *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME); return 0; restore_gc: @@ -3516,6 +3528,7 @@ try_onemore: sbi->reserved_blocks = 0; sbi->current_reserved_blocks = 0; limit_reserve_root(sbi); + adjust_unusable_cap_perc(sbi); for (i = 0; i < NR_INODE_TYPE; i++) { INIT_LIST_HEAD(&sbi->inode_list[i]); -- GitLab From 9c30df7c5a304e383592587cb2a41bb2ab0ac80d Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Wed, 13 May 2020 21:12:53 -0700 Subject: [PATCH 0538/1971] f2fs: flush dirty meta pages when flushing them Let's guarantee flusing dirty meta pages to avoid infinite loop. 
Signed-off-by: Jaegeuk Kim --- fs/f2fs/checkpoint.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 620a386d82c1a..3dc3ac6fe1432 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -1266,6 +1266,9 @@ void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type) if (unlikely(f2fs_cp_error(sbi))) break; + if (type == F2FS_DIRTY_META) + f2fs_sync_meta_pages(sbi, META, LONG_MAX, + FS_CP_META_IO); io_schedule_timeout(DEFAULT_IO_TIMEOUT); } finish_wait(&sbi->cp_wait, &wait); -- GitLab From d0ac7079d5fcaaaf3eb99c197a08ac1b399754f4 Mon Sep 17 00:00:00 2001 From: Jeff LaBundy Date: Mon, 18 May 2020 11:32:18 -0700 Subject: [PATCH 0539/1971] Input: iqs269a - add missing I2C dependency If CONFIG_INPUT_IQS269A is selected yet CONFIG_I2C is not, the build fails. To solve this problem, add I2C as a dependency. Signed-off-by: Jeff LaBundy Reported-by: kbuild test robot Fixes: 04e49867fad1 ("Input: add support for Azoteq IQS269A") Link: https://lore.kernel.org/r/1589809466-22748-1-git-send-email-jeff@labundy.com Signed-off-by: Dmitry Torokhov --- drivers/input/misc/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index 943cf69b5b733..362e8a01980cd 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig @@ -720,6 +720,7 @@ config INPUT_IMS_PCU config INPUT_IQS269A tristate "Azoteq IQS269A capacitive touch controller" + depends on I2C select REGMAP_I2C help Say Y to enable support for the Azoteq IQS269A capacitive -- GitLab From 333cff6c963fbc8b9820ca2b6a8b2e22a572cd43 Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Wed, 29 Apr 2020 12:36:39 +0200 Subject: [PATCH 0540/1971] powercap/drivers/idle_inject: Specify idle state max latency Currently the idle injection framework uses the play_idle() function which puts the current CPU in an idle state. The idle state is the deepest one, as specified by the latency constraint when calling the subsequent play_idle_precise() function with the INT_MAX. The idle_injection is used by the cpuidle_cooling device which computes the idle / run duration to mitigate the temperature by injecting idle cycles. The cooling device has no control on the depth of the idle state. Allow finer control of the idle injection mechanism by allowing to specify the latency for the idle state. Thus the cooling device has the ability to have a guarantee on the exit latency of the idle states it is injecting. Acked-by: Rafael J. 
Wysocki Signed-off-by: Daniel Lezcano Reviewed-by: Amit Kucheria Link: https://lore.kernel.org/r/20200429103644.5492-1-daniel.lezcano@linaro.org --- drivers/powercap/idle_inject.c | 16 +++++++++++++++- include/linux/idle_inject.h | 4 ++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/drivers/powercap/idle_inject.c b/drivers/powercap/idle_inject.c index e9bbd3c42eeff..c90f0990968be 100644 --- a/drivers/powercap/idle_inject.c +++ b/drivers/powercap/idle_inject.c @@ -61,12 +61,14 @@ struct idle_inject_thread { * @timer: idle injection period timer * @idle_duration_us: duration of CPU idle time to inject * @run_duration_us: duration of CPU run time to allow + * @latency_us: max allowed latency * @cpumask: mask of CPUs affected by idle injection */ struct idle_inject_device { struct hrtimer timer; unsigned int idle_duration_us; unsigned int run_duration_us; + unsigned int latency_us; unsigned long cpumask[]; }; @@ -138,7 +140,8 @@ static void idle_inject_fn(unsigned int cpu) */ iit->should_run = 0; - play_idle(READ_ONCE(ii_dev->idle_duration_us)); + play_idle_precise(READ_ONCE(ii_dev->idle_duration_us) * NSEC_PER_USEC, + READ_ONCE(ii_dev->latency_us) * NSEC_PER_USEC); } /** @@ -169,6 +172,16 @@ void idle_inject_get_duration(struct idle_inject_device *ii_dev, *idle_duration_us = READ_ONCE(ii_dev->idle_duration_us); } +/** + * idle_inject_set_latency - set the maximum latency allowed + * @latency_us: set the latency requirement for the idle state + */ +void idle_inject_set_latency(struct idle_inject_device *ii_dev, + unsigned int latency_us) +{ + WRITE_ONCE(ii_dev->latency_us, latency_us); +} + /** * idle_inject_start - start idle injections * @ii_dev: idle injection control device structure @@ -297,6 +310,7 @@ struct idle_inject_device *idle_inject_register(struct cpumask *cpumask) cpumask_copy(to_cpumask(ii_dev->cpumask), cpumask); hrtimer_init(&ii_dev->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); ii_dev->timer.function = idle_inject_timer_fn; + ii_dev->latency_us = UINT_MAX; for_each_cpu(cpu, to_cpumask(ii_dev->cpumask)) { diff --git a/include/linux/idle_inject.h b/include/linux/idle_inject.h index a445cd1a36c5d..91a8612b8bf9e 100644 --- a/include/linux/idle_inject.h +++ b/include/linux/idle_inject.h @@ -26,4 +26,8 @@ void idle_inject_set_duration(struct idle_inject_device *ii_dev, void idle_inject_get_duration(struct idle_inject_device *ii_dev, unsigned int *run_duration_us, unsigned int *idle_duration_us); + +void idle_inject_set_latency(struct idle_inject_device *ii_dev, + unsigned int latency_ns); + #endif /* __IDLE_INJECT_H__ */ -- GitLab From 3b25846fbbca2ee3aaa67fe5abb750806d28a98e Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Wed, 29 Apr 2020 12:36:40 +0200 Subject: [PATCH 0541/1971] dt-bindings: thermal: Add the idle cooling device Some devices are not able to cool down by reducing their voltage / frequency because it could be not available or the system does not allow voltage scaling. In this configuration, it is not possible to use this strategy and the idle injection cooling device can be used instead. One idle cooling device is now present for the CPU as implemented by the combination of the idle injection framework belonging to the power capping framework and the thermal cooling device. The missing part is the DT binding providing a way to describe how the cooling device will work on the system. A first iteration was done by making the cooling device to point to the idle state. 
Unfortunately it does not make sense because it would need to duplicate the idle state description for each CPU in order to have a different phandle and make the thermal internal framework happy. It was proposed to add an cooling-cells to <3>, unfortunately the thermal framework is expecting a value of <2> as stated by the documentation and it is not possible from the cooling device generic code to loop this third value to the back end cooling device. Another proposal was to add a child 'thermal-idle' node as the SCMI does. This approach allows to have a self-contained configuration for the idle cooling device without colliding with the cpufreq cooling device which is based on the CPU node. In addition, it allows to have the cpufreq cooling device and the idle cooling device to co-exist together as shown in the example. Reviewed-by: Rob Herring Signed-off-by: Daniel Lezcano Reviewed-by: Amit Kucheria Link: https://lore.kernel.org/r/20200429103644.5492-2-daniel.lezcano@linaro.org --- .../bindings/thermal/thermal-idle.yaml | 145 ++++++++++++++++++ 1 file changed, 145 insertions(+) create mode 100644 Documentation/devicetree/bindings/thermal/thermal-idle.yaml diff --git a/Documentation/devicetree/bindings/thermal/thermal-idle.yaml b/Documentation/devicetree/bindings/thermal/thermal-idle.yaml new file mode 100644 index 0000000000000..7a922f5409347 --- /dev/null +++ b/Documentation/devicetree/bindings/thermal/thermal-idle.yaml @@ -0,0 +1,145 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +# Copyright 2020 Linaro Ltd. +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/thermal/thermal-idle.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Thermal idle cooling device binding + +maintainers: + - Daniel Lezcano + +description: | + The thermal idle cooling device allows the system to passively + mitigate the temperature on the device by injecting idle cycles, + forcing it to cool down. + + This binding describes the thermal idle node. + +properties: + $nodename: + const: thermal-idle + description: | + A thermal-idle node describes the idle cooling device properties to + cool down efficiently the attached thermal zone. + + '#cooling-cells': + const: 2 + description: | + Must be 2, in order to specify minimum and maximum cooling state used in + the cooling-maps reference. The first cell is the minimum cooling state + and the second cell is the maximum cooling state requested. + + duration-us: + description: | + The idle duration in microsecond the device should cool down. + + exit-latency-us: + description: | + The exit latency constraint in microsecond for the injected + idle state for the device. It is the latency constraint to + apply when selecting an idle state from among all the present + ones. + +required: + - '#cooling-cells' + +examples: + - | + #include + + // Example: Combining idle cooling device on big CPUs with cpufreq cooling device + cpus { + #address-cells = <2>; + #size-cells = <0>; + + /* ... 
*/ + + cpu_b0: cpu@100 { + device_type = "cpu"; + compatible = "arm,cortex-a72"; + reg = <0x0 0x100>; + enable-method = "psci"; + capacity-dmips-mhz = <1024>; + dynamic-power-coefficient = <436>; + #cooling-cells = <2>; /* min followed by max */ + cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>; + thermal-idle { + #cooling-cells = <2>; + duration-us = <10000>; + exit-latency-us = <500>; + }; + }; + + cpu_b1: cpu@101 { + device_type = "cpu"; + compatible = "arm,cortex-a72"; + reg = <0x0 0x101>; + enable-method = "psci"; + capacity-dmips-mhz = <1024>; + dynamic-power-coefficient = <436>; + #cooling-cells = <2>; /* min followed by max */ + cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>; + thermal-idle { + #cooling-cells = <2>; + duration-us = <10000>; + exit-latency-us = <500>; + }; + }; + + /* ... */ + + }; + + /* ... */ + + thermal_zones { + cpu_thermal: cpu { + polling-delay-passive = <100>; + polling-delay = <1000>; + + /* ... */ + + trips { + cpu_alert0: cpu_alert0 { + temperature = <65000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu_alert1: cpu_alert1 { + temperature = <70000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu_alert2: cpu_alert2 { + temperature = <75000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu_crit: cpu_crit { + temperature = <95000>; + hysteresis = <2000>; + type = "critical"; + }; + }; + + cooling-maps { + map0 { + trip = <&cpu_alert1>; + cooling-device = <&{/cpus/cpu@100/thermal-idle} 0 15 >, + <&{/cpus/cpu@101/thermal-idle} 0 15>; + }; + + map1 { + trip = <&cpu_alert2>; + cooling-device = + <&cpu_b0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu_b1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + }; + }; -- GitLab From dfd0bda3703cdaf1fccd5da72cb7101a4fedfe68 Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Wed, 29 Apr 2020 12:36:41 +0200 Subject: [PATCH 0542/1971] thermal/drivers/cpuidle_cooling: Change the registration function Today, there is no user for the cpuidle cooling device. The targetted platform is ARM and ARM64. The cpuidle and the cpufreq cooling device are based on the device tree. As the cpuidle cooling device can have its own configuration depending on the platform and the available idle states. The DT node description will give the optional properties to set the cooling device up. Do no longer rely on the CPU node which is prone to error and will lead to a confusion in the DT because the cpufreq cooling device is also using it. Let initialize the cpuidle cooling device with the DT binding. 
This was tested on: - hikey960 - hikey6220 - rock960 - db845c Acked-by: Viresh Kumar Signed-off-by: Daniel Lezcano Reviewed-by: Lukasz Luba Reviewed-by: Amit Kucheria Tested-by: Amit Kucheria Link: https://lore.kernel.org/r/20200429103644.5492-3-daniel.lezcano@linaro.org --- drivers/thermal/cpuidle_cooling.c | 63 +++++++++++++++++++++++++------ include/linux/cpu_cooling.h | 12 +----- 2 files changed, 53 insertions(+), 22 deletions(-) diff --git a/drivers/thermal/cpuidle_cooling.c b/drivers/thermal/cpuidle_cooling.c index 0bb843246f592..78e3e8238116c 100644 --- a/drivers/thermal/cpuidle_cooling.c +++ b/drivers/thermal/cpuidle_cooling.c @@ -5,11 +5,14 @@ * Author: Daniel Lezcano * */ +#define pr_fmt(fmt) "cpuidle cooling: " fmt + #include #include #include #include #include +#include #include #include @@ -154,22 +157,25 @@ static struct thermal_cooling_device_ops cpuidle_cooling_ops = { }; /** - * cpuidle_of_cooling_register - Idle cooling device initialization function + * __cpuidle_cooling_register: register the cooling device * @drv: a cpuidle driver structure pointer - * @np: a node pointer to a device tree cooling device node + * @np: a device node structure pointer used for the thermal binding * - * This function is in charge of creating a cooling device per cpuidle - * driver and register it to thermal framework. + * This function is in charge of allocating the cpuidle cooling device + * structure, the idle injection, initialize them and register the + * cooling device to the thermal framework. * - * Return: zero on success, or negative value corresponding to the - * error detected in the underlying subsystems. + * Return: zero on success, a negative value returned by one of the + * underlying subsystem in case of error */ -int cpuidle_of_cooling_register(struct device_node *np, - struct cpuidle_driver *drv) +static int __cpuidle_cooling_register(struct device_node *np, + struct cpuidle_driver *drv) { struct idle_inject_device *ii_dev; struct cpuidle_cooling_device *idle_cdev; struct thermal_cooling_device *cdev; + unsigned int idle_duration_us = TICK_USEC; + unsigned int latency_us = UINT_MAX; char dev_name[THERMAL_NAME_LENGTH]; int id, ret; @@ -191,7 +197,11 @@ int cpuidle_of_cooling_register(struct device_node *np, goto out_id; } - idle_inject_set_duration(ii_dev, TICK_USEC, TICK_USEC); + of_property_read_u32(np, "duration-us", &idle_duration_us); + of_property_read_u32(np, "exit-latency-us", &latency_us); + + idle_inject_set_duration(ii_dev, TICK_USEC, idle_duration_us); + idle_inject_set_latency(ii_dev, latency_us); idle_cdev->ii_dev = ii_dev; @@ -204,6 +214,9 @@ int cpuidle_of_cooling_register(struct device_node *np, goto out_unregister; } + pr_debug("%s: Idle injection set with idle duration=%u, latency=%u\n", + dev_name, idle_duration_us, latency_us); + return 0; out_unregister: @@ -221,12 +234,38 @@ out: * @drv: a cpuidle driver structure pointer * * This function is in charge of creating a cooling device per cpuidle - * driver and register it to thermal framework. + * driver and register it to the thermal framework. * * Return: zero on success, or negative value corresponding to the * error detected in the underlying subsystems. 
*/ -int cpuidle_cooling_register(struct cpuidle_driver *drv) +void cpuidle_cooling_register(struct cpuidle_driver *drv) { - return cpuidle_of_cooling_register(NULL, drv); + struct device_node *cooling_node; + struct device_node *cpu_node; + int cpu, ret; + + for_each_cpu(cpu, drv->cpumask) { + + cpu_node = of_cpu_device_node_get(cpu); + + cooling_node = of_get_child_by_name(cpu_node, "thermal-idle"); + + of_node_put(cpu_node); + + if (!cooling_node) { + pr_debug("'thermal-idle' node not found for cpu%d\n", cpu); + continue; + } + + ret = __cpuidle_cooling_register(cooling_node, drv); + + of_node_put(cooling_node); + + if (ret) { + pr_err("Failed to register the cpuidle cooling device" \ + "for cpu%d: %d\n", cpu, ret); + break; + } + } } diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h index 65501d8f97788..a3bdc8a98f2c4 100644 --- a/include/linux/cpu_cooling.h +++ b/include/linux/cpu_cooling.h @@ -63,18 +63,10 @@ of_cpufreq_cooling_register(struct cpufreq_policy *policy) struct cpuidle_driver; #ifdef CONFIG_CPU_IDLE_THERMAL -int cpuidle_cooling_register(struct cpuidle_driver *drv); -int cpuidle_of_cooling_register(struct device_node *np, - struct cpuidle_driver *drv); +void cpuidle_cooling_register(struct cpuidle_driver *drv); #else /* CONFIG_CPU_IDLE_THERMAL */ -static inline int cpuidle_cooling_register(struct cpuidle_driver *drv) +static inline void cpuidle_cooling_register(struct cpuidle_driver *drv) { - return 0; -} -static inline int cpuidle_of_cooling_register(struct device_node *np, - struct cpuidle_driver *drv) -{ - return 0; } #endif /* CONFIG_CPU_IDLE_THERMAL */ -- GitLab From fc7a3d9e9cd01e2679076c655f2bc4b04efbfa01 Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Wed, 29 Apr 2020 12:36:42 +0200 Subject: [PATCH 0543/1971] thermal: cpuidle: Register cpuidle cooling device The cpuidle driver can be used as a cooling device by injecting idle cycles. When the property is set, register the cpuidle driver with the idle state node pointer as a cooling device. The thermal framework will do the association automatically with the thermal zone via the cooling-device defined in the device tree cooling-maps section. 
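The cooling device wired up over the preceding patches ultimately drives the idle injection framework. A minimal sketch of that interface as it stands after this series is shown below; it is an illustration only, assumes a built-in driver context, and trims error unwinding.

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/idle_inject.h>

static struct idle_inject_device *example_ii_dev;

/* Inject roughly 20% idle time on @cpus, restricted to shallow idle states. */
static int example_start_idle_injection(struct cpumask *cpus)
{
        example_ii_dev = idle_inject_register(cpus);
        if (!example_ii_dev)
                return -ENOMEM;

        /* run 40 ms, idle 10 ms per period */
        idle_inject_set_duration(example_ii_dev, 40000, 10000);
        /* only pick idle states with an exit latency of 500 us or less */
        idle_inject_set_latency(example_ii_dev, 500);

        return idle_inject_start(example_ii_dev);
}

This mirrors what __cpuidle_cooling_register() above does with the values parsed from the new duration-us and exit-latency-us properties; starting and stopping the injection is then left to the thermal core via the cooling device callbacks.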
Signed-off-by: Daniel Lezcano Reviewed-by: Lukasz Luba Reviewed-by: Amit Kucheria Acked-by: Sudeep Holla Link: https://lore.kernel.org/r/20200429103644.5492-4-daniel.lezcano@linaro.org --- drivers/cpuidle/cpuidle-arm.c | 3 +++ drivers/cpuidle/cpuidle-psci.c | 3 +++ 2 files changed, 6 insertions(+) diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c index 9e5156d396276..8c758920d6995 100644 --- a/drivers/cpuidle/cpuidle-arm.c +++ b/drivers/cpuidle/cpuidle-arm.c @@ -8,6 +8,7 @@ #define pr_fmt(fmt) "CPUidle arm: " fmt +#include #include #include #include @@ -124,6 +125,8 @@ static int __init arm_idle_init_cpu(int cpu) if (ret) goto out_kfree_drv; + cpuidle_cooling_register(drv); + return 0; out_kfree_drv: diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c index bae9140a65a55..1f38e0dfc9b22 100644 --- a/drivers/cpuidle/cpuidle-psci.c +++ b/drivers/cpuidle/cpuidle-psci.c @@ -9,6 +9,7 @@ #define pr_fmt(fmt) "CPUidle PSCI: " fmt #include +#include #include #include #include @@ -313,6 +314,8 @@ static int __init psci_idle_init_cpu(int cpu) if (ret) goto out_kfree_drv; + cpuidle_cooling_register(drv); + return 0; out_kfree_drv: -- GitLab From cf576c58b3a283333fc6e9a7c1c8e5342fa59b97 Mon Sep 17 00:00:00 2001 From: Eryu Guan Date: Tue, 12 May 2020 10:29:04 +0800 Subject: [PATCH 0544/1971] fuse: invalidate inode attr in writeback cache mode Under writeback mode, inode->i_blocks is not updated, making utils du read st.blocks as 0. For example, when using virtiofs (cache=always & nondax mode) with writeback_cache enabled, writing a new file and check its disk usage with du, du reports 0 usage. # uname -r 5.6.0-rc6+ # mount -t virtiofs virtiofs /mnt/virtiofs # rm -f /mnt/virtiofs/testfile # create new file and do extend write # xfs_io -fc "pwrite 0 4k" /mnt/virtiofs/testfile wrote 4096/4096 bytes at offset 0 4 KiB, 1 ops; 0.0001 sec (28.103 MiB/sec and 7194.2446 ops/sec) # du -k /mnt/virtiofs/testfile 0 <==== disk usage is 0 # stat -c %s,%b /mnt/virtiofs/testfile 4096,0 <==== i_size is correct, but st_blocks is 0 Fix it by invalidating attr in fuse_flush(), so we get up-to-date attr from server on next getattr. Signed-off-by: Eryu Guan Signed-off-by: Miklos Szeredi --- fs/fuse/file.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index d400b71b98d55..262c5e20f3242 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -445,8 +445,9 @@ static int fuse_flush(struct file *file, fl_owner_t id) if (is_bad_inode(inode)) return -EIO; + err = 0; if (fc->no_flush) - return 0; + goto inval_attr_out; err = write_inode_now(inode, 1); if (err) @@ -475,6 +476,14 @@ static int fuse_flush(struct file *file, fl_owner_t id) fc->no_flush = 1; err = 0; } + +inval_attr_out: + /* + * In memory i_blocks is not maintained by fuse, if writeback cache is + * enabled, i_blocks from cached attr may not be accurate. + */ + if (!err && fc->writeback_cache) + fuse_invalidate_attr(inode); return err; } -- GitLab From 614c026e8a46636198da93ec30719f93975bb26a Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Tue, 19 May 2020 14:50:37 +0200 Subject: [PATCH 0545/1971] fuse: always flush dirty data on close(2) We want cached data to synced with the userspace filesystem on close(), for example to allow getting correct st_blocks value. Do this regardless of whether the userspace filesystem implements a FLUSH method or not. 
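A small userspace check of the behaviour this pair of fuse patches addresses follows; it uses ordinary POSIX calls, nothing fuse-specific, and is meant to be run against a writeback-cache fuse mount such as the virtiofs setup described above. After the fix, st_blocks reported right after close() should reflect the flushed data instead of 0.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        const char *path = argc > 1 ? argv[1] : "testfile";
        char buf[4096];
        struct stat st;
        int fd;

        memset(buf, 'a', sizeof(buf));

        fd = open(path, O_CREAT | O_TRUNC | O_WRONLY, 0644);
        if (fd < 0 || write(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf))
                return 1;
        close(fd);      /* dirty data is flushed and cached attrs invalidated here */

        if (stat(path, &st))
                return 1;
        /* Before these fixes st_blocks could still read back as 0 here. */
        printf("st_size=%lld st_blocks=%lld\n",
               (long long)st.st_size, (long long)st.st_blocks);
        return 0;
}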
Signed-off-by: Miklos Szeredi --- fs/fuse/file.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 262c5e20f3242..4aa750d08d622 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -445,10 +445,6 @@ static int fuse_flush(struct file *file, fl_owner_t id) if (is_bad_inode(inode)) return -EIO; - err = 0; - if (fc->no_flush) - goto inval_attr_out; - err = write_inode_now(inode, 1); if (err) return err; @@ -461,6 +457,10 @@ static int fuse_flush(struct file *file, fl_owner_t id) if (err) return err; + err = 0; + if (fc->no_flush) + goto inval_attr_out; + memset(&inarg, 0, sizeof(inarg)); inarg.fh = ff->fh; inarg.lock_owner = fuse_lock_owner_id(fc, id); -- GitLab From 5157da2ca42cbeca01709e29ce7b797cffed2432 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Tue, 19 May 2020 14:50:37 +0200 Subject: [PATCH 0546/1971] fuse: always allow query of st_dev Fuse mounts without "allow_other" are off-limits to all non-owners. Yet it makes sense to allow querying st_dev on the root, since this value is provided by the kernel, not the userspace filesystem. Allow statx(2) with a zero request mask to succeed on a fuse mounts for all users. Reported-by: Nikolaus Rath Signed-off-by: Miklos Szeredi --- fs/fuse/dir.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index de1e2fde60bd4..26f028bc760b2 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -1689,8 +1689,18 @@ static int fuse_getattr(const struct path *path, struct kstat *stat, struct inode *inode = d_inode(path->dentry); struct fuse_conn *fc = get_fuse_conn(inode); - if (!fuse_allow_current_process(fc)) + if (!fuse_allow_current_process(fc)) { + if (!request_mask) { + /* + * If user explicitly requested *nothing* then don't + * error out, but return st_dev only. + */ + stat->result_mask = 0; + stat->dev = inode->i_sb->s_dev; + return 0; + } return -EACCES; + } return fuse_update_get_attr(inode, NULL, stat, request_mask, flags); } -- GitLab From 7fd3abfa8dd7c08ecacd25b2f9f9e1d3fb642440 Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Mon, 4 May 2020 14:33:15 -0400 Subject: [PATCH 0547/1971] virtiofs: do not use fuse_fill_super_common() for device installation fuse_fill_super_common() allocates and installs one fuse_device. Hence virtiofs allocates and install all fuse devices by itself except one. This makes logic little twisted. There does not seem to be any real need that why virtiofs can't allocate and install all fuse devices itself. So opt out of fuse device allocation and installation while calling fuse_fill_super_common(). Regular fuse still wants fuse_fill_super_common() to install fuse_device. It needs to prevent against races where two mounters are trying to mount fuse using same fd. In that case one will succeed while other will get -EINVAL. virtiofs does not have this issue because sget_fc() resolves the race w.r.t multiple mounters and only one instance of virtio_fs_fill_super() should be in progress for same filesystem. 
Signed-off-by: Vivek Goyal Signed-off-by: Miklos Szeredi --- fs/fuse/inode.c | 19 ++++++++++++------- fs/fuse/virtio_fs.c | 9 +++------ 2 files changed, 15 insertions(+), 13 deletions(-) diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 95d712d44ca13..6fae66cc096a7 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -1113,7 +1113,7 @@ EXPORT_SYMBOL_GPL(fuse_dev_free); int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx) { - struct fuse_dev *fud; + struct fuse_dev *fud = NULL; struct fuse_conn *fc = get_fuse_conn_super(sb); struct inode *root; struct dentry *root_dentry; @@ -1155,9 +1155,12 @@ int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx) if (sb->s_user_ns != &init_user_ns) sb->s_xattr = fuse_no_acl_xattr_handlers; - fud = fuse_dev_alloc_install(fc); - if (!fud) - goto err; + if (ctx->fudptr) { + err = -ENOMEM; + fud = fuse_dev_alloc_install(fc); + if (!fud) + goto err; + } fc->dev = sb->s_dev; fc->sb = sb; @@ -1191,7 +1194,7 @@ int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx) mutex_lock(&fuse_mutex); err = -EINVAL; - if (*ctx->fudptr) + if (ctx->fudptr && *ctx->fudptr) goto err_unlock; err = fuse_ctl_add_conn(fc); @@ -1200,7 +1203,8 @@ int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx) list_add_tail(&fc->entry, &fuse_conn_list); sb->s_root = root_dentry; - *ctx->fudptr = fud; + if (ctx->fudptr) + *ctx->fudptr = fud; mutex_unlock(&fuse_mutex); return 0; @@ -1208,7 +1212,8 @@ int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx) mutex_unlock(&fuse_mutex); dput(root_dentry); err_dev_free: - fuse_dev_free(fud); + if (fud) + fuse_dev_free(fud); err: return err; } diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c index 0c6ef5d3c6ab8..4c4ef5d692981 100644 --- a/fs/fuse/virtio_fs.c +++ b/fs/fuse/virtio_fs.c @@ -1103,7 +1103,7 @@ static int virtio_fs_fill_super(struct super_block *sb) err = -ENOMEM; /* Allocate fuse_dev for hiprio and notification queues */ - for (i = 0; i < VQ_REQUEST; i++) { + for (i = 0; i < fs->nvqs; i++) { struct virtio_fs_vq *fsvq = &fs->vqs[i]; fsvq->fud = fuse_dev_alloc(); @@ -1111,18 +1111,15 @@ static int virtio_fs_fill_super(struct super_block *sb) goto err_free_fuse_devs; } - ctx.fudptr = (void **)&fs->vqs[VQ_REQUEST].fud; + /* virtiofs allocates and installs its own fuse devices */ + ctx.fudptr = NULL; err = fuse_fill_super_common(sb, &ctx); if (err < 0) goto err_free_fuse_devs; - fc = fs->vqs[VQ_REQUEST].fud->fc; - for (i = 0; i < fs->nvqs; i++) { struct virtio_fs_vq *fsvq = &fs->vqs[i]; - if (i == VQ_REQUEST) - continue; /* already initialized */ fuse_dev_install(fsvq->fud, fc); } -- GitLab From 00589386172ac577e64e3d67a3c5d4968174dcad Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Tue, 19 May 2020 14:50:37 +0200 Subject: [PATCH 0548/1971] fuse: use dump_page Instead of custom page dumping, use the standard helper. 
Reported-by: Matthew Wilcox Signed-off-by: Miklos Szeredi --- fs/fuse/dev.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 6bda7d498f1ae..1cfc68f8fea9c 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -772,8 +772,7 @@ static int fuse_check_page(struct page *page) 1 << PG_lru | 1 << PG_active | 1 << PG_reclaim))) { - pr_warn("trying to steal weird page\n"); - pr_warn(" page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping); + dump_page(page, "fuse: trying to steal weird page"); return 1; } return 0; -- GitLab From a5005c3cda6eeb6b95645e6cc32f58dafeffc976 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Tue, 19 May 2020 14:50:37 +0200 Subject: [PATCH 0549/1971] fuse: fix weird page warning When PageWaiters was added, updating this check was missed. Reported-by: Nikolaus Rath Reported-by: Hugh Dickins Fixes: 62906027091f ("mm: add PageWaiters indicating tasks are waiting for a page bit") Signed-off-by: Miklos Szeredi --- fs/fuse/dev.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 1cfc68f8fea9c..b8962581c6992 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -771,7 +771,8 @@ static int fuse_check_page(struct page *page) 1 << PG_uptodate | 1 << PG_lru | 1 << PG_active | - 1 << PG_reclaim))) { + 1 << PG_reclaim | + 1 << PG_waiters))) { dump_page(page, "fuse: trying to steal weird page"); return 1; } -- GitLab From 32f98877c57bee6bc27f443a96f49678a2cd6a50 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Tue, 19 May 2020 14:50:37 +0200 Subject: [PATCH 0550/1971] fuse: don't check refcount after stealing page page_count() is unstable. Unless there has been an RCU grace period between when the page was removed from the page cache and now, a speculative reference may exist from the page cache. Reported-by: Matthew Wilcox Signed-off-by: Miklos Szeredi --- fs/fuse/dev.c | 1 - 1 file changed, 1 deletion(-) diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index b8962581c6992..ec97cabe51ce8 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -764,7 +764,6 @@ static int fuse_check_page(struct page *page) { if (page_mapcount(page) || page->mapping != NULL || - page_count(page) != 1 || (page->flags & PAGE_FLAGS_CHECK_AT_PREP & ~(1 << PG_locked | 1 << PG_referenced | -- GitLab From 5ddd9ced9aef6cfa76af27d384c17c9e2d610ce8 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Tue, 19 May 2020 14:50:38 +0200 Subject: [PATCH 0551/1971] fuse: update attr_version counter on fuse_notify_inval_inode() A GETATTR request can race with FUSE_NOTIFY_INVAL_INODE, resulting in the attribute cache being updated with stale information after the invalidation. Fix this by bumping the attribute version in fuse_reverse_inval_inode(). 
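Why a version bump closes the race can be seen from a generic sketch of the guard pattern (hypothetical structure and helper names, not the actual fuse code, and locking omitted): a reply is only applied if the version it was issued against is still current, so advancing the version on invalidation turns the racing GETATTR reply into a no-op.

struct cached_attr {
        unsigned long long version;     /* bumped whenever the cache must be distrusted */
        long long size;                 /* the cached attribute itself */
};

/* Snapshot the version before sending a GETATTR-style request. */
static unsigned long long attr_request_start(const struct cached_attr *a)
{
        return a->version;
}

/* Racing invalidation: advance the version so stale replies are dropped. */
static void attr_invalidate(struct cached_attr *a)
{
        a->version++;
}

/* Reply handling: only install the result if nothing invalidated it meanwhile. */
static void attr_request_finish(struct cached_attr *a,
                                unsigned long long snapshot, long long new_size)
{
        if (a->version == snapshot) {
                a->size = new_size;
                a->version++;
        }
        /* else: an invalidation won the race; the cache stays marked stale */
}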
Reported-by: Krzysztof Rusek Signed-off-by: Miklos Szeredi --- fs/fuse/inode.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 6fae66cc096a7..5b4aebf5821fe 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -321,6 +321,8 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid, int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid, loff_t offset, loff_t len) { + struct fuse_conn *fc = get_fuse_conn_super(sb); + struct fuse_inode *fi; struct inode *inode; pgoff_t pg_start; pgoff_t pg_end; @@ -329,6 +331,11 @@ int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid, if (!inode) return -ENOENT; + fi = get_fuse_inode(inode); + spin_lock(&fi->lock); + fi->attr_version = atomic64_inc_return(&fc->attr_version); + spin_unlock(&fi->lock); + fuse_invalidate_attr(inode); forget_all_cached_acls(inode); if (offset >= 0) { -- GitLab From 6b2fb79963fbed7db3ef850926d913518fd5c62f Mon Sep 17 00:00:00 2001 From: Maxim Patlasov Date: Thu, 19 Sep 2019 17:11:20 +0300 Subject: [PATCH 0552/1971] fuse: optimize writepages search Re-work fi->writepages, replacing list with rb-tree. This improves performance because kernel fuse iterates through fi->writepages for each writeback page and typical number of entries is about 800 (for 100MB of fuse writeback). Before patch: 10240+0 records in 10240+0 records out 10737418240 bytes (11 GB) copied, 41.3473 s, 260 MB/s 2 1 0 57445400 40416 6323676 0 0 33 374743 8633 19210 1 8 88 3 0 29.86% [kernel] [k] _raw_spin_lock 26.62% [fuse] [k] fuse_page_is_writeback After patch: 10240+0 records in 10240+0 records out 10737418240 bytes (11 GB) copied, 21.4954 s, 500 MB/s 2 9 0 53676040 31744 10265984 0 0 64 854790 10956 48387 1 6 88 6 0 23.55% [kernel] [k] copy_user_enhanced_fast_string 9.87% [kernel] [k] __memcpy 3.10% [kernel] [k] _raw_spin_lock Signed-off-by: Maxim Patlasov Signed-off-by: Vasily Averin Signed-off-by: Miklos Szeredi --- fs/fuse/file.c | 62 ++++++++++++++++++++++++++++++++++++++---------- fs/fuse/fuse_i.h | 2 +- 2 files changed, 50 insertions(+), 14 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 4aa750d08d622..15812a8b38343 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -357,7 +357,7 @@ u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id) struct fuse_writepage_args { struct fuse_io_args ia; - struct list_head writepages_entry; + struct rb_node writepages_entry; struct list_head queue_entry; struct fuse_writepage_args *next; struct inode *inode; @@ -366,17 +366,23 @@ struct fuse_writepage_args { static struct fuse_writepage_args *fuse_find_writeback(struct fuse_inode *fi, pgoff_t idx_from, pgoff_t idx_to) { - struct fuse_writepage_args *wpa; + struct rb_node *n; + + n = fi->writepages.rb_node; - list_for_each_entry(wpa, &fi->writepages, writepages_entry) { + while (n) { + struct fuse_writepage_args *wpa; pgoff_t curr_index; + wpa = rb_entry(n, struct fuse_writepage_args, writepages_entry); WARN_ON(get_fuse_inode(wpa->inode) != fi); curr_index = wpa->ia.write.in.offset >> PAGE_SHIFT; - if (idx_from < curr_index + wpa->ia.ap.num_pages && - curr_index <= idx_to) { + if (idx_from >= curr_index + wpa->ia.ap.num_pages) + n = n->rb_right; + else if (idx_to < curr_index) + n = n->rb_left; + else return wpa; - } } return NULL; } @@ -1624,7 +1630,7 @@ static void fuse_writepage_finish(struct fuse_conn *fc, struct backing_dev_info *bdi = inode_to_bdi(inode); int i; - list_del(&wpa->writepages_entry); + rb_erase(&wpa->writepages_entry, &fi->writepages); for (i = 0; i < 
ap->num_pages; i++) { dec_wb_stat(&bdi->wb, WB_WRITEBACK); dec_node_page_state(ap->pages[i], NR_WRITEBACK_TEMP); @@ -1712,6 +1718,36 @@ __acquires(fi->lock) } } +static void tree_insert(struct rb_root *root, struct fuse_writepage_args *wpa) +{ + pgoff_t idx_from = wpa->ia.write.in.offset >> PAGE_SHIFT; + pgoff_t idx_to = idx_from + wpa->ia.ap.num_pages - 1; + struct rb_node **p = &root->rb_node; + struct rb_node *parent = NULL; + + WARN_ON(!wpa->ia.ap.num_pages); + while (*p) { + struct fuse_writepage_args *curr; + pgoff_t curr_index; + + parent = *p; + curr = rb_entry(parent, struct fuse_writepage_args, + writepages_entry); + WARN_ON(curr->inode != wpa->inode); + curr_index = curr->ia.write.in.offset >> PAGE_SHIFT; + + if (idx_from >= curr_index + curr->ia.ap.num_pages) + p = &(*p)->rb_right; + else if (idx_to < curr_index) + p = &(*p)->rb_left; + else + return (void) WARN_ON(true); + } + + rb_link_node(&wpa->writepages_entry, parent, p); + rb_insert_color(&wpa->writepages_entry, root); +} + static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_args *args, int error) { @@ -1730,7 +1766,7 @@ static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_args *args, wpa->next = next->next; next->next = NULL; next->ia.ff = fuse_file_get(wpa->ia.ff); - list_add(&next->writepages_entry, &fi->writepages); + tree_insert(&fi->writepages, next); /* * Skip fuse_flush_writepages() to make it easy to crop requests @@ -1865,7 +1901,7 @@ static int fuse_writepage_locked(struct page *page) inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP); spin_lock(&fi->lock); - list_add(&wpa->writepages_entry, &fi->writepages); + tree_insert(&fi->writepages, wpa); list_add_tail(&wpa->queue_entry, &fi->queued_writes); fuse_flush_writepages(inode); spin_unlock(&fi->lock); @@ -1977,10 +2013,10 @@ static bool fuse_writepage_in_flight(struct fuse_writepage_args *new_wpa, WARN_ON(new_ap->num_pages != 0); spin_lock(&fi->lock); - list_del(&new_wpa->writepages_entry); + rb_erase(&new_wpa->writepages_entry, &fi->writepages); old_wpa = fuse_find_writeback(fi, page->index, page->index); if (!old_wpa) { - list_add(&new_wpa->writepages_entry, &fi->writepages); + tree_insert(&fi->writepages, new_wpa); spin_unlock(&fi->lock); return false; } @@ -2095,7 +2131,7 @@ static int fuse_writepages_fill(struct page *page, wpa->inode = inode; spin_lock(&fi->lock); - list_add(&wpa->writepages_entry, &fi->writepages); + tree_insert(&fi->writepages, wpa); spin_unlock(&fi->lock); data->wpa = wpa; @@ -3405,5 +3441,5 @@ void fuse_init_file_inode(struct inode *inode) INIT_LIST_HEAD(&fi->queued_writes); fi->writectr = 0; init_waitqueue_head(&fi->page_waitq); - INIT_LIST_HEAD(&fi->writepages); + fi->writepages = RB_ROOT; } diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index d7cde216fc871..740a8a7d7ae6f 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -111,7 +111,7 @@ struct fuse_inode { wait_queue_head_t page_waitq; /* List of writepage requestst (pending or sent) */ - struct list_head writepages; + struct rb_root writepages; }; /* readdir cache (directory only) */ -- GitLab From 0858caa419e6cf9d31e734d09d70b34f64443ef6 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 12 Feb 2020 13:58:35 +0000 Subject: [PATCH 0553/1971] uapi: General notification queue definitions Add UAPI definitions for the general notification queue, including the following pieces: (*) struct watch_notification. This is the metadata header for notification messages. It includes a type and subtype that indicate the source of the message (eg. 
WATCH_TYPE_MOUNT_NOTIFY) and the kind of the message (eg. NOTIFY_MOUNT_NEW_MOUNT). The header also contains an information field that conveys the following information: - WATCH_INFO_LENGTH. The size of the entry (entries are variable length). - WATCH_INFO_ID. The watch ID specified when the watchpoint was set. - WATCH_INFO_TYPE_INFO. (Sub)type-specific information. - WATCH_INFO_FLAG_*. Flag bits overlain on the type-specific information. For use by the type. All the information in the header can be used in filtering messages at the point of writing into the buffer. (*) struct watch_notification_removal This is an extended watch-removal notification record that includes an 'id' field that can indicate the identifier of the object being removed if available (for instance, a keyring serial number). Signed-off-by: David Howells --- include/uapi/linux/watch_queue.h | 55 ++++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 include/uapi/linux/watch_queue.h diff --git a/include/uapi/linux/watch_queue.h b/include/uapi/linux/watch_queue.h new file mode 100644 index 0000000000000..5f3d21e8a34b0 --- /dev/null +++ b/include/uapi/linux/watch_queue.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_LINUX_WATCH_QUEUE_H +#define _UAPI_LINUX_WATCH_QUEUE_H + +#include + +enum watch_notification_type { + WATCH_TYPE_META = 0, /* Special record */ + WATCH_TYPE__NR = 1 +}; + +enum watch_meta_notification_subtype { + WATCH_META_REMOVAL_NOTIFICATION = 0, /* Watched object was removed */ + WATCH_META_LOSS_NOTIFICATION = 1, /* Data loss occurred */ +}; + +/* + * Notification record header. This is aligned to 64-bits so that subclasses + * can contain __u64 fields. + */ +struct watch_notification { + __u32 type:24; /* enum watch_notification_type */ + __u32 subtype:8; /* Type-specific subtype (filterable) */ + __u32 info; +#define WATCH_INFO_LENGTH 0x0000007f /* Length of record */ +#define WATCH_INFO_LENGTH__SHIFT 0 +#define WATCH_INFO_ID 0x0000ff00 /* ID of watchpoint */ +#define WATCH_INFO_ID__SHIFT 8 +#define WATCH_INFO_TYPE_INFO 0xffff0000 /* Type-specific info */ +#define WATCH_INFO_TYPE_INFO__SHIFT 16 +#define WATCH_INFO_FLAG_0 0x00010000 /* Type-specific info, flag bit 0 */ +#define WATCH_INFO_FLAG_1 0x00020000 /* ... */ +#define WATCH_INFO_FLAG_2 0x00040000 +#define WATCH_INFO_FLAG_3 0x00080000 +#define WATCH_INFO_FLAG_4 0x00100000 +#define WATCH_INFO_FLAG_5 0x00200000 +#define WATCH_INFO_FLAG_6 0x00400000 +#define WATCH_INFO_FLAG_7 0x00800000 +}; + + +/* + * Extended watch removal notification. This is used optionally if the type + * wants to indicate an identifier for the object being watched, if there is + * such. This can be distinguished by the length. + * + * type -> WATCH_TYPE_META + * subtype -> WATCH_META_REMOVAL_NOTIFICATION + */ +struct watch_notification_removal { + struct watch_notification watch; + __u64 id; /* Type-dependent identifier */ +}; + +#endif /* _UAPI_LINUX_WATCH_QUEUE_H */ -- GitLab From 344fa64ef8f6740e99b32ab788b6e3742d7284b3 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 12 Feb 2020 13:58:35 +0000 Subject: [PATCH 0554/1971] security: Add a hook for the point of notification insertion Add a security hook that allows an LSM to rule on whether a notification message is allowed to be inserted into a particular watch queue. The hook is given the following information: (1) The credentials of the triggerer (which may be init_cred for a system notification, eg. a hardware error). 
(2) The credentials of whoever set the watch. (3) The notification message. Signed-off-by: David Howells Acked-by: James Morris cc: Casey Schaufler cc: Stephen Smalley cc: linux-security-module@vger.kernel.org --- include/linux/lsm_hook_defs.h | 5 +++++ include/linux/lsm_hooks.h | 9 +++++++++ include/linux/security.h | 15 +++++++++++++++ security/security.c | 9 +++++++++ 4 files changed, 38 insertions(+) diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h index 5616b2567aa7f..e0def45b5cc56 100644 --- a/include/linux/lsm_hook_defs.h +++ b/include/linux/lsm_hook_defs.h @@ -253,6 +253,11 @@ LSM_HOOK(int, 0, inode_setsecctx, struct dentry *dentry, void *ctx, u32 ctxlen) LSM_HOOK(int, 0, inode_getsecctx, struct inode *inode, void **ctx, u32 *ctxlen) +#if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE) +LSM_HOOK(int, 0, post_notification, const struct cred *w_cred, + const struct cred *cred, struct watch_notification *n) +#endif /* CONFIG_SECURITY && CONFIG_KEY_NOTIFICATIONS */ + #ifdef CONFIG_SECURITY_NETWORK LSM_HOOK(int, 0, unix_stream_connect, struct sock *sock, struct sock *other, struct sock *newsk) diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index 988ca0df7824c..0b5e5769b8367 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -1437,6 +1437,15 @@ * @ctx is a pointer in which to place the allocated security context. * @ctxlen points to the place to put the length of @ctx. * + * Security hooks for the general notification queue: + * + * @post_notification: + * Check to see if a watch notification can be posted to a particular + * queue. + * @w_cred: The credentials of whoever set the watch. + * @cred: The event-triggerer's credentials + * @n: The notification being posted + * * Security hooks for using the eBPF maps and programs functionalities through * eBPF syscalls.
* diff --git a/include/linux/security.h b/include/linux/security.h index a8d9310472dfa..9a5d12ab491bc 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -56,6 +56,8 @@ struct mm_struct; struct fs_context; struct fs_parameter; enum fs_value_type; +struct watch; +struct watch_notification; /* Default (no) options for the capable function */ #define CAP_OPT_NONE 0x0 @@ -1275,6 +1277,19 @@ static inline int security_locked_down(enum lockdown_reason what) } #endif /* CONFIG_SECURITY */ +#if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE) +int security_post_notification(const struct cred *w_cred, + const struct cred *cred, + struct watch_notification *n); +#else +static inline int security_post_notification(const struct cred *w_cred, + const struct cred *cred, + struct watch_notification *n) +{ + return 0; +} +#endif + #ifdef CONFIG_SECURITY_NETWORK int security_unix_stream_connect(struct sock *sock, struct sock *other, struct sock *newsk); diff --git a/security/security.c b/security/security.c index 7fed24b9d57e6..7d55607120b48 100644 --- a/security/security.c +++ b/security/security.c @@ -2007,6 +2007,15 @@ int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen) } EXPORT_SYMBOL(security_inode_getsecctx); +#ifdef CONFIG_WATCH_QUEUE +int security_post_notification(const struct cred *w_cred, + const struct cred *cred, + struct watch_notification *n) +{ + return call_int_hook(post_notification, 0, w_cred, cred, n); +} +#endif /* CONFIG_WATCH_QUEUE */ + #ifdef CONFIG_SECURITY_NETWORK int security_unix_stream_connect(struct sock *sock, struct sock *other, struct sock *newsk) -- GitLab From b580b93664f91db8cb503429030df0f1c1e53528 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 12 Feb 2020 13:58:35 +0000 Subject: [PATCH 0555/1971] pipe: Add O_NOTIFICATION_PIPE Add an O_NOTIFICATION_PIPE flag that can be passed to pipe2() to indicate that the pipe being created is going to be used for notifications. This suppresses the use of splice(), vmsplice(), tee() and sendfile() on the pipe as calling iov_iter_revert() on a pipe when a kernel notification message has been inserted into the middle of a multi-buffer splice will be messy. The flag is given the same value as O_EXCL as it seems unlikely that this flag will ever be applicable to pipes and I don't want to use up another O_* bit unnecessarily. An alternative could be to add a pipe3() system call. Signed-off-by: David Howells --- include/uapi/linux/watch_queue.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/include/uapi/linux/watch_queue.h b/include/uapi/linux/watch_queue.h index 5f3d21e8a34b0..9df72227f5154 100644 --- a/include/uapi/linux/watch_queue.h +++ b/include/uapi/linux/watch_queue.h @@ -3,6 +3,9 @@ #define _UAPI_LINUX_WATCH_QUEUE_H #include +#include + +#define O_NOTIFICATION_PIPE O_EXCL /* Parameter to pipe2() selecting notification pipe */ enum watch_notification_type { WATCH_TYPE_META = 0, /* Special record */ -- GitLab From c73be61cede5882f9605a852414db559c0ebedfd Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 14 Jan 2020 17:07:11 +0000 Subject: [PATCH 0556/1971] pipe: Add general notification queue support Make it possible to have a general notification queue built on top of a standard pipe. Notifications are 'spliced' into the pipe and then read out. splice(), vmsplice() and sendfile() are forbidden on pipes used for notifications as post_one_notification() cannot take pipe->mutex. 
This means that notifications could be posted in between individual pipe buffers, making iov_iter_revert() difficult to effect. The way the notification queue is used is: (1) An application opens a pipe with a special flag and indicates the number of messages it wishes to be able to queue at once (this can only be set once): pipe2(fds, O_NOTIFICATION_PIPE); ioctl(fds[0], IOC_WATCH_QUEUE_SET_SIZE, queue_depth); (2) The application then uses poll() and read() as normal to extract data from the pipe. read() will return multiple notifications if the buffer is big enough, but it will not split a notification across buffers - rather it will return a short read or EMSGSIZE. Notification messages include a length in the header so that the caller can split them up. Each message has a header that describes it: struct watch_notification { __u32 type:24; __u32 subtype:8; __u32 info; }; The type indicates the source (eg. mount tree changes, superblock events, keyring changes, block layer events) and the subtype indicates the event type (eg. mount, unmount; EIO, EDQUOT; link, unlink). The info field indicates a number of things, including the entry length, an ID assigned to a watchpoint contributing to this buffer and type-specific flags. Supplementary data, such as the key ID that generated an event, can be attached in additional slots. The maximum message size is 127 bytes. Messages may not be padded or aligned, so there is no guarantee, for example, that the notification type will be on a 4-byte boundary. Signed-off-by: David Howells --- .../userspace-api/ioctl/ioctl-number.rst | 1 + Documentation/watch_queue.rst | 339 +++++++++ fs/pipe.c | 206 ++++-- fs/splice.c | 12 +- include/linux/pipe_fs_i.h | 19 +- include/linux/watch_queue.h | 127 ++++ include/uapi/linux/watch_queue.h | 20 + init/Kconfig | 12 + kernel/Makefile | 1 + kernel/watch_queue.c | 657 ++++++++++++++++++ 10 files changed, 1318 insertions(+), 76 deletions(-) create mode 100644 Documentation/watch_queue.rst create mode 100644 include/linux/watch_queue.h create mode 100644 kernel/watch_queue.c diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst index f759edafd938a..9425377615ce4 100644 --- a/Documentation/userspace-api/ioctl/ioctl-number.rst +++ b/Documentation/userspace-api/ioctl/ioctl-number.rst @@ -201,6 +201,7 @@ Code Seq# Include File Comments 'W' 00-1F linux/wanrouter.h conflict! (pre 3.9) 'W' 00-3F sound/asound.h conflict! 'W' 40-5F drivers/pci/switch/switchtec.c +'W' 60-61 linux/watch_queue.h 'X' all fs/xfs/xfs_fs.h, conflict! fs/xfs/linux-2.6/xfs_ioctl32.h, include/linux/falloc.h, diff --git a/Documentation/watch_queue.rst b/Documentation/watch_queue.rst new file mode 100644 index 0000000000000..849fad6893efa --- /dev/null +++ b/Documentation/watch_queue.rst @@ -0,0 +1,339 @@ +============================== +General notification mechanism +============================== + +The general notification mechanism is built on top of the standard pipe driver +whereby it effectively splices notification messages from the kernel into pipes +opened by userspace. This can be used in conjunction with:: + + * Key/keyring notifications + + +The notification buffers can be enabled by: + + "General setup"/"General notification queue" + (CONFIG_WATCH_QUEUE) + +This document has the following sections: + +.. contents:: :local: + + +Overview +======== + +This facility appears as a pipe that is opened in a special mode.
The pipe's +internal ring buffer is used to hold messages that are generated by the kernel. +These messages are then read out by read(). Splice and similar are disabled on +such pipes due to them wanting to, under some circumstances, revert their +additions to the ring - which might end up interleaved with notification +messages. + +The owner of the pipe has to tell the kernel which sources it would like to +watch through that pipe. Only sources that have been connected to a pipe will +insert messages into it. Note that a source may be bound to multiple pipes and +insert messages into all of them simultaneously. + +Filters may also be emplaced on a pipe so that certain source types and +subevents can be ignored if they're not of interest. + +A message will be discarded if there isn't a slot available in the ring or if +no preallocated message buffer is available. In both of these cases, read() +will insert a WATCH_META_LOSS_NOTIFICATION message into the output buffer after +the last message currently in the buffer has been read. + +Note that when producing a notification, the kernel does not wait for the +consumers to collect it, but rather just continues on. This means that +notifications can be generated whilst spinlocks are held and also protects the +kernel from being held up indefinitely by a userspace malfunction. + + +Message Structure +================= + +Notification messages begin with a short header:: + + struct watch_notification { + __u32 type:24; + __u32 subtype:8; + __u32 info; + }; + +"type" indicates the source of the notification record and "subtype" indicates +the type of record from that source (see the Watch Sources section below). The +type may also be "WATCH_TYPE_META". This is a special record type generated +internally by the watch queue itself. There are two subtypes: + + * WATCH_META_REMOVAL_NOTIFICATION + * WATCH_META_LOSS_NOTIFICATION + +The first indicates that an object on which a watch was installed was removed +or destroyed and the second indicates that some messages have been lost. + +"info" indicates a bunch of things, including: + + * The length of the message in bytes, including the header (mask with + WATCH_INFO_LENGTH and shift by WATCH_INFO_LENGTH__SHIFT). This indicates + the size of the record, which may be between 8 and 127 bytes. + + * The watch ID (mask with WATCH_INFO_ID and shift by WATCH_INFO_ID__SHIFT). + This indicates the caller's ID of the watch, which may be between 0 + and 255. Multiple watches may share a queue, and this provides a means to + distinguish them. + + * A type-specific field (WATCH_INFO_TYPE_INFO). This is set by the + notification producer to indicate some meaning specific to the type and + subtype. + +Everything in info apart from the length can be used for filtering. + +The header can be followed by supplementary information. The format of this is +defined by the type and subtype. + + +Watch List (Notification Source) API +==================================== + +A "watch list" is a list of watchers that are subscribed to a source of +notifications. A list may be attached to an object (say a key or a superblock) +or may be global (say for device events). From a userspace perspective, a +non-global watch list is typically referred to by reference to the object it +belongs to (such as using KEYCTL_WATCH_KEY and giving it a key serial number to +watch that specific key).
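As a rough preview of how the watch-list and posting functions described below fit together for an in-kernel source, a driver might embed a watch list in its object and post into it along the following lines (a minimal sketch only; ``struct my_object``, ``WATCH_TYPE_MY_SOURCE`` and the helper names are illustrative and not part of this patch)::

    struct my_object {
        struct watch_list watchers;
        u64 obj_id;
    };

    static void my_object_init(struct my_object *obj)
    {
        /* No release_watch helper: the watches hold no refs on the object. */
        init_watch_list(&obj->watchers, NULL);
    }

    static void my_object_notify_change(struct my_object *obj, u32 subtype)
    {
        struct watch_notification n = {
            .type    = WATCH_TYPE_MY_SOURCE,   /* illustrative source type */
            .subtype = subtype,
            .info    = watch_sizeof(n),        /* record length in bytes */
        };

        /* Deliver to every queue subscribed to obj->watchers with this ID. */
        post_watch_notification(&obj->watchers, &n, current_cred(), obj->obj_id);
    }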
+ +To manage a watch list, the following functions are provided: + + * ``void init_watch_list(struct watch_list *wlist, + void (*release_watch)(struct watch *));`` + + Initialise a watch list. If ``release_watch`` is not NULL, then this + indicates a function that should be called when the watch_list object is + destroyed to discard any references the watch list holds on the watched + object. + + * ``void remove_watch_list(struct watch_list *wlist, u64 id);`` + + This removes all of the watches subscribed to a watch_list and frees them + and then destroys the watch_list object itself. + + +Watch Queue (Notification Output) API +===================================== + +A "watch queue" is the buffer allocated by an application that notification +records will be written into. The workings of this are hidden entirely inside +of the pipe device driver, but it is necessary to gain a reference to it to set +a watch. These can be managed with: + + * ``struct watch_queue *get_watch_queue(int fd);`` + + Since watch queues are indicated to the kernel by the fd of the pipe that + implements the buffer, userspace must hand that fd through a system call. + This can be used to look up an opaque pointer to the watch queue from the + system call. + + * ``void put_watch_queue(struct watch_queue *wqueue);`` + + This discards the reference obtained from ``get_watch_queue()``. + + +Watch Subscription API +====================== + +A "watch" is a subscription on a watch list, indicating the watch queue, and +thus the buffer, into which notification records should be written. The watch +queue object may also carry filtering rules for that object, as set by +userspace. Some parts of the watch struct can be set by the driver:: + + struct watch { + union { + u32 info_id; /* ID to be OR'd in to info field */ + ... + }; + void *private; /* Private data for the watched object */ + u64 id; /* Internal identifier */ + ... + }; + +The ``info_id`` value should be an 8-bit number obtained from userspace and +shifted by WATCH_INFO_ID__SHIFT. This is OR'd into the WATCH_INFO_ID field of +struct watch_notification::info when and if the notification is written into +the associated watch queue buffer. + +The ``private`` field is the driver's data associated with the watch_list and +is cleaned up by the ``watch_list::release_watch()`` method. + +The ``id`` field is the source's ID. Notifications that are posted with a +different ID are ignored. + +The following functions are provided to manage watches: + + * ``void init_watch(struct watch *watch, struct watch_queue *wqueue);`` + + Initialise a watch object, setting its pointer to the watch queue, using + appropriate barriering to avoid lockdep complaints. + + * ``int add_watch_to_object(struct watch *watch, struct watch_list *wlist);`` + + Subscribe a watch to a watch list (notification source). The + driver-settable fields in the watch struct must have been set before this + is called. + + * ``int remove_watch_from_object(struct watch_list *wlist, + struct watch_queue *wqueue, + u64 id, false);`` + + Remove a watch from a watch list, where the watch must match the specified + watch queue (``wqueue``) and object identifier (``id``). A notification + (``WATCH_META_REMOVAL_NOTIFICATION``) is sent to the watch queue to + indicate that the watch got removed. + + * ``int remove_watch_from_object(struct watch_list *wlist, NULL, 0, true);`` + + Remove all the watches from a watch list.
It is expected that this will be + called preparatory to destruction and that the watch list will be + inaccessible to new watches by this point. A notification + (``WATCH_META_REMOVAL_NOTIFICATION``) is sent to the watch queue of each + subscribed watch to indicate that the watch got removed. + + +Notification Posting API +======================== + +To post a notification to a watch list so that the subscribed watches can see it, +the following function should be used:: + + void post_watch_notification(struct watch_list *wlist, + struct watch_notification *n, + const struct cred *cred, + u64 id); + +The notification should be preformatted and a pointer to the header (``n``) +should be passed in. The notification may be larger than this and its size, in +bytes, is noted in ``n->info & WATCH_INFO_LENGTH``. + +The ``cred`` struct indicates the credentials of the source (subject) and is +passed to the LSMs, such as SELinux, to allow or suppress the recording of the +note in each individual queue according to the credentials of that queue +(object). + +The ``id`` is the ID of the source object (such as the serial number on a key). +Only watches that have the same ID set in them will see this notification. + + +Watch Sources +============= + +Any particular buffer can be fed from multiple sources. Sources include: + + * WATCH_TYPE_KEY_NOTIFY + + Notifications of this type indicate changes to keys and keyrings, including + the changes of keyring contents or the attributes of keys. + + See Documentation/security/keys/core.rst for more information. + + +Event Filtering +=============== + +Once a watch queue has been created, a set of filters can be applied to limit +the events that are received using:: + + struct watch_notification_filter filter = { + ... + }; + ioctl(fd, IOC_WATCH_QUEUE_SET_FILTER, &filter) + +The filter description is a variable of type:: + + struct watch_notification_filter { + __u32 nr_filters; + __u32 __reserved; + struct watch_notification_type_filter filters[]; + }; + +Where "nr_filters" is the number of filters in filters[] and "__reserved" +should be 0. The "filters" array has elements of the following type:: + + struct watch_notification_type_filter { + __u32 type; + __u32 info_filter; + __u32 info_mask; + __u32 subtype_filter[8]; + }; + +Where: + + * ``type`` is the event type to filter for and should be something like + "WATCH_TYPE_KEY_NOTIFY" + + * ``info_filter`` and ``info_mask`` act as a filter on the info field of the + notification record. The notification is only written into the buffer if:: + + (watch.info & info_mask) == info_filter + + This could be used, for example, to ignore events that are not exactly on + the watched point in a mount tree. + + * ``subtype_filter`` is a bitmask indicating the subtypes that are of + interest. Bit 0 of subtype_filter[0] corresponds to subtype 0, bit 1 to + subtype 1, and so on. + +If the argument to the ioctl() is NULL, then the filters will be removed and +all events from the watched sources will come through.
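Putting the above together, a userspace program might construct and apply a filter that accepts only the first two subtypes of key notifications along these lines (a sketch only: error handling is minimal and ``fd`` is assumed to be the descriptor of the notification pipe)::

    struct {
        struct watch_notification_filter hdr;
        struct watch_notification_type_filter tf[1];
    } filter = {
        .hdr.nr_filters          = 1,
        .tf[0].type              = WATCH_TYPE_KEY_NOTIFY,
        .tf[0].subtype_filter[0] = 0x3,    /* accept subtypes 0 and 1 only */
    };

    if (ioctl(fd, IOC_WATCH_QUEUE_SET_FILTER, &filter) == -1)
        perror("IOC_WATCH_QUEUE_SET_FILTER");

Because ``filters[]`` is a flexible array at the end of the header, wrapping the header and the type filters in one struct, as above, is a convenient way to keep them contiguous.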
+ + +Userspace Code Example +====================== + +A buffer is created with something like the following:: + + pipe2(fds, O_NOTIFICATION_PIPE); + ioctl(fds[1], IOC_WATCH_QUEUE_SET_SIZE, 256); + +It can then be set to receive keyring change notifications:: + + keyctl(KEYCTL_WATCH_KEY, KEY_SPEC_SESSION_KEYRING, fds[1], 0x01); + +The notifications can then be consumed by something like the following:: + + static void consumer(int rfd) + { + unsigned char buffer[128]; + ssize_t buf_len; + + while (buf_len = read(rfd, buffer, sizeof(buffer)), + buf_len > 0 + ) { + void *p = buffer; + void *end = buffer + buf_len; + while (p < end) { + union { + struct watch_notification n; + unsigned char buf1[128]; + } n; + size_t largest, len; + + largest = end - p; + if (largest > 128) + largest = 128; + memcpy(&n, p, largest); + + len = (n.n.info & WATCH_INFO_LENGTH) >> + WATCH_INFO_LENGTH__SHIFT; + if (len == 0 || len > largest) + return; + + switch (n.n.type) { + case WATCH_TYPE_META: + got_meta(&n.n); + break; + case WATCH_TYPE_KEY_NOTIFY: + saw_key_change(&n.n); + break; + } + + p += len; + } + } + } diff --git a/fs/pipe.c b/fs/pipe.c index 16fb72e9abf7f..da9bc1f21fd19 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -459,6 +460,13 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from) goto out; } +#ifdef CONFIG_WATCH_QUEUE + if (pipe->watch_queue) { + ret = -EXDEV; + goto out; + } +#endif + /* * Only wake up if the pipe started out empty, since * otherwise there should be no readers waiting. @@ -628,22 +636,37 @@ static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) int count, head, tail, mask; switch (cmd) { - case FIONREAD: - __pipe_lock(pipe); - count = 0; - head = pipe->head; - tail = pipe->tail; - mask = pipe->ring_size - 1; + case FIONREAD: + __pipe_lock(pipe); + count = 0; + head = pipe->head; + tail = pipe->tail; + mask = pipe->ring_size - 1; - while (tail != head) { - count += pipe->bufs[tail & mask].len; - tail++; - } - __pipe_unlock(pipe); + while (tail != head) { + count += pipe->bufs[tail & mask].len; + tail++; + } + __pipe_unlock(pipe); - return put_user(count, (int __user *)arg); - default: - return -ENOIOCTLCMD; + return put_user(count, (int __user *)arg); + +#ifdef CONFIG_WATCH_QUEUE + case IOC_WATCH_QUEUE_SET_SIZE: { + int ret; + __pipe_lock(pipe); + ret = watch_queue_set_size(pipe, arg); + __pipe_unlock(pipe); + return ret; + } + + case IOC_WATCH_QUEUE_SET_FILTER: + return watch_queue_set_filter( + pipe, (struct watch_notification_filter __user *)arg); +#endif + + default: + return -ENOIOCTLCMD; } } @@ -754,27 +777,27 @@ pipe_fasync(int fd, struct file *filp, int on) return retval; } -static unsigned long account_pipe_buffers(struct user_struct *user, - unsigned long old, unsigned long new) +unsigned long account_pipe_buffers(struct user_struct *user, + unsigned long old, unsigned long new) { return atomic_long_add_return(new - old, &user->pipe_bufs); } -static bool too_many_pipe_buffers_soft(unsigned long user_bufs) +bool too_many_pipe_buffers_soft(unsigned long user_bufs) { unsigned long soft_limit = READ_ONCE(pipe_user_pages_soft); return soft_limit && user_bufs > soft_limit; } -static bool too_many_pipe_buffers_hard(unsigned long user_bufs) +bool too_many_pipe_buffers_hard(unsigned long user_bufs) { unsigned long hard_limit = READ_ONCE(pipe_user_pages_hard); return hard_limit && user_bufs > hard_limit; } -static bool is_unprivileged_user(void) +bool
pipe_is_unprivileged_user(void) { return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN); } @@ -796,12 +819,12 @@ struct pipe_inode_info *alloc_pipe_info(void) user_bufs = account_pipe_buffers(user, 0, pipe_bufs); - if (too_many_pipe_buffers_soft(user_bufs) && is_unprivileged_user()) { + if (too_many_pipe_buffers_soft(user_bufs) && pipe_is_unprivileged_user()) { user_bufs = account_pipe_buffers(user, pipe_bufs, 1); pipe_bufs = 1; } - if (too_many_pipe_buffers_hard(user_bufs) && is_unprivileged_user()) + if (too_many_pipe_buffers_hard(user_bufs) && pipe_is_unprivileged_user()) goto out_revert_acct; pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer), @@ -813,6 +836,7 @@ struct pipe_inode_info *alloc_pipe_info(void) pipe->r_counter = pipe->w_counter = 1; pipe->max_usage = pipe_bufs; pipe->ring_size = pipe_bufs; + pipe->nr_accounted = pipe_bufs; pipe->user = user; mutex_init(&pipe->mutex); return pipe; @@ -830,7 +854,14 @@ void free_pipe_info(struct pipe_inode_info *pipe) { int i; - (void) account_pipe_buffers(pipe->user, pipe->ring_size, 0); +#ifdef CONFIG_WATCH_QUEUE + if (pipe->watch_queue) { + watch_queue_clear(pipe->watch_queue); + put_watch_queue(pipe->watch_queue); + } +#endif + + (void) account_pipe_buffers(pipe->user, pipe->nr_accounted, 0); free_uid(pipe->user); for (i = 0; i < pipe->ring_size; i++) { struct pipe_buffer *buf = pipe->bufs + i; @@ -906,6 +937,17 @@ int create_pipe_files(struct file **res, int flags) if (!inode) return -ENFILE; + if (flags & O_NOTIFICATION_PIPE) { +#ifdef CONFIG_WATCH_QUEUE + if (watch_queue_init(inode->i_pipe) < 0) { + iput(inode); + return -ENOMEM; + } +#else + return -ENOPKG; +#endif + } + f = alloc_file_pseudo(inode, pipe_mnt, "", O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)), &pipefifo_fops); @@ -936,7 +978,7 @@ static int __do_pipe_flags(int *fd, struct file **files, int flags) int error; int fdw, fdr; - if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT)) + if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT | O_NOTIFICATION_PIPE)) return -EINVAL; error = create_pipe_files(files, flags); @@ -1184,42 +1226,12 @@ unsigned int round_pipe_size(unsigned long size) } /* - * Allocate a new array of pipe buffers and copy the info over. Returns the - * pipe size if successful, or return -ERROR on error. + * Resize the pipe ring to a number of slots. */ -static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg) +int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots) { struct pipe_buffer *bufs; - unsigned int size, nr_slots, head, tail, mask, n; - unsigned long user_bufs; - long ret = 0; - - size = round_pipe_size(arg); - nr_slots = size >> PAGE_SHIFT; - - if (!nr_slots) - return -EINVAL; - - /* - * If trying to increase the pipe capacity, check that an - * unprivileged user is not trying to exceed various limits - * (soft limit check here, hard limit check just below). - * Decreasing the pipe capacity is always permitted, even - * if the user is currently over a limit. - */ - if (nr_slots > pipe->ring_size && - size > pipe_max_size && !capable(CAP_SYS_RESOURCE)) - return -EPERM; - - user_bufs = account_pipe_buffers(pipe->user, pipe->ring_size, nr_slots); - - if (nr_slots > pipe->ring_size && - (too_many_pipe_buffers_hard(user_bufs) || - too_many_pipe_buffers_soft(user_bufs)) && - is_unprivileged_user()) { - ret = -EPERM; - goto out_revert_acct; - } + unsigned int head, tail, mask, n; /* * We can shrink the pipe, if arg is greater than the ring occupancy. 
@@ -1231,17 +1243,13 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg) head = pipe->head; tail = pipe->tail; n = pipe_occupancy(pipe->head, pipe->tail); - if (nr_slots < n) { - ret = -EBUSY; - goto out_revert_acct; - } + if (nr_slots < n) + return -EBUSY; bufs = kcalloc(nr_slots, sizeof(*bufs), GFP_KERNEL_ACCOUNT | __GFP_NOWARN); - if (unlikely(!bufs)) { - ret = -ENOMEM; - goto out_revert_acct; - } + if (unlikely(!bufs)) + return -ENOMEM; /* * The pipe array wraps around, so just start the new one at zero @@ -1269,16 +1277,68 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg) kfree(pipe->bufs); pipe->bufs = bufs; pipe->ring_size = nr_slots; - pipe->max_usage = nr_slots; + if (pipe->max_usage > nr_slots) + pipe->max_usage = nr_slots; pipe->tail = tail; pipe->head = head; /* This might have made more room for writers */ wake_up_interruptible(&pipe->wr_wait); + return 0; +} + +/* + * Allocate a new array of pipe buffers and copy the info over. Returns the + * pipe size if successful, or return -ERROR on error. + */ +static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg) +{ + unsigned long user_bufs; + unsigned int nr_slots, size; + long ret = 0; + +#ifdef CONFIG_WATCH_QUEUE + if (pipe->watch_queue) + return -EBUSY; +#endif + + size = round_pipe_size(arg); + nr_slots = size >> PAGE_SHIFT; + + if (!nr_slots) + return -EINVAL; + + /* + * If trying to increase the pipe capacity, check that an + * unprivileged user is not trying to exceed various limits + * (soft limit check here, hard limit check just below). + * Decreasing the pipe capacity is always permitted, even + * if the user is currently over a limit. + */ + if (nr_slots > pipe->max_usage && + size > pipe_max_size && !capable(CAP_SYS_RESOURCE)) + return -EPERM; + + user_bufs = account_pipe_buffers(pipe->user, pipe->nr_accounted, nr_slots); + + if (nr_slots > pipe->max_usage && + (too_many_pipe_buffers_hard(user_bufs) || + too_many_pipe_buffers_soft(user_bufs)) && + pipe_is_unprivileged_user()) { + ret = -EPERM; + goto out_revert_acct; + } + + ret = pipe_resize_ring(pipe, nr_slots); + if (ret < 0) + goto out_revert_acct; + + pipe->max_usage = nr_slots; + pipe->nr_accounted = nr_slots; return pipe->max_usage * PAGE_SIZE; out_revert_acct: - (void) account_pipe_buffers(pipe->user, nr_slots, pipe->ring_size); + (void) account_pipe_buffers(pipe->user, nr_slots, pipe->nr_accounted); return ret; } @@ -1287,9 +1347,17 @@ out_revert_acct: * location, so checking ->i_pipe is not enough to verify that this is a * pipe. */ -struct pipe_inode_info *get_pipe_info(struct file *file) +struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice) { - return file->f_op == &pipefifo_fops ? 
file->private_data : NULL; + struct pipe_inode_info *pipe = file->private_data; + + if (file->f_op != &pipefifo_fops || !pipe) + return NULL; +#ifdef CONFIG_WATCH_QUEUE + if (for_splice && pipe->watch_queue) + return NULL; +#endif + return pipe; } long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg) @@ -1297,7 +1365,7 @@ long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg) struct pipe_inode_info *pipe; long ret; - pipe = get_pipe_info(file); + pipe = get_pipe_info(file, false); if (!pipe) return -EBADF; diff --git a/fs/splice.c b/fs/splice.c index fd0a1e7e5959a..50f3c0260c005 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -1122,8 +1122,8 @@ long do_splice(struct file *in, loff_t __user *off_in, !(out->f_mode & FMODE_WRITE))) return -EBADF; - ipipe = get_pipe_info(in); - opipe = get_pipe_info(out); + ipipe = get_pipe_info(in, true); + opipe = get_pipe_info(out, true); if (ipipe && opipe) { if (off_in || off_out) @@ -1273,7 +1273,7 @@ static int pipe_to_user(struct pipe_inode_info *pipe, struct pipe_buffer *buf, static long vmsplice_to_user(struct file *file, struct iov_iter *iter, unsigned int flags) { - struct pipe_inode_info *pipe = get_pipe_info(file); + struct pipe_inode_info *pipe = get_pipe_info(file, true); struct splice_desc sd = { .total_len = iov_iter_count(iter), .flags = flags, @@ -1308,7 +1308,7 @@ static long vmsplice_to_pipe(struct file *file, struct iov_iter *iter, if (flags & SPLICE_F_GIFT) buf_flag = PIPE_BUF_FLAG_GIFT; - pipe = get_pipe_info(file); + pipe = get_pipe_info(file, true); if (!pipe) return -EBADF; @@ -1757,8 +1757,8 @@ static int link_pipe(struct pipe_inode_info *ipipe, static long do_tee(struct file *in, struct file *out, size_t len, unsigned int flags) { - struct pipe_inode_info *ipipe = get_pipe_info(in); - struct pipe_inode_info *opipe = get_pipe_info(out); + struct pipe_inode_info *ipipe = get_pipe_info(in, true); + struct pipe_inode_info *opipe = get_pipe_info(out, true); int ret = -EINVAL; if (unlikely(!(in->f_mode & FMODE_READ) || diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h index ae58fad7f1e0d..1d3eaa233f4a8 100644 --- a/include/linux/pipe_fs_i.h +++ b/include/linux/pipe_fs_i.h @@ -35,6 +35,7 @@ struct pipe_buffer { * @tail: The point of buffer consumption * @max_usage: The maximum number of slots that may be used in the ring * @ring_size: total number of buffers (should be a power of 2) + * @nr_accounted: The amount this pipe accounts for in user->pipe_bufs * @tmp_page: cached released page * @readers: number of current readers of this pipe * @writers: number of current writers of this pipe @@ -45,6 +46,7 @@ struct pipe_buffer { * @fasync_writers: writer side fasync * @bufs: the circular array of pipe buffers * @user: the user who created this pipe + * @watch_queue: If this pipe is a watch_queue, this is the stuff for that **/ struct pipe_inode_info { struct mutex mutex; @@ -53,6 +55,7 @@ struct pipe_inode_info { unsigned int tail; unsigned int max_usage; unsigned int ring_size; + unsigned int nr_accounted; unsigned int readers; unsigned int writers; unsigned int files; @@ -63,6 +66,9 @@ struct pipe_inode_info { struct fasync_struct *fasync_writers; struct pipe_buffer *bufs; struct user_struct *user; +#ifdef CONFIG_WATCH_QUEUE + struct watch_queue *watch_queue; +#endif }; /* @@ -237,9 +243,20 @@ void pipe_buf_mark_unmergeable(struct pipe_buffer *buf); extern const struct pipe_buf_operations nosteal_pipe_buf_ops; +#ifdef CONFIG_WATCH_QUEUE +unsigned long account_pipe_buffers(struct user_struct 
*user, + unsigned long old, unsigned long new); +bool too_many_pipe_buffers_soft(unsigned long user_bufs); +bool too_many_pipe_buffers_hard(unsigned long user_bufs); +bool pipe_is_unprivileged_user(void); +#endif + /* for F_SETPIPE_SZ and F_GETPIPE_SZ */ +#ifdef CONFIG_WATCH_QUEUE +int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots); +#endif long pipe_fcntl(struct file *, unsigned int, unsigned long arg); -struct pipe_inode_info *get_pipe_info(struct file *file); +struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice); int create_pipe_files(struct file **, int); unsigned int round_pipe_size(unsigned long size); diff --git a/include/linux/watch_queue.h b/include/linux/watch_queue.h new file mode 100644 index 0000000000000..5e08db2adc319 --- /dev/null +++ b/include/linux/watch_queue.h @@ -0,0 +1,127 @@ +// SPDX-License-Identifier: GPL-2.0 +/* User-mappable watch queue + * + * Copyright (C) 2020 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * See Documentation/watch_queue.rst + */ + +#ifndef _LINUX_WATCH_QUEUE_H +#define _LINUX_WATCH_QUEUE_H + +#include +#include +#include + +#ifdef CONFIG_WATCH_QUEUE + +struct cred; + +struct watch_type_filter { + enum watch_notification_type type; + __u32 subtype_filter[1]; /* Bitmask of subtypes to filter on */ + __u32 info_filter; /* Filter on watch_notification::info */ + __u32 info_mask; /* Mask of relevant bits in info_filter */ +}; + +struct watch_filter { + union { + struct rcu_head rcu; + unsigned long type_filter[2]; /* Bitmask of accepted types */ + }; + u32 nr_filters; /* Number of filters */ + struct watch_type_filter filters[]; +}; + +struct watch_queue { + struct rcu_head rcu; + struct watch_filter __rcu *filter; + struct pipe_inode_info *pipe; /* The pipe we're using as a buffer */ + struct hlist_head watches; /* Contributory watches */ + struct page **notes; /* Preallocated notifications */ + unsigned long *notes_bitmap; /* Allocation bitmap for notes */ + struct kref usage; /* Object usage count */ + spinlock_t lock; + unsigned int nr_notes; /* Number of notes */ + unsigned int nr_pages; /* Number of pages in notes[] */ + bool defunct; /* T when queues closed */ +}; + +/* + * Representation of a watch on an object. + */ +struct watch { + union { + struct rcu_head rcu; + u32 info_id; /* ID to be OR'd in to info field */ + }; + struct watch_queue __rcu *queue; /* Queue to post events to */ + struct hlist_node queue_node; /* Link in queue->watches */ + struct watch_list __rcu *watch_list; + struct hlist_node list_node; /* Link in watch_list->watchers */ + const struct cred *cred; /* Creds of the owner of the watch */ + void *private; /* Private data for the watched object */ + u64 id; /* Internal identifier */ + struct kref usage; /* Object usage count */ +}; + +/* + * List of watches on an object. 
+ */ +struct watch_list { + struct rcu_head rcu; + struct hlist_head watchers; + void (*release_watch)(struct watch *); + spinlock_t lock; +}; + +extern void __post_watch_notification(struct watch_list *, + struct watch_notification *, + const struct cred *, + u64); +extern struct watch_queue *get_watch_queue(int); +extern void put_watch_queue(struct watch_queue *); +extern void init_watch(struct watch *, struct watch_queue *); +extern int add_watch_to_object(struct watch *, struct watch_list *); +extern int remove_watch_from_object(struct watch_list *, struct watch_queue *, u64, bool); +extern long watch_queue_set_size(struct pipe_inode_info *, unsigned int); +extern long watch_queue_set_filter(struct pipe_inode_info *, + struct watch_notification_filter __user *); +extern int watch_queue_init(struct pipe_inode_info *); +extern void watch_queue_clear(struct watch_queue *); + +static inline void init_watch_list(struct watch_list *wlist, + void (*release_watch)(struct watch *)) +{ + INIT_HLIST_HEAD(&wlist->watchers); + spin_lock_init(&wlist->lock); + wlist->release_watch = release_watch; +} + +static inline void post_watch_notification(struct watch_list *wlist, + struct watch_notification *n, + const struct cred *cred, + u64 id) +{ + if (unlikely(wlist)) + __post_watch_notification(wlist, n, cred, id); +} + +static inline void remove_watch_list(struct watch_list *wlist, u64 id) +{ + if (wlist) { + remove_watch_from_object(wlist, NULL, id, true); + kfree_rcu(wlist, rcu); + } +} + +/** + * watch_sizeof - Calculate the information part of the size of a watch record, + * given the structure size. + */ +#define watch_sizeof(STRUCT) (sizeof(STRUCT) << WATCH_INFO_LENGTH__SHIFT) + +#endif + +#endif /* _LINUX_WATCH_QUEUE_H */ diff --git a/include/uapi/linux/watch_queue.h b/include/uapi/linux/watch_queue.h index 9df72227f5154..3a5790f1f05de 100644 --- a/include/uapi/linux/watch_queue.h +++ b/include/uapi/linux/watch_queue.h @@ -4,9 +4,13 @@ #include #include +#include #define O_NOTIFICATION_PIPE O_EXCL /* Parameter to pipe2() selecting notification pipe */ +#define IOC_WATCH_QUEUE_SET_SIZE _IO('W', 0x60) /* Set the size in pages */ +#define IOC_WATCH_QUEUE_SET_FILTER _IO('W', 0x61) /* Set the filter */ + enum watch_notification_type { WATCH_TYPE_META = 0, /* Special record */ WATCH_TYPE__NR = 1 @@ -41,6 +45,22 @@ struct watch_notification { #define WATCH_INFO_FLAG_7 0x00800000 }; +/* + * Notification filtering rules (IOC_WATCH_QUEUE_SET_FILTER). + */ +struct watch_notification_type_filter { + __u32 type; /* Type to apply filter to */ + __u32 info_filter; /* Filter on watch_notification::info */ + __u32 info_mask; /* Mask of relevant bits in info_filter */ + __u32 subtype_filter[8]; /* Bitmask of subtypes to filter on */ +}; + +struct watch_notification_filter { + __u32 nr_filters; /* Number of filters */ + __u32 __reserved; /* Must be 0 */ + struct watch_notification_type_filter filters[]; +}; + /* * Extended watch removal notification. This is used optionally if the type diff --git a/init/Kconfig b/init/Kconfig index 74a5ac65644f3..c95a2a5654a90 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -326,6 +326,18 @@ config POSIX_MQUEUE_SYSCTL depends on SYSCTL default y +config WATCH_QUEUE + bool "General notification queue" + default n + help + + This is a general notification queue for the kernel to pass events to + userspace by splicing them into pipes. It can be used in conjunction + with watches for key/keyring change notifications and device + notifications. 
+ + See Documentation/watch_queue.rst + config CROSS_MEMORY_ATTACH bool "Enable process_vm_readv/writev syscalls" depends on MMU diff --git a/kernel/Makefile b/kernel/Makefile index 4cb4130ced329..41e7e3ae07ec1 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -115,6 +115,7 @@ obj-$(CONFIG_TORTURE_TEST) += torture.o obj-$(CONFIG_HAS_IOMEM) += iomem.o obj-$(CONFIG_RSEQ) += rseq.o +obj-$(CONFIG_WATCH_QUEUE) += watch_queue.o obj-$(CONFIG_SYSCTL_KUNIT_TEST) += sysctl-test.o diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c new file mode 100644 index 0000000000000..c103e34f87056 --- /dev/null +++ b/kernel/watch_queue.c @@ -0,0 +1,657 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Watch queue and general notification mechanism, built on pipes + * + * Copyright (C) 2020 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * See Documentation/watch_queue.rst + */ + +#define pr_fmt(fmt) "watchq: " fmt +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +MODULE_DESCRIPTION("Watch queue"); +MODULE_AUTHOR("Red Hat, Inc."); +MODULE_LICENSE("GPL"); + +#define WATCH_QUEUE_NOTE_SIZE 128 +#define WATCH_QUEUE_NOTES_PER_PAGE (PAGE_SIZE / WATCH_QUEUE_NOTE_SIZE) + +static void watch_queue_pipe_buf_release(struct pipe_inode_info *pipe, + struct pipe_buffer *buf) +{ + struct watch_queue *wqueue = (struct watch_queue *)buf->private; + struct page *page; + unsigned int bit; + + /* We need to work out which note within the page this refers to, but + * the note might have been maximum size, so merely ANDing the offset + * off doesn't work. OTOH, the note must've been more than zero size. + */ + bit = buf->offset + buf->len; + if ((bit & (WATCH_QUEUE_NOTE_SIZE - 1)) == 0) + bit -= WATCH_QUEUE_NOTE_SIZE; + bit /= WATCH_QUEUE_NOTE_SIZE; + + page = buf->page; + bit += page->index; + + set_bit(bit, wqueue->notes_bitmap); +} + +static int watch_queue_pipe_buf_steal(struct pipe_inode_info *pipe, + struct pipe_buffer *buf) +{ + return -1; /* No. */ +} + +/* New data written to a pipe may be appended to a buffer with this type. */ +static const struct pipe_buf_operations watch_queue_pipe_buf_ops = { + .confirm = generic_pipe_buf_confirm, + .release = watch_queue_pipe_buf_release, + .steal = watch_queue_pipe_buf_steal, + .get = generic_pipe_buf_get, +}; + +/* + * Post a notification to a watch queue. 
+ */ +static bool post_one_notification(struct watch_queue *wqueue, + struct watch_notification *n) +{ + void *p; + struct pipe_inode_info *pipe = wqueue->pipe; + struct pipe_buffer *buf; + struct page *page; + unsigned int head, tail, mask, note, offset, len; + bool done = false; + + if (!pipe) + return false; + + spin_lock_irq(&pipe->rd_wait.lock); + + if (wqueue->defunct) + goto out; + + mask = pipe->ring_size - 1; + head = pipe->head; + tail = pipe->tail; + if (pipe_full(head, tail, pipe->ring_size)) + goto lost; + + note = find_first_bit(wqueue->notes_bitmap, wqueue->nr_notes); + if (note >= wqueue->nr_notes) + goto lost; + + page = wqueue->notes[note / WATCH_QUEUE_NOTES_PER_PAGE]; + offset = note % WATCH_QUEUE_NOTES_PER_PAGE * WATCH_QUEUE_NOTE_SIZE; + get_page(page); + len = n->info & WATCH_INFO_LENGTH; + p = kmap_atomic(page); + memcpy(p + offset, n, len); + kunmap_atomic(p); + + buf = &pipe->bufs[head & mask]; + buf->page = page; + buf->private = (unsigned long)wqueue; + buf->ops = &watch_queue_pipe_buf_ops; + buf->offset = offset; + buf->len = len; + buf->flags = 0; + pipe->head = head + 1; + + if (!test_and_clear_bit(note, wqueue->notes_bitmap)) { + spin_unlock_irq(&pipe->rd_wait.lock); + BUG(); + } + wake_up_interruptible_sync_poll_locked(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM); + done = true; + +out: + spin_unlock_irq(&pipe->rd_wait.lock); + if (done) + kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); + return done; + +lost: + goto out; +} + +/* + * Apply filter rules to a notification. + */ +static bool filter_watch_notification(const struct watch_filter *wf, + const struct watch_notification *n) +{ + const struct watch_type_filter *wt; + unsigned int st_bits = sizeof(wt->subtype_filter[0]) * 8; + unsigned int st_index = n->subtype / st_bits; + unsigned int st_bit = 1U << (n->subtype % st_bits); + int i; + + if (!test_bit(n->type, wf->type_filter)) + return false; + + for (i = 0; i < wf->nr_filters; i++) { + wt = &wf->filters[i]; + if (n->type == wt->type && + (wt->subtype_filter[st_index] & st_bit) && + (n->info & wt->info_mask) == wt->info_filter) + return true; + } + + return false; /* If there is a filter, the default is to reject. */ +} + +/** + * __post_watch_notification - Post an event notification + * @wlist: The watch list to post the event to. + * @n: The notification record to post. + * @cred: The creds of the process that triggered the notification. + * @id: The ID to match on the watch. + * + * Post a notification of an event into a set of watch queues and let the users + * know. + * + * The size of the notification should be set in n->info & WATCH_INFO_LENGTH and + * should be in units of sizeof(*n). 
+ */ +void __post_watch_notification(struct watch_list *wlist, + struct watch_notification *n, + const struct cred *cred, + u64 id) +{ + const struct watch_filter *wf; + struct watch_queue *wqueue; + struct watch *watch; + + if (((n->info & WATCH_INFO_LENGTH) >> WATCH_INFO_LENGTH__SHIFT) == 0) { + WARN_ON(1); + return; + } + + rcu_read_lock(); + + hlist_for_each_entry_rcu(watch, &wlist->watchers, list_node) { + if (watch->id != id) + continue; + n->info &= ~WATCH_INFO_ID; + n->info |= watch->info_id; + + wqueue = rcu_dereference(watch->queue); + wf = rcu_dereference(wqueue->filter); + if (wf && !filter_watch_notification(wf, n)) + continue; + + if (security_post_notification(watch->cred, cred, n) < 0) + continue; + + post_one_notification(wqueue, n); + } + + rcu_read_unlock(); +} +EXPORT_SYMBOL(__post_watch_notification); + +/* + * Allocate sufficient pages to preallocation for the requested number of + * notifications. + */ +long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes) +{ + struct watch_queue *wqueue = pipe->watch_queue; + struct page **pages; + unsigned long *bitmap; + unsigned long user_bufs; + unsigned int bmsize; + int ret, i, nr_pages; + + if (!wqueue) + return -ENODEV; + if (wqueue->notes) + return -EBUSY; + + if (nr_notes < 1 || + nr_notes > 512) /* TODO: choose a better hard limit */ + return -EINVAL; + + nr_pages = (nr_notes + WATCH_QUEUE_NOTES_PER_PAGE - 1); + nr_pages /= WATCH_QUEUE_NOTES_PER_PAGE; + user_bufs = account_pipe_buffers(pipe->user, pipe->nr_accounted, nr_pages); + + if (nr_pages > pipe->max_usage && + (too_many_pipe_buffers_hard(user_bufs) || + too_many_pipe_buffers_soft(user_bufs)) && + pipe_is_unprivileged_user()) { + ret = -EPERM; + goto error; + } + + ret = pipe_resize_ring(pipe, nr_notes); + if (ret < 0) + goto error; + + pages = kcalloc(sizeof(struct page *), nr_pages, GFP_KERNEL); + if (!pages) + goto error; + + for (i = 0; i < nr_pages; i++) { + pages[i] = alloc_page(GFP_KERNEL); + if (!pages[i]) + goto error_p; + pages[i]->index = i * WATCH_QUEUE_NOTES_PER_PAGE; + } + + bmsize = (nr_notes + BITS_PER_LONG - 1) / BITS_PER_LONG; + bmsize *= sizeof(unsigned long); + bitmap = kmalloc(bmsize, GFP_KERNEL); + if (!bitmap) + goto error_p; + + memset(bitmap, 0xff, bmsize); + wqueue->notes = pages; + wqueue->notes_bitmap = bitmap; + wqueue->nr_pages = nr_pages; + wqueue->nr_notes = nr_pages * WATCH_QUEUE_NOTES_PER_PAGE; + return 0; + +error_p: + for (i = 0; i < nr_pages; i++) + __free_page(pages[i]); + kfree(pages); +error: + (void) account_pipe_buffers(pipe->user, nr_pages, pipe->nr_accounted); + return ret; +} + +/* + * Set the filter on a watch queue. 
+ */ +long watch_queue_set_filter(struct pipe_inode_info *pipe, + struct watch_notification_filter __user *_filter) +{ + struct watch_notification_type_filter *tf; + struct watch_notification_filter filter; + struct watch_type_filter *q; + struct watch_filter *wfilter; + struct watch_queue *wqueue = pipe->watch_queue; + int ret, nr_filter = 0, i; + + if (!wqueue) + return -ENODEV; + + if (!_filter) { + /* Remove the old filter */ + wfilter = NULL; + goto set; + } + + /* Grab the user's filter specification */ + if (copy_from_user(&filter, _filter, sizeof(filter)) != 0) + return -EFAULT; + if (filter.nr_filters == 0 || + filter.nr_filters > 16 || + filter.__reserved != 0) + return -EINVAL; + + tf = memdup_user(_filter->filters, filter.nr_filters * sizeof(*tf)); + if (IS_ERR(tf)) + return PTR_ERR(tf); + + ret = -EINVAL; + for (i = 0; i < filter.nr_filters; i++) { + if ((tf[i].info_filter & ~tf[i].info_mask) || + tf[i].info_mask & WATCH_INFO_LENGTH) + goto err_filter; + /* Ignore any unknown types */ + if (tf[i].type >= sizeof(wfilter->type_filter) * 8) + continue; + nr_filter++; + } + + /* Now we need to build the internal filter from only the relevant + * user-specified filters. + */ + ret = -ENOMEM; + wfilter = kzalloc(struct_size(wfilter, filters, nr_filter), GFP_KERNEL); + if (!wfilter) + goto err_filter; + wfilter->nr_filters = nr_filter; + + q = wfilter->filters; + for (i = 0; i < filter.nr_filters; i++) { + if (tf[i].type >= sizeof(wfilter->type_filter) * BITS_PER_LONG) + continue; + + q->type = tf[i].type; + q->info_filter = tf[i].info_filter; + q->info_mask = tf[i].info_mask; + q->subtype_filter[0] = tf[i].subtype_filter[0]; + __set_bit(q->type, wfilter->type_filter); + q++; + } + + kfree(tf); +set: + pipe_lock(pipe); + wfilter = rcu_replace_pointer(wqueue->filter, wfilter, + lockdep_is_held(&pipe->mutex)); + pipe_unlock(pipe); + if (wfilter) + kfree_rcu(wfilter, rcu); + return 0; + +err_filter: + kfree(tf); + return ret; +} + +static void __put_watch_queue(struct kref *kref) +{ + struct watch_queue *wqueue = + container_of(kref, struct watch_queue, usage); + struct watch_filter *wfilter; + int i; + + for (i = 0; i < wqueue->nr_pages; i++) + __free_page(wqueue->notes[i]); + + wfilter = rcu_access_pointer(wqueue->filter); + if (wfilter) + kfree_rcu(wfilter, rcu); + kfree_rcu(wqueue, rcu); +} + +/** + * put_watch_queue - Dispose of a ref on a watchqueue. + * @wqueue: The watch queue to unref. + */ +void put_watch_queue(struct watch_queue *wqueue) +{ + kref_put(&wqueue->usage, __put_watch_queue); +} +EXPORT_SYMBOL(put_watch_queue); + +static void free_watch(struct rcu_head *rcu) +{ + struct watch *watch = container_of(rcu, struct watch, rcu); + + put_watch_queue(rcu_access_pointer(watch->queue)); + put_cred(watch->cred); +} + +static void __put_watch(struct kref *kref) +{ + struct watch *watch = container_of(kref, struct watch, usage); + + call_rcu(&watch->rcu, free_watch); +} + +/* + * Discard a watch. + */ +static void put_watch(struct watch *watch) +{ + kref_put(&watch->usage, __put_watch); +} + +/** + * init_watch_queue - Initialise a watch + * @watch: The watch to initialise. + * @wqueue: The queue to assign. + * + * Initialise a watch and set the watch queue. 
+ */ +void init_watch(struct watch *watch, struct watch_queue *wqueue) +{ + kref_init(&watch->usage); + INIT_HLIST_NODE(&watch->list_node); + INIT_HLIST_NODE(&watch->queue_node); + rcu_assign_pointer(watch->queue, wqueue); +} + +/** + * add_watch_to_object - Add a watch on an object to a watch list + * @watch: The watch to add + * @wlist: The watch list to add to + * + * @watch->queue must have been set to point to the queue to post notifications + * to and the watch list of the object to be watched. @watch->cred must also + * have been set to the appropriate credentials and a ref taken on them. + * + * The caller must pin the queue and the list both and must hold the list + * locked against racing watch additions/removals. + */ +int add_watch_to_object(struct watch *watch, struct watch_list *wlist) +{ + struct watch_queue *wqueue = rcu_access_pointer(watch->queue); + struct watch *w; + + hlist_for_each_entry(w, &wlist->watchers, list_node) { + struct watch_queue *wq = rcu_access_pointer(w->queue); + if (wqueue == wq && watch->id == w->id) + return -EBUSY; + } + + watch->cred = get_current_cred(); + rcu_assign_pointer(watch->watch_list, wlist); + + spin_lock_bh(&wqueue->lock); + kref_get(&wqueue->usage); + kref_get(&watch->usage); + hlist_add_head(&watch->queue_node, &wqueue->watches); + spin_unlock_bh(&wqueue->lock); + + hlist_add_head(&watch->list_node, &wlist->watchers); + return 0; +} +EXPORT_SYMBOL(add_watch_to_object); + +/** + * remove_watch_from_object - Remove a watch or all watches from an object. + * @wlist: The watch list to remove from + * @wq: The watch queue of interest (ignored if @all is true) + * @id: The ID of the watch to remove (ignored if @all is true) + * @all: True to remove all objects + * + * Remove a specific watch or all watches from an object. A notification is + * sent to the watcher to tell them that this happened. + */ +int remove_watch_from_object(struct watch_list *wlist, struct watch_queue *wq, + u64 id, bool all) +{ + struct watch_notification_removal n; + struct watch_queue *wqueue; + struct watch *watch; + int ret = -EBADSLT; + + rcu_read_lock(); + +again: + spin_lock(&wlist->lock); + hlist_for_each_entry(watch, &wlist->watchers, list_node) { + if (all || + (watch->id == id && rcu_access_pointer(watch->queue) == wq)) + goto found; + } + spin_unlock(&wlist->lock); + goto out; + +found: + ret = 0; + hlist_del_init_rcu(&watch->list_node); + rcu_assign_pointer(watch->watch_list, NULL); + spin_unlock(&wlist->lock); + + /* We now own the reference on watch that used to belong to wlist. */ + + n.watch.type = WATCH_TYPE_META; + n.watch.subtype = WATCH_META_REMOVAL_NOTIFICATION; + n.watch.info = watch->info_id | watch_sizeof(n.watch); + n.id = id; + if (id != 0) + n.watch.info = watch->info_id | watch_sizeof(n); + + wqueue = rcu_dereference(watch->queue); + + /* We don't need the watch list lock for the next bit as RCU is + * protecting *wqueue from deallocation. 
+ */ + if (wqueue) { + post_one_notification(wqueue, &n.watch); + + spin_lock_bh(&wqueue->lock); + + if (!hlist_unhashed(&watch->queue_node)) { + hlist_del_init_rcu(&watch->queue_node); + put_watch(watch); + } + + spin_unlock_bh(&wqueue->lock); + } + + if (wlist->release_watch) { + void (*release_watch)(struct watch *); + + release_watch = wlist->release_watch; + rcu_read_unlock(); + (*release_watch)(watch); + rcu_read_lock(); + } + put_watch(watch); + + if (all && !hlist_empty(&wlist->watchers)) + goto again; +out: + rcu_read_unlock(); + return ret; +} +EXPORT_SYMBOL(remove_watch_from_object); + +/* + * Remove all the watches that are contributory to a queue. This has the + * potential to race with removal of the watches by the destruction of the + * objects being watched or with the distribution of notifications. + */ +void watch_queue_clear(struct watch_queue *wqueue) +{ + struct watch_list *wlist; + struct watch *watch; + bool release; + + rcu_read_lock(); + spin_lock_bh(&wqueue->lock); + + /* Prevent new additions and prevent notifications from happening */ + wqueue->defunct = true; + + while (!hlist_empty(&wqueue->watches)) { + watch = hlist_entry(wqueue->watches.first, struct watch, queue_node); + hlist_del_init_rcu(&watch->queue_node); + /* We now own a ref on the watch. */ + spin_unlock_bh(&wqueue->lock); + + /* We can't do the next bit under the queue lock as we need to + * get the list lock - which would cause a deadlock if someone + * was removing from the opposite direction at the same time or + * posting a notification. + */ + wlist = rcu_dereference(watch->watch_list); + if (wlist) { + void (*release_watch)(struct watch *); + + spin_lock(&wlist->lock); + + release = !hlist_unhashed(&watch->list_node); + if (release) { + hlist_del_init_rcu(&watch->list_node); + rcu_assign_pointer(watch->watch_list, NULL); + + /* We now own a second ref on the watch. */ + } + + release_watch = wlist->release_watch; + spin_unlock(&wlist->lock); + + if (release) { + if (release_watch) { + rcu_read_unlock(); + /* This might need to call dput(), so + * we have to drop all the locks. + */ + (*release_watch)(watch); + rcu_read_lock(); + } + put_watch(watch); + } + } + + put_watch(watch); + spin_lock_bh(&wqueue->lock); + } + + spin_unlock_bh(&wqueue->lock); + rcu_read_unlock(); +} + +/** + * get_watch_queue - Get a watch queue from its file descriptor. + * @fd: The fd to query. + */ +struct watch_queue *get_watch_queue(int fd) +{ + struct pipe_inode_info *pipe; + struct watch_queue *wqueue = ERR_PTR(-EINVAL); + struct fd f; + + f = fdget(fd); + if (f.file) { + pipe = get_pipe_info(f.file, false); + if (pipe && pipe->watch_queue) { + wqueue = pipe->watch_queue; + kref_get(&wqueue->usage); + } + fdput(f); + } + + return wqueue; +} +EXPORT_SYMBOL(get_watch_queue); + +/* + * Initialise a watch queue + */ +int watch_queue_init(struct pipe_inode_info *pipe) +{ + struct watch_queue *wqueue; + + wqueue = kzalloc(sizeof(*wqueue), GFP_KERNEL); + if (!wqueue) + return -ENOMEM; + + wqueue->pipe = pipe; + kref_init(&wqueue->usage); + spin_lock_init(&wqueue->lock); + INIT_HLIST_HEAD(&wqueue->watches); + + pipe->watch_queue = wqueue; + return 0; +} -- GitLab From 998f50407ffc9370565c7ed3fcd1366adccdfbbf Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 12 Feb 2020 13:58:35 +0000 Subject: [PATCH 0557/1971] security: Add hooks to rule on setting a watch Add security hooks that will allow an LSM to rule on whether or not a watch may be set. 
More than one hook is required as the watches watch different types of object. Signed-off-by: David Howells Acked-by: James Morris cc: Casey Schaufler cc: Stephen Smalley cc: linux-security-module@vger.kernel.org --- include/linux/lsm_hook_defs.h | 4 ++++ include/linux/lsm_hooks.h | 5 +++++ include/linux/security.h | 9 +++++++++ security/security.c | 7 +++++++ 4 files changed, 25 insertions(+) diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h index e0def45b5cc56..a54f49e957088 100644 --- a/include/linux/lsm_hook_defs.h +++ b/include/linux/lsm_hook_defs.h @@ -256,6 +256,10 @@ LSM_HOOK(int, 0, inode_getsecctx, struct inode *inode, void **ctx, #if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE) LSM_HOOK(int, 0, post_notification, const struct cred *w_cred, const struct cred *cred, struct watch_notification *n) +#endif /* CONFIG_SECURITY && CONFIG_WATCH_QUEUE */ + +#if defined(CONFIG_SECURITY) && defined(CONFIG_KEY_NOTIFICATIONS) +LSM_HOOK(int, 0, watch_key, struct key *key) #endif /* CONFIG_SECURITY && CONFIG_KEY_NOTIFICATIONS */ #ifdef CONFIG_SECURITY_NETWORK diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index 0b5e5769b8367..3f1374cffb76c 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -1446,6 +1446,11 @@ * @cred: The event-triggerer's credentials * @n: The notification being posted * + * @watch_key: + * Check to see if a process is allowed to watch for event notifications + * from a key or keyring. + * @key: The key to watch. + * * Security hooks for using the eBPF maps and programs functionalities through * eBPF syscalls. * diff --git a/include/linux/security.h b/include/linux/security.h index 9a5d12ab491bc..e7914e4e0b026 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -1290,6 +1290,15 @@ static inline int security_post_notification(const struct cred *w_cred, } #endif +#if defined(CONFIG_SECURITY) && defined(CONFIG_KEY_NOTIFICATIONS) +int security_watch_key(struct key *key); +#else +static inline int security_watch_key(struct key *key) +{ + return 0; +} +#endif + #ifdef CONFIG_SECURITY_NETWORK int security_unix_stream_connect(struct sock *sock, struct sock *other, struct sock *newsk); diff --git a/security/security.c b/security/security.c index 7d55607120b48..c73334ab2882b 100644 --- a/security/security.c +++ b/security/security.c @@ -2016,6 +2016,13 @@ int security_post_notification(const struct cred *w_cred, } #endif /* CONFIG_WATCH_QUEUE */ +#ifdef CONFIG_KEY_NOTIFICATIONS +int security_watch_key(struct key *key) +{ + return call_int_hook(watch_key, 0, key); +} +#endif + #ifdef CONFIG_SECURITY_NETWORK int security_unix_stream_connect(struct sock *sock, struct sock *other, struct sock *newsk) -- GitLab From f7e47677e39a03057dcced2016c92a9c868693ec Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 14 Jan 2020 17:07:11 +0000 Subject: [PATCH 0558/1971] watch_queue: Add a key/keyring notification facility Add a key/keyring change notification facility whereby notifications about changes in key and keyring content and attributes can be received. 
Firstly, an event queue needs to be created: pipe2(fds, O_NOTIFICATION_PIPE); ioctl(fds[1], IOC_WATCH_QUEUE_SET_SIZE, 256); then a notification can be set up to report notifications via that queue: struct watch_notification_filter filter = { .nr_filters = 1, .filters = { [0] = { .type = WATCH_TYPE_KEY_NOTIFY, .subtype_filter[0] = UINT_MAX, }, }, }; ioctl(fds[1], IOC_WATCH_QUEUE_SET_FILTER, &filter); keyctl_watch_key(KEY_SPEC_SESSION_KEYRING, fds[1], 0x01); After that, records will be placed into the queue when events occur in which keys are changed in some way. Records are of the following format: struct key_notification { struct watch_notification watch; __u32 key_id; __u32 aux; } *n; Where: n->watch.type will be WATCH_TYPE_KEY_NOTIFY. n->watch.subtype will indicate the type of event, such as NOTIFY_KEY_REVOKED. n->watch.info & WATCH_INFO_LENGTH will indicate the length of the record. n->watch.info & WATCH_INFO_ID will be the second argument to keyctl_watch_key(), shifted. n->key will be the ID of the affected key. n->aux will hold subtype-dependent information, such as the key being linked into the keyring specified by n->key in the case of NOTIFY_KEY_LINKED. Note that it is permissible for event records to be of variable length - or, at least, the length may be dependent on the subtype. Note also that the queue can be shared between multiple notifications of various types. Signed-off-by: David Howells Reviewed-by: James Morris --- Documentation/security/keys/core.rst | 57 ++++++++++++++++ include/linux/key.h | 3 + include/uapi/linux/keyctl.h | 2 + include/uapi/linux/watch_queue.h | 28 +++++++- security/keys/Kconfig | 9 +++ security/keys/compat.c | 3 + security/keys/gc.c | 5 ++ security/keys/internal.h | 30 ++++++++- security/keys/key.c | 38 +++++++---- security/keys/keyctl.c | 99 +++++++++++++++++++++++++++- security/keys/keyring.c | 20 ++++-- security/keys/request_key.c | 4 +- 12 files changed, 270 insertions(+), 28 deletions(-) diff --git a/Documentation/security/keys/core.rst b/Documentation/security/keys/core.rst index d9b0b859018b4..6cff2b5f88edc 100644 --- a/Documentation/security/keys/core.rst +++ b/Documentation/security/keys/core.rst @@ -1026,6 +1026,63 @@ The keyctl syscall functions are: written into the output buffer. Verification returns 0 on success. + * Watch a key or keyring for changes:: + + long keyctl(KEYCTL_WATCH_KEY, key_serial_t key, int queue_fd, + const struct watch_notification_filter *filter); + + This will set or remove a watch for changes on the specified key or + keyring. + + "key" is the ID of the key to be watched. + + "queue_fd" is a file descriptor referring to an open "/dev/watch_queue" + which manages the buffer into which notifications will be delivered. + + "filter" is either NULL to remove a watch or a filter specification to + indicate what events are required from the key. + + See Documentation/watch_queue.rst for more information. + + Note that only one watch may be emplaced for any particular { key, + queue_fd } combination. 
+ + Notification records look like:: + + struct key_notification { + struct watch_notification watch; + __u32 key_id; + __u32 aux; + }; + + In this, watch::type will be "WATCH_TYPE_KEY_NOTIFY" and subtype will be + one of:: + + NOTIFY_KEY_INSTANTIATED + NOTIFY_KEY_UPDATED + NOTIFY_KEY_LINKED + NOTIFY_KEY_UNLINKED + NOTIFY_KEY_CLEARED + NOTIFY_KEY_REVOKED + NOTIFY_KEY_INVALIDATED + NOTIFY_KEY_SETATTR + + Where these indicate a key being instantiated/rejected, updated, a link + being made in a keyring, a link being removed from a keyring, a keyring + being cleared, a key being revoked, a key being invalidated or a key + having one of its attributes changed (user, group, perm, timeout, + restriction). + + If a watched key is deleted, a basic watch_notification will be issued + with "type" set to WATCH_TYPE_META and "subtype" set to + watch_meta_removal_notification. The watchpoint ID will be set in the + "info" field. + + This needs to be configured by enabling: + + "Provide key/keyring change notifications" (KEY_NOTIFICATIONS) + + Kernel Services =============== diff --git a/include/linux/key.h b/include/linux/key.h index 6cf8e71cf8b7c..b99b40db08fc6 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -176,6 +176,9 @@ struct key { struct list_head graveyard_link; struct rb_node serial_node; }; +#ifdef CONFIG_KEY_NOTIFICATIONS + struct watch_list *watchers; /* Entities watching this key for changes */ +#endif struct rw_semaphore sem; /* change vs change sem */ struct key_user *user; /* owner of this key */ void *security; /* security data for this key */ diff --git a/include/uapi/linux/keyctl.h b/include/uapi/linux/keyctl.h index ed3d5893830df..4c8884eea8084 100644 --- a/include/uapi/linux/keyctl.h +++ b/include/uapi/linux/keyctl.h @@ -69,6 +69,7 @@ #define KEYCTL_RESTRICT_KEYRING 29 /* Restrict keys allowed to link to a keyring */ #define KEYCTL_MOVE 30 /* Move keys between keyrings */ #define KEYCTL_CAPABILITIES 31 /* Find capabilities of keyrings subsystem */ +#define KEYCTL_WATCH_KEY 32 /* Watch a key or ring of keys for changes */ /* keyctl structures */ struct keyctl_dh_params { @@ -130,5 +131,6 @@ struct keyctl_pkey_params { #define KEYCTL_CAPS0_MOVE 0x80 /* KEYCTL_MOVE supported */ #define KEYCTL_CAPS1_NS_KEYRING_NAME 0x01 /* Keyring names are per-user_namespace */ #define KEYCTL_CAPS1_NS_KEY_TAG 0x02 /* Key indexing can include a namespace tag */ +#define KEYCTL_CAPS1_NOTIFICATIONS 0x04 /* Keys generate watchable notifications */ #endif /* _LINUX_KEYCTL_H */ diff --git a/include/uapi/linux/watch_queue.h b/include/uapi/linux/watch_queue.h index 3a5790f1f05de..c3d8320b5d3a6 100644 --- a/include/uapi/linux/watch_queue.h +++ b/include/uapi/linux/watch_queue.h @@ -13,7 +13,8 @@ enum watch_notification_type { WATCH_TYPE_META = 0, /* Special record */ - WATCH_TYPE__NR = 1 + WATCH_TYPE_KEY_NOTIFY = 1, /* Key change event notification */ + WATCH_TYPE__NR = 2 }; enum watch_meta_notification_subtype { @@ -75,4 +76,29 @@ struct watch_notification_removal { __u64 id; /* Type-dependent identifier */ }; +/* + * Type of key/keyring change notification. 
+ */ +enum key_notification_subtype { + NOTIFY_KEY_INSTANTIATED = 0, /* Key was instantiated (aux is error code) */ + NOTIFY_KEY_UPDATED = 1, /* Key was updated */ + NOTIFY_KEY_LINKED = 2, /* Key (aux) was added to watched keyring */ + NOTIFY_KEY_UNLINKED = 3, /* Key (aux) was removed from watched keyring */ + NOTIFY_KEY_CLEARED = 4, /* Keyring was cleared */ + NOTIFY_KEY_REVOKED = 5, /* Key was revoked */ + NOTIFY_KEY_INVALIDATED = 6, /* Key was invalidated */ + NOTIFY_KEY_SETATTR = 7, /* Key's attributes got changed */ +}; + +/* + * Key/keyring notification record. + * - watch.type = WATCH_TYPE_KEY_NOTIFY + * - watch.subtype = enum key_notification_type + */ +struct key_notification { + struct watch_notification watch; + __u32 key_id; /* The key/keyring affected */ + __u32 aux; /* Per-type auxiliary data */ +}; + #endif /* _UAPI_LINUX_WATCH_QUEUE_H */ diff --git a/security/keys/Kconfig b/security/keys/Kconfig index 47c041563d41c..d4dc5ea208af1 100644 --- a/security/keys/Kconfig +++ b/security/keys/Kconfig @@ -116,3 +116,12 @@ config KEY_DH_OPERATIONS in the kernel. If you are unsure as to whether this is required, answer N. + +config KEY_NOTIFICATIONS + bool "Provide key/keyring change notifications" + depends on KEYS && WATCH_QUEUE + help + This option provides support for getting change notifications on keys + and keyrings on which the caller has View permission. This makes use + of the /dev/watch_queue misc device to handle the notification + buffer and provides KEYCTL_WATCH_KEY to enable/disable watches. diff --git a/security/keys/compat.c b/security/keys/compat.c index b975f8f11124b..6ee9d8f6a4a5b 100644 --- a/security/keys/compat.c +++ b/security/keys/compat.c @@ -156,6 +156,9 @@ COMPAT_SYSCALL_DEFINE5(keyctl, u32, option, case KEYCTL_CAPABILITIES: return keyctl_capabilities(compat_ptr(arg2), arg3); + case KEYCTL_WATCH_KEY: + return keyctl_watch_key(arg2, arg3, arg4); + default: return -EOPNOTSUPP; } diff --git a/security/keys/gc.c b/security/keys/gc.c index 671dd730ecfc9..3c90807476eb0 100644 --- a/security/keys/gc.c +++ b/security/keys/gc.c @@ -131,6 +131,11 @@ static noinline void key_gc_unused_keys(struct list_head *keys) kdebug("- %u", key->serial); key_check(key); +#ifdef CONFIG_KEY_NOTIFICATIONS + remove_watch_list(key->watchers, key->serial); + key->watchers = NULL; +#endif + /* Throw away the key data if the key is instantiated */ if (state == KEY_IS_POSITIVE && key->type->destroy) key->type->destroy(key); diff --git a/security/keys/internal.h b/security/keys/internal.h index 6d0ca48ae9a50..28e17f4f3328f 100644 --- a/security/keys/internal.h +++ b/security/keys/internal.h @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -99,7 +100,8 @@ extern int __key_link_begin(struct key *keyring, const struct keyring_index_key *index_key, struct assoc_array_edit **_edit); extern int __key_link_check_live_key(struct key *keyring, struct key *key); -extern void __key_link(struct key *key, struct assoc_array_edit **_edit); +extern void __key_link(struct key *keyring, struct key *key, + struct assoc_array_edit **_edit); extern void __key_link_end(struct key *keyring, const struct keyring_index_key *index_key, struct assoc_array_edit *edit); @@ -183,6 +185,23 @@ extern int key_task_permission(const key_ref_t key_ref, const struct cred *cred, key_perm_t perm); +static inline void notify_key(struct key *key, + enum key_notification_subtype subtype, u32 aux) +{ +#ifdef CONFIG_KEY_NOTIFICATIONS + struct key_notification n = { + .watch.type = 
WATCH_TYPE_KEY_NOTIFY, + .watch.subtype = subtype, + .watch.info = watch_sizeof(n), + .key_id = key_serial(key), + .aux = aux, + }; + + post_watch_notification(key->watchers, &n.watch, current_cred(), + n.key_id); +#endif +} + /* * Check to see whether permission is granted to use a key in the desired way. */ @@ -333,6 +352,15 @@ static inline long keyctl_pkey_e_d_s(int op, extern long keyctl_capabilities(unsigned char __user *_buffer, size_t buflen); +#ifdef CONFIG_KEY_NOTIFICATIONS +extern long keyctl_watch_key(key_serial_t, int, int); +#else +static inline long keyctl_watch_key(key_serial_t key_id, int watch_fd, int watch_id) +{ + return -EOPNOTSUPP; +} +#endif + /* * Debugging key validation */ diff --git a/security/keys/key.c b/security/keys/key.c index e959b3c96b487..e282c6179b21d 100644 --- a/security/keys/key.c +++ b/security/keys/key.c @@ -444,6 +444,7 @@ static int __key_instantiate_and_link(struct key *key, /* mark the key as being instantiated */ atomic_inc(&key->user->nikeys); mark_key_instantiated(key, 0); + notify_key(key, NOTIFY_KEY_INSTANTIATED, 0); if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) awaken = 1; @@ -453,7 +454,7 @@ static int __key_instantiate_and_link(struct key *key, if (test_bit(KEY_FLAG_KEEP, &keyring->flags)) set_bit(KEY_FLAG_KEEP, &key->flags); - __key_link(key, _edit); + __key_link(keyring, key, _edit); } /* disable the authorisation key */ @@ -601,6 +602,7 @@ int key_reject_and_link(struct key *key, /* mark the key as being negatively instantiated */ atomic_inc(&key->user->nikeys); mark_key_instantiated(key, -error); + notify_key(key, NOTIFY_KEY_INSTANTIATED, -error); key->expiry = ktime_get_real_seconds() + timeout; key_schedule_gc(key->expiry + key_gc_delay); @@ -611,7 +613,7 @@ int key_reject_and_link(struct key *key, /* and link it into the destination keyring */ if (keyring && link_ret == 0) - __key_link(key, &edit); + __key_link(keyring, key, &edit); /* disable the authorisation key */ if (authkey) @@ -764,9 +766,11 @@ static inline key_ref_t __key_update(key_ref_t key_ref, down_write(&key->sem); ret = key->type->update(key, prep); - if (ret == 0) + if (ret == 0) { /* Updating a negative key positively instantiates it */ mark_key_instantiated(key, 0); + notify_key(key, NOTIFY_KEY_UPDATED, 0); + } up_write(&key->sem); @@ -1023,9 +1027,11 @@ int key_update(key_ref_t key_ref, const void *payload, size_t plen) down_write(&key->sem); ret = key->type->update(key, &prep); - if (ret == 0) + if (ret == 0) { /* Updating a negative key positively instantiates it */ mark_key_instantiated(key, 0); + notify_key(key, NOTIFY_KEY_UPDATED, 0); + } up_write(&key->sem); @@ -1057,15 +1063,17 @@ void key_revoke(struct key *key) * instantiated */ down_write_nested(&key->sem, 1); - if (!test_and_set_bit(KEY_FLAG_REVOKED, &key->flags) && - key->type->revoke) - key->type->revoke(key); - - /* set the death time to no more than the expiry time */ - time = ktime_get_real_seconds(); - if (key->revoked_at == 0 || key->revoked_at > time) { - key->revoked_at = time; - key_schedule_gc(key->revoked_at + key_gc_delay); + if (!test_and_set_bit(KEY_FLAG_REVOKED, &key->flags)) { + notify_key(key, NOTIFY_KEY_REVOKED, 0); + if (key->type->revoke) + key->type->revoke(key); + + /* set the death time to no more than the expiry time */ + time = ktime_get_real_seconds(); + if (key->revoked_at == 0 || key->revoked_at > time) { + key->revoked_at = time; + key_schedule_gc(key->revoked_at + key_gc_delay); + } } up_write(&key->sem); @@ -1087,8 +1095,10 @@ void key_invalidate(struct 
key *key) if (!test_bit(KEY_FLAG_INVALIDATED, &key->flags)) { down_write_nested(&key->sem, 1); - if (!test_and_set_bit(KEY_FLAG_INVALIDATED, &key->flags)) + if (!test_and_set_bit(KEY_FLAG_INVALIDATED, &key->flags)) { + notify_key(key, NOTIFY_KEY_INVALIDATED, 0); key_schedule_gc_links(); + } up_write(&key->sem); } } diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c index 5e01192e222a0..7d8de1c9a4781 100644 --- a/security/keys/keyctl.c +++ b/security/keys/keyctl.c @@ -37,7 +37,9 @@ static const unsigned char keyrings_capabilities[2] = { KEYCTL_CAPS0_MOVE ), [1] = (KEYCTL_CAPS1_NS_KEYRING_NAME | - KEYCTL_CAPS1_NS_KEY_TAG), + KEYCTL_CAPS1_NS_KEY_TAG | + (IS_ENABLED(CONFIG_KEY_NOTIFICATIONS) ? KEYCTL_CAPS1_NOTIFICATIONS : 0) + ), }; static int key_get_type_from_user(char *type, @@ -1039,6 +1041,7 @@ long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group) if (group != (gid_t) -1) key->gid = gid; + notify_key(key, NOTIFY_KEY_SETATTR, 0); ret = 0; error_put: @@ -1089,6 +1092,7 @@ long keyctl_setperm_key(key_serial_t id, key_perm_t perm) /* if we're not the sysadmin, we can only change a key that we own */ if (capable(CAP_SYS_ADMIN) || uid_eq(key->uid, current_fsuid())) { key->perm = perm; + notify_key(key, NOTIFY_KEY_SETATTR, 0); ret = 0; } @@ -1480,10 +1484,12 @@ long keyctl_set_timeout(key_serial_t id, unsigned timeout) okay: key = key_ref_to_ptr(key_ref); ret = 0; - if (test_bit(KEY_FLAG_KEEP, &key->flags)) + if (test_bit(KEY_FLAG_KEEP, &key->flags)) { ret = -EPERM; - else + } else { key_set_timeout(key, timeout); + notify_key(key, NOTIFY_KEY_SETATTR, 0); + } key_put(key); error: @@ -1757,6 +1763,90 @@ error: return ret; } +#ifdef CONFIG_KEY_NOTIFICATIONS +/* + * Watch for changes to a key. + * + * The caller must have View permission to watch a key or keyring. + */ +long keyctl_watch_key(key_serial_t id, int watch_queue_fd, int watch_id) +{ + struct watch_queue *wqueue; + struct watch_list *wlist = NULL; + struct watch *watch = NULL; + struct key *key; + key_ref_t key_ref; + long ret; + + if (watch_id < -1 || watch_id > 0xff) + return -EINVAL; + + key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE, KEY_NEED_VIEW); + if (IS_ERR(key_ref)) + return PTR_ERR(key_ref); + key = key_ref_to_ptr(key_ref); + + wqueue = get_watch_queue(watch_queue_fd); + if (IS_ERR(wqueue)) { + ret = PTR_ERR(wqueue); + goto err_key; + } + + if (watch_id >= 0) { + ret = -ENOMEM; + if (!key->watchers) { + wlist = kzalloc(sizeof(*wlist), GFP_KERNEL); + if (!wlist) + goto err_wqueue; + init_watch_list(wlist, NULL); + } + + watch = kzalloc(sizeof(*watch), GFP_KERNEL); + if (!watch) + goto err_wlist; + + init_watch(watch, wqueue); + watch->id = key->serial; + watch->info_id = (u32)watch_id << WATCH_INFO_ID__SHIFT; + + ret = security_watch_key(key); + if (ret < 0) + goto err_watch; + + down_write(&key->sem); + if (!key->watchers) { + key->watchers = wlist; + wlist = NULL; + } + + ret = add_watch_to_object(watch, key->watchers); + up_write(&key->sem); + + if (ret == 0) + watch = NULL; + } else { + ret = -EBADSLT; + if (key->watchers) { + down_write(&key->sem); + ret = remove_watch_from_object(key->watchers, + wqueue, key_serial(key), + false); + up_write(&key->sem); + } + } + +err_watch: + kfree(watch); +err_wlist: + kfree(wlist); +err_wqueue: + put_watch_queue(wqueue); +err_key: + key_put(key); + return ret; +} +#endif /* CONFIG_KEY_NOTIFICATIONS */ + /* * Get keyrings subsystem capabilities. 
*/ @@ -1926,6 +2016,9 @@ SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3, case KEYCTL_CAPABILITIES: return keyctl_capabilities((unsigned char __user *)arg2, (size_t)arg3); + case KEYCTL_WATCH_KEY: + return keyctl_watch_key((key_serial_t)arg2, (int)arg3, (int)arg4); + default: return -EOPNOTSUPP; } diff --git a/security/keys/keyring.c b/security/keys/keyring.c index 5ca620d31cd30..14abfe765b7e7 100644 --- a/security/keys/keyring.c +++ b/security/keys/keyring.c @@ -1056,12 +1056,14 @@ int keyring_restrict(key_ref_t keyring_ref, const char *type, down_write(&keyring->sem); down_write(&keyring_serialise_restrict_sem); - if (keyring->restrict_link) + if (keyring->restrict_link) { ret = -EEXIST; - else if (keyring_detect_restriction_cycle(keyring, restrict_link)) + } else if (keyring_detect_restriction_cycle(keyring, restrict_link)) { ret = -EDEADLK; - else + } else { keyring->restrict_link = restrict_link; + notify_key(keyring, NOTIFY_KEY_SETATTR, 0); + } up_write(&keyring_serialise_restrict_sem); up_write(&keyring->sem); @@ -1362,12 +1364,14 @@ int __key_link_check_live_key(struct key *keyring, struct key *key) * holds at most one link to any given key of a particular type+description * combination. */ -void __key_link(struct key *key, struct assoc_array_edit **_edit) +void __key_link(struct key *keyring, struct key *key, + struct assoc_array_edit **_edit) { __key_get(key); assoc_array_insert_set_object(*_edit, keyring_key_to_ptr(key)); assoc_array_apply_edit(*_edit); *_edit = NULL; + notify_key(keyring, NOTIFY_KEY_LINKED, key_serial(key)); } /* @@ -1451,7 +1455,7 @@ int key_link(struct key *keyring, struct key *key) if (ret == 0) ret = __key_link_check_live_key(keyring, key); if (ret == 0) - __key_link(key, &edit); + __key_link(keyring, key, &edit); error_end: __key_link_end(keyring, &key->index_key, edit); @@ -1483,7 +1487,7 @@ static int __key_unlink_begin(struct key *keyring, struct key *key, struct assoc_array_edit *edit; BUG_ON(*_edit != NULL); - + edit = assoc_array_delete(&keyring->keys, &keyring_assoc_array_ops, &key->index_key); if (IS_ERR(edit)) @@ -1503,6 +1507,7 @@ static void __key_unlink(struct key *keyring, struct key *key, struct assoc_array_edit **_edit) { assoc_array_apply_edit(*_edit); + notify_key(keyring, NOTIFY_KEY_UNLINKED, key_serial(key)); *_edit = NULL; key_payload_reserve(keyring, keyring->datalen - KEYQUOTA_LINK_BYTES); } @@ -1621,7 +1626,7 @@ int key_move(struct key *key, goto error; __key_unlink(from_keyring, key, &from_edit); - __key_link(key, &to_edit); + __key_link(to_keyring, key, &to_edit); error: __key_link_end(to_keyring, &key->index_key, to_edit); __key_unlink_end(from_keyring, key, from_edit); @@ -1655,6 +1660,7 @@ int keyring_clear(struct key *keyring) } else { if (edit) assoc_array_apply_edit(edit); + notify_key(keyring, NOTIFY_KEY_CLEARED, 0); key_payload_reserve(keyring, 0); ret = 0; } diff --git a/security/keys/request_key.c b/security/keys/request_key.c index 957b9e3e14924..e1b9f1a80676e 100644 --- a/security/keys/request_key.c +++ b/security/keys/request_key.c @@ -418,7 +418,7 @@ static int construct_alloc_key(struct keyring_search_context *ctx, goto key_already_present; if (dest_keyring) - __key_link(key, &edit); + __key_link(dest_keyring, key, &edit); mutex_unlock(&key_construction_mutex); if (dest_keyring) @@ -437,7 +437,7 @@ key_already_present: if (dest_keyring) { ret = __key_link_check_live_key(dest_keyring, key); if (ret == 0) - __key_link(key, &edit); + __key_link(dest_keyring, key, &edit); 
__key_link_end(dest_keyring, &ctx->index_key, edit); if (ret < 0) goto link_check_failed; -- GitLab From f5b5a164f9a11aab5b225f082b33a8f03c07516c Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 14 Jan 2020 17:07:11 +0000 Subject: [PATCH 0559/1971] Add sample notification program The sample program is run like: ./samples/watch_queue/watch_test and watches "/" for mount changes and the current session keyring for key changes: # keyctl add user a a @s 1035096409 # keyctl unlink 1035096409 @s producing: # ./watch_test read() = 16 NOTIFY[000]: ty=000001 sy=02 i=00000110 KEY 2ffc2e5d change=2[linked] aux=1035096409 read() = 16 NOTIFY[000]: ty=000001 sy=02 i=00000110 KEY 2ffc2e5d change=3[unlinked] aux=1035096409 Other events may be produced, such as with a failing disk: read() = 22 NOTIFY[000]: ty=000003 sy=02 i=00000416 USB 3-7.7 dev-reset e=0 r=0 read() = 24 NOTIFY[000]: ty=000002 sy=06 i=00000418 BLOCK 00800050 e=6[critical medium] s=64000ef8 This corresponds to: blk_update_request: critical medium error, dev sdf, sector 1677725432 op 0x0:(READ) flags 0x0 phys_seg 1 prio class 0 in dmesg. Signed-off-by: David Howells --- samples/Kconfig | 6 + samples/Makefile | 1 + samples/watch_queue/Makefile | 7 ++ samples/watch_queue/watch_test.c | 183 +++++++++++++++++++++++++++++++ 4 files changed, 197 insertions(+) create mode 100644 samples/watch_queue/Makefile create mode 100644 samples/watch_queue/watch_test.c diff --git a/samples/Kconfig b/samples/Kconfig index 9d236c346de50..5c31971a5745c 100644 --- a/samples/Kconfig +++ b/samples/Kconfig @@ -190,5 +190,11 @@ config SAMPLE_INTEL_MEI help Build a sample program to work with mei device. +config SAMPLE_WATCH_QUEUE + bool "Build example /dev/watch_queue notification consumer" + depends on HEADERS_INSTALL + help + Build example userspace program to use the new mount_notify(), + sb_notify() syscalls and the KEYCTL_WATCH_KEY keyctl() function. endif # SAMPLES diff --git a/samples/Makefile b/samples/Makefile index f8f847b4f61f5..8617edf7f19a3 100644 --- a/samples/Makefile +++ b/samples/Makefile @@ -24,3 +24,4 @@ obj-$(CONFIG_VIDEO_PCI_SKELETON) += v4l/ obj-y += vfio-mdev/ subdir-$(CONFIG_SAMPLE_VFS) += vfs obj-$(CONFIG_SAMPLE_INTEL_MEI) += mei/ +subdir-$(CONFIG_SAMPLE_WATCH_QUEUE) += watch_queue diff --git a/samples/watch_queue/Makefile b/samples/watch_queue/Makefile new file mode 100644 index 0000000000000..8511fb6c53d2b --- /dev/null +++ b/samples/watch_queue/Makefile @@ -0,0 +1,7 @@ +# List of programs to build +hostprogs := watch_test + +# Tell kbuild to always build the programs +always-y := $(hostprogs) + +HOSTCFLAGS_watch_test.o += -I$(objtree)/usr/include diff --git a/samples/watch_queue/watch_test.c b/samples/watch_queue/watch_test.c new file mode 100644 index 0000000000000..4a311790d4ca1 --- /dev/null +++ b/samples/watch_queue/watch_test.c @@ -0,0 +1,183 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Use /dev/watch_queue to watch for notifications. + * + * Copyright (C) 2020 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + */ + +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef KEYCTL_WATCH_KEY +#define KEYCTL_WATCH_KEY -1 +#endif +#ifndef __NR_keyctl +#define __NR_keyctl -1 +#endif + +#define BUF_SIZE 256 + +static long keyctl_watch_key(int key, int watch_fd, int watch_id) +{ + return syscall(__NR_keyctl, KEYCTL_WATCH_KEY, key, watch_fd, watch_id); +} + +static const char *key_subtypes[256] = { + [NOTIFY_KEY_INSTANTIATED] = "instantiated", + [NOTIFY_KEY_UPDATED] = "updated", + [NOTIFY_KEY_LINKED] = "linked", + [NOTIFY_KEY_UNLINKED] = "unlinked", + [NOTIFY_KEY_CLEARED] = "cleared", + [NOTIFY_KEY_REVOKED] = "revoked", + [NOTIFY_KEY_INVALIDATED] = "invalidated", + [NOTIFY_KEY_SETATTR] = "setattr", +}; + +static void saw_key_change(struct watch_notification *n, size_t len) +{ + struct key_notification *k = (struct key_notification *)n; + + if (len != sizeof(struct key_notification)) { + fprintf(stderr, "Incorrect key message length\n"); + return; + } + + printf("KEY %08x change=%u[%s] aux=%u\n", + k->key_id, n->subtype, key_subtypes[n->subtype], k->aux); +} + +/* + * Consume and display events. + */ +static void consumer(int fd) +{ + unsigned char buffer[4096], *p, *end; + union { + struct watch_notification n; + unsigned char buf1[128]; + } n; + ssize_t buf_len; + + for (;;) { + buf_len = read(fd, buffer, sizeof(buffer)); + if (buf_len == -1) { + perror("read"); + exit(1); + } + + if (buf_len == 0) { + printf("-- END --\n"); + return; + } + + if (buf_len > sizeof(buffer)) { + fprintf(stderr, "Read buffer overrun: %zd\n", buf_len); + return; + } + + printf("read() = %zd\n", buf_len); + + p = buffer; + end = buffer + buf_len; + while (p < end) { + size_t largest, len; + + largest = end - p; + if (largest > 128) + largest = 128; + if (largest < sizeof(struct watch_notification)) { + fprintf(stderr, "Short message header: %zu\n", largest); + return; + } + memcpy(&n, p, largest); + + printf("NOTIFY[%03zx]: ty=%06x sy=%02x i=%08x\n", + p - buffer, n.n.type, n.n.subtype, n.n.info); + + len = n.n.info & WATCH_INFO_LENGTH; + if (len < sizeof(n.n) || len > largest) { + fprintf(stderr, "Bad message length: %zu/%zu\n", len, largest); + exit(1); + } + + switch (n.n.type) { + case WATCH_TYPE_META: + switch (n.n.subtype) { + case WATCH_META_REMOVAL_NOTIFICATION: + printf("REMOVAL of watchpoint %08x\n", + (n.n.info & WATCH_INFO_ID) >> + WATCH_INFO_ID__SHIFT); + break; + default: + printf("other meta record\n"); + break; + } + break; + case WATCH_TYPE_KEY_NOTIFY: + saw_key_change(&n.n, len); + break; + default: + printf("other type\n"); + break; + } + + p += len; + } + } +} + +static struct watch_notification_filter filter = { + .nr_filters = 1, + .filters = { + [0] = { + .type = WATCH_TYPE_KEY_NOTIFY, + .subtype_filter[0] = UINT_MAX, + }, + }, +}; + +int main(int argc, char **argv) +{ + int pipefd[2], fd; + + if (pipe2(pipefd, O_NOTIFICATION_PIPE) == -1) { + perror("pipe2"); + exit(1); + } + fd = pipefd[0]; + + if (ioctl(fd, IOC_WATCH_QUEUE_SET_SIZE, BUF_SIZE) == -1) { + perror("watch_queue(size)"); + exit(1); + } + + if (ioctl(fd, IOC_WATCH_QUEUE_SET_FILTER, &filter) == -1) { + perror("watch_queue(filter)"); + exit(1); + } + + if (keyctl_watch_key(KEY_SPEC_SESSION_KEYRING, fd, 0x01) == -1) { + perror("keyctl"); + exit(1); + } + + if (keyctl_watch_key(KEY_SPEC_USER_KEYRING, fd, 0x02) == -1) { + perror("keyctl"); + exit(1); + } + + consumer(fd); + exit(0); +} -- 
GitLab From 8cfba76383e902acbed95092163052b1572f17a8 Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 14 Jan 2020 17:07:11 +0000 Subject: [PATCH 0560/1971] pipe: Allow buffers to be marked read-whole-or-error for notifications Allow a buffer to be marked such that read() must return the entire buffer in one go or return ENOBUFS. Multiple buffers can be amalgamated into a single read, but a short read will occur if the next "whole" buffer won't fit. This is useful for watch queue notifications to make sure we don't split a notification across multiple reads, especially given that we need to fabricate an overrun record under some circumstances - and that isn't in the buffers. Signed-off-by: David Howells --- fs/pipe.c | 8 +++++++- include/linux/pipe_fs_i.h | 1 + kernel/watch_queue.c | 2 +- samples/watch_queue/watch_test.c | 2 +- 4 files changed, 10 insertions(+), 3 deletions(-) diff --git a/fs/pipe.c b/fs/pipe.c index da9bc1f21fd19..0f9fd897ceb56 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -320,8 +320,14 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to) size_t written; int error; - if (chars > total_len) + if (chars > total_len) { + if (buf->flags & PIPE_BUF_FLAG_WHOLE) { + if (ret == 0) + ret = -ENOBUFS; + break; + } chars = total_len; + } error = pipe_buf_confirm(pipe, buf); if (error) { diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h index 1d3eaa233f4a8..eaff59a2f0743 100644 --- a/include/linux/pipe_fs_i.h +++ b/include/linux/pipe_fs_i.h @@ -8,6 +8,7 @@ #define PIPE_BUF_FLAG_ATOMIC 0x02 /* was atomically mapped */ #define PIPE_BUF_FLAG_GIFT 0x04 /* page is a gift */ #define PIPE_BUF_FLAG_PACKET 0x08 /* read() as a packet */ +#define PIPE_BUF_FLAG_WHOLE 0x10 /* read() must return entire buffer or error */ /** * struct pipe_buffer - a linux kernel pipe buffer diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c index c103e34f87056..ad64ea300f6df 100644 --- a/kernel/watch_queue.c +++ b/kernel/watch_queue.c @@ -115,7 +115,7 @@ static bool post_one_notification(struct watch_queue *wqueue, buf->ops = &watch_queue_pipe_buf_ops; buf->offset = offset; buf->len = len; - buf->flags = 0; + buf->flags = PIPE_BUF_FLAG_WHOLE; pipe->head = head + 1; if (!test_and_clear_bit(note, wqueue->notes_bitmap)) { diff --git a/samples/watch_queue/watch_test.c b/samples/watch_queue/watch_test.c index 4a311790d4ca1..8628b4c5d5676 100644 --- a/samples/watch_queue/watch_test.c +++ b/samples/watch_queue/watch_test.c @@ -63,7 +63,7 @@ static void saw_key_change(struct watch_notification *n, size_t len) */ static void consumer(int fd) { - unsigned char buffer[4096], *p, *end; + unsigned char buffer[433], *p, *end; union { struct watch_notification n; unsigned char buf1[128]; -- GitLab From e7d553d69cf63aec7de0f38fed49ccbb30922e1e Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 14 Jan 2020 17:07:12 +0000 Subject: [PATCH 0561/1971] pipe: Add notification lossage handling Add handling for loss of notifications by having read() insert a loss-notification message after it has read the pipe buffer that was last in the ring when the loss occurred. Lossage can come about either by running out of notification descriptors or by running out of space in the pipe ring. 
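For illustration, a consumer of the notification pipe might detect the inserted loss record roughly as follows; this is only a sketch in the style of samples/watch_queue/watch_test.c, not code added by this patch:

	#include <stdio.h>
	#include <linux/watch_queue.h>

	/* Called for each record parsed out of a read() from the pipe. */
	static void handle_record(const struct watch_notification *n)
	{
		if (n->type == WATCH_TYPE_META &&
		    n->subtype == WATCH_META_LOSS_NOTIFICATION) {
			/* Records were dropped somewhere before this point in
			 * the stream; the watcher may need to resynchronise
			 * its view of the watched objects.
			 */
			fprintf(stderr, "-- LOSS --\n");
			return;
		}

		/* ... dispatch WATCH_TYPE_KEY_NOTIFY and other types here ... */
	}
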
Signed-off-by: David Howells --- fs/pipe.c | 28 ++++++++++++++++++++++++++++ include/linux/pipe_fs_i.h | 7 +++++++ kernel/watch_queue.c | 2 ++ samples/watch_queue/watch_test.c | 3 +++ 4 files changed, 40 insertions(+) diff --git a/fs/pipe.c b/fs/pipe.c index 0f9fd897ceb56..620a113f92eb9 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -314,6 +314,30 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to) unsigned int tail = pipe->tail; unsigned int mask = pipe->ring_size - 1; +#ifdef CONFIG_WATCH_QUEUE + if (pipe->note_loss) { + struct watch_notification n; + + if (total_len < 8) { + if (ret == 0) + ret = -ENOBUFS; + break; + } + + n.type = WATCH_TYPE_META; + n.subtype = WATCH_META_LOSS_NOTIFICATION; + n.info = watch_sizeof(n); + if (copy_to_iter(&n, sizeof(n), to) != sizeof(n)) { + if (ret == 0) + ret = -EFAULT; + break; + } + ret += sizeof(n); + total_len -= sizeof(n); + pipe->note_loss = false; + } +#endif + if (!pipe_empty(head, tail)) { struct pipe_buffer *buf = &pipe->bufs[tail & mask]; size_t chars = buf->len; @@ -355,6 +379,10 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to) if (!buf->len) { pipe_buf_release(pipe, buf); spin_lock_irq(&pipe->rd_wait.lock); +#ifdef CONFIG_WATCH_QUEUE + if (buf->flags & PIPE_BUF_FLAG_LOSS) + pipe->note_loss = true; +#endif tail++; pipe->tail = tail; spin_unlock_irq(&pipe->rd_wait.lock); diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h index eaff59a2f0743..6626f511de6fb 100644 --- a/include/linux/pipe_fs_i.h +++ b/include/linux/pipe_fs_i.h @@ -9,6 +9,9 @@ #define PIPE_BUF_FLAG_GIFT 0x04 /* page is a gift */ #define PIPE_BUF_FLAG_PACKET 0x08 /* read() as a packet */ #define PIPE_BUF_FLAG_WHOLE 0x10 /* read() must return entire buffer or error */ +#ifdef CONFIG_WATCH_QUEUE +#define PIPE_BUF_FLAG_LOSS 0x20 /* Message loss happened after this buffer */ +#endif /** * struct pipe_buffer - a linux kernel pipe buffer @@ -34,6 +37,7 @@ struct pipe_buffer { * @wr_wait: writer wait point in case of full pipe * @head: The point of buffer production * @tail: The point of buffer consumption + * @note_loss: The next read() should insert a data-lost message * @max_usage: The maximum number of slots that may be used in the ring * @ring_size: total number of buffers (should be a power of 2) * @nr_accounted: The amount this pipe accounts for in user->pipe_bufs @@ -56,6 +60,9 @@ struct pipe_inode_info { unsigned int tail; unsigned int max_usage; unsigned int ring_size; +#ifdef CONFIG_WATCH_QUEUE + bool note_loss; +#endif unsigned int nr_accounted; unsigned int readers; unsigned int writers; diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c index ad64ea300f6df..9a9699c06709b 100644 --- a/kernel/watch_queue.c +++ b/kernel/watch_queue.c @@ -132,6 +132,8 @@ out: return done; lost: + buf = &pipe->bufs[(head - 1) & mask]; + buf->flags |= PIPE_BUF_FLAG_LOSS; goto out; } diff --git a/samples/watch_queue/watch_test.c b/samples/watch_queue/watch_test.c index 8628b4c5d5676..46e618a897fef 100644 --- a/samples/watch_queue/watch_test.c +++ b/samples/watch_queue/watch_test.c @@ -120,6 +120,9 @@ static void consumer(int fd) (n.n.info & WATCH_INFO_ID) >> WATCH_INFO_ID__SHIFT); break; + case WATCH_META_LOSS_NOTIFICATION: + printf("-- LOSS --\n"); + break; default: printf("other meta record\n"); break; -- GitLab From 8c0637e950d68933a67f7438f779d79b049b5e5c Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 12 May 2020 15:16:29 +0100 Subject: [PATCH 0562/1971] keys: Make the KEY_NEED_* perms an enum rather than a mask Since the meaning of combining the KEY_NEED_* 
constants is undefined, make it so that you can't do that by turning them into an enum. The enum is also given some extra values to represent special circumstances, such as: (1) The '0' value is reserved and causes a warning to trap the parameter being unset. (2) The key is to be unlinked and we require no permissions on it, only the keyring, (this replaces the KEY_LOOKUP_FOR_UNLINK flag). (3) An override due to CAP_SYS_ADMIN. (4) An override due to an instantiation token being present. (5) The permissions check is being deferred to later key_permission() calls. The extra values give the opportunity for LSMs to audit these situations. [Note: This really needs overhauling so that lookup_user_key() tells key_task_permission() and the LSM what operation is being done and leaves it to those functions to decide how to map that onto the available permits. However, I don't really want to make these change in the middle of the notifications patchset.] Signed-off-by: David Howells cc: Jarkko Sakkinen cc: Paul Moore cc: Stephen Smalley cc: Casey Schaufler cc: keyrings@vger.kernel.org cc: selinux@vger.kernel.org --- include/linux/key.h | 30 +++++++++++++---------- include/linux/security.h | 6 ++--- security/keys/internal.h | 8 +++---- security/keys/keyctl.c | 16 +++++++------ security/keys/permission.c | 31 ++++++++++++++++++------ security/keys/process_keys.c | 46 +++++++++++++++++------------------- security/security.c | 6 ++--- security/selinux/hooks.c | 37 +++++++++++++++++++++++------ security/smack/smack_lsm.c | 29 +++++++++++++++++------ 9 files changed, 135 insertions(+), 74 deletions(-) diff --git a/include/linux/key.h b/include/linux/key.h index b99b40db08fc6..0f2e24f13c2bd 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -71,6 +71,23 @@ struct net; #define KEY_PERM_UNDEF 0xffffffff +/* + * The permissions required on a key that we're looking up. + */ +enum key_need_perm { + KEY_NEED_UNSPECIFIED, /* Needed permission unspecified */ + KEY_NEED_VIEW, /* Require permission to view attributes */ + KEY_NEED_READ, /* Require permission to read content */ + KEY_NEED_WRITE, /* Require permission to update / modify */ + KEY_NEED_SEARCH, /* Require permission to search (keyring) or find (key) */ + KEY_NEED_LINK, /* Require permission to link */ + KEY_NEED_SETATTR, /* Require permission to change attributes */ + KEY_NEED_UNLINK, /* Require permission to unlink key */ + KEY_SYSADMIN_OVERRIDE, /* Special: override by CAP_SYS_ADMIN */ + KEY_AUTHTOKEN_OVERRIDE, /* Special: override by possession of auth token */ + KEY_DEFER_PERM_CHECK, /* Special: permission check is deferred */ +}; + struct seq_file; struct user_struct; struct signal_struct; @@ -420,20 +437,9 @@ static inline key_serial_t key_serial(const struct key *key) extern void key_set_timeout(struct key *, unsigned); extern key_ref_t lookup_user_key(key_serial_t id, unsigned long flags, - key_perm_t perm); + enum key_need_perm need_perm); extern void key_free_user_ns(struct user_namespace *); -/* - * The permissions required on a key that we're looking up. 
- */ -#define KEY_NEED_VIEW 0x01 /* Require permission to view attributes */ -#define KEY_NEED_READ 0x02 /* Require permission to read content */ -#define KEY_NEED_WRITE 0x04 /* Require permission to update / modify */ -#define KEY_NEED_SEARCH 0x08 /* Require permission to search (keyring) or find (key) */ -#define KEY_NEED_LINK 0x10 /* Require permission to link */ -#define KEY_NEED_SETATTR 0x20 /* Require permission to change attributes */ -#define KEY_NEED_ALL 0x3f /* All the above permissions */ - static inline short key_read_state(const struct key *key) { /* Barrier versus mark_key_instantiated(). */ diff --git a/include/linux/security.h b/include/linux/security.h index e7914e4e0b026..57aac14e34186 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -1767,8 +1767,8 @@ static inline int security_path_chroot(const struct path *path) int security_key_alloc(struct key *key, const struct cred *cred, unsigned long flags); void security_key_free(struct key *key); -int security_key_permission(key_ref_t key_ref, - const struct cred *cred, unsigned perm); +int security_key_permission(key_ref_t key_ref, const struct cred *cred, + enum key_need_perm need_perm); int security_key_getsecurity(struct key *key, char **_buffer); #else @@ -1786,7 +1786,7 @@ static inline void security_key_free(struct key *key) static inline int security_key_permission(key_ref_t key_ref, const struct cred *cred, - unsigned perm) + enum key_need_perm need_perm) { return 0; } diff --git a/security/keys/internal.h b/security/keys/internal.h index 28e17f4f3328f..1fc17cb317a99 100644 --- a/security/keys/internal.h +++ b/security/keys/internal.h @@ -167,7 +167,6 @@ extern bool lookup_user_key_possessed(const struct key *key, const struct key_match_data *match_data); #define KEY_LOOKUP_CREATE 0x01 #define KEY_LOOKUP_PARTIAL 0x02 -#define KEY_LOOKUP_FOR_UNLINK 0x04 extern long join_session_keyring(const char *name); extern void key_change_session_keyring(struct callback_head *twork); @@ -183,7 +182,7 @@ extern void key_gc_keytype(struct key_type *ktype); extern int key_task_permission(const key_ref_t key_ref, const struct cred *cred, - key_perm_t perm); + enum key_need_perm need_perm); static inline void notify_key(struct key *key, enum key_notification_subtype subtype, u32 aux) @@ -205,9 +204,10 @@ static inline void notify_key(struct key *key, /* * Check to see whether permission is granted to use a key in the desired way. 
*/ -static inline int key_permission(const key_ref_t key_ref, unsigned perm) +static inline int key_permission(const key_ref_t key_ref, + enum key_need_perm need_perm) { - return key_task_permission(key_ref, current_cred(), perm); + return key_task_permission(key_ref, current_cred(), need_perm); } extern struct key_type key_type_request_key_auth; diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c index 7d8de1c9a4781..6763ee45e04d1 100644 --- a/security/keys/keyctl.c +++ b/security/keys/keyctl.c @@ -434,7 +434,7 @@ long keyctl_invalidate_key(key_serial_t id) /* Root is permitted to invalidate certain special keys */ if (capable(CAP_SYS_ADMIN)) { - key_ref = lookup_user_key(id, 0, 0); + key_ref = lookup_user_key(id, 0, KEY_SYSADMIN_OVERRIDE); if (IS_ERR(key_ref)) goto error; if (test_bit(KEY_FLAG_ROOT_CAN_INVAL, @@ -479,7 +479,8 @@ long keyctl_keyring_clear(key_serial_t ringid) /* Root is permitted to invalidate certain special keyrings */ if (capable(CAP_SYS_ADMIN)) { - keyring_ref = lookup_user_key(ringid, 0, 0); + keyring_ref = lookup_user_key(ringid, 0, + KEY_SYSADMIN_OVERRIDE); if (IS_ERR(keyring_ref)) goto error; if (test_bit(KEY_FLAG_ROOT_CAN_CLEAR, @@ -563,7 +564,7 @@ long keyctl_keyring_unlink(key_serial_t id, key_serial_t ringid) goto error; } - key_ref = lookup_user_key(id, KEY_LOOKUP_FOR_UNLINK, 0); + key_ref = lookup_user_key(id, KEY_LOOKUP_PARTIAL, KEY_NEED_UNLINK); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error2; @@ -663,7 +664,7 @@ long keyctl_describe_key(key_serial_t keyid, key_put(instkey); key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, - 0); + KEY_AUTHTOKEN_OVERRIDE); if (!IS_ERR(key_ref)) goto okay; } @@ -833,7 +834,7 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen) size_t key_data_len; /* find the key first */ - key_ref = lookup_user_key(keyid, 0, 0); + key_ref = lookup_user_key(keyid, 0, KEY_DEFER_PERM_CHECK); if (IS_ERR(key_ref)) { ret = -ENOKEY; goto out; @@ -1471,7 +1472,7 @@ long keyctl_set_timeout(key_serial_t id, unsigned timeout) key_put(instkey); key_ref = lookup_user_key(id, KEY_LOOKUP_PARTIAL, - 0); + KEY_AUTHTOKEN_OVERRIDE); if (!IS_ERR(key_ref)) goto okay; } @@ -1579,7 +1580,8 @@ long keyctl_get_security(key_serial_t keyid, return PTR_ERR(instkey); key_put(instkey); - key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, 0); + key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, + KEY_AUTHTOKEN_OVERRIDE); if (IS_ERR(key_ref)) return PTR_ERR(key_ref); } diff --git a/security/keys/permission.c b/security/keys/permission.c index 085f907b64ac5..4a61f804e80f6 100644 --- a/security/keys/permission.c +++ b/security/keys/permission.c @@ -13,7 +13,7 @@ * key_task_permission - Check a key can be used * @key_ref: The key to check. * @cred: The credentials to use. - * @perm: The permissions to check for. + * @need_perm: The permission required. * * Check to see whether permission is granted to use a key in the desired way, * but permit the security modules to override. @@ -24,12 +24,30 @@ * permissions bits or the LSM check. 
*/ int key_task_permission(const key_ref_t key_ref, const struct cred *cred, - unsigned perm) + enum key_need_perm need_perm) { struct key *key; - key_perm_t kperm; + key_perm_t kperm, mask; int ret; + switch (need_perm) { + default: + WARN_ON(1); + return -EACCES; + case KEY_NEED_UNLINK: + case KEY_SYSADMIN_OVERRIDE: + case KEY_AUTHTOKEN_OVERRIDE: + case KEY_DEFER_PERM_CHECK: + goto lsm; + + case KEY_NEED_VIEW: mask = KEY_OTH_VIEW; break; + case KEY_NEED_READ: mask = KEY_OTH_READ; break; + case KEY_NEED_WRITE: mask = KEY_OTH_WRITE; break; + case KEY_NEED_SEARCH: mask = KEY_OTH_SEARCH; break; + case KEY_NEED_LINK: mask = KEY_OTH_LINK; break; + case KEY_NEED_SETATTR: mask = KEY_OTH_SETATTR; break; + } + key = key_ref_to_ptr(key_ref); /* use the second 8-bits of permissions for keys the caller owns */ @@ -64,13 +82,12 @@ use_these_perms: if (is_key_possessed(key_ref)) kperm |= key->perm >> 24; - kperm = kperm & perm & KEY_NEED_ALL; - - if (kperm != perm) + if ((kperm & mask) != mask) return -EACCES; /* let LSM be the final arbiter */ - return security_key_permission(key_ref, cred, perm); +lsm: + return security_key_permission(key_ref, cred, need_perm); } EXPORT_SYMBOL(key_task_permission); diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c index 09541de31f2f1..7e0232db1707e 100644 --- a/security/keys/process_keys.c +++ b/security/keys/process_keys.c @@ -609,7 +609,7 @@ bool lookup_user_key_possessed(const struct key *key, * returned key reference. */ key_ref_t lookup_user_key(key_serial_t id, unsigned long lflags, - key_perm_t perm) + enum key_need_perm need_perm) { struct keyring_search_context ctx = { .match_data.cmp = lookup_user_key_possessed, @@ -773,35 +773,33 @@ try_again: /* unlink does not use the nominated key in any way, so can skip all * the permission checks as it is only concerned with the keyring */ - if (lflags & KEY_LOOKUP_FOR_UNLINK) { - ret = 0; - goto error; - } - - if (!(lflags & KEY_LOOKUP_PARTIAL)) { - ret = wait_for_key_construction(key, true); - switch (ret) { - case -ERESTARTSYS: - goto invalid_key; - default: - if (perm) + if (need_perm != KEY_NEED_UNLINK) { + if (!(lflags & KEY_LOOKUP_PARTIAL)) { + ret = wait_for_key_construction(key, true); + switch (ret) { + case -ERESTARTSYS: + goto invalid_key; + default: + if (need_perm != KEY_AUTHTOKEN_OVERRIDE && + need_perm != KEY_DEFER_PERM_CHECK) + goto invalid_key; + case 0: + break; + } + } else if (need_perm != KEY_DEFER_PERM_CHECK) { + ret = key_validate(key); + if (ret < 0) goto invalid_key; - case 0: - break; } - } else if (perm) { - ret = key_validate(key); - if (ret < 0) + + ret = -EIO; + if (!(lflags & KEY_LOOKUP_PARTIAL) && + key_read_state(key) == KEY_IS_UNINSTANTIATED) goto invalid_key; } - ret = -EIO; - if (!(lflags & KEY_LOOKUP_PARTIAL) && - key_read_state(key) == KEY_IS_UNINSTANTIATED) - goto invalid_key; - /* check the permissions */ - ret = key_task_permission(key_ref, ctx.cred, perm); + ret = key_task_permission(key_ref, ctx.cred, need_perm); if (ret < 0) goto invalid_key; diff --git a/security/security.c b/security/security.c index c73334ab2882b..af32d4cd0462a 100644 --- a/security/security.c +++ b/security/security.c @@ -2398,10 +2398,10 @@ void security_key_free(struct key *key) call_void_hook(key_free, key); } -int security_key_permission(key_ref_t key_ref, - const struct cred *cred, unsigned perm) +int security_key_permission(key_ref_t key_ref, const struct cred *cred, + enum key_need_perm need_perm) { - return call_int_hook(key_permission, 0, key_ref, cred, perm); + return 
call_int_hook(key_permission, 0, key_ref, cred, need_perm); } int security_key_getsecurity(struct key *key, char **_buffer) diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 4c037c2545c16..196acaccbfddf 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -6561,20 +6561,43 @@ static void selinux_key_free(struct key *k) static int selinux_key_permission(key_ref_t key_ref, const struct cred *cred, - unsigned perm) + enum key_need_perm need_perm) { struct key *key; struct key_security_struct *ksec; - u32 sid; + u32 perm, sid; - /* if no specific permissions are requested, we skip the - permission check. No serious, additional covert channels - appear to be created. */ - if (perm == 0) + switch (need_perm) { + case KEY_NEED_VIEW: + perm = KEY__VIEW; + break; + case KEY_NEED_READ: + perm = KEY__READ; + break; + case KEY_NEED_WRITE: + perm = KEY__WRITE; + break; + case KEY_NEED_SEARCH: + perm = KEY__SEARCH; + break; + case KEY_NEED_LINK: + perm = KEY__LINK; + break; + case KEY_NEED_SETATTR: + perm = KEY__SETATTR; + break; + case KEY_NEED_UNLINK: + case KEY_SYSADMIN_OVERRIDE: + case KEY_AUTHTOKEN_OVERRIDE: + case KEY_DEFER_PERM_CHECK: return 0; + default: + WARN_ON(1); + return -EPERM; - sid = cred_sid(cred); + } + sid = cred_sid(cred); key = key_ref_to_ptr(key_ref); ksec = key->security; diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index 8c61d175e1954..0d6bb53efe745 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c @@ -4230,13 +4230,14 @@ static void smack_key_free(struct key *key) * smack_key_permission - Smack access on a key * @key_ref: gets to the object * @cred: the credentials to use - * @perm: requested key permissions + * @need_perm: requested key permission * * Return 0 if the task has read and write to the object, * an error code otherwise */ static int smack_key_permission(key_ref_t key_ref, - const struct cred *cred, unsigned perm) + const struct cred *cred, + enum key_need_perm need_perm) { struct key *keyp; struct smk_audit_info ad; @@ -4247,8 +4248,26 @@ static int smack_key_permission(key_ref_t key_ref, /* * Validate requested permissions */ - if (perm & ~KEY_NEED_ALL) + switch (need_perm) { + case KEY_NEED_READ: + case KEY_NEED_SEARCH: + case KEY_NEED_VIEW: + request |= MAY_READ; + break; + case KEY_NEED_WRITE: + case KEY_NEED_LINK: + case KEY_NEED_SETATTR: + request |= MAY_WRITE; + break; + case KEY_NEED_UNSPECIFIED: + case KEY_NEED_UNLINK: + case KEY_SYSADMIN_OVERRIDE: + case KEY_AUTHTOKEN_OVERRIDE: + case KEY_DEFER_PERM_CHECK: + return 0; + default: return -EINVAL; + } keyp = key_ref_to_ptr(key_ref); if (keyp == NULL) @@ -4273,10 +4292,6 @@ static int smack_key_permission(key_ref_t key_ref, ad.a.u.key_struct.key = keyp->serial; ad.a.u.key_struct.key_desc = keyp->description; #endif - if (perm & (KEY_NEED_READ | KEY_NEED_SEARCH | KEY_NEED_VIEW)) - request |= MAY_READ; - if (perm & (KEY_NEED_WRITE | KEY_NEED_LINK | KEY_NEED_SETATTR)) - request |= MAY_WRITE; rc = smk_access(tkp, keyp->security, request, &ad); rc = smk_bu_note("key access", tkp, keyp->security, request, rc); return rc; -- GitLab From 3e412ccc22e25666772094fb5ca01af056c54471 Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 14 Jan 2020 17:07:13 +0000 Subject: [PATCH 0563/1971] selinux: Implement the watch_key security hook Implement the watch_key security hook to make sure that a key grants the caller View permission in order to set a watch on a key. 
For the moment, the watch_devices security hook is left unimplemented as it's not obvious what the object should be since the queue is global and didn't previously exist. Signed-off-by: David Howells Acked-by: Stephen Smalley Reviewed-by: James Morris --- security/selinux/hooks.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 196acaccbfddf..5b3191bd6130b 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -6619,6 +6619,17 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer) *_buffer = context; return rc; } + +#ifdef CONFIG_KEY_NOTIFICATIONS +static int selinux_watch_key(struct key *key) +{ + struct key_security_struct *ksec = key->security; + u32 sid = current_sid(); + + return avc_has_perm(&selinux_state, + sid, ksec->sid, SECCLASS_KEY, KEY__VIEW, NULL); +} +#endif #endif #ifdef CONFIG_SECURITY_INFINIBAND @@ -7134,6 +7145,9 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = { LSM_HOOK_INIT(key_free, selinux_key_free), LSM_HOOK_INIT(key_permission, selinux_key_permission), LSM_HOOK_INIT(key_getsecurity, selinux_key_getsecurity), +#ifdef CONFIG_KEY_NOTIFICATIONS + LSM_HOOK_INIT(watch_key, selinux_watch_key), +#endif #endif #ifdef CONFIG_AUDIT -- GitLab From a8478a602913dc89a7cd2060e613edecd07e1dbd Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 14 Jan 2020 17:07:13 +0000 Subject: [PATCH 0564/1971] smack: Implement the watch_key and post_notification hooks Implement the watch_key security hook in Smack to make sure that a key grants the caller Read permission in order to set a watch on a key. Also implement the post_notification security hook to make sure that the notification source is granted Write permission by the watch queue. For the moment, the watch_devices security hook is left unimplemented as it's not obvious what the object should be since the queue is global and didn't previously exist. Signed-off-by: David Howells Acked-by: Casey Schaufler --- include/linux/lsm_audit.h | 1 + security/smack/smack_lsm.c | 83 +++++++++++++++++++++++++++++++++++++- 2 files changed, 83 insertions(+), 1 deletion(-) diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h index 99d629fd99445..28f23b341c1ca 100644 --- a/include/linux/lsm_audit.h +++ b/include/linux/lsm_audit.h @@ -75,6 +75,7 @@ struct common_audit_data { #define LSM_AUDIT_DATA_IBPKEY 13 #define LSM_AUDIT_DATA_IBENDPORT 14 #define LSM_AUDIT_DATA_LOCKDOWN 15 +#define LSM_AUDIT_DATA_NOTIFICATION 16 union { struct path path; struct dentry *dentry; diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index 0d6bb53efe745..3c4d4836da4a0 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c @@ -41,6 +41,7 @@ #include #include #include +#include #include "smack.h" #define TRANS_TRUE "TRUE" @@ -4284,7 +4285,7 @@ static int smack_key_permission(key_ref_t key_ref, if (tkp == NULL) return -EACCES; - if (smack_privileged_cred(CAP_MAC_OVERRIDE, cred)) + if (smack_privileged(CAP_MAC_OVERRIDE)) return 0; #ifdef CONFIG_AUDIT @@ -4326,8 +4327,81 @@ static int smack_key_getsecurity(struct key *key, char **_buffer) return length; } + +#ifdef CONFIG_KEY_NOTIFICATIONS +/** + * smack_watch_key - Smack access to watch a key for notifications. + * @key: The key to be watched + * + * Return 0 if the @watch->cred has permission to read from the key object and + * an error otherwise. 
+ */ +static int smack_watch_key(struct key *key) +{ + struct smk_audit_info ad; + struct smack_known *tkp = smk_of_current(); + int rc; + + if (key == NULL) + return -EINVAL; + /* + * If the key hasn't been initialized give it access so that + * it may do so. + */ + if (key->security == NULL) + return 0; + /* + * This should not occur + */ + if (tkp == NULL) + return -EACCES; + + if (smack_privileged_cred(CAP_MAC_OVERRIDE, current_cred())) + return 0; + +#ifdef CONFIG_AUDIT + smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_KEY); + ad.a.u.key_struct.key = key->serial; + ad.a.u.key_struct.key_desc = key->description; +#endif + rc = smk_access(tkp, key->security, MAY_READ, &ad); + rc = smk_bu_note("key watch", tkp, key->security, MAY_READ, rc); + return rc; +} +#endif /* CONFIG_KEY_NOTIFICATIONS */ #endif /* CONFIG_KEYS */ +#ifdef CONFIG_WATCH_QUEUE +/** + * smack_post_notification - Smack access to post a notification to a queue + * @w_cred: The credentials of the watcher. + * @cred: The credentials of the event source (may be NULL). + * @n: The notification message to be posted. + */ +static int smack_post_notification(const struct cred *w_cred, + const struct cred *cred, + struct watch_notification *n) +{ + struct smk_audit_info ad; + struct smack_known *subj, *obj; + int rc; + + /* Always let maintenance notifications through. */ + if (n->type == WATCH_TYPE_META) + return 0; + + if (!cred) + return 0; + subj = smk_of_task(smack_cred(cred)); + obj = smk_of_task(smack_cred(w_cred)); + + smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_NOTIFICATION); + rc = smk_access(subj, obj, MAY_WRITE, &ad); + rc = smk_bu_note("notification", subj, obj, MAY_WRITE, rc); + return rc; +} +#endif /* CONFIG_WATCH_QUEUE */ + /* * Smack Audit hooks * @@ -4716,8 +4790,15 @@ static struct security_hook_list smack_hooks[] __lsm_ro_after_init = { LSM_HOOK_INIT(key_free, smack_key_free), LSM_HOOK_INIT(key_permission, smack_key_permission), LSM_HOOK_INIT(key_getsecurity, smack_key_getsecurity), +#ifdef CONFIG_KEY_NOTIFICATIONS + LSM_HOOK_INIT(watch_key, smack_watch_key), +#endif #endif /* CONFIG_KEYS */ +#ifdef CONFIG_WATCH_QUEUE + LSM_HOOK_INIT(post_notification, smack_post_notification), +#endif + /* Audit hooks */ #ifdef CONFIG_AUDIT LSM_HOOK_INIT(audit_rule_init, smack_audit_rule_init), -- GitLab From 81eeae43c3f61ce875c9ceab1be47bd02b0099a2 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Tue, 10 Mar 2020 21:51:24 -0700 Subject: [PATCH 0565/1971] ARM/SAMSUNG EXYNOS ARM ARCHITECTURES: Use fallthrough; Convert the various uses of fallthrough comments to fallthrough; Done via script Link: https://lore.kernel.org/lkml/b56602fcf79f849e733e7b521bb0e17895d390fa.1582230379.git.joe@perches.com/ Signed-off-by: Joe Perches Reviewed-by: Stephen Boyd Acked-by: Wolfram Sang # for the I2C part Signed-off-by: Sylwester Nawrocki --- drivers/clk/samsung/clk-s3c2443.c | 2 +- drivers/i2c/busses/i2c-s3c2410.c | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/clk/samsung/clk-s3c2443.c b/drivers/clk/samsung/clk-s3c2443.c index 5f30fe72cd512..c7aba1e1af70f 100644 --- a/drivers/clk/samsung/clk-s3c2443.c +++ b/drivers/clk/samsung/clk-s3c2443.c @@ -387,7 +387,7 @@ void __init s3c2443_common_clk_init(struct device_node *np, unsigned long xti_f, ARRAY_SIZE(s3c2450_gates)); samsung_clk_register_alias(ctx, s3c2450_aliases, ARRAY_SIZE(s3c2450_aliases)); - /* fall through - as s3c2450 extends the s3c2416 clocks */ + fallthrough; /* as s3c2450 extends the s3c2416 clocks */ case S3C2416: samsung_clk_register_div(ctx, 
s3c2416_dividers, ARRAY_SIZE(s3c2416_dividers)); diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index 5a5638e1daa1d..57986984a90bd 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c @@ -435,8 +435,7 @@ static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat) * fall through to the write state, as we will need to * send a byte as well */ - /* Fall through */ - + fallthrough; case STATE_WRITE: /* * we are writing data to the device... check for the -- GitLab From 25bdae0f1c6609ceaf55fe6700654f0be2253d8e Mon Sep 17 00:00:00 2001 From: Marek Szyprowski Date: Tue, 19 May 2020 12:26:52 +0200 Subject: [PATCH 0566/1971] clk: samsung: exynos5433: Add IGNORE_UNUSED flag to sclk_i2s1 Mark the SCLK clock for Exynos5433 I2S1 device with IGNORE_UNUSED flag to match its behaviour with SCLK clock for AUD_I2S (I2S0) device until a proper fix for Exynos I2S driver is ready. This fixes the following synchronous abort issue revealed by the probe order change caused by the commit 93d2e4322aa7 ("of: platform: Batch fwnode parsing when adding all top level devices") Internal error: synchronous external abort: 96000210 [#1] PREEMPT SMP Modules linked in: CPU: 0 PID: 50 Comm: kworker/0:1 Not tainted 5.7.0-rc5+ #701 Hardware name: Samsung TM2E board (DT) Workqueue: events deferred_probe_work_func pstate: 60000005 (nZCv daif -PAN -UAO) pc : samsung_i2s_probe+0x768/0x8f0 lr : samsung_i2s_probe+0x688/0x8f0 ... Call trace: samsung_i2s_probe+0x768/0x8f0 platform_drv_probe+0x50/0xa8 really_probe+0x108/0x370 driver_probe_device+0x54/0xb8 __device_attach_driver+0x90/0xc0 bus_for_each_drv+0x70/0xc8 __device_attach+0xdc/0x140 device_initial_probe+0x10/0x18 bus_probe_device+0x94/0xa0 deferred_probe_work_func+0x70/0xa8 process_one_work+0x2a8/0x718 worker_thread+0x48/0x470 kthread+0x134/0x160 ret_from_fork+0x10/0x1c Code: 17ffffaf d503201f f94086c0 91003000 (88dffc00) ---[ end trace ccf721c9400ddbd6 ]--- Signed-off-by: Marek Szyprowski Signed-off-by: Sylwester Nawrocki --- drivers/clk/samsung/clk-exynos5433.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c index 4b1aa9382ad28..6f29ecd0442e1 100644 --- a/drivers/clk/samsung/clk-exynos5433.c +++ b/drivers/clk/samsung/clk-exynos5433.c @@ -1706,7 +1706,8 @@ static const struct samsung_gate_clock peric_gate_clks[] __initconst = { GATE(CLK_SCLK_PCM1, "sclk_pcm1", "sclk_pcm1_peric", ENABLE_SCLK_PERIC, 7, CLK_SET_RATE_PARENT, 0), GATE(CLK_SCLK_I2S1, "sclk_i2s1", "sclk_i2s1_peric", - ENABLE_SCLK_PERIC, 6, CLK_SET_RATE_PARENT, 0), + ENABLE_SCLK_PERIC, 6, + CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0), GATE(CLK_SCLK_SPI2, "sclk_spi2", "sclk_spi2_peric", ENABLE_SCLK_PERIC, 5, CLK_SET_RATE_PARENT, 0), GATE(CLK_SCLK_SPI1, "sclk_spi1", "sclk_spi1_peric", ENABLE_SCLK_PERIC, -- GitLab From a5f304670b80973dfce5bc86cacff20244926cf6 Mon Sep 17 00:00:00 2001 From: Veronika Kabatova Date: Tue, 19 May 2020 22:00:45 +0200 Subject: [PATCH 0567/1971] selftests: introduce gen_tar Makefile target The gen_kselftest_tar.sh always packages *all* selftests and doesn't pass along any variables to `make install` to influence what should be built. This can result in an early error on the command line ("Unknown tarball format TARGETS=XXX"), or unexpected test failures as the tarball contains tests people wanted to skip on purpose. Since the makefile already contains all the logic, we can add a target for packaging. 
Keep the default .gz target the script uses, and actually extend the supported formats by using tar's autodetection. To not break current workflows, keep the gen_kselftest_tar.sh script as it is, with an added suggestion to use the makefile target instead. Signed-off-by: Veronika Kabatova Reviewed-by: Stefano Brivio Signed-off-by: Shuah Khan --- Documentation/dev-tools/kselftest.rst | 23 ++++++++++++++++++++ tools/testing/selftests/Makefile | 9 +++++++- tools/testing/selftests/gen_kselftest_tar.sh | 5 +++++ 3 files changed, 36 insertions(+), 1 deletion(-) diff --git a/Documentation/dev-tools/kselftest.rst b/Documentation/dev-tools/kselftest.rst index 61ae13c44f915..d4f6341cf4d93 100644 --- a/Documentation/dev-tools/kselftest.rst +++ b/Documentation/dev-tools/kselftest.rst @@ -151,6 +151,29 @@ note some tests will require root privileges:: $ cd kselftest $ ./run_kselftest.sh +Packaging selftests +=================== + +In some cases packaging is desired, such as when tests need to run on a +different system. To package selftests, run:: + + $ make -C tools/testing/selftests gen_tar + +This generates a tarball in the `INSTALL_PATH/kselftest-packages` directory. By +default, `.gz` format is used. The tar format can be overridden by specifying +a `FORMAT` make variable. Any value recognized by `tar's auto-compress`_ option +is supported, such as:: + + $ make -C tools/testing/selftests gen_tar FORMAT=.xz + +`make gen_tar` invokes `make install` so you can use it to package a subset of +tests by using variables specified in `Running a subset of selftests`_ +section:: + + $ make -C tools/testing/selftests gen_tar TARGETS="bpf" FORMAT=.xz + +.. _tar's auto-compress: https://www.gnu.org/software/tar/manual/html_node/gzip.html#auto_002dcompress + Contributing new tests ====================== diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile index 2ff68702fd412..1195bd85af386 100644 --- a/tools/testing/selftests/Makefile +++ b/tools/testing/selftests/Makefile @@ -249,10 +249,17 @@ else $(error Error: set INSTALL_PATH to use install) endif +FORMAT ?= .gz +TAR_PATH = $(abspath ${INSTALL_PATH}/kselftest-packages/kselftest.tar${FORMAT}) +gen_tar: install + @mkdir -p ${INSTALL_PATH}/kselftest-packages/ + @tar caf ${TAR_PATH} --exclude=kselftest-packages -C ${INSTALL_PATH} . + @echo "Created ${TAR_PATH}" + clean: @for TARGET in $(TARGETS); do \ BUILD_TARGET=$$BUILD/$$TARGET; \ $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET clean;\ done; -.PHONY: khdr all run_tests hotplug run_hotplug clean_hotplug run_pstore_crash install clean +.PHONY: khdr all run_tests hotplug run_hotplug clean_hotplug run_pstore_crash install clean gen_tar diff --git a/tools/testing/selftests/gen_kselftest_tar.sh b/tools/testing/selftests/gen_kselftest_tar.sh index 8b2b6088540d4..4a974bc03385b 100755 --- a/tools/testing/selftests/gen_kselftest_tar.sh +++ b/tools/testing/selftests/gen_kselftest_tar.sh @@ -49,6 +49,11 @@ main() # directory ./kselftest_install.sh "$install_dir" (cd "$install_work"; tar $copts "$dest"/kselftest${ext} $install_name) + + # Don't put the message at the actual end as people may be parsing the + # "archive created" line in their scripts. + echo -e "\nConsider using 'make gen_tar' instead of this script\n" + echo "Kselftest archive kselftest${ext} created!" 
# clean up top-level install work directory -- GitLab From c143b7753b308d5f7d03b165a7fdff1dda1f271d Mon Sep 17 00:00:00 2001 From: Cheng Jian Date: Fri, 15 May 2020 10:08:28 +0000 Subject: [PATCH 0568/1971] ftrace: show debugging information when panic_on_warn set When an anomaly is detected in the function call modification code, ftrace_bug() is called to disable function tracing as well as give some warn and information that may help debug the problem. But currently, we call FTRACE_WARN_ON_ONCE() first in ftrace_bug(), so when panic_on_warn is set, we can't see the debugging information here. Call FTRACE_WARN_ON_ONCE() at the end of ftrace_bug() to ensure that the debugging information is displayed first. after this patch, the dmesg looks like: ------------[ ftrace bug ]------------ ftrace failed to modify [] bcm2835_handle_irq+0x4/0x58 actual: 1f:20:03:d5 Setting ftrace call site to call ftrace function ftrace record flags: 80000001 (1) expected tramp: ffff80001009d6f0 ------------[ cut here ]------------ WARNING: CPU: 2 PID: 1635 at kernel/trace/ftrace.c:2078 ftrace_bug+0x204/0x238 Kernel panic - not syncing: panic_on_warn set ... CPU: 2 PID: 1635 Comm: sh Not tainted 5.7.0-rc5-00033-gb922183867f5 #14 Hardware name: linux,dummy-virt (DT) Call trace: dump_backtrace+0x0/0x1b0 show_stack+0x20/0x30 dump_stack+0xc0/0x10c panic+0x16c/0x368 __warn+0x120/0x160 report_bug+0xc8/0x160 bug_handler+0x28/0x98 brk_handler+0x70/0xd0 do_debug_exception+0xcc/0x1ac el1_sync_handler+0xe4/0x120 el1_sync+0x7c/0x100 ftrace_bug+0x204/0x238 Link: https://lkml.kernel.org/r/20200515100828.7091-1-cj.chengjian@huawei.com Signed-off-by: Cheng Jian Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/ftrace.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index bd030b1b95148..cd39cbf3631a5 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -2027,14 +2027,14 @@ void ftrace_bug(int failed, struct dyn_ftrace *rec) { unsigned long ip = rec ? rec->ip : 0; + pr_info("------------[ ftrace bug ]------------\n"); + switch (failed) { case -EFAULT: - FTRACE_WARN_ON_ONCE(1); pr_info("ftrace faulted on modifying "); print_ip_sym(ip); break; case -EINVAL: - FTRACE_WARN_ON_ONCE(1); pr_info("ftrace failed to modify "); print_ip_sym(ip); print_ip_ins(" actual: ", (unsigned char *)ip); @@ -2045,12 +2045,10 @@ void ftrace_bug(int failed, struct dyn_ftrace *rec) } break; case -EPERM: - FTRACE_WARN_ON_ONCE(1); pr_info("ftrace faulted on writing "); print_ip_sym(ip); break; default: - FTRACE_WARN_ON_ONCE(1); pr_info("ftrace faulted on unknown error "); print_ip_sym(ip); } @@ -2077,6 +2075,8 @@ void ftrace_bug(int failed, struct dyn_ftrace *rec) ip = ftrace_get_addr_curr(rec); pr_cont("\n expected tramp: %lx\n", ip); } + + FTRACE_WARN_ON_ONCE(1); } static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update) -- GitLab From 98d0a685cf8bcde23f4b0d1aa0a348fcef84569b Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Sat, 9 May 2020 09:58:25 +0900 Subject: [PATCH 0569/1971] tools/bootconfig: Add a summary of test cases and return error Add summary lines of test cases and return an error code if any test case fails so that tester don't have to monitor the output. 
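As a hypothetical example of how this can be consumed (not part of this
patch), a wrapper script or CI job can now rely on the exit status
instead of grepping the log, e.g. from tools/bootconfig/:

    ./test-bootconfig.sh > result.log || echo "bootconfig selftests FAILED"

The summary block added below prints the pass/fail counts before that
status is returned.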
Link: https://lkml.kernel.org/r/158898590533.22749.10269622752797822320.stgit@devnote2 Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- tools/bootconfig/test-bootconfig.sh | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tools/bootconfig/test-bootconfig.sh b/tools/bootconfig/test-bootconfig.sh index 81b350ffd03fb..eff16b77d5ebe 100755 --- a/tools/bootconfig/test-bootconfig.sh +++ b/tools/bootconfig/test-bootconfig.sh @@ -124,9 +124,16 @@ for i in samples/good-* ; do xpass $BOOTCONF -a $i $INITRD done + +echo +echo "=== Summary ===" +echo "# of Passed: $(expr $NO - $NG - 1)" +echo "# of Failed: $NG" + echo if [ $NG -eq 0 ]; then echo "All tests passed" else echo "$NG tests failed" + exit 1 fi -- GitLab From fc9d276f22330e9322a9c592c71e0571810205f7 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Thu, 7 May 2020 21:30:08 +0200 Subject: [PATCH 0570/1971] tracing/probe: reverse arguments to list_add Elsewhere in the file, the function trace_kprobe_has_same_kprobe uses a trace_probe_event.probes object as the second argument of list_for_each_entry, ie as a list head, while the list_for_each_entry iterates over the list fields of the trace_probe structures, making them the list elements. So, exchange the arguments on the list_add call to put the list head in the second argument. Since both list_head structures were just initialized, this problem did not cause any loss of information. Link: https://lkml.kernel.org/r/1588879808-24488-1-git-send-email-Julia.Lawall@inria.fr Fixes: 60d53e2c3b75 ("tracing/probe: Split trace_event related data from trace_probe") Acked-by: Masami Hiramatsu Signed-off-by: Julia Lawall Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_probe.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index ab8b6436d53fd..b8a928e925c70 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c @@ -1006,7 +1006,7 @@ int trace_probe_init(struct trace_probe *tp, const char *event, INIT_LIST_HEAD(&tp->event->class.fields); INIT_LIST_HEAD(&tp->event->probes); INIT_LIST_HEAD(&tp->list); - list_add(&tp->event->probes, &tp->list); + list_add(&tp->list, &tp->event->probes); call = trace_probe_event_call(tp); call->class = &tp->event->class; -- GitLab From cccc46474227eaaa7cd8f5601bba58489e237991 Mon Sep 17 00:00:00 2001 From: Peng Fan Date: Thu, 7 May 2020 13:56:11 +0800 Subject: [PATCH 0571/1971] clk: imx8m: drop clk_hw_set_parent for A53 The parent settings have been moved to dtsi, we no need to set parent here. And clk_hw_set_parent will trigger lockdep warning, because this api not have prepare_lock. 
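For context, the parent selection is expected to come from the SoC
.dtsi via the generic assigned-clocks binding instead; a hypothetical
fragment (clock names taken from the i.MX8MM hunk below, not from the
actual dtsi patch) would look roughly like:

    assigned-clocks = <&clk IMX8MM_CLK_A53_SRC>, <&clk IMX8MM_CLK_A53_CORE>;
    assigned-clock-parents = <&clk IMX8MM_SYS_PLL1_800M>, <&clk IMX8MM_ARM_PLL_OUT>;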
Reported-by: Leonard Crestez Reviewed-by: Dong Aisheng Reviewed-by: Leonard Crestez Signed-off-by: Peng Fan Signed-off-by: Shawn Guo --- drivers/clk/imx/clk-imx8mm.c | 3 --- drivers/clk/imx/clk-imx8mn.c | 3 --- drivers/clk/imx/clk-imx8mp.c | 3 --- drivers/clk/imx/clk-imx8mq.c | 3 --- 4 files changed, 12 deletions(-) diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c index 925670438f23b..5435042a06e3a 100644 --- a/drivers/clk/imx/clk-imx8mm.c +++ b/drivers/clk/imx/clk-imx8mm.c @@ -614,9 +614,6 @@ static int imx8mm_clocks_probe(struct platform_device *pdev) hws[IMX8MM_ARM_PLL_OUT]->clk, hws[IMX8MM_CLK_A53_DIV]->clk); - clk_hw_set_parent(hws[IMX8MM_CLK_A53_SRC], hws[IMX8MM_SYS_PLL1_800M]); - clk_hw_set_parent(hws[IMX8MM_CLK_A53_CORE], hws[IMX8MM_ARM_PLL_OUT]); - imx_check_clk_hws(hws, IMX8MM_CLK_END); ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data); diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c index 0bc7070235bdc..6cac6ca03e122 100644 --- a/drivers/clk/imx/clk-imx8mn.c +++ b/drivers/clk/imx/clk-imx8mn.c @@ -565,9 +565,6 @@ static int imx8mn_clocks_probe(struct platform_device *pdev) hws[IMX8MN_ARM_PLL_OUT]->clk, hws[IMX8MN_CLK_A53_DIV]->clk); - clk_hw_set_parent(hws[IMX8MN_CLK_A53_SRC], hws[IMX8MN_SYS_PLL1_800M]); - clk_hw_set_parent(hws[IMX8MN_CLK_A53_CORE], hws[IMX8MN_ARM_PLL_OUT]); - imx_check_clk_hws(hws, IMX8MN_CLK_END); ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data); diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c index 41469e2cc3de5..e05ec56df2859 100644 --- a/drivers/clk/imx/clk-imx8mp.c +++ b/drivers/clk/imx/clk-imx8mp.c @@ -735,9 +735,6 @@ static int imx8mp_clocks_probe(struct platform_device *pdev) hws[IMX8MP_ARM_PLL_OUT]->clk, hws[IMX8MP_CLK_A53_DIV]->clk); - clk_hw_set_parent(hws[IMX8MP_CLK_A53_SRC], hws[IMX8MP_SYS_PLL1_800M]); - clk_hw_set_parent(hws[IMX8MP_CLK_A53_CORE], hws[IMX8MP_ARM_PLL_OUT]); - imx_check_clk_hws(hws, IMX8MP_CLK_END); of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data); diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c index fdc68db68de5a..201c7bbb201f4 100644 --- a/drivers/clk/imx/clk-imx8mq.c +++ b/drivers/clk/imx/clk-imx8mq.c @@ -599,9 +599,6 @@ static int imx8mq_clocks_probe(struct platform_device *pdev) hws[IMX8MQ_ARM_PLL_OUT]->clk, hws[IMX8MQ_CLK_A53_DIV]->clk); - clk_hw_set_parent(hws[IMX8MQ_CLK_A53_SRC], hws[IMX8MQ_SYS1_PLL_800M]); - clk_hw_set_parent(hws[IMX8MQ_CLK_A53_CORE], hws[IMX8MQ_ARM_PLL_OUT]); - imx_check_clk_hws(hws, IMX8MQ_CLK_END); err = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data); -- GitLab From dc6e21da340297604f217bcff016389cf78b2a49 Mon Sep 17 00:00:00 2001 From: Peng Fan Date: Thu, 7 May 2020 13:56:12 +0800 Subject: [PATCH 0572/1971] clk: imx: imx8mp: fix pll mux bit Same to i.MX8MN/i.MX8MM, pll BYPASS bit should be kept inside pll driver for glitchless freq setting following spec. If exposing the bit, that means pll driver and clk driver has two paths to touch this bit, which is wrong. So use EXT_BYPASS bit here. 
Reviewed-by: Dong Aisheng Reviewed-by: Leonard Crestez Signed-off-by: Peng Fan Signed-off-by: Shawn Guo --- drivers/clk/imx/clk-imx8mp.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c index e05ec56df2859..e9ed8a1880310 100644 --- a/drivers/clk/imx/clk-imx8mp.c +++ b/drivers/clk/imx/clk-imx8mp.c @@ -486,16 +486,16 @@ static int imx8mp_clocks_probe(struct platform_device *pdev) hws[IMX8MP_SYS_PLL2] = imx_clk_hw_pll14xx("sys_pll2", "sys_pll2_ref_sel", anatop_base + 0x104, &imx_1416x_pll); hws[IMX8MP_SYS_PLL3] = imx_clk_hw_pll14xx("sys_pll3", "sys_pll3_ref_sel", anatop_base + 0x114, &imx_1416x_pll); - hws[IMX8MP_AUDIO_PLL1_BYPASS] = imx_clk_hw_mux_flags("audio_pll1_bypass", anatop_base, 4, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels), CLK_SET_RATE_PARENT); - hws[IMX8MP_AUDIO_PLL2_BYPASS] = imx_clk_hw_mux_flags("audio_pll2_bypass", anatop_base + 0x14, 4, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels), CLK_SET_RATE_PARENT); - hws[IMX8MP_VIDEO_PLL1_BYPASS] = imx_clk_hw_mux_flags("video_pll1_bypass", anatop_base + 0x28, 4, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels), CLK_SET_RATE_PARENT); - hws[IMX8MP_DRAM_PLL_BYPASS] = imx_clk_hw_mux_flags("dram_pll_bypass", anatop_base + 0x50, 4, 1, dram_pll_bypass_sels, ARRAY_SIZE(dram_pll_bypass_sels), CLK_SET_RATE_PARENT); - hws[IMX8MP_GPU_PLL_BYPASS] = imx_clk_hw_mux_flags("gpu_pll_bypass", anatop_base + 0x64, 4, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels), CLK_SET_RATE_PARENT); - hws[IMX8MP_VPU_PLL_BYPASS] = imx_clk_hw_mux_flags("vpu_pll_bypass", anatop_base + 0x74, 4, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels), CLK_SET_RATE_PARENT); - hws[IMX8MP_ARM_PLL_BYPASS] = imx_clk_hw_mux_flags("arm_pll_bypass", anatop_base + 0x84, 4, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT); - hws[IMX8MP_SYS_PLL1_BYPASS] = imx_clk_hw_mux_flags("sys_pll1_bypass", anatop_base + 0x94, 4, 1, sys_pll1_bypass_sels, ARRAY_SIZE(sys_pll1_bypass_sels), CLK_SET_RATE_PARENT); - hws[IMX8MP_SYS_PLL2_BYPASS] = imx_clk_hw_mux_flags("sys_pll2_bypass", anatop_base + 0x104, 4, 1, sys_pll2_bypass_sels, ARRAY_SIZE(sys_pll2_bypass_sels), CLK_SET_RATE_PARENT); - hws[IMX8MP_SYS_PLL3_BYPASS] = imx_clk_hw_mux_flags("sys_pll3_bypass", anatop_base + 0x114, 4, 1, sys_pll3_bypass_sels, ARRAY_SIZE(sys_pll3_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX8MP_AUDIO_PLL1_BYPASS] = imx_clk_hw_mux_flags("audio_pll1_bypass", anatop_base, 16, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX8MP_AUDIO_PLL2_BYPASS] = imx_clk_hw_mux_flags("audio_pll2_bypass", anatop_base + 0x14, 16, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX8MP_VIDEO_PLL1_BYPASS] = imx_clk_hw_mux_flags("video_pll1_bypass", anatop_base + 0x28, 16, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX8MP_DRAM_PLL_BYPASS] = imx_clk_hw_mux_flags("dram_pll_bypass", anatop_base + 0x50, 16, 1, dram_pll_bypass_sels, ARRAY_SIZE(dram_pll_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX8MP_GPU_PLL_BYPASS] = imx_clk_hw_mux_flags("gpu_pll_bypass", anatop_base + 0x64, 28, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX8MP_VPU_PLL_BYPASS] = imx_clk_hw_mux_flags("vpu_pll_bypass", anatop_base + 0x74, 28, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels), CLK_SET_RATE_PARENT); + 
hws[IMX8MP_ARM_PLL_BYPASS] = imx_clk_hw_mux_flags("arm_pll_bypass", anatop_base + 0x84, 28, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX8MP_SYS_PLL1_BYPASS] = imx_clk_hw_mux_flags("sys_pll1_bypass", anatop_base + 0x94, 28, 1, sys_pll1_bypass_sels, ARRAY_SIZE(sys_pll1_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX8MP_SYS_PLL2_BYPASS] = imx_clk_hw_mux_flags("sys_pll2_bypass", anatop_base + 0x104, 28, 1, sys_pll2_bypass_sels, ARRAY_SIZE(sys_pll2_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX8MP_SYS_PLL3_BYPASS] = imx_clk_hw_mux_flags("sys_pll3_bypass", anatop_base + 0x114, 28, 1, sys_pll3_bypass_sels, ARRAY_SIZE(sys_pll3_bypass_sels), CLK_SET_RATE_PARENT); hws[IMX8MP_AUDIO_PLL1_OUT] = imx_clk_hw_gate("audio_pll1_out", "audio_pll1_bypass", anatop_base, 13); hws[IMX8MP_AUDIO_PLL2_OUT] = imx_clk_hw_gate("audio_pll2_out", "audio_pll2_bypass", anatop_base + 0x14, 13); -- GitLab From 77f5d2d97353149d43b401ae98bd0c071cdd2fb6 Mon Sep 17 00:00:00 2001 From: Peng Fan Date: Thu, 7 May 2020 13:56:13 +0800 Subject: [PATCH 0573/1971] clk: imx8mp: Define gates for pll1/2 fixed dividers Inspried from commit e8688fe8df7d ("clk: imx8mn: Define gates for pll1/2 fixed dividers") On imx8mp there are 9 fixed-factor dividers for SYS_PLL1 and SYS_PLL2 each with their own gate. Only one of these gates (the one "dividing" by one) is currently defined and it's incorrectly set as the parent of all the fixed-factor dividers. Add the other 8 gates to the clock tree between sys_pll1/2_bypass and the fixed dividers. Reviewed-by: Dong Aisheng Reviewed-by: Leonard Crestez Signed-off-by: Peng Fan Signed-off-by: Shawn Guo --- drivers/clk/imx/clk-imx8mp.c | 54 ++++++++++++++++-------- include/dt-bindings/clock/imx8mp-clock.h | 19 ++++++++- 2 files changed, 54 insertions(+), 19 deletions(-) diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c index e9ed8a1880310..a7613c7355c8e 100644 --- a/drivers/clk/imx/clk-imx8mp.c +++ b/drivers/clk/imx/clk-imx8mp.c @@ -504,28 +504,46 @@ static int imx8mp_clocks_probe(struct platform_device *pdev) hws[IMX8MP_GPU_PLL_OUT] = imx_clk_hw_gate("gpu_pll_out", "gpu_pll_bypass", anatop_base + 0x64, 11); hws[IMX8MP_VPU_PLL_OUT] = imx_clk_hw_gate("vpu_pll_out", "vpu_pll_bypass", anatop_base + 0x74, 11); hws[IMX8MP_ARM_PLL_OUT] = imx_clk_hw_gate("arm_pll_out", "arm_pll_bypass", anatop_base + 0x84, 11); - hws[IMX8MP_SYS_PLL1_OUT] = imx_clk_hw_gate("sys_pll1_out", "sys_pll1_bypass", anatop_base + 0x94, 11); - hws[IMX8MP_SYS_PLL2_OUT] = imx_clk_hw_gate("sys_pll2_out", "sys_pll2_bypass", anatop_base + 0x104, 11); hws[IMX8MP_SYS_PLL3_OUT] = imx_clk_hw_gate("sys_pll3_out", "sys_pll3_bypass", anatop_base + 0x114, 11); - hws[IMX8MP_SYS_PLL1_40M] = imx_clk_hw_fixed_factor("sys_pll1_40m", "sys_pll1_out", 1, 20); - hws[IMX8MP_SYS_PLL1_80M] = imx_clk_hw_fixed_factor("sys_pll1_80m", "sys_pll1_out", 1, 10); - hws[IMX8MP_SYS_PLL1_100M] = imx_clk_hw_fixed_factor("sys_pll1_100m", "sys_pll1_out", 1, 8); - hws[IMX8MP_SYS_PLL1_133M] = imx_clk_hw_fixed_factor("sys_pll1_133m", "sys_pll1_out", 1, 6); - hws[IMX8MP_SYS_PLL1_160M] = imx_clk_hw_fixed_factor("sys_pll1_160m", "sys_pll1_out", 1, 5); - hws[IMX8MP_SYS_PLL1_200M] = imx_clk_hw_fixed_factor("sys_pll1_200m", "sys_pll1_out", 1, 4); - hws[IMX8MP_SYS_PLL1_266M] = imx_clk_hw_fixed_factor("sys_pll1_266m", "sys_pll1_out", 1, 3); - hws[IMX8MP_SYS_PLL1_400M] = imx_clk_hw_fixed_factor("sys_pll1_400m", "sys_pll1_out", 1, 2); + hws[IMX8MP_SYS_PLL1_40M_CG] = imx_clk_hw_gate("sys_pll1_40m_cg", "sys_pll1_bypass", anatop_base 
+ 0x94, 27); + hws[IMX8MP_SYS_PLL1_80M_CG] = imx_clk_hw_gate("sys_pll1_80m_cg", "sys_pll1_bypass", anatop_base + 0x94, 25); + hws[IMX8MP_SYS_PLL1_100M_CG] = imx_clk_hw_gate("sys_pll1_100m_cg", "sys_pll1_bypass", anatop_base + 0x94, 23); + hws[IMX8MP_SYS_PLL1_133M_CG] = imx_clk_hw_gate("sys_pll1_133m_cg", "sys_pll1_bypass", anatop_base + 0x94, 21); + hws[IMX8MP_SYS_PLL1_160M_CG] = imx_clk_hw_gate("sys_pll1_160m_cg", "sys_pll1_bypass", anatop_base + 0x94, 19); + hws[IMX8MP_SYS_PLL1_200M_CG] = imx_clk_hw_gate("sys_pll1_200m_cg", "sys_pll1_bypass", anatop_base + 0x94, 17); + hws[IMX8MP_SYS_PLL1_266M_CG] = imx_clk_hw_gate("sys_pll1_266m_cg", "sys_pll1_bypass", anatop_base + 0x94, 15); + hws[IMX8MP_SYS_PLL1_400M_CG] = imx_clk_hw_gate("sys_pll1_400m_cg", "sys_pll1_bypass", anatop_base + 0x94, 13); + hws[IMX8MP_SYS_PLL1_OUT] = imx_clk_hw_gate("sys_pll1_out", "sys_pll1_bypass", anatop_base + 0x94, 11); + + hws[IMX8MP_SYS_PLL1_40M] = imx_clk_hw_fixed_factor("sys_pll1_40m", "sys_pll1_40m_cg", 1, 20); + hws[IMX8MP_SYS_PLL1_80M] = imx_clk_hw_fixed_factor("sys_pll1_80m", "sys_pll1_80m_cg", 1, 10); + hws[IMX8MP_SYS_PLL1_100M] = imx_clk_hw_fixed_factor("sys_pll1_100m", "sys_pll1_100m_cg", 1, 8); + hws[IMX8MP_SYS_PLL1_133M] = imx_clk_hw_fixed_factor("sys_pll1_133m", "sys_pll1_133m_cg", 1, 6); + hws[IMX8MP_SYS_PLL1_160M] = imx_clk_hw_fixed_factor("sys_pll1_160m", "sys_pll1_160m_cg", 1, 5); + hws[IMX8MP_SYS_PLL1_200M] = imx_clk_hw_fixed_factor("sys_pll1_200m", "sys_pll1_200m_cg", 1, 4); + hws[IMX8MP_SYS_PLL1_266M] = imx_clk_hw_fixed_factor("sys_pll1_266m", "sys_pll1_266m_cg", 1, 3); + hws[IMX8MP_SYS_PLL1_400M] = imx_clk_hw_fixed_factor("sys_pll1_400m", "sys_pll1_400m_cg", 1, 2); hws[IMX8MP_SYS_PLL1_800M] = imx_clk_hw_fixed_factor("sys_pll1_800m", "sys_pll1_out", 1, 1); - hws[IMX8MP_SYS_PLL2_50M] = imx_clk_hw_fixed_factor("sys_pll2_50m", "sys_pll2_out", 1, 20); - hws[IMX8MP_SYS_PLL2_100M] = imx_clk_hw_fixed_factor("sys_pll2_100m", "sys_pll2_out", 1, 10); - hws[IMX8MP_SYS_PLL2_125M] = imx_clk_hw_fixed_factor("sys_pll2_125m", "sys_pll2_out", 1, 8); - hws[IMX8MP_SYS_PLL2_166M] = imx_clk_hw_fixed_factor("sys_pll2_166m", "sys_pll2_out", 1, 6); - hws[IMX8MP_SYS_PLL2_200M] = imx_clk_hw_fixed_factor("sys_pll2_200m", "sys_pll2_out", 1, 5); - hws[IMX8MP_SYS_PLL2_250M] = imx_clk_hw_fixed_factor("sys_pll2_250m", "sys_pll2_out", 1, 4); - hws[IMX8MP_SYS_PLL2_333M] = imx_clk_hw_fixed_factor("sys_pll2_333m", "sys_pll2_out", 1, 3); - hws[IMX8MP_SYS_PLL2_500M] = imx_clk_hw_fixed_factor("sys_pll2_500m", "sys_pll2_out", 1, 2); + hws[IMX8MP_SYS_PLL2_50M_CG] = imx_clk_hw_gate("sys_pll2_50m_cg", "sys_pll2_bypass", anatop_base + 0x104, 27); + hws[IMX8MP_SYS_PLL2_100M_CG] = imx_clk_hw_gate("sys_pll2_100m_cg", "sys_pll2_bypass", anatop_base + 0x104, 25); + hws[IMX8MP_SYS_PLL2_125M_CG] = imx_clk_hw_gate("sys_pll2_125m_cg", "sys_pll2_bypass", anatop_base + 0x104, 23); + hws[IMX8MP_SYS_PLL2_166M_CG] = imx_clk_hw_gate("sys_pll2_166m_cg", "sys_pll2_bypass", anatop_base + 0x104, 21); + hws[IMX8MP_SYS_PLL2_200M_CG] = imx_clk_hw_gate("sys_pll2_200m_cg", "sys_pll2_bypass", anatop_base + 0x104, 19); + hws[IMX8MP_SYS_PLL2_250M_CG] = imx_clk_hw_gate("sys_pll2_250m_cg", "sys_pll2_bypass", anatop_base + 0x104, 17); + hws[IMX8MP_SYS_PLL2_333M_CG] = imx_clk_hw_gate("sys_pll2_333m_cg", "sys_pll2_bypass", anatop_base + 0x104, 15); + hws[IMX8MP_SYS_PLL2_500M_CG] = imx_clk_hw_gate("sys_pll2_500m_cg", "sys_pll2_bypass", anatop_base + 0x104, 13); + hws[IMX8MP_SYS_PLL2_OUT] = imx_clk_hw_gate("sys_pll2_out", "sys_pll2_bypass", anatop_base + 0x104, 11); + + 
hws[IMX8MP_SYS_PLL2_50M] = imx_clk_hw_fixed_factor("sys_pll2_50m", "sys_pll2_50m_cg", 1, 20); + hws[IMX8MP_SYS_PLL2_100M] = imx_clk_hw_fixed_factor("sys_pll2_100m", "sys_pll2_100m_cg", 1, 10); + hws[IMX8MP_SYS_PLL2_125M] = imx_clk_hw_fixed_factor("sys_pll2_125m", "sys_pll2_125m_cg", 1, 8); + hws[IMX8MP_SYS_PLL2_166M] = imx_clk_hw_fixed_factor("sys_pll2_166m", "sys_pll2_166m_cg", 1, 6); + hws[IMX8MP_SYS_PLL2_200M] = imx_clk_hw_fixed_factor("sys_pll2_200m", "sys_pll2_200m_cg", 1, 5); + hws[IMX8MP_SYS_PLL2_250M] = imx_clk_hw_fixed_factor("sys_pll2_250m", "sys_pll2_250m_cg", 1, 4); + hws[IMX8MP_SYS_PLL2_333M] = imx_clk_hw_fixed_factor("sys_pll2_333m", "sys_pll2_333m_cg", 1, 3); + hws[IMX8MP_SYS_PLL2_500M] = imx_clk_hw_fixed_factor("sys_pll2_500m", "sys_pll2_500m_cg", 1, 2); hws[IMX8MP_SYS_PLL2_1000M] = imx_clk_hw_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1); hws[IMX8MP_CLK_A53_SRC] = imx_clk_hw_mux2("arm_a53_src", ccm_base + 0x8000, 24, 3, imx8mp_a53_sels, ARRAY_SIZE(imx8mp_a53_sels)); diff --git a/include/dt-bindings/clock/imx8mp-clock.h b/include/dt-bindings/clock/imx8mp-clock.h index 305433f9cc074..3a8c55a11c1ea 100644 --- a/include/dt-bindings/clock/imx8mp-clock.h +++ b/include/dt-bindings/clock/imx8mp-clock.h @@ -296,7 +296,24 @@ #define IMX8MP_CLK_ARM 287 #define IMX8MP_CLK_A53_CORE 288 -#define IMX8MP_CLK_END 289 +#define IMX8MP_SYS_PLL1_40M_CG 289 +#define IMX8MP_SYS_PLL1_80M_CG 290 +#define IMX8MP_SYS_PLL1_100M_CG 291 +#define IMX8MP_SYS_PLL1_133M_CG 292 +#define IMX8MP_SYS_PLL1_160M_CG 293 +#define IMX8MP_SYS_PLL1_200M_CG 294 +#define IMX8MP_SYS_PLL1_266M_CG 295 +#define IMX8MP_SYS_PLL1_400M_CG 296 +#define IMX8MP_SYS_PLL2_50M_CG 297 +#define IMX8MP_SYS_PLL2_100M_CG 298 +#define IMX8MP_SYS_PLL2_125M_CG 299 +#define IMX8MP_SYS_PLL2_166M_CG 300 +#define IMX8MP_SYS_PLL2_200M_CG 301 +#define IMX8MP_SYS_PLL2_250M_CG 302 +#define IMX8MP_SYS_PLL2_333M_CG 303 +#define IMX8MP_SYS_PLL2_500M_CG 304 + +#define IMX8MP_CLK_END 305 #define IMX8MP_CLK_AUDIOMIX_SAI1_IPG 0 #define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK1 1 -- GitLab From 8c83a8ff4dd92878c2e1163a1b4a12ce29f8fa68 Mon Sep 17 00:00:00 2001 From: Peng Fan Date: Thu, 7 May 2020 13:56:14 +0800 Subject: [PATCH 0574/1971] clk: imx8mp: use imx8m_clk_hw_composite_core to simplify code Use imx8m_clk_hw_composite_core to simpliy clks that belong to core clk slice. 
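Schematically (placeholder names; the real calls are in the hunks
below), each core clock slice that was previously registered as three
separate clocks on the same CCM slice register:

    hws[FOO_SRC] = imx_clk_hw_mux2("foo_src", base + off, 24, 3, sels, ARRAY_SIZE(sels));
    hws[FOO_CG]  = imx_clk_hw_gate3("foo_cg", "foo_src", base + off, 28);
    hws[FOO_DIV] = imx_clk_hw_divider2("foo_div", "foo_cg", base + off, 0, 3);

is now registered through the single composite helper:

    hws[FOO_CORE] = imx8m_clk_hw_composite_core("foo_core", sels, base + off);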
Reviewed-by: Leonard Crestez Signed-off-by: Peng Fan Reviewed-by: Dong Aisheng Signed-off-by: Shawn Guo --- drivers/clk/imx/clk-imx8mp.c | 47 ++++++++---------------- include/dt-bindings/clock/imx8mp-clock.h | 11 +++++- 2 files changed, 26 insertions(+), 32 deletions(-) diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c index a7613c7355c8e..998e9e63f8311 100644 --- a/drivers/clk/imx/clk-imx8mp.c +++ b/drivers/clk/imx/clk-imx8mp.c @@ -546,33 +546,18 @@ static int imx8mp_clocks_probe(struct platform_device *pdev) hws[IMX8MP_SYS_PLL2_500M] = imx_clk_hw_fixed_factor("sys_pll2_500m", "sys_pll2_500m_cg", 1, 2); hws[IMX8MP_SYS_PLL2_1000M] = imx_clk_hw_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1); - hws[IMX8MP_CLK_A53_SRC] = imx_clk_hw_mux2("arm_a53_src", ccm_base + 0x8000, 24, 3, imx8mp_a53_sels, ARRAY_SIZE(imx8mp_a53_sels)); - hws[IMX8MP_CLK_M7_SRC] = imx_clk_hw_mux2("arm_m7_src", ccm_base + 0x8080, 24, 3, imx8mp_m7_sels, ARRAY_SIZE(imx8mp_m7_sels)); - hws[IMX8MP_CLK_ML_SRC] = imx_clk_hw_mux2("ml_src", ccm_base + 0x8100, 24, 3, imx8mp_ml_sels, ARRAY_SIZE(imx8mp_ml_sels)); - hws[IMX8MP_CLK_GPU3D_CORE_SRC] = imx_clk_hw_mux2("gpu3d_core_src", ccm_base + 0x8180, 24, 3, imx8mp_gpu3d_core_sels, ARRAY_SIZE(imx8mp_gpu3d_core_sels)); - hws[IMX8MP_CLK_GPU3D_SHADER_SRC] = imx_clk_hw_mux2("gpu3d_shader_src", ccm_base + 0x8200, 24, 3, imx8mp_gpu3d_shader_sels, ARRAY_SIZE(imx8mp_gpu3d_shader_sels)); - hws[IMX8MP_CLK_GPU2D_SRC] = imx_clk_hw_mux2("gpu2d_src", ccm_base + 0x8280, 24, 3, imx8mp_gpu2d_sels, ARRAY_SIZE(imx8mp_gpu2d_sels)); - hws[IMX8MP_CLK_AUDIO_AXI_SRC] = imx_clk_hw_mux2("audio_axi_src", ccm_base + 0x8300, 24, 3, imx8mp_audio_axi_sels, ARRAY_SIZE(imx8mp_audio_axi_sels)); - hws[IMX8MP_CLK_HSIO_AXI_SRC] = imx_clk_hw_mux2("hsio_axi_src", ccm_base + 0x8380, 24, 3, imx8mp_hsio_axi_sels, ARRAY_SIZE(imx8mp_hsio_axi_sels)); - hws[IMX8MP_CLK_MEDIA_ISP_SRC] = imx_clk_hw_mux2("media_isp_src", ccm_base + 0x8400, 24, 3, imx8mp_media_isp_sels, ARRAY_SIZE(imx8mp_media_isp_sels)); - hws[IMX8MP_CLK_A53_CG] = imx_clk_hw_gate3("arm_a53_cg", "arm_a53_src", ccm_base + 0x8000, 28); - hws[IMX8MP_CLK_M4_CG] = imx_clk_hw_gate3("arm_m7_cg", "arm_m7_src", ccm_base + 0x8080, 28); - hws[IMX8MP_CLK_ML_CG] = imx_clk_hw_gate3("ml_cg", "ml_src", ccm_base + 0x8100, 28); - hws[IMX8MP_CLK_GPU3D_CORE_CG] = imx_clk_hw_gate3("gpu3d_core_cg", "gpu3d_core_src", ccm_base + 0x8180, 28); - hws[IMX8MP_CLK_GPU3D_SHADER_CG] = imx_clk_hw_gate3("gpu3d_shader_cg", "gpu3d_shader_src", ccm_base + 0x8200, 28); - hws[IMX8MP_CLK_GPU2D_CG] = imx_clk_hw_gate3("gpu2d_cg", "gpu2d_src", ccm_base + 0x8280, 28); - hws[IMX8MP_CLK_AUDIO_AXI_CG] = imx_clk_hw_gate3("audio_axi_cg", "audio_axi_src", ccm_base + 0x8300, 28); - hws[IMX8MP_CLK_HSIO_AXI_CG] = imx_clk_hw_gate3("hsio_axi_cg", "hsio_axi_src", ccm_base + 0x8380, 28); - hws[IMX8MP_CLK_MEDIA_ISP_CG] = imx_clk_hw_gate3("media_isp_cg", "media_isp_src", ccm_base + 0x8400, 28); - hws[IMX8MP_CLK_A53_DIV] = imx_clk_hw_divider2("arm_a53_div", "arm_a53_cg", ccm_base + 0x8000, 0, 3); - hws[IMX8MP_CLK_M7_DIV] = imx_clk_hw_divider2("arm_m7_div", "arm_m7_cg", ccm_base + 0x8080, 0, 3); - hws[IMX8MP_CLK_ML_DIV] = imx_clk_hw_divider2("ml_div", "ml_cg", ccm_base + 0x8100, 0, 3); - hws[IMX8MP_CLK_GPU3D_CORE_DIV] = imx_clk_hw_divider2("gpu3d_core_div", "gpu3d_core_cg", ccm_base + 0x8180, 0, 3); - hws[IMX8MP_CLK_GPU3D_SHADER_DIV] = imx_clk_hw_divider2("gpu3d_shader_div", "gpu3d_shader_cg", ccm_base + 0x8200, 0, 3); - hws[IMX8MP_CLK_GPU2D_DIV] = imx_clk_hw_divider2("gpu2d_div", "gpu2d_cg", ccm_base 
+ 0x8280, 0, 3); - hws[IMX8MP_CLK_AUDIO_AXI_DIV] = imx_clk_hw_divider2("audio_axi_div", "audio_axi_cg", ccm_base + 0x8300, 0, 3); - hws[IMX8MP_CLK_HSIO_AXI_DIV] = imx_clk_hw_divider2("hsio_axi_div", "hsio_axi_cg", ccm_base + 0x8380, 0, 3); - hws[IMX8MP_CLK_MEDIA_ISP_DIV] = imx_clk_hw_divider2("media_isp_div", "media_isp_cg", ccm_base + 0x8400, 0, 3); + hws[IMX8MP_CLK_A53_DIV] = imx8m_clk_hw_composite_core("arm_a53_div", imx8mp_a53_sels, ccm_base + 0x8000); + hws[IMX8MP_CLK_A53_SRC] = hws[IMX8MP_CLK_A53_DIV]; + hws[IMX8MP_CLK_A53_CG] = hws[IMX8MP_CLK_A53_DIV]; + hws[IMX8MP_CLK_M7_CORE] = imx8m_clk_hw_composite_core("m7_core", imx8mp_m7_sels, ccm_base + 0x8080); + hws[IMX8MP_CLK_ML_CORE] = imx8m_clk_hw_composite_core("ml_core", imx8mp_ml_sels, ccm_base + 0x8100); + hws[IMX8MP_CLK_GPU3D_CORE] = imx8m_clk_hw_composite_core("gpu3d_core", imx8mp_gpu3d_core_sels, ccm_base + 0x8180); + hws[IMX8MP_CLK_GPU3D_SHADER_CORE] = imx8m_clk_hw_composite("gpu3d_shader_core", imx8mp_gpu3d_shader_sels, ccm_base + 0x8200); + hws[IMX8MP_CLK_GPU2D_CORE] = imx8m_clk_hw_composite("gpu2d_core", imx8mp_gpu2d_sels, ccm_base + 0x8280); + hws[IMX8MP_CLK_AUDIO_AXI] = imx8m_clk_hw_composite("audio_axi", imx8mp_audio_axi_sels, ccm_base + 0x8300); + hws[IMX8MP_CLK_AUDIO_AXI_SRC] = hws[IMX8MP_CLK_AUDIO_AXI]; + hws[IMX8MP_CLK_HSIO_AXI] = imx8m_clk_hw_composite("hsio_axi", imx8mp_hsio_axi_sels, ccm_base + 0x8380); + hws[IMX8MP_CLK_MEDIA_ISP] = imx8m_clk_hw_composite("media_isp", imx8mp_media_isp_sels, ccm_base + 0x8400); /* CORE SEL */ hws[IMX8MP_CLK_A53_CORE] = imx_clk_hw_mux2("arm_a53_core", ccm_base + 0x9880, 24, 1, imx8mp_a53_core_sels, ARRAY_SIZE(imx8mp_a53_core_sels)); @@ -713,8 +698,8 @@ static int imx8mp_clocks_probe(struct platform_device *pdev) hws[IMX8MP_CLK_SDMA1_ROOT] = imx_clk_hw_gate4("sdma1_root_clk", "ipg_root", ccm_base + 0x43a0, 0); hws[IMX8MP_CLK_ENET_QOS_ROOT] = imx_clk_hw_gate4("enet_qos_root_clk", "sim_enet_root_clk", ccm_base + 0x43b0, 0); hws[IMX8MP_CLK_SIM_ENET_ROOT] = imx_clk_hw_gate4("sim_enet_root_clk", "enet_axi", ccm_base + 0x4400, 0); - hws[IMX8MP_CLK_GPU2D_ROOT] = imx_clk_hw_gate4("gpu2d_root_clk", "gpu2d_div", ccm_base + 0x4450, 0); - hws[IMX8MP_CLK_GPU3D_ROOT] = imx_clk_hw_gate4("gpu3d_root_clk", "gpu3d_core_div", ccm_base + 0x4460, 0); + hws[IMX8MP_CLK_GPU2D_ROOT] = imx_clk_hw_gate4("gpu2d_root_clk", "gpu2d_core", ccm_base + 0x4450, 0); + hws[IMX8MP_CLK_GPU3D_ROOT] = imx_clk_hw_gate4("gpu3d_root_clk", "gpu3d_core", ccm_base + 0x4460, 0); hws[IMX8MP_CLK_SNVS_ROOT] = imx_clk_hw_gate4("snvs_root_clk", "ipg_root", ccm_base + 0x4470, 0); hws[IMX8MP_CLK_UART1_ROOT] = imx_clk_hw_gate4("uart1_root_clk", "uart1", ccm_base + 0x4490, 0); hws[IMX8MP_CLK_UART2_ROOT] = imx_clk_hw_gate4("uart2_root_clk", "uart2", ccm_base + 0x44a0, 0); @@ -731,7 +716,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev) hws[IMX8MP_CLK_GPU_ROOT] = imx_clk_hw_gate4("gpu_root_clk", "gpu_axi", ccm_base + 0x4570, 0); hws[IMX8MP_CLK_VPU_VC8KE_ROOT] = imx_clk_hw_gate4("vpu_vc8ke_root_clk", "vpu_vc8000e", ccm_base + 0x4590, 0); hws[IMX8MP_CLK_VPU_G2_ROOT] = imx_clk_hw_gate4("vpu_g2_root_clk", "vpu_g2", ccm_base + 0x45a0, 0); - hws[IMX8MP_CLK_NPU_ROOT] = imx_clk_hw_gate4("npu_root_clk", "ml_div", ccm_base + 0x45b0, 0); + hws[IMX8MP_CLK_NPU_ROOT] = imx_clk_hw_gate4("npu_root_clk", "ml_core", ccm_base + 0x45b0, 0); hws[IMX8MP_CLK_HSIO_ROOT] = imx_clk_hw_gate4("hsio_root_clk", "ipg_root", ccm_base + 0x45c0, 0); hws[IMX8MP_CLK_MEDIA_APB_ROOT] = imx_clk_hw_gate2_shared2("media_apb_root_clk", "media_apb", ccm_base + 0x45d0, 
0, &share_count_media); hws[IMX8MP_CLK_MEDIA_AXI_ROOT] = imx_clk_hw_gate2_shared2("media_axi_root_clk", "media_axi", ccm_base + 0x45d0, 0, &share_count_media); @@ -739,7 +724,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev) hws[IMX8MP_CLK_MEDIA_CAM2_PIX_ROOT] = imx_clk_hw_gate2_shared2("media_cam2_pix_root_clk", "media_cam2_pix", ccm_base + 0x45d0, 0, &share_count_media); hws[IMX8MP_CLK_MEDIA_DISP1_PIX_ROOT] = imx_clk_hw_gate2_shared2("media_disp1_pix_root_clk", "media_disp1_pix", ccm_base + 0x45d0, 0, &share_count_media); hws[IMX8MP_CLK_MEDIA_DISP2_PIX_ROOT] = imx_clk_hw_gate2_shared2("media_disp2_pix_root_clk", "media_disp2_pix", ccm_base + 0x45d0, 0, &share_count_media); - hws[IMX8MP_CLK_MEDIA_ISP_ROOT] = imx_clk_hw_gate2_shared2("media_isp_root_clk", "media_isp_div", ccm_base + 0x45d0, 0, &share_count_media); + hws[IMX8MP_CLK_MEDIA_ISP_ROOT] = imx_clk_hw_gate2_shared2("media_isp_root_clk", "media_isp", ccm_base + 0x45d0, 0, &share_count_media); hws[IMX8MP_CLK_USDHC3_ROOT] = imx_clk_hw_gate4("usdhc3_root_clk", "usdhc3", ccm_base + 0x45e0, 0); hws[IMX8MP_CLK_HDMI_ROOT] = imx_clk_hw_gate4("hdmi_root_clk", "hdmi_axi", ccm_base + 0x45f0, 0); diff --git a/include/dt-bindings/clock/imx8mp-clock.h b/include/dt-bindings/clock/imx8mp-clock.h index 3a8c55a11c1ea..7a23f289b27f9 100644 --- a/include/dt-bindings/clock/imx8mp-clock.h +++ b/include/dt-bindings/clock/imx8mp-clock.h @@ -313,7 +313,16 @@ #define IMX8MP_SYS_PLL2_333M_CG 303 #define IMX8MP_SYS_PLL2_500M_CG 304 -#define IMX8MP_CLK_END 305 +#define IMX8MP_CLK_M7_CORE 305 +#define IMX8MP_CLK_ML_CORE 306 +#define IMX8MP_CLK_GPU3D_CORE 307 +#define IMX8MP_CLK_GPU3D_SHADER_CORE 308 +#define IMX8MP_CLK_GPU2D_CORE 309 +#define IMX8MP_CLK_AUDIO_AXI 310 +#define IMX8MP_CLK_HSIO_AXI 311 +#define IMX8MP_CLK_MEDIA_ISP 312 + +#define IMX8MP_CLK_END 313 #define IMX8MP_CLK_AUDIOMIX_SAI1_IPG 0 #define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK1 1 -- GitLab From b737beddb32994ad878cff4ccfd24d68e082dcb7 Mon Sep 17 00:00:00 2001 From: Peng Fan Date: Thu, 7 May 2020 13:56:15 +0800 Subject: [PATCH 0575/1971] clk: imx8m: migrate A53 clk root to use composite core Migrate A53 clk root to use composite core clk type. It will simplify code and make it easy to use composite specific mux operation. 
Reviewed-by: Leonard Crestez Signed-off-by: Peng Fan Reviewed-by: Dong Aisheng Signed-off-by: Shawn Guo --- drivers/clk/imx/clk-imx8mm.c | 6 +++--- drivers/clk/imx/clk-imx8mn.c | 6 +++--- drivers/clk/imx/clk-imx8mq.c | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c index 5435042a06e3a..12443e06f3292 100644 --- a/drivers/clk/imx/clk-imx8mm.c +++ b/drivers/clk/imx/clk-imx8mm.c @@ -416,9 +416,9 @@ static int imx8mm_clocks_probe(struct platform_device *pdev) return PTR_ERR(base); /* Core Slice */ - hws[IMX8MM_CLK_A53_SRC] = imx_clk_hw_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mm_a53_sels, ARRAY_SIZE(imx8mm_a53_sels)); - hws[IMX8MM_CLK_A53_CG] = imx_clk_hw_gate3("arm_a53_cg", "arm_a53_src", base + 0x8000, 28); - hws[IMX8MM_CLK_A53_DIV] = imx_clk_hw_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3); + hws[IMX8MM_CLK_A53_DIV] = imx8m_clk_hw_composite_core("arm_a53_div", imx8mm_a53_sels, base + 0x8000); + hws[IMX8MM_CLK_A53_CG] = hws[IMX8MM_CLK_A53_DIV]; + hws[IMX8MM_CLK_A53_SRC] = hws[IMX8MM_CLK_A53_DIV]; hws[IMX8MM_CLK_M4_CORE] = imx8m_clk_hw_composite_core("arm_m4_core", imx8mm_m4_sels, base + 0x8080); hws[IMX8MM_CLK_VPU_CORE] = imx8m_clk_hw_composite_core("vpu_core", imx8mm_vpu_sels, base + 0x8100); diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c index 6cac6ca03e122..bd3759b4afd06 100644 --- a/drivers/clk/imx/clk-imx8mn.c +++ b/drivers/clk/imx/clk-imx8mn.c @@ -413,9 +413,9 @@ static int imx8mn_clocks_probe(struct platform_device *pdev) } /* CORE */ - hws[IMX8MN_CLK_A53_SRC] = imx_clk_hw_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mn_a53_sels, ARRAY_SIZE(imx8mn_a53_sels)); - hws[IMX8MN_CLK_A53_CG] = imx_clk_hw_gate3("arm_a53_cg", "arm_a53_src", base + 0x8000, 28); - hws[IMX8MN_CLK_A53_DIV] = imx_clk_hw_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3); + hws[IMX8MN_CLK_A53_DIV] = imx8m_clk_hw_composite_core("arm_a53_div", imx8mn_a53_sels, base + 0x8000); + hws[IMX8MN_CLK_A53_SRC] = hws[IMX8MN_CLK_A53_DIV]; + hws[IMX8MN_CLK_A53_CG] = hws[IMX8MN_CLK_A53_DIV]; hws[IMX8MN_CLK_GPU_CORE] = imx8m_clk_hw_composite_core("gpu_core", imx8mn_gpu_core_sels, base + 0x8180); hws[IMX8MN_CLK_GPU_SHADER] = imx8m_clk_hw_composite_core("gpu_shader", imx8mn_gpu_shader_sels, base + 0x8200); diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c index 201c7bbb201f4..91309ff654417 100644 --- a/drivers/clk/imx/clk-imx8mq.c +++ b/drivers/clk/imx/clk-imx8mq.c @@ -405,9 +405,9 @@ static int imx8mq_clocks_probe(struct platform_device *pdev) return PTR_ERR(base); /* CORE */ - hws[IMX8MQ_CLK_A53_SRC] = imx_clk_hw_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mq_a53_sels, ARRAY_SIZE(imx8mq_a53_sels)); - hws[IMX8MQ_CLK_A53_CG] = imx_clk_hw_gate3_flags("arm_a53_cg", "arm_a53_src", base + 0x8000, 28, CLK_IS_CRITICAL); - hws[IMX8MQ_CLK_A53_DIV] = imx_clk_hw_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3); + hws[IMX8MQ_CLK_A53_DIV] = imx8m_clk_hw_composite_core("arm_a53_div", imx8mq_a53_sels, base + 0x8000); + hws[IMX8MQ_CLK_A53_CG] = hws[IMX8MQ_CLK_A53_DIV]; + hws[IMX8MQ_CLK_A53_SRC] = hws[IMX8MQ_CLK_A53_DIV]; hws[IMX8MQ_CLK_M4_CORE] = imx8m_clk_hw_composite_core("arm_m4_core", imx8mq_arm_m4_sels, base + 0x8080); hws[IMX8MQ_CLK_VPU_CORE] = imx8m_clk_hw_composite_core("vpu_core", imx8mq_vpu_sels, base + 0x8100); -- GitLab From 2c4656dfd994538176db30ce09c02cc0dfc361ae Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Wed, 20 May 2020 11:39:35 +0200 Subject: [PATCH 
0576/1971] fuse: fix copy_file_range cache issues a) Dirty cache needs to be written back not just in the writeback_cache case, since the dirty pages may come from memory maps. b) The fuse_writeback_range() helper takes an inclusive interval, so the end position needs to be pos+len-1 instead of pos+len. Fixes: 88bc7d5097a1 ("fuse: add support for copy_file_range()") Signed-off-by: Miklos Szeredi --- fs/fuse/file.c | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 15812a8b38343..d365008fda443 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -3325,13 +3325,11 @@ static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in, if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb) return -EXDEV; - if (fc->writeback_cache) { - inode_lock(inode_in); - err = fuse_writeback_range(inode_in, pos_in, pos_in + len); - inode_unlock(inode_in); - if (err) - return err; - } + inode_lock(inode_in); + err = fuse_writeback_range(inode_in, pos_in, pos_in + len - 1); + inode_unlock(inode_in); + if (err) + return err; inode_lock(inode_out); @@ -3339,11 +3337,9 @@ static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in, if (err) goto out; - if (fc->writeback_cache) { - err = fuse_writeback_range(inode_out, pos_out, pos_out + len); - if (err) - goto out; - } + err = fuse_writeback_range(inode_out, pos_out, pos_out + len - 1); + if (err) + goto out; if (is_unstable) set_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state); -- GitLab From 9b46418c40fe910e6537618f9932a8be78a3dd6c Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Wed, 20 May 2020 11:39:35 +0200 Subject: [PATCH 0577/1971] fuse: copy_file_range should truncate cache After the copy operation completes the cache is not up-to-date. Truncate all pages in the interval that has successfully been copied. Truncating completely copied dirty pages is okay, since the data has been overwritten anyway. Truncating partially copied dirty pages is not okay; add a comment for now. Fixes: 88bc7d5097a1 ("fuse: add support for copy_file_range()") Signed-off-by: Miklos Szeredi --- fs/fuse/file.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index d365008fda443..336d1cf72da00 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -3337,6 +3337,24 @@ static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in, if (err) goto out; + /* + * Write out dirty pages in the destination file before sending the COPY + * request to userspace. After the request is completed, truncate off + * pages (including partial ones) from the cache that have been copied, + * since these contain stale data at that point. + * + * This should be mostly correct, but if the COPY writes to partial + * pages (at the start or end) and the parts not covered by the COPY are + * written through a memory map after calling fuse_writeback_range(), + * then these partial page modifications will be lost on truncation. + * + * It is unlikely that someone would rely on such mixed style + * modifications. Yet this does give less guarantees than if the + * copying was performed with write(2). + * + * To fix this a i_mmap_sem style lock could be used to prevent new + * faults while the copy is ongoing. 
+ */ err = fuse_writeback_range(inode_out, pos_out, pos_out + len - 1); if (err) goto out; @@ -3360,6 +3378,10 @@ static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in, if (err) goto out; + truncate_inode_pages_range(inode_out->i_mapping, + ALIGN_DOWN(pos_out, PAGE_SIZE), + ALIGN(pos_out + outarg.size, PAGE_SIZE) - 1); + if (fc->writeback_cache) { fuse_write_update_size(inode_out, pos_out + outarg.size); file_update_time(file_out); -- GitLab From 9b9df63b50306b9602954d2f40fa8e05c0c27fda Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Fri, 8 May 2020 12:03:21 +0200 Subject: [PATCH 0578/1971] dt-bindings: clock: renesas: mstp: Convert to json-schema Convert the Renesas Clock Pulse Generator (CPG) Module Stop (MSTP) Clocks Device Tree binding documentation to json-schema. Drop R-Car Gen2 compatible values, which were obsoleted by the unified "Renesas Clock Pulse Generator / Module Standby and Software Reset" DT bindings. Replace the obsolete example for R-Car H2 by an example that is still valid. Signed-off-by: Geert Uytterhoeven Reviewed-by: Rob Herring Link: https://lore.kernel.org/r/20200508100321.6720-1-geert+renesas@glider.be --- .../clock/renesas,cpg-mstp-clocks.txt | 60 -------------- .../clock/renesas,cpg-mstp-clocks.yaml | 82 +++++++++++++++++++ 2 files changed, 82 insertions(+), 60 deletions(-) delete mode 100644 Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt create mode 100644 Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.yaml diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt b/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt deleted file mode 100644 index da578ebdda288..0000000000000 --- a/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt +++ /dev/null @@ -1,60 +0,0 @@ -* Renesas CPG Module Stop (MSTP) Clocks - -The CPG can gate SoC device clocks. The gates are organized in groups of up to -32 gates. - -This device tree binding describes a single 32 gate clocks group per node. -Clocks are referenced by user nodes by the MSTP node phandle and the clock -index in the group, from 0 to 31. - -Required Properties: - - - compatible: Must be one of the following - - "renesas,r7s72100-mstp-clocks" for R7S72100 (RZ) MSTP gate clocks - - "renesas,r8a73a4-mstp-clocks" for R8A73A4 (R-Mobile APE6) MSTP gate clocks - - "renesas,r8a7740-mstp-clocks" for R8A7740 (R-Mobile A1) MSTP gate clocks - - "renesas,r8a7778-mstp-clocks" for R8A7778 (R-Car M1) MSTP gate clocks - - "renesas,r8a7779-mstp-clocks" for R8A7779 (R-Car H1) MSTP gate clocks - - "renesas,r8a7790-mstp-clocks" for R8A7790 (R-Car H2) MSTP gate clocks - - "renesas,r8a7791-mstp-clocks" for R8A7791 (R-Car M2-W) MSTP gate clocks - - "renesas,r8a7792-mstp-clocks" for R8A7792 (R-Car V2H) MSTP gate clocks - - "renesas,r8a7793-mstp-clocks" for R8A7793 (R-Car M2-N) MSTP gate clocks - - "renesas,r8a7794-mstp-clocks" for R8A7794 (R-Car E2) MSTP gate clocks - - "renesas,sh73a0-mstp-clocks" for SH73A0 (SH-MobileAG5) MSTP gate clocks - and "renesas,cpg-mstp-clocks" as a fallback. - - reg: Base address and length of the I/O mapped registers used by the MSTP - clocks. The first register is the clock control register and is mandatory. - The second register is the clock status register and is optional when not - implemented in hardware. - - clocks: Reference to the parent clocks, one per output clock. The parents - must appear in the same order as the output clocks. 
- - #clock-cells: Must be 1 - - clock-output-names: The name of the clocks as free-form strings - - clock-indices: Indices of the gate clocks into the group (0 to 31) - -The clocks, clock-output-names and clock-indices properties contain one entry -per gate clock. The MSTP groups are sparsely populated. Unimplemented gate -clocks must not be declared. - - -Example -------- - - #include - - mstp3_clks: mstp3_clks@e615013c { - compatible = "renesas,r8a7790-mstp-clocks", "renesas,cpg-mstp-clocks"; - reg = <0 0xe615013c 0 4>, <0 0xe6150048 0 4>; - clocks = <&cp_clk>, <&mmc1_clk>, <&sd3_clk>, <&sd2_clk>, - <&cpg_clocks R8A7790_CLK_SD1>, <&cpg_clocks R8A7790_CLK_SD0>, - <&mmc0_clk>; - #clock-cells = <1>; - clock-output-names = - "tpu0", "mmcif1", "sdhi3", "sdhi2", - "sdhi1", "sdhi0", "mmcif0"; - clock-indices = < - R8A7790_CLK_TPU0 R8A7790_CLK_MMCIF1 R8A7790_CLK_SDHI3 - R8A7790_CLK_SDHI2 R8A7790_CLK_SDHI1 R8A7790_CLK_SDHI0 - R8A7790_CLK_MMCIF0 - >; - }; diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.yaml b/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.yaml new file mode 100644 index 0000000000000..9752ac63288b1 --- /dev/null +++ b/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.yaml @@ -0,0 +1,82 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/clock/renesas,cpg-mstp-clocks.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Renesas Clock Pulse Generator (CPG) Module Stop (MSTP) Clocks + +maintainers: + - Geert Uytterhoeven + +description: + The Clock Pulse Generator (CPG) can gate SoC device clocks. The gates are + organized in groups of up to 32 gates. + + This device tree binding describes a single 32 gate clocks group per node. + Clocks are referenced by user nodes by the Module Stop (MSTP) node phandle + and the clock index in the group, from 0 to 31. 
+ +properties: + compatible: + items: + - enum: + - renesas,r7s72100-mstp-clocks # RZ/A1 + - renesas,r8a73a4-mstp-clocks # R-Mobile APE6 + - renesas,r8a7740-mstp-clocks # R-Mobile A1 + - renesas,r8a7778-mstp-clocks # R-Car M1 + - renesas,r8a7779-mstp-clocks # R-Car H1 + - renesas,sh73a0-mstp-clocks # SH-Mobile AG5 + - const: renesas,cpg-mstp-clocks + + reg: + minItems: 1 + items: + - description: Module Stop Control Register (MSTPCR) + - description: Module Stop Status Register (MSTPSR) + + clocks: + minItems: 1 + maxItems: 32 + + '#clock-cells': + const: 1 + + clock-indices: + minItems: 1 + maxItems: 32 + + clock-output-names: + minItems: 1 + maxItems: 32 + +required: + - compatible + - reg + - clocks + - '#clock-cells' + - clock-indices + - clock-output-names + +additionalProperties: false + +examples: + - | + #include + mstp2_clks: mstp2_clks@e6150138 { + compatible = "renesas,r8a73a4-mstp-clocks", + "renesas,cpg-mstp-clocks"; + reg = <0xe6150138 4>, <0xe6150040 4>; + clocks = <&mp_clk>, <&mp_clk>, <&mp_clk>, <&mp_clk>, <&mp_clk>, + <&mp_clk>, <&cpg_clocks R8A73A4_CLK_HP>; + #clock-cells = <1>; + clock-indices = < + R8A73A4_CLK_SCIFA0 R8A73A4_CLK_SCIFA1 + R8A73A4_CLK_SCIFB0 R8A73A4_CLK_SCIFB1 + R8A73A4_CLK_SCIFB2 R8A73A4_CLK_SCIFB3 + R8A73A4_CLK_DMAC + >; + clock-output-names = + "scifa0", "scifa1", "scifb0", "scifb1", "scifb2", "scifb3", + "dmac"; + }; -- GitLab From be5ce0e97cc7a5c0d2da45d617b7bc567c3d3fa1 Mon Sep 17 00:00:00 2001 From: Qii Wang Date: Thu, 14 May 2020 21:09:05 +0800 Subject: [PATCH 0579/1971] i2c: mediatek: Add i2c ac-timing adjust support This patch adds a algorithm to calculate some ac-timing parameters which can fully meet I2C Spec. Signed-off-by: Qii Wang Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-mt65xx.c | 329 +++++++++++++++++++++++++++----- 1 file changed, 278 insertions(+), 51 deletions(-) diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c index 0ca6c38a15ebc..deef69e569062 100644 --- a/drivers/i2c/busses/i2c-mt65xx.c +++ b/drivers/i2c/busses/i2c-mt65xx.c @@ -40,12 +40,11 @@ #define I2C_SOFT_RST 0x0001 #define I2C_FIFO_ADDR_CLR 0x0001 #define I2C_DELAY_LEN 0x0002 -#define I2C_ST_START_CON 0x8001 -#define I2C_FS_START_CON 0x1800 #define I2C_TIME_CLR_VALUE 0x0000 #define I2C_TIME_DEFAULT_VALUE 0x0003 #define I2C_WRRD_TRANAC_VALUE 0x0002 #define I2C_RD_TRANAC_VALUE 0x0001 +#define I2C_SCL_MIS_COMP_VALUE 0x0000 #define I2C_DMA_CON_TX 0x0000 #define I2C_DMA_CON_RX 0x0001 @@ -55,10 +54,13 @@ #define I2C_DMA_HARD_RST 0x0002 #define I2C_DMA_4G_MODE 0x0001 -#define I2C_DEFAULT_CLK_DIV 5 #define MAX_SAMPLE_CNT_DIV 8 #define MAX_STEP_CNT_DIV 64 +#define MAX_CLOCK_DIV 256 #define MAX_HS_STEP_CNT_DIV 8 +#define I2C_STANDARD_MODE_BUFFER (1000 / 2) +#define I2C_FAST_MODE_BUFFER (300 / 2) +#define I2C_FAST_MODE_PLUS_BUFFER (20 / 2) #define I2C_CONTROL_RS (0x1 << 1) #define I2C_CONTROL_DMA_EN (0x1 << 2) @@ -123,6 +125,12 @@ enum I2C_REGS_OFFSET { OFFSET_TRANSFER_LEN_AUX, OFFSET_CLOCK_DIV, OFFSET_LTIMING, + OFFSET_SCL_HIGH_LOW_RATIO, + OFFSET_HS_SCL_HIGH_LOW_RATIO, + OFFSET_SCL_MIS_COMP_POINT, + OFFSET_STA_STO_AC_TIMING, + OFFSET_HS_STA_STO_AC_TIMING, + OFFSET_SDA_TIMING, }; static const u16 mt_i2c_regs_v1[] = { @@ -150,6 +158,12 @@ static const u16 mt_i2c_regs_v1[] = { [OFFSET_DEBUGCTRL] = 0x68, [OFFSET_TRANSFER_LEN_AUX] = 0x6c, [OFFSET_CLOCK_DIV] = 0x70, + [OFFSET_SCL_HIGH_LOW_RATIO] = 0x74, + [OFFSET_HS_SCL_HIGH_LOW_RATIO] = 0x78, + [OFFSET_SCL_MIS_COMP_POINT] = 0x7C, + [OFFSET_STA_STO_AC_TIMING] = 0x80, + 
[OFFSET_HS_STA_STO_AC_TIMING] = 0x84, + [OFFSET_SDA_TIMING] = 0x88, }; static const u16 mt_i2c_regs_v2[] = { @@ -168,9 +182,11 @@ static const u16 mt_i2c_regs_v2[] = { [OFFSET_HS] = 0x30, [OFFSET_IO_CONFIG] = 0x34, [OFFSET_FIFO_ADDR_CLR] = 0x38, + [OFFSET_SDA_TIMING] = 0x3c, [OFFSET_TRANSFER_LEN_AUX] = 0x44, [OFFSET_CLOCK_DIV] = 0x48, [OFFSET_SOFTRESET] = 0x50, + [OFFSET_SCL_MIS_COMP_POINT] = 0x90, [OFFSET_DEBUGSTAT] = 0xe0, [OFFSET_DEBUGCTRL] = 0xe8, [OFFSET_FIFO_STAT] = 0xf4, @@ -191,6 +207,19 @@ struct mtk_i2c_compatible { unsigned char ltiming_adjust: 1; }; +struct mtk_i2c_ac_timing { + u16 htiming; + u16 ltiming; + u16 hs; + u16 ext; + u16 inter_clk_div; + u16 scl_hl_ratio; + u16 hs_scl_hl_ratio; + u16 sta_stop; + u16 hs_sta_stop; + u16 sda_timing; +}; + struct mtk_i2c { struct i2c_adapter adap; /* i2c host adapter */ struct device *dev; @@ -215,9 +244,46 @@ struct mtk_i2c { u16 ltiming_reg; unsigned char auto_restart; bool ignore_restart_irq; + struct mtk_i2c_ac_timing ac_timing; const struct mtk_i2c_compatible *dev_comp; }; +/** + * struct i2c_spec_values: + * min_low_ns: min LOW period of the SCL clock + * min_su_sta_ns: min set-up time for a repeated START condition + * max_hd_dat_ns: max data hold time + * min_su_dat_ns: min data set-up time + */ +struct i2c_spec_values { + unsigned int min_low_ns; + unsigned int min_high_ns; + unsigned int min_su_sta_ns; + unsigned int max_hd_dat_ns; + unsigned int min_su_dat_ns; +}; + +static const struct i2c_spec_values standard_mode_spec = { + .min_low_ns = 4700 + I2C_STANDARD_MODE_BUFFER, + .min_su_sta_ns = 4700 + I2C_STANDARD_MODE_BUFFER, + .max_hd_dat_ns = 3450 - I2C_STANDARD_MODE_BUFFER, + .min_su_dat_ns = 250 + I2C_STANDARD_MODE_BUFFER, +}; + +static const struct i2c_spec_values fast_mode_spec = { + .min_low_ns = 1300 + I2C_FAST_MODE_BUFFER, + .min_su_sta_ns = 600 + I2C_FAST_MODE_BUFFER, + .max_hd_dat_ns = 900 - I2C_FAST_MODE_BUFFER, + .min_su_dat_ns = 100 + I2C_FAST_MODE_BUFFER, +}; + +static const struct i2c_spec_values fast_mode_plus_spec = { + .min_low_ns = 500 + I2C_FAST_MODE_PLUS_BUFFER, + .min_su_sta_ns = 260 + I2C_FAST_MODE_PLUS_BUFFER, + .max_hd_dat_ns = 400 - I2C_FAST_MODE_PLUS_BUFFER, + .min_su_dat_ns = 50 + I2C_FAST_MODE_PLUS_BUFFER, +}; + static const struct i2c_adapter_quirks mt6577_i2c_quirks = { .flags = I2C_AQ_COMB_WRITE_THEN_READ, .max_num_msgs = 1, @@ -397,14 +463,38 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c) if (i2c->dev_comp->dcm) mtk_i2c_writew(i2c, I2C_DCM_DISABLE, OFFSET_DCM_EN); - if (i2c->dev_comp->timing_adjust) - mtk_i2c_writew(i2c, I2C_DEFAULT_CLK_DIV - 1, OFFSET_CLOCK_DIV); - mtk_i2c_writew(i2c, i2c->timing_reg, OFFSET_TIMING); mtk_i2c_writew(i2c, i2c->high_speed_reg, OFFSET_HS); if (i2c->dev_comp->ltiming_adjust) mtk_i2c_writew(i2c, i2c->ltiming_reg, OFFSET_LTIMING); + if (i2c->dev_comp->timing_adjust) { + mtk_i2c_writew(i2c, i2c->ac_timing.ext, OFFSET_EXT_CONF); + mtk_i2c_writew(i2c, i2c->ac_timing.inter_clk_div, + OFFSET_CLOCK_DIV); + mtk_i2c_writew(i2c, I2C_SCL_MIS_COMP_VALUE, + OFFSET_SCL_MIS_COMP_POINT); + mtk_i2c_writew(i2c, i2c->ac_timing.sda_timing, + OFFSET_SDA_TIMING); + + if (i2c->dev_comp->ltiming_adjust) { + mtk_i2c_writew(i2c, i2c->ac_timing.htiming, + OFFSET_TIMING); + mtk_i2c_writew(i2c, i2c->ac_timing.hs, OFFSET_HS); + mtk_i2c_writew(i2c, i2c->ac_timing.ltiming, + OFFSET_LTIMING); + } else { + mtk_i2c_writew(i2c, i2c->ac_timing.scl_hl_ratio, + OFFSET_SCL_HIGH_LOW_RATIO); + mtk_i2c_writew(i2c, i2c->ac_timing.hs_scl_hl_ratio, + OFFSET_HS_SCL_HIGH_LOW_RATIO); + mtk_i2c_writew(i2c, 
i2c->ac_timing.sta_stop, + OFFSET_STA_STO_AC_TIMING); + mtk_i2c_writew(i2c, i2c->ac_timing.hs_sta_stop, + OFFSET_HS_STA_STO_AC_TIMING); + } + } + /* If use i2c pin from PMIC mt6397 side, need set PATH_DIR first */ if (i2c->have_pmic) mtk_i2c_writew(i2c, I2C_CONTROL_WRAPPER, OFFSET_PATH_DIR); @@ -422,6 +512,126 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c) writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST); } +static const struct i2c_spec_values *mtk_i2c_get_spec(unsigned int speed) +{ + if (speed <= I2C_MAX_STANDARD_MODE_FREQ) + return &standard_mode_spec; + else if (speed <= I2C_MAX_FAST_MODE_FREQ) + return &fast_mode_spec; + else + return &fast_mode_plus_spec; +} + +static int mtk_i2c_max_step_cnt(unsigned int target_speed) +{ + if (target_speed > I2C_MAX_FAST_MODE_FREQ) + return MAX_HS_STEP_CNT_DIV; + else + return MAX_STEP_CNT_DIV; +} + +/* + * Check and Calculate i2c ac-timing + * + * Hardware design: + * sample_ns = (1000000000 * (sample_cnt + 1)) / clk_src + * xxx_cnt_div = spec->min_xxx_ns / sample_ns + * + * Sample_ns is rounded down for xxx_cnt_div would be greater + * than the smallest spec. + * The sda_timing is chosen as the middle value between + * the largest and smallest. + */ +static int mtk_i2c_check_ac_timing(struct mtk_i2c *i2c, + unsigned int clk_src, + unsigned int check_speed, + unsigned int step_cnt, + unsigned int sample_cnt) +{ + const struct i2c_spec_values *spec; + unsigned int su_sta_cnt, low_cnt, high_cnt, max_step_cnt; + unsigned int sda_max, sda_min, clk_ns, max_sta_cnt = 0x3f; + unsigned int sample_ns = div_u64(1000000000ULL * (sample_cnt + 1), + clk_src); + + if (!i2c->dev_comp->timing_adjust) + return 0; + + if (i2c->dev_comp->ltiming_adjust) + max_sta_cnt = 0x100; + + spec = mtk_i2c_get_spec(check_speed); + + if (i2c->dev_comp->ltiming_adjust) + clk_ns = 1000000000 / clk_src; + else + clk_ns = sample_ns / 2; + + su_sta_cnt = DIV_ROUND_UP(spec->min_su_sta_ns, clk_ns); + if (su_sta_cnt > max_sta_cnt) + return -1; + + low_cnt = DIV_ROUND_UP(spec->min_low_ns, sample_ns); + max_step_cnt = mtk_i2c_max_step_cnt(check_speed); + if ((2 * step_cnt) > low_cnt && low_cnt < max_step_cnt) { + if (low_cnt > step_cnt) { + high_cnt = 2 * step_cnt - low_cnt; + } else { + high_cnt = step_cnt; + low_cnt = step_cnt; + } + } else { + return -2; + } + + sda_max = spec->max_hd_dat_ns / sample_ns; + if (sda_max > low_cnt) + sda_max = 0; + + sda_min = DIV_ROUND_UP(spec->min_su_dat_ns, sample_ns); + if (sda_min < low_cnt) + sda_min = 0; + + if (sda_min > sda_max) + return -3; + + if (check_speed > I2C_MAX_FAST_MODE_FREQ) { + if (i2c->dev_comp->ltiming_adjust) { + i2c->ac_timing.hs = I2C_TIME_DEFAULT_VALUE | + (sample_cnt << 12) | (high_cnt << 8); + i2c->ac_timing.ltiming &= ~GENMASK(15, 9); + i2c->ac_timing.ltiming |= (sample_cnt << 12) | + (low_cnt << 9); + i2c->ac_timing.ext &= ~GENMASK(7, 1); + i2c->ac_timing.ext |= (su_sta_cnt << 1) | (1 << 0); + } else { + i2c->ac_timing.hs_scl_hl_ratio = (1 << 12) | + (high_cnt << 6) | low_cnt; + i2c->ac_timing.hs_sta_stop = (su_sta_cnt << 8) | + su_sta_cnt; + } + i2c->ac_timing.sda_timing &= ~GENMASK(11, 6); + i2c->ac_timing.sda_timing |= (1 << 12) | + ((sda_max + sda_min) / 2) << 6; + } else { + if (i2c->dev_comp->ltiming_adjust) { + i2c->ac_timing.htiming = (sample_cnt << 8) | (high_cnt); + i2c->ac_timing.ltiming = (sample_cnt << 6) | (low_cnt); + i2c->ac_timing.ext = (su_sta_cnt << 8) | (1 << 0); + } else { + i2c->ac_timing.scl_hl_ratio = (1 << 12) | + (high_cnt << 6) | low_cnt; + i2c->ac_timing.sta_stop = (su_sta_cnt << 8) | + 
su_sta_cnt; + } + + i2c->ac_timing.sda_timing = (1 << 12) | + (sda_max + sda_min) / 2; + } + + return 0; +} + /* * Calculate i2c port speed * @@ -446,15 +656,12 @@ static int mtk_i2c_calculate_speed(struct mtk_i2c *i2c, unsigned int clk_src, unsigned int opt_div; unsigned int best_mul; unsigned int cnt_mul; + int ret = -EINVAL; if (target_speed > I2C_MAX_FAST_MODE_PLUS_FREQ) target_speed = I2C_MAX_FAST_MODE_PLUS_FREQ; - if (target_speed > I2C_MAX_FAST_MODE_FREQ) - max_step_cnt = MAX_HS_STEP_CNT_DIV; - else - max_step_cnt = MAX_STEP_CNT_DIV; - + max_step_cnt = mtk_i2c_max_step_cnt(target_speed); base_step_cnt = max_step_cnt; /* Find the best combination */ opt_div = DIV_ROUND_UP(clk_src >> 1, target_speed); @@ -473,6 +680,11 @@ static int mtk_i2c_calculate_speed(struct mtk_i2c *i2c, unsigned int clk_src, continue; if (cnt_mul < best_mul) { + ret = mtk_i2c_check_ac_timing(i2c, clk_src, + target_speed, step_cnt - 1, sample_cnt - 1); + if (ret) + continue; + best_mul = cnt_mul; base_sample_cnt = sample_cnt; base_step_cnt = step_cnt; @@ -481,6 +693,9 @@ static int mtk_i2c_calculate_speed(struct mtk_i2c *i2c, unsigned int clk_src, } } + if (ret) + return -EINVAL; + sample_cnt = base_sample_cnt; step_cnt = base_step_cnt; @@ -506,47 +721,68 @@ static int mtk_i2c_set_speed(struct mtk_i2c *i2c, unsigned int parent_clk) unsigned int l_step_cnt; unsigned int l_sample_cnt; unsigned int target_speed; + unsigned int clk_div; + unsigned int max_clk_div; int ret; - clk_src = parent_clk / i2c->clk_src_div; target_speed = i2c->speed_hz; + parent_clk /= i2c->clk_src_div; - if (target_speed > I2C_MAX_FAST_MODE_FREQ) { - /* Set master code speed register */ - ret = mtk_i2c_calculate_speed(i2c, clk_src, I2C_MAX_FAST_MODE_FREQ, - &l_step_cnt, &l_sample_cnt); - if (ret < 0) - return ret; - - i2c->timing_reg = (l_sample_cnt << 8) | l_step_cnt; - - /* Set the high speed mode register */ - ret = mtk_i2c_calculate_speed(i2c, clk_src, target_speed, - &step_cnt, &sample_cnt); - if (ret < 0) - return ret; - - i2c->high_speed_reg = I2C_TIME_DEFAULT_VALUE | - (sample_cnt << 12) | (step_cnt << 8); + if (i2c->dev_comp->timing_adjust) + max_clk_div = MAX_CLOCK_DIV; + else + max_clk_div = 1; + + for (clk_div = 1; clk_div <= max_clk_div; clk_div++) { + clk_src = parent_clk / clk_div; + + if (target_speed > I2C_MAX_FAST_MODE_FREQ) { + /* Set master code speed register */ + ret = mtk_i2c_calculate_speed(i2c, clk_src, + I2C_MAX_FAST_MODE_FREQ, + &l_step_cnt, + &l_sample_cnt); + if (ret < 0) + continue; + + i2c->timing_reg = (l_sample_cnt << 8) | l_step_cnt; + + /* Set the high speed mode register */ + ret = mtk_i2c_calculate_speed(i2c, clk_src, + target_speed, &step_cnt, + &sample_cnt); + if (ret < 0) + continue; + + i2c->high_speed_reg = I2C_TIME_DEFAULT_VALUE | + (sample_cnt << 12) | (step_cnt << 8); + + if (i2c->dev_comp->ltiming_adjust) + i2c->ltiming_reg = + (l_sample_cnt << 6) | l_step_cnt | + (sample_cnt << 12) | (step_cnt << 9); + } else { + ret = mtk_i2c_calculate_speed(i2c, clk_src, + target_speed, &l_step_cnt, + &l_sample_cnt); + if (ret < 0) + continue; - if (i2c->dev_comp->ltiming_adjust) - i2c->ltiming_reg = (l_sample_cnt << 6) | l_step_cnt | - (sample_cnt << 12) | (step_cnt << 9); - } else { - ret = mtk_i2c_calculate_speed(i2c, clk_src, target_speed, - &step_cnt, &sample_cnt); - if (ret < 0) - return ret; + i2c->timing_reg = (l_sample_cnt << 8) | l_step_cnt; - i2c->timing_reg = (sample_cnt << 8) | step_cnt; + /* Disable the high speed transaction */ + i2c->high_speed_reg = I2C_TIME_CLR_VALUE; - /* Disable the high 
speed transaction */ - i2c->high_speed_reg = I2C_TIME_CLR_VALUE; + if (i2c->dev_comp->ltiming_adjust) + i2c->ltiming_reg = + (l_sample_cnt << 6) | l_step_cnt; + } - if (i2c->dev_comp->ltiming_adjust) - i2c->ltiming_reg = (sample_cnt << 6) | step_cnt; + break; } + i2c->ac_timing.inter_clk_div = clk_div - 1; + return 0; } @@ -586,12 +822,6 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs, mtk_i2c_writew(i2c, control_reg, OFFSET_CONTROL); - /* set start condition */ - if (i2c->speed_hz <= I2C_MAX_STANDARD_MODE_FREQ) - mtk_i2c_writew(i2c, I2C_ST_START_CON, OFFSET_EXT_CONF); - else - mtk_i2c_writew(i2c, I2C_FS_START_CON, OFFSET_EXT_CONF); - addr_reg = i2c_8bit_addr_from_msg(msgs); mtk_i2c_writew(i2c, addr_reg, OFFSET_SLAVE_ADDR); @@ -948,9 +1178,6 @@ static int mtk_i2c_probe(struct platform_device *pdev) if (ret) return -EINVAL; - if (i2c->dev_comp->timing_adjust) - i2c->clk_src_div *= I2C_DEFAULT_CLK_DIV; - if (i2c->have_pmic && !i2c->dev_comp->pmic_i2c) return -EINVAL; -- GitLab From 5c71ca4d4f9872e0db105141e3bce25eec2be233 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Tue, 12 May 2020 16:20:46 +0200 Subject: [PATCH 0580/1971] i2c: slave-eeprom: add support for 24c512 EEPROMs I don't plan to support every EEPROM type, but the 24c512 ones need a tiny code update, so let's have that upstream. Reported-by: Patrick Williams Signed-off-by: Wolfram Sang Reviewed-by: Patrick Williams Signed-off-by: Wolfram Sang --- drivers/i2c/i2c-slave-eeprom.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/i2c/i2c-slave-eeprom.c b/drivers/i2c/i2c-slave-eeprom.c index f868dfc362a6c..787fdb7f332f0 100644 --- a/drivers/i2c/i2c-slave-eeprom.c +++ b/drivers/i2c/i2c-slave-eeprom.c @@ -41,7 +41,7 @@ struct eeprom_data { #define I2C_SLAVE_BYTELEN GENMASK(15, 0) #define I2C_SLAVE_FLAG_ADDR16 BIT(16) #define I2C_SLAVE_FLAG_RO BIT(17) -#define I2C_SLAVE_DEVICE_MAGIC(_len, _flags) ((_flags) | (_len)) +#define I2C_SLAVE_DEVICE_MAGIC(_len, _flags) ((_flags) | ((_len) - 1)) static int i2c_slave_eeprom_slave_cb(struct i2c_client *client, enum i2c_slave_event event, u8 *val) @@ -145,7 +145,7 @@ static int i2c_slave_eeprom_probe(struct i2c_client *client, const struct i2c_de { struct eeprom_data *eeprom; int ret; - unsigned int size = FIELD_GET(I2C_SLAVE_BYTELEN, id->driver_data); + unsigned int size = FIELD_GET(I2C_SLAVE_BYTELEN, id->driver_data) + 1; unsigned int flag_addr16 = FIELD_GET(I2C_SLAVE_FLAG_ADDR16, id->driver_data); eeprom = devm_kzalloc(&client->dev, sizeof(struct eeprom_data) + size, GFP_KERNEL); @@ -200,6 +200,8 @@ static const struct i2c_device_id i2c_slave_eeprom_id[] = { { "slave-24c32ro", I2C_SLAVE_DEVICE_MAGIC(32768 / 8, I2C_SLAVE_FLAG_ADDR16 | I2C_SLAVE_FLAG_RO) }, { "slave-24c64", I2C_SLAVE_DEVICE_MAGIC(65536 / 8, I2C_SLAVE_FLAG_ADDR16) }, { "slave-24c64ro", I2C_SLAVE_DEVICE_MAGIC(65536 / 8, I2C_SLAVE_FLAG_ADDR16 | I2C_SLAVE_FLAG_RO) }, + { "slave-24c512", I2C_SLAVE_DEVICE_MAGIC(524288 / 8, I2C_SLAVE_FLAG_ADDR16) }, + { "slave-24c512ro", I2C_SLAVE_DEVICE_MAGIC(524288 / 8, I2C_SLAVE_FLAG_ADDR16 | I2C_SLAVE_FLAG_RO) }, { } }; MODULE_DEVICE_TABLE(i2c, i2c_slave_eeprom_id); -- GitLab From df7b4d6f7d5fff53d4dcf8d972f331dbbb7d49e0 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Tue, 5 May 2020 18:01:01 +0200 Subject: [PATCH 0581/1971] i2c: reword explanation about atomic transfers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Atomic transfers are not only about sending messages like the original wording 
suggested. Speak of 'accessing' now like in i2c.h. Reported-by: Michał Mirosław Signed-off-by: Wolfram Sang Signed-off-by: Wolfram Sang --- drivers/i2c/i2c-core.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/i2c/i2c-core.h b/drivers/i2c/i2c-core.h index 517d98be68d25..94ff1693b3913 100644 --- a/drivers/i2c/i2c-core.h +++ b/drivers/i2c/i2c-core.h @@ -23,9 +23,9 @@ int i2c_dev_irq_from_resources(const struct resource *resources, unsigned int num_resources); /* - * We only allow atomic transfers for very late communication, e.g. to send - * the powerdown command to a PMIC. Atomic transfers are a corner case and not - * for generic use! + * We only allow atomic transfers for very late communication, e.g. to access a + * PMIC when powering down. Atomic transfers are a corner case and not for + * generic use! */ static inline bool i2c_in_atomic_xfer_mode(void) { -- GitLab From 6aab46bc52a8f579879d491c9d8062e03caa5c61 Mon Sep 17 00:00:00 2001 From: Bibby Hsieh Date: Tue, 19 May 2020 15:27:28 +0800 Subject: [PATCH 0582/1971] dt-binding: i2c: add bus-supply property On some platforms, the power supply of the I2C bus is disabled to reduce power consumption. This patch adds the bus-supply property. Signed-off-by: Bibby Hsieh Acked-by: Rob Herring [wsa: rebased to i2c/for-next] Signed-off-by: Wolfram Sang --- Documentation/devicetree/bindings/i2c/i2c.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Documentation/devicetree/bindings/i2c/i2c.txt b/Documentation/devicetree/bindings/i2c/i2c.txt index 819436b48faef..7aff425c099bd 100644 --- a/Documentation/devicetree/bindings/i2c/i2c.txt +++ b/Documentation/devicetree/bindings/i2c/i2c.txt @@ -23,6 +23,9 @@ Optional properties (per bus) These properties may not be supported by all drivers. However, if a driver wants to support one of the below features, it should adapt these bindings. +- bus-supply + phandle to the regulator that provides power to SCL/SDA. + - clock-frequency frequency of bus clock in Hz. -- GitLab From 6fe12cdbcfe35ad4726a619a9546822d34fc934c Mon Sep 17 00:00:00 2001 From: Bibby Hsieh Date: Tue, 19 May 2020 15:27:29 +0800 Subject: [PATCH 0583/1971] i2c: core: support bus regulator controlling in adapter Although on most platforms the I2C bus power is always on, some platforms disable it to meet low-power requirements. Get and enable the bus regulator in the I2C adapter device. 
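As a rough, hypothetical illustration (not part of this patch) of what the new adap->bus_regulator field enables once the core has obtained the "bus" regulator: the supply name "bus" corresponds to the "bus-supply" DT property, since the regulator core appends "-supply" during lookup. The helper name below is made up for illustration.

#include <linux/i2c.h>
#include <linux/regulator/consumer.h>

/*
 * Illustrative sketch only, not taken from this series: power the
 * adapter's bus rail up around a single transfer, rather than tying it
 * to probe/PM as the patch below does.
 */
static int example_powered_xfer(struct i2c_adapter *adap,
				struct i2c_msg *msgs, int num)
{
	int ret;

	ret = regulator_enable(adap->bus_regulator);
	if (ret)
		return ret;

	ret = i2c_transfer(adap, msgs, num);

	regulator_disable(adap->bus_regulator);
	return ret;
}
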
Signed-off-by: Bibby Hsieh Reviewed-by: Tomasz Figa Signed-off-by: Wolfram Sang --- drivers/i2c/i2c-core-base.c | 84 +++++++++++++++++++++++++++++++++++++ include/linux/i2c.h | 2 + 2 files changed, 86 insertions(+) diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index a7c21d4cbd901..5be24bf8a1949 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -313,12 +313,14 @@ static int i2c_smbus_host_notify_to_irq(const struct i2c_client *client) static int i2c_device_probe(struct device *dev) { struct i2c_client *client = i2c_verify_client(dev); + struct i2c_adapter *adap; struct i2c_driver *driver; int status; if (!client) return 0; + adap = client->adapter; driver = to_i2c_driver(dev->driver); client->irq = client->init_irq; @@ -378,6 +380,12 @@ static int i2c_device_probe(struct device *dev) dev_dbg(dev, "probe\n"); + status = regulator_enable(adap->bus_regulator); + if (status < 0) { + dev_err(&adap->dev, "Failed to enable power regulator\n"); + goto err_clear_wakeup_irq; + } + status = of_clk_set_defaults(dev->of_node, false); if (status < 0) goto err_clear_wakeup_irq; @@ -414,12 +422,14 @@ err_clear_wakeup_irq: static int i2c_device_remove(struct device *dev) { struct i2c_client *client = i2c_verify_client(dev); + struct i2c_adapter *adap; struct i2c_driver *driver; int status = 0; if (!client || !dev->driver) return 0; + adap = client->adapter; driver = to_i2c_driver(dev->driver); if (driver->remove) { dev_dbg(dev, "remove\n"); @@ -427,6 +437,8 @@ static int i2c_device_remove(struct device *dev) } dev_pm_domain_detach(&client->dev, true); + if (!pm_runtime_status_suspended(&client->dev)) + regulator_disable(adap->bus_regulator); dev_pm_clear_wake_irq(&client->dev); device_init_wakeup(&client->dev, false); @@ -438,6 +450,72 @@ static int i2c_device_remove(struct device *dev) return status; } +#ifdef CONFIG_PM_SLEEP +static int i2c_resume_early(struct device *dev) +{ + struct i2c_client *client = i2c_verify_client(dev); + struct i2c_adapter *adap = client->adapter; + int err; + + if (!pm_runtime_status_suspended(&client->dev)) { + err = regulator_enable(adap->bus_regulator); + if (err) + return err; + } + + return pm_generic_resume_early(&client->dev); +} + +static int i2c_suspend_late(struct device *dev) +{ + struct i2c_client *client = i2c_verify_client(dev); + struct i2c_adapter *adap = client->adapter; + int err; + + err = pm_generic_suspend_late(&client->dev); + if (err) + return err; + + if (!pm_runtime_status_suspended(&client->dev)) + return regulator_disable(adap->bus_regulator); + + return 0; +} +#endif + +#ifdef CONFIG_PM +static int i2c_runtime_resume(struct device *dev) +{ + struct i2c_client *client = i2c_verify_client(dev); + struct i2c_adapter *adap = client->adapter; + int err; + + err = regulator_enable(adap->bus_regulator); + if (err) + return err; + + return pm_generic_runtime_resume(&client->dev); +} + +static int i2c_runtime_suspend(struct device *dev) +{ + struct i2c_client *client = i2c_verify_client(dev); + struct i2c_adapter *adap = client->adapter; + int err; + + err = pm_generic_runtime_suspend(&client->dev); + if (err) + return err; + + return regulator_disable(adap->bus_regulator); +} +#endif + +static const struct dev_pm_ops i2c_device_pm = { + SET_LATE_SYSTEM_SLEEP_PM_OPS(i2c_suspend_late, i2c_resume_early) + SET_RUNTIME_PM_OPS(i2c_runtime_suspend, i2c_runtime_resume, NULL) +}; + static void i2c_device_shutdown(struct device *dev) { struct i2c_client *client = i2c_verify_client(dev); @@ -495,6 +573,7 @@ struct 
bus_type i2c_bus_type = { .probe = i2c_device_probe, .remove = i2c_device_remove, .shutdown = i2c_device_shutdown, + .pm = &i2c_device_pm, }; EXPORT_SYMBOL_GPL(i2c_bus_type); @@ -1333,6 +1412,11 @@ static int i2c_register_adapter(struct i2c_adapter *adap) if (res) goto out_reg; + adap->bus_regulator = devm_regulator_get(&adap->dev, "bus"); + if (IS_ERR(adap->bus_regulator)) { + res = PTR_ERR(adap->bus_regulator); + goto out_reg; + } dev_dbg(&adap->dev, "adapter [%s] registered\n", adap->name); pm_runtime_no_callbacks(&adap->dev); diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 456fc17ecb1c3..bc83af0d38d1d 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -15,6 +15,7 @@ #include /* for struct device */ #include /* for completion */ #include +#include #include #include /* for Host Notify IRQ */ #include /* for struct device_node */ @@ -721,6 +722,7 @@ struct i2c_adapter { const struct i2c_adapter_quirks *quirks; struct irq_domain *host_notify_domain; + struct regulator *bus_regulator; }; #define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev) -- GitLab From ab1c6093f689c48bb28889b3c23888289afd5c34 Mon Sep 17 00:00:00 2001 From: Atsushi Nemoto Date: Fri, 8 May 2020 22:14:36 +0900 Subject: [PATCH 0584/1971] i2c: altera: cleanup spinlock Protect altr_i2c_int_enable() by the mutex and remove unneeded spinlock. Signed-off-by: Atsushi Nemoto Reviewed-by: Thor Thayer Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-altera.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/drivers/i2c/busses/i2c-altera.c b/drivers/i2c/busses/i2c-altera.c index 70c81f88b293f..7d62cbda6e06c 100644 --- a/drivers/i2c/busses/i2c-altera.c +++ b/drivers/i2c/busses/i2c-altera.c @@ -69,7 +69,6 @@ * @fifo_size: size of the FIFO passed in. * @isr_mask: cached copy of local ISR enables. * @isr_status: cached copy of local ISR status. - * @lock: spinlock for IRQ synchronization. * @isr_mutex: mutex for IRQ thread. 
*/ struct altr_i2c_dev { @@ -86,18 +85,14 @@ struct altr_i2c_dev { u32 fifo_size; u32 isr_mask; u32 isr_status; - spinlock_t lock; /* IRQ synchronization */ struct mutex isr_mutex; }; static void altr_i2c_int_enable(struct altr_i2c_dev *idev, u32 mask, bool enable) { - unsigned long flags; u32 int_en; - spin_lock_irqsave(&idev->lock, flags); - int_en = readl(idev->base + ALTR_I2C_ISER); if (enable) idev->isr_mask = int_en | mask; @@ -105,8 +100,6 @@ altr_i2c_int_enable(struct altr_i2c_dev *idev, u32 mask, bool enable) idev->isr_mask = int_en & ~mask; writel(idev->isr_mask, idev->base + ALTR_I2C_ISER); - - spin_unlock_irqrestore(&idev->lock, flags); } static void altr_i2c_int_clear(struct altr_i2c_dev *idev, u32 mask) @@ -346,6 +339,7 @@ static int altr_i2c_xfer_msg(struct altr_i2c_dev *idev, struct i2c_msg *msg) time_left = wait_for_completion_timeout(&idev->msg_complete, ALTR_I2C_XFER_TIMEOUT); + mutex_lock(&idev->isr_mutex); altr_i2c_int_enable(idev, imask, false); value = readl(idev->base + ALTR_I2C_STATUS) & ALTR_I2C_STAT_CORE; @@ -358,6 +352,7 @@ static int altr_i2c_xfer_msg(struct altr_i2c_dev *idev, struct i2c_msg *msg) } altr_i2c_core_disable(idev); + mutex_unlock(&idev->isr_mutex); return idev->msg_err; } @@ -411,7 +406,6 @@ static int altr_i2c_probe(struct platform_device *pdev) idev->dev = &pdev->dev; init_completion(&idev->msg_complete); - spin_lock_init(&idev->lock); mutex_init(&idev->isr_mutex); ret = device_property_read_u32(idev->dev, "fifo-size", @@ -449,7 +443,9 @@ static int altr_i2c_probe(struct platform_device *pdev) return ret; } + mutex_lock(&idev->isr_mutex); altr_i2c_init(idev); + mutex_unlock(&idev->isr_mutex); i2c_set_adapdata(&idev->adapter, idev); strlcpy(idev->adapter.name, pdev->name, sizeof(idev->adapter.name)); -- GitLab From 7c9ec2c5251851f5a3888d1a7fbb2eaf700a538a Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 13 May 2020 10:33:12 +0100 Subject: [PATCH 0585/1971] i2c: pxa: implement generic i2c bus recovery Implement generic GPIO-based I2C bus recovery for the PXA I2C driver. 
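For context, the minimal hookup the I2C core needs for GPIO-based recovery looks roughly like the sketch below; this is a simplified, hypothetical driver fragment (function name invented), not the PXA code, which additionally switches pinmux and resets the controller via the prepare/unprepare callbacks in the diff that follows.

#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>

/*
 * Hypothetical minimal recovery setup: hand the core an SCL (and
 * optionally SDA) GPIO and let i2c_generic_scl_recovery() clock the
 * bus free when a slave holds SDA low.
 */
static int example_init_recovery(struct device *dev, struct i2c_adapter *adap,
				 struct i2c_bus_recovery_info *bri)
{
	bri->scl_gpiod = devm_gpiod_get(dev, "scl", GPIOD_OUT_HIGH_OPEN_DRAIN);
	if (IS_ERR(bri->scl_gpiod))
		return PTR_ERR(bri->scl_gpiod);

	bri->sda_gpiod = devm_gpiod_get_optional(dev, "sda", GPIOD_IN);
	if (IS_ERR(bri->sda_gpiod))
		return PTR_ERR(bri->sda_gpiod);

	bri->recover_bus = i2c_generic_scl_recovery;
	adap->bus_recovery_info = bri;

	return 0;
}
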
Reviewed-by: Andrew Lunn Signed-off-by: Russell King Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-pxa.c | 176 +++++++++++++++++++++++++++++++---- 1 file changed, 159 insertions(+), 17 deletions(-) diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c index 0e194d6cd1b56..a7885b8b5031c 100644 --- a/drivers/i2c/busses/i2c-pxa.c +++ b/drivers/i2c/busses/i2c-pxa.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -28,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -260,6 +262,11 @@ struct pxa_i2c { bool highmode_enter; u32 fm_mask; u32 hs_mask; + + struct i2c_bus_recovery_info recovery; + struct pinctrl *pinctrl; + struct pinctrl_state *pinctrl_default; + struct pinctrl_state *pinctrl_recovery; }; #define _IBMR(i2c) ((i2c)->reg_ibmr) @@ -559,13 +566,8 @@ static void i2c_pxa_set_slave(struct pxa_i2c *i2c, int errcode) #define i2c_pxa_set_slave(i2c, err) do { } while (0) #endif -static void i2c_pxa_reset(struct pxa_i2c *i2c) +static void i2c_pxa_do_reset(struct pxa_i2c *i2c) { - pr_debug("Resetting I2C Controller Unit\n"); - - /* abort any transfer currently under way */ - i2c_pxa_abort(i2c); - /* reset according to 9.8 */ writel(ICR_UR, _ICR(i2c)); writel(I2C_ISR_INIT, _ISR(i2c)); @@ -584,12 +586,25 @@ static void i2c_pxa_reset(struct pxa_i2c *i2c) #endif i2c_pxa_set_slave(i2c, 0); +} +static void i2c_pxa_enable(struct pxa_i2c *i2c) +{ /* enable unit */ writel(readl(_ICR(i2c)) | ICR_IUE, _ICR(i2c)); udelay(100); } +static void i2c_pxa_reset(struct pxa_i2c *i2c) +{ + pr_debug("Resetting I2C Controller Unit\n"); + + /* abort any transfer currently under way */ + i2c_pxa_abort(i2c); + i2c_pxa_do_reset(i2c); + i2c_pxa_enable(i2c); +} + #ifdef CONFIG_I2C_PXA_SLAVE /* @@ -1043,6 +1058,7 @@ static int i2c_pxa_do_xfer(struct pxa_i2c *i2c, struct i2c_msg *msg, int num) ret = i2c_pxa_wait_bus_not_busy(i2c); if (ret) { dev_err(&i2c->adap.dev, "i2c_pxa: timeout waiting for bus free\n"); + i2c_recover_bus(&i2c->adap); goto out; } @@ -1088,6 +1104,7 @@ static int i2c_pxa_do_xfer(struct pxa_i2c *i2c, struct i2c_msg *msg, int num) if (!timeout && i2c->msg_num) { i2c_pxa_scream_blue_murder(i2c, "timeout with active message"); + i2c_recover_bus(&i2c->adap); ret = I2C_RETRY; } @@ -1277,6 +1294,129 @@ static int i2c_pxa_probe_pdata(struct platform_device *pdev, return 0; } +static void i2c_pxa_prepare_recovery(struct i2c_adapter *adap) +{ + struct pxa_i2c *i2c = adap->algo_data; + u32 ibmr = readl(_IBMR(i2c)); + + /* + * Program the GPIOs to reflect the current I2C bus state while + * we transition to recovery; this avoids glitching the bus. + */ + gpiod_set_value(i2c->recovery.scl_gpiod, ibmr & IBMR_SCLS); + gpiod_set_value(i2c->recovery.sda_gpiod, ibmr & IBMR_SDAS); + + WARN_ON(pinctrl_select_state(i2c->pinctrl, i2c->pinctrl_recovery)); +} + +static void i2c_pxa_unprepare_recovery(struct i2c_adapter *adap) +{ + struct pxa_i2c *i2c = adap->algo_data; + u32 isr; + + /* + * The bus should now be free. Clear up the I2C controller before + * handing control of the bus back to avoid the bus changing state. 
+ */ + isr = readl(_ISR(i2c)); + if (isr & (ISR_UB | ISR_IBB)) { + dev_dbg(&i2c->adap.dev, + "recovery: resetting controller, ISR=0x%08x\n", isr); + i2c_pxa_do_reset(i2c); + } + + WARN_ON(pinctrl_select_state(i2c->pinctrl, i2c->pinctrl_default)); + + dev_dbg(&i2c->adap.dev, "recovery: IBMR 0x%08x ISR 0x%08x\n", + readl(_IBMR(i2c)), readl(_ISR(i2c))); + + i2c_pxa_enable(i2c); +} + +static int i2c_pxa_init_recovery(struct pxa_i2c *i2c) +{ + struct i2c_bus_recovery_info *bri = &i2c->recovery; + struct device *dev = i2c->adap.dev.parent; + + /* + * When slave mode is enabled, we are not the only master on the bus. + * Bus recovery can only be performed when we are the master, which + * we can't be certain of. Therefore, when slave mode is enabled, do + * not configure bus recovery. + */ + if (IS_ENABLED(CONFIG_I2C_PXA_SLAVE)) + return 0; + + i2c->pinctrl = devm_pinctrl_get(dev); + if (IS_ERR(i2c->pinctrl)) + return PTR_ERR(i2c->pinctrl); + + if (!i2c->pinctrl) + return 0; + + i2c->pinctrl_default = pinctrl_lookup_state(i2c->pinctrl, + PINCTRL_STATE_DEFAULT); + i2c->pinctrl_recovery = pinctrl_lookup_state(i2c->pinctrl, "recovery"); + + if (IS_ERR(i2c->pinctrl_default) || IS_ERR(i2c->pinctrl_recovery)) { + dev_info(dev, "missing pinmux recovery information: %ld %ld\n", + PTR_ERR(i2c->pinctrl_default), + PTR_ERR(i2c->pinctrl_recovery)); + return 0; + } + + /* + * Claiming GPIOs can influence the pinmux state, and may glitch the + * I2C bus. Do this carefully. + */ + bri->scl_gpiod = devm_gpiod_get(dev, "scl", GPIOD_OUT_HIGH_OPEN_DRAIN); + if (bri->scl_gpiod == ERR_PTR(-EPROBE_DEFER)) + return -EPROBE_DEFER; + if (IS_ERR(bri->scl_gpiod)) { + dev_info(dev, "missing scl gpio recovery information: %pe\n", + bri->scl_gpiod); + return 0; + } + + /* + * We have SCL. Pull SCL low and wait a bit so that SDA glitches + * have no effect. + */ + gpiod_direction_output(bri->scl_gpiod, 0); + udelay(10); + bri->sda_gpiod = devm_gpiod_get(dev, "sda", GPIOD_OUT_HIGH_OPEN_DRAIN); + + /* Wait a bit in case of a SDA glitch, and then release SCL. */ + udelay(10); + gpiod_direction_output(bri->scl_gpiod, 1); + + if (bri->sda_gpiod == ERR_PTR(-EPROBE_DEFER)) + return -EPROBE_DEFER; + + if (IS_ERR(bri->sda_gpiod)) { + dev_info(dev, "missing sda gpio recovery information: %pe\n", + bri->sda_gpiod); + return 0; + } + + bri->prepare_recovery = i2c_pxa_prepare_recovery; + bri->unprepare_recovery = i2c_pxa_unprepare_recovery; + bri->recover_bus = i2c_generic_scl_recovery; + + i2c->adap.bus_recovery_info = bri; + + /* + * Claiming GPIOs can change the pinmux state, which confuses the + * pinctrl since pinctrl's idea of the current setting is unaffected + * by the pinmux change caused by claiming the GPIO. Work around that + * by switching pinctrl to the GPIO state here. We do it this way to + * avoid glitching the I2C bus. + */ + pinctrl_select_state(i2c->pinctrl, i2c->pinctrl_recovery); + + return pinctrl_select_state(i2c->pinctrl, i2c->pinctrl_default); +} + static int i2c_pxa_probe(struct platform_device *dev) { struct i2c_pxa_platform_data *plat = dev_get_platdata(&dev->dev); @@ -1289,6 +1429,16 @@ static int i2c_pxa_probe(struct platform_device *dev) if (!i2c) return -ENOMEM; + /* Default adapter num to device id; i2c_pxa_probe_dt can override. 
*/ + i2c->adap.nr = dev->id; + i2c->adap.owner = THIS_MODULE; + i2c->adap.retries = 5; + i2c->adap.algo_data = i2c; + i2c->adap.dev.parent = &dev->dev; +#ifdef CONFIG_OF + i2c->adap.dev.of_node = dev->dev.of_node; +#endif + res = platform_get_resource(dev, IORESOURCE_MEM, 0); i2c->reg_base = devm_ioremap_resource(&dev->dev, res); if (IS_ERR(i2c->reg_base)) @@ -1298,8 +1448,9 @@ static int i2c_pxa_probe(struct platform_device *dev) if (irq < 0) return irq; - /* Default adapter num to device id; i2c_pxa_probe_dt can override. */ - i2c->adap.nr = dev->id; + ret = i2c_pxa_init_recovery(i2c); + if (ret) + return ret; ret = i2c_pxa_probe_dt(dev, i2c, &i2c_type); if (ret > 0) @@ -1307,9 +1458,6 @@ static int i2c_pxa_probe(struct platform_device *dev) if (ret < 0) return ret; - i2c->adap.owner = THIS_MODULE; - i2c->adap.retries = 5; - spin_lock_init(&i2c->lock); init_waitqueue_head(&i2c->wait); @@ -1375,12 +1523,6 @@ static int i2c_pxa_probe(struct platform_device *dev) i2c_pxa_reset(i2c); - i2c->adap.algo_data = i2c; - i2c->adap.dev.parent = &dev->dev; -#ifdef CONFIG_OF - i2c->adap.dev.of_node = dev->dev.of_node; -#endif - ret = i2c_add_numbered_adapter(&i2c->adap); if (ret < 0) goto ereqirq; -- GitLab From 7dae1dd726aac7871d9cc56ed9d13fa09c0212f9 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Wed, 20 May 2020 13:41:02 -0400 Subject: [PATCH 0586/1971] SUNRPC: Replace dprintk() call sites in TCP receive path Signed-off-by: Chuck Lever --- include/trace/events/sunrpc.h | 1 + net/sunrpc/svcsock.c | 16 ++-------------- 2 files changed, 3 insertions(+), 14 deletions(-) diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index ec4ae34a1f84c..bfea554bd91f0 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -1502,6 +1502,7 @@ DECLARE_EVENT_CLASS(svcsock_class, DEFINE_SVCSOCK_EVENT(udp_send); DEFINE_SVCSOCK_EVENT(tcp_send); +DEFINE_SVCSOCK_EVENT(tcp_recv); DEFINE_SVCSOCK_EVENT(data_ready); DEFINE_SVCSOCK_EVENT(write_space); diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 9c1eb13aa9b82..8cf06b6768310 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -119,9 +119,8 @@ static void svc_release_skb(struct svc_rqst *rqstp) if (skb) { struct svc_sock *svsk = container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt); - rqstp->rq_xprt_ctxt = NULL; - dprintk("svc: service %p, releasing skb %p\n", rqstp, skb); + rqstp->rq_xprt_ctxt = NULL; skb_free_datagram_locked(svsk->sk_sk, skb); } } @@ -132,8 +131,6 @@ static void svc_release_udp_skb(struct svc_rqst *rqstp) if (skb) { rqstp->rq_xprt_ctxt = NULL; - - dprintk("svc: service %p, releasing skb %p\n", rqstp, skb); consume_skb(skb); } } @@ -245,8 +242,6 @@ static ssize_t svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, if (len == buflen) set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); - dprintk("svc: socket %p recvfrom(%p, %zu) = %zd\n", - svsk, iov[0].iov_base, iov[0].iov_len, len); return len; } @@ -932,9 +927,6 @@ static int copy_pages_to_kvecs(struct kvec *vec, struct page **pages, int len) static void svc_tcp_fragment_received(struct svc_sock *svsk) { /* If we have more data, signal svc_xprt_enqueue() to try again */ - dprintk("svc: TCP %s record (%d bytes)\n", - svc_sock_final_rec(svsk) ? 
"final" : "nonfinal", - svc_sock_reclen(svsk)); svsk->sk_tcplen = 0; svsk->sk_marker = xdr_zero; } @@ -954,11 +946,6 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp) __be32 calldir; int pnum; - dprintk("svc: tcp_recv %p data %d conn %d close %d\n", - svsk, test_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags), - test_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags), - test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags)); - clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); len = svc_tcp_read_marker(svsk, rqstp); if (len < 0) @@ -977,6 +964,7 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp) /* Now receive data */ len = svc_recvfrom(rqstp, vec, pnum, base + want, base); if (len >= 0) { + trace_svcsock_tcp_recv(&svsk->sk_xprt, len); svsk->sk_tcplen += len; svsk->sk_datalen += len; } -- GitLab From 6be8c5949149ff45c86dd9e49dfab920078bfcd5 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Wed, 20 May 2020 12:29:13 -0400 Subject: [PATCH 0587/1971] SUNRPC: Refactor recvfrom path dealing with incomplete TCP receives Clean up: move exception processing out of the main path. Signed-off-by: Chuck Lever --- include/trace/events/sunrpc.h | 31 ++++++++++++++++++++++++++++ net/sunrpc/svcsock.c | 39 +++++++++++++++++------------------ 2 files changed, 50 insertions(+), 20 deletions(-) diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index bfea554bd91f0..81659876b4afa 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -1503,9 +1503,40 @@ DECLARE_EVENT_CLASS(svcsock_class, DEFINE_SVCSOCK_EVENT(udp_send); DEFINE_SVCSOCK_EVENT(tcp_send); DEFINE_SVCSOCK_EVENT(tcp_recv); +DEFINE_SVCSOCK_EVENT(tcp_recv_eagain); +DEFINE_SVCSOCK_EVENT(tcp_recv_err); DEFINE_SVCSOCK_EVENT(data_ready); DEFINE_SVCSOCK_EVENT(write_space); +TRACE_EVENT(svcsock_tcp_recv_short, + TP_PROTO( + const struct svc_xprt *xprt, + u32 expected, + u32 received + ), + + TP_ARGS(xprt, expected, received), + + TP_STRUCT__entry( + __field(u32, expected) + __field(u32, received) + __field(unsigned long, flags) + __string(addr, xprt->xpt_remotebuf) + ), + + TP_fast_assign( + __entry->expected = expected; + __entry->received = received; + __entry->flags = xprt->xpt_flags; + __assign_str(addr, xprt->xpt_remotebuf); + ), + + TP_printk("addr=%s flags=%s expected=%u received=%u", + __get_str(addr), show_svc_xprt_flags(__entry->flags), + __entry->expected, __entry->received + ) +); + TRACE_EVENT(svcsock_tcp_state, TP_PROTO( const struct svc_xprt *xprt, diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 8cf06b6768310..087e21b0f1bb5 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -968,23 +968,10 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp) svsk->sk_tcplen += len; svsk->sk_datalen += len; } - if (len != want || !svc_sock_final_rec(svsk)) { - svc_tcp_save_pages(svsk, rqstp); - if (len < 0 && len != -EAGAIN) - goto err_delete; - if (len == want) - svc_tcp_fragment_received(svsk); - else - dprintk("svc: incomplete TCP record (%d of %d)\n", - (int)(svsk->sk_tcplen - sizeof(rpc_fraghdr)), - svc_sock_reclen(svsk)); - goto err_noclose; - } - - if (svsk->sk_datalen < 8) { - svsk->sk_datalen = 0; - goto err_delete; /* client is nuts. 
*/ - } + if (len != want || !svc_sock_final_rec(svsk)) + goto err_incomplete; + if (svsk->sk_datalen < 8) + goto err_nuts; rqstp->rq_arg.len = svsk->sk_datalen; rqstp->rq_arg.page_base = 0; @@ -1019,14 +1006,26 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp) return rqstp->rq_arg.len; +err_incomplete: + svc_tcp_save_pages(svsk, rqstp); + if (len < 0 && len != -EAGAIN) + goto err_delete; + if (len == want) + svc_tcp_fragment_received(svsk); + else + trace_svcsock_tcp_recv_short(&svsk->sk_xprt, + svc_sock_reclen(svsk), + svsk->sk_tcplen - sizeof(rpc_fraghdr)); + goto err_noclose; error: if (len != -EAGAIN) goto err_delete; - dprintk("RPC: TCP recvfrom got EAGAIN\n"); + trace_svcsock_tcp_recv_eagain(&svsk->sk_xprt, 0); return 0; +err_nuts: + svsk->sk_datalen = 0; err_delete: - printk(KERN_NOTICE "%s: recvfrom returned errno %d\n", - svsk->sk_xprt.xpt_server->sv_name, -len); + trace_svcsock_tcp_recv_err(&svsk->sk_xprt, len); set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); err_noclose: return 0; /* record not complete */ -- GitLab From cca557a5a60faaf307bbb76035dd90ec47cf0e0c Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Wed, 20 May 2020 12:53:05 -0400 Subject: [PATCH 0588/1971] SUNRPC: Clean up svc_release_skb() functions Rename these functions using the convention used for other xpo method entry points. Signed-off-by: Chuck Lever --- net/sunrpc/svcsock.c | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 087e21b0f1bb5..5b2981a0711c7 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -109,10 +109,12 @@ static void svc_reclassify_socket(struct socket *sock) } #endif -/* - * Release an skbuff after use +/** + * svc_tcp_release_rqst - Release transport-related resources + * @rqstp: request structure with resources to be released + * */ -static void svc_release_skb(struct svc_rqst *rqstp) +static void svc_tcp_release_rqst(struct svc_rqst *rqstp) { struct sk_buff *skb = rqstp->rq_xprt_ctxt; @@ -125,7 +127,12 @@ static void svc_release_skb(struct svc_rqst *rqstp) } } -static void svc_release_udp_skb(struct svc_rqst *rqstp) +/** + * svc_udp_release_rqst - Release transport-related resources + * @rqstp: request structure with resources to be released + * + */ +static void svc_udp_release_rqst(struct svc_rqst *rqstp) { struct sk_buff *skb = rqstp->rq_xprt_ctxt; @@ -521,7 +528,7 @@ static int svc_udp_sendto(struct svc_rqst *rqstp) unsigned int uninitialized_var(sent); int err; - svc_release_udp_skb(rqstp); + svc_udp_release_rqst(rqstp); svc_set_cmsg_data(rqstp, cmh); @@ -590,7 +597,7 @@ static const struct svc_xprt_ops svc_udp_ops = { .xpo_recvfrom = svc_udp_recvfrom, .xpo_sendto = svc_udp_sendto, .xpo_read_payload = svc_sock_read_payload, - .xpo_release_rqst = svc_release_udp_skb, + .xpo_release_rqst = svc_udp_release_rqst, .xpo_detach = svc_sock_detach, .xpo_free = svc_sock_free, .xpo_has_wspace = svc_udp_has_wspace, @@ -1053,7 +1060,7 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp) unsigned int uninitialized_var(sent); int err; - svc_release_skb(rqstp); + svc_tcp_release_rqst(rqstp); mutex_lock(&xprt->xpt_mutex); if (svc_xprt_is_dead(xprt)) @@ -1093,7 +1100,7 @@ static const struct svc_xprt_ops svc_tcp_ops = { .xpo_recvfrom = svc_tcp_recvfrom, .xpo_sendto = svc_tcp_sendto, .xpo_read_payload = svc_sock_read_payload, - .xpo_release_rqst = svc_release_skb, + .xpo_release_rqst = svc_tcp_release_rqst, .xpo_detach = svc_tcp_sock_detach, .xpo_free = svc_sock_free, .xpo_has_wspace = 
svc_tcp_has_wspace, -- GitLab From ca07eda33e01eafa7a26ec06974f7eacee6a89c8 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Wed, 20 May 2020 17:30:12 -0400 Subject: [PATCH 0589/1971] SUNRPC: Refactor svc_recvfrom() This function is not currently "generic" so remove the documenting comment and rename it appropriately. Its internals are converted to use bio_vecs for reading from the transport socket. In existing typical sunrpc uses of bio_vecs, the bio_vec array is allocated dynamically. Here, instead, an array of bio_vecs is added to svc_rqst. The lifetime of this array can be greater than one call to xpo_recvfrom(): - Multiple calls to xpo_recvfrom() might be needed to read an RPC message completely. - At some later point, rq_arg.bvecs will point to this array and it will carry the received message into svc_process(). I also expect that a future optimization will remove either the rq_vec or rq_pages array in favor of rq_bvec, thus conserving the size of struct svc_rqst. Signed-off-by: Chuck Lever --- include/linux/sunrpc/svc.h | 1 + net/sunrpc/svcsock.c | 109 +++++++++++++++++++++++-------------- 2 files changed, 69 insertions(+), 41 deletions(-) diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index fd390894a5849..05da19a0516db 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -254,6 +254,7 @@ struct svc_rqst { struct page * *rq_page_end; /* one past the last page */ struct kvec rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. */ + struct bio_vec rq_bvec[RPCSVC_MAXPAGES]; __be32 rq_xid; /* transmission id */ u32 rq_prog; /* program number */ diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 5b2981a0711c7..3e25511b800e8 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -223,26 +223,62 @@ static int svc_one_sock_name(struct svc_sock *svsk, char *buf, int remaining) return len; } +#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE +static void svc_flush_bvec(const struct bio_vec *bvec, size_t size, size_t seek) +{ + struct bvec_iter bi = { + .bi_size = size, + }; + struct bio_vec bv; + + bvec_iter_advance(bvec, &bi, seek & PAGE_MASK); + for_each_bvec(bv, bvec, bi, bi) + flush_dcache_page(bv.bv_page); +} +#else +static inline void svc_flush_bvec(const struct bio_vec *bvec, size_t size, + size_t seek) +{ +} +#endif + /* - * Generic recvfrom routine. + * Read from @rqstp's transport socket. The incoming message fills whole + * pages in @rqstp's rq_pages array until the last page of the message + * has been received into a partial page. 
*/ -static ssize_t svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, - unsigned int nr, size_t buflen, unsigned int base) +static ssize_t svc_tcp_read_msg(struct svc_rqst *rqstp, size_t buflen, + size_t seek) { struct svc_sock *svsk = container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt); + struct bio_vec *bvec = rqstp->rq_bvec; struct msghdr msg = { NULL }; + unsigned int i; ssize_t len; + size_t t; rqstp->rq_xprt_hlen = 0; clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); - iov_iter_kvec(&msg.msg_iter, READ, iov, nr, buflen); - if (base != 0) { - iov_iter_advance(&msg.msg_iter, base); - buflen -= base; + + for (i = 0, t = 0; t < buflen; i++, t += PAGE_SIZE) { + bvec[i].bv_page = rqstp->rq_pages[i]; + bvec[i].bv_len = PAGE_SIZE; + bvec[i].bv_offset = 0; + } + rqstp->rq_respages = &rqstp->rq_pages[i]; + rqstp->rq_next_page = rqstp->rq_respages + 1; + + iov_iter_bvec(&msg.msg_iter, READ, bvec, i, buflen); + if (seek) { + iov_iter_advance(&msg.msg_iter, seek); + buflen -= seek; } len = sock_recvmsg(svsk->sk_sock, &msg, MSG_DONTWAIT); + if (len > 0) + svc_flush_bvec(bvec, len, seek); + /* If we read a full record, then assume there may be more * data to read (stream based sockets only!) */ @@ -775,13 +811,14 @@ failed: return NULL; } -static unsigned int svc_tcp_restore_pages(struct svc_sock *svsk, struct svc_rqst *rqstp) +static size_t svc_tcp_restore_pages(struct svc_sock *svsk, + struct svc_rqst *rqstp) { - unsigned int i, len, npages; + size_t len = svsk->sk_datalen; + unsigned int i, npages; - if (svsk->sk_datalen == 0) + if (!len) return 0; - len = svsk->sk_datalen; npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; for (i = 0; i < npages; i++) { if (rqstp->rq_pages[i] != NULL) @@ -917,20 +954,6 @@ unlock_eagain: return -EAGAIN; } -static int copy_pages_to_kvecs(struct kvec *vec, struct page **pages, int len) -{ - int i = 0; - int t = 0; - - while (t < len) { - vec[i].iov_base = page_address(pages[i]); - vec[i].iov_len = PAGE_SIZE; - i++; - t += PAGE_SIZE; - } - return i; -} - static void svc_tcp_fragment_received(struct svc_sock *svsk) { /* If we have more data, signal svc_xprt_enqueue() to try again */ @@ -938,20 +961,33 @@ static void svc_tcp_fragment_received(struct svc_sock *svsk) svsk->sk_marker = xdr_zero; } -/* - * Receive data from a TCP socket. +/** + * svc_tcp_recvfrom - Receive data from a TCP socket + * @rqstp: request structure into which to receive an RPC Call + * + * Called in a loop when XPT_DATA has been set. + * + * Read the 4-byte stream record marker, then use the record length + * in that marker to set up exactly the resources needed to receive + * the next RPC message into @rqstp. + * + * Returns: + * On success, the number of bytes in a received RPC Call, or + * %0 if a complete RPC Call message was not ready to return + * + * The zero return case handles partial receives and callback Replies. + * The state of a partial receive is preserved in the svc_sock for + * the next call to svc_tcp_recvfrom. 
*/ static int svc_tcp_recvfrom(struct svc_rqst *rqstp) { struct svc_sock *svsk = container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt); struct svc_serv *serv = svsk->sk_xprt.xpt_server; - int len; - struct kvec *vec; - unsigned int want, base; + size_t want, base; + ssize_t len; __be32 *p; __be32 calldir; - int pnum; clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); len = svc_tcp_read_marker(svsk, rqstp); @@ -960,16 +996,7 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp) base = svc_tcp_restore_pages(svsk, rqstp); want = len - (svsk->sk_tcplen - sizeof(rpc_fraghdr)); - - vec = rqstp->rq_vec; - - pnum = copy_pages_to_kvecs(&vec[0], &rqstp->rq_pages[0], base + want); - - rqstp->rq_respages = &rqstp->rq_pages[pnum]; - rqstp->rq_next_page = rqstp->rq_respages + 1; - - /* Now receive data */ - len = svc_recvfrom(rqstp, vec, pnum, base + want, base); + len = svc_tcp_read_msg(rqstp, base + want, base); if (len >= 0) { trace_svcsock_tcp_recv(&svsk->sk_xprt, len); svsk->sk_tcplen += len; -- GitLab From fff1ebb269b6c18b21bc0ddab7dd8b0c5e68e0a1 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Wed, 20 May 2020 17:30:24 -0400 Subject: [PATCH 0590/1971] SUNRPC: Restructure svc_udp_recvfrom() Clean up. At this point, we are not ready yet to support bio_vecs in the UDP transport implementation. However, we can clean up svc_udp_recvfrom() to match the tracing and straight-lining recently changes made in svc_tcp_recvfrom(). Signed-off-by: Chuck Lever --- include/trace/events/sunrpc.h | 2 ++ net/sunrpc/svcsock.c | 60 ++++++++++++++++++++--------------- 2 files changed, 37 insertions(+), 25 deletions(-) diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index 81659876b4afa..08c7d618ceb49 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -1501,6 +1501,8 @@ DECLARE_EVENT_CLASS(svcsock_class, TP_ARGS(xprt, result)) DEFINE_SVCSOCK_EVENT(udp_send); +DEFINE_SVCSOCK_EVENT(udp_recv); +DEFINE_SVCSOCK_EVENT(udp_recv_err); DEFINE_SVCSOCK_EVENT(tcp_send); DEFINE_SVCSOCK_EVENT(tcp_recv); DEFINE_SVCSOCK_EVENT(tcp_recv_eagain); diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 3e25511b800e8..97d2b6f8c7918 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -425,8 +425,15 @@ static int svc_udp_get_dest_address(struct svc_rqst *rqstp, return 0; } -/* - * Receive a datagram from a UDP socket. +/** + * svc_udp_recvfrom - Receive a datagram from a UDP socket. + * @rqstp: request structure into which to receive an RPC Call + * + * Called in a loop when XPT_DATA has been set. 
+ * + * Returns: + * On success, the number of bytes in a received RPC Call, or + * %0 if a complete RPC Call message was not ready to return */ static int svc_udp_recvfrom(struct svc_rqst *rqstp) { @@ -460,20 +467,14 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp) svc_sock_setbufsize(svsk, serv->sv_nrthreads + 3); clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); - skb = NULL; err = kernel_recvmsg(svsk->sk_sock, &msg, NULL, 0, 0, MSG_PEEK | MSG_DONTWAIT); - if (err >= 0) - skb = skb_recv_udp(svsk->sk_sk, 0, 1, &err); - - if (skb == NULL) { - if (err != -EAGAIN) { - /* possibly an icmp error */ - dprintk("svc: recvfrom returned error %d\n", -err); - set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); - } - return 0; - } + if (err < 0) + goto out_recv_err; + skb = skb_recv_udp(svsk->sk_sk, 0, 1, &err); + if (!skb) + goto out_recv_err; + len = svc_addr_len(svc_addr(rqstp)); rqstp->rq_addrlen = len; if (skb->tstamp == 0) { @@ -484,26 +485,21 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp) sock_write_timestamp(svsk->sk_sk, skb->tstamp); set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */ - len = skb->len; + len = skb->len; rqstp->rq_arg.len = len; + trace_svcsock_udp_recv(&svsk->sk_xprt, len); rqstp->rq_prot = IPPROTO_UDP; - if (!svc_udp_get_dest_address(rqstp, cmh)) { - net_warn_ratelimited("svc: received unknown control message %d/%d; dropping RPC reply datagram\n", - cmh->cmsg_level, cmh->cmsg_type); - goto out_free; - } + if (!svc_udp_get_dest_address(rqstp, cmh)) + goto out_cmsg_err; rqstp->rq_daddrlen = svc_addr_len(svc_daddr(rqstp)); if (skb_is_nonlinear(skb)) { /* we have to copy */ local_bh_disable(); - if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) { - local_bh_enable(); - /* checksum error */ - goto out_free; - } + if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) + goto out_bh_enable; local_bh_enable(); consume_skb(skb); } else { @@ -531,6 +527,20 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp) serv->sv_stats->netudpcnt++; return len; + +out_recv_err: + if (err != -EAGAIN) { + /* possibly an icmp error */ + set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); + } + trace_svcsock_udp_recv_err(&svsk->sk_xprt, err); + return 0; +out_cmsg_err: + net_warn_ratelimited("svc: received unknown control message %d/%d; dropping RPC reply datagram\n", + cmh->cmsg_level, cmh->cmsg_type); + goto out_free; +out_bh_enable: + local_bh_enable(); out_free: kfree_skb(skb); return 0; -- GitLab From d88ff9587bce1c962835017d2b2c449b2e6bb500 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Wed, 15 Apr 2020 09:47:41 -0400 Subject: [PATCH 0591/1971] SUNRPC: svc_show_status() macro should have enum definitions Clean up: Add missing TRACE_DEFINE_ENUMs in include/trace/events/sunrpc.h Signed-off-by: Chuck Lever --- include/trace/events/sunrpc.h | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index 08c7d618ceb49..2a7f6f83341fb 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -1057,6 +1057,17 @@ TRACE_EVENT(svc_recv, show_rqstp_flags(__entry->flags)) ); +TRACE_DEFINE_ENUM(SVC_GARBAGE); +TRACE_DEFINE_ENUM(SVC_SYSERR); +TRACE_DEFINE_ENUM(SVC_VALID); +TRACE_DEFINE_ENUM(SVC_NEGATIVE); +TRACE_DEFINE_ENUM(SVC_OK); +TRACE_DEFINE_ENUM(SVC_DROP); +TRACE_DEFINE_ENUM(SVC_CLOSE); +TRACE_DEFINE_ENUM(SVC_DENIED); +TRACE_DEFINE_ENUM(SVC_PENDING); +TRACE_DEFINE_ENUM(SVC_COMPLETE); + #define svc_show_status(status) \ __print_symbolic(status, \ { SVC_GARBAGE, "SVC_GARBAGE" }, \ -- GitLab 
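Why the enum definitions above matter (not spelled out in the patch itself): __print_symbolic() embeds its symbol/string table verbatim in the tracepoint's exported format file, so unless each enumerator is also registered with TRACE_DEFINE_ENUM(), user-space trace parsers see the C identifiers rather than their numeric values and cannot decode recorded events. A hypothetical minimal pairing, using two of the existing status codes, would look like this:

/*
 * Hypothetical example, not part of the series: registering the values
 * lets the tracing core substitute numbers for the enum names in the
 * format string read by user space.
 */
TRACE_DEFINE_ENUM(SVC_OK);
TRACE_DEFINE_ENUM(SVC_DROP);

#define show_example_status(status)		\
	__print_symbolic(status,		\
		{ SVC_OK,   "SVC_OK" },		\
		{ SVC_DROP, "SVC_DROP" })
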
From 0b175b18648ebedfe255b11a7792f1d76848a8f7 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Sat, 2 May 2020 11:34:40 -0400 Subject: [PATCH 0592/1971] NFSD: Add tracepoints to NFSD's duplicate reply cache Try to capture DRC failures. Two additional clean-ups: - Introduce Doxygen-style comments for the main entry points - Remove a dprintk that fires for an allocation failure. This was the only dprintk in the REPCACHE class. Reported-by: kbuild test robot [ cel: force typecast for display of checksum values ] Signed-off-by: Chuck Lever --- fs/nfsd/nfscache.c | 57 +++++++++++++++++++++++++++----------------- fs/nfsd/trace.h | 59 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 94 insertions(+), 22 deletions(-) diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c index 96352ab7bd810..945d2f5e760e8 100644 --- a/fs/nfsd/nfscache.c +++ b/fs/nfsd/nfscache.c @@ -20,8 +20,7 @@ #include "nfsd.h" #include "cache.h" - -#define NFSDDBG_FACILITY NFSDDBG_REPCACHE +#include "trace.h" /* * We use this value to determine the number of hash buckets from the max @@ -323,8 +322,10 @@ nfsd_cache_key_cmp(const struct svc_cacherep *key, const struct svc_cacherep *rp, struct nfsd_net *nn) { if (key->c_key.k_xid == rp->c_key.k_xid && - key->c_key.k_csum != rp->c_key.k_csum) + key->c_key.k_csum != rp->c_key.k_csum) { ++nn->payload_misses; + trace_nfsd_drc_mismatch(nn, key, rp); + } return memcmp(&key->c_key, &rp->c_key, sizeof(key->c_key)); } @@ -377,15 +378,22 @@ out: return ret; } -/* +/** + * nfsd_cache_lookup - Find an entry in the duplicate reply cache + * @rqstp: Incoming Call to find + * * Try to find an entry matching the current call in the cache. When none * is found, we try to grab the oldest expired entry off the LRU list. If * a suitable one isn't there, then drop the cache_lock and allocate a * new one, then search again in case one got inserted while this thread * didn't hold the lock. + * + * Return values: + * %RC_DOIT: Process the request normally + * %RC_REPLY: Reply from cache + * %RC_DROPIT: Do not process the request further */ -int -nfsd_cache_lookup(struct svc_rqst *rqstp) +int nfsd_cache_lookup(struct svc_rqst *rqstp) { struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); struct svc_cacherep *rp, *found; @@ -399,7 +407,7 @@ nfsd_cache_lookup(struct svc_rqst *rqstp) rqstp->rq_cacherep = NULL; if (type == RC_NOCACHE) { nfsdstats.rcnocache++; - return rtn; + goto out; } csum = nfsd_cache_csum(rqstp); @@ -409,10 +417,8 @@ nfsd_cache_lookup(struct svc_rqst *rqstp) * preallocate an entry. */ rp = nfsd_reply_cache_alloc(rqstp, csum, nn); - if (!rp) { - dprintk("nfsd: unable to allocate DRC entry!\n"); - return rtn; - } + if (!rp) + goto out; spin_lock(&b->cache_lock); found = nfsd_cache_insert(b, rp, nn); @@ -431,8 +437,10 @@ nfsd_cache_lookup(struct svc_rqst *rqstp) /* go ahead and prune the cache */ prune_bucket(b, nn); - out: + +out_unlock: spin_unlock(&b->cache_lock); +out: return rtn; found_entry: @@ -442,13 +450,13 @@ found_entry: /* Request being processed */ if (rp->c_state == RC_INPROG) - goto out; + goto out_trace; /* From the hall of fame of impractical attacks: * Is this a user who tries to snoop on the cache? 
*/ rtn = RC_DOIT; if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure) - goto out; + goto out_trace; /* Compose RPC reply header */ switch (rp->c_type) { @@ -460,7 +468,7 @@ found_entry: break; case RC_REPLBUFF: if (!nfsd_cache_append(rqstp, &rp->c_replvec)) - goto out; /* should not happen */ + goto out_unlock; /* should not happen */ rtn = RC_REPLY; break; default: @@ -468,13 +476,19 @@ found_entry: nfsd_reply_cache_free_locked(b, rp, nn); } - goto out; +out_trace: + trace_nfsd_drc_found(nn, rqstp, rtn); + goto out_unlock; } -/* - * Update a cache entry. This is called from nfsd_dispatch when - * the procedure has been executed and the complete reply is in - * rqstp->rq_res. +/** + * nfsd_cache_update - Update an entry in the duplicate reply cache. + * @rqstp: svc_rqst with a finished Reply + * @cachetype: which cache to update + * @statp: Reply's status code + * + * This is called from nfsd_dispatch when the procedure has been + * executed and the complete reply is in rqstp->rq_res. * * We're copying around data here rather than swapping buffers because * the toplevel loop requires max-sized buffers, which would be a waste @@ -487,8 +501,7 @@ found_entry: * nfsd failed to encode a reply that otherwise would have been cached. * In this case, nfsd_cache_update is called with statp == NULL. */ -void -nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp) +void nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp) { struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); struct svc_cacherep *rp = rqstp->rq_cacherep; diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h index 78c574251c608..371dc198d28ee 100644 --- a/fs/nfsd/trace.h +++ b/fs/nfsd/trace.h @@ -432,6 +432,65 @@ TRACE_EVENT(nfsd_file_fsnotify_handle_event, __entry->nlink, __entry->mode, __entry->mask) ); +#include "cache.h" + +TRACE_DEFINE_ENUM(RC_DROPIT); +TRACE_DEFINE_ENUM(RC_REPLY); +TRACE_DEFINE_ENUM(RC_DOIT); + +#define show_drc_retval(x) \ + __print_symbolic(x, \ + { RC_DROPIT, "DROPIT" }, \ + { RC_REPLY, "REPLY" }, \ + { RC_DOIT, "DOIT" }) + +TRACE_EVENT(nfsd_drc_found, + TP_PROTO( + const struct nfsd_net *nn, + const struct svc_rqst *rqstp, + int result + ), + TP_ARGS(nn, rqstp, result), + TP_STRUCT__entry( + __field(unsigned long long, boot_time) + __field(unsigned long, result) + __field(u32, xid) + ), + TP_fast_assign( + __entry->boot_time = nn->boot_time; + __entry->result = result; + __entry->xid = be32_to_cpu(rqstp->rq_xid); + ), + TP_printk("boot_time=%16llx xid=0x%08x result=%s", + __entry->boot_time, __entry->xid, + show_drc_retval(__entry->result)) + +); + +TRACE_EVENT(nfsd_drc_mismatch, + TP_PROTO( + const struct nfsd_net *nn, + const struct svc_cacherep *key, + const struct svc_cacherep *rp + ), + TP_ARGS(nn, key, rp), + TP_STRUCT__entry( + __field(unsigned long long, boot_time) + __field(u32, xid) + __field(u32, cached) + __field(u32, ingress) + ), + TP_fast_assign( + __entry->boot_time = nn->boot_time; + __entry->xid = be32_to_cpu(key->c_key.k_xid); + __entry->cached = (__force u32)key->c_key.k_csum; + __entry->ingress = (__force u32)rp->c_key.k_csum; + ), + TP_printk("boot_time=%16llx xid=0x%08x cached-csum=0x%08x ingress-csum=0x%08x", + __entry->boot_time, __entry->xid, __entry->cached, + __entry->ingress) +); + #endif /* _NFSD_TRACE_H */ #undef TRACE_INCLUDE_PATH -- GitLab From dd5e3fbc1f472951afd7e7643ace5d1672b31358 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Sun, 5 Apr 2020 11:42:29 -0400 Subject: [PATCH 0593/1971] NFSD: Add tracepoints to the NFSD state 
management code Capture obvious events and replace dprintk() call sites. Introduce infrastructure so that adding more tracepoints in this code later is simplified. Signed-off-by: Chuck Lever --- fs/nfsd/nfs4state.c | 52 +++++++---------- fs/nfsd/state.h | 7 --- fs/nfsd/trace.h | 133 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 152 insertions(+), 40 deletions(-) diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index c107caa565254..04d80f9e2f919 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -51,6 +51,7 @@ #include "netns.h" #include "pnfs.h" #include "filecache.h" +#include "trace.h" #define NFSDDBG_FACILITY NFSDDBG_PROC @@ -167,9 +168,6 @@ renew_client_locked(struct nfs4_client *clp) return; } - dprintk("renewing client (clientid %08x/%08x)\n", - clp->cl_clientid.cl_boot, - clp->cl_clientid.cl_id); list_move_tail(&clp->cl_lru, &nn->client_lru); clp->cl_time = ktime_get_boottime_seconds(); } @@ -1922,8 +1920,7 @@ STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn) */ if (clid->cl_boot == (u32)nn->boot_time) return 0; - dprintk("NFSD stale clientid (%08x/%08x) boot_time %08llx\n", - clid->cl_boot, clid->cl_id, nn->boot_time); + trace_nfsd_clid_stale(clid); return 1; } @@ -3879,11 +3876,7 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, if (clp_used_exchangeid(conf)) goto out; if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) { - char addr_str[INET6_ADDRSTRLEN]; - rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str, - sizeof(addr_str)); - dprintk("NFSD: setclientid: string in use by client " - "at %s\n", addr_str); + trace_nfsd_clid_inuse_err(conf); goto out; } } @@ -4076,7 +4069,6 @@ out_free_openowner_slab: out_free_client_slab: kmem_cache_destroy(client_slab); out: - dprintk("nfsd4: out of memory while initializing nfsv4\n"); return -ENOMEM; } @@ -4508,6 +4500,8 @@ nfsd_break_deleg_cb(struct file_lock *fl) struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner; struct nfs4_file *fp = dp->dl_stid.sc_file; + trace_nfsd_deleg_break(&dp->dl_stid.sc_stateid); + /* * We don't want the locks code to timeout the lease for us; * we'll remove it ourself if a delegation isn't returned @@ -5018,8 +5012,7 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid)); - dprintk("NFSD: delegation stateid=" STATEID_FMT "\n", - STATEID_VAL(&dp->dl_stid.sc_stateid)); + trace_nfsd_deleg_open(&dp->dl_stid.sc_stateid); open->op_delegate_type = NFS4_OPEN_DELEGATE_READ; nfs4_put_stid(&dp->dl_stid); return; @@ -5136,9 +5129,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf nfs4_open_delegation(current_fh, open, stp); nodeleg: status = nfs_ok; - - dprintk("%s: stateid=" STATEID_FMT "\n", __func__, - STATEID_VAL(&stp->st_stid.sc_stateid)); + trace_nfsd_deleg_none(&stp->st_stid.sc_stateid); out: /* 4.1 client trying to upgrade/downgrade delegation? 
*/ if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp && @@ -5192,8 +5183,7 @@ nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, __be32 status; struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); - dprintk("process_renew(%08x/%08x): starting\n", - clid->cl_boot, clid->cl_id); + trace_nfsd_clid_renew(clid); status = lookup_clientid(clid, cstate, nn, false); if (status) goto out; @@ -5214,6 +5204,7 @@ nfsd4_end_grace(struct nfsd_net *nn) if (nn->grace_ended) return; + trace_nfsd_grace_complete(nn); nn->grace_ended = true; /* * If the server goes down again right now, an NFSv4 @@ -5279,13 +5270,10 @@ nfs4_laundromat(struct nfsd_net *nn) copy_stateid_t *cps_t; int i; - dprintk("NFSD: laundromat service - starting\n"); - if (clients_still_reclaiming(nn)) { new_timeo = 0; goto out; } - dprintk("NFSD: end of grace period\n"); nfsd4_end_grace(nn); INIT_LIST_HEAD(&reaplist); @@ -5307,8 +5295,7 @@ nfs4_laundromat(struct nfsd_net *nn) break; } if (mark_client_expired_locked(clp)) { - dprintk("NFSD: client in use (clientid %08x)\n", - clp->cl_clientid.cl_id); + trace_nfsd_clid_expired(&clp->cl_clientid); continue; } list_add(&clp->cl_lru, &reaplist); @@ -5316,8 +5303,7 @@ nfs4_laundromat(struct nfsd_net *nn) spin_unlock(&nn->client_lock); list_for_each_safe(pos, next, &reaplist) { clp = list_entry(pos, struct nfs4_client, cl_lru); - dprintk("NFSD: purging unused client (clientid %08x)\n", - clp->cl_clientid.cl_id); + trace_nfsd_clid_purged(&clp->cl_clientid); list_del_init(&clp->cl_lru); expire_client(clp); } @@ -5407,7 +5393,6 @@ laundromat_main(struct work_struct *laundry) laundromat_work); t = nfs4_laundromat(nn); - dprintk("NFSD: laundromat_main - sleeping for %lld seconds\n", t); queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ); } @@ -5948,8 +5933,7 @@ nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid, struct nfs4_stid *s; struct nfs4_ol_stateid *stp = NULL; - dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__, - seqid, STATEID_VAL(stateid)); + trace_nfsd_preprocess(seqid, stateid); *stpp = NULL; status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn); @@ -6018,9 +6002,7 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, oo->oo_flags |= NFS4_OO_CONFIRMED; nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid); mutex_unlock(&stp->st_mutex); - dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n", - __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid)); - + trace_nfsd_open_confirm(oc->oc_seqid, &stp->st_stid.sc_stateid); nfsd4_client_record_create(oo->oo_owner.so_client); status = nfs_ok; put_stateid: @@ -7072,7 +7054,7 @@ nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash, unsigned int strhashval; struct nfs4_client_reclaim *crp; - dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", name.len, name.data); + trace_nfsd_clid_reclaim(nn, name.len, name.data); crp = alloc_reclaim(); if (crp) { strhashval = clientstr_hashval(name); @@ -7122,7 +7104,7 @@ nfsd4_find_reclaim_client(struct xdr_netobj name, struct nfsd_net *nn) unsigned int strhashval; struct nfs4_client_reclaim *crp = NULL; - dprintk("NFSD: nfs4_find_reclaim_client for name %.*s\n", name.len, name.data); + trace_nfsd_clid_find(nn, name.len, name.data); strhashval = clientstr_hashval(name); list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) { @@ -7686,6 +7668,9 @@ nfsd_recall_delegations(struct list_head *reaplist) 
list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) { list_del_init(&dp->dl_recall_lru); clp = dp->dl_stid.sc_client; + + trace_nfsd_deleg_recall(&dp->dl_stid.sc_stateid); + /* * We skipped all entries that had a zero dl_time before, * so we can now reset the dl_time back to 0. If a delegation @@ -7868,6 +7853,7 @@ nfs4_state_start_net(struct net *net) goto skip_grace; printk(KERN_INFO "NFSD: starting %lld-second grace period (net %x)\n", nn->nfsd4_grace, net->ns.inum); + trace_nfsd_grace_start(nn); queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ); return 0; diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h index 68d3f30ee760c..3b408532a5dc2 100644 --- a/fs/nfsd/state.h +++ b/fs/nfsd/state.h @@ -64,13 +64,6 @@ typedef struct { refcount_t sc_count; } copy_stateid_t; -#define STATEID_FMT "(%08x/%08x/%08x/%08x)" -#define STATEID_VAL(s) \ - (s)->si_opaque.so_clid.cl_boot, \ - (s)->si_opaque.so_clid.cl_id, \ - (s)->si_opaque.so_id, \ - (s)->si_generation - struct nfsd4_callback { struct nfs4_client *cb_clp; struct rpc_message cb_msg; diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h index 371dc198d28ee..7237fe2f3de95 100644 --- a/fs/nfsd/trace.h +++ b/fs/nfsd/trace.h @@ -277,6 +277,7 @@ DECLARE_EVENT_CLASS(nfsd_stateid_class, DEFINE_EVENT(nfsd_stateid_class, nfsd_##name, \ TP_PROTO(stateid_t *stp), \ TP_ARGS(stp)) + DEFINE_STATEID_EVENT(layoutstate_alloc); DEFINE_STATEID_EVENT(layoutstate_unhash); DEFINE_STATEID_EVENT(layoutstate_free); @@ -288,6 +289,138 @@ DEFINE_STATEID_EVENT(layout_recall_done); DEFINE_STATEID_EVENT(layout_recall_fail); DEFINE_STATEID_EVENT(layout_recall_release); +DEFINE_STATEID_EVENT(deleg_open); +DEFINE_STATEID_EVENT(deleg_none); +DEFINE_STATEID_EVENT(deleg_break); +DEFINE_STATEID_EVENT(deleg_recall); + +DECLARE_EVENT_CLASS(nfsd_stateseqid_class, + TP_PROTO(u32 seqid, const stateid_t *stp), + TP_ARGS(seqid, stp), + TP_STRUCT__entry( + __field(u32, seqid) + __field(u32, cl_boot) + __field(u32, cl_id) + __field(u32, si_id) + __field(u32, si_generation) + ), + TP_fast_assign( + __entry->seqid = seqid; + __entry->cl_boot = stp->si_opaque.so_clid.cl_boot; + __entry->cl_id = stp->si_opaque.so_clid.cl_id; + __entry->si_id = stp->si_opaque.so_id; + __entry->si_generation = stp->si_generation; + ), + TP_printk("seqid=%u client %08x:%08x stateid %08x:%08x", + __entry->seqid, __entry->cl_boot, __entry->cl_id, + __entry->si_id, __entry->si_generation) +) + +#define DEFINE_STATESEQID_EVENT(name) \ +DEFINE_EVENT(nfsd_stateseqid_class, nfsd_##name, \ + TP_PROTO(u32 seqid, const stateid_t *stp), \ + TP_ARGS(seqid, stp)) + +DEFINE_STATESEQID_EVENT(preprocess); +DEFINE_STATESEQID_EVENT(open_confirm); + +DECLARE_EVENT_CLASS(nfsd_clientid_class, + TP_PROTO(const clientid_t *clid), + TP_ARGS(clid), + TP_STRUCT__entry( + __field(u32, cl_boot) + __field(u32, cl_id) + ), + TP_fast_assign( + __entry->cl_boot = clid->cl_boot; + __entry->cl_id = clid->cl_id; + ), + TP_printk("client %08x:%08x", __entry->cl_boot, __entry->cl_id) +) + +#define DEFINE_CLIENTID_EVENT(name) \ +DEFINE_EVENT(nfsd_clientid_class, nfsd_clid_##name, \ + TP_PROTO(const clientid_t *clid), \ + TP_ARGS(clid)) + +DEFINE_CLIENTID_EVENT(expired); +DEFINE_CLIENTID_EVENT(purged); +DEFINE_CLIENTID_EVENT(renew); +DEFINE_CLIENTID_EVENT(stale); + +DECLARE_EVENT_CLASS(nfsd_net_class, + TP_PROTO(const struct nfsd_net *nn), + TP_ARGS(nn), + TP_STRUCT__entry( + __field(unsigned long long, boot_time) + ), + TP_fast_assign( + __entry->boot_time = nn->boot_time; + ), + TP_printk("boot_time=%16llx", 
__entry->boot_time) +) + +#define DEFINE_NET_EVENT(name) \ +DEFINE_EVENT(nfsd_net_class, nfsd_##name, \ + TP_PROTO(const struct nfsd_net *nn), \ + TP_ARGS(nn)) + +DEFINE_NET_EVENT(grace_start); +DEFINE_NET_EVENT(grace_complete); + +DECLARE_EVENT_CLASS(nfsd_clid_class, + TP_PROTO(const struct nfsd_net *nn, + unsigned int namelen, + const unsigned char *namedata), + TP_ARGS(nn, namelen, namedata), + TP_STRUCT__entry( + __field(unsigned long long, boot_time) + __field(unsigned int, namelen) + __dynamic_array(unsigned char, name, namelen) + ), + TP_fast_assign( + __entry->boot_time = nn->boot_time; + __entry->namelen = namelen; + memcpy(__get_dynamic_array(name), namedata, namelen); + ), + TP_printk("boot_time=%16llx nfs4_clientid=%.*s", + __entry->boot_time, __entry->namelen, __get_str(name)) +) + +#define DEFINE_CLID_EVENT(name) \ +DEFINE_EVENT(nfsd_clid_class, nfsd_clid_##name, \ + TP_PROTO(const struct nfsd_net *nn, \ + unsigned int namelen, \ + const unsigned char *namedata), \ + TP_ARGS(nn, namelen, namedata)) + +DEFINE_CLID_EVENT(find); +DEFINE_CLID_EVENT(reclaim); + +TRACE_EVENT(nfsd_clid_inuse_err, + TP_PROTO(const struct nfs4_client *clp), + TP_ARGS(clp), + TP_STRUCT__entry( + __field(u32, cl_boot) + __field(u32, cl_id) + __array(unsigned char, addr, sizeof(struct sockaddr_in6)) + __field(unsigned int, namelen) + __dynamic_array(unsigned char, name, clp->cl_name.len) + ), + TP_fast_assign( + __entry->cl_boot = clp->cl_clientid.cl_boot; + __entry->cl_id = clp->cl_clientid.cl_id; + memcpy(__entry->addr, &clp->cl_addr, + sizeof(struct sockaddr_in6)); + __entry->namelen = clp->cl_name.len; + memcpy(__get_dynamic_array(name), clp->cl_name.data, + clp->cl_name.len); + ), + TP_printk("nfs4_clientid %.*s already in use by %pISpc, client %08x:%08x", + __entry->namelen, __get_str(name), __entry->addr, + __entry->cl_boot, __entry->cl_id) +) + TRACE_DEFINE_ENUM(NFSD_FILE_HASHED); TRACE_DEFINE_ENUM(NFSD_FILE_PENDING); TRACE_DEFINE_ENUM(NFSD_FILE_BREAK_READ); -- GitLab From 1eace0d1e9d0db393be87572f79f6a07405461e9 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Sun, 5 Apr 2020 14:15:29 -0400 Subject: [PATCH 0594/1971] NFSD: Add tracepoints for monitoring NFSD callbacks Signed-off-by: Chuck Lever --- fs/nfsd/nfs4callback.c | 37 ++++++---- fs/nfsd/nfs4state.c | 6 +- fs/nfsd/trace.h | 153 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 177 insertions(+), 19 deletions(-) diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 5cf91322de0fc..966ca75418c81 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -38,6 +38,7 @@ #include "nfsd.h" #include "state.h" #include "netns.h" +#include "trace.h" #include "xdr4cb.h" #include "xdr4.h" @@ -904,16 +905,20 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c if (clp->cl_minorversion == 0) { if (!clp->cl_cred.cr_principal && - (clp->cl_cred.cr_flavor >= RPC_AUTH_GSS_KRB5)) + (clp->cl_cred.cr_flavor >= RPC_AUTH_GSS_KRB5)) { + trace_nfsd_cb_setup_err(clp, -EINVAL); return -EINVAL; + } args.client_name = clp->cl_cred.cr_principal; args.prognumber = conn->cb_prog; args.protocol = XPRT_TRANSPORT_TCP; args.authflavor = clp->cl_cred.cr_flavor; clp->cl_cb_ident = conn->cb_ident; } else { - if (!conn->cb_xprt) + if (!conn->cb_xprt) { + trace_nfsd_cb_setup_err(clp, -EINVAL); return -EINVAL; + } clp->cl_cb_conn.cb_xprt = conn->cb_xprt; clp->cl_cb_session = ses; args.bc_xprt = conn->cb_xprt; @@ -925,32 +930,27 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c /* 
Create RPC client */ client = rpc_create(&args); if (IS_ERR(client)) { - dprintk("NFSD: couldn't create callback client: %ld\n", - PTR_ERR(client)); + trace_nfsd_cb_setup_err(clp, PTR_ERR(client)); return PTR_ERR(client); } cred = get_backchannel_cred(clp, client, ses); if (!cred) { + trace_nfsd_cb_setup_err(clp, -ENOMEM); rpc_shutdown_client(client); return -ENOMEM; } clp->cl_cb_client = client; clp->cl_cb_cred = cred; + trace_nfsd_cb_setup(clp); return 0; } -static void warn_no_callback_path(struct nfs4_client *clp, int reason) -{ - dprintk("NFSD: warning: no callback path to client %.*s: error %d\n", - (int)clp->cl_name.len, clp->cl_name.data, reason); -} - static void nfsd4_mark_cb_down(struct nfs4_client *clp, int reason) { if (test_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags)) return; clp->cl_cb_state = NFSD4_CB_DOWN; - warn_no_callback_path(clp, reason); + trace_nfsd_cb_state(clp); } static void nfsd4_mark_cb_fault(struct nfs4_client *clp, int reason) @@ -958,17 +958,20 @@ static void nfsd4_mark_cb_fault(struct nfs4_client *clp, int reason) if (test_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags)) return; clp->cl_cb_state = NFSD4_CB_FAULT; - warn_no_callback_path(clp, reason); + trace_nfsd_cb_state(clp); } static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata) { struct nfs4_client *clp = container_of(calldata, struct nfs4_client, cl_cb_null); + trace_nfsd_cb_done(clp, task->tk_status); if (task->tk_status) nfsd4_mark_cb_down(clp, task->tk_status); - else + else { clp->cl_cb_state = NFSD4_CB_UP; + trace_nfsd_cb_state(clp); + } } static void nfsd4_cb_probe_release(void *calldata) @@ -993,6 +996,7 @@ static const struct rpc_call_ops nfsd4_cb_probe_ops = { void nfsd4_probe_callback(struct nfs4_client *clp) { clp->cl_cb_state = NFSD4_CB_UNKNOWN; + trace_nfsd_cb_state(clp); set_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags); nfsd4_run_cb(&clp->cl_cb_null); } @@ -1009,6 +1013,7 @@ void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *conn) spin_lock(&clp->cl_lock); memcpy(&clp->cl_cb_conn, conn, sizeof(struct nfs4_cb_conn)); spin_unlock(&clp->cl_lock); + trace_nfsd_cb_state(clp); } /* @@ -1165,8 +1170,7 @@ static void nfsd4_cb_done(struct rpc_task *task, void *calldata) struct nfsd4_callback *cb = calldata; struct nfs4_client *clp = cb->cb_clp; - dprintk("%s: minorversion=%d\n", __func__, - clp->cl_minorversion); + trace_nfsd_cb_done(clp, task->tk_status); if (!nfsd4_cb_sequence_done(task, cb)) return; @@ -1271,6 +1275,7 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb) * kill the old client: */ if (clp->cl_cb_client) { + trace_nfsd_cb_shutdown(clp); rpc_shutdown_client(clp->cl_cb_client); clp->cl_cb_client = NULL; put_cred(clp->cl_cb_cred); @@ -1314,6 +1319,8 @@ nfsd4_run_cb_work(struct work_struct *work) struct rpc_clnt *clnt; int flags; + trace_nfsd_cb_work(clp, cb->cb_msg.rpc_proc->p_name); + if (cb->cb_need_restart) { cb->cb_need_restart = false; } else { diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 04d80f9e2f919..88433be551b74 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -2842,14 +2842,12 @@ gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_r conn->cb_prog = se->se_callback_prog; conn->cb_ident = se->se_callback_ident; memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen); + trace_nfsd_cb_args(clp, conn); return; out_err: conn->cb_addr.ss_family = AF_UNSPEC; conn->cb_addrlen = 0; - dprintk("NFSD: this client (clientid %08x/%08x) " - "will not receive delegations\n", - 
clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id); - + trace_nfsd_cb_nodelegs(clp); return; } diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h index 7237fe2f3de95..1861db1bdc670 100644 --- a/fs/nfsd/trace.h +++ b/fs/nfsd/trace.h @@ -624,6 +624,159 @@ TRACE_EVENT(nfsd_drc_mismatch, __entry->ingress) ); +TRACE_EVENT(nfsd_cb_args, + TP_PROTO( + const struct nfs4_client *clp, + const struct nfs4_cb_conn *conn + ), + TP_ARGS(clp, conn), + TP_STRUCT__entry( + __field(u32, cl_boot) + __field(u32, cl_id) + __field(u32, prog) + __field(u32, ident) + __array(unsigned char, addr, sizeof(struct sockaddr_in6)) + ), + TP_fast_assign( + __entry->cl_boot = clp->cl_clientid.cl_boot; + __entry->cl_id = clp->cl_clientid.cl_id; + __entry->prog = conn->cb_prog; + __entry->ident = conn->cb_ident; + memcpy(__entry->addr, &conn->cb_addr, + sizeof(struct sockaddr_in6)); + ), + TP_printk("client %08x:%08x callback addr=%pISpc prog=%u ident=%u", + __entry->cl_boot, __entry->cl_id, + __entry->addr, __entry->prog, __entry->ident) +); + +TRACE_EVENT(nfsd_cb_nodelegs, + TP_PROTO(const struct nfs4_client *clp), + TP_ARGS(clp), + TP_STRUCT__entry( + __field(u32, cl_boot) + __field(u32, cl_id) + ), + TP_fast_assign( + __entry->cl_boot = clp->cl_clientid.cl_boot; + __entry->cl_id = clp->cl_clientid.cl_id; + ), + TP_printk("client %08x:%08x", __entry->cl_boot, __entry->cl_id) +) + +TRACE_DEFINE_ENUM(NFSD4_CB_UP); +TRACE_DEFINE_ENUM(NFSD4_CB_UNKNOWN); +TRACE_DEFINE_ENUM(NFSD4_CB_DOWN); +TRACE_DEFINE_ENUM(NFSD4_CB_FAULT); + +#define show_cb_state(val) \ + __print_symbolic(val, \ + { NFSD4_CB_UP, "UP" }, \ + { NFSD4_CB_UNKNOWN, "UNKNOWN" }, \ + { NFSD4_CB_DOWN, "DOWN" }, \ + { NFSD4_CB_FAULT, "FAULT"}) + +DECLARE_EVENT_CLASS(nfsd_cb_class, + TP_PROTO(const struct nfs4_client *clp), + TP_ARGS(clp), + TP_STRUCT__entry( + __field(unsigned long, state) + __field(u32, cl_boot) + __field(u32, cl_id) + __array(unsigned char, addr, sizeof(struct sockaddr_in6)) + ), + TP_fast_assign( + __entry->state = clp->cl_cb_state; + __entry->cl_boot = clp->cl_clientid.cl_boot; + __entry->cl_id = clp->cl_clientid.cl_id; + memcpy(__entry->addr, &clp->cl_cb_conn.cb_addr, + sizeof(struct sockaddr_in6)); + ), + TP_printk("addr=%pISpc client %08x:%08x state=%s", + __entry->addr, __entry->cl_boot, __entry->cl_id, + show_cb_state(__entry->state)) +); + +#define DEFINE_NFSD_CB_EVENT(name) \ +DEFINE_EVENT(nfsd_cb_class, nfsd_cb_##name, \ + TP_PROTO(const struct nfs4_client *clp), \ + TP_ARGS(clp)) + +DEFINE_NFSD_CB_EVENT(setup); +DEFINE_NFSD_CB_EVENT(state); +DEFINE_NFSD_CB_EVENT(shutdown); + +TRACE_EVENT(nfsd_cb_setup_err, + TP_PROTO( + const struct nfs4_client *clp, + long error + ), + TP_ARGS(clp, error), + TP_STRUCT__entry( + __field(long, error) + __field(u32, cl_boot) + __field(u32, cl_id) + __array(unsigned char, addr, sizeof(struct sockaddr_in6)) + ), + TP_fast_assign( + __entry->error = error; + __entry->cl_boot = clp->cl_clientid.cl_boot; + __entry->cl_id = clp->cl_clientid.cl_id; + memcpy(__entry->addr, &clp->cl_cb_conn.cb_addr, + sizeof(struct sockaddr_in6)); + ), + TP_printk("addr=%pISpc client %08x:%08x error=%ld", + __entry->addr, __entry->cl_boot, __entry->cl_id, __entry->error) +); + +TRACE_EVENT(nfsd_cb_work, + TP_PROTO( + const struct nfs4_client *clp, + const char *procedure + ), + TP_ARGS(clp, procedure), + TP_STRUCT__entry( + __field(u32, cl_boot) + __field(u32, cl_id) + __string(procedure, procedure) + __array(unsigned char, addr, sizeof(struct sockaddr_in6)) + ), + TP_fast_assign( + __entry->cl_boot = clp->cl_clientid.cl_boot; + 
__entry->cl_id = clp->cl_clientid.cl_id; + __assign_str(procedure, procedure) + memcpy(__entry->addr, &clp->cl_cb_conn.cb_addr, + sizeof(struct sockaddr_in6)); + ), + TP_printk("addr=%pISpc client %08x:%08x procedure=%s", + __entry->addr, __entry->cl_boot, __entry->cl_id, + __get_str(procedure)) +); + +TRACE_EVENT(nfsd_cb_done, + TP_PROTO( + const struct nfs4_client *clp, + int status + ), + TP_ARGS(clp, status), + TP_STRUCT__entry( + __field(u32, cl_boot) + __field(u32, cl_id) + __field(int, status) + __array(unsigned char, addr, sizeof(struct sockaddr_in6)) + ), + TP_fast_assign( + __entry->cl_boot = clp->cl_clientid.cl_boot; + __entry->cl_id = clp->cl_clientid.cl_id; + __entry->status = status; + memcpy(__entry->addr, &clp->cl_cb_conn.cb_addr, + sizeof(struct sockaddr_in6)); + ), + TP_printk("addr=%pISpc client %08x:%08x status=%d", + __entry->addr, __entry->cl_boot, __entry->cl_id, + __entry->status) +); + #endif /* _NFSD_TRACE_H */ #undef TRACE_INCLUDE_PATH -- GitLab From 8954c5c212d321404a7aaf5b48f04a49655c928d Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Wed, 15 Apr 2020 09:05:26 -0400 Subject: [PATCH 0595/1971] SUNRPC: Clean up request deferral tracepoints - Rename these so they are easy to enable and search for as a set - Move the tracepoints to get a more accurate sense of control flow - Tracepoints should not fire on xprt shutdown - Display memory address in case data structure had been corrupted - Abandon dprintk in these paths I haven't ever gotten one of these tracepoints to trigger. I wonder if we should simply remove them. Signed-off-by: Chuck Lever --- include/trace/events/sunrpc.h | 11 ++++++++--- net/sunrpc/svc_xprt.c | 12 ++++++------ 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index 2a7f6f83341fb..852413cbb7d90 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -1406,27 +1406,32 @@ DECLARE_EVENT_CLASS(svc_deferred_event, TP_ARGS(dr), TP_STRUCT__entry( + __field(const void *, dr) __field(u32, xid) __string(addr, dr->xprt->xpt_remotebuf) ), TP_fast_assign( + __entry->dr = dr; __entry->xid = be32_to_cpu(*(__be32 *)(dr->args + (dr->xprt_hlen>>2))); __assign_str(addr, dr->xprt->xpt_remotebuf); ), - TP_printk("addr=%s xid=0x%08x", __get_str(addr), __entry->xid) + TP_printk("addr=%s dr=%p xid=0x%08x", __get_str(addr), __entry->dr, + __entry->xid) ); + #define DEFINE_SVC_DEFERRED_EVENT(name) \ - DEFINE_EVENT(svc_deferred_event, svc_##name##_deferred, \ + DEFINE_EVENT(svc_deferred_event, svc_defer_##name, \ TP_PROTO( \ const struct svc_deferred_req *dr \ ), \ TP_ARGS(dr)) DEFINE_SVC_DEFERRED_EVENT(drop); -DEFINE_SVC_DEFERRED_EVENT(revisit); +DEFINE_SVC_DEFERRED_EVENT(queue); +DEFINE_SVC_DEFERRED_EVENT(recv); TRACE_EVENT(svcsock_new_socket, TP_PROTO( diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index 0a546ef02ddea..c1ff8cdb5b2bb 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -1145,16 +1145,15 @@ static void svc_revisit(struct cache_deferred_req *dreq, int too_many) set_bit(XPT_DEFERRED, &xprt->xpt_flags); if (too_many || test_bit(XPT_DEAD, &xprt->xpt_flags)) { spin_unlock(&xprt->xpt_lock); - dprintk("revisit canceled\n"); + trace_svc_defer_drop(dr); svc_xprt_put(xprt); - trace_svc_drop_deferred(dr); kfree(dr); return; } - dprintk("revisit queued\n"); dr->xprt = NULL; list_add(&dr->handle.recent, &xprt->xpt_deferred); spin_unlock(&xprt->xpt_lock); + trace_svc_defer_queue(dr); svc_xprt_enqueue(xprt); svc_xprt_put(xprt); } @@ -1200,22 
+1199,24 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req) memcpy(dr->args, rqstp->rq_arg.head[0].iov_base - skip, dr->argslen << 2); } + trace_svc_defer(rqstp); svc_xprt_get(rqstp->rq_xprt); dr->xprt = rqstp->rq_xprt; set_bit(RQ_DROPME, &rqstp->rq_flags); dr->handle.revisit = svc_revisit; - trace_svc_defer(rqstp); return &dr->handle; } /* * recv data from a deferred request into an active one */ -static int svc_deferred_recv(struct svc_rqst *rqstp) +static noinline int svc_deferred_recv(struct svc_rqst *rqstp) { struct svc_deferred_req *dr = rqstp->rq_deferred; + trace_svc_defer_recv(dr); + /* setup iov_base past transport header */ rqstp->rq_arg.head[0].iov_base = dr->args + (dr->xprt_hlen>>2); /* The iov_len does not include the transport header bytes */ @@ -1246,7 +1247,6 @@ static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt) struct svc_deferred_req, handle.recent); list_del_init(&dr->handle.recent); - trace_svc_revisit_deferred(dr); } else clear_bit(XPT_DEFERRED, &xprt->xpt_flags); spin_unlock(&xprt->xpt_lock); -- GitLab From 45f56da8224e6bc472320b154ca8821e50fc1819 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Sun, 5 Apr 2020 14:40:25 -0400 Subject: [PATCH 0596/1971] NFSD: Squash an annoying compiler warning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Clean up: Fix gcc empty-body warning when -Wextra is used. ../fs/nfsd/nfs4state.c:3898:3: warning: suggest braces around empty body in an ‘else’ statement [-Wempty-body] Signed-off-by: Chuck Lever --- fs/nfsd/nfs4state.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 88433be551b74..8b83341cae6b9 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -3881,12 +3881,11 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, unconf = find_unconfirmed_client_by_name(&clname, nn); if (unconf) unhash_client_locked(unconf); + /* We need to handle only case 1: probable callback update */ if (conf && same_verf(&conf->cl_verifier, &clverifier)) { - /* case 1: probable callback update */ copy_clid(new, conf); gen_confirm(new, nn); - } else /* case 4 (new client) or cases 2, 3 (client reboot): */ - ; + } new->cl_minorversion = 0; gen_callback(new, setclid, rqstp); add_to_unconfirmed(new); -- GitLab From f2453978a4f2ddb1938fa80e9bf0c9d6252bd5f8 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 6 Apr 2020 16:01:42 -0400 Subject: [PATCH 0597/1971] NFSD: Fix improperly-formatted Doxygen comments fs/nfsd/nfsctl.c:256: warning: Function parameter or member 'file' not described in 'write_unlock_ip' fs/nfsd/nfsctl.c:256: warning: Function parameter or member 'buf' not described in 'write_unlock_ip' fs/nfsd/nfsctl.c:256: warning: Function parameter or member 'size' not described in 'write_unlock_ip' fs/nfsd/nfsctl.c:295: warning: Function parameter or member 'file' not described in 'write_unlock_fs' fs/nfsd/nfsctl.c:295: warning: Function parameter or member 'buf' not described in 'write_unlock_fs' fs/nfsd/nfsctl.c:295: warning: Function parameter or member 'size' not described in 'write_unlock_fs' fs/nfsd/nfsctl.c:352: warning: Function parameter or member 'file' not described in 'write_filehandle' fs/nfsd/nfsctl.c:352: warning: Function parameter or member 'buf' not described in 'write_filehandle' fs/nfsd/nfsctl.c:352: warning: Function parameter or member 'size' not described in 'write_filehandle' fs/nfsd/nfsctl.c:434: warning: Function parameter or member 
'file' not described in 'write_threads' fs/nfsd/nfsctl.c:434: warning: Function parameter or member 'buf' not described in 'write_threads' fs/nfsd/nfsctl.c:434: warning: Function parameter or member 'size' not described in 'write_threads' fs/nfsd/nfsctl.c:478: warning: Function parameter or member 'file' not described in 'write_pool_threads' fs/nfsd/nfsctl.c:478: warning: Function parameter or member 'buf' not described in 'write_pool_threads' fs/nfsd/nfsctl.c:478: warning: Function parameter or member 'size' not described in 'write_pool_threads' fs/nfsd/nfsctl.c:697: warning: Function parameter or member 'file' not described in 'write_versions' fs/nfsd/nfsctl.c:697: warning: Function parameter or member 'buf' not described in 'write_versions' fs/nfsd/nfsctl.c:697: warning: Function parameter or member 'size' not described in 'write_versions' fs/nfsd/nfsctl.c:858: warning: Function parameter or member 'file' not described in 'write_ports' fs/nfsd/nfsctl.c:858: warning: Function parameter or member 'buf' not described in 'write_ports' fs/nfsd/nfsctl.c:858: warning: Function parameter or member 'size' not described in 'write_ports' fs/nfsd/nfsctl.c:892: warning: Function parameter or member 'file' not described in 'write_maxblksize' fs/nfsd/nfsctl.c:892: warning: Function parameter or member 'buf' not described in 'write_maxblksize' fs/nfsd/nfsctl.c:892: warning: Function parameter or member 'size' not described in 'write_maxblksize' fs/nfsd/nfsctl.c:941: warning: Function parameter or member 'file' not described in 'write_maxconn' fs/nfsd/nfsctl.c:941: warning: Function parameter or member 'buf' not described in 'write_maxconn' fs/nfsd/nfsctl.c:941: warning: Function parameter or member 'size' not described in 'write_maxconn' fs/nfsd/nfsctl.c:1023: warning: Function parameter or member 'file' not described in 'write_leasetime' fs/nfsd/nfsctl.c:1023: warning: Function parameter or member 'buf' not described in 'write_leasetime' fs/nfsd/nfsctl.c:1023: warning: Function parameter or member 'size' not described in 'write_leasetime' fs/nfsd/nfsctl.c:1039: warning: Function parameter or member 'file' not described in 'write_gracetime' fs/nfsd/nfsctl.c:1039: warning: Function parameter or member 'buf' not described in 'write_gracetime' fs/nfsd/nfsctl.c:1039: warning: Function parameter or member 'size' not described in 'write_gracetime' fs/nfsd/nfsctl.c:1094: warning: Function parameter or member 'file' not described in 'write_recoverydir' fs/nfsd/nfsctl.c:1094: warning: Function parameter or member 'buf' not described in 'write_recoverydir' fs/nfsd/nfsctl.c:1094: warning: Function parameter or member 'size' not described in 'write_recoverydir' fs/nfsd/nfsctl.c:1125: warning: Function parameter or member 'file' not described in 'write_v4_end_grace' fs/nfsd/nfsctl.c:1125: warning: Function parameter or member 'buf' not described in 'write_v4_end_grace' fs/nfsd/nfsctl.c:1125: warning: Function parameter or member 'size' not described in 'write_v4_end_grace' fs/nfsd/nfs4proc.c:1164: warning: Function parameter or member 'nss' not described in 'nfsd4_interssc_connect' fs/nfsd/nfs4proc.c:1164: warning: Function parameter or member 'rqstp' not described in 'nfsd4_interssc_connect' fs/nfsd/nfs4proc.c:1164: warning: Function parameter or member 'mount' not described in 'nfsd4_interssc_connect' fs/nfsd/nfs4proc.c:1262: warning: Function parameter or member 'rqstp' not described in 'nfsd4_setup_inter_ssc' fs/nfsd/nfs4proc.c:1262: warning: Function parameter or member 'cstate' not described in 
'nfsd4_setup_inter_ssc' fs/nfsd/nfs4proc.c:1262: warning: Function parameter or member 'copy' not described in 'nfsd4_setup_inter_ssc' fs/nfsd/nfs4proc.c:1262: warning: Function parameter or member 'mount' not described in 'nfsd4_setup_inter_ssc' Signed-off-by: Chuck Lever --- fs/nfsd/nfs4proc.c | 7 +++---- fs/nfsd/nfsctl.c | 26 +++++++++++++------------- 2 files changed, 16 insertions(+), 17 deletions(-) diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index 0e75f7fb5fec0..ec1b28d08c1c7 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -1155,7 +1155,7 @@ extern void nfs_sb_deactive(struct super_block *sb); #define NFSD42_INTERSSC_MOUNTOPS "vers=4.2,addr=%s,sec=sys" -/** +/* * Support one copy source server for now. */ static __be32 @@ -1245,10 +1245,9 @@ nfsd4_interssc_disconnect(struct vfsmount *ss_mnt) mntput(ss_mnt); } -/** - * nfsd4_setup_inter_ssc - * +/* * Verify COPY destination stateid. + * * Connect to the source server with NFSv4.1. * Create the source struct file for nfsd_copy_range. * Called with COPY cstate: diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index 3bb2db947d291..b48eac3bb72b9 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -238,7 +238,7 @@ static inline struct net *netns(struct file *file) return file_inode(file)->i_sb->s_fs_info; } -/** +/* * write_unlock_ip - Release all locks used by a client * * Experimental. @@ -277,7 +277,7 @@ static ssize_t write_unlock_ip(struct file *file, char *buf, size_t size) return nlmsvc_unlock_all_by_ip(sap); } -/** +/* * write_unlock_fs - Release all locks on a local file system * * Experimental. @@ -327,7 +327,7 @@ static ssize_t write_unlock_fs(struct file *file, char *buf, size_t size) return error; } -/** +/* * write_filehandle - Get a variable-length NFS file handle by path * * On input, the buffer contains a '\n'-terminated C string comprised of @@ -402,7 +402,7 @@ static ssize_t write_filehandle(struct file *file, char *buf, size_t size) return mesg - buf; } -/** +/* * write_threads - Start NFSD, or report the current number of running threads * * Input: @@ -452,7 +452,7 @@ static ssize_t write_threads(struct file *file, char *buf, size_t size) return scnprintf(buf, SIMPLE_TRANSACTION_LIMIT, "%d\n", rv); } -/** +/* * write_pool_threads - Set or report the current number of threads per pool * * Input: @@ -661,7 +661,7 @@ out: return tlen + len; } -/** +/* * write_versions - Set or report the available NFS protocol versions * * Input: @@ -811,7 +811,7 @@ static ssize_t __write_ports(struct file *file, char *buf, size_t size, return -EINVAL; } -/** +/* * write_ports - Pass a socket file descriptor or transport name to listen on * * Input: @@ -867,7 +867,7 @@ static ssize_t write_ports(struct file *file, char *buf, size_t size) int nfsd_max_blksize; -/** +/* * write_maxblksize - Set or report the current NFS blksize * * Input: @@ -917,7 +917,7 @@ static ssize_t write_maxblksize(struct file *file, char *buf, size_t size) nfsd_max_blksize); } -/** +/* * write_maxconn - Set or report the current max number of connections * * Input: @@ -998,7 +998,7 @@ static ssize_t nfsd4_write_time(struct file *file, char *buf, size_t size, return rv; } -/** +/* * write_leasetime - Set or report the current NFSv4 lease time * * Input: @@ -1025,7 +1025,7 @@ static ssize_t write_leasetime(struct file *file, char *buf, size_t size) return nfsd4_write_time(file, buf, size, &nn->nfsd4_lease, nn); } -/** +/* * write_gracetime - Set or report current NFSv4 grace period time * * As above, but sets the time of the NFSv4 grace 
period. @@ -1069,7 +1069,7 @@ static ssize_t __write_recoverydir(struct file *file, char *buf, size_t size, nfs4_recoverydir()); } -/** +/* * write_recoverydir - Set or report the pathname of the recovery directory * * Input: @@ -1101,7 +1101,7 @@ static ssize_t write_recoverydir(struct file *file, char *buf, size_t size) return rv; } -/** +/* * write_v4_end_grace - release grace period for nfsd's v4.x lock manager * * Input: -- GitLab From f90b68d6c8b008549a4538f608575ff3f9ed2905 Mon Sep 17 00:00:00 2001 From: Peng Fan Date: Thu, 7 May 2020 13:56:16 +0800 Subject: [PATCH 0598/1971] clk: imx: add mux ops for i.MX8M composite clk The CORE/BUS root slice has following design, simplied graph: The difference is core not have pre_div block. A composite core/bus clk has 8 inputs for mux to select, saying clk[0-7]. It support target(smart) interface and normal interface. Target interface is exported for programmer easy to configure ccm root. Normal interface is also exported, but we not use it in our driver, because it will introduce more complexity compared with target interface. The normal interface simplified as below: SEL_A GA +--+ +-+ | +->+ +------+ CLK[0-7]--->+ | +-+ | | | | +----v---+ +----+ | +--+ |pre_diva+----> | +---------+ | +--------+ |mux +--+post_div | | +--+ |pre_divb+--->+ | +---------+ | | | +----^---+ +----+ +--->+ | +-+ | | +->+ +------+ +--+ +-+ SEL_B GB The mux in the upper pic is not the target interface MUX, target interface MUX is hiding SEL_A and SEL_B. When you choose clk[0-7], you are actually writing SEL_A or SEL_B depends on the internal counter which will also control the internal "mux". The target interface simplified as below which is used by Linux Kernel: CLK[0-7]--->MUX-->Gate-->pre_div-->post_div A requirement of the Target Interface's software is that the target clock source is active, it means when setting SEL_A, the current input clk to SEL_A must be active, same to SEL_B. We touch target interface, but hardware logic actually also need configure normal interface. There will be system hang, when doing the following steps: The initial state: SEL_A/SEL_B are both sourcing from clk0, the internal counter choose SEL_A. 1. switch mux from clk0 to clk1 The hardware logic will choose SEL_B and configure SEL_B to clk1. SEL_A no changed. 2. gate off clk0 Disable clk0, then the input to SEL_A is off. 3. swtich from clk1 to clk2 The hardware logic will choose SEL_A and configure SEL_A to clk2, however the current SEL_A input clk0 is off, the system hang. The solution to fix the issue is in step 1, write twice to target interface MUX, it will make SEL_A/SEL_B both sources from clk1, then no need to care about the state of clk0. And finally system performs well. 
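A small user-space model of the sequence above makes the hang condition easier to see. This is purely illustrative -- it is not the CCM register layout or driver code, and every name in it is invented for the sketch; the real fix is the double writel() in imx8m_clk_composite_mux_set_parent() in the hunk below.

#include <stdbool.h>
#include <stdio.h>

static int sel_a, sel_b;		/* input currently selected by each internal select */
static bool write_b_next = true;	/* models the internal counter picking SEL_A or SEL_B */
static bool clk_on[8] = { true, true, true };	/* clk0..clk2 running initially */

static void write_target_mux(int clk)
{
	int *sel = write_b_next ? &sel_b : &sel_a;

	/* the select being reprogrammed needs its *current* input running */
	if (!clk_on[*sel])
		printf("hang: current input clk%d of this select is gated off\n", *sel);
	*sel = clk;
	write_b_next = !write_b_next;
}

int main(void)
{
	write_target_mux(1);	/* step 1: clk0 -> clk1, only SEL_B is updated      */
	clk_on[0] = false;	/* step 2: gate off clk0, SEL_A still points at it  */
	write_target_mux(2);	/* step 3: clk1 -> clk2 hits SEL_A -> reports hang  */

	/*
	 * Writing the target mux twice in step 1 would have moved SEL_A to
	 * clk1 as well, so step 3 would find an active input and succeed.
	 */
	return 0;
}
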
Signed-off-by: Peng Fan Reviewed-by: Dong Aisheng Signed-off-by: Shawn Guo --- drivers/clk/imx/clk-composite-8m.c | 51 +++++++++++++++++++++++++++++- 1 file changed, 50 insertions(+), 1 deletion(-) diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c index 99773519b5a50..2d9562ebddc3f 100644 --- a/drivers/clk/imx/clk-composite-8m.c +++ b/drivers/clk/imx/clk-composite-8m.c @@ -124,6 +124,52 @@ static const struct clk_ops imx8m_clk_composite_divider_ops = { .set_rate = imx8m_clk_composite_divider_set_rate, }; +static u8 imx8m_clk_composite_mux_get_parent(struct clk_hw *hw) +{ + return clk_mux_ops.get_parent(hw); +} + +static int imx8m_clk_composite_mux_set_parent(struct clk_hw *hw, u8 index) +{ + struct clk_mux *mux = to_clk_mux(hw); + u32 val = clk_mux_index_to_val(mux->table, mux->flags, index); + unsigned long flags = 0; + u32 reg; + + if (mux->lock) + spin_lock_irqsave(mux->lock, flags); + + reg = readl(mux->reg); + reg &= ~(mux->mask << mux->shift); + val = val << mux->shift; + reg |= val; + /* + * write twice to make sure non-target interface + * SEL_A/B point the same clk input. + */ + writel(reg, mux->reg); + writel(reg, mux->reg); + + if (mux->lock) + spin_unlock_irqrestore(mux->lock, flags); + + return 0; +} + +static int +imx8m_clk_composite_mux_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + return clk_mux_ops.determine_rate(hw, req); +} + + +static const struct clk_ops imx8m_clk_composite_mux_ops = { + .get_parent = imx8m_clk_composite_mux_get_parent, + .set_parent = imx8m_clk_composite_mux_set_parent, + .determine_rate = imx8m_clk_composite_mux_determine_rate, +}; + struct clk_hw *imx8m_clk_hw_composite_flags(const char *name, const char * const *parent_names, int num_parents, void __iomem *reg, @@ -136,6 +182,7 @@ struct clk_hw *imx8m_clk_hw_composite_flags(const char *name, struct clk_gate *gate = NULL; struct clk_mux *mux = NULL; const struct clk_ops *divider_ops; + const struct clk_ops *mux_ops; mux = kzalloc(sizeof(*mux), GFP_KERNEL); if (!mux) @@ -157,10 +204,12 @@ struct clk_hw *imx8m_clk_hw_composite_flags(const char *name, div->shift = PCG_DIV_SHIFT; div->width = PCG_CORE_DIV_WIDTH; divider_ops = &clk_divider_ops; + mux_ops = &imx8m_clk_composite_mux_ops; } else { div->shift = PCG_PREDIV_SHIFT; div->width = PCG_PREDIV_WIDTH; divider_ops = &imx8m_clk_composite_divider_ops; + mux_ops = &clk_mux_ops; } div->lock = &imx_ccm_lock; @@ -176,7 +225,7 @@ struct clk_hw *imx8m_clk_hw_composite_flags(const char *name, gate->lock = &imx_ccm_lock; hw = clk_hw_register_composite(NULL, name, parent_names, num_parents, - mux_hw, &clk_mux_ops, div_hw, + mux_hw, mux_ops, div_hw, divider_ops, gate_hw, &clk_gate_ops, flags); if (IS_ERR(hw)) goto fail; -- GitLab From 0e40198dc28b620ead39de6e42db291418cd1183 Mon Sep 17 00:00:00 2001 From: Peng Fan Date: Thu, 7 May 2020 13:56:17 +0800 Subject: [PATCH 0599/1971] clk: imx: add imx8m_clk_hw_composite_bus Introduce imx8m_clk_hw_composite_bus api for bus clk root slice usage. Because the mux switch sequence issue, we could not reuse Peripheral Clock Slice code, need use composite specific mux operation. 
Signed-off-by: Peng Fan Reviewed-by: Dong Aisheng Signed-off-by: Shawn Guo --- drivers/clk/imx/clk-composite-8m.c | 5 +++++ drivers/clk/imx/clk.h | 7 +++++++ 2 files changed, 12 insertions(+) diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c index 2d9562ebddc3f..d2b5af826f2ca 100644 --- a/drivers/clk/imx/clk-composite-8m.c +++ b/drivers/clk/imx/clk-composite-8m.c @@ -205,6 +205,11 @@ struct clk_hw *imx8m_clk_hw_composite_flags(const char *name, div->width = PCG_CORE_DIV_WIDTH; divider_ops = &clk_divider_ops; mux_ops = &imx8m_clk_composite_mux_ops; + } else if (composite_flags & IMX_COMPOSITE_BUS) { + div->shift = PCG_PREDIV_SHIFT; + div->width = PCG_PREDIV_WIDTH; + divider_ops = &imx8m_clk_composite_divider_ops; + mux_ops = &imx8m_clk_composite_mux_ops; } else { div->shift = PCG_PREDIV_SHIFT; div->width = PCG_PREDIV_WIDTH; diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h index b91b1b18a4a21..16adbc34e05fa 100644 --- a/drivers/clk/imx/clk.h +++ b/drivers/clk/imx/clk.h @@ -527,6 +527,7 @@ struct clk_hw *imx_clk_hw_cpu(const char *name, const char *parent_name, struct clk *step); #define IMX_COMPOSITE_CORE BIT(0) +#define IMX_COMPOSITE_BUS BIT(1) struct clk_hw *imx8m_clk_hw_composite_flags(const char *name, const char * const *parent_names, @@ -535,6 +536,12 @@ struct clk_hw *imx8m_clk_hw_composite_flags(const char *name, u32 composite_flags, unsigned long flags); +#define imx8m_clk_hw_composite_bus(name, parent_names, reg) \ + imx8m_clk_hw_composite_flags(name, parent_names, \ + ARRAY_SIZE(parent_names), reg, \ + IMX_COMPOSITE_BUS, \ + CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE) + #define imx8m_clk_hw_composite_core(name, parent_names, reg) \ imx8m_clk_hw_composite_flags(name, parent_names, \ ARRAY_SIZE(parent_names), reg, \ -- GitLab From b1657ad708f761f9ca6d166d4dda685ca39b1254 Mon Sep 17 00:00:00 2001 From: Peng Fan Date: Thu, 7 May 2020 13:56:18 +0800 Subject: [PATCH 0600/1971] clk: imx: use imx8m_clk_hw_composite_bus for i.MX8M bus clk slice Switch the bus clk use imx8m_clk_hw_composite_bus, then we could avoid possible issue when setting mux of the clk. 
Signed-off-by: Peng Fan Reviewed-by: Dong Aisheng Signed-off-by: Shawn Guo --- drivers/clk/imx/clk-imx8mm.c | 18 +++++++++--------- drivers/clk/imx/clk-imx8mn.c | 16 ++++++++-------- drivers/clk/imx/clk-imx8mp.c | 24 ++++++++++++------------ drivers/clk/imx/clk-imx8mq.c | 20 ++++++++++---------- 4 files changed, 39 insertions(+), 39 deletions(-) diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c index 12443e06f3292..b793264c21c6c 100644 --- a/drivers/clk/imx/clk-imx8mm.c +++ b/drivers/clk/imx/clk-imx8mm.c @@ -444,21 +444,21 @@ static int imx8mm_clocks_probe(struct platform_device *pdev) /* BUS */ hws[IMX8MM_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi", imx8mm_main_axi_sels, base + 0x8800); - hws[IMX8MM_CLK_ENET_AXI] = imx8m_clk_hw_composite("enet_axi", imx8mm_enet_axi_sels, base + 0x8880); + hws[IMX8MM_CLK_ENET_AXI] = imx8m_clk_hw_composite_bus("enet_axi", imx8mm_enet_axi_sels, base + 0x8880); hws[IMX8MM_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite_critical("nand_usdhc_bus", imx8mm_nand_usdhc_sels, base + 0x8900); - hws[IMX8MM_CLK_VPU_BUS] = imx8m_clk_hw_composite("vpu_bus", imx8mm_vpu_bus_sels, base + 0x8980); - hws[IMX8MM_CLK_DISP_AXI] = imx8m_clk_hw_composite("disp_axi", imx8mm_disp_axi_sels, base + 0x8a00); - hws[IMX8MM_CLK_DISP_APB] = imx8m_clk_hw_composite("disp_apb", imx8mm_disp_apb_sels, base + 0x8a80); - hws[IMX8MM_CLK_DISP_RTRM] = imx8m_clk_hw_composite("disp_rtrm", imx8mm_disp_rtrm_sels, base + 0x8b00); - hws[IMX8MM_CLK_USB_BUS] = imx8m_clk_hw_composite("usb_bus", imx8mm_usb_bus_sels, base + 0x8b80); - hws[IMX8MM_CLK_GPU_AXI] = imx8m_clk_hw_composite("gpu_axi", imx8mm_gpu_axi_sels, base + 0x8c00); - hws[IMX8MM_CLK_GPU_AHB] = imx8m_clk_hw_composite("gpu_ahb", imx8mm_gpu_ahb_sels, base + 0x8c80); + hws[IMX8MM_CLK_VPU_BUS] = imx8m_clk_hw_composite_bus("vpu_bus", imx8mm_vpu_bus_sels, base + 0x8980); + hws[IMX8MM_CLK_DISP_AXI] = imx8m_clk_hw_composite_bus("disp_axi", imx8mm_disp_axi_sels, base + 0x8a00); + hws[IMX8MM_CLK_DISP_APB] = imx8m_clk_hw_composite_bus("disp_apb", imx8mm_disp_apb_sels, base + 0x8a80); + hws[IMX8MM_CLK_DISP_RTRM] = imx8m_clk_hw_composite_bus("disp_rtrm", imx8mm_disp_rtrm_sels, base + 0x8b00); + hws[IMX8MM_CLK_USB_BUS] = imx8m_clk_hw_composite_bus("usb_bus", imx8mm_usb_bus_sels, base + 0x8b80); + hws[IMX8MM_CLK_GPU_AXI] = imx8m_clk_hw_composite_bus("gpu_axi", imx8mm_gpu_axi_sels, base + 0x8c00); + hws[IMX8MM_CLK_GPU_AHB] = imx8m_clk_hw_composite_bus("gpu_ahb", imx8mm_gpu_ahb_sels, base + 0x8c80); hws[IMX8MM_CLK_NOC] = imx8m_clk_hw_composite_critical("noc", imx8mm_noc_sels, base + 0x8d00); hws[IMX8MM_CLK_NOC_APB] = imx8m_clk_hw_composite_critical("noc_apb", imx8mm_noc_apb_sels, base + 0x8d80); /* AHB */ hws[IMX8MM_CLK_AHB] = imx8m_clk_hw_composite_critical("ahb", imx8mm_ahb_sels, base + 0x9000); - hws[IMX8MM_CLK_AUDIO_AHB] = imx8m_clk_hw_composite("audio_ahb", imx8mm_audio_ahb_sels, base + 0x9100); + hws[IMX8MM_CLK_AUDIO_AHB] = imx8m_clk_hw_composite_bus("audio_ahb", imx8mm_audio_ahb_sels, base + 0x9100); /* IPG */ hws[IMX8MM_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb", base + 0x9080, 0, 1); diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c index bd3759b4afd06..213cc37b31730 100644 --- a/drivers/clk/imx/clk-imx8mn.c +++ b/drivers/clk/imx/clk-imx8mn.c @@ -432,17 +432,17 @@ static int imx8mn_clocks_probe(struct platform_device *pdev) /* BUS */ hws[IMX8MN_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi", imx8mn_main_axi_sels, base + 0x8800); - hws[IMX8MN_CLK_ENET_AXI] = 
imx8m_clk_hw_composite("enet_axi", imx8mn_enet_axi_sels, base + 0x8880); - hws[IMX8MN_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite("nand_usdhc_bus", imx8mn_nand_usdhc_sels, base + 0x8900); - hws[IMX8MN_CLK_DISP_AXI] = imx8m_clk_hw_composite("disp_axi", imx8mn_disp_axi_sels, base + 0x8a00); - hws[IMX8MN_CLK_DISP_APB] = imx8m_clk_hw_composite("disp_apb", imx8mn_disp_apb_sels, base + 0x8a80); - hws[IMX8MN_CLK_USB_BUS] = imx8m_clk_hw_composite("usb_bus", imx8mn_usb_bus_sels, base + 0x8b80); - hws[IMX8MN_CLK_GPU_AXI] = imx8m_clk_hw_composite("gpu_axi", imx8mn_gpu_axi_sels, base + 0x8c00); - hws[IMX8MN_CLK_GPU_AHB] = imx8m_clk_hw_composite("gpu_ahb", imx8mn_gpu_ahb_sels, base + 0x8c80); + hws[IMX8MN_CLK_ENET_AXI] = imx8m_clk_hw_composite_bus("enet_axi", imx8mn_enet_axi_sels, base + 0x8880); + hws[IMX8MN_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite_bus("nand_usdhc_bus", imx8mn_nand_usdhc_sels, base + 0x8900); + hws[IMX8MN_CLK_DISP_AXI] = imx8m_clk_hw_composite_bus("disp_axi", imx8mn_disp_axi_sels, base + 0x8a00); + hws[IMX8MN_CLK_DISP_APB] = imx8m_clk_hw_composite_bus("disp_apb", imx8mn_disp_apb_sels, base + 0x8a80); + hws[IMX8MN_CLK_USB_BUS] = imx8m_clk_hw_composite_bus("usb_bus", imx8mn_usb_bus_sels, base + 0x8b80); + hws[IMX8MN_CLK_GPU_AXI] = imx8m_clk_hw_composite_bus("gpu_axi", imx8mn_gpu_axi_sels, base + 0x8c00); + hws[IMX8MN_CLK_GPU_AHB] = imx8m_clk_hw_composite_bus("gpu_ahb", imx8mn_gpu_ahb_sels, base + 0x8c80); hws[IMX8MN_CLK_NOC] = imx8m_clk_hw_composite_critical("noc", imx8mn_noc_sels, base + 0x8d00); hws[IMX8MN_CLK_AHB] = imx8m_clk_hw_composite_critical("ahb", imx8mn_ahb_sels, base + 0x9000); - hws[IMX8MN_CLK_AUDIO_AHB] = imx8m_clk_hw_composite("audio_ahb", imx8mn_audio_ahb_sels, base + 0x9100); + hws[IMX8MN_CLK_AUDIO_AHB] = imx8m_clk_hw_composite_bus("audio_ahb", imx8mn_audio_ahb_sels, base + 0x9100); hws[IMX8MN_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb", base + 0x9080, 0, 1); hws[IMX8MN_CLK_IPG_AUDIO_ROOT] = imx_clk_hw_divider2("ipg_audio_root", "audio_ahb", base + 0x9180, 0, 1); hws[IMX8MN_CLK_DRAM_CORE] = imx_clk_hw_mux2_flags("dram_core_clk", base + 0x9800, 24, 1, imx8mn_dram_core_sels, ARRAY_SIZE(imx8mn_dram_core_sels), CLK_IS_CRITICAL); diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c index 998e9e63f8311..b4d9db9d5bf17 100644 --- a/drivers/clk/imx/clk-imx8mp.c +++ b/drivers/clk/imx/clk-imx8mp.c @@ -563,23 +563,23 @@ static int imx8mp_clocks_probe(struct platform_device *pdev) hws[IMX8MP_CLK_A53_CORE] = imx_clk_hw_mux2("arm_a53_core", ccm_base + 0x9880, 24, 1, imx8mp_a53_core_sels, ARRAY_SIZE(imx8mp_a53_core_sels)); hws[IMX8MP_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi", imx8mp_main_axi_sels, ccm_base + 0x8800); - hws[IMX8MP_CLK_ENET_AXI] = imx8m_clk_hw_composite("enet_axi", imx8mp_enet_axi_sels, ccm_base + 0x8880); + hws[IMX8MP_CLK_ENET_AXI] = imx8m_clk_hw_composite_bus("enet_axi", imx8mp_enet_axi_sels, ccm_base + 0x8880); hws[IMX8MP_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite_critical("nand_usdhc_bus", imx8mp_nand_usdhc_sels, ccm_base + 0x8900); - hws[IMX8MP_CLK_VPU_BUS] = imx8m_clk_hw_composite("vpu_bus", imx8mp_vpu_bus_sels, ccm_base + 0x8980); - hws[IMX8MP_CLK_MEDIA_AXI] = imx8m_clk_hw_composite("media_axi", imx8mp_media_axi_sels, ccm_base + 0x8a00); - hws[IMX8MP_CLK_MEDIA_APB] = imx8m_clk_hw_composite("media_apb", imx8mp_media_apb_sels, ccm_base + 0x8a80); - hws[IMX8MP_CLK_HDMI_APB] = imx8m_clk_hw_composite("hdmi_apb", imx8mp_media_apb_sels, ccm_base + 0x8b00); - hws[IMX8MP_CLK_HDMI_AXI] = 
imx8m_clk_hw_composite("hdmi_axi", imx8mp_media_axi_sels, ccm_base + 0x8b80); - hws[IMX8MP_CLK_GPU_AXI] = imx8m_clk_hw_composite("gpu_axi", imx8mp_gpu_axi_sels, ccm_base + 0x8c00); - hws[IMX8MP_CLK_GPU_AHB] = imx8m_clk_hw_composite("gpu_ahb", imx8mp_gpu_ahb_sels, ccm_base + 0x8c80); + hws[IMX8MP_CLK_VPU_BUS] = imx8m_clk_hw_composite_bus("vpu_bus", imx8mp_vpu_bus_sels, ccm_base + 0x8980); + hws[IMX8MP_CLK_MEDIA_AXI] = imx8m_clk_hw_composite_bus("media_axi", imx8mp_media_axi_sels, ccm_base + 0x8a00); + hws[IMX8MP_CLK_MEDIA_APB] = imx8m_clk_hw_composite_bus("media_apb", imx8mp_media_apb_sels, ccm_base + 0x8a80); + hws[IMX8MP_CLK_HDMI_APB] = imx8m_clk_hw_composite_bus("hdmi_apb", imx8mp_media_apb_sels, ccm_base + 0x8b00); + hws[IMX8MP_CLK_HDMI_AXI] = imx8m_clk_hw_composite_bus("hdmi_axi", imx8mp_media_axi_sels, ccm_base + 0x8b80); + hws[IMX8MP_CLK_GPU_AXI] = imx8m_clk_hw_composite_bus("gpu_axi", imx8mp_gpu_axi_sels, ccm_base + 0x8c00); + hws[IMX8MP_CLK_GPU_AHB] = imx8m_clk_hw_composite_bus("gpu_ahb", imx8mp_gpu_ahb_sels, ccm_base + 0x8c80); hws[IMX8MP_CLK_NOC] = imx8m_clk_hw_composite_critical("noc", imx8mp_noc_sels, ccm_base + 0x8d00); hws[IMX8MP_CLK_NOC_IO] = imx8m_clk_hw_composite_critical("noc_io", imx8mp_noc_io_sels, ccm_base + 0x8d80); - hws[IMX8MP_CLK_ML_AXI] = imx8m_clk_hw_composite("ml_axi", imx8mp_ml_axi_sels, ccm_base + 0x8e00); - hws[IMX8MP_CLK_ML_AHB] = imx8m_clk_hw_composite("ml_ahb", imx8mp_ml_ahb_sels, ccm_base + 0x8e80); + hws[IMX8MP_CLK_ML_AXI] = imx8m_clk_hw_composite_bus("ml_axi", imx8mp_ml_axi_sels, ccm_base + 0x8e00); + hws[IMX8MP_CLK_ML_AHB] = imx8m_clk_hw_composite_bus("ml_ahb", imx8mp_ml_ahb_sels, ccm_base + 0x8e80); hws[IMX8MP_CLK_AHB] = imx8m_clk_hw_composite_critical("ahb_root", imx8mp_ahb_sels, ccm_base + 0x9000); - hws[IMX8MP_CLK_AUDIO_AHB] = imx8m_clk_hw_composite("audio_ahb", imx8mp_audio_ahb_sels, ccm_base + 0x9100); - hws[IMX8MP_CLK_MIPI_DSI_ESC_RX] = imx8m_clk_hw_composite("mipi_dsi_esc_rx", imx8mp_mipi_dsi_esc_rx_sels, ccm_base + 0x9200); + hws[IMX8MP_CLK_AUDIO_AHB] = imx8m_clk_hw_composite_bus("audio_ahb", imx8mp_audio_ahb_sels, ccm_base + 0x9100); + hws[IMX8MP_CLK_MIPI_DSI_ESC_RX] = imx8m_clk_hw_composite_bus("mipi_dsi_esc_rx", imx8mp_mipi_dsi_esc_rx_sels, ccm_base + 0x9200); hws[IMX8MP_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb_root", ccm_base + 0x9080, 0, 1); hws[IMX8MP_CLK_IPG_AUDIO_ROOT] = imx_clk_hw_divider2("ipg_audio_root", "audio_ahb", ccm_base + 0x9180, 0, 1); diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c index 91309ff654417..a64aace213c27 100644 --- a/drivers/clk/imx/clk-imx8mq.c +++ b/drivers/clk/imx/clk-imx8mq.c @@ -432,22 +432,22 @@ static int imx8mq_clocks_probe(struct platform_device *pdev) /* BUS */ hws[IMX8MQ_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi", imx8mq_main_axi_sels, base + 0x8800); - hws[IMX8MQ_CLK_ENET_AXI] = imx8m_clk_hw_composite("enet_axi", imx8mq_enet_axi_sels, base + 0x8880); - hws[IMX8MQ_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite("nand_usdhc_bus", imx8mq_nand_usdhc_sels, base + 0x8900); - hws[IMX8MQ_CLK_VPU_BUS] = imx8m_clk_hw_composite("vpu_bus", imx8mq_vpu_bus_sels, base + 0x8980); - hws[IMX8MQ_CLK_DISP_AXI] = imx8m_clk_hw_composite("disp_axi", imx8mq_disp_axi_sels, base + 0x8a00); - hws[IMX8MQ_CLK_DISP_APB] = imx8m_clk_hw_composite("disp_apb", imx8mq_disp_apb_sels, base + 0x8a80); - hws[IMX8MQ_CLK_DISP_RTRM] = imx8m_clk_hw_composite("disp_rtrm", imx8mq_disp_rtrm_sels, base + 0x8b00); - hws[IMX8MQ_CLK_USB_BUS] = imx8m_clk_hw_composite("usb_bus", imx8mq_usb_bus_sels, 
base + 0x8b80); - hws[IMX8MQ_CLK_GPU_AXI] = imx8m_clk_hw_composite("gpu_axi", imx8mq_gpu_axi_sels, base + 0x8c00); - hws[IMX8MQ_CLK_GPU_AHB] = imx8m_clk_hw_composite("gpu_ahb", imx8mq_gpu_ahb_sels, base + 0x8c80); + hws[IMX8MQ_CLK_ENET_AXI] = imx8m_clk_hw_composite_bus("enet_axi", imx8mq_enet_axi_sels, base + 0x8880); + hws[IMX8MQ_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite_bus("nand_usdhc_bus", imx8mq_nand_usdhc_sels, base + 0x8900); + hws[IMX8MQ_CLK_VPU_BUS] = imx8m_clk_hw_composite_bus("vpu_bus", imx8mq_vpu_bus_sels, base + 0x8980); + hws[IMX8MQ_CLK_DISP_AXI] = imx8m_clk_hw_composite_bus("disp_axi", imx8mq_disp_axi_sels, base + 0x8a00); + hws[IMX8MQ_CLK_DISP_APB] = imx8m_clk_hw_composite_bus("disp_apb", imx8mq_disp_apb_sels, base + 0x8a80); + hws[IMX8MQ_CLK_DISP_RTRM] = imx8m_clk_hw_composite_bus("disp_rtrm", imx8mq_disp_rtrm_sels, base + 0x8b00); + hws[IMX8MQ_CLK_USB_BUS] = imx8m_clk_hw_composite_bus("usb_bus", imx8mq_usb_bus_sels, base + 0x8b80); + hws[IMX8MQ_CLK_GPU_AXI] = imx8m_clk_hw_composite_bus("gpu_axi", imx8mq_gpu_axi_sels, base + 0x8c00); + hws[IMX8MQ_CLK_GPU_AHB] = imx8m_clk_hw_composite_bus("gpu_ahb", imx8mq_gpu_ahb_sels, base + 0x8c80); hws[IMX8MQ_CLK_NOC] = imx8m_clk_hw_composite_critical("noc", imx8mq_noc_sels, base + 0x8d00); hws[IMX8MQ_CLK_NOC_APB] = imx8m_clk_hw_composite_critical("noc_apb", imx8mq_noc_apb_sels, base + 0x8d80); /* AHB */ /* AHB clock is used by the AHB bus therefore marked as critical */ hws[IMX8MQ_CLK_AHB] = imx8m_clk_hw_composite_critical("ahb", imx8mq_ahb_sels, base + 0x9000); - hws[IMX8MQ_CLK_AUDIO_AHB] = imx8m_clk_hw_composite("audio_ahb", imx8mq_audio_ahb_sels, base + 0x9100); + hws[IMX8MQ_CLK_AUDIO_AHB] = imx8m_clk_hw_composite_bus("audio_ahb", imx8mq_audio_ahb_sels, base + 0x9100); /* IPG */ hws[IMX8MQ_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb", base + 0x9080, 0, 1); -- GitLab From c8d70a29d6bbc956013f3401f92a4431a9385a3c Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Mon, 11 May 2020 09:42:31 +0200 Subject: [PATCH 0601/1971] xen/pvcalls-back: test for errors when calling backend_connect() backend_connect() can fail, so switch the device to connected only if no error occurred. Fixes: 0a9c75c2c7258f2 ("xen/pvcalls: xenbus state handling") Cc: stable@vger.kernel.org Signed-off-by: Juergen Gross Link: https://lore.kernel.org/r/20200511074231.19794-1-jgross@suse.com Reviewed-by: Stefano Stabellini Signed-off-by: Boris Ostrovsky --- drivers/xen/pvcalls-back.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c index cf4ce3e9358d6..41a18ece029a3 100644 --- a/drivers/xen/pvcalls-back.c +++ b/drivers/xen/pvcalls-back.c @@ -1088,7 +1088,8 @@ static void set_backend_state(struct xenbus_device *dev, case XenbusStateInitialised: switch (state) { case XenbusStateConnected: - backend_connect(dev); + if (backend_connect(dev)) + return; xenbus_switch_state(dev, XenbusStateConnected); break; case XenbusStateClosing: -- GitLab From c17db64044e34ae6c2df56cc4837787f9d48edd5 Mon Sep 17 00:00:00 2001 From: Rikard Falkeborn Date: Sat, 9 May 2020 15:47:55 +0200 Subject: [PATCH 0602/1971] xen-platform: Constify dev_pm_ops dev_pm_ops is never modified, so mark it const to allow the compiler to put it in read-only memory. 
Before: text data bss dec hex filename 2457 1668 256 4381 111d drivers/xen/platform-pci.o After: text data bss dec hex filename 2681 1444 256 4381 111d drivers/xen/platform-pci.o Signed-off-by: Rikard Falkeborn Link: https://lore.kernel.org/r/20200509134755.15038-1-rikard.falkeborn@gmail.com Reviewed-by: Juergen Gross Signed-off-by: Boris Ostrovsky --- drivers/xen/platform-pci.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c index 59e85e408c23a..dd911e1ff782c 100644 --- a/drivers/xen/platform-pci.c +++ b/drivers/xen/platform-pci.c @@ -168,7 +168,7 @@ static const struct pci_device_id platform_pci_tbl[] = { {0,} }; -static struct dev_pm_ops platform_pm_ops = { +static const struct dev_pm_ops platform_pm_ops = { .resume_noirq = platform_pci_resume, }; -- GitLab From c54b071c192dfe8061336f650ceaf358e6386e0b Mon Sep 17 00:00:00 2001 From: Boris Ostrovsky Date: Fri, 8 May 2020 18:28:43 -0400 Subject: [PATCH 0603/1971] xen/cpuhotplug: Fix initial CPU offlining for PV(H) guests Commit a926f81d2f6c ("xen/cpuhotplug: Replace cpu_up/down() with device_online/offline()") replaced cpu_down() with device_offline() call which requires that the CPU has been registered before. This registration, however, happens later from topology_init() which is called as subsys_initcall(). setup_vcpu_hotplug_event(), on the other hand, is invoked earlier, during arch_initcall(). As result, booting a PV(H) guest with vcpus < maxvcpus causes a crash. Move setup_vcpu_hotplug_event() (and therefore setup_cpu_watcher()) to late_initcall(). In addition, instead of performing all offlining steps in setup_cpu_watcher() simply call disable_hotplug_cpu(). Fixes: a926f81d2f6c (xen/cpuhotplug: Replace cpu_up/down() with device_online/offline()" Signed-off-by: Boris Ostrovsky Link: https://lore.kernel.org/r/1588976923-3667-1-git-send-email-boris.ostrovsky@oracle.com Reviewed-by: Juergen Gross Signed-off-by: Boris Ostrovsky --- drivers/xen/cpu_hotplug.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c index ec975decb5def..b96b11e2b571d 100644 --- a/drivers/xen/cpu_hotplug.c +++ b/drivers/xen/cpu_hotplug.c @@ -93,10 +93,8 @@ static int setup_cpu_watcher(struct notifier_block *notifier, (void)register_xenbus_watch(&cpu_watch); for_each_possible_cpu(cpu) { - if (vcpu_online(cpu) == 0) { - device_offline(get_cpu_device(cpu)); - set_cpu_present(cpu, false); - } + if (vcpu_online(cpu) == 0) + disable_hotplug_cpu(cpu); } return NOTIFY_DONE; @@ -119,5 +117,5 @@ static int __init setup_vcpu_hotplug_event(void) return 0; } -arch_initcall(setup_vcpu_hotplug_event); +late_initcall(setup_vcpu_hotplug_event); -- GitLab From 5333875301d64e96d1400592a500f56ac7a33f27 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 10 Apr 2020 19:56:20 +0800 Subject: [PATCH 0604/1971] xen/pvcalls: Make pvcalls_back_global static Fix sparse warning: drivers/xen/pvcalls-back.c:30:3: warning: symbol 'pvcalls_back_global' was not declared. Should it be static? 
Reported-by: Hulk Robot Signed-off-by: YueHaibing Link: https://lore.kernel.org/r/20200410115620.33024-1-yuehaibing@huawei.com Reviewed-by: Stefano Stabellini Signed-off-by: Boris Ostrovsky --- drivers/xen/pvcalls-back.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c index 41a18ece029a3..9eae1fceec1e5 100644 --- a/drivers/xen/pvcalls-back.c +++ b/drivers/xen/pvcalls-back.c @@ -24,7 +24,7 @@ #define PVCALLS_VERSIONS "1" #define MAX_RING_ORDER XENBUS_MAX_RING_GRANT_ORDER -struct pvcalls_back_global { +static struct pvcalls_back_global { struct list_head frontends; struct semaphore frontends_lock; } pvcalls_back_global; -- GitLab From 0df683ff7456eb6401c9035c054b7a58c93dd215 Mon Sep 17 00:00:00 2001 From: Roger Pau Monne Date: Tue, 24 Mar 2020 16:00:14 +0100 Subject: [PATCH 0605/1971] xen: expand BALLOON_MEMORY_HOTPLUG description MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To mention it's also useful for PVH or HVM domains that require mapping foreign memory or grants. [boris: "non PV" instead of "translated" at Juergen's request] Signed-off-by: Roger Pau Monné Link: https://lore.kernel.org/r/20200324150015.50496-1-roger.pau@citrix.com Reviewed-by: Juergen Gross Signed-off-by: Boris Ostrovsky --- drivers/xen/Kconfig | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index 61212fc7f0c70..ce024e57274c9 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig @@ -19,6 +19,9 @@ config XEN_BALLOON_MEMORY_HOTPLUG It is very useful on critical systems which require long run without rebooting. + It's also very useful for non PV domains to obtain unpopulated physical + memory ranges to use in order to map foreign memory or grants. + Memory could be hotplugged in following steps: 1) target domain: ensure that memory auto online policy is in -- GitLab From 2abb65a39bcca0629437bb83e583889f9e1ad696 Mon Sep 17 00:00:00 2001 From: Roger Pau Monne Date: Tue, 24 Mar 2020 16:00:15 +0100 Subject: [PATCH 0606/1971] xen: enable BALLOON_MEMORY_HOTPLUG by default MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Without it a PVH dom0 is mostly useless, as it would balloon down huge amounts of RAM in order get physical address space to map foreign memory and grants, ultimately leading to an out of memory situation. Such option is also needed for HVM or PVH driver domains, since they also require mapping grants into physical memory regions. Suggested-by: Ian Jackson Signed-off-by: Roger Pau Monné Link: https://lore.kernel.org/r/20200324150015.50496-2-roger.pau@citrix.com Reviewed-by: Juergen Gross Signed-off-by: Boris Ostrovsky --- drivers/xen/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index ce024e57274c9..727f11eb46b2b 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig @@ -13,6 +13,7 @@ config XEN_BALLOON config XEN_BALLOON_MEMORY_HOTPLUG bool "Memory hotplug support for Xen balloon driver" depends on XEN_BALLOON && MEMORY_HOTPLUG + default y help Memory hotplug support for Xen balloon driver allows expanding memory available for the system above limit declared at system startup. 
-- GitLab From ab0ef8bac10db48b1045e5d492a21511e54babaf Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 19 May 2020 15:50:37 +0300 Subject: [PATCH 0607/1971] i2c: designware: Get rid of PCI driver specifics in common code Do not spread PCI specifics over common code. It seems to be a layering violation which can be easily avoided. Refactor PCI driver and drop PCI specifics from common code. Signed-off-by: Andy Shevchenko Acked-by: Jarkko Nikula Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-designware-core.h | 1 - drivers/i2c/busses/i2c-designware-pcidrv.c | 24 +++++++++++++--------- 2 files changed, 14 insertions(+), 11 deletions(-) diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h index 1674caf277451..6202f9ee953d9 100644 --- a/drivers/i2c/busses/i2c-designware-core.h +++ b/drivers/i2c/busses/i2c-designware-core.h @@ -232,7 +232,6 @@ struct dw_i2c_dev { struct reset_control *rst; struct i2c_client *slave; u32 (*get_clk_rate_khz) (struct dw_i2c_dev *dev); - struct dw_pci_controller *controller; int cmd_err; struct i2c_msg *msgs; int msgs_num; diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c index c762e5a11e44e..c8808e5855b48 100644 --- a/drivers/i2c/busses/i2c-designware-pcidrv.c +++ b/drivers/i2c/busses/i2c-designware-pcidrv.c @@ -48,10 +48,10 @@ struct dw_pci_controller { u32 bus_num; u32 tx_fifo_depth; u32 rx_fifo_depth; - u32 clk_khz; u32 flags; struct dw_scl_sda_cfg *scl_sda_cfg; int (*setup)(struct pci_dev *pdev, struct dw_pci_controller *c); + u32 (*get_clk_rate_khz)(struct dw_i2c_dev *dev); }; /* Merrifield HCNT/LCNT/SDA hold time */ @@ -80,6 +80,11 @@ static struct dw_scl_sda_cfg hsw_config = { .sda_hold = 0x9, }; +static u32 mfld_get_clk_rate_khz(struct dw_i2c_dev *dev) +{ + return 25000; +} + static int mfld_setup(struct pci_dev *pdev, struct dw_pci_controller *c) { struct dw_i2c_dev *dev = dev_get_drvdata(&pdev->dev); @@ -120,13 +125,18 @@ static int mrfld_setup(struct pci_dev *pdev, struct dw_pci_controller *c) return -ENODEV; } +static u32 ehl_get_clk_rate_khz(struct dw_i2c_dev *dev) +{ + return 100000; +} + static struct dw_pci_controller dw_pci_controllers[] = { [medfield] = { .bus_num = -1, .tx_fifo_depth = 32, .rx_fifo_depth = 32, - .clk_khz = 25000, .setup = mfld_setup, + .get_clk_rate_khz = mfld_get_clk_rate_khz, }, [merrifield] = { .bus_num = -1, @@ -158,7 +168,7 @@ static struct dw_pci_controller dw_pci_controllers[] = { .bus_num = -1, .tx_fifo_depth = 32, .rx_fifo_depth = 32, - .clk_khz = 100000, + .get_clk_rate_khz = ehl_get_clk_rate_khz, }, }; @@ -188,11 +198,6 @@ static int i2c_dw_pci_resume(struct device *dev) static UNIVERSAL_DEV_PM_OPS(i2c_dw_pm_ops, i2c_dw_pci_suspend, i2c_dw_pci_resume, NULL); -static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev) -{ - return dev->controller->clk_khz; -} - static int i2c_dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { @@ -233,8 +238,7 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev, if (r < 0) return r; - dev->controller = controller; - dev->get_clk_rate_khz = i2c_dw_get_clk_rate_khz; + dev->get_clk_rate_khz = controller->get_clk_rate_khz; dev->timings.bus_freq_hz = I2C_MAX_FAST_MODE_FREQ; dev->base = pcim_iomap_table(pdev)[0]; dev->dev = &pdev->dev; -- GitLab From a19f133f694c8f5549d46b43e4b30343b35c3aa5 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 19 May 2020 15:50:38 +0300 Subject: [PATCH 0608/1971] i2c: designware: Include proper headers in i2c-desingware-core.h 
This header is a user of some generic ones, include them respectively. Signed-off-by: Andy Shevchenko Acked-by: Jarkko Nikula Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-designware-core.h | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h index 6202f9ee953d9..94d9ad15133ae 100644 --- a/drivers/i2c/busses/i2c-designware-core.h +++ b/drivers/i2c/busses/i2c-designware-core.h @@ -9,7 +9,13 @@ * Copyright (C) 2009 Provigent Ltd. */ +#include +#include +#include +#include +#include #include +#include #define DW_IC_DEFAULT_FUNCTIONALITY (I2C_FUNC_I2C | \ I2C_FUNC_SMBUS_BYTE | \ @@ -170,6 +176,9 @@ DW_IC_TX_ABRT_TXDATA_NOACK | \ DW_IC_TX_ABRT_GCALL_NOACK) +struct clk; +struct device; +struct reset_control; /** * struct dw_i2c_dev - private i2c-designware data -- GitLab From 20ee1d9020c9233129ba84094e7dd6fe4651cfad Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 19 May 2020 15:50:39 +0300 Subject: [PATCH 0609/1971] i2c: designware: Move i2c_dw_validate_speed() helper to a common code In order to export array supported speed for wider use, move it to a header along with i2c_dw_validate_speed() helper moved to a common code. No functional changes intended. Signed-off-by: Andy Shevchenko Acked-by: Jarkko Nikula Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-designware-common.c | 24 +++++++++++++++++ drivers/i2c/busses/i2c-designware-core.h | 9 +++++++ drivers/i2c/busses/i2c-designware-platdrv.c | 29 ++++----------------- 3 files changed, 38 insertions(+), 24 deletions(-) diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c index c70c6fc09ee3b..9f06567be54ab 100644 --- a/drivers/i2c/busses/i2c-designware-common.c +++ b/drivers/i2c/busses/i2c-designware-common.c @@ -116,6 +116,30 @@ int i2c_dw_set_reg_access(struct dw_i2c_dev *dev) return 0; } +int i2c_dw_validate_speed(struct dw_i2c_dev *dev) +{ + struct i2c_timings *t = &dev->timings; + unsigned int i; + + /* + * Only standard mode at 100kHz, fast mode at 400kHz, + * fast mode plus at 1MHz and high speed mode at 3.4MHz are supported. 
+ */ + for (i = 0; i < ARRAY_SIZE(i2c_dw_supported_speeds); i++) { + if (t->bus_freq_hz == i2c_dw_supported_speeds[i]) + break; + } + if (i == ARRAY_SIZE(i2c_dw_supported_speeds)) { + dev_err(dev->dev, + "%d Hz is unsupported, only 100kHz, 400kHz, 1MHz and 3.4MHz are supported\n", + t->bus_freq_hz); + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL_GPL(i2c_dw_validate_speed); + u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset) { /* diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h index 94d9ad15133ae..77c8aa422268c 100644 --- a/drivers/i2c/busses/i2c-designware-core.h +++ b/drivers/i2c/busses/i2c-designware-core.h @@ -359,3 +359,12 @@ extern int i2c_dw_probe_lock_support(struct dw_i2c_dev *dev); #else static inline int i2c_dw_probe_lock_support(struct dw_i2c_dev *dev) { return 0; } #endif + +static __maybe_unused const u32 i2c_dw_supported_speeds[] = { + I2C_MAX_HIGH_SPEED_MODE_FREQ, + I2C_MAX_FAST_MODE_PLUS_FREQ, + I2C_MAX_FAST_MODE_FREQ, + I2C_MAX_STANDARD_MODE_FREQ, +}; + +int i2c_dw_validate_speed(struct dw_i2c_dev *dev); diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 01db634461b60..d6c03d7179c7a 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -192,13 +192,6 @@ static void dw_i2c_plat_pm_cleanup(struct dw_i2c_dev *dev) pm_runtime_put_noidle(dev->dev); } -static const u32 supported_speeds[] = { - I2C_MAX_HIGH_SPEED_MODE_FREQ, - I2C_MAX_FAST_MODE_PLUS_FREQ, - I2C_MAX_FAST_MODE_FREQ, - I2C_MAX_STANDARD_MODE_FREQ, -}; - static int dw_i2c_plat_probe(struct platform_device *pdev) { struct dw_i2c_platform_data *pdata = dev_get_platdata(&pdev->dev); @@ -241,11 +234,11 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) * Some DSTDs use a non standard speed, round down to the lowest * standard speed. */ - for (i = 0; i < ARRAY_SIZE(supported_speeds); i++) { - if (acpi_speed >= supported_speeds[i]) + for (i = 0; i < ARRAY_SIZE(i2c_dw_supported_speeds); i++) { + if (acpi_speed >= i2c_dw_supported_speeds[i]) break; } - acpi_speed = i < ARRAY_SIZE(supported_speeds) ? supported_speeds[i] : 0; + acpi_speed = i < ARRAY_SIZE(i2c_dw_supported_speeds) ? i2c_dw_supported_speeds[i] : 0; /* * Find bus speed from the "clock-frequency" device property, ACPI @@ -266,21 +259,9 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) if (has_acpi_companion(&pdev->dev)) dw_i2c_acpi_configure(pdev); - /* - * Only standard mode at 100kHz, fast mode at 400kHz, - * fast mode plus at 1MHz and high speed mode at 3.4MHz are supported. - */ - for (i = 0; i < ARRAY_SIZE(supported_speeds); i++) { - if (t->bus_freq_hz == supported_speeds[i]) - break; - } - if (i == ARRAY_SIZE(supported_speeds)) { - dev_err(&pdev->dev, - "%d Hz is unsupported, only 100kHz, 400kHz, 1MHz and 3.4MHz are supported\n", - t->bus_freq_hz); - ret = -EINVAL; + ret = i2c_dw_validate_speed(dev); + if (ret) goto exit_reset; - } ret = i2c_dw_probe_lock_support(dev); if (ret) -- GitLab From 462cfcb4aa1c92239cb16177fd3ceb65326955ff Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 19 May 2020 15:50:40 +0300 Subject: [PATCH 0610/1971] i2c: designware: Drop unneeded condition in i2c_dw_validate_speed() We may bailout directly from the loop instead of breaking it and testing a loop counter. This also gives advantages such as decreased indentation level along with dropped unneeded condition. 
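For illustration only, a minimal sketch of the resulting shape of the check; this is not the driver code itself, just the pattern of returning directly from the lookup loop instead of breaking out and re-testing the loop index:

    #include <linux/kernel.h>   /* ARRAY_SIZE() */

    static const unsigned int supported_speeds[] = {
            100000, 400000, 1000000, 3400000,
    };

    static int validate_speed(unsigned int bus_freq_hz)
    {
            unsigned int i;

            for (i = 0; i < ARRAY_SIZE(supported_speeds); i++) {
                    if (bus_freq_hz == supported_speeds[i])
                            return 0;       /* bail out directly from the loop */
            }

            /* no post-loop "i == ARRAY_SIZE()" test needed */
            return -EINVAL;
    }
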
Signed-off-by: Andy Shevchenko Acked-by: Jarkko Nikula Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-designware-common.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c index 9f06567be54ab..2fd5372b12373 100644 --- a/drivers/i2c/busses/i2c-designware-common.c +++ b/drivers/i2c/busses/i2c-designware-common.c @@ -127,16 +127,14 @@ int i2c_dw_validate_speed(struct dw_i2c_dev *dev) */ for (i = 0; i < ARRAY_SIZE(i2c_dw_supported_speeds); i++) { if (t->bus_freq_hz == i2c_dw_supported_speeds[i]) - break; - } - if (i == ARRAY_SIZE(i2c_dw_supported_speeds)) { - dev_err(dev->dev, - "%d Hz is unsupported, only 100kHz, 400kHz, 1MHz and 3.4MHz are supported\n", - t->bus_freq_hz); - return -EINVAL; + return 0; } - return 0; + dev_err(dev->dev, + "%d Hz is unsupported, only 100kHz, 400kHz, 1MHz and 3.4MHz are supported\n", + t->bus_freq_hz); + + return -EINVAL; } EXPORT_SYMBOL_GPL(i2c_dw_validate_speed); -- GitLab From f9288fcc5c6154959de4dd83be1b91abcf5e0c17 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 19 May 2020 15:50:41 +0300 Subject: [PATCH 0611/1971] i2c: designware: Move ACPI parts into common module For possible code reuse in the future, move ACPI parts into common module. Signed-off-by: Andy Shevchenko Acked-by: Jarkko Nikula Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-designware-common.c | 135 +++++++++++++++++++- drivers/i2c/busses/i2c-designware-core.h | 15 ++- drivers/i2c/busses/i2c-designware-platdrv.c | 111 +--------------- 3 files changed, 142 insertions(+), 119 deletions(-) diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c index 2fd5372b12373..e1697ed8b54a5 100644 --- a/drivers/i2c/busses/i2c-designware-common.c +++ b/drivers/i2c/busses/i2c-designware-common.c @@ -8,17 +8,21 @@ * Copyright (C) 2007 MontaVista Software Inc. * Copyright (C) 2009 Provigent Ltd. */ +#include #include #include -#include -#include +#include #include +#include +#include #include #include #include +#include #include #include #include +#include #include "i2c-designware-core.h" @@ -116,6 +120,13 @@ int i2c_dw_set_reg_access(struct dw_i2c_dev *dev) return 0; } +static const u32 supported_speeds[] = { + I2C_MAX_HIGH_SPEED_MODE_FREQ, + I2C_MAX_FAST_MODE_PLUS_FREQ, + I2C_MAX_FAST_MODE_FREQ, + I2C_MAX_STANDARD_MODE_FREQ, +}; + int i2c_dw_validate_speed(struct dw_i2c_dev *dev) { struct i2c_timings *t = &dev->timings; @@ -125,8 +136,8 @@ int i2c_dw_validate_speed(struct dw_i2c_dev *dev) * Only standard mode at 100kHz, fast mode at 400kHz, * fast mode plus at 1MHz and high speed mode at 3.4MHz are supported. */ - for (i = 0; i < ARRAY_SIZE(i2c_dw_supported_speeds); i++) { - if (t->bus_freq_hz == i2c_dw_supported_speeds[i]) + for (i = 0; i < ARRAY_SIZE(supported_speeds); i++) { + if (t->bus_freq_hz == supported_speeds[i]) return 0; } @@ -138,6 +149,122 @@ int i2c_dw_validate_speed(struct dw_i2c_dev *dev) } EXPORT_SYMBOL_GPL(i2c_dw_validate_speed); +#ifdef CONFIG_ACPI + +#include + +/* + * The HCNT/LCNT information coming from ACPI should be the most accurate + * for given platform. However, some systems get it wrong. On such systems + * we get better results by calculating those based on the input clock. 
+ */ +static const struct dmi_system_id i2c_dw_no_acpi_params[] = { + { + .ident = "Dell Inspiron 7348", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7348"), + }, + }, + {} +}; + +static void i2c_dw_acpi_params(struct device *device, char method[], + u16 *hcnt, u16 *lcnt, u32 *sda_hold) +{ + struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER }; + acpi_handle handle = ACPI_HANDLE(device); + union acpi_object *obj; + + if (dmi_check_system(i2c_dw_no_acpi_params)) + return; + + if (ACPI_FAILURE(acpi_evaluate_object(handle, method, NULL, &buf))) + return; + + obj = (union acpi_object *)buf.pointer; + if (obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 3) { + const union acpi_object *objs = obj->package.elements; + + *hcnt = (u16)objs[0].integer.value; + *lcnt = (u16)objs[1].integer.value; + *sda_hold = (u32)objs[2].integer.value; + } + + kfree(buf.pointer); +} + +int i2c_dw_acpi_configure(struct device *device) +{ + struct dw_i2c_dev *dev = dev_get_drvdata(device); + struct i2c_timings *t = &dev->timings; + u32 ss_ht = 0, fp_ht = 0, hs_ht = 0, fs_ht = 0; + + dev->tx_fifo_depth = 32; + dev->rx_fifo_depth = 32; + + /* + * Try to get SDA hold time and *CNT values from an ACPI method for + * selected speed modes. + */ + i2c_dw_acpi_params(device, "SSCN", &dev->ss_hcnt, &dev->ss_lcnt, &ss_ht); + i2c_dw_acpi_params(device, "FPCN", &dev->fp_hcnt, &dev->fp_lcnt, &fp_ht); + i2c_dw_acpi_params(device, "HSCN", &dev->hs_hcnt, &dev->hs_lcnt, &hs_ht); + i2c_dw_acpi_params(device, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt, &fs_ht); + + switch (t->bus_freq_hz) { + case I2C_MAX_STANDARD_MODE_FREQ: + dev->sda_hold_time = ss_ht; + break; + case I2C_MAX_FAST_MODE_PLUS_FREQ: + dev->sda_hold_time = fp_ht; + break; + case I2C_MAX_HIGH_SPEED_MODE_FREQ: + dev->sda_hold_time = hs_ht; + break; + case I2C_MAX_FAST_MODE_FREQ: + default: + dev->sda_hold_time = fs_ht; + break; + } + + return 0; +} +EXPORT_SYMBOL_GPL(i2c_dw_acpi_configure); + +void i2c_dw_acpi_adjust_bus_speed(struct device *device) +{ + struct dw_i2c_dev *dev = dev_get_drvdata(device); + struct i2c_timings *t = &dev->timings; + u32 acpi_speed; + int i; + + acpi_speed = i2c_acpi_find_bus_speed(device); + /* + * Some DSTDs use a non standard speed, round down to the lowest + * standard speed. + */ + for (i = 0; i < ARRAY_SIZE(supported_speeds); i++) { + if (acpi_speed >= supported_speeds[i]) + break; + } + acpi_speed = i < ARRAY_SIZE(supported_speeds) ? supported_speeds[i] : 0; + + /* + * Find bus speed from the "clock-frequency" device property, ACPI + * or by using fast mode if neither is set. 
+ */ + if (acpi_speed && t->bus_freq_hz) + t->bus_freq_hz = min(t->bus_freq_hz, acpi_speed); + else if (acpi_speed || t->bus_freq_hz) + t->bus_freq_hz = max(t->bus_freq_hz, acpi_speed); + else + t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ; +} +EXPORT_SYMBOL_GPL(i2c_dw_acpi_adjust_bus_speed); + +#endif /* CONFIG_ACPI */ + u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset) { /* diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h index 77c8aa422268c..150de5e5c31bd 100644 --- a/drivers/i2c/busses/i2c-designware-core.h +++ b/drivers/i2c/busses/i2c-designware-core.h @@ -360,11 +360,12 @@ extern int i2c_dw_probe_lock_support(struct dw_i2c_dev *dev); static inline int i2c_dw_probe_lock_support(struct dw_i2c_dev *dev) { return 0; } #endif -static __maybe_unused const u32 i2c_dw_supported_speeds[] = { - I2C_MAX_HIGH_SPEED_MODE_FREQ, - I2C_MAX_FAST_MODE_PLUS_FREQ, - I2C_MAX_FAST_MODE_FREQ, - I2C_MAX_STANDARD_MODE_FREQ, -}; - int i2c_dw_validate_speed(struct dw_i2c_dev *dev); + +#if IS_ENABLED(CONFIG_ACPI) +int i2c_dw_acpi_configure(struct device *device); +void i2c_dw_acpi_adjust_bus_speed(struct device *device); +#else +static inline int i2c_dw_acpi_configure(struct device *device) { return -ENODEV; } +static inline void i2c_dw_acpi_adjust_bus_speed(struct device *device) {} +#endif diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index d6c03d7179c7a..f6d2c96e35cef 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -12,7 +12,6 @@ #include #include #include -#include #include #include #include @@ -39,84 +38,6 @@ static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev) } #ifdef CONFIG_ACPI -/* - * The HCNT/LCNT information coming from ACPI should be the most accurate - * for given platform. However, some systems get it wrong. On such systems - * we get better results by calculating those based on the input clock. - */ -static const struct dmi_system_id dw_i2c_no_acpi_params[] = { - { - .ident = "Dell Inspiron 7348", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7348"), - }, - }, - { } -}; - -static void dw_i2c_acpi_params(struct platform_device *pdev, char method[], - u16 *hcnt, u16 *lcnt, u32 *sda_hold) -{ - struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER }; - acpi_handle handle = ACPI_HANDLE(&pdev->dev); - union acpi_object *obj; - - if (dmi_check_system(dw_i2c_no_acpi_params)) - return; - - if (ACPI_FAILURE(acpi_evaluate_object(handle, method, NULL, &buf))) - return; - - obj = (union acpi_object *)buf.pointer; - if (obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 3) { - const union acpi_object *objs = obj->package.elements; - - *hcnt = (u16)objs[0].integer.value; - *lcnt = (u16)objs[1].integer.value; - *sda_hold = (u32)objs[2].integer.value; - } - - kfree(buf.pointer); -} - -static int dw_i2c_acpi_configure(struct platform_device *pdev) -{ - struct dw_i2c_dev *dev = platform_get_drvdata(pdev); - struct i2c_timings *t = &dev->timings; - u32 ss_ht = 0, fp_ht = 0, hs_ht = 0, fs_ht = 0; - - dev->tx_fifo_depth = 32; - dev->rx_fifo_depth = 32; - - /* - * Try to get SDA hold time and *CNT values from an ACPI method for - * selected speed modes. 
- */ - dw_i2c_acpi_params(pdev, "SSCN", &dev->ss_hcnt, &dev->ss_lcnt, &ss_ht); - dw_i2c_acpi_params(pdev, "FPCN", &dev->fp_hcnt, &dev->fp_lcnt, &fp_ht); - dw_i2c_acpi_params(pdev, "HSCN", &dev->hs_hcnt, &dev->hs_lcnt, &hs_ht); - dw_i2c_acpi_params(pdev, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt, &fs_ht); - - switch (t->bus_freq_hz) { - case I2C_MAX_STANDARD_MODE_FREQ: - dev->sda_hold_time = ss_ht; - break; - case I2C_MAX_FAST_MODE_PLUS_FREQ: - dev->sda_hold_time = fp_ht; - break; - case I2C_MAX_HIGH_SPEED_MODE_FREQ: - dev->sda_hold_time = hs_ht; - break; - case I2C_MAX_FAST_MODE_FREQ: - default: - dev->sda_hold_time = fs_ht; - break; - } - - return 0; -} - static const struct acpi_device_id dw_i2c_acpi_match[] = { { "INT33C2", 0 }, { "INT33C3", 0 }, @@ -134,11 +55,6 @@ static const struct acpi_device_id dw_i2c_acpi_match[] = { { } }; MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match); -#else -static inline int dw_i2c_acpi_configure(struct platform_device *pdev) -{ - return -ENODEV; -} #endif #ifdef CONFIG_OF @@ -198,8 +114,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) struct i2c_adapter *adap; struct dw_i2c_dev *dev; struct i2c_timings *t; - u32 acpi_speed; - int i, irq, ret; + int irq, ret; irq = platform_get_irq(pdev, 0); if (irq < 0) @@ -229,27 +144,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) else i2c_parse_fw_timings(&pdev->dev, t, false); - acpi_speed = i2c_acpi_find_bus_speed(&pdev->dev); - /* - * Some DSTDs use a non standard speed, round down to the lowest - * standard speed. - */ - for (i = 0; i < ARRAY_SIZE(i2c_dw_supported_speeds); i++) { - if (acpi_speed >= i2c_dw_supported_speeds[i]) - break; - } - acpi_speed = i < ARRAY_SIZE(i2c_dw_supported_speeds) ? i2c_dw_supported_speeds[i] : 0; - - /* - * Find bus speed from the "clock-frequency" device property, ACPI - * or by using fast mode if neither is set. - */ - if (acpi_speed && t->bus_freq_hz) - t->bus_freq_hz = min(t->bus_freq_hz, acpi_speed); - else if (acpi_speed || t->bus_freq_hz) - t->bus_freq_hz = max(t->bus_freq_hz, acpi_speed); - else - t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ; + i2c_dw_acpi_adjust_bus_speed(&pdev->dev); dev->flags |= (uintptr_t)device_get_match_data(&pdev->dev); @@ -257,7 +152,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) dw_i2c_of_configure(pdev); if (has_acpi_companion(&pdev->dev)) - dw_i2c_acpi_configure(pdev); + i2c_dw_acpi_configure(&pdev->dev); ret = i2c_dw_validate_speed(dev); if (ret) -- GitLab From 64d0a0755c7deeb600d8ee287cfb84469aa37ac8 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 19 May 2020 15:50:42 +0300 Subject: [PATCH 0612/1971] i2c: designware: Read counters from ACPI for PCI driver PCI devices may have been backed with ACPI handle which supplies an additional information to the drivers, such as counters. Call for ACPI configuration from PCI driver in order to utilize counters provided by ACPI. 
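For reference, a simplified sketch of the probe-time ordering this enables in the PCI driver, using the helpers moved to common code by the previous patches; surrounding setup and most error handling are omitted, so treat it as illustrative rather than the actual i2c_dw_pci_probe() body:

            /* pick a standard bus speed, possibly taken from ACPI */
            i2c_dw_acpi_adjust_bus_speed(&pdev->dev);

            /* let ACPI provide *CNT counters and SDA hold times, if present */
            if (has_acpi_companion(&pdev->dev))
                    i2c_dw_acpi_configure(&pdev->dev);

            /* reject anything that is not a standard I2C speed */
            r = i2c_dw_validate_speed(dev);
            if (r)
                    return r;
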
Signed-off-by: Andy Shevchenko Acked-by: Jarkko Nikula Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-designware-pcidrv.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c index c8808e5855b48..3664d76bb9766 100644 --- a/drivers/i2c/busses/i2c-designware-pcidrv.c +++ b/drivers/i2c/busses/i2c-designware-pcidrv.c @@ -255,6 +255,17 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev, } } + i2c_dw_acpi_adjust_bus_speed(&pdev->dev); + + if (has_acpi_companion(&pdev->dev)) + i2c_dw_acpi_configure(&pdev->dev); + + r = i2c_dw_validate_speed(dev); + if (r) { + pci_free_irq_vectors(pdev); + return r; + } + i2c_dw_configure(dev); if (controller->scl_sda_cfg) { -- GitLab From 3f35064a7cfef4ed8d25cdb16da0abfbbd525f63 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 19 May 2020 15:50:43 +0300 Subject: [PATCH 0613/1971] i2c: designware: Drop hard coded FIFO depth assignment It's not clear why the commit fe20ff5c7e9c ("i2c-designware: Add support for Designware core behind PCI devices.") followed by commit b61b14154b19 ("i2c-designware: add support for Intel Lynxpoint") chose to hard code FIFO depth size. The FIFO depth on all hardware, I have tested on, can be nicely detected automatically. Thus, we may safely drop hard coded FIFO sizes from the driver. Signed-off-by: Andy Shevchenko Acked-by: Jarkko Nikula Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-designware-common.c | 3 --- drivers/i2c/busses/i2c-designware-pcidrv.c | 17 ----------------- 2 files changed, 20 deletions(-) diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c index e1697ed8b54a5..ed302342f8db1 100644 --- a/drivers/i2c/busses/i2c-designware-common.c +++ b/drivers/i2c/busses/i2c-designware-common.c @@ -200,9 +200,6 @@ int i2c_dw_acpi_configure(struct device *device) struct i2c_timings *t = &dev->timings; u32 ss_ht = 0, fp_ht = 0, hs_ht = 0, fs_ht = 0; - dev->tx_fifo_depth = 32; - dev->rx_fifo_depth = 32; - /* * Try to get SDA hold time and *CNT values from an ACPI method for * selected speed modes. 
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c index 3664d76bb9766..11a5e4751eab7 100644 --- a/drivers/i2c/busses/i2c-designware-pcidrv.c +++ b/drivers/i2c/busses/i2c-designware-pcidrv.c @@ -46,8 +46,6 @@ struct dw_scl_sda_cfg { struct dw_pci_controller { u32 bus_num; - u32 tx_fifo_depth; - u32 rx_fifo_depth; u32 flags; struct dw_scl_sda_cfg *scl_sda_cfg; int (*setup)(struct pci_dev *pdev, struct dw_pci_controller *c); @@ -133,41 +131,29 @@ static u32 ehl_get_clk_rate_khz(struct dw_i2c_dev *dev) static struct dw_pci_controller dw_pci_controllers[] = { [medfield] = { .bus_num = -1, - .tx_fifo_depth = 32, - .rx_fifo_depth = 32, .setup = mfld_setup, .get_clk_rate_khz = mfld_get_clk_rate_khz, }, [merrifield] = { .bus_num = -1, - .tx_fifo_depth = 64, - .rx_fifo_depth = 64, .scl_sda_cfg = &mrfld_config, .setup = mrfld_setup, }, [baytrail] = { .bus_num = -1, - .tx_fifo_depth = 32, - .rx_fifo_depth = 32, .scl_sda_cfg = &byt_config, }, [haswell] = { .bus_num = -1, - .tx_fifo_depth = 32, - .rx_fifo_depth = 32, .scl_sda_cfg = &hsw_config, }, [cherrytrail] = { .bus_num = -1, - .tx_fifo_depth = 32, - .rx_fifo_depth = 32, .flags = MODEL_CHERRYTRAIL, .scl_sda_cfg = &byt_config, }, [elkhartlake] = { .bus_num = -1, - .tx_fifo_depth = 32, - .rx_fifo_depth = 32, .get_clk_rate_khz = ehl_get_clk_rate_khz, }, }; @@ -277,9 +263,6 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev, dev->sda_hold_time = cfg->sda_hold; } - dev->tx_fifo_depth = controller->tx_fifo_depth; - dev->rx_fifo_depth = controller->rx_fifo_depth; - adap = &dev->adapter; adap->owner = THIS_MODULE; adap->class = 0; -- GitLab From 7a4e63cb0905672fd52e7316a885f19d4aeed976 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Fri, 22 May 2020 16:56:58 +0200 Subject: [PATCH 0614/1971] Revert "i2c: core: support bus regulator controlling in adapter" This reverts commit 6fe12cdbcfe35ad4726a619a9546822d34fc934c. Testing in linux-next showed it needs some more time. 
Signed-off-by: Wolfram Sang --- drivers/i2c/i2c-core-base.c | 84 ------------------------------------- include/linux/i2c.h | 2 - 2 files changed, 86 deletions(-) diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index ac7edcc1d5ee1..9987e72752c4c 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -313,14 +313,12 @@ static int i2c_smbus_host_notify_to_irq(const struct i2c_client *client) static int i2c_device_probe(struct device *dev) { struct i2c_client *client = i2c_verify_client(dev); - struct i2c_adapter *adap; struct i2c_driver *driver; int status; if (!client) return 0; - adap = client->adapter; driver = to_i2c_driver(dev->driver); client->irq = client->init_irq; @@ -386,12 +384,6 @@ static int i2c_device_probe(struct device *dev) dev_dbg(dev, "probe\n"); - status = regulator_enable(adap->bus_regulator); - if (status < 0) { - dev_err(&adap->dev, "Failed to enable power regulator\n"); - goto err_clear_wakeup_irq; - } - status = of_clk_set_defaults(dev->of_node, false); if (status < 0) goto err_clear_wakeup_irq; @@ -432,14 +424,12 @@ put_sync_adapter: static int i2c_device_remove(struct device *dev) { struct i2c_client *client = i2c_verify_client(dev); - struct i2c_adapter *adap; struct i2c_driver *driver; int status = 0; if (!client || !dev->driver) return 0; - adap = client->adapter; driver = to_i2c_driver(dev->driver); if (driver->remove) { dev_dbg(dev, "remove\n"); @@ -447,8 +437,6 @@ static int i2c_device_remove(struct device *dev) } dev_pm_domain_detach(&client->dev, true); - if (!pm_runtime_status_suspended(&client->dev)) - regulator_disable(adap->bus_regulator); dev_pm_clear_wake_irq(&client->dev); device_init_wakeup(&client->dev, false); @@ -460,72 +448,6 @@ static int i2c_device_remove(struct device *dev) return status; } -#ifdef CONFIG_PM_SLEEP -static int i2c_resume_early(struct device *dev) -{ - struct i2c_client *client = i2c_verify_client(dev); - struct i2c_adapter *adap = client->adapter; - int err; - - if (!pm_runtime_status_suspended(&client->dev)) { - err = regulator_enable(adap->bus_regulator); - if (err) - return err; - } - - return pm_generic_resume_early(&client->dev); -} - -static int i2c_suspend_late(struct device *dev) -{ - struct i2c_client *client = i2c_verify_client(dev); - struct i2c_adapter *adap = client->adapter; - int err; - - err = pm_generic_suspend_late(&client->dev); - if (err) - return err; - - if (!pm_runtime_status_suspended(&client->dev)) - return regulator_disable(adap->bus_regulator); - - return 0; -} -#endif - -#ifdef CONFIG_PM -static int i2c_runtime_resume(struct device *dev) -{ - struct i2c_client *client = i2c_verify_client(dev); - struct i2c_adapter *adap = client->adapter; - int err; - - err = regulator_enable(adap->bus_regulator); - if (err) - return err; - - return pm_generic_runtime_resume(&client->dev); -} - -static int i2c_runtime_suspend(struct device *dev) -{ - struct i2c_client *client = i2c_verify_client(dev); - struct i2c_adapter *adap = client->adapter; - int err; - - err = pm_generic_runtime_suspend(&client->dev); - if (err) - return err; - - return regulator_disable(adap->bus_regulator); -} -#endif - -static const struct dev_pm_ops i2c_device_pm = { - SET_LATE_SYSTEM_SLEEP_PM_OPS(i2c_suspend_late, i2c_resume_early) - SET_RUNTIME_PM_OPS(i2c_runtime_suspend, i2c_runtime_resume, NULL) -}; - static void i2c_device_shutdown(struct device *dev) { struct i2c_client *client = i2c_verify_client(dev); @@ -583,7 +505,6 @@ struct bus_type i2c_bus_type = { .probe = i2c_device_probe, 
.remove = i2c_device_remove, .shutdown = i2c_device_shutdown, - .pm = &i2c_device_pm, }; EXPORT_SYMBOL_GPL(i2c_bus_type); @@ -1422,11 +1343,6 @@ static int i2c_register_adapter(struct i2c_adapter *adap) if (res) goto out_reg; - adap->bus_regulator = devm_regulator_get(&adap->dev, "bus"); - if (IS_ERR(adap->bus_regulator)) { - res = PTR_ERR(adap->bus_regulator); - goto out_reg; - } dev_dbg(&adap->dev, "adapter [%s] registered\n", adap->name); pm_runtime_no_callbacks(&adap->dev); diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 7433aba207bd8..49d29054e6571 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -15,7 +15,6 @@ #include /* for struct device */ #include /* for completion */ #include -#include #include #include /* for Host Notify IRQ */ #include /* for struct device_node */ @@ -716,7 +715,6 @@ struct i2c_adapter { const struct i2c_adapter_quirks *quirks; struct irq_domain *host_notify_domain; - struct regulator *bus_regulator; }; #define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev) -- GitLab From 2ea81c0064fe2bbf7c66d3ddeb9f5c5ba0befdc4 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Fri, 22 May 2020 16:57:59 +0200 Subject: [PATCH 0615/1971] Revert "dt-binding: i2c: add bus-supply property" This reverts commit 6aab46bc52a8f579879d491c9d8062e03caa5c61. Testing in linux-next showed it needs some more time. Signed-off-by: Wolfram Sang --- Documentation/devicetree/bindings/i2c/i2c.txt | 3 --- 1 file changed, 3 deletions(-) diff --git a/Documentation/devicetree/bindings/i2c/i2c.txt b/Documentation/devicetree/bindings/i2c/i2c.txt index 7aff425c099bd..819436b48faef 100644 --- a/Documentation/devicetree/bindings/i2c/i2c.txt +++ b/Documentation/devicetree/bindings/i2c/i2c.txt @@ -23,9 +23,6 @@ Optional properties (per bus) These properties may not be supported by all drivers. However, if a driver wants to support one of the below features, it should adapt these bindings. -- bus-supply - phandle to the regulator that provides power to SCL/SDA. - - clock-frequency frequency of bus clock in Hz. -- GitLab From 79f7ab3a64fd92b86ebc992b333815400942a3b7 Mon Sep 17 00:00:00 2001 From: Tang Bin Date: Wed, 15 Apr 2020 21:57:34 +0800 Subject: [PATCH 0616/1971] i2c: efm32: Omit superfluous error message in efm32_i2c_probe() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In the function efm32_i2c_probe(),when get irq failed,the function platform_get_irq() logs an error message,so remove redundant message here. Signed-off-by: Shengju Zhang Signed-off-by: Tang Bin Acked-by: Uwe Kleine-König Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-efm32.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/i2c/busses/i2c-efm32.c b/drivers/i2c/busses/i2c-efm32.c index 18cca8f56da87..9d28de0e87232 100644 --- a/drivers/i2c/busses/i2c-efm32.c +++ b/drivers/i2c/busses/i2c-efm32.c @@ -352,7 +352,6 @@ static int efm32_i2c_probe(struct platform_device *pdev) ret = platform_get_irq(pdev, 0); if (ret <= 0) { - dev_err(&pdev->dev, "failed to get irq (%d)\n", ret); if (!ret) ret = -EINVAL; return ret; -- GitLab From d1fdeb314e82fb782ab918ea6bfe41b429343725 Mon Sep 17 00:00:00 2001 From: Tang Bin Date: Wed, 15 Apr 2020 22:06:40 +0800 Subject: [PATCH 0617/1971] i2c: efm32: Avoid unnecessary check in efm32_i2c_probe() The function efm32_i2c_probe() is only called with an openfirmware platform device.Therefore there is no need to check that it has an openfirmware node. 
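Taken together, the two efm32_i2c_probe() cleanups above leave the usual minimal IRQ lookup; a short illustrative sketch of that pattern (not an additional change in this patch):

            /* platform_get_irq() already prints an error on failure */
            ret = platform_get_irq(pdev, 0);
            if (ret <= 0)
                    return ret ? ret : -EINVAL;
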
Signed-off-by: Shengju Zhang Signed-off-by: Tang Bin Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-efm32.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/i2c/busses/i2c-efm32.c b/drivers/i2c/busses/i2c-efm32.c index 9d28de0e87232..838ce0947191f 100644 --- a/drivers/i2c/busses/i2c-efm32.c +++ b/drivers/i2c/busses/i2c-efm32.c @@ -312,9 +312,6 @@ static int efm32_i2c_probe(struct platform_device *pdev) int ret; u32 clkdiv; - if (!np) - return -EINVAL; - ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); if (!ddata) return -ENOMEM; -- GitLab From 61016db15b8e20f371352db6a75b044ec3183fe7 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Sun, 17 May 2020 21:47:43 -0700 Subject: [PATCH 0618/1971] selftests/exec: Verify execve of non-regular files fail Add a named pipe as an exec target to make sure that non-regular files are rejected by execve() with EACCES. This can help verify commit 73601ea5b7b1 ("fs/open.c: allow opening only regular files during execve()"). Signed-off-by: Kees Cook Signed-off-by: Shuah Khan --- tools/testing/selftests/exec/.gitignore | 1 + tools/testing/selftests/exec/Makefile | 2 +- tools/testing/selftests/exec/execveat.c | 8 ++++++++ 3 files changed, 10 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/exec/.gitignore b/tools/testing/selftests/exec/.gitignore index c078ece12ff07..94b02a18f230b 100644 --- a/tools/testing/selftests/exec/.gitignore +++ b/tools/testing/selftests/exec/.gitignore @@ -9,3 +9,4 @@ execveat.ephemeral execveat.denatured /recursion-depth xxxxxxxx* +pipe diff --git a/tools/testing/selftests/exec/Makefile b/tools/testing/selftests/exec/Makefile index 33339e31e3652..cfafa1f8a2fac 100644 --- a/tools/testing/selftests/exec/Makefile +++ b/tools/testing/selftests/exec/Makefile @@ -4,7 +4,7 @@ CFLAGS += -Wno-nonnull CFLAGS += -D_GNU_SOURCE TEST_GEN_PROGS := execveat -TEST_GEN_FILES := execveat.symlink execveat.denatured script subdir +TEST_GEN_FILES := execveat.symlink execveat.denatured script subdir pipe # Makefile is a run-time dependency, since it's accessed by the execveat test TEST_FILES := Makefile diff --git a/tools/testing/selftests/exec/execveat.c b/tools/testing/selftests/exec/execveat.c index cbb6efbdb786f..67bf7254a48f0 100644 --- a/tools/testing/selftests/exec/execveat.c +++ b/tools/testing/selftests/exec/execveat.c @@ -5,7 +5,9 @@ * Selftests for execveat(2). */ +#ifndef _GNU_SOURCE #define _GNU_SOURCE /* to get O_PATH, AT_EMPTY_PATH */ +#endif #include #include #include @@ -311,6 +313,10 @@ static int run_tests(void) fail += check_execveat_fail(AT_FDCWD, fullname_symlink, AT_SYMLINK_NOFOLLOW, ELOOP); + /* Non-regular file failure */ + fail += check_execveat_fail(dot_dfd, "pipe", 0, EACCES); + unlink("pipe"); + /* Shell script wrapping executable file: */ /* dfd + path */ fail += check_execveat(subdir_dfd, "../script", 0); @@ -384,6 +390,8 @@ static void prerequisites(void) fd = open("subdir.ephemeral/script", O_RDWR|O_CREAT|O_TRUNC, 0755); write(fd, script, strlen(script)); close(fd); + + mkfifo("pipe", 0755); } int main(int argc, char **argv) -- GitLab From babf8a978d497af2fe4111cc80866b9e436bf785 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Thu, 21 May 2020 21:37:05 +0100 Subject: [PATCH 0619/1971] selftests: vdso: Rename vdso_test to vdso_test_gettimeofday Currently the vDSO kselftests have a test called vdso_test which tests the vDSO implementation of gettimeofday(). In preparation for adding tests for other vDSO functionality rename this test to reflect what's going on. 
Signed-off-by: Mark Brown Signed-off-by: Shuah Khan --- tools/testing/selftests/vDSO/.gitignore | 1 + tools/testing/selftests/vDSO/Makefile | 4 ++-- .../selftests/vDSO/{vdso_test.c => vdso_test_gettimeofday.c} | 5 +++-- 3 files changed, 6 insertions(+), 4 deletions(-) rename tools/testing/selftests/vDSO/{vdso_test.c => vdso_test_gettimeofday.c} (89%) diff --git a/tools/testing/selftests/vDSO/.gitignore b/tools/testing/selftests/vDSO/.gitignore index 382cfb39a1a34..74f49bd5f6dd8 100644 --- a/tools/testing/selftests/vDSO/.gitignore +++ b/tools/testing/selftests/vDSO/.gitignore @@ -1,3 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only vdso_test +vdso_test_gettimeofday vdso_standalone_test_x86 diff --git a/tools/testing/selftests/vDSO/Makefile b/tools/testing/selftests/vDSO/Makefile index 9e03d61f52fdf..ae15d700b62ee 100644 --- a/tools/testing/selftests/vDSO/Makefile +++ b/tools/testing/selftests/vDSO/Makefile @@ -4,7 +4,7 @@ include ../lib.mk uname_M := $(shell uname -m 2>/dev/null || echo not) ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/) -TEST_GEN_PROGS := $(OUTPUT)/vdso_test +TEST_GEN_PROGS := $(OUTPUT)/vdso_test_gettimeofday ifeq ($(ARCH),x86) TEST_GEN_PROGS += $(OUTPUT)/vdso_standalone_test_x86 endif @@ -17,7 +17,7 @@ LDLIBS += -lgcc_s endif all: $(TEST_GEN_PROGS) -$(OUTPUT)/vdso_test: parse_vdso.c vdso_test.c +$(OUTPUT)/vdso_test_gettimeofday: parse_vdso.c vdso_test_gettimeofday.c $(OUTPUT)/vdso_standalone_test_x86: vdso_standalone_test_x86.c parse_vdso.c $(CC) $(CFLAGS) $(CFLAGS_vdso_standalone_test_x86) \ vdso_standalone_test_x86.c parse_vdso.c \ diff --git a/tools/testing/selftests/vDSO/vdso_test.c b/tools/testing/selftests/vDSO/vdso_test_gettimeofday.c similarity index 89% rename from tools/testing/selftests/vDSO/vdso_test.c rename to tools/testing/selftests/vDSO/vdso_test_gettimeofday.c index 719d5a6bd664c..511c0dc5e47ef 100644 --- a/tools/testing/selftests/vDSO/vdso_test.c +++ b/tools/testing/selftests/vDSO/vdso_test_gettimeofday.c @@ -1,10 +1,11 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * vdso_test.c: Sample code to test parse_vdso.c + * vdso_test_gettimeofday.c: Sample code to test parse_vdso.c and + * vDSO gettimeofday() * Copyright (c) 2014 Andy Lutomirski * * Compile with: - * gcc -std=gnu99 vdso_test.c parse_vdso.c + * gcc -std=gnu99 vdso_test_gettimeofday.c parse_vdso_gettimeofday.c * * Tested on x86, 32-bit and 64-bit. It may work on other architectures, too. */ -- GitLab From c1bba2c94decdec382c6a07d9ba722cefd617575 Mon Sep 17 00:00:00 2001 From: Amit Kucheria Date: Fri, 3 Apr 2020 12:31:46 +0530 Subject: [PATCH 0620/1971] dt-bindings: thermal: Add yaml bindings for thermal sensors As part of moving the thermal bindings to YAML, split it up into 3 bindings: thermal sensors, cooling devices and thermal zones. The property #thermal-sensor-cells is required in each device that acts as a thermal sensor. It is used to uniquely identify the instance of the thermal sensor inside the system. 
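As a driver-side illustration (not part of this binding change), a device with #thermal-sensor-cells = <1> typically registers each internal sensor by its ID so a thermal-sensors phandle specifier can be resolved to it. The names below are hypothetical and the exact registration API depends on the kernel version:

    #include <linux/thermal.h>

    struct my_sensor { int id; void *hw; };

    static int my_sensor_get_temp(void *data, int *temp)
    {
            struct my_sensor *s = data;

            *temp = my_sensor_read_mcelsius(s);     /* assumed hardware helper */
            return 0;
    }

    static const struct thermal_zone_of_device_ops my_sensor_ops = {
            .get_temp = my_sensor_get_temp,
    };

            /* 'id' corresponds to the second cell in thermal-sensors = <&tsens0 id> */
            tzd = devm_thermal_zone_of_sensor_register(dev, s->id, s, &my_sensor_ops);
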
Signed-off-by: Amit Kucheria Reviewed-by: Rob Herring Reviewed-by: Lukasz Luba Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/a91b5603caea5b8854cc9f5325448e4c7228c328.1585748882.git.amit.kucheria@linaro.org --- .../bindings/thermal/thermal-sensor.yaml | 72 +++++++++++++++++++ 1 file changed, 72 insertions(+) create mode 100644 Documentation/devicetree/bindings/thermal/thermal-sensor.yaml diff --git a/Documentation/devicetree/bindings/thermal/thermal-sensor.yaml b/Documentation/devicetree/bindings/thermal/thermal-sensor.yaml new file mode 100644 index 0000000000000..fcd25a0af38c9 --- /dev/null +++ b/Documentation/devicetree/bindings/thermal/thermal-sensor.yaml @@ -0,0 +1,72 @@ +# SPDX-License-Identifier: (GPL-2.0) +# Copyright 2020 Linaro Ltd. +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/thermal/thermal-sensor.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Thermal sensor binding + +maintainers: + - Amit Kucheria + +description: | + Thermal management is achieved in devicetree by describing the sensor hardware + and the software abstraction of thermal zones required to take appropriate + action to mitigate thermal overloads. + + The following node types are used to completely describe a thermal management + system in devicetree: + - thermal-sensor: device that measures temperature, has SoC-specific bindings + - cooling-device: device used to dissipate heat either passively or actively + - thermal-zones: a container of the following node types used to describe all + thermal data for the platform + + This binding describes the thermal-sensor. + + Thermal sensor devices provide temperature sensing capabilities on thermal + zones. Typical devices are I2C ADC converters and bandgaps. Thermal sensor + devices may control one or more internal sensors. + +properties: + "#thermal-sensor-cells": + description: + Used to uniquely identify a thermal sensor instance within an IC. Will be + 0 on sensor nodes with only a single sensor and at least 1 on nodes + containing several internal sensors. + enum: [0, 1] + +examples: + - | + #include + + // Example 1: SDM845 TSENS + soc: soc@0 { + #address-cells = <2>; + #size-cells = <2>; + + /* ... */ + + tsens0: thermal-sensor@c263000 { + compatible = "qcom,sdm845-tsens", "qcom,tsens-v2"; + reg = <0 0x0c263000 0 0x1ff>, /* TM */ + <0 0x0c222000 0 0x1ff>; /* SROT */ + #qcom,sensors = <13>; + interrupts = , + ; + interrupt-names = "uplow", "critical"; + #thermal-sensor-cells = <1>; + }; + + tsens1: thermal-sensor@c265000 { + compatible = "qcom,sdm845-tsens", "qcom,tsens-v2"; + reg = <0 0x0c265000 0 0x1ff>, /* TM */ + <0 0x0c223000 0 0x1ff>; /* SROT */ + #qcom,sensors = <8>; + interrupts = , + ; + interrupt-names = "uplow", "critical"; + #thermal-sensor-cells = <1>; + }; + }; +... -- GitLab From 73c46acf915385e27f3312e77717d27eff74c08a Mon Sep 17 00:00:00 2001 From: Amit Kucheria Date: Fri, 3 Apr 2020 12:31:47 +0530 Subject: [PATCH 0621/1971] dt-bindings: thermal: Add yaml bindings for thermal cooling-devices As part of moving the thermal bindings to YAML, split it up into 3 bindings: thermal sensors, cooling devices and thermal zones. The property #cooling-cells is required in each device that acts as a cooling device - whether active or passive. So any device that can throttle its performance to passively reduce heat dissipation (e.g. CPUs, GPUs) and any device that can actively dissipate heat at different levels (e.g. fans) will contain this property. 
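For context (not part of this binding change), a cooling device exposing #cooling-cells = <2> is backed in the kernel by state callbacks so the framework can walk between the minimum and maximum states named in a cooling map. A rough sketch with hypothetical names, assuming the thermal_cooling_device_ops interface; exact prototypes vary between kernel versions:

    #include <linux/thermal.h>

    #define MY_FAN_MAX_STATE        3       /* assumed number of fan speeds - 1 */

    static int my_fan_get_max_state(struct thermal_cooling_device *cdev,
                                    unsigned long *state)
    {
            *state = MY_FAN_MAX_STATE;
            return 0;
    }

    static int my_fan_get_cur_state(struct thermal_cooling_device *cdev,
                                    unsigned long *state)
    {
            *state = my_fan_read_state(cdev->devdata);      /* assumed helper */
            return 0;
    }

    static int my_fan_set_cur_state(struct thermal_cooling_device *cdev,
                                    unsigned long state)
    {
            return my_fan_write_state(cdev->devdata, state);        /* assumed helper */
    }

    static const struct thermal_cooling_device_ops my_fan_cooling_ops = {
            .get_max_state = my_fan_get_max_state,
            .get_cur_state = my_fan_get_cur_state,
            .set_cur_state = my_fan_set_cur_state,
    };

Larger state numbers mean greater heat dissipation, which is why a cooling map entry such as cooling-device = <&CPU0 5 5> pins both the minimum and maximum state for that trip.
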
Signed-off-by: Amit Kucheria Reviewed-by: Rob Herring Reviewed-by: Lukasz Luba Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/7a9ead7fb67585fb70ab3ffd481e7d567e96970e.1585748882.git.amit.kucheria@linaro.org --- .../thermal/thermal-cooling-devices.yaml | 116 ++++++++++++++++++ 1 file changed, 116 insertions(+) create mode 100644 Documentation/devicetree/bindings/thermal/thermal-cooling-devices.yaml diff --git a/Documentation/devicetree/bindings/thermal/thermal-cooling-devices.yaml b/Documentation/devicetree/bindings/thermal/thermal-cooling-devices.yaml new file mode 100644 index 0000000000000..5145883d932e6 --- /dev/null +++ b/Documentation/devicetree/bindings/thermal/thermal-cooling-devices.yaml @@ -0,0 +1,116 @@ +# SPDX-License-Identifier: (GPL-2.0) +# Copyright 2020 Linaro Ltd. +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/thermal/thermal-cooling-devices.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Thermal cooling device binding + +maintainers: + - Amit Kucheria + +description: | + Thermal management is achieved in devicetree by describing the sensor hardware + and the software abstraction of cooling devices and thermal zones required to + take appropriate action to mitigate thermal overload. + + The following node types are used to completely describe a thermal management + system in devicetree: + - thermal-sensor: device that measures temperature, has SoC-specific bindings + - cooling-device: device used to dissipate heat either passively or actively + - thermal-zones: a container of the following node types used to describe all + thermal data for the platform + + This binding describes the cooling devices. + + There are essentially two ways to provide control on power dissipation: + - Passive cooling: by means of regulating device performance. A typical + passive cooling mechanism is a CPU that has dynamic voltage and frequency + scaling (DVFS), and uses lower frequencies as cooling states. + - Active cooling: by means of activating devices in order to remove the + dissipated heat, e.g. regulating fan speeds. + + Any cooling device has a range of cooling states (i.e. different levels of + heat dissipation). They also have a way to determine the state of cooling in + which the device is. For example, a fan's cooling states correspond to the + different fan speeds possible. Cooling states are referred to by single + unsigned integers, where larger numbers mean greater heat dissipation. The + precise set of cooling states associated with a device should be defined in + a particular device's binding. + +select: true + +properties: + "#cooling-cells": + description: + Must be 2, in order to specify minimum and maximum cooling state used in + the cooling-maps reference. The first cell is the minimum cooling state + and the second cell is the maximum cooling state requested. + const: 2 + +examples: + - | + #include + #include + + // Example 1: Cpufreq cooling device on CPU0 + cpus { + #address-cells = <2>; + #size-cells = <0>; + + CPU0: cpu@0 { + device_type = "cpu"; + compatible = "qcom,kryo385"; + reg = <0x0 0x0>; + enable-method = "psci"; + cpu-idle-states = <&LITTLE_CPU_SLEEP_0 + &LITTLE_CPU_SLEEP_1 + &CLUSTER_SLEEP_0>; + capacity-dmips-mhz = <607>; + dynamic-power-coefficient = <100>; + qcom,freq-domain = <&cpufreq_hw 0>; + #cooling-cells = <2>; + next-level-cache = <&L2_0>; + L2_0: l2-cache { + compatible = "cache"; + next-level-cache = <&L3_0>; + L3_0: l3-cache { + compatible = "cache"; + }; + }; + }; + + /* ... */ + + }; + + /* ... 
*/ + + thermal-zones { + cpu0-thermal { + polling-delay-passive = <250>; + polling-delay = <1000>; + + thermal-sensors = <&tsens0 1>; + + trips { + cpu0_alert0: trip-point0 { + temperature = <90000>; + hysteresis = <2000>; + type = "passive"; + }; + }; + + cooling-maps { + map0 { + trip = <&cpu0_alert0>; + /* Corresponds to 1000MHz in OPP table */ + cooling-device = <&CPU0 5 5>; + }; + }; + }; + + /* ... */ + }; +... -- GitLab From 1202a442a31fd2e53cde1a9677d9f7005e48fd6e Mon Sep 17 00:00:00 2001 From: Amit Kucheria Date: Fri, 3 Apr 2020 12:31:48 +0530 Subject: [PATCH 0622/1971] dt-bindings: thermal: Add yaml bindings for thermal zones As part of moving the thermal bindings to YAML, split it up into 3 bindings: thermal sensors, cooling devices and thermal zones. The thermal-zone binding is a software abstraction to capture the properties of each zone - how often they should be checked, the temperature thresholds (trips) at which mitigation actions need to be taken and the level of mitigation needed at those thresholds. Signed-off-by: Amit Kucheria Reviewed-by: Rob Herring Reviewed-by: Lukasz Luba Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/44e5c68bc654ccaf88945f70dc875fa186dd1480.1585748882.git.amit.kucheria@linaro.org --- .../bindings/thermal/thermal-zones.yaml | 341 ++++++++++++++++++ 1 file changed, 341 insertions(+) create mode 100644 Documentation/devicetree/bindings/thermal/thermal-zones.yaml diff --git a/Documentation/devicetree/bindings/thermal/thermal-zones.yaml b/Documentation/devicetree/bindings/thermal/thermal-zones.yaml new file mode 100644 index 0000000000000..b8515d3eeaa2b --- /dev/null +++ b/Documentation/devicetree/bindings/thermal/thermal-zones.yaml @@ -0,0 +1,341 @@ +# SPDX-License-Identifier: (GPL-2.0) +# Copyright 2020 Linaro Ltd. +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/thermal/thermal-zones.yaml# +$schema: http://devicetree.org/meta-schemas/base.yaml# + +title: Thermal zone binding + +maintainers: + - Amit Kucheria + +description: | + Thermal management is achieved in devicetree by describing the sensor hardware + and the software abstraction of cooling devices and thermal zones required to + take appropriate action to mitigate thermal overloads. + + The following node types are used to completely describe a thermal management + system in devicetree: + - thermal-sensor: device that measures temperature, has SoC-specific bindings + - cooling-device: device used to dissipate heat either passively or actively + - thermal-zones: a container of the following node types used to describe all + thermal data for the platform + + This binding describes the thermal-zones. + + The polling-delay properties of a thermal-zone are bound to the maximum dT/dt + (temperature derivative over time) in two situations for a thermal zone: + 1. when passive cooling is activated (polling-delay-passive) + 2. when the zone just needs to be monitored (polling-delay) or when + active cooling is activated. + + The maximum dT/dt is highly bound to hardware power consumption and + dissipation capability. The delays should be chosen to account for said + max dT/dt, such that a device does not cross several trip boundaries + unexpectedly between polls. Choosing the right polling delays shall avoid + having the device in temperature ranges that may damage the silicon structures + and reduce silicon lifetime. 
+ +properties: + $nodename: + const: thermal-zones + description: + A /thermal-zones node is required in order to use the thermal framework to + manage input from the various thermal zones in the system in order to + mitigate thermal overload conditions. It does not represent a real device + in the system, but acts as a container to link a thermal sensor device, + platform-data regarding temperature thresholds and the mitigation actions + to take when the temperature crosses those thresholds. + +patternProperties: + "^[a-zA-Z][a-zA-Z0-9\\-]{1,12}-thermal$": + type: object + description: + Each thermal zone node contains information about how frequently it + must be checked, the sensor responsible for reporting temperature for + this zone, one sub-node containing the various trip points for this + zone and one sub-node containing all the zone cooling-maps. + + properties: + polling-delay: + $ref: /schemas/types.yaml#/definitions/uint32 + description: + The maximum number of milliseconds to wait between polls when + checking this thermal zone. Setting this to 0 disables the polling + timers setup by the thermal framework and assumes that the thermal + sensors in this zone support interrupts. + + polling-delay-passive: + $ref: /schemas/types.yaml#/definitions/uint32 + description: + The maximum number of milliseconds to wait between polls when + checking this thermal zone while doing passive cooling. Setting + this to 0 disables the polling timers setup by the thermal + framework and assumes that the thermal sensors in this zone + support interrupts. + + thermal-sensors: + $ref: /schemas/types.yaml#/definitions/phandle-array + maxItems: 1 + description: + The thermal sensor phandle and sensor specifier used to monitor this + thermal zone. + + coefficients: + $ref: /schemas/types.yaml#/definitions/uint32-array + description: + An array of integers containing the coefficients of a linear equation + that binds all the sensors listed in this thermal zone. + + The linear equation used is as follows, + z = c0 * x0 + c1 * x1 + ... + c(n-1) * x(n-1) + cn + where c0, c1, .., cn are the coefficients. + + Coefficients default to 1 in case this property is not specified. The + coefficients are ordered and are matched with sensors by means of the + sensor ID. Additional coefficients are interpreted as constant offset. + + sustainable-power: + $ref: /schemas/types.yaml#/definitions/uint32 + description: + An estimate of the sustainable power (in mW) that this thermal zone + can dissipate at the desired control temperature. For reference, the + sustainable power of a 4-inch phone is typically 2000mW, while on a + 10-inch tablet is around 4500mW. + + trips: + type: object + description: + This node describes a set of points in the temperature domain at + which the thermal framework needs to take action. The actions to + be taken are defined in another node called cooling-maps. + + patternProperties: + "^[a-zA-Z][a-zA-Z0-9\\-_]{0,63}$": + type: object + + properties: + temperature: + $ref: /schemas/types.yaml#/definitions/int32 + minimum: -273000 + maximum: 200000 + description: + An integer expressing the trip temperature in millicelsius. + + hysteresis: + $ref: /schemas/types.yaml#/definitions/uint32 + description: + An unsigned integer expressing the hysteresis delta with + respect to the trip temperature property above, also in + millicelsius. Any cooling action initiated by the framework is + maintained until the temperature falls below + (trip temperature - hysteresis). 
This potentially prevents a + situation where the trip gets constantly triggered soon after + cooling action is removed. + + type: + $ref: /schemas/types.yaml#/definitions/string + enum: + - active # enable active cooling e.g. fans + - passive # enable passive cooling e.g. throttling cpu + - hot # send notification to driver + - critical # send notification to driver, trigger shutdown + description: | + There are four valid trip types: active, passive, hot, + critical. + + The critical trip type is used to set the maximum + temperature threshold above which the HW becomes + unstable and underlying firmware might even trigger a + reboot. Hitting the critical threshold triggers a system + shutdown. + + The hot trip type can be used to send a notification to + the thermal driver (if a .notify callback is registered). + The action to be taken is left to the driver. + + The passive trip type can be used to slow down HW e.g. run + the CPU, GPU, bus at a lower frequency. + + The active trip type can be used to control other HW to + help in cooling e.g. fans can be sped up or slowed down + + required: + - temperature + - hysteresis + - type + additionalProperties: false + + additionalProperties: false + + cooling-maps: + type: object + description: + This node describes the action to be taken when a thermal zone + crosses one of the temperature thresholds described in the trips + node. The action takes the form of a mapping relation between a + trip and the target cooling device state. + + patternProperties: + "^map[-a-zA-Z0-9]*$": + type: object + + properties: + trip: + $ref: /schemas/types.yaml#/definitions/phandle + description: + A phandle of a trip point node within this thermal zone. + + cooling-device: + $ref: /schemas/types.yaml#/definitions/phandle-array + description: + A list of cooling device phandles along with the minimum + and maximum cooling state specifiers for each cooling + device. Using the THERMAL_NO_LIMIT (-1UL) constant in the + cooling-device phandle limit specifier lets the framework + use the minimum and maximum cooling state for that cooling + device automatically. + + contribution: + $ref: /schemas/types.yaml#/definitions/uint32 + minimum: 0 + maximum: 100 + description: + The percentage contribution of the cooling devices at the + specific trip temperature referenced in this map + to this thermal zone + + required: + - trip + - cooling-device + additionalProperties: false + + required: + - polling-delay + - polling-delay-passive + - thermal-sensors + - trips + additionalProperties: false + +examples: + - | + #include + #include + + // Example 1: SDM845 TSENS + soc: soc@0 { + #address-cells = <2>; + #size-cells = <2>; + + /* ... */ + + tsens0: thermal-sensor@c263000 { + compatible = "qcom,sdm845-tsens", "qcom,tsens-v2"; + reg = <0 0x0c263000 0 0x1ff>, /* TM */ + <0 0x0c222000 0 0x1ff>; /* SROT */ + #qcom,sensors = <13>; + interrupts = , + ; + interrupt-names = "uplow", "critical"; + #thermal-sensor-cells = <1>; + }; + + tsens1: thermal-sensor@c265000 { + compatible = "qcom,sdm845-tsens", "qcom,tsens-v2"; + reg = <0 0x0c265000 0 0x1ff>, /* TM */ + <0 0x0c223000 0 0x1ff>; /* SROT */ + #qcom,sensors = <8>; + interrupts = , + ; + interrupt-names = "uplow", "critical"; + #thermal-sensor-cells = <1>; + }; + }; + + /* ... 
*/ + + thermal-zones { + cpu0-thermal { + polling-delay-passive = <250>; + polling-delay = <1000>; + + thermal-sensors = <&tsens0 1>; + + trips { + cpu0_alert0: trip-point0 { + temperature = <90000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu0_alert1: trip-point1 { + temperature = <95000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu0_crit: cpu_crit { + temperature = <110000>; + hysteresis = <1000>; + type = "critical"; + }; + }; + + cooling-maps { + map0 { + trip = <&cpu0_alert0>; + /* Corresponds to 1400MHz in OPP table */ + cooling-device = <&CPU0 3 3>, <&CPU1 3 3>, + <&CPU2 3 3>, <&CPU3 3 3>; + }; + + map1 { + trip = <&cpu0_alert1>; + /* Corresponds to 1000MHz in OPP table */ + cooling-device = <&CPU0 5 5>, <&CPU1 5 5>, + <&CPU2 5 5>, <&CPU3 5 5>; + }; + }; + }; + + /* ... */ + + cluster0-thermal { + polling-delay-passive = <250>; + polling-delay = <1000>; + + thermal-sensors = <&tsens0 5>; + + trips { + cluster0_alert0: trip-point0 { + temperature = <90000>; + hysteresis = <2000>; + type = "hot"; + }; + cluster0_crit: cluster0_crit { + temperature = <110000>; + hysteresis = <2000>; + type = "critical"; + }; + }; + }; + + /* ... */ + + gpu-top-thermal { + polling-delay-passive = <250>; + polling-delay = <1000>; + + thermal-sensors = <&tsens0 11>; + + trips { + gpu1_alert0: trip-point0 { + temperature = <90000>; + hysteresis = <2000>; + type = "hot"; + }; + }; + }; + }; +... -- GitLab From a4e91825d7e1252f7cba005f1451e5464b23c15d Mon Sep 17 00:00:00 2001 From: Alexander Monakov Date: Sun, 10 May 2020 20:48:40 +0000 Subject: [PATCH 0623/1971] x86/amd_nb: Add AMD family 17h model 60h PCI IDs Add PCI IDs for AMD Renoir (4000-series Ryzen CPUs). This is necessary to enable support for temperature sensors via the k10temp module. 
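For context (not part of this patch), once the data-fabric functions are listed in the amd_nb tables they become reachable through the amd_nb helpers; a minimal hedged sketch, with the node number and message purely illustrative:

    #include <asm/amd_nb.h>

            /* look up the enumerated northbridge/data-fabric devices for node 0 */
            struct amd_northbridge *nb = node_to_amd_nb(0);

            if (nb && nb->misc)
                    pci_info(nb->misc, "DF function 3 device %04x available\n",
                             nb->misc->device);
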
Signed-off-by: Alexander Monakov Signed-off-by: Borislav Petkov Acked-by: Yazen Ghannam Acked-by: Guenter Roeck Link: https://lkml.kernel.org/r/20200510204842.2603-2-amonakov@ispras.ru --- arch/x86/kernel/amd_nb.c | 5 +++++ include/linux/pci_ids.h | 1 + 2 files changed, 6 insertions(+) diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index b6b3297851f37..18f6b7c4bd79f 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -18,9 +18,11 @@ #define PCI_DEVICE_ID_AMD_17H_ROOT 0x1450 #define PCI_DEVICE_ID_AMD_17H_M10H_ROOT 0x15d0 #define PCI_DEVICE_ID_AMD_17H_M30H_ROOT 0x1480 +#define PCI_DEVICE_ID_AMD_17H_M60H_ROOT 0x1630 #define PCI_DEVICE_ID_AMD_17H_DF_F4 0x1464 #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494 +#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F4 0x144c #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444 #define PCI_DEVICE_ID_AMD_19H_DF_F4 0x1654 @@ -33,6 +35,7 @@ static const struct pci_device_id amd_root_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_ROOT) }, {} }; @@ -50,6 +53,7 @@ static const struct pci_device_id amd_nb_misc_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) }, @@ -65,6 +69,7 @@ static const struct pci_device_id amd_nb_link_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) }, diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 1dfc4e1dcb94c..3155f5ada02eb 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -550,6 +550,7 @@ #define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463 #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F3 0x1493 +#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F3 0x144b #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443 #define PCI_DEVICE_ID_AMD_19H_DF_F3 0x1653 #define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703 -- GitLab From 279f0b3a4b80660fba6faadc2ca2fa426bf3f7e9 Mon Sep 17 00:00:00 2001 From: Alexander Monakov Date: Sun, 10 May 2020 20:48:41 +0000 Subject: [PATCH 0624/1971] hwmon: (k10temp) Add AMD family 17h model 60h PCI match Add support for retrieving Tdie and Tctl on AMD Renoir (4000-series Ryzen CPUs). It appears SMU offsets for reading current/voltage and CCD temperature have changed for this generation (reads from currently used offsets yield zeros), so those features cannot be enabled so trivially. 
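For background (not part of this patch), the existing Family 17h path in k10temp obtains Tctl through an SMN read; a simplified sketch, with the register offset and bit layout taken from the current driver and error handling omitted, so treat the constants as illustrative:

    #include <asm/amd_nb.h>

    #define REPORTED_TEMP_CTRL_OFFSET       0x00059800      /* as used for Family 17h */

            u32 regval;
            long temp;

            amd_smn_read(amd_pci_dev_to_node_id(pdev), REPORTED_TEMP_CTRL_OFFSET, &regval);
            temp = (regval >> 21) * 125;    /* 0.125 degC units -> millidegrees */
            if (regval & BIT(19))           /* range select: high-temperature range */
                    temp -= 49000;
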
Signed-off-by: Alexander Monakov Signed-off-by: Borislav Petkov Acked-by: Guenter Roeck Link: https://lkml.kernel.org/r/20200510204842.2603-3-amonakov@ispras.ru --- drivers/hwmon/k10temp.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index 3f37d5d81fe40..7ba82e0efbeba 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c @@ -632,6 +632,7 @@ static const struct pci_device_id k10temp_id_table[] = { { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) }, { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) }, {} -- GitLab From b6bea24d41519e8c31e4798f1c1a3f67e540c5d0 Mon Sep 17 00:00:00 2001 From: Alexander Monakov Date: Sun, 10 May 2020 20:48:42 +0000 Subject: [PATCH 0625/1971] EDAC/amd64: Add AMD family 17h model 60h PCI IDs Add support for AMD Renoir (4000-series Ryzen CPUs). Signed-off-by: Alexander Monakov Signed-off-by: Borislav Petkov Acked-by: Yazen Ghannam Link: https://lkml.kernel.org/r/20200510204842.2603-4-amonakov@ispras.ru --- drivers/edac/amd64_edac.c | 14 ++++++++++++++ drivers/edac/amd64_edac.h | 3 +++ 2 files changed, 17 insertions(+) diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 6bdc5bb8c8bc9..42024a8d6a581 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -2316,6 +2316,16 @@ static struct amd64_family_type family_types[] = { .dbam_to_cs = f17_addr_mask_to_cs_size, } }, + [F17_M60H_CPUS] = { + .ctl_name = "F17h_M60h", + .f0_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F0, + .f6_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F6, + .max_mcs = 2, + .ops = { + .early_channel_count = f17_early_channel_count, + .dbam_to_cs = f17_addr_mask_to_cs_size, + } + }, [F17_M70H_CPUS] = { .ctl_name = "F17h_M70h", .f0_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F0, @@ -3354,6 +3364,10 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt) fam_type = &family_types[F17_M30H_CPUS]; pvt->ops = &family_types[F17_M30H_CPUS].ops; break; + } else if (pvt->model >= 0x60 && pvt->model <= 0x6f) { + fam_type = &family_types[F17_M60H_CPUS]; + pvt->ops = &family_types[F17_M60H_CPUS].ops; + break; } else if (pvt->model >= 0x70 && pvt->model <= 0x7f) { fam_type = &family_types[F17_M70H_CPUS]; pvt->ops = &family_types[F17_M70H_CPUS].ops; diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h index abbf3c274d749..52b5d03eeba00 100644 --- a/drivers/edac/amd64_edac.h +++ b/drivers/edac/amd64_edac.h @@ -120,6 +120,8 @@ #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F6 0x15ee #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F0 0x1490 #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F6 0x1496 +#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F0 0x1448 +#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F6 0x144e #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F0 0x1440 #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F6 0x1446 #define PCI_DEVICE_ID_AMD_19H_DF_F0 0x1650 @@ -293,6 +295,7 @@ enum amd_families { F17_CPUS, F17_M10H_CPUS, F17_M30H_CPUS, + F17_M60H_CPUS, F17_M70H_CPUS, F19_CPUS, NUM_FAMILIES, -- GitLab From f740e64c6cd6d9c26b4b9fc0a8d339b215147af7 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. 
Silva" Date: Thu, 7 May 2020 14:25:17 -0500 Subject: [PATCH 0626/1971] thermal: imx8mm: Replace zero-length array with flexible-array The current codebase makes use of the zero-length array language extension to the C90 standard, but the preferred mechanism to declare variable-length types such as these ones is a flexible array member[1][2], introduced in C99: struct foo { int stuff; struct boo array[]; }; By making use of the mechanism above, we will get a compiler warning in case the flexible array does not occur last in the structure, which will help us prevent some kind of undefined behavior bugs from being inadvertently introduced[3] to the codebase from now on. Also, notice that, dynamic memory allocations won't be affected by this change: "Flexible array members have incomplete type, and so the sizeof operator may not be applied. As a quirk of the original implementation of zero-length arrays, sizeof evaluates to zero."[1] sizeof(flexible-array-member) triggers a warning because flexible array members have incomplete type[1]. There are some instances of code in which the sizeof operator is being incorrectly/erroneously applied to zero-length arrays and the result is zero. Such instances may be hiding some bugs. So, this work (flexible-array member conversions) will also help to get completely rid of those sorts of issues. This issue was found with the help of Coccinelle. [1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html [2] https://github.com/KSPP/linux/issues/21 [3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour") Signed-off-by: Gustavo A. R. Silva Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/20200507192517.GA16557@embeddedor --- drivers/thermal/imx8mm_thermal.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/thermal/imx8mm_thermal.c b/drivers/thermal/imx8mm_thermal.c index 0d60f8d7894f8..e6061e26d4ac0 100644 --- a/drivers/thermal/imx8mm_thermal.c +++ b/drivers/thermal/imx8mm_thermal.c @@ -54,7 +54,7 @@ struct imx8mm_tmu { void __iomem *base; struct clk *clk; const struct thermal_soc_data *socdata; - struct tmu_sensor sensors[0]; + struct tmu_sensor sensors[]; }; static int imx8mm_tmu_get_temp(void *data, int *temp) -- GitLab From 869495ccf52a707a21870ba5cba1cfd5ca720dd9 Mon Sep 17 00:00:00 2001 From: Amit Kucheria Date: Mon, 11 May 2020 17:54:49 +0530 Subject: [PATCH 0627/1971] thermal/core: Get rid of MODULE_* tags The thermal framework can no longer be compiled as a module as of commit 554b3529fe01 ("thermal/drivers/core: Remove the module Kconfig's option"). Remove the MODULE_* tags. Rui is mentioned in the copyright line at the top of the file and the license is mentioned in the SPDX tags. So no loss of information. 
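As a rough illustration (not the file's actual header) of why nothing is lost for built-in-only code:

	// SPDX-License-Identifier: GPL-2.0
	/*
	 * Generic thermal management sysfs support
	 *
	 * Copyright (C) <year> <author>
	 */

	/* MODULE_AUTHOR(), MODULE_DESCRIPTION() and MODULE_LICENSE() add nothing here */

The SPDX line is the license statement and the copyright banner carries the authorship, so the tags were purely redundant for code that can only be built in.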
Signed-off-by: Amit Kucheria Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/74339a09a55f8f3d86c4074fc2bf853a302d6186.1589199124.git.amit.kucheria@linaro.org --- drivers/thermal/thermal_core.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index c065509309799..dd3f4e87857bd 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -27,10 +27,6 @@ #include "thermal_core.h" #include "thermal_hwmon.h" -MODULE_AUTHOR("Zhang Rui"); -MODULE_DESCRIPTION("Generic thermal management sysfs support"); -MODULE_LICENSE("GPL v2"); - static DEFINE_IDA(thermal_tz_ida); static DEFINE_IDA(thermal_cdev_ida); -- GitLab From 3f0cfea3dd6ed7cd176376bb4a5488b75b938d96 Mon Sep 17 00:00:00 2001 From: Amit Kucheria Date: Mon, 11 May 2020 17:54:50 +0530 Subject: [PATCH 0628/1971] thermal/core: Replace module.h with export.h Thermal core cannot be modular, remove the unnecessary module.h include and replace with export.h to handle EXPORT_SYMBOL family of macros. Signed-off-by: Amit Kucheria Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/33af23406dcdb0c62dae1e6401446b997ccb449f.1589199124.git.amit.kucheria@linaro.org --- drivers/thermal/thermal_core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index dd3f4e87857bd..b71196eaf90e8 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -9,9 +9,9 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include #include #include +#include #include #include #include -- GitLab From 231b98af4da050138657febdd506951928981722 Mon Sep 17 00:00:00 2001 From: Amit Kucheria Date: Mon, 11 May 2020 17:54:51 +0530 Subject: [PATCH 0629/1971] thermal/drivers/thermal_helpers: Sort headers alphabetically Sort headers to make it easier to read and find duplicate headers. Signed-off-by: Amit Kucheria Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/133db154796f354e6c51e6310095f679e1f45441.1589199124.git.amit.kucheria@linaro.org --- drivers/thermal/thermal_helpers.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/thermal/thermal_helpers.c b/drivers/thermal/thermal_helpers.c index 59eaf2d0fdb32..3d737143ec118 100644 --- a/drivers/thermal/thermal_helpers.c +++ b/drivers/thermal/thermal_helpers.c @@ -12,11 +12,11 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include #include #include #include #include +#include #include -- GitLab From 3a74c882dcc15c959e1cc14c3f62877d2f09aef8 Mon Sep 17 00:00:00 2001 From: Amit Kucheria Date: Mon, 11 May 2020 17:54:52 +0530 Subject: [PATCH 0630/1971] thermal/drivers/thermal_helpers: Include export.h It is preferable to include export.h when you are using EXPORT_SYMBOL family of macros. 
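A minimal sketch of the pattern being standardised on (the file and symbol names are made up): a built-in translation unit that exports symbols only needs export.h for the EXPORT_SYMBOL*() macros, not the much heavier module.h:

	#include <linux/export.h>

	int example_thermal_helper(void)
	{
		return 0;
	}
	EXPORT_SYMBOL_GPL(example_thermal_helper);

module.h happens to pull in export.h (among much else), which is why the old includes still built; naming export.h directly documents the actual dependency.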
Signed-off-by: Amit Kucheria Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/fd3443f00dbba6ca90f35726c7451ae52145d2d4.1589199124.git.amit.kucheria@linaro.org --- drivers/thermal/thermal_helpers.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/thermal/thermal_helpers.c b/drivers/thermal/thermal_helpers.c index 3d737143ec118..87b1256fa2f20 100644 --- a/drivers/thermal/thermal_helpers.c +++ b/drivers/thermal/thermal_helpers.c @@ -14,6 +14,7 @@ #include #include +#include #include #include #include -- GitLab From 1330e04f423661ed05a4d2a5235505e0332b8026 Mon Sep 17 00:00:00 2001 From: Amit Kucheria Date: Mon, 11 May 2020 17:54:53 +0530 Subject: [PATCH 0631/1971] thermal/drivers/thermal_hwmon: Sort headers alphabetically Sort headers to make it easier to read and find duplicate headers. Signed-off-by: Amit Kucheria Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/29b64f1fe81e674c753c8f8309c310acd782ebea.1589199124.git.amit.kucheria@linaro.org --- drivers/thermal/thermal_hwmon.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c index c8d2620f2e427..e43ae551592d1 100644 --- a/drivers/thermal/thermal_hwmon.c +++ b/drivers/thermal/thermal_hwmon.c @@ -10,10 +10,11 @@ * Copyright (C) 2013 Texas Instruments * Copyright (C) 2013 Eduardo Valentin */ +#include #include -#include #include -#include +#include + #include "thermal_hwmon.h" /* hwmon sys I/F */ -- GitLab From e5ebf357bbfc73d4bfca14195e104e0726c5a729 Mon Sep 17 00:00:00 2001 From: Amit Kucheria Date: Mon, 11 May 2020 17:54:54 +0530 Subject: [PATCH 0632/1971] thermal/drivers/thermal_hwmon: Include export.h It is preferable to include export.h when you are using EXPORT_SYMBOL family of macros. Signed-off-by: Amit Kucheria Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/f542962494a8441fdc8e550a11d0e535b92362a0.1589199124.git.amit.kucheria@linaro.org --- drivers/thermal/thermal_hwmon.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c index e43ae551592d1..8b92e00ff2362 100644 --- a/drivers/thermal/thermal_hwmon.c +++ b/drivers/thermal/thermal_hwmon.c @@ -11,6 +11,7 @@ * Copyright (C) 2013 Eduardo Valentin */ #include +#include #include #include #include -- GitLab From d5d1f6e759dfc8218ae9500af222e280ead1aeec Mon Sep 17 00:00:00 2001 From: Amit Kucheria Date: Mon, 11 May 2020 17:54:55 +0530 Subject: [PATCH 0633/1971] thermal/drivers/clock_cooling: Sort headers alphabetically Sort headers to make it easier to read and find duplicate headers. 
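A small illustration of the "find duplicate headers" point (the headers below are just an example, not clock_cooling's actual list): once the include block is sorted, an accidental duplicate lands on adjacent lines and is hard to miss:

	#include <linux/clk.h>
	#include <linux/cpufreq.h>
	#include <linux/cpufreq.h>	/* duplicate is now adjacent and obvious */
	#include <linux/device.h>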
Signed-off-by: Amit Kucheria Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/f8e1258fd8b882bab018de63c7e713b4334fec30.1589199124.git.amit.kucheria@linaro.org --- drivers/thermal/clock_cooling.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/thermal/clock_cooling.c b/drivers/thermal/clock_cooling.c index 7cb3ae4b44ee7..fd6bc6eefc88c 100644 --- a/drivers/thermal/clock_cooling.c +++ b/drivers/thermal/clock_cooling.c @@ -12,6 +12,7 @@ * Copyright (C) 2012 Amit Daniel */ #include +#include #include #include #include @@ -20,7 +21,6 @@ #include #include #include -#include /** * struct clock_cooling_device - data for cooling device with clock -- GitLab From 1628d4b8ca9a877577aaf4116c02f1f45ea18a89 Mon Sep 17 00:00:00 2001 From: Amit Kucheria Date: Mon, 11 May 2020 17:54:56 +0530 Subject: [PATCH 0634/1971] thermal/drivers/clock_cooling: Include export.h It is preferrable to include export.h when you are using EXPORT_SYMBOL family of macros. Signed-off-by: Amit Kucheria Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/25f16415ab7b7587a052f1bce4133da318d58192.1589199124.git.amit.kucheria@linaro.org --- drivers/thermal/clock_cooling.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/thermal/clock_cooling.c b/drivers/thermal/clock_cooling.c index fd6bc6eefc88c..56cb1f46a428e 100644 --- a/drivers/thermal/clock_cooling.c +++ b/drivers/thermal/clock_cooling.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include -- GitLab From 5ccb451e47fa6da8ae6cd6710b91758280197073 Mon Sep 17 00:00:00 2001 From: Amit Kucheria Date: Mon, 11 May 2020 17:54:57 +0530 Subject: [PATCH 0635/1971] thermal/drivers/cpufreq_cooling: Sort headers alphabetically Sort headers to make it easier to read and find duplicate headers. Signed-off-by: Amit Kucheria Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/4231f5dfe758b9bf716981be71cadf9642c83528.1589199124.git.amit.kucheria@linaro.org --- drivers/thermal/cpufreq_cooling.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c index e297e135c031a..1b5a63b4763d0 100644 --- a/drivers/thermal/cpufreq_cooling.c +++ b/drivers/thermal/cpufreq_cooling.c @@ -11,16 +11,16 @@ * */ #include -#include +#include #include +#include +#include #include #include #include #include #include -#include -#include -#include +#include #include -- GitLab From c65f83c0667ae1b0013fa87918f009c4380443d5 Mon Sep 17 00:00:00 2001 From: Amit Kucheria Date: Mon, 11 May 2020 17:54:58 +0530 Subject: [PATCH 0636/1971] thermal/drivers/cpufreq_cooling: Replace module.h with export.h cpufreq_cooling cannot be modular, remove the unnecessary module.h include and replace with export.h to handle EXPORT_SYMBOL family of macros. 
Signed-off-by: Amit Kucheria Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/7a439e41e91d8bc5ff99207f99723fcf04ca36eb.1589199124.git.amit.kucheria@linaro.org --- drivers/thermal/cpufreq_cooling.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c index 1b5a63b4763d0..9e124020519fc 100644 --- a/drivers/thermal/cpufreq_cooling.c +++ b/drivers/thermal/cpufreq_cooling.c @@ -10,12 +10,12 @@ * Viresh Kumar * */ -#include #include #include #include #include #include +#include #include #include #include -- GitLab From 2b61314e76671e125b3d53a02eec3912204c5418 Mon Sep 17 00:00:00 2001 From: Amit Kucheria Date: Mon, 11 May 2020 17:54:59 +0530 Subject: [PATCH 0637/1971] thermal/drivers/of-thermal: Sort headers alphabetically Sort headers to make it easier to read and find duplicate headers. Signed-off-by: Amit Kucheria Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/f9f9d8117f1659872114ba65bbfa9ed4b813128f.1589199124.git.amit.kucheria@linaro.org --- drivers/thermal/of-thermal.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c index 874a47d6923f4..ddf88dbe7ba29 100644 --- a/drivers/thermal/of-thermal.c +++ b/drivers/thermal/of-thermal.c @@ -8,13 +8,13 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include -#include -#include -#include -#include #include #include +#include +#include +#include +#include +#include #include #include "thermal_core.h" -- GitLab From 6abea5d2af4cdd508b04d94ed9382c3710b99dfc Mon Sep 17 00:00:00 2001 From: Amit Kucheria Date: Mon, 11 May 2020 17:55:00 +0530 Subject: [PATCH 0638/1971] thermal/drivers/user_space: Sort headers alphabetically Sort headers to make it easier to read and find duplicate headers. Signed-off-by: Amit Kucheria Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/406d0c7c961e997b42e25adf4e432fe4f57b315a.1589199124.git.amit.kucheria@linaro.org --- drivers/thermal/user_space.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/thermal/user_space.c b/drivers/thermal/user_space.c index 293cffd9c8adb..82a7198bbe71e 100644 --- a/drivers/thermal/user_space.c +++ b/drivers/thermal/user_space.c @@ -10,8 +10,8 @@ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ -#include #include +#include #include "thermal_core.h" -- GitLab From 0015d9a2a72745308ef9728a746ff7b1e82138bc Mon Sep 17 00:00:00 2001 From: Amit Kucheria Date: Mon, 11 May 2020 17:55:01 +0530 Subject: [PATCH 0639/1971] thermal/governors: Prefix all source files with gov_ Bang-bang governor source file is prefixed with gov_. Do the same for other governors for consistency so they're easy to find in the sources. 
Signed-off-by: Amit Kucheria Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/b9a85d3204712f14e320504948c12712dc0b291b.1589199124.git.amit.kucheria@linaro.org --- drivers/thermal/Makefile | 8 ++++---- drivers/thermal/{fair_share.c => gov_fair_share.c} | 0 .../thermal/{power_allocator.c => gov_power_allocator.c} | 0 drivers/thermal/{step_wise.c => gov_step_wise.c} | 0 drivers/thermal/{user_space.c => gov_user_space.c} | 0 5 files changed, 4 insertions(+), 4 deletions(-) rename drivers/thermal/{fair_share.c => gov_fair_share.c} (100%) rename drivers/thermal/{power_allocator.c => gov_power_allocator.c} (100%) rename drivers/thermal/{step_wise.c => gov_step_wise.c} (100%) rename drivers/thermal/{user_space.c => gov_user_space.c} (100%) diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile index 86c506410cc01..757c40a719405 100644 --- a/drivers/thermal/Makefile +++ b/drivers/thermal/Makefile @@ -12,11 +12,11 @@ thermal_sys-$(CONFIG_THERMAL_HWMON) += thermal_hwmon.o thermal_sys-$(CONFIG_THERMAL_OF) += of-thermal.o # governors -thermal_sys-$(CONFIG_THERMAL_GOV_FAIR_SHARE) += fair_share.o +thermal_sys-$(CONFIG_THERMAL_GOV_FAIR_SHARE) += gov_fair_share.o thermal_sys-$(CONFIG_THERMAL_GOV_BANG_BANG) += gov_bang_bang.o -thermal_sys-$(CONFIG_THERMAL_GOV_STEP_WISE) += step_wise.o -thermal_sys-$(CONFIG_THERMAL_GOV_USER_SPACE) += user_space.o -thermal_sys-$(CONFIG_THERMAL_GOV_POWER_ALLOCATOR) += power_allocator.o +thermal_sys-$(CONFIG_THERMAL_GOV_STEP_WISE) += gov_step_wise.o +thermal_sys-$(CONFIG_THERMAL_GOV_USER_SPACE) += gov_user_space.o +thermal_sys-$(CONFIG_THERMAL_GOV_POWER_ALLOCATOR) += gov_power_allocator.o # cpufreq cooling thermal_sys-$(CONFIG_CPU_FREQ_THERMAL) += cpufreq_cooling.o diff --git a/drivers/thermal/fair_share.c b/drivers/thermal/gov_fair_share.c similarity index 100% rename from drivers/thermal/fair_share.c rename to drivers/thermal/gov_fair_share.c diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/gov_power_allocator.c similarity index 100% rename from drivers/thermal/power_allocator.c rename to drivers/thermal/gov_power_allocator.c diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/gov_step_wise.c similarity index 100% rename from drivers/thermal/step_wise.c rename to drivers/thermal/gov_step_wise.c diff --git a/drivers/thermal/user_space.c b/drivers/thermal/gov_user_space.c similarity index 100% rename from drivers/thermal/user_space.c rename to drivers/thermal/gov_user_space.c -- GitLab From 14adf6c83f7c6953a136d9d4beda79004191e729 Mon Sep 17 00:00:00 2001 From: Amit Kucheria Date: Mon, 11 May 2020 17:55:02 +0530 Subject: [PATCH 0640/1971] thermal/of: Rename of-thermal.c Core thermal framework code files should start with thermal_*. of-thermal.c does not follow this pattern and can easily be confused with platform driver. 
Fix this by renaming it to thermal_of.c Signed-off-by: Amit Kucheria Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/f5e233d5c5dcc7c7cb56b3448da255cb2c9ef0d1.1589199124.git.amit.kucheria@linaro.org --- drivers/thermal/Makefile | 2 +- drivers/thermal/{of-thermal.c => thermal_of.c} | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename drivers/thermal/{of-thermal.c => thermal_of.c} (100%) diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile index 757c40a719405..0c8b84a09b9aa 100644 --- a/drivers/thermal/Makefile +++ b/drivers/thermal/Makefile @@ -9,7 +9,7 @@ thermal_sys-y += thermal_core.o thermal_sysfs.o \ # interface to/from other layers providing sensors thermal_sys-$(CONFIG_THERMAL_HWMON) += thermal_hwmon.o -thermal_sys-$(CONFIG_THERMAL_OF) += of-thermal.o +thermal_sys-$(CONFIG_THERMAL_OF) += thermal_of.o # governors thermal_sys-$(CONFIG_THERMAL_GOV_FAIR_SHARE) += gov_fair_share.o diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/thermal_of.c similarity index 100% rename from drivers/thermal/of-thermal.c rename to drivers/thermal/thermal_of.c -- GitLab From a7ff82976122eb6d1fd286dc34f09b6ecd756b60 Mon Sep 17 00:00:00 2001 From: Amit Kucheria Date: Wed, 29 Apr 2020 23:44:17 +0530 Subject: [PATCH 0641/1971] drivers: thermal: tsens: Merge tsens-common.c into tsens.c tsens-common.c has outlived its usefuless. It was created expecting lots of custom routines per version of the TSENS IP. We haven't needed those, there is now only data in the version-specific files. Merge the code for tsens-common.c into tsens.c. As a result, - Remove any unnecessary forward declarations in tsens.h. - Add a Linaro copyright to tsens.c. - Fixup the Makefile to remove tsens-common.c. - Where it made sense, fix some 80-column alignments in the tsens-common.c code being copied over. There is no functional change with this patch. Signed-off-by: Amit Kucheria Reviewed-by: Bjorn Andersson Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/e30e2ba6fa5c007983afd4d7d4e0311c0b57917a.1588183879.git.amit.kucheria@linaro.org --- drivers/thermal/qcom/Makefile | 4 +- drivers/thermal/qcom/tsens-common.c | 843 ---------------------------- drivers/thermal/qcom/tsens.c | 838 +++++++++++++++++++++++++++ drivers/thermal/qcom/tsens.h | 5 - 4 files changed, 840 insertions(+), 850 deletions(-) delete mode 100644 drivers/thermal/qcom/tsens-common.c diff --git a/drivers/thermal/qcom/Makefile b/drivers/thermal/qcom/Makefile index 7c8dc6e366936..ec86eef7f6a6b 100644 --- a/drivers/thermal/qcom/Makefile +++ b/drivers/thermal/qcom/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_QCOM_TSENS) += qcom_tsens.o -qcom_tsens-y += tsens.o tsens-common.o tsens-v0_1.o \ - tsens-8960.o tsens-v2.o tsens-v1.o +qcom_tsens-y += tsens.o tsens-v2.o tsens-v1.o tsens-v0_1.o \ + tsens-8960.o obj-$(CONFIG_QCOM_SPMI_TEMP_ALARM) += qcom-spmi-temp-alarm.o diff --git a/drivers/thermal/qcom/tsens-common.c b/drivers/thermal/qcom/tsens-common.c deleted file mode 100644 index 172545366636e..0000000000000 --- a/drivers/thermal/qcom/tsens-common.c +++ /dev/null @@ -1,843 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (c) 2015, The Linux Foundation. All rights reserved. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include "tsens.h" - -/** - * struct tsens_irq_data - IRQ status and temperature violations - * @up_viol: upper threshold violated - * @up_thresh: upper threshold temperature value - * @up_irq_mask: mask register for upper threshold irqs - * @up_irq_clear: clear register for uppper threshold irqs - * @low_viol: lower threshold violated - * @low_thresh: lower threshold temperature value - * @low_irq_mask: mask register for lower threshold irqs - * @low_irq_clear: clear register for lower threshold irqs - * @crit_viol: critical threshold violated - * @crit_thresh: critical threshold temperature value - * @crit_irq_mask: mask register for critical threshold irqs - * @crit_irq_clear: clear register for critical threshold irqs - * - * Structure containing data about temperature threshold settings and - * irq status if they were violated. - */ -struct tsens_irq_data { - u32 up_viol; - int up_thresh; - u32 up_irq_mask; - u32 up_irq_clear; - u32 low_viol; - int low_thresh; - u32 low_irq_mask; - u32 low_irq_clear; - u32 crit_viol; - u32 crit_thresh; - u32 crit_irq_mask; - u32 crit_irq_clear; -}; - -char *qfprom_read(struct device *dev, const char *cname) -{ - struct nvmem_cell *cell; - ssize_t data; - char *ret; - - cell = nvmem_cell_get(dev, cname); - if (IS_ERR(cell)) - return ERR_CAST(cell); - - ret = nvmem_cell_read(cell, &data); - nvmem_cell_put(cell); - - return ret; -} - -/* - * Use this function on devices where slope and offset calculations - * depend on calibration data read from qfprom. On others the slope - * and offset values are derived from tz->tzp->slope and tz->tzp->offset - * resp. - */ -void compute_intercept_slope(struct tsens_priv *priv, u32 *p1, - u32 *p2, u32 mode) -{ - int i; - int num, den; - - for (i = 0; i < priv->num_sensors; i++) { - dev_dbg(priv->dev, - "%s: sensor%d - data_point1:%#x data_point2:%#x\n", - __func__, i, p1[i], p2[i]); - - priv->sensor[i].slope = SLOPE_DEFAULT; - if (mode == TWO_PT_CALIB) { - /* - * slope (m) = adc_code2 - adc_code1 (y2 - y1)/ - * temp_120_degc - temp_30_degc (x2 - x1) - */ - num = p2[i] - p1[i]; - num *= SLOPE_FACTOR; - den = CAL_DEGC_PT2 - CAL_DEGC_PT1; - priv->sensor[i].slope = num / den; - } - - priv->sensor[i].offset = (p1[i] * SLOPE_FACTOR) - - (CAL_DEGC_PT1 * - priv->sensor[i].slope); - dev_dbg(priv->dev, "%s: offset:%d\n", __func__, priv->sensor[i].offset); - } -} - -static inline u32 degc_to_code(int degc, const struct tsens_sensor *s) -{ - u64 code = div_u64(((u64)degc * s->slope + s->offset), SLOPE_FACTOR); - - pr_debug("%s: raw_code: 0x%llx, degc:%d\n", __func__, code, degc); - return clamp_val(code, THRESHOLD_MIN_ADC_CODE, THRESHOLD_MAX_ADC_CODE); -} - -static inline int code_to_degc(u32 adc_code, const struct tsens_sensor *s) -{ - int degc, num, den; - - num = (adc_code * SLOPE_FACTOR) - s->offset; - den = s->slope; - - if (num > 0) - degc = num + (den / 2); - else if (num < 0) - degc = num - (den / 2); - else - degc = num; - - degc /= den; - - return degc; -} - -/** - * tsens_hw_to_mC - Return sign-extended temperature in mCelsius. - * @s: Pointer to sensor struct - * @field: Index into regmap_field array pointing to temperature data - * - * This function handles temperature returned in ADC code or deciCelsius - * depending on IP version. 
- * - * Return: Temperature in milliCelsius on success, a negative errno will - * be returned in error cases - */ -static int tsens_hw_to_mC(const struct tsens_sensor *s, int field) -{ - struct tsens_priv *priv = s->priv; - u32 resolution; - u32 temp = 0; - int ret; - - resolution = priv->fields[LAST_TEMP_0].msb - - priv->fields[LAST_TEMP_0].lsb; - - ret = regmap_field_read(priv->rf[field], &temp); - if (ret) - return ret; - - /* Convert temperature from ADC code to milliCelsius */ - if (priv->feat->adc) - return code_to_degc(temp, s) * 1000; - - /* deciCelsius -> milliCelsius along with sign extension */ - return sign_extend32(temp, resolution) * 100; -} - -/** - * tsens_mC_to_hw - Convert temperature to hardware register value - * @s: Pointer to sensor struct - * @temp: temperature in milliCelsius to be programmed to hardware - * - * This function outputs the value to be written to hardware in ADC code - * or deciCelsius depending on IP version. - * - * Return: ADC code or temperature in deciCelsius. - */ -static int tsens_mC_to_hw(const struct tsens_sensor *s, int temp) -{ - struct tsens_priv *priv = s->priv; - - /* milliC to adc code */ - if (priv->feat->adc) - return degc_to_code(temp / 1000, s); - - /* milliC to deciC */ - return temp / 100; -} - -static inline enum tsens_ver tsens_version(struct tsens_priv *priv) -{ - return priv->feat->ver_major; -} - -static void tsens_set_interrupt_v1(struct tsens_priv *priv, u32 hw_id, - enum tsens_irq_type irq_type, bool enable) -{ - u32 index = 0; - - switch (irq_type) { - case UPPER: - index = UP_INT_CLEAR_0 + hw_id; - break; - case LOWER: - index = LOW_INT_CLEAR_0 + hw_id; - break; - case CRITICAL: - /* No critical interrupts before v2 */ - return; - } - regmap_field_write(priv->rf[index], enable ? 0 : 1); -} - -static void tsens_set_interrupt_v2(struct tsens_priv *priv, u32 hw_id, - enum tsens_irq_type irq_type, bool enable) -{ - u32 index_mask = 0, index_clear = 0; - - /* - * To enable the interrupt flag for a sensor: - * - clear the mask bit - * To disable the interrupt flag for a sensor: - * - Mask further interrupts for this sensor - * - Write 1 followed by 0 to clear the interrupt - */ - switch (irq_type) { - case UPPER: - index_mask = UP_INT_MASK_0 + hw_id; - index_clear = UP_INT_CLEAR_0 + hw_id; - break; - case LOWER: - index_mask = LOW_INT_MASK_0 + hw_id; - index_clear = LOW_INT_CLEAR_0 + hw_id; - break; - case CRITICAL: - index_mask = CRIT_INT_MASK_0 + hw_id; - index_clear = CRIT_INT_CLEAR_0 + hw_id; - break; - } - - if (enable) { - regmap_field_write(priv->rf[index_mask], 0); - } else { - regmap_field_write(priv->rf[index_mask], 1); - regmap_field_write(priv->rf[index_clear], 1); - regmap_field_write(priv->rf[index_clear], 0); - } -} - -/** - * tsens_set_interrupt - Set state of an interrupt - * @priv: Pointer to tsens controller private data - * @hw_id: Hardware ID aka. sensor number - * @irq_type: irq_type from enum tsens_irq_type - * @enable: false = disable, true = enable - * - * Call IP-specific function to set state of an interrupt - * - * Return: void - */ -static void tsens_set_interrupt(struct tsens_priv *priv, u32 hw_id, - enum tsens_irq_type irq_type, bool enable) -{ - dev_dbg(priv->dev, "[%u] %s: %s -> %s\n", hw_id, __func__, - irq_type ? ((irq_type == 1) ? "UP" : "CRITICAL") : "LOW", - enable ? 
"en" : "dis"); - if (tsens_version(priv) > VER_1_X) - tsens_set_interrupt_v2(priv, hw_id, irq_type, enable); - else - tsens_set_interrupt_v1(priv, hw_id, irq_type, enable); -} - -/** - * tsens_threshold_violated - Check if a sensor temperature violated a preset threshold - * @priv: Pointer to tsens controller private data - * @hw_id: Hardware ID aka. sensor number - * @d: Pointer to irq state data - * - * Return: 0 if threshold was not violated, 1 if it was violated and negative - * errno in case of errors - */ -static int tsens_threshold_violated(struct tsens_priv *priv, u32 hw_id, - struct tsens_irq_data *d) -{ - int ret; - - ret = regmap_field_read(priv->rf[UPPER_STATUS_0 + hw_id], &d->up_viol); - if (ret) - return ret; - ret = regmap_field_read(priv->rf[LOWER_STATUS_0 + hw_id], &d->low_viol); - if (ret) - return ret; - - if (priv->feat->crit_int) { - ret = regmap_field_read(priv->rf[CRITICAL_STATUS_0 + hw_id], - &d->crit_viol); - if (ret) - return ret; - } - - if (d->up_viol || d->low_viol || d->crit_viol) - return 1; - - return 0; -} - -static int tsens_read_irq_state(struct tsens_priv *priv, u32 hw_id, - const struct tsens_sensor *s, - struct tsens_irq_data *d) -{ - int ret; - - ret = regmap_field_read(priv->rf[UP_INT_CLEAR_0 + hw_id], &d->up_irq_clear); - if (ret) - return ret; - ret = regmap_field_read(priv->rf[LOW_INT_CLEAR_0 + hw_id], &d->low_irq_clear); - if (ret) - return ret; - if (tsens_version(priv) > VER_1_X) { - ret = regmap_field_read(priv->rf[UP_INT_MASK_0 + hw_id], &d->up_irq_mask); - if (ret) - return ret; - ret = regmap_field_read(priv->rf[LOW_INT_MASK_0 + hw_id], &d->low_irq_mask); - if (ret) - return ret; - ret = regmap_field_read(priv->rf[CRIT_INT_CLEAR_0 + hw_id], - &d->crit_irq_clear); - if (ret) - return ret; - ret = regmap_field_read(priv->rf[CRIT_INT_MASK_0 + hw_id], - &d->crit_irq_mask); - if (ret) - return ret; - - d->crit_thresh = tsens_hw_to_mC(s, CRIT_THRESH_0 + hw_id); - } else { - /* No mask register on older TSENS */ - d->up_irq_mask = 0; - d->low_irq_mask = 0; - d->crit_irq_clear = 0; - d->crit_irq_mask = 0; - d->crit_thresh = 0; - } - - d->up_thresh = tsens_hw_to_mC(s, UP_THRESH_0 + hw_id); - d->low_thresh = tsens_hw_to_mC(s, LOW_THRESH_0 + hw_id); - - dev_dbg(priv->dev, "[%u] %s%s: status(%u|%u|%u) | clr(%u|%u|%u) | mask(%u|%u|%u)\n", - hw_id, __func__, - (d->up_viol || d->low_viol || d->crit_viol) ? "(V)" : "", - d->low_viol, d->up_viol, d->crit_viol, - d->low_irq_clear, d->up_irq_clear, d->crit_irq_clear, - d->low_irq_mask, d->up_irq_mask, d->crit_irq_mask); - dev_dbg(priv->dev, "[%u] %s%s: thresh: (%d:%d:%d)\n", hw_id, __func__, - (d->up_viol || d->low_viol || d->crit_viol) ? "(V)" : "", - d->low_thresh, d->up_thresh, d->crit_thresh); - - return 0; -} - -static inline u32 masked_irq(u32 hw_id, u32 mask, enum tsens_ver ver) -{ - if (ver > VER_1_X) - return mask & (1 << hw_id); - - /* v1, v0.1 don't have a irq mask register */ - return 0; -} - -/** - * tsens_critical_irq_thread() - Threaded handler for critical interrupts - * @irq: irq number - * @data: tsens controller private data - * - * Check FSM watchdog bark status and clear if needed. - * Check all sensors to find ones that violated their critical threshold limits. - * Clear and then re-enable the interrupt. - * - * The level-triggered interrupt might deassert if the temperature returned to - * within the threshold limits by the time the handler got scheduled. We - * consider the irq to have been handled in that case. 
- * - * Return: IRQ_HANDLED - */ -irqreturn_t tsens_critical_irq_thread(int irq, void *data) -{ - struct tsens_priv *priv = data; - struct tsens_irq_data d; - int temp, ret, i; - u32 wdog_status, wdog_count; - - if (priv->feat->has_watchdog) { - ret = regmap_field_read(priv->rf[WDOG_BARK_STATUS], - &wdog_status); - if (ret) - return ret; - - if (wdog_status) { - /* Clear WDOG interrupt */ - regmap_field_write(priv->rf[WDOG_BARK_CLEAR], 1); - regmap_field_write(priv->rf[WDOG_BARK_CLEAR], 0); - ret = regmap_field_read(priv->rf[WDOG_BARK_COUNT], - &wdog_count); - if (ret) - return ret; - if (wdog_count) - dev_dbg(priv->dev, "%s: watchdog count: %d\n", - __func__, wdog_count); - - /* Fall through to handle critical interrupts if any */ - } - } - - for (i = 0; i < priv->num_sensors; i++) { - const struct tsens_sensor *s = &priv->sensor[i]; - u32 hw_id = s->hw_id; - - if (IS_ERR(s->tzd)) - continue; - if (!tsens_threshold_violated(priv, hw_id, &d)) - continue; - ret = get_temp_tsens_valid(s, &temp); - if (ret) { - dev_err(priv->dev, "[%u] %s: error reading sensor\n", - hw_id, __func__); - continue; - } - - tsens_read_irq_state(priv, hw_id, s, &d); - if (d.crit_viol && - !masked_irq(hw_id, d.crit_irq_mask, tsens_version(priv))) { - /* Mask critical interrupts, unused on Linux */ - tsens_set_interrupt(priv, hw_id, CRITICAL, false); - } - } - - return IRQ_HANDLED; -} - -/** - * tsens_irq_thread - Threaded interrupt handler for uplow interrupts - * @irq: irq number - * @data: tsens controller private data - * - * Check all sensors to find ones that violated their threshold limits. If the - * temperature is still outside the limits, call thermal_zone_device_update() to - * update the thresholds, else re-enable the interrupts. - * - * The level-triggered interrupt might deassert if the temperature returned to - * within the threshold limits by the time the handler got scheduled. We - * consider the irq to have been handled in that case. 
- * - * Return: IRQ_HANDLED - */ -irqreturn_t tsens_irq_thread(int irq, void *data) -{ - struct tsens_priv *priv = data; - struct tsens_irq_data d; - bool enable = true, disable = false; - unsigned long flags; - int temp, ret, i; - - for (i = 0; i < priv->num_sensors; i++) { - bool trigger = false; - const struct tsens_sensor *s = &priv->sensor[i]; - u32 hw_id = s->hw_id; - - if (IS_ERR(s->tzd)) - continue; - if (!tsens_threshold_violated(priv, hw_id, &d)) - continue; - ret = get_temp_tsens_valid(s, &temp); - if (ret) { - dev_err(priv->dev, "[%u] %s: error reading sensor\n", hw_id, __func__); - continue; - } - - spin_lock_irqsave(&priv->ul_lock, flags); - - tsens_read_irq_state(priv, hw_id, s, &d); - - if (d.up_viol && - !masked_irq(hw_id, d.up_irq_mask, tsens_version(priv))) { - tsens_set_interrupt(priv, hw_id, UPPER, disable); - if (d.up_thresh > temp) { - dev_dbg(priv->dev, "[%u] %s: re-arm upper\n", - hw_id, __func__); - tsens_set_interrupt(priv, hw_id, UPPER, enable); - } else { - trigger = true; - /* Keep irq masked */ - } - } else if (d.low_viol && - !masked_irq(hw_id, d.low_irq_mask, tsens_version(priv))) { - tsens_set_interrupt(priv, hw_id, LOWER, disable); - if (d.low_thresh < temp) { - dev_dbg(priv->dev, "[%u] %s: re-arm low\n", - hw_id, __func__); - tsens_set_interrupt(priv, hw_id, LOWER, enable); - } else { - trigger = true; - /* Keep irq masked */ - } - } - - spin_unlock_irqrestore(&priv->ul_lock, flags); - - if (trigger) { - dev_dbg(priv->dev, "[%u] %s: TZ update trigger (%d mC)\n", - hw_id, __func__, temp); - thermal_zone_device_update(s->tzd, - THERMAL_EVENT_UNSPECIFIED); - } else { - dev_dbg(priv->dev, "[%u] %s: no violation: %d\n", - hw_id, __func__, temp); - } - } - - return IRQ_HANDLED; -} - -int tsens_set_trips(void *_sensor, int low, int high) -{ - struct tsens_sensor *s = _sensor; - struct tsens_priv *priv = s->priv; - struct device *dev = priv->dev; - struct tsens_irq_data d; - unsigned long flags; - int high_val, low_val, cl_high, cl_low; - u32 hw_id = s->hw_id; - - dev_dbg(dev, "[%u] %s: proposed thresholds: (%d:%d)\n", - hw_id, __func__, low, high); - - cl_high = clamp_val(high, -40000, 120000); - cl_low = clamp_val(low, -40000, 120000); - - high_val = tsens_mC_to_hw(s, cl_high); - low_val = tsens_mC_to_hw(s, cl_low); - - spin_lock_irqsave(&priv->ul_lock, flags); - - tsens_read_irq_state(priv, hw_id, s, &d); - - /* Write the new thresholds and clear the status */ - regmap_field_write(priv->rf[LOW_THRESH_0 + hw_id], low_val); - regmap_field_write(priv->rf[UP_THRESH_0 + hw_id], high_val); - tsens_set_interrupt(priv, hw_id, LOWER, true); - tsens_set_interrupt(priv, hw_id, UPPER, true); - - spin_unlock_irqrestore(&priv->ul_lock, flags); - - dev_dbg(dev, "[%u] %s: (%d:%d)->(%d:%d)\n", - hw_id, __func__, d.low_thresh, d.up_thresh, cl_low, cl_high); - - return 0; -} - -int tsens_enable_irq(struct tsens_priv *priv) -{ - int ret; - int val = tsens_version(priv) > VER_1_X ? 
7 : 1; - - ret = regmap_field_write(priv->rf[INT_EN], val); - if (ret < 0) - dev_err(priv->dev, "%s: failed to enable interrupts\n", __func__); - - return ret; -} - -void tsens_disable_irq(struct tsens_priv *priv) -{ - regmap_field_write(priv->rf[INT_EN], 0); -} - -int get_temp_tsens_valid(const struct tsens_sensor *s, int *temp) -{ - struct tsens_priv *priv = s->priv; - int hw_id = s->hw_id; - u32 temp_idx = LAST_TEMP_0 + hw_id; - u32 valid_idx = VALID_0 + hw_id; - u32 valid; - int ret; - - ret = regmap_field_read(priv->rf[valid_idx], &valid); - if (ret) - return ret; - while (!valid) { - /* Valid bit is 0 for 6 AHB clock cycles. - * At 19.2MHz, 1 AHB clock is ~60ns. - * We should enter this loop very, very rarely. - */ - ndelay(400); - ret = regmap_field_read(priv->rf[valid_idx], &valid); - if (ret) - return ret; - } - - /* Valid bit is set, OK to read the temperature */ - *temp = tsens_hw_to_mC(s, temp_idx); - - return 0; -} - -int get_temp_common(const struct tsens_sensor *s, int *temp) -{ - struct tsens_priv *priv = s->priv; - int hw_id = s->hw_id; - int last_temp = 0, ret; - - ret = regmap_field_read(priv->rf[LAST_TEMP_0 + hw_id], &last_temp); - if (ret) - return ret; - - *temp = code_to_degc(last_temp, s) * 1000; - - return 0; -} - -#ifdef CONFIG_DEBUG_FS -static int dbg_sensors_show(struct seq_file *s, void *data) -{ - struct platform_device *pdev = s->private; - struct tsens_priv *priv = platform_get_drvdata(pdev); - int i; - - seq_printf(s, "max: %2d\nnum: %2d\n\n", - priv->feat->max_sensors, priv->num_sensors); - - seq_puts(s, " id slope offset\n--------------------------\n"); - for (i = 0; i < priv->num_sensors; i++) { - seq_printf(s, "%8d %8d %8d\n", priv->sensor[i].hw_id, - priv->sensor[i].slope, priv->sensor[i].offset); - } - - return 0; -} - -static int dbg_version_show(struct seq_file *s, void *data) -{ - struct platform_device *pdev = s->private; - struct tsens_priv *priv = platform_get_drvdata(pdev); - u32 maj_ver, min_ver, step_ver; - int ret; - - if (tsens_version(priv) > VER_0_1) { - ret = regmap_field_read(priv->rf[VER_MAJOR], &maj_ver); - if (ret) - return ret; - ret = regmap_field_read(priv->rf[VER_MINOR], &min_ver); - if (ret) - return ret; - ret = regmap_field_read(priv->rf[VER_STEP], &step_ver); - if (ret) - return ret; - seq_printf(s, "%d.%d.%d\n", maj_ver, min_ver, step_ver); - } else { - seq_puts(s, "0.1.0\n"); - } - - return 0; -} - -DEFINE_SHOW_ATTRIBUTE(dbg_version); -DEFINE_SHOW_ATTRIBUTE(dbg_sensors); - -static void tsens_debug_init(struct platform_device *pdev) -{ - struct tsens_priv *priv = platform_get_drvdata(pdev); - struct dentry *root, *file; - - root = debugfs_lookup("tsens", NULL); - if (!root) - priv->debug_root = debugfs_create_dir("tsens", NULL); - else - priv->debug_root = root; - - file = debugfs_lookup("version", priv->debug_root); - if (!file) - debugfs_create_file("version", 0444, priv->debug_root, - pdev, &dbg_version_fops); - - /* A directory for each instance of the TSENS IP */ - priv->debug = debugfs_create_dir(dev_name(&pdev->dev), priv->debug_root); - debugfs_create_file("sensors", 0444, priv->debug, pdev, &dbg_sensors_fops); -} -#else -static inline void tsens_debug_init(struct platform_device *pdev) {} -#endif - -static const struct regmap_config tsens_config = { - .name = "tm", - .reg_bits = 32, - .val_bits = 32, - .reg_stride = 4, -}; - -static const struct regmap_config tsens_srot_config = { - .name = "srot", - .reg_bits = 32, - .val_bits = 32, - .reg_stride = 4, -}; - -int __init init_common(struct tsens_priv *priv) -{ - void 
__iomem *tm_base, *srot_base; - struct device *dev = priv->dev; - u32 ver_minor; - struct resource *res; - u32 enabled; - int ret, i, j; - struct platform_device *op = of_find_device_by_node(priv->dev->of_node); - - if (!op) - return -EINVAL; - - if (op->num_resources > 1) { - /* DT with separate SROT and TM address space */ - priv->tm_offset = 0; - res = platform_get_resource(op, IORESOURCE_MEM, 1); - srot_base = devm_ioremap_resource(dev, res); - if (IS_ERR(srot_base)) { - ret = PTR_ERR(srot_base); - goto err_put_device; - } - - priv->srot_map = devm_regmap_init_mmio(dev, srot_base, - &tsens_srot_config); - if (IS_ERR(priv->srot_map)) { - ret = PTR_ERR(priv->srot_map); - goto err_put_device; - } - } else { - /* old DTs where SROT and TM were in a contiguous 2K block */ - priv->tm_offset = 0x1000; - } - - res = platform_get_resource(op, IORESOURCE_MEM, 0); - tm_base = devm_ioremap_resource(dev, res); - if (IS_ERR(tm_base)) { - ret = PTR_ERR(tm_base); - goto err_put_device; - } - - priv->tm_map = devm_regmap_init_mmio(dev, tm_base, &tsens_config); - if (IS_ERR(priv->tm_map)) { - ret = PTR_ERR(priv->tm_map); - goto err_put_device; - } - - if (tsens_version(priv) > VER_0_1) { - for (i = VER_MAJOR; i <= VER_STEP; i++) { - priv->rf[i] = devm_regmap_field_alloc(dev, priv->srot_map, - priv->fields[i]); - if (IS_ERR(priv->rf[i])) - return PTR_ERR(priv->rf[i]); - } - ret = regmap_field_read(priv->rf[VER_MINOR], &ver_minor); - if (ret) - goto err_put_device; - } - - priv->rf[TSENS_EN] = devm_regmap_field_alloc(dev, priv->srot_map, - priv->fields[TSENS_EN]); - if (IS_ERR(priv->rf[TSENS_EN])) { - ret = PTR_ERR(priv->rf[TSENS_EN]); - goto err_put_device; - } - ret = regmap_field_read(priv->rf[TSENS_EN], &enabled); - if (ret) - goto err_put_device; - if (!enabled) { - dev_err(dev, "%s: device not enabled\n", __func__); - ret = -ENODEV; - goto err_put_device; - } - - priv->rf[SENSOR_EN] = devm_regmap_field_alloc(dev, priv->srot_map, - priv->fields[SENSOR_EN]); - if (IS_ERR(priv->rf[SENSOR_EN])) { - ret = PTR_ERR(priv->rf[SENSOR_EN]); - goto err_put_device; - } - priv->rf[INT_EN] = devm_regmap_field_alloc(dev, priv->tm_map, - priv->fields[INT_EN]); - if (IS_ERR(priv->rf[INT_EN])) { - ret = PTR_ERR(priv->rf[INT_EN]); - goto err_put_device; - } - - /* This loop might need changes if enum regfield_ids is reordered */ - for (j = LAST_TEMP_0; j <= UP_THRESH_15; j += 16) { - for (i = 0; i < priv->feat->max_sensors; i++) { - int idx = j + i; - - priv->rf[idx] = devm_regmap_field_alloc(dev, priv->tm_map, - priv->fields[idx]); - if (IS_ERR(priv->rf[idx])) { - ret = PTR_ERR(priv->rf[idx]); - goto err_put_device; - } - } - } - - if (priv->feat->crit_int) { - /* Loop might need changes if enum regfield_ids is reordered */ - for (j = CRITICAL_STATUS_0; j <= CRIT_THRESH_15; j += 16) { - for (i = 0; i < priv->feat->max_sensors; i++) { - int idx = j + i; - - priv->rf[idx] = - devm_regmap_field_alloc(dev, - priv->tm_map, - priv->fields[idx]); - if (IS_ERR(priv->rf[idx])) { - ret = PTR_ERR(priv->rf[idx]); - goto err_put_device; - } - } - } - } - - if (tsens_version(priv) > VER_1_X && ver_minor > 2) { - /* Watchdog is present only on v2.3+ */ - priv->feat->has_watchdog = 1; - for (i = WDOG_BARK_STATUS; i <= CC_MON_MASK; i++) { - priv->rf[i] = devm_regmap_field_alloc(dev, priv->tm_map, - priv->fields[i]); - if (IS_ERR(priv->rf[i])) { - ret = PTR_ERR(priv->rf[i]); - goto err_put_device; - } - } - /* - * Watchdog is already enabled, unmask the bark. 
- * Disable cycle completion monitoring - */ - regmap_field_write(priv->rf[WDOG_BARK_MASK], 0); - regmap_field_write(priv->rf[CC_MON_MASK], 1); - } - - spin_lock_init(&priv->ul_lock); - tsens_enable_irq(priv); - tsens_debug_init(op); - -err_put_device: - put_device(&op->dev); - return ret; -} diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c index 2f77d235cf735..8d3e94d2a9ed4 100644 --- a/drivers/thermal/qcom/tsens.c +++ b/drivers/thermal/qcom/tsens.c @@ -1,19 +1,857 @@ // SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * Copyright (c) 2019, 2020, Linaro Ltd. */ #include #include +#include #include +#include #include +#include #include #include #include +#include #include #include #include "tsens.h" +/** + * struct tsens_irq_data - IRQ status and temperature violations + * @up_viol: upper threshold violated + * @up_thresh: upper threshold temperature value + * @up_irq_mask: mask register for upper threshold irqs + * @up_irq_clear: clear register for uppper threshold irqs + * @low_viol: lower threshold violated + * @low_thresh: lower threshold temperature value + * @low_irq_mask: mask register for lower threshold irqs + * @low_irq_clear: clear register for lower threshold irqs + * @crit_viol: critical threshold violated + * @crit_thresh: critical threshold temperature value + * @crit_irq_mask: mask register for critical threshold irqs + * @crit_irq_clear: clear register for critical threshold irqs + * + * Structure containing data about temperature threshold settings and + * irq status if they were violated. + */ +struct tsens_irq_data { + u32 up_viol; + int up_thresh; + u32 up_irq_mask; + u32 up_irq_clear; + u32 low_viol; + int low_thresh; + u32 low_irq_mask; + u32 low_irq_clear; + u32 crit_viol; + u32 crit_thresh; + u32 crit_irq_mask; + u32 crit_irq_clear; +}; + +char *qfprom_read(struct device *dev, const char *cname) +{ + struct nvmem_cell *cell; + ssize_t data; + char *ret; + + cell = nvmem_cell_get(dev, cname); + if (IS_ERR(cell)) + return ERR_CAST(cell); + + ret = nvmem_cell_read(cell, &data); + nvmem_cell_put(cell); + + return ret; +} + +/* + * Use this function on devices where slope and offset calculations + * depend on calibration data read from qfprom. On others the slope + * and offset values are derived from tz->tzp->slope and tz->tzp->offset + * resp. 
+ */ +void compute_intercept_slope(struct tsens_priv *priv, u32 *p1, + u32 *p2, u32 mode) +{ + int i; + int num, den; + + for (i = 0; i < priv->num_sensors; i++) { + dev_dbg(priv->dev, + "%s: sensor%d - data_point1:%#x data_point2:%#x\n", + __func__, i, p1[i], p2[i]); + + priv->sensor[i].slope = SLOPE_DEFAULT; + if (mode == TWO_PT_CALIB) { + /* + * slope (m) = adc_code2 - adc_code1 (y2 - y1)/ + * temp_120_degc - temp_30_degc (x2 - x1) + */ + num = p2[i] - p1[i]; + num *= SLOPE_FACTOR; + den = CAL_DEGC_PT2 - CAL_DEGC_PT1; + priv->sensor[i].slope = num / den; + } + + priv->sensor[i].offset = (p1[i] * SLOPE_FACTOR) - + (CAL_DEGC_PT1 * + priv->sensor[i].slope); + dev_dbg(priv->dev, "%s: offset:%d\n", __func__, + priv->sensor[i].offset); + } +} + +static inline u32 degc_to_code(int degc, const struct tsens_sensor *s) +{ + u64 code = div_u64(((u64)degc * s->slope + s->offset), SLOPE_FACTOR); + + pr_debug("%s: raw_code: 0x%llx, degc:%d\n", __func__, code, degc); + return clamp_val(code, THRESHOLD_MIN_ADC_CODE, THRESHOLD_MAX_ADC_CODE); +} + +static inline int code_to_degc(u32 adc_code, const struct tsens_sensor *s) +{ + int degc, num, den; + + num = (adc_code * SLOPE_FACTOR) - s->offset; + den = s->slope; + + if (num > 0) + degc = num + (den / 2); + else if (num < 0) + degc = num - (den / 2); + else + degc = num; + + degc /= den; + + return degc; +} + +/** + * tsens_hw_to_mC - Return sign-extended temperature in mCelsius. + * @s: Pointer to sensor struct + * @field: Index into regmap_field array pointing to temperature data + * + * This function handles temperature returned in ADC code or deciCelsius + * depending on IP version. + * + * Return: Temperature in milliCelsius on success, a negative errno will + * be returned in error cases + */ +static int tsens_hw_to_mC(const struct tsens_sensor *s, int field) +{ + struct tsens_priv *priv = s->priv; + u32 resolution; + u32 temp = 0; + int ret; + + resolution = priv->fields[LAST_TEMP_0].msb - + priv->fields[LAST_TEMP_0].lsb; + + ret = regmap_field_read(priv->rf[field], &temp); + if (ret) + return ret; + + /* Convert temperature from ADC code to milliCelsius */ + if (priv->feat->adc) + return code_to_degc(temp, s) * 1000; + + /* deciCelsius -> milliCelsius along with sign extension */ + return sign_extend32(temp, resolution) * 100; +} + +/** + * tsens_mC_to_hw - Convert temperature to hardware register value + * @s: Pointer to sensor struct + * @temp: temperature in milliCelsius to be programmed to hardware + * + * This function outputs the value to be written to hardware in ADC code + * or deciCelsius depending on IP version. + * + * Return: ADC code or temperature in deciCelsius. + */ +static int tsens_mC_to_hw(const struct tsens_sensor *s, int temp) +{ + struct tsens_priv *priv = s->priv; + + /* milliC to adc code */ + if (priv->feat->adc) + return degc_to_code(temp / 1000, s); + + /* milliC to deciC */ + return temp / 100; +} + +static inline enum tsens_ver tsens_version(struct tsens_priv *priv) +{ + return priv->feat->ver_major; +} + +static void tsens_set_interrupt_v1(struct tsens_priv *priv, u32 hw_id, + enum tsens_irq_type irq_type, bool enable) +{ + u32 index = 0; + + switch (irq_type) { + case UPPER: + index = UP_INT_CLEAR_0 + hw_id; + break; + case LOWER: + index = LOW_INT_CLEAR_0 + hw_id; + break; + case CRITICAL: + /* No critical interrupts before v2 */ + return; + } + regmap_field_write(priv->rf[index], enable ? 
0 : 1); +} + +static void tsens_set_interrupt_v2(struct tsens_priv *priv, u32 hw_id, + enum tsens_irq_type irq_type, bool enable) +{ + u32 index_mask = 0, index_clear = 0; + + /* + * To enable the interrupt flag for a sensor: + * - clear the mask bit + * To disable the interrupt flag for a sensor: + * - Mask further interrupts for this sensor + * - Write 1 followed by 0 to clear the interrupt + */ + switch (irq_type) { + case UPPER: + index_mask = UP_INT_MASK_0 + hw_id; + index_clear = UP_INT_CLEAR_0 + hw_id; + break; + case LOWER: + index_mask = LOW_INT_MASK_0 + hw_id; + index_clear = LOW_INT_CLEAR_0 + hw_id; + break; + case CRITICAL: + index_mask = CRIT_INT_MASK_0 + hw_id; + index_clear = CRIT_INT_CLEAR_0 + hw_id; + break; + } + + if (enable) { + regmap_field_write(priv->rf[index_mask], 0); + } else { + regmap_field_write(priv->rf[index_mask], 1); + regmap_field_write(priv->rf[index_clear], 1); + regmap_field_write(priv->rf[index_clear], 0); + } +} + +/** + * tsens_set_interrupt - Set state of an interrupt + * @priv: Pointer to tsens controller private data + * @hw_id: Hardware ID aka. sensor number + * @irq_type: irq_type from enum tsens_irq_type + * @enable: false = disable, true = enable + * + * Call IP-specific function to set state of an interrupt + * + * Return: void + */ +static void tsens_set_interrupt(struct tsens_priv *priv, u32 hw_id, + enum tsens_irq_type irq_type, bool enable) +{ + dev_dbg(priv->dev, "[%u] %s: %s -> %s\n", hw_id, __func__, + irq_type ? ((irq_type == 1) ? "UP" : "CRITICAL") : "LOW", + enable ? "en" : "dis"); + if (tsens_version(priv) > VER_1_X) + tsens_set_interrupt_v2(priv, hw_id, irq_type, enable); + else + tsens_set_interrupt_v1(priv, hw_id, irq_type, enable); +} + +/** + * tsens_threshold_violated - Check if a sensor temperature violated a preset threshold + * @priv: Pointer to tsens controller private data + * @hw_id: Hardware ID aka. 
sensor number + * @d: Pointer to irq state data + * + * Return: 0 if threshold was not violated, 1 if it was violated and negative + * errno in case of errors + */ +static int tsens_threshold_violated(struct tsens_priv *priv, u32 hw_id, + struct tsens_irq_data *d) +{ + int ret; + + ret = regmap_field_read(priv->rf[UPPER_STATUS_0 + hw_id], &d->up_viol); + if (ret) + return ret; + ret = regmap_field_read(priv->rf[LOWER_STATUS_0 + hw_id], &d->low_viol); + if (ret) + return ret; + + if (priv->feat->crit_int) { + ret = regmap_field_read(priv->rf[CRITICAL_STATUS_0 + hw_id], + &d->crit_viol); + if (ret) + return ret; + } + + if (d->up_viol || d->low_viol || d->crit_viol) + return 1; + + return 0; +} + +static int tsens_read_irq_state(struct tsens_priv *priv, u32 hw_id, + const struct tsens_sensor *s, + struct tsens_irq_data *d) +{ + int ret; + + ret = regmap_field_read(priv->rf[UP_INT_CLEAR_0 + hw_id], &d->up_irq_clear); + if (ret) + return ret; + ret = regmap_field_read(priv->rf[LOW_INT_CLEAR_0 + hw_id], &d->low_irq_clear); + if (ret) + return ret; + if (tsens_version(priv) > VER_1_X) { + ret = regmap_field_read(priv->rf[UP_INT_MASK_0 + hw_id], &d->up_irq_mask); + if (ret) + return ret; + ret = regmap_field_read(priv->rf[LOW_INT_MASK_0 + hw_id], &d->low_irq_mask); + if (ret) + return ret; + ret = regmap_field_read(priv->rf[CRIT_INT_CLEAR_0 + hw_id], + &d->crit_irq_clear); + if (ret) + return ret; + ret = regmap_field_read(priv->rf[CRIT_INT_MASK_0 + hw_id], + &d->crit_irq_mask); + if (ret) + return ret; + + d->crit_thresh = tsens_hw_to_mC(s, CRIT_THRESH_0 + hw_id); + } else { + /* No mask register on older TSENS */ + d->up_irq_mask = 0; + d->low_irq_mask = 0; + d->crit_irq_clear = 0; + d->crit_irq_mask = 0; + d->crit_thresh = 0; + } + + d->up_thresh = tsens_hw_to_mC(s, UP_THRESH_0 + hw_id); + d->low_thresh = tsens_hw_to_mC(s, LOW_THRESH_0 + hw_id); + + dev_dbg(priv->dev, "[%u] %s%s: status(%u|%u|%u) | clr(%u|%u|%u) | mask(%u|%u|%u)\n", + hw_id, __func__, + (d->up_viol || d->low_viol || d->crit_viol) ? "(V)" : "", + d->low_viol, d->up_viol, d->crit_viol, + d->low_irq_clear, d->up_irq_clear, d->crit_irq_clear, + d->low_irq_mask, d->up_irq_mask, d->crit_irq_mask); + dev_dbg(priv->dev, "[%u] %s%s: thresh: (%d:%d:%d)\n", hw_id, __func__, + (d->up_viol || d->low_viol || d->crit_viol) ? "(V)" : "", + d->low_thresh, d->up_thresh, d->crit_thresh); + + return 0; +} + +static inline u32 masked_irq(u32 hw_id, u32 mask, enum tsens_ver ver) +{ + if (ver > VER_1_X) + return mask & (1 << hw_id); + + /* v1, v0.1 don't have a irq mask register */ + return 0; +} + +/** + * tsens_critical_irq_thread() - Threaded handler for critical interrupts + * @irq: irq number + * @data: tsens controller private data + * + * Check FSM watchdog bark status and clear if needed. + * Check all sensors to find ones that violated their critical threshold limits. + * Clear and then re-enable the interrupt. + * + * The level-triggered interrupt might deassert if the temperature returned to + * within the threshold limits by the time the handler got scheduled. We + * consider the irq to have been handled in that case. 
+ * + * Return: IRQ_HANDLED + */ +irqreturn_t tsens_critical_irq_thread(int irq, void *data) +{ + struct tsens_priv *priv = data; + struct tsens_irq_data d; + int temp, ret, i; + u32 wdog_status, wdog_count; + + if (priv->feat->has_watchdog) { + ret = regmap_field_read(priv->rf[WDOG_BARK_STATUS], + &wdog_status); + if (ret) + return ret; + + if (wdog_status) { + /* Clear WDOG interrupt */ + regmap_field_write(priv->rf[WDOG_BARK_CLEAR], 1); + regmap_field_write(priv->rf[WDOG_BARK_CLEAR], 0); + ret = regmap_field_read(priv->rf[WDOG_BARK_COUNT], + &wdog_count); + if (ret) + return ret; + if (wdog_count) + dev_dbg(priv->dev, "%s: watchdog count: %d\n", + __func__, wdog_count); + + /* Fall through to handle critical interrupts if any */ + } + } + + for (i = 0; i < priv->num_sensors; i++) { + const struct tsens_sensor *s = &priv->sensor[i]; + u32 hw_id = s->hw_id; + + if (IS_ERR(s->tzd)) + continue; + if (!tsens_threshold_violated(priv, hw_id, &d)) + continue; + ret = get_temp_tsens_valid(s, &temp); + if (ret) { + dev_err(priv->dev, "[%u] %s: error reading sensor\n", + hw_id, __func__); + continue; + } + + tsens_read_irq_state(priv, hw_id, s, &d); + if (d.crit_viol && + !masked_irq(hw_id, d.crit_irq_mask, tsens_version(priv))) { + /* Mask critical interrupts, unused on Linux */ + tsens_set_interrupt(priv, hw_id, CRITICAL, false); + } + } + + return IRQ_HANDLED; +} + +/** + * tsens_irq_thread - Threaded interrupt handler for uplow interrupts + * @irq: irq number + * @data: tsens controller private data + * + * Check all sensors to find ones that violated their threshold limits. If the + * temperature is still outside the limits, call thermal_zone_device_update() to + * update the thresholds, else re-enable the interrupts. + * + * The level-triggered interrupt might deassert if the temperature returned to + * within the threshold limits by the time the handler got scheduled. We + * consider the irq to have been handled in that case. 
+ * + * Return: IRQ_HANDLED + */ +irqreturn_t tsens_irq_thread(int irq, void *data) +{ + struct tsens_priv *priv = data; + struct tsens_irq_data d; + bool enable = true, disable = false; + unsigned long flags; + int temp, ret, i; + + for (i = 0; i < priv->num_sensors; i++) { + bool trigger = false; + const struct tsens_sensor *s = &priv->sensor[i]; + u32 hw_id = s->hw_id; + + if (IS_ERR(s->tzd)) + continue; + if (!tsens_threshold_violated(priv, hw_id, &d)) + continue; + ret = get_temp_tsens_valid(s, &temp); + if (ret) { + dev_err(priv->dev, "[%u] %s: error reading sensor\n", + hw_id, __func__); + continue; + } + + spin_lock_irqsave(&priv->ul_lock, flags); + + tsens_read_irq_state(priv, hw_id, s, &d); + + if (d.up_viol && + !masked_irq(hw_id, d.up_irq_mask, tsens_version(priv))) { + tsens_set_interrupt(priv, hw_id, UPPER, disable); + if (d.up_thresh > temp) { + dev_dbg(priv->dev, "[%u] %s: re-arm upper\n", + hw_id, __func__); + tsens_set_interrupt(priv, hw_id, UPPER, enable); + } else { + trigger = true; + /* Keep irq masked */ + } + } else if (d.low_viol && + !masked_irq(hw_id, d.low_irq_mask, tsens_version(priv))) { + tsens_set_interrupt(priv, hw_id, LOWER, disable); + if (d.low_thresh < temp) { + dev_dbg(priv->dev, "[%u] %s: re-arm low\n", + hw_id, __func__); + tsens_set_interrupt(priv, hw_id, LOWER, enable); + } else { + trigger = true; + /* Keep irq masked */ + } + } + + spin_unlock_irqrestore(&priv->ul_lock, flags); + + if (trigger) { + dev_dbg(priv->dev, "[%u] %s: TZ update trigger (%d mC)\n", + hw_id, __func__, temp); + thermal_zone_device_update(s->tzd, + THERMAL_EVENT_UNSPECIFIED); + } else { + dev_dbg(priv->dev, "[%u] %s: no violation: %d\n", + hw_id, __func__, temp); + } + } + + return IRQ_HANDLED; +} + +int tsens_set_trips(void *_sensor, int low, int high) +{ + struct tsens_sensor *s = _sensor; + struct tsens_priv *priv = s->priv; + struct device *dev = priv->dev; + struct tsens_irq_data d; + unsigned long flags; + int high_val, low_val, cl_high, cl_low; + u32 hw_id = s->hw_id; + + dev_dbg(dev, "[%u] %s: proposed thresholds: (%d:%d)\n", + hw_id, __func__, low, high); + + cl_high = clamp_val(high, -40000, 120000); + cl_low = clamp_val(low, -40000, 120000); + + high_val = tsens_mC_to_hw(s, cl_high); + low_val = tsens_mC_to_hw(s, cl_low); + + spin_lock_irqsave(&priv->ul_lock, flags); + + tsens_read_irq_state(priv, hw_id, s, &d); + + /* Write the new thresholds and clear the status */ + regmap_field_write(priv->rf[LOW_THRESH_0 + hw_id], low_val); + regmap_field_write(priv->rf[UP_THRESH_0 + hw_id], high_val); + tsens_set_interrupt(priv, hw_id, LOWER, true); + tsens_set_interrupt(priv, hw_id, UPPER, true); + + spin_unlock_irqrestore(&priv->ul_lock, flags); + + dev_dbg(dev, "[%u] %s: (%d:%d)->(%d:%d)\n", + hw_id, __func__, d.low_thresh, d.up_thresh, cl_low, cl_high); + + return 0; +} + +int tsens_enable_irq(struct tsens_priv *priv) +{ + int ret; + int val = tsens_version(priv) > VER_1_X ? 
7 : 1; + + ret = regmap_field_write(priv->rf[INT_EN], val); + if (ret < 0) + dev_err(priv->dev, "%s: failed to enable interrupts\n", + __func__); + + return ret; +} + +void tsens_disable_irq(struct tsens_priv *priv) +{ + regmap_field_write(priv->rf[INT_EN], 0); +} + +int get_temp_tsens_valid(const struct tsens_sensor *s, int *temp) +{ + struct tsens_priv *priv = s->priv; + int hw_id = s->hw_id; + u32 temp_idx = LAST_TEMP_0 + hw_id; + u32 valid_idx = VALID_0 + hw_id; + u32 valid; + int ret; + + ret = regmap_field_read(priv->rf[valid_idx], &valid); + if (ret) + return ret; + while (!valid) { + /* Valid bit is 0 for 6 AHB clock cycles. + * At 19.2MHz, 1 AHB clock is ~60ns. + * We should enter this loop very, very rarely. + */ + ndelay(400); + ret = regmap_field_read(priv->rf[valid_idx], &valid); + if (ret) + return ret; + } + + /* Valid bit is set, OK to read the temperature */ + *temp = tsens_hw_to_mC(s, temp_idx); + + return 0; +} + +int get_temp_common(const struct tsens_sensor *s, int *temp) +{ + struct tsens_priv *priv = s->priv; + int hw_id = s->hw_id; + int last_temp = 0, ret; + + ret = regmap_field_read(priv->rf[LAST_TEMP_0 + hw_id], &last_temp); + if (ret) + return ret; + + *temp = code_to_degc(last_temp, s) * 1000; + + return 0; +} + +#ifdef CONFIG_DEBUG_FS +static int dbg_sensors_show(struct seq_file *s, void *data) +{ + struct platform_device *pdev = s->private; + struct tsens_priv *priv = platform_get_drvdata(pdev); + int i; + + seq_printf(s, "max: %2d\nnum: %2d\n\n", + priv->feat->max_sensors, priv->num_sensors); + + seq_puts(s, " id slope offset\n--------------------------\n"); + for (i = 0; i < priv->num_sensors; i++) { + seq_printf(s, "%8d %8d %8d\n", priv->sensor[i].hw_id, + priv->sensor[i].slope, priv->sensor[i].offset); + } + + return 0; +} + +static int dbg_version_show(struct seq_file *s, void *data) +{ + struct platform_device *pdev = s->private; + struct tsens_priv *priv = platform_get_drvdata(pdev); + u32 maj_ver, min_ver, step_ver; + int ret; + + if (tsens_version(priv) > VER_0_1) { + ret = regmap_field_read(priv->rf[VER_MAJOR], &maj_ver); + if (ret) + return ret; + ret = regmap_field_read(priv->rf[VER_MINOR], &min_ver); + if (ret) + return ret; + ret = regmap_field_read(priv->rf[VER_STEP], &step_ver); + if (ret) + return ret; + seq_printf(s, "%d.%d.%d\n", maj_ver, min_ver, step_ver); + } else { + seq_puts(s, "0.1.0\n"); + } + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(dbg_version); +DEFINE_SHOW_ATTRIBUTE(dbg_sensors); + +static void tsens_debug_init(struct platform_device *pdev) +{ + struct tsens_priv *priv = platform_get_drvdata(pdev); + struct dentry *root, *file; + + root = debugfs_lookup("tsens", NULL); + if (!root) + priv->debug_root = debugfs_create_dir("tsens", NULL); + else + priv->debug_root = root; + + file = debugfs_lookup("version", priv->debug_root); + if (!file) + debugfs_create_file("version", 0444, priv->debug_root, + pdev, &dbg_version_fops); + + /* A directory for each instance of the TSENS IP */ + priv->debug = debugfs_create_dir(dev_name(&pdev->dev), priv->debug_root); + debugfs_create_file("sensors", 0444, priv->debug, pdev, &dbg_sensors_fops); +} +#else +static inline void tsens_debug_init(struct platform_device *pdev) {} +#endif + +static const struct regmap_config tsens_config = { + .name = "tm", + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, +}; + +static const struct regmap_config tsens_srot_config = { + .name = "srot", + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, +}; + +int __init init_common(struct tsens_priv *priv) +{ + 
void __iomem *tm_base, *srot_base; + struct device *dev = priv->dev; + u32 ver_minor; + struct resource *res; + u32 enabled; + int ret, i, j; + struct platform_device *op = of_find_device_by_node(priv->dev->of_node); + + if (!op) + return -EINVAL; + + if (op->num_resources > 1) { + /* DT with separate SROT and TM address space */ + priv->tm_offset = 0; + res = platform_get_resource(op, IORESOURCE_MEM, 1); + srot_base = devm_ioremap_resource(dev, res); + if (IS_ERR(srot_base)) { + ret = PTR_ERR(srot_base); + goto err_put_device; + } + + priv->srot_map = devm_regmap_init_mmio(dev, srot_base, + &tsens_srot_config); + if (IS_ERR(priv->srot_map)) { + ret = PTR_ERR(priv->srot_map); + goto err_put_device; + } + } else { + /* old DTs where SROT and TM were in a contiguous 2K block */ + priv->tm_offset = 0x1000; + } + + res = platform_get_resource(op, IORESOURCE_MEM, 0); + tm_base = devm_ioremap_resource(dev, res); + if (IS_ERR(tm_base)) { + ret = PTR_ERR(tm_base); + goto err_put_device; + } + + priv->tm_map = devm_regmap_init_mmio(dev, tm_base, &tsens_config); + if (IS_ERR(priv->tm_map)) { + ret = PTR_ERR(priv->tm_map); + goto err_put_device; + } + + if (tsens_version(priv) > VER_0_1) { + for (i = VER_MAJOR; i <= VER_STEP; i++) { + priv->rf[i] = devm_regmap_field_alloc(dev, priv->srot_map, + priv->fields[i]); + if (IS_ERR(priv->rf[i])) + return PTR_ERR(priv->rf[i]); + } + ret = regmap_field_read(priv->rf[VER_MINOR], &ver_minor); + if (ret) + goto err_put_device; + } + + priv->rf[TSENS_EN] = devm_regmap_field_alloc(dev, priv->srot_map, + priv->fields[TSENS_EN]); + if (IS_ERR(priv->rf[TSENS_EN])) { + ret = PTR_ERR(priv->rf[TSENS_EN]); + goto err_put_device; + } + ret = regmap_field_read(priv->rf[TSENS_EN], &enabled); + if (ret) + goto err_put_device; + if (!enabled) { + dev_err(dev, "%s: device not enabled\n", __func__); + ret = -ENODEV; + goto err_put_device; + } + + priv->rf[SENSOR_EN] = devm_regmap_field_alloc(dev, priv->srot_map, + priv->fields[SENSOR_EN]); + if (IS_ERR(priv->rf[SENSOR_EN])) { + ret = PTR_ERR(priv->rf[SENSOR_EN]); + goto err_put_device; + } + priv->rf[INT_EN] = devm_regmap_field_alloc(dev, priv->tm_map, + priv->fields[INT_EN]); + if (IS_ERR(priv->rf[INT_EN])) { + ret = PTR_ERR(priv->rf[INT_EN]); + goto err_put_device; + } + + /* This loop might need changes if enum regfield_ids is reordered */ + for (j = LAST_TEMP_0; j <= UP_THRESH_15; j += 16) { + for (i = 0; i < priv->feat->max_sensors; i++) { + int idx = j + i; + + priv->rf[idx] = devm_regmap_field_alloc(dev, + priv->tm_map, + priv->fields[idx]); + if (IS_ERR(priv->rf[idx])) { + ret = PTR_ERR(priv->rf[idx]); + goto err_put_device; + } + } + } + + if (priv->feat->crit_int) { + /* Loop might need changes if enum regfield_ids is reordered */ + for (j = CRITICAL_STATUS_0; j <= CRIT_THRESH_15; j += 16) { + for (i = 0; i < priv->feat->max_sensors; i++) { + int idx = j + i; + + priv->rf[idx] = + devm_regmap_field_alloc(dev, + priv->tm_map, + priv->fields[idx]); + if (IS_ERR(priv->rf[idx])) { + ret = PTR_ERR(priv->rf[idx]); + goto err_put_device; + } + } + } + } + + if (tsens_version(priv) > VER_1_X && ver_minor > 2) { + /* Watchdog is present only on v2.3+ */ + priv->feat->has_watchdog = 1; + for (i = WDOG_BARK_STATUS; i <= CC_MON_MASK; i++) { + priv->rf[i] = devm_regmap_field_alloc(dev, priv->tm_map, + priv->fields[i]); + if (IS_ERR(priv->rf[i])) { + ret = PTR_ERR(priv->rf[i]); + goto err_put_device; + } + } + /* + * Watchdog is already enabled, unmask the bark. 
+ * Disable cycle completion monitoring + */ + regmap_field_write(priv->rf[WDOG_BARK_MASK], 0); + regmap_field_write(priv->rf[CC_MON_MASK], 1); + } + + spin_lock_init(&priv->ul_lock); + tsens_enable_irq(priv); + tsens_debug_init(op); + +err_put_device: + put_device(&op->dev); + return ret; +} + static int tsens_get_temp(void *data, int *temp) { struct tsens_sensor *s = data; diff --git a/drivers/thermal/qcom/tsens.h b/drivers/thermal/qcom/tsens.h index 502acf0e68285..59d01162c66af 100644 --- a/drivers/thermal/qcom/tsens.h +++ b/drivers/thermal/qcom/tsens.h @@ -580,11 +580,6 @@ void compute_intercept_slope(struct tsens_priv *priv, u32 *pt1, u32 *pt2, u32 mo int init_common(struct tsens_priv *priv); int get_temp_tsens_valid(const struct tsens_sensor *s, int *temp); int get_temp_common(const struct tsens_sensor *s, int *temp); -int tsens_enable_irq(struct tsens_priv *priv); -void tsens_disable_irq(struct tsens_priv *priv); -int tsens_set_trips(void *_sensor, int low, int high); -irqreturn_t tsens_irq_thread(int irq, void *data); -irqreturn_t tsens_critical_irq_thread(int irq, void *data); /* TSENS target */ extern struct tsens_plat_data data_8960; -- GitLab From 1ab20c0e53fa2167357bd90b7f7f7019cad9daaa Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 7 May 2020 13:29:55 +0200 Subject: [PATCH 0642/1971] thermal: qoriq: Add platform dependencies The QorIQ Thermal Monitoring Unit is only present on Freescale E500MC and Layerscape SoCs, and on NXP i.MX8 SoCs. Add platform dependencies to the QORIQ_THERMAL config symbol, to avoid asking the user about it when configuring a kernel without support for any of the aforementioned SoCs. Signed-off-by: Geert Uytterhoeven Acked-by: Arnd Bergmann Acked-by: Li Yang Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/20200507112955.23520-5-geert+renesas@glider.be --- drivers/thermal/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index e53314ea9e25e..3eb2348e52427 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig @@ -295,8 +295,8 @@ config MAX77620_THERMAL config QORIQ_THERMAL tristate "QorIQ Thermal Monitoring Unit" - depends on THERMAL_OF - depends on HAS_IOMEM + depends on THERMAL_OF && HAS_IOMEM + depends on PPC_E500MC || SOC_LS1021A || ARCH_LAYERSCAPE || (ARCH_MXC && ARM64) || COMPILE_TEST select REGMAP_MMIO help Support for Thermal Monitoring Unit (TMU) found on QorIQ platforms. -- GitLab From cd76ca4dd63768385fb81927fdc3087ad3bfeefb Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Fri, 22 May 2020 17:21:38 +0100 Subject: [PATCH 0643/1971] selftests: vdso: Use a header file to prototype parse_vdso API Both vdso_test_gettimeofday and vdso_standalone_test_x86 use the library in parse_vdso.c but each separately declares the API it offers which is not ideal. Create a header file with prototypes of the functions and use it in both the library and the tests to ensure that the same prototypes are used throughout. 
Signed-off-by: Mark Brown Signed-off-by: Shuah Khan --- tools/testing/selftests/vDSO/parse_vdso.c | 24 +------------- tools/testing/selftests/vDSO/parse_vdso.h | 31 +++++++++++++++++++ .../selftests/vDSO/vdso_standalone_test_x86.c | 4 +-- .../selftests/vDSO/vdso_test_gettimeofday.c | 5 +-- 4 files changed, 34 insertions(+), 30 deletions(-) create mode 100644 tools/testing/selftests/vDSO/parse_vdso.h diff --git a/tools/testing/selftests/vDSO/parse_vdso.c b/tools/testing/selftests/vDSO/parse_vdso.c index 1dbb4b87268fa..413f75620a35b 100644 --- a/tools/testing/selftests/vDSO/parse_vdso.c +++ b/tools/testing/selftests/vDSO/parse_vdso.c @@ -21,29 +21,7 @@ #include #include -/* - * To use this vDSO parser, first call one of the vdso_init_* functions. - * If you've already parsed auxv, then pass the value of AT_SYSINFO_EHDR - * to vdso_init_from_sysinfo_ehdr. Otherwise pass auxv to vdso_init_from_auxv. - * Then call vdso_sym for each symbol you want. For example, to look up - * gettimeofday on x86_64, use: - * - * = vdso_sym("LINUX_2.6", "gettimeofday"); - * or - * = vdso_sym("LINUX_2.6", "__vdso_gettimeofday"); - * - * vdso_sym will return 0 if the symbol doesn't exist or if the init function - * failed or was not called. vdso_sym is a little slow, so its return value - * should be cached. - * - * vdso_sym is threadsafe; the init functions are not. - * - * These are the prototypes: - */ -extern void vdso_init_from_auxv(void *auxv); -extern void vdso_init_from_sysinfo_ehdr(uintptr_t base); -extern void *vdso_sym(const char *version, const char *name); - +#include "parse_vdso.h" /* And here's the code. */ #ifndef ELF_BITS diff --git a/tools/testing/selftests/vDSO/parse_vdso.h b/tools/testing/selftests/vDSO/parse_vdso.h new file mode 100644 index 0000000000000..de0453067d7cd --- /dev/null +++ b/tools/testing/selftests/vDSO/parse_vdso.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef PARSE_VDSO_H +#define PARSE_VDSO_H + +#include + +/* + * To use this vDSO parser, first call one of the vdso_init_* functions. + * If you've already parsed auxv, then pass the value of AT_SYSINFO_EHDR + * to vdso_init_from_sysinfo_ehdr. Otherwise pass auxv to vdso_init_from_auxv. + * Then call vdso_sym for each symbol you want. For example, to look up + * gettimeofday on x86_64, use: + * + * = vdso_sym("LINUX_2.6", "gettimeofday"); + * or + * = vdso_sym("LINUX_2.6", "__vdso_gettimeofday"); + * + * vdso_sym will return 0 if the symbol doesn't exist or if the init function + * failed or was not called. vdso_sym is a little slow, so its return value + * should be cached. + * + * vdso_sym is threadsafe; the init functions are not. + * + * These are the prototypes: + */ +void *vdso_sym(const char *version, const char *name); +void vdso_init_from_sysinfo_ehdr(uintptr_t base); +void vdso_init_from_auxv(void *auxv); + +#endif diff --git a/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c b/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c index 5ac4b00acfbcd..8a44ff973ee17 100644 --- a/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c +++ b/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c @@ -16,9 +16,7 @@ #include #include -extern void *vdso_sym(const char *version, const char *name); -extern void vdso_init_from_sysinfo_ehdr(uintptr_t base); -extern void vdso_init_from_auxv(void *auxv); +#include "parse_vdso.h" /* We need a libc functions... 
*/ int strcmp(const char *a, const char *b) diff --git a/tools/testing/selftests/vDSO/vdso_test_gettimeofday.c b/tools/testing/selftests/vDSO/vdso_test_gettimeofday.c index 511c0dc5e47ef..8ccc73ed8240b 100644 --- a/tools/testing/selftests/vDSO/vdso_test_gettimeofday.c +++ b/tools/testing/selftests/vDSO/vdso_test_gettimeofday.c @@ -17,10 +17,7 @@ #include #include "../kselftest.h" - -extern void *vdso_sym(const char *version, const char *name); -extern void vdso_init_from_sysinfo_ehdr(uintptr_t base); -extern void vdso_init_from_auxv(void *auxv); +#include "parse_vdso.h" /* * ARM64's vDSO exports its gettimeofday() implementation with a different -- GitLab From 2e9a97256616f2f7280f8bed8c98e57bfd745a4d Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Fri, 22 May 2020 17:21:39 +0100 Subject: [PATCH 0644/1971] selftests: vdso: Add a selftest for vDSO getcpu() Provide a very basic selftest for getcpu() which similarly to our existing test for gettimeofday() looks up the function in the vDSO and prints the results it gets if the function exists and succeeds. Signed-off-by: Mark Brown Signed-off-by: Shuah Khan --- tools/testing/selftests/vDSO/.gitignore | 1 + tools/testing/selftests/vDSO/Makefile | 3 +- .../testing/selftests/vDSO/vdso_test_getcpu.c | 54 +++++++++++++++++++ 3 files changed, 57 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/vDSO/vdso_test_getcpu.c diff --git a/tools/testing/selftests/vDSO/.gitignore b/tools/testing/selftests/vDSO/.gitignore index 74f49bd5f6dd8..5eb64d41e5419 100644 --- a/tools/testing/selftests/vDSO/.gitignore +++ b/tools/testing/selftests/vDSO/.gitignore @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only vdso_test vdso_test_gettimeofday +vdso_test_getcpu vdso_standalone_test_x86 diff --git a/tools/testing/selftests/vDSO/Makefile b/tools/testing/selftests/vDSO/Makefile index ae15d700b62ee..0069f2f83f865 100644 --- a/tools/testing/selftests/vDSO/Makefile +++ b/tools/testing/selftests/vDSO/Makefile @@ -4,7 +4,7 @@ include ../lib.mk uname_M := $(shell uname -m 2>/dev/null || echo not) ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/) -TEST_GEN_PROGS := $(OUTPUT)/vdso_test_gettimeofday +TEST_GEN_PROGS := $(OUTPUT)/vdso_test_gettimeofday $(OUTPUT)/vdso_test_getcpu ifeq ($(ARCH),x86) TEST_GEN_PROGS += $(OUTPUT)/vdso_standalone_test_x86 endif @@ -18,6 +18,7 @@ endif all: $(TEST_GEN_PROGS) $(OUTPUT)/vdso_test_gettimeofday: parse_vdso.c vdso_test_gettimeofday.c +$(OUTPUT)/vdso_test_getcpu: parse_vdso.c vdso_test_getcpu.c $(OUTPUT)/vdso_standalone_test_x86: vdso_standalone_test_x86.c parse_vdso.c $(CC) $(CFLAGS) $(CFLAGS_vdso_standalone_test_x86) \ vdso_standalone_test_x86.c parse_vdso.c \ diff --git a/tools/testing/selftests/vDSO/vdso_test_getcpu.c b/tools/testing/selftests/vDSO/vdso_test_getcpu.c new file mode 100644 index 0000000000000..fc25ede131b88 --- /dev/null +++ b/tools/testing/selftests/vDSO/vdso_test_getcpu.c @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * vdso_test_getcpu.c: Sample code to test parse_vdso.c and vDSO getcpu() + * + * Copyright (c) 2020 Arm Ltd + */ + +#include +#include +#include +#include +#include + +#include "../kselftest.h" +#include "parse_vdso.h" + +const char *version = "LINUX_2.6"; +const char *name = "__vdso_getcpu"; + +struct getcpu_cache; +typedef long (*getcpu_t)(unsigned int *, unsigned int *, + struct getcpu_cache *); + +int main(int argc, char **argv) +{ + unsigned long sysinfo_ehdr; + unsigned int cpu, node; + getcpu_t get_cpu; + long ret; + + sysinfo_ehdr = 
getauxval(AT_SYSINFO_EHDR); + if (!sysinfo_ehdr) { + printf("AT_SYSINFO_EHDR is not present!\n"); + return KSFT_SKIP; + } + + vdso_init_from_sysinfo_ehdr(getauxval(AT_SYSINFO_EHDR)); + + get_cpu = (getcpu_t)vdso_sym(version, name); + if (!get_cpu) { + printf("Could not find %s\n", name); + return KSFT_SKIP; + } + + ret = get_cpu(&cpu, &node, 0); + if (ret == 0) { + printf("Running on CPU %u node %u\n", cpu, node); + } else { + printf("%s failed\n", name); + return KSFT_FAIL; + } + + return 0; +} -- GitLab From b03628b73564cf54e05b7611e22d9886a8822877 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Niklas=20S=C3=B6derlund?= Date: Thu, 14 May 2020 17:25:05 +0200 Subject: [PATCH 0645/1971] thermal: rcar_thermal: Clean up rcar_thermal_update_temp() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Moving the ctemp variable out of the private data structure made it possible to clean up rcar_thermal_update_temp(). Initialize the local ctemp to the error code to return if the reading fails and just return it at the end of the function. It's OK to change the datatype of old, new and ctemp to int as all values are ANDed with CTEMP (0x3f) before being stored. While at it change the datatype of the loop variable 'i' to to unsigned int. Suggested-by: Geert Uytterhoeven Signed-off-by: Niklas Söderlund Reviewed-by: Geert Uytterhoeven Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/20200514152505.1927634-1-niklas.soderlund+renesas@ragnatech.se --- drivers/thermal/rcar_thermal.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c index e0c1f2409035e..46aeb28b4e901 100644 --- a/drivers/thermal/rcar_thermal.c +++ b/drivers/thermal/rcar_thermal.c @@ -198,8 +198,8 @@ static void _rcar_thermal_bset(struct rcar_thermal_priv *priv, u32 reg, static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv) { struct device *dev = rcar_priv_to_dev(priv); - int i; - u32 ctemp, old, new; + int old, new, ctemp = -EINVAL; + unsigned int i; mutex_lock(&priv->lock); @@ -209,7 +209,6 @@ static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv) */ rcar_thermal_bset(priv, THSCR, CPCTL, CPCTL); - ctemp = 0; old = ~0; for (i = 0; i < 128; i++) { /* @@ -227,7 +226,7 @@ static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv) old = new; } - if (!ctemp) { + if (ctemp < 0) { dev_err(dev, "thermal sensor was broken\n"); goto err_out_unlock; } @@ -248,7 +247,7 @@ static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv) err_out_unlock: mutex_unlock(&priv->lock); - return ctemp ? ctemp : -EINVAL; + return ctemp; } static int rcar_thermal_get_current_temp(struct rcar_thermal_priv *priv, -- GitLab From 5627f9cffee73dca9762135b6a8c7ab7213f31e2 Mon Sep 17 00:00:00 2001 From: Nikita Sobolev Date: Thu, 21 May 2020 17:43:44 +0300 Subject: [PATCH 0646/1971] Kernel selftests: Add check if TPM devices are supported TPM2 tests set uses /dev/tpm0 and /dev/tpmrm0 without check if they are available. In case, when these devices are not available test fails, but expected behaviour is skipped test. 
Signed-off-by: Nikita Sobolev Reviewed-by: Jarkko Sakkinen Reviewed-by: Petr Vorel Signed-off-by: Shuah Khan --- tools/testing/selftests/tpm2/test_smoke.sh | 5 +++++ tools/testing/selftests/tpm2/test_space.sh | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/tools/testing/selftests/tpm2/test_smoke.sh b/tools/testing/selftests/tpm2/test_smoke.sh index 8155c2ea7ccbb..663062701d5aa 100755 --- a/tools/testing/selftests/tpm2/test_smoke.sh +++ b/tools/testing/selftests/tpm2/test_smoke.sh @@ -1,6 +1,11 @@ #!/bin/bash # SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) +# Kselftest framework requirement - SKIP code is 4. +ksft_skip=4 + +[ -f /dev/tpm0 ] || exit $ksft_skip + python -m unittest -v tpm2_tests.SmokeTest python -m unittest -v tpm2_tests.AsyncTest diff --git a/tools/testing/selftests/tpm2/test_space.sh b/tools/testing/selftests/tpm2/test_space.sh index a6f5e346635e5..36c9d030a1c63 100755 --- a/tools/testing/selftests/tpm2/test_space.sh +++ b/tools/testing/selftests/tpm2/test_space.sh @@ -1,4 +1,9 @@ #!/bin/bash # SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) +# Kselftest framework requirement - SKIP code is 4. +ksft_skip=4 + +[ -f /dev/tpmrm0 ] || exit $ksft_skip + python -m unittest -v tpm2_tests.SpaceTest -- GitLab From 558ae0355a91c7d28fdf4c0011bee6ebb5118632 Mon Sep 17 00:00:00 2001 From: Andrei Vagin Date: Thu, 21 May 2020 00:52:52 -0700 Subject: [PATCH 0647/1971] selftests/timens: handle a case when alarm clocks are not supported This can happen if a testing node doesn't have RTC (real time clock) hardware or it doesn't support alarms. Fixes: 61c57676035d ("selftests/timens: Add Time Namespace test for supported clocks") Acked-by: Vincenzo Frascino Reported-by: Vincenzo Frascino Signed-off-by: Andrei Vagin Signed-off-by: Shuah Khan --- tools/testing/selftests/timens/clock_nanosleep.c | 2 +- tools/testing/selftests/timens/timens.c | 2 +- tools/testing/selftests/timens/timens.h | 13 ++++++++++++- tools/testing/selftests/timens/timer.c | 5 +++++ tools/testing/selftests/timens/timerfd.c | 5 +++++ 5 files changed, 24 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/timens/clock_nanosleep.c b/tools/testing/selftests/timens/clock_nanosleep.c index 8e7b7c72ef65f..72d41b955fb22 100644 --- a/tools/testing/selftests/timens/clock_nanosleep.c +++ b/tools/testing/selftests/timens/clock_nanosleep.c @@ -119,7 +119,7 @@ int main(int argc, char *argv[]) ksft_set_plan(4); - check_config_posix_timers(); + check_supported_timers(); if (unshare_timens()) return 1; diff --git a/tools/testing/selftests/timens/timens.c b/tools/testing/selftests/timens/timens.c index 098be7c83be3e..52b6a1185f52a 100644 --- a/tools/testing/selftests/timens/timens.c +++ b/tools/testing/selftests/timens/timens.c @@ -155,7 +155,7 @@ int main(int argc, char *argv[]) nscheck(); - check_config_posix_timers(); + check_supported_timers(); ksft_set_plan(ARRAY_SIZE(clocks) * 2); diff --git a/tools/testing/selftests/timens/timens.h b/tools/testing/selftests/timens/timens.h index e09e7e39bc52f..d4fc52d471462 100644 --- a/tools/testing/selftests/timens/timens.h +++ b/tools/testing/selftests/timens/timens.h @@ -14,15 +14,26 @@ #endif static int config_posix_timers = true; +static int config_alarm_timers = true; -static inline void check_config_posix_timers(void) +static inline void check_supported_timers(void) { + struct timespec ts; + if (timer_create(-1, 0, 0) == -1 && errno == ENOSYS) config_posix_timers = false; + + if (clock_gettime(CLOCK_BOOTTIME_ALARM, &ts) == -1 && errno == EINVAL) + 
config_alarm_timers = false; } static inline bool check_skip(int clockid) { + if (!config_alarm_timers && clockid == CLOCK_BOOTTIME_ALARM) { + ksft_test_result_skip("CLOCK_BOOTTIME_ALARM isn't supported\n"); + return true; + } + if (config_posix_timers) return false; diff --git a/tools/testing/selftests/timens/timer.c b/tools/testing/selftests/timens/timer.c index 96dba11ebe446..5e7f0051bd7be 100644 --- a/tools/testing/selftests/timens/timer.c +++ b/tools/testing/selftests/timens/timer.c @@ -22,6 +22,9 @@ int run_test(int clockid, struct timespec now) timer_t fd; int i; + if (check_skip(clockid)) + return 0; + for (i = 0; i < 2; i++) { struct sigevent sevp = {.sigev_notify = SIGEV_NONE}; int flags = 0; @@ -74,6 +77,8 @@ int main(int argc, char *argv[]) nscheck(); + check_supported_timers(); + ksft_set_plan(3); clock_gettime(CLOCK_MONOTONIC, &mtime_now); diff --git a/tools/testing/selftests/timens/timerfd.c b/tools/testing/selftests/timens/timerfd.c index eff1ec5ff215b..9edd43d6b2c13 100644 --- a/tools/testing/selftests/timens/timerfd.c +++ b/tools/testing/selftests/timens/timerfd.c @@ -28,6 +28,9 @@ int run_test(int clockid, struct timespec now) long long elapsed; int fd, i; + if (check_skip(clockid)) + return 0; + if (tclock_gettime(clockid, &now)) return pr_perror("clock_gettime(%d)", clockid); @@ -81,6 +84,8 @@ int main(int argc, char *argv[]) nscheck(); + check_supported_timers(); + ksft_set_plan(3); clock_gettime(CLOCK_MONOTONIC, &mtime_now); -- GitLab From c4714b0045ac74f3b578851e312f9fbccfb382db Mon Sep 17 00:00:00 2001 From: Lothar Rubusch Date: Wed, 15 Apr 2020 20:16:53 +0000 Subject: [PATCH 0648/1971] Documentation: test.h - fix warnings Fix warnings at 'make htmldocs', and formatting issues in the resulting documentation. - test.h: Fix annotation in kernel-doc parameter description. - Documentation/*.rst: Fixing formatting issues, and a duplicate label issue due to usage of sphinx.ext.autosectionlabel and identical labels within one document (sphinx warning) Signed-off-by: Lothar Rubusch Reviewed-by: Brendan Higgins Signed-off-by: Shuah Khan --- Documentation/dev-tools/kunit/start.rst | 13 ++++++++----- Documentation/dev-tools/kunit/usage.rst | 4 ++-- include/kunit/test.h | 12 ++++++------ 3 files changed, 16 insertions(+), 13 deletions(-) diff --git a/Documentation/dev-tools/kunit/start.rst b/Documentation/dev-tools/kunit/start.rst index e1c5ce80ce12b..bb112cf70624e 100644 --- a/Documentation/dev-tools/kunit/start.rst +++ b/Documentation/dev-tools/kunit/start.rst @@ -32,15 +32,17 @@ test targets as well. The ``.kunitconfig`` should also contain any other config options required by the tests. A good starting point for a ``.kunitconfig`` is the KUnit defconfig: + .. code-block:: bash cd $PATH_TO_LINUX_REPO cp arch/um/configs/kunit_defconfig .kunitconfig You can then add any other Kconfig options you wish, e.g.: + .. code-block:: none - CONFIG_LIST_KUNIT_TEST=y + CONFIG_LIST_KUNIT_TEST=y :doc:`kunit_tool ` will ensure that all config options set in ``.kunitconfig`` are set in the kernel ``.config`` before running the tests. @@ -54,8 +56,8 @@ using. other tools (such as make menuconfig) to adjust other config options. -Running the tests ------------------ +Running the tests (KUnit Wrapper) +--------------------------------- To make sure that everything is set up correctly, simply invoke the Python wrapper from your kernel repo: @@ -105,8 +107,9 @@ have config options ending in ``_KUNIT_TEST``. 
KUnit and KUnit tests can be compiled as modules: in this case the tests in a module will be run when the module is loaded. -Running the tests ------------------ + +Running the tests (w/o KUnit Wrapper) +------------------------------------- Build and run your kernel as usual. Test output will be written to the kernel log in `TAP `_ format. diff --git a/Documentation/dev-tools/kunit/usage.rst b/Documentation/dev-tools/kunit/usage.rst index 473a2361ec375..3c3fe8b5feccf 100644 --- a/Documentation/dev-tools/kunit/usage.rst +++ b/Documentation/dev-tools/kunit/usage.rst @@ -595,7 +595,7 @@ able to run one test case per invocation. KUnit debugfs representation ============================ When kunit test suites are initialized, they create an associated directory -in /sys/kernel/debug/kunit/. The directory contains one file +in ``/sys/kernel/debug/kunit/``. The directory contains one file - results: "cat results" displays results of each test case and the results of the entire suite for the last test run. @@ -604,4 +604,4 @@ The debugfs representation is primarily of use when kunit test suites are run in a native environment, either as modules or builtin. Having a way to display results like this is valuable as otherwise results can be intermixed with other events in dmesg output. The maximum size of each -results file is KUNIT_LOG_SIZE bytes (defined in include/kunit/test.h). +results file is KUNIT_LOG_SIZE bytes (defined in ``include/kunit/test.h``). diff --git a/include/kunit/test.h b/include/kunit/test.h index 9b0c46a6ca1f1..47e61e1d53370 100644 --- a/include/kunit/test.h +++ b/include/kunit/test.h @@ -175,7 +175,7 @@ struct kunit_suite { void (*exit)(struct kunit *test); struct kunit_case *test_cases; - /* private - internal use only */ + /* private: internal use only */ struct dentry *debugfs; char *log; }; @@ -232,12 +232,12 @@ void __kunit_test_suites_exit(struct kunit_suite **suites); * kunit_test_suites() - used to register one or more &struct kunit_suite * with KUnit. * - * @suites: a statically allocated list of &struct kunit_suite. + * @suites_list...: a statically allocated list of &struct kunit_suite. * - * Registers @suites with the test framework. See &struct kunit_suite for + * Registers @suites_list with the test framework. See &struct kunit_suite for * more information. * - * When builtin, KUnit tests are all run as late_initcalls; this means + * When builtin, KUnit tests are all run as late_initcalls; this means * that they cannot test anything where tests must run at a different init * phase. One significant restriction resulting from this is that KUnit * cannot reliably test anything that is initialize in the late_init phase; @@ -253,8 +253,8 @@ void __kunit_test_suites_exit(struct kunit_suite **suites); * tests from the same place, and at the very least to do so after * everything else is definitely initialized. */ -#define kunit_test_suites(...) \ - static struct kunit_suite *suites[] = { __VA_ARGS__, NULL}; \ +#define kunit_test_suites(suites_list...) 
\ + static struct kunit_suite *suites[] = {suites_list, NULL}; \ static int kunit_test_suites_init(void) \ { \ return __kunit_test_suites_init(suites); \ -- GitLab From ddbd60c779b4ddaa87173a160ce90146933fb8f9 Mon Sep 17 00:00:00 2001 From: Vitor Massaru Iha Date: Tue, 14 Apr 2020 20:09:50 -0300 Subject: [PATCH 0649/1971] kunit: use --build_dir=.kunit as default To make KUnit easier to use, and to avoid overwriting object and .config files, the default KUnit build directory is set to .kunit * Related bug: https://bugzilla.kernel.org/show_bug.cgi?id=205221 Fixed up minor merge conflicts - Shuah Khan Signed-off-by: Vitor Massaru Iha Reviewed-by: Brendan Higgins Signed-off-by: Shuah Khan --- tools/testing/kunit/kunit.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py index b01838b6f5f9e..f3a1803bd71f3 100755 --- a/tools/testing/kunit/kunit.py +++ b/tools/testing/kunit/kunit.py @@ -169,7 +169,7 @@ def add_common_opts(parser): parser.add_argument('--build_dir', help='As in the make command, it specifies the build ' 'directory.', - type=str, default='', metavar='build_dir') + type=str, default='.kunit', metavar='build_dir') parser.add_argument('--make_options', help='X=Y make option, can be repeated.', action='append') @@ -245,12 +245,11 @@ def main(argv, linux=None): cli_args = parser.parse_args(argv) if cli_args.subcommand == 'run': - if cli_args.build_dir: - if not os.path.exists(cli_args.build_dir): - os.mkdir(cli_args.build_dir) - kunit_kernel.kunitconfig_path = os.path.join( - cli_args.build_dir, - kunit_kernel.kunitconfig_path) + if not os.path.exists(cli_args.build_dir): + os.mkdir(cli_args.build_dir) + kunit_kernel.kunitconfig_path = os.path.join( + cli_args.build_dir, + kunit_kernel.kunitconfig_path) if not linux: linux = kunit_kernel.LinuxSourceTree() -- GitLab From 9bdf64b35117cc10813d24e1842cd8ee40ecbf19 Mon Sep 17 00:00:00 2001 From: Vitor Massaru Iha Date: Tue, 14 Apr 2020 20:37:53 -0300 Subject: [PATCH 0650/1971] kunit: use KUnit defconfig by default To improve the usability of KUnit, defconfig is used by default if no kunitconfig is present. 
* https://bugzilla.kernel.org/show_bug.cgi?id=205259 Fixed up minor merge conflicts - Shuah Khan Signed-off-by: Vitor Massaru Iha Reviewed-by: Brendan Higgins Signed-off-by: Shuah Khan --- tools/testing/kunit/kunit.py | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py index f3a1803bd71f3..ec73b07d12658 100755 --- a/tools/testing/kunit/kunit.py +++ b/tools/testing/kunit/kunit.py @@ -32,8 +32,8 @@ KunitExecRequest = namedtuple('KunitExecRequest', KunitParseRequest = namedtuple('KunitParseRequest', ['raw_output', 'input_data']) KunitRequest = namedtuple('KunitRequest', ['raw_output','timeout', 'jobs', - 'build_dir', 'defconfig', - 'alltests', 'make_options']) + 'build_dir', 'alltests', + 'make_options']) KernelDirectoryPath = sys.argv[0].split('tools/testing/kunit/')[0] @@ -60,8 +60,7 @@ def config_tests(linux: kunit_kernel.LinuxSourceTree, kunit_parser.print_with_timestamp('Configuring KUnit Kernel ...') config_start = time.time() - if request.defconfig: - create_default_kunitconfig() + create_default_kunitconfig() success = linux.build_reconfig(request.build_dir, request.make_options) config_end = time.time() if not success: @@ -177,11 +176,6 @@ def add_common_opts(parser): help='Run all KUnit tests through allyesconfig', action='store_true') -def add_config_opts(parser): - parser.add_argument('--defconfig', - help='Uses a default .kunitconfig.', - action='store_true') - def add_build_opts(parser): parser.add_argument('--jobs', help='As in the make command, "Specifies the number of ' @@ -210,7 +204,6 @@ def main(argv, linux=None): # The 'run' command will config, build, exec, and parse in one go. run_parser = subparser.add_parser('run', help='Runs KUnit tests.') add_common_opts(run_parser) - add_config_opts(run_parser) add_build_opts(run_parser) add_exec_opts(run_parser) add_parse_opts(run_parser) @@ -258,7 +251,6 @@ def main(argv, linux=None): cli_args.timeout, cli_args.jobs, cli_args.build_dir, - cli_args.defconfig, cli_args.alltests, cli_args.make_options) result = run_tests(linux, request) -- GitLab From 743f05572e7f81dcccfecf454bc7d9e10edbbaca Mon Sep 17 00:00:00 2001 From: Christophe Kerello Date: Tue, 12 May 2020 13:47:47 +0200 Subject: [PATCH 0651/1971] mtd: rawnand: stm32_fmc2: cosmetic change to use nfc instead of fmc2 where relevant This patch renames functions and local variables. This cleanup is done to get all functions starting by stm32_fmc2_nfc in the FMC2 raw NAND driver when all functions will start by stm32_fmc2_ebi in the FMC2 EBI driver. 
Signed-off-by: Christophe Kerello Reviewed-by: Miquel Raynal Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/1589284068-4079-2-git-send-email-christophe.kerello@st.com --- drivers/mtd/nand/raw/stm32_fmc2_nand.c | 810 ++++++++++++------------- 1 file changed, 403 insertions(+), 407 deletions(-) diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c index 30083e52b2fc3..901ebd8bb3d3d 100644 --- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c +++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c @@ -280,12 +280,12 @@ static inline struct stm32_fmc2_nfc *to_stm32_nfc(struct nand_controller *base) return container_of(base, struct stm32_fmc2_nfc, base); } -static void stm32_fmc2_timings_init(struct nand_chip *chip) +static void stm32_fmc2_nfc_timings_init(struct nand_chip *chip) { - struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller); struct stm32_fmc2_nand *nand = to_fmc2_nand(chip); struct stm32_fmc2_timings *timings = &nand->timings; - u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR); + u32 pcr = readl_relaxed(nfc->io_base + FMC2_PCR); u32 pmem, patt; /* Set tclr/tar timings */ @@ -306,15 +306,15 @@ static void stm32_fmc2_timings_init(struct nand_chip *chip) patt |= FMC2_PATT_ATTHOLD(timings->thold_att); patt |= FMC2_PATT_ATTHIZ(timings->thiz); - writel_relaxed(pcr, fmc2->io_base + FMC2_PCR); - writel_relaxed(pmem, fmc2->io_base + FMC2_PMEM); - writel_relaxed(patt, fmc2->io_base + FMC2_PATT); + writel_relaxed(pcr, nfc->io_base + FMC2_PCR); + writel_relaxed(pmem, nfc->io_base + FMC2_PMEM); + writel_relaxed(patt, nfc->io_base + FMC2_PATT); } -static void stm32_fmc2_setup(struct nand_chip *chip) +static void stm32_fmc2_nfc_setup(struct nand_chip *chip) { - struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); - u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR); + struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller); + u32 pcr = readl_relaxed(nfc->io_base + FMC2_PCR); /* Configure ECC algorithm (default configuration is Hamming) */ pcr &= ~FMC2_PCR_ECCALG; @@ -335,174 +335,174 @@ static void stm32_fmc2_setup(struct nand_chip *chip) pcr &= ~FMC2_PCR_ECCSS_MASK; pcr |= FMC2_PCR_ECCSS(FMC2_PCR_ECCSS_512); - writel_relaxed(pcr, fmc2->io_base + FMC2_PCR); + writel_relaxed(pcr, nfc->io_base + FMC2_PCR); } -static int stm32_fmc2_select_chip(struct nand_chip *chip, int chipnr) +static int stm32_fmc2_nfc_select_chip(struct nand_chip *chip, int chipnr) { - struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller); struct stm32_fmc2_nand *nand = to_fmc2_nand(chip); struct dma_slave_config dma_cfg; int ret; - if (nand->cs_used[chipnr] == fmc2->cs_sel) + if (nand->cs_used[chipnr] == nfc->cs_sel) return 0; - fmc2->cs_sel = nand->cs_used[chipnr]; - stm32_fmc2_setup(chip); - stm32_fmc2_timings_init(chip); + nfc->cs_sel = nand->cs_used[chipnr]; + stm32_fmc2_nfc_setup(chip); + stm32_fmc2_nfc_timings_init(chip); - if (fmc2->dma_tx_ch && fmc2->dma_rx_ch) { + if (nfc->dma_tx_ch && nfc->dma_rx_ch) { memset(&dma_cfg, 0, sizeof(dma_cfg)); - dma_cfg.src_addr = fmc2->data_phys_addr[fmc2->cs_sel]; - dma_cfg.dst_addr = fmc2->data_phys_addr[fmc2->cs_sel]; + dma_cfg.src_addr = nfc->data_phys_addr[nfc->cs_sel]; + dma_cfg.dst_addr = nfc->data_phys_addr[nfc->cs_sel]; dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; dma_cfg.src_maxburst = 32; dma_cfg.dst_maxburst = 32; - ret = 
dmaengine_slave_config(fmc2->dma_tx_ch, &dma_cfg); + ret = dmaengine_slave_config(nfc->dma_tx_ch, &dma_cfg); if (ret) { - dev_err(fmc2->dev, "tx DMA engine slave config failed\n"); + dev_err(nfc->dev, "tx DMA engine slave config failed\n"); return ret; } - ret = dmaengine_slave_config(fmc2->dma_rx_ch, &dma_cfg); + ret = dmaengine_slave_config(nfc->dma_rx_ch, &dma_cfg); if (ret) { - dev_err(fmc2->dev, "rx DMA engine slave config failed\n"); + dev_err(nfc->dev, "rx DMA engine slave config failed\n"); return ret; } } - if (fmc2->dma_ecc_ch) { + if (nfc->dma_ecc_ch) { /* * Hamming: we read HECCR register * BCH4/BCH8: we read BCHDSRSx registers */ memset(&dma_cfg, 0, sizeof(dma_cfg)); - dma_cfg.src_addr = fmc2->io_phys_addr; + dma_cfg.src_addr = nfc->io_phys_addr; dma_cfg.src_addr += chip->ecc.strength == FMC2_ECC_HAM ? FMC2_HECCR : FMC2_BCHDSR0; dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; - ret = dmaengine_slave_config(fmc2->dma_ecc_ch, &dma_cfg); + ret = dmaengine_slave_config(nfc->dma_ecc_ch, &dma_cfg); if (ret) { - dev_err(fmc2->dev, "ECC DMA engine slave config failed\n"); + dev_err(nfc->dev, "ECC DMA engine slave config failed\n"); return ret; } /* Calculate ECC length needed for one sector */ - fmc2->dma_ecc_len = chip->ecc.strength == FMC2_ECC_HAM ? - FMC2_HECCR_LEN : FMC2_BCHDSRS_LEN; + nfc->dma_ecc_len = chip->ecc.strength == FMC2_ECC_HAM ? + FMC2_HECCR_LEN : FMC2_BCHDSRS_LEN; } return 0; } -static void stm32_fmc2_set_buswidth_16(struct stm32_fmc2_nfc *fmc2, bool set) +static void stm32_fmc2_nfc_set_buswidth_16(struct stm32_fmc2_nfc *nfc, bool set) { - u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR); + u32 pcr = readl_relaxed(nfc->io_base + FMC2_PCR); pcr &= ~FMC2_PCR_PWID_MASK; if (set) pcr |= FMC2_PCR_PWID(FMC2_PCR_PWID_BUSWIDTH_16); - writel_relaxed(pcr, fmc2->io_base + FMC2_PCR); + writel_relaxed(pcr, nfc->io_base + FMC2_PCR); } -static void stm32_fmc2_set_ecc(struct stm32_fmc2_nfc *fmc2, bool enable) +static void stm32_fmc2_nfc_set_ecc(struct stm32_fmc2_nfc *nfc, bool enable) { - u32 pcr = readl(fmc2->io_base + FMC2_PCR); + u32 pcr = readl(nfc->io_base + FMC2_PCR); pcr &= ~FMC2_PCR_ECCEN; if (enable) pcr |= FMC2_PCR_ECCEN; - writel(pcr, fmc2->io_base + FMC2_PCR); + writel(pcr, nfc->io_base + FMC2_PCR); } -static inline void stm32_fmc2_enable_seq_irq(struct stm32_fmc2_nfc *fmc2) +static inline void stm32_fmc2_nfc_enable_seq_irq(struct stm32_fmc2_nfc *nfc) { - u32 csqier = readl_relaxed(fmc2->io_base + FMC2_CSQIER); + u32 csqier = readl_relaxed(nfc->io_base + FMC2_CSQIER); csqier |= FMC2_CSQIER_TCIE; - fmc2->irq_state = FMC2_IRQ_SEQ; + nfc->irq_state = FMC2_IRQ_SEQ; - writel_relaxed(csqier, fmc2->io_base + FMC2_CSQIER); + writel_relaxed(csqier, nfc->io_base + FMC2_CSQIER); } -static inline void stm32_fmc2_disable_seq_irq(struct stm32_fmc2_nfc *fmc2) +static inline void stm32_fmc2_nfc_disable_seq_irq(struct stm32_fmc2_nfc *nfc) { - u32 csqier = readl_relaxed(fmc2->io_base + FMC2_CSQIER); + u32 csqier = readl_relaxed(nfc->io_base + FMC2_CSQIER); csqier &= ~FMC2_CSQIER_TCIE; - writel_relaxed(csqier, fmc2->io_base + FMC2_CSQIER); + writel_relaxed(csqier, nfc->io_base + FMC2_CSQIER); - fmc2->irq_state = FMC2_IRQ_UNKNOWN; + nfc->irq_state = FMC2_IRQ_UNKNOWN; } -static inline void stm32_fmc2_clear_seq_irq(struct stm32_fmc2_nfc *fmc2) +static inline void stm32_fmc2_nfc_clear_seq_irq(struct stm32_fmc2_nfc *nfc) { - writel_relaxed(FMC2_CSQICR_CLEAR_IRQ, fmc2->io_base + FMC2_CSQICR); + writel_relaxed(FMC2_CSQICR_CLEAR_IRQ, nfc->io_base + FMC2_CSQICR); } -static inline void 
stm32_fmc2_enable_bch_irq(struct stm32_fmc2_nfc *fmc2, - int mode) +static inline void stm32_fmc2_nfc_enable_bch_irq(struct stm32_fmc2_nfc *nfc, + int mode) { - u32 bchier = readl_relaxed(fmc2->io_base + FMC2_BCHIER); + u32 bchier = readl_relaxed(nfc->io_base + FMC2_BCHIER); if (mode == NAND_ECC_WRITE) bchier |= FMC2_BCHIER_EPBRIE; else bchier |= FMC2_BCHIER_DERIE; - fmc2->irq_state = FMC2_IRQ_BCH; + nfc->irq_state = FMC2_IRQ_BCH; - writel_relaxed(bchier, fmc2->io_base + FMC2_BCHIER); + writel_relaxed(bchier, nfc->io_base + FMC2_BCHIER); } -static inline void stm32_fmc2_disable_bch_irq(struct stm32_fmc2_nfc *fmc2) +static inline void stm32_fmc2_nfc_disable_bch_irq(struct stm32_fmc2_nfc *nfc) { - u32 bchier = readl_relaxed(fmc2->io_base + FMC2_BCHIER); + u32 bchier = readl_relaxed(nfc->io_base + FMC2_BCHIER); bchier &= ~FMC2_BCHIER_DERIE; bchier &= ~FMC2_BCHIER_EPBRIE; - writel_relaxed(bchier, fmc2->io_base + FMC2_BCHIER); + writel_relaxed(bchier, nfc->io_base + FMC2_BCHIER); - fmc2->irq_state = FMC2_IRQ_UNKNOWN; + nfc->irq_state = FMC2_IRQ_UNKNOWN; } -static inline void stm32_fmc2_clear_bch_irq(struct stm32_fmc2_nfc *fmc2) +static inline void stm32_fmc2_nfc_clear_bch_irq(struct stm32_fmc2_nfc *nfc) { - writel_relaxed(FMC2_BCHICR_CLEAR_IRQ, fmc2->io_base + FMC2_BCHICR); + writel_relaxed(FMC2_BCHICR_CLEAR_IRQ, nfc->io_base + FMC2_BCHICR); } /* * Enable ECC logic and reset syndrome/parity bits previously calculated * Syndrome/parity bits is cleared by setting the ECCEN bit to 0 */ -static void stm32_fmc2_hwctl(struct nand_chip *chip, int mode) +static void stm32_fmc2_nfc_hwctl(struct nand_chip *chip, int mode) { - struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller); - stm32_fmc2_set_ecc(fmc2, false); + stm32_fmc2_nfc_set_ecc(nfc, false); if (chip->ecc.strength != FMC2_ECC_HAM) { - u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR); + u32 pcr = readl_relaxed(nfc->io_base + FMC2_PCR); if (mode == NAND_ECC_WRITE) pcr |= FMC2_PCR_WEN; else pcr &= ~FMC2_PCR_WEN; - writel_relaxed(pcr, fmc2->io_base + FMC2_PCR); + writel_relaxed(pcr, nfc->io_base + FMC2_PCR); - reinit_completion(&fmc2->complete); - stm32_fmc2_clear_bch_irq(fmc2); - stm32_fmc2_enable_bch_irq(fmc2, mode); + reinit_completion(&nfc->complete); + stm32_fmc2_nfc_clear_bch_irq(nfc); + stm32_fmc2_nfc_enable_bch_irq(nfc, mode); } - stm32_fmc2_set_ecc(fmc2, true); + stm32_fmc2_nfc_set_ecc(nfc, true); } /* @@ -510,37 +510,37 @@ static void stm32_fmc2_hwctl(struct nand_chip *chip, int mode) * ECC is 3 bytes for 512 bytes of data (supports error correction up to * max of 1-bit) */ -static inline void stm32_fmc2_ham_set_ecc(const u32 ecc_sta, u8 *ecc) +static inline void stm32_fmc2_nfc_ham_set_ecc(const u32 ecc_sta, u8 *ecc) { ecc[0] = ecc_sta; ecc[1] = ecc_sta >> 8; ecc[2] = ecc_sta >> 16; } -static int stm32_fmc2_ham_calculate(struct nand_chip *chip, const u8 *data, - u8 *ecc) +static int stm32_fmc2_nfc_ham_calculate(struct nand_chip *chip, const u8 *data, + u8 *ecc) { - struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller); u32 sr, heccr; int ret; - ret = readl_relaxed_poll_timeout(fmc2->io_base + FMC2_SR, + ret = readl_relaxed_poll_timeout(nfc->io_base + FMC2_SR, sr, sr & FMC2_SR_NWRF, 1, 1000 * FMC2_TIMEOUT_MS); if (ret) { - dev_err(fmc2->dev, "ham timeout\n"); + dev_err(nfc->dev, "ham timeout\n"); return ret; } - heccr = readl_relaxed(fmc2->io_base + FMC2_HECCR); - stm32_fmc2_ham_set_ecc(heccr, ecc); - 
stm32_fmc2_set_ecc(fmc2, false); + heccr = readl_relaxed(nfc->io_base + FMC2_HECCR); + stm32_fmc2_nfc_ham_set_ecc(heccr, ecc); + stm32_fmc2_nfc_set_ecc(nfc, false); return 0; } -static int stm32_fmc2_ham_correct(struct nand_chip *chip, u8 *dat, - u8 *read_ecc, u8 *calc_ecc) +static int stm32_fmc2_nfc_ham_correct(struct nand_chip *chip, u8 *dat, + u8 *read_ecc, u8 *calc_ecc) { u8 bit_position = 0, b0, b1, b2; u32 byte_addr = 0, b; @@ -596,28 +596,28 @@ static int stm32_fmc2_ham_correct(struct nand_chip *chip, u8 *dat, * ECC is 7/13 bytes for 512 bytes of data (supports error correction up to * max of 4-bit/8-bit) */ -static int stm32_fmc2_bch_calculate(struct nand_chip *chip, const u8 *data, - u8 *ecc) +static int stm32_fmc2_nfc_bch_calculate(struct nand_chip *chip, const u8 *data, + u8 *ecc) { - struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller); u32 bchpbr; /* Wait until the BCH code is ready */ - if (!wait_for_completion_timeout(&fmc2->complete, + if (!wait_for_completion_timeout(&nfc->complete, msecs_to_jiffies(FMC2_TIMEOUT_MS))) { - dev_err(fmc2->dev, "bch timeout\n"); - stm32_fmc2_disable_bch_irq(fmc2); + dev_err(nfc->dev, "bch timeout\n"); + stm32_fmc2_nfc_disable_bch_irq(nfc); return -ETIMEDOUT; } /* Read parity bits */ - bchpbr = readl_relaxed(fmc2->io_base + FMC2_BCHPBR1); + bchpbr = readl_relaxed(nfc->io_base + FMC2_BCHPBR1); ecc[0] = bchpbr; ecc[1] = bchpbr >> 8; ecc[2] = bchpbr >> 16; ecc[3] = bchpbr >> 24; - bchpbr = readl_relaxed(fmc2->io_base + FMC2_BCHPBR2); + bchpbr = readl_relaxed(nfc->io_base + FMC2_BCHPBR2); ecc[4] = bchpbr; ecc[5] = bchpbr >> 8; ecc[6] = bchpbr >> 16; @@ -625,22 +625,22 @@ static int stm32_fmc2_bch_calculate(struct nand_chip *chip, const u8 *data, if (chip->ecc.strength == FMC2_ECC_BCH8) { ecc[7] = bchpbr >> 24; - bchpbr = readl_relaxed(fmc2->io_base + FMC2_BCHPBR3); + bchpbr = readl_relaxed(nfc->io_base + FMC2_BCHPBR3); ecc[8] = bchpbr; ecc[9] = bchpbr >> 8; ecc[10] = bchpbr >> 16; ecc[11] = bchpbr >> 24; - bchpbr = readl_relaxed(fmc2->io_base + FMC2_BCHPBR4); + bchpbr = readl_relaxed(nfc->io_base + FMC2_BCHPBR4); ecc[12] = bchpbr; } - stm32_fmc2_set_ecc(fmc2, false); + stm32_fmc2_nfc_set_ecc(nfc, false); return 0; } -static int stm32_fmc2_bch_decode(int eccsize, u8 *dat, u32 *ecc_sta) +static int stm32_fmc2_nfc_bch_decode(int eccsize, u8 *dat, u32 *ecc_sta) { u32 bchdsr0 = ecc_sta[0]; u32 bchdsr1 = ecc_sta[1]; @@ -679,33 +679,33 @@ static int stm32_fmc2_bch_decode(int eccsize, u8 *dat, u32 *ecc_sta) return nb_errs; } -static int stm32_fmc2_bch_correct(struct nand_chip *chip, u8 *dat, - u8 *read_ecc, u8 *calc_ecc) +static int stm32_fmc2_nfc_bch_correct(struct nand_chip *chip, u8 *dat, + u8 *read_ecc, u8 *calc_ecc) { - struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller); u32 ecc_sta[5]; /* Wait until the decoding error is ready */ - if (!wait_for_completion_timeout(&fmc2->complete, + if (!wait_for_completion_timeout(&nfc->complete, msecs_to_jiffies(FMC2_TIMEOUT_MS))) { - dev_err(fmc2->dev, "bch timeout\n"); - stm32_fmc2_disable_bch_irq(fmc2); + dev_err(nfc->dev, "bch timeout\n"); + stm32_fmc2_nfc_disable_bch_irq(nfc); return -ETIMEDOUT; } - ecc_sta[0] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR0); - ecc_sta[1] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR1); - ecc_sta[2] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR2); - ecc_sta[3] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR3); - ecc_sta[4] = 
readl_relaxed(fmc2->io_base + FMC2_BCHDSR4); + ecc_sta[0] = readl_relaxed(nfc->io_base + FMC2_BCHDSR0); + ecc_sta[1] = readl_relaxed(nfc->io_base + FMC2_BCHDSR1); + ecc_sta[2] = readl_relaxed(nfc->io_base + FMC2_BCHDSR2); + ecc_sta[3] = readl_relaxed(nfc->io_base + FMC2_BCHDSR3); + ecc_sta[4] = readl_relaxed(nfc->io_base + FMC2_BCHDSR4); - stm32_fmc2_set_ecc(fmc2, false); + stm32_fmc2_nfc_set_ecc(nfc, false); - return stm32_fmc2_bch_decode(chip->ecc.size, dat, ecc_sta); + return stm32_fmc2_nfc_bch_decode(chip->ecc.size, dat, ecc_sta); } -static int stm32_fmc2_read_page(struct nand_chip *chip, u8 *buf, - int oob_required, int page) +static int stm32_fmc2_nfc_read_page(struct nand_chip *chip, u8 *buf, + int oob_required, int page) { struct mtd_info *mtd = nand_to_mtd(chip); int ret, i, s, stat, eccsize = chip->ecc.size; @@ -767,21 +767,21 @@ static int stm32_fmc2_read_page(struct nand_chip *chip, u8 *buf, } /* Sequencer read/write configuration */ -static void stm32_fmc2_rw_page_init(struct nand_chip *chip, int page, - int raw, bool write_data) +static void stm32_fmc2_nfc_rw_page_init(struct nand_chip *chip, int page, + int raw, bool write_data) { - struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller); struct mtd_info *mtd = nand_to_mtd(chip); u32 csqcfgr1, csqcfgr2, csqcfgr3; u32 csqar1, csqar2; u32 ecc_offset = mtd->writesize + FMC2_BBM_LEN; - u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR); + u32 pcr = readl_relaxed(nfc->io_base + FMC2_PCR); if (write_data) pcr |= FMC2_PCR_WEN; else pcr &= ~FMC2_PCR_WEN; - writel_relaxed(pcr, fmc2->io_base + FMC2_PCR); + writel_relaxed(pcr, nfc->io_base + FMC2_PCR); /* * - Set Program Page/Page Read command @@ -843,7 +843,7 @@ static void stm32_fmc2_rw_page_init(struct nand_chip *chip, int page, * - Calculate the number of address cycles to be issued * - Set byte 5 of address cycle if needed */ - csqar2 = FMC2_CSQCAR2_NANDCEN(fmc2->cs_sel); + csqar2 = FMC2_CSQCAR2_NANDCEN(nfc->cs_sel); if (chip->options & NAND_BUSWIDTH_16) csqar2 |= FMC2_CSQCAR2_SAO(ecc_offset >> 1); else @@ -855,31 +855,32 @@ static void stm32_fmc2_rw_page_init(struct nand_chip *chip, int page, csqcfgr1 |= FMC2_CSQCFGR1_ACYNBR(4); } - writel_relaxed(csqcfgr1, fmc2->io_base + FMC2_CSQCFGR1); - writel_relaxed(csqcfgr2, fmc2->io_base + FMC2_CSQCFGR2); - writel_relaxed(csqcfgr3, fmc2->io_base + FMC2_CSQCFGR3); - writel_relaxed(csqar1, fmc2->io_base + FMC2_CSQAR1); - writel_relaxed(csqar2, fmc2->io_base + FMC2_CSQAR2); + writel_relaxed(csqcfgr1, nfc->io_base + FMC2_CSQCFGR1); + writel_relaxed(csqcfgr2, nfc->io_base + FMC2_CSQCFGR2); + writel_relaxed(csqcfgr3, nfc->io_base + FMC2_CSQCFGR3); + writel_relaxed(csqar1, nfc->io_base + FMC2_CSQAR1); + writel_relaxed(csqar2, nfc->io_base + FMC2_CSQAR2); } -static void stm32_fmc2_dma_callback(void *arg) +static void stm32_fmc2_nfc_dma_callback(void *arg) { complete((struct completion *)arg); } /* Read/write data from/to a page */ -static int stm32_fmc2_xfer(struct nand_chip *chip, const u8 *buf, - int raw, bool write_data) +static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf, + int raw, bool write_data) { - struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller); struct dma_async_tx_descriptor *desc_data, *desc_ecc; struct scatterlist *sg; - struct dma_chan *dma_ch = fmc2->dma_rx_ch; + struct dma_chan *dma_ch = nfc->dma_rx_ch; enum dma_data_direction dma_data_dir = DMA_FROM_DEVICE; enum 
dma_transfer_direction dma_transfer_dir = DMA_DEV_TO_MEM; - u32 csqcr = readl_relaxed(fmc2->io_base + FMC2_CSQCR); + u32 csqcr = readl_relaxed(nfc->io_base + FMC2_CSQCR); int eccsteps = chip->ecc.steps; int eccsize = chip->ecc.size; + unsigned long timeout = msecs_to_jiffies(FMC2_TIMEOUT_MS); const u8 *p = buf; int s, ret; @@ -887,20 +888,20 @@ static int stm32_fmc2_xfer(struct nand_chip *chip, const u8 *buf, if (write_data) { dma_data_dir = DMA_TO_DEVICE; dma_transfer_dir = DMA_MEM_TO_DEV; - dma_ch = fmc2->dma_tx_ch; + dma_ch = nfc->dma_tx_ch; } - for_each_sg(fmc2->dma_data_sg.sgl, sg, eccsteps, s) { + for_each_sg(nfc->dma_data_sg.sgl, sg, eccsteps, s) { sg_set_buf(sg, p, eccsize); p += eccsize; } - ret = dma_map_sg(fmc2->dev, fmc2->dma_data_sg.sgl, + ret = dma_map_sg(nfc->dev, nfc->dma_data_sg.sgl, eccsteps, dma_data_dir); if (ret < 0) return ret; - desc_data = dmaengine_prep_slave_sg(dma_ch, fmc2->dma_data_sg.sgl, + desc_data = dmaengine_prep_slave_sg(dma_ch, nfc->dma_data_sg.sgl, eccsteps, dma_transfer_dir, DMA_PREP_INTERRUPT); if (!desc_data) { @@ -908,10 +909,10 @@ static int stm32_fmc2_xfer(struct nand_chip *chip, const u8 *buf, goto err_unmap_data; } - reinit_completion(&fmc2->dma_data_complete); - reinit_completion(&fmc2->complete); - desc_data->callback = stm32_fmc2_dma_callback; - desc_data->callback_param = &fmc2->dma_data_complete; + reinit_completion(&nfc->dma_data_complete); + reinit_completion(&nfc->complete); + desc_data->callback = stm32_fmc2_nfc_dma_callback; + desc_data->callback_param = &nfc->dma_data_complete; ret = dma_submit_error(dmaengine_submit(desc_data)); if (ret) goto err_unmap_data; @@ -920,19 +921,19 @@ static int stm32_fmc2_xfer(struct nand_chip *chip, const u8 *buf, if (!write_data && !raw) { /* Configure DMA ECC status */ - p = fmc2->ecc_buf; - for_each_sg(fmc2->dma_ecc_sg.sgl, sg, eccsteps, s) { - sg_set_buf(sg, p, fmc2->dma_ecc_len); - p += fmc2->dma_ecc_len; + p = nfc->ecc_buf; + for_each_sg(nfc->dma_ecc_sg.sgl, sg, eccsteps, s) { + sg_set_buf(sg, p, nfc->dma_ecc_len); + p += nfc->dma_ecc_len; } - ret = dma_map_sg(fmc2->dev, fmc2->dma_ecc_sg.sgl, + ret = dma_map_sg(nfc->dev, nfc->dma_ecc_sg.sgl, eccsteps, dma_data_dir); if (ret < 0) goto err_unmap_data; - desc_ecc = dmaengine_prep_slave_sg(fmc2->dma_ecc_ch, - fmc2->dma_ecc_sg.sgl, + desc_ecc = dmaengine_prep_slave_sg(nfc->dma_ecc_ch, + nfc->dma_ecc_sg.sgl, eccsteps, dma_transfer_dir, DMA_PREP_INTERRUPT); if (!desc_ecc) { @@ -940,76 +941,73 @@ static int stm32_fmc2_xfer(struct nand_chip *chip, const u8 *buf, goto err_unmap_ecc; } - reinit_completion(&fmc2->dma_ecc_complete); - desc_ecc->callback = stm32_fmc2_dma_callback; - desc_ecc->callback_param = &fmc2->dma_ecc_complete; + reinit_completion(&nfc->dma_ecc_complete); + desc_ecc->callback = stm32_fmc2_nfc_dma_callback; + desc_ecc->callback_param = &nfc->dma_ecc_complete; ret = dma_submit_error(dmaengine_submit(desc_ecc)); if (ret) goto err_unmap_ecc; - dma_async_issue_pending(fmc2->dma_ecc_ch); + dma_async_issue_pending(nfc->dma_ecc_ch); } - stm32_fmc2_clear_seq_irq(fmc2); - stm32_fmc2_enable_seq_irq(fmc2); + stm32_fmc2_nfc_clear_seq_irq(nfc); + stm32_fmc2_nfc_enable_seq_irq(nfc); /* Start the transfer */ csqcr |= FMC2_CSQCR_CSQSTART; - writel_relaxed(csqcr, fmc2->io_base + FMC2_CSQCR); + writel_relaxed(csqcr, nfc->io_base + FMC2_CSQCR); /* Wait end of sequencer transfer */ - if (!wait_for_completion_timeout(&fmc2->complete, - msecs_to_jiffies(FMC2_TIMEOUT_MS))) { - dev_err(fmc2->dev, "seq timeout\n"); - stm32_fmc2_disable_seq_irq(fmc2); + if 
(!wait_for_completion_timeout(&nfc->complete, timeout)) { + dev_err(nfc->dev, "seq timeout\n"); + stm32_fmc2_nfc_disable_seq_irq(nfc); dmaengine_terminate_all(dma_ch); if (!write_data && !raw) - dmaengine_terminate_all(fmc2->dma_ecc_ch); + dmaengine_terminate_all(nfc->dma_ecc_ch); ret = -ETIMEDOUT; goto err_unmap_ecc; } /* Wait DMA data transfer completion */ - if (!wait_for_completion_timeout(&fmc2->dma_data_complete, - msecs_to_jiffies(FMC2_TIMEOUT_MS))) { - dev_err(fmc2->dev, "data DMA timeout\n"); + if (!wait_for_completion_timeout(&nfc->dma_data_complete, timeout)) { + dev_err(nfc->dev, "data DMA timeout\n"); dmaengine_terminate_all(dma_ch); ret = -ETIMEDOUT; } /* Wait DMA ECC transfer completion */ if (!write_data && !raw) { - if (!wait_for_completion_timeout(&fmc2->dma_ecc_complete, - msecs_to_jiffies(FMC2_TIMEOUT_MS))) { - dev_err(fmc2->dev, "ECC DMA timeout\n"); - dmaengine_terminate_all(fmc2->dma_ecc_ch); + if (!wait_for_completion_timeout(&nfc->dma_ecc_complete, + timeout)) { + dev_err(nfc->dev, "ECC DMA timeout\n"); + dmaengine_terminate_all(nfc->dma_ecc_ch); ret = -ETIMEDOUT; } } err_unmap_ecc: if (!write_data && !raw) - dma_unmap_sg(fmc2->dev, fmc2->dma_ecc_sg.sgl, + dma_unmap_sg(nfc->dev, nfc->dma_ecc_sg.sgl, eccsteps, dma_data_dir); err_unmap_data: - dma_unmap_sg(fmc2->dev, fmc2->dma_data_sg.sgl, eccsteps, dma_data_dir); + dma_unmap_sg(nfc->dev, nfc->dma_data_sg.sgl, eccsteps, dma_data_dir); return ret; } -static int stm32_fmc2_sequencer_write(struct nand_chip *chip, - const u8 *buf, int oob_required, - int page, int raw) +static int stm32_fmc2_nfc_seq_write(struct nand_chip *chip, const u8 *buf, + int oob_required, int page, int raw) { struct mtd_info *mtd = nand_to_mtd(chip); int ret; /* Configure the sequencer */ - stm32_fmc2_rw_page_init(chip, page, raw, true); + stm32_fmc2_nfc_rw_page_init(chip, page, raw, true); /* Write the page */ - ret = stm32_fmc2_xfer(chip, buf, raw, true); + ret = stm32_fmc2_nfc_xfer(chip, buf, raw, true); if (ret) return ret; @@ -1025,53 +1023,50 @@ static int stm32_fmc2_sequencer_write(struct nand_chip *chip, return nand_prog_page_end_op(chip); } -static int stm32_fmc2_sequencer_write_page(struct nand_chip *chip, - const u8 *buf, - int oob_required, - int page) +static int stm32_fmc2_nfc_seq_write_page(struct nand_chip *chip, const u8 *buf, + int oob_required, int page) { int ret; - ret = stm32_fmc2_select_chip(chip, chip->cur_cs); + ret = stm32_fmc2_nfc_select_chip(chip, chip->cur_cs); if (ret) return ret; - return stm32_fmc2_sequencer_write(chip, buf, oob_required, page, false); + return stm32_fmc2_nfc_seq_write(chip, buf, oob_required, page, false); } -static int stm32_fmc2_sequencer_write_page_raw(struct nand_chip *chip, - const u8 *buf, - int oob_required, - int page) +static int stm32_fmc2_nfc_seq_write_page_raw(struct nand_chip *chip, + const u8 *buf, int oob_required, + int page) { int ret; - ret = stm32_fmc2_select_chip(chip, chip->cur_cs); + ret = stm32_fmc2_nfc_select_chip(chip, chip->cur_cs); if (ret) return ret; - return stm32_fmc2_sequencer_write(chip, buf, oob_required, page, true); + return stm32_fmc2_nfc_seq_write(chip, buf, oob_required, page, true); } /* Get a status indicating which sectors have errors */ -static inline u16 stm32_fmc2_get_mapping_status(struct stm32_fmc2_nfc *fmc2) +static inline u16 stm32_fmc2_nfc_get_mapping_status(struct stm32_fmc2_nfc *nfc) { - u32 csqemsr = readl_relaxed(fmc2->io_base + FMC2_CSQEMSR); + u32 csqemsr = readl_relaxed(nfc->io_base + FMC2_CSQEMSR); return csqemsr & FMC2_CSQEMSR_SEM; } 
-static int stm32_fmc2_sequencer_correct(struct nand_chip *chip, u8 *dat, - u8 *read_ecc, u8 *calc_ecc) +static int stm32_fmc2_nfc_seq_correct(struct nand_chip *chip, u8 *dat, + u8 *read_ecc, u8 *calc_ecc) { struct mtd_info *mtd = nand_to_mtd(chip); - struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller); int eccbytes = chip->ecc.bytes; int eccsteps = chip->ecc.steps; int eccstrength = chip->ecc.strength; int i, s, eccsize = chip->ecc.size; - u32 *ecc_sta = (u32 *)fmc2->ecc_buf; - u16 sta_map = stm32_fmc2_get_mapping_status(fmc2); + u32 *ecc_sta = (u32 *)nfc->ecc_buf; + u16 sta_map = stm32_fmc2_nfc_get_mapping_status(nfc); unsigned int max_bitflips = 0; for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, dat += eccsize) { @@ -1080,10 +1075,11 @@ static int stm32_fmc2_sequencer_correct(struct nand_chip *chip, u8 *dat, if (eccstrength == FMC2_ECC_HAM) { /* Ecc_sta = FMC2_HECCR */ if (sta_map & BIT(s)) { - stm32_fmc2_ham_set_ecc(*ecc_sta, &calc_ecc[i]); - stat = stm32_fmc2_ham_correct(chip, dat, - &read_ecc[i], - &calc_ecc[i]); + stm32_fmc2_nfc_ham_set_ecc(*ecc_sta, + &calc_ecc[i]); + stat = stm32_fmc2_nfc_ham_correct(chip, dat, + &read_ecc[i], + &calc_ecc[i]); } ecc_sta++; } else { @@ -1095,8 +1091,8 @@ static int stm32_fmc2_sequencer_correct(struct nand_chip *chip, u8 *dat, * Ecc_sta[4] = FMC2_BCHDSR4 */ if (sta_map & BIT(s)) - stat = stm32_fmc2_bch_decode(eccsize, dat, - ecc_sta); + stat = stm32_fmc2_nfc_bch_decode(eccsize, dat, + ecc_sta); ecc_sta += 5; } @@ -1119,29 +1115,29 @@ static int stm32_fmc2_sequencer_correct(struct nand_chip *chip, u8 *dat, return max_bitflips; } -static int stm32_fmc2_sequencer_read_page(struct nand_chip *chip, u8 *buf, - int oob_required, int page) +static int stm32_fmc2_nfc_seq_read_page(struct nand_chip *chip, u8 *buf, + int oob_required, int page) { struct mtd_info *mtd = nand_to_mtd(chip); - struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller); u8 *ecc_calc = chip->ecc.calc_buf; u8 *ecc_code = chip->ecc.code_buf; u16 sta_map; int ret; - ret = stm32_fmc2_select_chip(chip, chip->cur_cs); + ret = stm32_fmc2_nfc_select_chip(chip, chip->cur_cs); if (ret) return ret; /* Configure the sequencer */ - stm32_fmc2_rw_page_init(chip, page, 0, false); + stm32_fmc2_nfc_rw_page_init(chip, page, 0, false); /* Read the page */ - ret = stm32_fmc2_xfer(chip, buf, 0, false); + ret = stm32_fmc2_nfc_xfer(chip, buf, 0, false); if (ret) return ret; - sta_map = stm32_fmc2_get_mapping_status(fmc2); + sta_map = stm32_fmc2_nfc_get_mapping_status(nfc); /* Check if errors happen */ if (likely(!sta_map)) { @@ -1168,21 +1164,21 @@ static int stm32_fmc2_sequencer_read_page(struct nand_chip *chip, u8 *buf, return chip->ecc.correct(chip, buf, ecc_code, ecc_calc); } -static int stm32_fmc2_sequencer_read_page_raw(struct nand_chip *chip, u8 *buf, - int oob_required, int page) +static int stm32_fmc2_nfc_seq_read_page_raw(struct nand_chip *chip, u8 *buf, + int oob_required, int page) { struct mtd_info *mtd = nand_to_mtd(chip); int ret; - ret = stm32_fmc2_select_chip(chip, chip->cur_cs); + ret = stm32_fmc2_nfc_select_chip(chip, chip->cur_cs); if (ret) return ret; /* Configure the sequencer */ - stm32_fmc2_rw_page_init(chip, page, 1, false); + stm32_fmc2_nfc_rw_page_init(chip, page, 1, false); /* Read the page */ - ret = stm32_fmc2_xfer(chip, buf, 1, false); + ret = stm32_fmc2_nfc_xfer(chip, buf, 1, false); if (ret) return ret; @@ -1195,31 +1191,31 @@ static 
int stm32_fmc2_sequencer_read_page_raw(struct nand_chip *chip, u8 *buf, return 0; } -static irqreturn_t stm32_fmc2_irq(int irq, void *dev_id) +static irqreturn_t stm32_fmc2_nfc_irq(int irq, void *dev_id) { - struct stm32_fmc2_nfc *fmc2 = (struct stm32_fmc2_nfc *)dev_id; + struct stm32_fmc2_nfc *nfc = (struct stm32_fmc2_nfc *)dev_id; - if (fmc2->irq_state == FMC2_IRQ_SEQ) + if (nfc->irq_state == FMC2_IRQ_SEQ) /* Sequencer is used */ - stm32_fmc2_disable_seq_irq(fmc2); - else if (fmc2->irq_state == FMC2_IRQ_BCH) + stm32_fmc2_nfc_disable_seq_irq(nfc); + else if (nfc->irq_state == FMC2_IRQ_BCH) /* BCH is used */ - stm32_fmc2_disable_bch_irq(fmc2); + stm32_fmc2_nfc_disable_bch_irq(nfc); - complete(&fmc2->complete); + complete(&nfc->complete); return IRQ_HANDLED; } -static void stm32_fmc2_read_data(struct nand_chip *chip, void *buf, - unsigned int len, bool force_8bit) +static void stm32_fmc2_nfc_read_data(struct nand_chip *chip, void *buf, + unsigned int len, bool force_8bit) { - struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); - void __iomem *io_addr_r = fmc2->data_base[fmc2->cs_sel]; + struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller); + void __iomem *io_addr_r = nfc->data_base[nfc->cs_sel]; if (force_8bit && chip->options & NAND_BUSWIDTH_16) /* Reconfigure bus width to 8-bit */ - stm32_fmc2_set_buswidth_16(fmc2, false); + stm32_fmc2_nfc_set_buswidth_16(nfc, false); if (!IS_ALIGNED((uintptr_t)buf, sizeof(u32))) { if (!IS_ALIGNED((uintptr_t)buf, sizeof(u16)) && len) { @@ -1255,18 +1251,18 @@ static void stm32_fmc2_read_data(struct nand_chip *chip, void *buf, if (force_8bit && chip->options & NAND_BUSWIDTH_16) /* Reconfigure bus width to 16-bit */ - stm32_fmc2_set_buswidth_16(fmc2, true); + stm32_fmc2_nfc_set_buswidth_16(nfc, true); } -static void stm32_fmc2_write_data(struct nand_chip *chip, const void *buf, - unsigned int len, bool force_8bit) +static void stm32_fmc2_nfc_write_data(struct nand_chip *chip, const void *buf, + unsigned int len, bool force_8bit) { - struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); - void __iomem *io_addr_w = fmc2->data_base[fmc2->cs_sel]; + struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller); + void __iomem *io_addr_w = nfc->data_base[nfc->cs_sel]; if (force_8bit && chip->options & NAND_BUSWIDTH_16) /* Reconfigure bus width to 8-bit */ - stm32_fmc2_set_buswidth_16(fmc2, false); + stm32_fmc2_nfc_set_buswidth_16(nfc, false); if (!IS_ALIGNED((uintptr_t)buf, sizeof(u32))) { if (!IS_ALIGNED((uintptr_t)buf, sizeof(u16)) && len) { @@ -1302,47 +1298,48 @@ static void stm32_fmc2_write_data(struct nand_chip *chip, const void *buf, if (force_8bit && chip->options & NAND_BUSWIDTH_16) /* Reconfigure bus width to 16-bit */ - stm32_fmc2_set_buswidth_16(fmc2, true); + stm32_fmc2_nfc_set_buswidth_16(nfc, true); } -static int stm32_fmc2_waitrdy(struct nand_chip *chip, unsigned long timeout_ms) +static int stm32_fmc2_nfc_waitrdy(struct nand_chip *chip, + unsigned long timeout_ms) { - struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller); const struct nand_sdr_timings *timings; u32 isr, sr; /* Check if there is no pending requests to the NAND flash */ - if (readl_relaxed_poll_timeout_atomic(fmc2->io_base + FMC2_SR, sr, + if (readl_relaxed_poll_timeout_atomic(nfc->io_base + FMC2_SR, sr, sr & FMC2_SR_NWRF, 1, 1000 * FMC2_TIMEOUT_MS)) - dev_warn(fmc2->dev, "Waitrdy timeout\n"); + dev_warn(nfc->dev, "Waitrdy timeout\n"); /* Wait tWB before R/B# signal is low */ timings = 
nand_get_sdr_timings(&chip->data_interface); ndelay(PSEC_TO_NSEC(timings->tWB_max)); /* R/B# signal is low, clear high level flag */ - writel_relaxed(FMC2_ICR_CIHLF, fmc2->io_base + FMC2_ICR); + writel_relaxed(FMC2_ICR_CIHLF, nfc->io_base + FMC2_ICR); /* Wait R/B# signal is high */ - return readl_relaxed_poll_timeout_atomic(fmc2->io_base + FMC2_ISR, + return readl_relaxed_poll_timeout_atomic(nfc->io_base + FMC2_ISR, isr, isr & FMC2_ISR_IHLF, 5, 1000 * timeout_ms); } -static int stm32_fmc2_exec_op(struct nand_chip *chip, - const struct nand_operation *op, - bool check_only) +static int stm32_fmc2_nfc_exec_op(struct nand_chip *chip, + const struct nand_operation *op, + bool check_only) { - struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller); const struct nand_op_instr *instr = NULL; - unsigned int op_id, i; + unsigned int op_id, i, timeout; int ret; if (check_only) return 0; - ret = stm32_fmc2_select_chip(chip, op->cs); + ret = stm32_fmc2_nfc_select_chip(chip, op->cs); if (ret) return ret; @@ -1352,30 +1349,30 @@ static int stm32_fmc2_exec_op(struct nand_chip *chip, switch (instr->type) { case NAND_OP_CMD_INSTR: writeb_relaxed(instr->ctx.cmd.opcode, - fmc2->cmd_base[fmc2->cs_sel]); + nfc->cmd_base[nfc->cs_sel]); break; case NAND_OP_ADDR_INSTR: for (i = 0; i < instr->ctx.addr.naddrs; i++) writeb_relaxed(instr->ctx.addr.addrs[i], - fmc2->addr_base[fmc2->cs_sel]); + nfc->addr_base[nfc->cs_sel]); break; case NAND_OP_DATA_IN_INSTR: - stm32_fmc2_read_data(chip, instr->ctx.data.buf.in, - instr->ctx.data.len, - instr->ctx.data.force_8bit); + stm32_fmc2_nfc_read_data(chip, instr->ctx.data.buf.in, + instr->ctx.data.len, + instr->ctx.data.force_8bit); break; case NAND_OP_DATA_OUT_INSTR: - stm32_fmc2_write_data(chip, instr->ctx.data.buf.out, - instr->ctx.data.len, - instr->ctx.data.force_8bit); + stm32_fmc2_nfc_write_data(chip, instr->ctx.data.buf.out, + instr->ctx.data.len, + instr->ctx.data.force_8bit); break; case NAND_OP_WAITRDY_INSTR: - ret = stm32_fmc2_waitrdy(chip, - instr->ctx.waitrdy.timeout_ms); + timeout = instr->ctx.waitrdy.timeout_ms; + ret = stm32_fmc2_nfc_waitrdy(chip, timeout); break; } } @@ -1383,13 +1380,13 @@ static int stm32_fmc2_exec_op(struct nand_chip *chip, return ret; } -static void stm32_fmc2_init(struct stm32_fmc2_nfc *fmc2) +static void stm32_fmc2_nfc_init(struct stm32_fmc2_nfc *nfc) { - u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR); - u32 bcr1 = readl_relaxed(fmc2->io_base + FMC2_BCR1); + u32 pcr = readl_relaxed(nfc->io_base + FMC2_PCR); + u32 bcr1 = readl_relaxed(nfc->io_base + FMC2_BCR1); /* Set CS used to undefined */ - fmc2->cs_sel = -1; + nfc->cs_sel = -1; /* Enable wait feature and nand flash memory bank */ pcr |= FMC2_PCR_PWAITEN; @@ -1419,19 +1416,19 @@ static void stm32_fmc2_init(struct stm32_fmc2_nfc *fmc2) /* Enable FMC2 controller */ bcr1 |= FMC2_BCR1_FMC2EN; - writel_relaxed(bcr1, fmc2->io_base + FMC2_BCR1); - writel_relaxed(pcr, fmc2->io_base + FMC2_PCR); - writel_relaxed(FMC2_PMEM_DEFAULT, fmc2->io_base + FMC2_PMEM); - writel_relaxed(FMC2_PATT_DEFAULT, fmc2->io_base + FMC2_PATT); + writel_relaxed(bcr1, nfc->io_base + FMC2_BCR1); + writel_relaxed(pcr, nfc->io_base + FMC2_PCR); + writel_relaxed(FMC2_PMEM_DEFAULT, nfc->io_base + FMC2_PMEM); + writel_relaxed(FMC2_PATT_DEFAULT, nfc->io_base + FMC2_PATT); } -static void stm32_fmc2_calc_timings(struct nand_chip *chip, - const struct nand_sdr_timings *sdrt) +static void stm32_fmc2_nfc_calc_timings(struct nand_chip *chip, + const struct 
nand_sdr_timings *sdrt) { - struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller); struct stm32_fmc2_nand *nand = to_fmc2_nand(chip); struct stm32_fmc2_timings *tims = &nand->timings; - unsigned long hclk = clk_get_rate(fmc2->clk); + unsigned long hclk = clk_get_rate(nfc->clk); unsigned long hclkp = NSEC_PER_SEC / (hclk / 1000); unsigned long timing, tar, tclr, thiz, twait; unsigned long tset_mem, tset_att, thold_mem, thold_att; @@ -1555,8 +1552,8 @@ static void stm32_fmc2_calc_timings(struct nand_chip *chip, tims->thold_att = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK); } -static int stm32_fmc2_setup_interface(struct nand_chip *chip, int chipnr, - const struct nand_data_interface *conf) +static int stm32_fmc2_nfc_setup_interface(struct nand_chip *chip, int chipnr, + const struct nand_data_interface *conf) { const struct nand_sdr_timings *sdrt; @@ -1567,68 +1564,67 @@ static int stm32_fmc2_setup_interface(struct nand_chip *chip, int chipnr, if (chipnr == NAND_DATA_IFACE_CHECK_ONLY) return 0; - stm32_fmc2_calc_timings(chip, sdrt); - stm32_fmc2_timings_init(chip); + stm32_fmc2_nfc_calc_timings(chip, sdrt); + stm32_fmc2_nfc_timings_init(chip); return 0; } -static int stm32_fmc2_dma_setup(struct stm32_fmc2_nfc *fmc2) +static int stm32_fmc2_nfc_dma_setup(struct stm32_fmc2_nfc *nfc) { int ret = 0; - fmc2->dma_tx_ch = dma_request_chan(fmc2->dev, "tx"); - if (IS_ERR(fmc2->dma_tx_ch)) { - ret = PTR_ERR(fmc2->dma_tx_ch); + nfc->dma_tx_ch = dma_request_chan(nfc->dev, "tx"); + if (IS_ERR(nfc->dma_tx_ch)) { + ret = PTR_ERR(nfc->dma_tx_ch); if (ret != -ENODEV) - dev_err(fmc2->dev, + dev_err(nfc->dev, "failed to request tx DMA channel: %d\n", ret); - fmc2->dma_tx_ch = NULL; + nfc->dma_tx_ch = NULL; goto err_dma; } - fmc2->dma_rx_ch = dma_request_chan(fmc2->dev, "rx"); - if (IS_ERR(fmc2->dma_rx_ch)) { - ret = PTR_ERR(fmc2->dma_rx_ch); + nfc->dma_rx_ch = dma_request_chan(nfc->dev, "rx"); + if (IS_ERR(nfc->dma_rx_ch)) { + ret = PTR_ERR(nfc->dma_rx_ch); if (ret != -ENODEV) - dev_err(fmc2->dev, + dev_err(nfc->dev, "failed to request rx DMA channel: %d\n", ret); - fmc2->dma_rx_ch = NULL; + nfc->dma_rx_ch = NULL; goto err_dma; } - fmc2->dma_ecc_ch = dma_request_chan(fmc2->dev, "ecc"); - if (IS_ERR(fmc2->dma_ecc_ch)) { - ret = PTR_ERR(fmc2->dma_ecc_ch); + nfc->dma_ecc_ch = dma_request_chan(nfc->dev, "ecc"); + if (IS_ERR(nfc->dma_ecc_ch)) { + ret = PTR_ERR(nfc->dma_ecc_ch); if (ret != -ENODEV) - dev_err(fmc2->dev, + dev_err(nfc->dev, "failed to request ecc DMA channel: %d\n", ret); - fmc2->dma_ecc_ch = NULL; + nfc->dma_ecc_ch = NULL; goto err_dma; } - ret = sg_alloc_table(&fmc2->dma_ecc_sg, FMC2_MAX_SG, GFP_KERNEL); + ret = sg_alloc_table(&nfc->dma_ecc_sg, FMC2_MAX_SG, GFP_KERNEL); if (ret) return ret; /* Allocate a buffer to store ECC status registers */ - fmc2->ecc_buf = devm_kzalloc(fmc2->dev, FMC2_MAX_ECC_BUF_LEN, - GFP_KERNEL); - if (!fmc2->ecc_buf) + nfc->ecc_buf = devm_kzalloc(nfc->dev, FMC2_MAX_ECC_BUF_LEN, GFP_KERNEL); + if (!nfc->ecc_buf) return -ENOMEM; - ret = sg_alloc_table(&fmc2->dma_data_sg, FMC2_MAX_SG, GFP_KERNEL); + ret = sg_alloc_table(&nfc->dma_data_sg, FMC2_MAX_SG, GFP_KERNEL); if (ret) return ret; - init_completion(&fmc2->dma_data_complete); - init_completion(&fmc2->dma_ecc_complete); + init_completion(&nfc->dma_data_complete); + init_completion(&nfc->dma_ecc_complete); return 0; err_dma: if (ret == -ENODEV) { - dev_warn(fmc2->dev, + dev_warn(nfc->dev, "DMAs not defined in the DT, polling mode is used\n"); ret = 0; } 
@@ -1636,34 +1632,34 @@ err_dma: return ret; } -static void stm32_fmc2_nand_callbacks_setup(struct nand_chip *chip) +static void stm32_fmc2_nfc_nand_callbacks_setup(struct nand_chip *chip) { - struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller); /* * Specific callbacks to read/write a page depending on * the mode (polling/sequencer) and the algo used (Hamming, BCH). */ - if (fmc2->dma_tx_ch && fmc2->dma_rx_ch && fmc2->dma_ecc_ch) { + if (nfc->dma_tx_ch && nfc->dma_rx_ch && nfc->dma_ecc_ch) { /* DMA => use sequencer mode callbacks */ - chip->ecc.correct = stm32_fmc2_sequencer_correct; - chip->ecc.write_page = stm32_fmc2_sequencer_write_page; - chip->ecc.read_page = stm32_fmc2_sequencer_read_page; - chip->ecc.write_page_raw = stm32_fmc2_sequencer_write_page_raw; - chip->ecc.read_page_raw = stm32_fmc2_sequencer_read_page_raw; + chip->ecc.correct = stm32_fmc2_nfc_seq_correct; + chip->ecc.write_page = stm32_fmc2_nfc_seq_write_page; + chip->ecc.read_page = stm32_fmc2_nfc_seq_read_page; + chip->ecc.write_page_raw = stm32_fmc2_nfc_seq_write_page_raw; + chip->ecc.read_page_raw = stm32_fmc2_nfc_seq_read_page_raw; } else { /* No DMA => use polling mode callbacks */ - chip->ecc.hwctl = stm32_fmc2_hwctl; + chip->ecc.hwctl = stm32_fmc2_nfc_hwctl; if (chip->ecc.strength == FMC2_ECC_HAM) { /* Hamming is used */ - chip->ecc.calculate = stm32_fmc2_ham_calculate; - chip->ecc.correct = stm32_fmc2_ham_correct; + chip->ecc.calculate = stm32_fmc2_nfc_ham_calculate; + chip->ecc.correct = stm32_fmc2_nfc_ham_correct; chip->ecc.options |= NAND_ECC_GENERIC_ERASED_CHECK; } else { /* BCH is used */ - chip->ecc.calculate = stm32_fmc2_bch_calculate; - chip->ecc.correct = stm32_fmc2_bch_correct; - chip->ecc.read_page = stm32_fmc2_read_page; + chip->ecc.calculate = stm32_fmc2_nfc_bch_calculate; + chip->ecc.correct = stm32_fmc2_nfc_bch_correct; + chip->ecc.read_page = stm32_fmc2_nfc_read_page; } } @@ -1676,8 +1672,8 @@ static void stm32_fmc2_nand_callbacks_setup(struct nand_chip *chip) chip->ecc.bytes = chip->options & NAND_BUSWIDTH_16 ? 
8 : 7; } -static int stm32_fmc2_nand_ooblayout_ecc(struct mtd_info *mtd, int section, - struct mtd_oob_region *oobregion) +static int stm32_fmc2_nfc_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) { struct nand_chip *chip = mtd_to_nand(mtd); struct nand_ecc_ctrl *ecc = &chip->ecc; @@ -1691,8 +1687,8 @@ static int stm32_fmc2_nand_ooblayout_ecc(struct mtd_info *mtd, int section, return 0; } -static int stm32_fmc2_nand_ooblayout_free(struct mtd_info *mtd, int section, - struct mtd_oob_region *oobregion) +static int stm32_fmc2_nfc_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) { struct nand_chip *chip = mtd_to_nand(mtd); struct nand_ecc_ctrl *ecc = &chip->ecc; @@ -1706,12 +1702,12 @@ static int stm32_fmc2_nand_ooblayout_free(struct mtd_info *mtd, int section, return 0; } -static const struct mtd_ooblayout_ops stm32_fmc2_nand_ooblayout_ops = { - .ecc = stm32_fmc2_nand_ooblayout_ecc, - .free = stm32_fmc2_nand_ooblayout_free, +static const struct mtd_ooblayout_ops stm32_fmc2_nfc_ooblayout_ops = { + .ecc = stm32_fmc2_nfc_ooblayout_ecc, + .free = stm32_fmc2_nfc_ooblayout_free, }; -static int stm32_fmc2_calc_ecc_bytes(int step_size, int strength) +static int stm32_fmc2_nfc_calc_ecc_bytes(int step_size, int strength) { /* Hamming */ if (strength == FMC2_ECC_HAM) @@ -1725,13 +1721,13 @@ static int stm32_fmc2_calc_ecc_bytes(int step_size, int strength) return 8; } -NAND_ECC_CAPS_SINGLE(stm32_fmc2_ecc_caps, stm32_fmc2_calc_ecc_bytes, +NAND_ECC_CAPS_SINGLE(stm32_fmc2_nfc_ecc_caps, stm32_fmc2_nfc_calc_ecc_bytes, FMC2_ECC_STEP_SIZE, FMC2_ECC_HAM, FMC2_ECC_BCH4, FMC2_ECC_BCH8); -static int stm32_fmc2_attach_chip(struct nand_chip *chip) +static int stm32_fmc2_nfc_attach_chip(struct nand_chip *chip) { - struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller); struct mtd_info *mtd = nand_to_mtd(chip); int ret; @@ -1743,45 +1739,45 @@ static int stm32_fmc2_attach_chip(struct nand_chip *chip) * ECC sector size = 512 */ if (chip->ecc.mode != NAND_ECC_HW) { - dev_err(fmc2->dev, "nand_ecc_mode is not well defined in the DT\n"); + dev_err(nfc->dev, "nand_ecc_mode is not well defined in the DT\n"); return -EINVAL; } - ret = nand_ecc_choose_conf(chip, &stm32_fmc2_ecc_caps, + ret = nand_ecc_choose_conf(chip, &stm32_fmc2_nfc_ecc_caps, mtd->oobsize - FMC2_BBM_LEN); if (ret) { - dev_err(fmc2->dev, "no valid ECC settings set\n"); + dev_err(nfc->dev, "no valid ECC settings set\n"); return ret; } if (mtd->writesize / chip->ecc.size > FMC2_MAX_SG) { - dev_err(fmc2->dev, "nand page size is not supported\n"); + dev_err(nfc->dev, "nand page size is not supported\n"); return -EINVAL; } if (chip->bbt_options & NAND_BBT_USE_FLASH) chip->bbt_options |= NAND_BBT_NO_OOB; - stm32_fmc2_nand_callbacks_setup(chip); + stm32_fmc2_nfc_nand_callbacks_setup(chip); - mtd_set_ooblayout(mtd, &stm32_fmc2_nand_ooblayout_ops); + mtd_set_ooblayout(mtd, &stm32_fmc2_nfc_ooblayout_ops); if (chip->options & NAND_BUSWIDTH_16) - stm32_fmc2_set_buswidth_16(fmc2, true); + stm32_fmc2_nfc_set_buswidth_16(nfc, true); return 0; } -static const struct nand_controller_ops stm32_fmc2_nand_controller_ops = { - .attach_chip = stm32_fmc2_attach_chip, - .exec_op = stm32_fmc2_exec_op, - .setup_data_interface = stm32_fmc2_setup_interface, +static const struct nand_controller_ops stm32_fmc2_nfc_controller_ops = { + .attach_chip = stm32_fmc2_nfc_attach_chip, + .exec_op = stm32_fmc2_nfc_exec_op, + .setup_data_interface = 
stm32_fmc2_nfc_setup_interface, }; -static int stm32_fmc2_parse_child(struct stm32_fmc2_nfc *fmc2, - struct device_node *dn) +static int stm32_fmc2_nfc_parse_child(struct stm32_fmc2_nfc *nfc, + struct device_node *dn) { - struct stm32_fmc2_nand *nand = &fmc2->nand; + struct stm32_fmc2_nand *nand = &nfc->nand; u32 cs; int ret, i; @@ -1790,29 +1786,29 @@ static int stm32_fmc2_parse_child(struct stm32_fmc2_nfc *fmc2, nand->ncs /= sizeof(u32); if (!nand->ncs) { - dev_err(fmc2->dev, "invalid reg property size\n"); + dev_err(nfc->dev, "invalid reg property size\n"); return -EINVAL; } for (i = 0; i < nand->ncs; i++) { ret = of_property_read_u32_index(dn, "reg", i, &cs); if (ret) { - dev_err(fmc2->dev, "could not retrieve reg property: %d\n", + dev_err(nfc->dev, "could not retrieve reg property: %d\n", ret); return ret; } if (cs > FMC2_MAX_CE) { - dev_err(fmc2->dev, "invalid reg value: %d\n", cs); + dev_err(nfc->dev, "invalid reg value: %d\n", cs); return -EINVAL; } - if (fmc2->cs_assigned & BIT(cs)) { - dev_err(fmc2->dev, "cs already assigned: %d\n", cs); + if (nfc->cs_assigned & BIT(cs)) { + dev_err(nfc->dev, "cs already assigned: %d\n", cs); return -EINVAL; } - fmc2->cs_assigned |= BIT(cs); + nfc->cs_assigned |= BIT(cs); nand->cs_used[i] = cs; } @@ -1821,25 +1817,25 @@ static int stm32_fmc2_parse_child(struct stm32_fmc2_nfc *fmc2, return 0; } -static int stm32_fmc2_parse_dt(struct stm32_fmc2_nfc *fmc2) +static int stm32_fmc2_nfc_parse_dt(struct stm32_fmc2_nfc *nfc) { - struct device_node *dn = fmc2->dev->of_node; + struct device_node *dn = nfc->dev->of_node; struct device_node *child; int nchips = of_get_child_count(dn); int ret = 0; if (!nchips) { - dev_err(fmc2->dev, "NAND chip not defined\n"); + dev_err(nfc->dev, "NAND chip not defined\n"); return -EINVAL; } if (nchips > 1) { - dev_err(fmc2->dev, "too many NAND chips defined\n"); + dev_err(nfc->dev, "too many NAND chips defined\n"); return -EINVAL; } for_each_child_of_node(dn, child) { - ret = stm32_fmc2_parse_child(fmc2, child); + ret = stm32_fmc2_nfc_parse_child(nfc, child); if (ret < 0) { of_node_put(child); return ret; @@ -1849,79 +1845,79 @@ static int stm32_fmc2_parse_dt(struct stm32_fmc2_nfc *fmc2) return ret; } -static int stm32_fmc2_probe(struct platform_device *pdev) +static int stm32_fmc2_nfc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct reset_control *rstc; - struct stm32_fmc2_nfc *fmc2; + struct stm32_fmc2_nfc *nfc; struct stm32_fmc2_nand *nand; struct resource *res; struct mtd_info *mtd; struct nand_chip *chip; int chip_cs, mem_region, ret, irq; - fmc2 = devm_kzalloc(dev, sizeof(*fmc2), GFP_KERNEL); - if (!fmc2) + nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL); + if (!nfc) return -ENOMEM; - fmc2->dev = dev; - nand_controller_init(&fmc2->base); - fmc2->base.ops = &stm32_fmc2_nand_controller_ops; + nfc->dev = dev; + nand_controller_init(&nfc->base); + nfc->base.ops = &stm32_fmc2_nfc_controller_ops; - ret = stm32_fmc2_parse_dt(fmc2); + ret = stm32_fmc2_nfc_parse_dt(nfc); if (ret) return ret; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - fmc2->io_base = devm_ioremap_resource(dev, res); - if (IS_ERR(fmc2->io_base)) - return PTR_ERR(fmc2->io_base); + nfc->io_base = devm_ioremap_resource(dev, res); + if (IS_ERR(nfc->io_base)) + return PTR_ERR(nfc->io_base); - fmc2->io_phys_addr = res->start; + nfc->io_phys_addr = res->start; for (chip_cs = 0, mem_region = 1; chip_cs < FMC2_MAX_CE; chip_cs++, mem_region += 3) { - if (!(fmc2->cs_assigned & BIT(chip_cs))) + if (!(nfc->cs_assigned & 
BIT(chip_cs))) continue; res = platform_get_resource(pdev, IORESOURCE_MEM, mem_region); - fmc2->data_base[chip_cs] = devm_ioremap_resource(dev, res); - if (IS_ERR(fmc2->data_base[chip_cs])) - return PTR_ERR(fmc2->data_base[chip_cs]); + nfc->data_base[chip_cs] = devm_ioremap_resource(dev, res); + if (IS_ERR(nfc->data_base[chip_cs])) + return PTR_ERR(nfc->data_base[chip_cs]); - fmc2->data_phys_addr[chip_cs] = res->start; + nfc->data_phys_addr[chip_cs] = res->start; res = platform_get_resource(pdev, IORESOURCE_MEM, mem_region + 1); - fmc2->cmd_base[chip_cs] = devm_ioremap_resource(dev, res); - if (IS_ERR(fmc2->cmd_base[chip_cs])) - return PTR_ERR(fmc2->cmd_base[chip_cs]); + nfc->cmd_base[chip_cs] = devm_ioremap_resource(dev, res); + if (IS_ERR(nfc->cmd_base[chip_cs])) + return PTR_ERR(nfc->cmd_base[chip_cs]); res = platform_get_resource(pdev, IORESOURCE_MEM, mem_region + 2); - fmc2->addr_base[chip_cs] = devm_ioremap_resource(dev, res); - if (IS_ERR(fmc2->addr_base[chip_cs])) - return PTR_ERR(fmc2->addr_base[chip_cs]); + nfc->addr_base[chip_cs] = devm_ioremap_resource(dev, res); + if (IS_ERR(nfc->addr_base[chip_cs])) + return PTR_ERR(nfc->addr_base[chip_cs]); } irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; - ret = devm_request_irq(dev, irq, stm32_fmc2_irq, 0, - dev_name(dev), fmc2); + ret = devm_request_irq(dev, irq, stm32_fmc2_nfc_irq, 0, + dev_name(dev), nfc); if (ret) { dev_err(dev, "failed to request irq\n"); return ret; } - init_completion(&fmc2->complete); + init_completion(&nfc->complete); - fmc2->clk = devm_clk_get(dev, NULL); - if (IS_ERR(fmc2->clk)) - return PTR_ERR(fmc2->clk); + nfc->clk = devm_clk_get(dev, NULL); + if (IS_ERR(nfc->clk)) + return PTR_ERR(nfc->clk); - ret = clk_prepare_enable(fmc2->clk); + ret = clk_prepare_enable(nfc->clk); if (ret) { dev_err(dev, "can not enable the clock\n"); return ret; @@ -1937,18 +1933,18 @@ static int stm32_fmc2_probe(struct platform_device *pdev) reset_control_deassert(rstc); } - ret = stm32_fmc2_dma_setup(fmc2); + ret = stm32_fmc2_nfc_dma_setup(nfc); if (ret) goto err_release_dma; - stm32_fmc2_init(fmc2); + stm32_fmc2_nfc_init(nfc); - nand = &fmc2->nand; + nand = &nfc->nand; chip = &nand->chip; mtd = nand_to_mtd(chip); mtd->dev.parent = dev; - chip->controller = &fmc2->base; + chip->controller = &nfc->base; chip->options |= NAND_BUSWIDTH_AUTO | NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA; @@ -1966,7 +1962,7 @@ static int stm32_fmc2_probe(struct platform_device *pdev) if (ret) goto err_nand_cleanup; - platform_set_drvdata(pdev, fmc2); + platform_set_drvdata(pdev, nfc); return 0; @@ -1974,73 +1970,73 @@ err_nand_cleanup: nand_cleanup(chip); err_release_dma: - if (fmc2->dma_ecc_ch) - dma_release_channel(fmc2->dma_ecc_ch); - if (fmc2->dma_tx_ch) - dma_release_channel(fmc2->dma_tx_ch); - if (fmc2->dma_rx_ch) - dma_release_channel(fmc2->dma_rx_ch); + if (nfc->dma_ecc_ch) + dma_release_channel(nfc->dma_ecc_ch); + if (nfc->dma_tx_ch) + dma_release_channel(nfc->dma_tx_ch); + if (nfc->dma_rx_ch) + dma_release_channel(nfc->dma_rx_ch); - sg_free_table(&fmc2->dma_data_sg); - sg_free_table(&fmc2->dma_ecc_sg); + sg_free_table(&nfc->dma_data_sg); + sg_free_table(&nfc->dma_ecc_sg); err_clk_disable: - clk_disable_unprepare(fmc2->clk); + clk_disable_unprepare(nfc->clk); return ret; } -static int stm32_fmc2_remove(struct platform_device *pdev) +static int stm32_fmc2_nfc_remove(struct platform_device *pdev) { - struct stm32_fmc2_nfc *fmc2 = platform_get_drvdata(pdev); - struct stm32_fmc2_nand *nand = &fmc2->nand; + struct stm32_fmc2_nfc *nfc = 
platform_get_drvdata(pdev); + struct stm32_fmc2_nand *nand = &nfc->nand; nand_release(&nand->chip); - if (fmc2->dma_ecc_ch) - dma_release_channel(fmc2->dma_ecc_ch); - if (fmc2->dma_tx_ch) - dma_release_channel(fmc2->dma_tx_ch); - if (fmc2->dma_rx_ch) - dma_release_channel(fmc2->dma_rx_ch); + if (nfc->dma_ecc_ch) + dma_release_channel(nfc->dma_ecc_ch); + if (nfc->dma_tx_ch) + dma_release_channel(nfc->dma_tx_ch); + if (nfc->dma_rx_ch) + dma_release_channel(nfc->dma_rx_ch); - sg_free_table(&fmc2->dma_data_sg); - sg_free_table(&fmc2->dma_ecc_sg); + sg_free_table(&nfc->dma_data_sg); + sg_free_table(&nfc->dma_ecc_sg); - clk_disable_unprepare(fmc2->clk); + clk_disable_unprepare(nfc->clk); return 0; } -static int __maybe_unused stm32_fmc2_suspend(struct device *dev) +static int __maybe_unused stm32_fmc2_nfc_suspend(struct device *dev) { - struct stm32_fmc2_nfc *fmc2 = dev_get_drvdata(dev); + struct stm32_fmc2_nfc *nfc = dev_get_drvdata(dev); - clk_disable_unprepare(fmc2->clk); + clk_disable_unprepare(nfc->clk); pinctrl_pm_select_sleep_state(dev); return 0; } -static int __maybe_unused stm32_fmc2_resume(struct device *dev) +static int __maybe_unused stm32_fmc2_nfc_resume(struct device *dev) { - struct stm32_fmc2_nfc *fmc2 = dev_get_drvdata(dev); - struct stm32_fmc2_nand *nand = &fmc2->nand; + struct stm32_fmc2_nfc *nfc = dev_get_drvdata(dev); + struct stm32_fmc2_nand *nand = &nfc->nand; int chip_cs, ret; pinctrl_pm_select_default_state(dev); - ret = clk_prepare_enable(fmc2->clk); + ret = clk_prepare_enable(nfc->clk); if (ret) { dev_err(dev, "can not enable the clock\n"); return ret; } - stm32_fmc2_init(fmc2); + stm32_fmc2_nfc_init(nfc); for (chip_cs = 0; chip_cs < FMC2_MAX_CE; chip_cs++) { - if (!(fmc2->cs_assigned & BIT(chip_cs))) + if (!(nfc->cs_assigned & BIT(chip_cs))) continue; nand_reset(&nand->chip, chip_cs); @@ -2049,27 +2045,27 @@ static int __maybe_unused stm32_fmc2_resume(struct device *dev) return 0; } -static SIMPLE_DEV_PM_OPS(stm32_fmc2_pm_ops, stm32_fmc2_suspend, - stm32_fmc2_resume); +static SIMPLE_DEV_PM_OPS(stm32_fmc2_nfc_pm_ops, stm32_fmc2_nfc_suspend, + stm32_fmc2_nfc_resume); -static const struct of_device_id stm32_fmc2_match[] = { +static const struct of_device_id stm32_fmc2_nfc_match[] = { {.compatible = "st,stm32mp15-fmc2"}, {} }; -MODULE_DEVICE_TABLE(of, stm32_fmc2_match); +MODULE_DEVICE_TABLE(of, stm32_fmc2_nfc_match); -static struct platform_driver stm32_fmc2_driver = { - .probe = stm32_fmc2_probe, - .remove = stm32_fmc2_remove, +static struct platform_driver stm32_fmc2_nfc_driver = { + .probe = stm32_fmc2_nfc_probe, + .remove = stm32_fmc2_nfc_remove, .driver = { - .name = "stm32_fmc2_nand", - .of_match_table = stm32_fmc2_match, - .pm = &stm32_fmc2_pm_ops, + .name = "stm32_fmc2_nfc", + .of_match_table = stm32_fmc2_nfc_match, + .pm = &stm32_fmc2_nfc_pm_ops, }, }; -module_platform_driver(stm32_fmc2_driver); +module_platform_driver(stm32_fmc2_nfc_driver); -MODULE_ALIAS("platform:stm32_fmc2_nand"); +MODULE_ALIAS("platform:stm32_fmc2_nfc"); MODULE_AUTHOR("Christophe Kerello "); -MODULE_DESCRIPTION("STMicroelectronics STM32 FMC2 nand driver"); +MODULE_DESCRIPTION("STMicroelectronics STM32 FMC2 NFC driver"); MODULE_LICENSE("GPL v2"); -- GitLab From 699d3e6a87d1ea37cb47343fa54f074a9e0a2391 Mon Sep 17 00:00:00 2001 From: Christophe Kerello Date: Tue, 12 May 2020 13:47:48 +0200 Subject: [PATCH 0652/1971] mtd: rawnand: stm32_fmc2: use FIELD_PREP/FIELD_GET macros This patch removes custom macros and uses FIELD_PREP and FIELD_GET macros. 
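For reference, a minimal sketch of the conversion pattern applied below: a field is described once with GENMASK() and then packed with FIELD_PREP() and extracted with FIELD_GET() from <linux/bitfield.h>, replacing the hand-rolled shift/mask macros. The register and field names in this sketch are illustrative, not taken from the driver.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_FIELD	GENMASK(12, 9)	/* 4-bit field at bits 12:9 */

static u32 example_pack(u32 reg, u32 val)
{
	/* replaces the old pattern: reg = (reg & ~mask) | (((val) & 0xf) << 9) */
	reg &= ~EXAMPLE_FIELD;
	reg |= FIELD_PREP(EXAMPLE_FIELD, val);
	return reg;
}

static u32 example_unpack(u32 reg)
{
	/* replaces the old pattern: (reg & mask) >> 9 */
	return FIELD_GET(EXAMPLE_FIELD, reg);
}
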
Signed-off-by: Christophe Kerello Reviewed-by: Miquel Raynal Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/1589284068-4079-3-git-send-email-christophe.kerello@st.com --- drivers/mtd/nand/raw/stm32_fmc2_nand.c | 177 ++++++++++++------------- 1 file changed, 85 insertions(+), 92 deletions(-) diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c index 901ebd8bb3d3d..8f02d5e9ba215 100644 --- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c +++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c @@ -4,6 +4,7 @@ * Author: Christophe Kerello */ +#include #include #include #include @@ -84,20 +85,16 @@ /* Register: FMC2_PCR */ #define FMC2_PCR_PWAITEN BIT(1) #define FMC2_PCR_PBKEN BIT(2) -#define FMC2_PCR_PWID_MASK GENMASK(5, 4) -#define FMC2_PCR_PWID(x) (((x) & 0x3) << 4) +#define FMC2_PCR_PWID GENMASK(5, 4) #define FMC2_PCR_PWID_BUSWIDTH_8 0 #define FMC2_PCR_PWID_BUSWIDTH_16 1 #define FMC2_PCR_ECCEN BIT(6) #define FMC2_PCR_ECCALG BIT(8) -#define FMC2_PCR_TCLR_MASK GENMASK(12, 9) -#define FMC2_PCR_TCLR(x) (((x) & 0xf) << 9) +#define FMC2_PCR_TCLR GENMASK(12, 9) #define FMC2_PCR_TCLR_DEFAULT 0xf -#define FMC2_PCR_TAR_MASK GENMASK(16, 13) -#define FMC2_PCR_TAR(x) (((x) & 0xf) << 13) +#define FMC2_PCR_TAR GENMASK(16, 13) #define FMC2_PCR_TAR_DEFAULT 0xf -#define FMC2_PCR_ECCSS_MASK GENMASK(19, 17) -#define FMC2_PCR_ECCSS(x) (((x) & 0x7) << 17) +#define FMC2_PCR_ECCSS GENMASK(19, 17) #define FMC2_PCR_ECCSS_512 1 #define FMC2_PCR_ECCSS_2048 3 #define FMC2_PCR_BCHECC BIT(24) @@ -107,17 +104,17 @@ #define FMC2_SR_NWRF BIT(6) /* Register: FMC2_PMEM */ -#define FMC2_PMEM_MEMSET(x) (((x) & 0xff) << 0) -#define FMC2_PMEM_MEMWAIT(x) (((x) & 0xff) << 8) -#define FMC2_PMEM_MEMHOLD(x) (((x) & 0xff) << 16) -#define FMC2_PMEM_MEMHIZ(x) (((x) & 0xff) << 24) +#define FMC2_PMEM_MEMSET GENMASK(7, 0) +#define FMC2_PMEM_MEMWAIT GENMASK(15, 8) +#define FMC2_PMEM_MEMHOLD GENMASK(23, 16) +#define FMC2_PMEM_MEMHIZ GENMASK(31, 24) #define FMC2_PMEM_DEFAULT 0x0a0a0a0a /* Register: FMC2_PATT */ -#define FMC2_PATT_ATTSET(x) (((x) & 0xff) << 0) -#define FMC2_PATT_ATTWAIT(x) (((x) & 0xff) << 8) -#define FMC2_PATT_ATTHOLD(x) (((x) & 0xff) << 16) -#define FMC2_PATT_ATTHIZ(x) (((x) & 0xff) << 24) +#define FMC2_PATT_ATTSET GENMASK(7, 0) +#define FMC2_PATT_ATTWAIT GENMASK(15, 8) +#define FMC2_PATT_ATTHOLD GENMASK(23, 16) +#define FMC2_PATT_ATTHIZ GENMASK(31, 24) #define FMC2_PATT_DEFAULT 0x0a0a0a0a /* Register: FMC2_ISR */ @@ -132,9 +129,9 @@ /* Register: FMC2_CSQCFGR1 */ #define FMC2_CSQCFGR1_CMD2EN BIT(1) #define FMC2_CSQCFGR1_DMADEN BIT(2) -#define FMC2_CSQCFGR1_ACYNBR(x) (((x) & 0x7) << 4) -#define FMC2_CSQCFGR1_CMD1(x) (((x) & 0xff) << 8) -#define FMC2_CSQCFGR1_CMD2(x) (((x) & 0xff) << 16) +#define FMC2_CSQCFGR1_ACYNBR GENMASK(6, 4) +#define FMC2_CSQCFGR1_CMD1 GENMASK(15, 8) +#define FMC2_CSQCFGR1_CMD2 GENMASK(23, 16) #define FMC2_CSQCFGR1_CMD1T BIT(24) #define FMC2_CSQCFGR1_CMD2T BIT(25) @@ -142,13 +139,13 @@ #define FMC2_CSQCFGR2_SQSDTEN BIT(0) #define FMC2_CSQCFGR2_RCMD2EN BIT(1) #define FMC2_CSQCFGR2_DMASEN BIT(2) -#define FMC2_CSQCFGR2_RCMD1(x) (((x) & 0xff) << 8) -#define FMC2_CSQCFGR2_RCMD2(x) (((x) & 0xff) << 16) +#define FMC2_CSQCFGR2_RCMD1 GENMASK(15, 8) +#define FMC2_CSQCFGR2_RCMD2 GENMASK(23, 16) #define FMC2_CSQCFGR2_RCMD1T BIT(24) #define FMC2_CSQCFGR2_RCMD2T BIT(25) /* Register: FMC2_CSQCFGR3 */ -#define FMC2_CSQCFGR3_SNBR(x) (((x) & 0x1f) << 8) +#define FMC2_CSQCFGR3_SNBR GENMASK(13, 8) #define FMC2_CSQCFGR3_AC1T BIT(16) #define FMC2_CSQCFGR3_AC2T BIT(17) #define 
FMC2_CSQCFGR3_AC3T BIT(18) @@ -159,15 +156,15 @@ #define FMC2_CSQCFGR3_RAC2T BIT(23) /* Register: FMC2_CSQCAR1 */ -#define FMC2_CSQCAR1_ADDC1(x) (((x) & 0xff) << 0) -#define FMC2_CSQCAR1_ADDC2(x) (((x) & 0xff) << 8) -#define FMC2_CSQCAR1_ADDC3(x) (((x) & 0xff) << 16) -#define FMC2_CSQCAR1_ADDC4(x) (((x) & 0xff) << 24) +#define FMC2_CSQCAR1_ADDC1 GENMASK(7, 0) +#define FMC2_CSQCAR1_ADDC2 GENMASK(15, 8) +#define FMC2_CSQCAR1_ADDC3 GENMASK(23, 16) +#define FMC2_CSQCAR1_ADDC4 GENMASK(31, 24) /* Register: FMC2_CSQCAR2 */ -#define FMC2_CSQCAR2_ADDC5(x) (((x) & 0xff) << 0) -#define FMC2_CSQCAR2_NANDCEN(x) (((x) & 0x3) << 10) -#define FMC2_CSQCAR2_SAO(x) (((x) & 0xffff) << 16) +#define FMC2_CSQCAR2_ADDC5 GENMASK(7, 0) +#define FMC2_CSQCAR2_NANDCEN GENMASK(11, 10) +#define FMC2_CSQCAR2_SAO GENMASK(31, 16) /* Register: FMC2_CSQIER */ #define FMC2_CSQIER_TCIE BIT(0) @@ -188,28 +185,23 @@ /* Register: FMC2_BCHDSR0 */ #define FMC2_BCHDSR0_DUE BIT(0) #define FMC2_BCHDSR0_DEF BIT(1) -#define FMC2_BCHDSR0_DEN_MASK GENMASK(7, 4) -#define FMC2_BCHDSR0_DEN_SHIFT 4 +#define FMC2_BCHDSR0_DEN GENMASK(7, 4) /* Register: FMC2_BCHDSR1 */ -#define FMC2_BCHDSR1_EBP1_MASK GENMASK(12, 0) -#define FMC2_BCHDSR1_EBP2_MASK GENMASK(28, 16) -#define FMC2_BCHDSR1_EBP2_SHIFT 16 +#define FMC2_BCHDSR1_EBP1 GENMASK(12, 0) +#define FMC2_BCHDSR1_EBP2 GENMASK(28, 16) /* Register: FMC2_BCHDSR2 */ -#define FMC2_BCHDSR2_EBP3_MASK GENMASK(12, 0) -#define FMC2_BCHDSR2_EBP4_MASK GENMASK(28, 16) -#define FMC2_BCHDSR2_EBP4_SHIFT 16 +#define FMC2_BCHDSR2_EBP3 GENMASK(12, 0) +#define FMC2_BCHDSR2_EBP4 GENMASK(28, 16) /* Register: FMC2_BCHDSR3 */ -#define FMC2_BCHDSR3_EBP5_MASK GENMASK(12, 0) -#define FMC2_BCHDSR3_EBP6_MASK GENMASK(28, 16) -#define FMC2_BCHDSR3_EBP6_SHIFT 16 +#define FMC2_BCHDSR3_EBP5 GENMASK(12, 0) +#define FMC2_BCHDSR3_EBP6 GENMASK(28, 16) /* Register: FMC2_BCHDSR4 */ -#define FMC2_BCHDSR4_EBP7_MASK GENMASK(12, 0) -#define FMC2_BCHDSR4_EBP8_MASK GENMASK(28, 16) -#define FMC2_BCHDSR4_EBP8_SHIFT 16 +#define FMC2_BCHDSR4_EBP7 GENMASK(12, 0) +#define FMC2_BCHDSR4_EBP8 GENMASK(28, 16) enum stm32_fmc2_ecc { FMC2_ECC_HAM = 1, @@ -289,22 +281,22 @@ static void stm32_fmc2_nfc_timings_init(struct nand_chip *chip) u32 pmem, patt; /* Set tclr/tar timings */ - pcr &= ~FMC2_PCR_TCLR_MASK; - pcr |= FMC2_PCR_TCLR(timings->tclr); - pcr &= ~FMC2_PCR_TAR_MASK; - pcr |= FMC2_PCR_TAR(timings->tar); + pcr &= ~FMC2_PCR_TCLR; + pcr |= FIELD_PREP(FMC2_PCR_TCLR, timings->tclr); + pcr &= ~FMC2_PCR_TAR; + pcr |= FIELD_PREP(FMC2_PCR_TAR, timings->tar); /* Set tset/twait/thold/thiz timings in common bank */ - pmem = FMC2_PMEM_MEMSET(timings->tset_mem); - pmem |= FMC2_PMEM_MEMWAIT(timings->twait); - pmem |= FMC2_PMEM_MEMHOLD(timings->thold_mem); - pmem |= FMC2_PMEM_MEMHIZ(timings->thiz); + pmem = FIELD_PREP(FMC2_PMEM_MEMSET, timings->tset_mem); + pmem |= FIELD_PREP(FMC2_PMEM_MEMWAIT, timings->twait); + pmem |= FIELD_PREP(FMC2_PMEM_MEMHOLD, timings->thold_mem); + pmem |= FIELD_PREP(FMC2_PMEM_MEMHIZ, timings->thiz); /* Set tset/twait/thold/thiz timings in attribut bank */ - patt = FMC2_PATT_ATTSET(timings->tset_att); - patt |= FMC2_PATT_ATTWAIT(timings->twait); - patt |= FMC2_PATT_ATTHOLD(timings->thold_att); - patt |= FMC2_PATT_ATTHIZ(timings->thiz); + patt = FIELD_PREP(FMC2_PATT_ATTSET, timings->tset_att); + patt |= FIELD_PREP(FMC2_PATT_ATTWAIT, timings->twait); + patt |= FIELD_PREP(FMC2_PATT_ATTHOLD, timings->thold_att); + patt |= FIELD_PREP(FMC2_PATT_ATTHIZ, timings->thiz); writel_relaxed(pcr, nfc->io_base + FMC2_PCR); writel_relaxed(pmem, nfc->io_base 
+ FMC2_PMEM); @@ -327,13 +319,13 @@ static void stm32_fmc2_nfc_setup(struct nand_chip *chip) } /* Set buswidth */ - pcr &= ~FMC2_PCR_PWID_MASK; + pcr &= ~FMC2_PCR_PWID; if (chip->options & NAND_BUSWIDTH_16) - pcr |= FMC2_PCR_PWID(FMC2_PCR_PWID_BUSWIDTH_16); + pcr |= FIELD_PREP(FMC2_PCR_PWID, FMC2_PCR_PWID_BUSWIDTH_16); /* Set ECC sector size */ - pcr &= ~FMC2_PCR_ECCSS_MASK; - pcr |= FMC2_PCR_ECCSS(FMC2_PCR_ECCSS_512); + pcr &= ~FMC2_PCR_ECCSS; + pcr |= FIELD_PREP(FMC2_PCR_ECCSS, FMC2_PCR_ECCSS_512); writel_relaxed(pcr, nfc->io_base + FMC2_PCR); } @@ -403,9 +395,9 @@ static void stm32_fmc2_nfc_set_buswidth_16(struct stm32_fmc2_nfc *nfc, bool set) { u32 pcr = readl_relaxed(nfc->io_base + FMC2_PCR); - pcr &= ~FMC2_PCR_PWID_MASK; + pcr &= ~FMC2_PCR_PWID; if (set) - pcr |= FMC2_PCR_PWID(FMC2_PCR_PWID_BUSWIDTH_16); + pcr |= FIELD_PREP(FMC2_PCR_PWID, FMC2_PCR_PWID_BUSWIDTH_16); writel_relaxed(pcr, nfc->io_base + FMC2_PCR); } @@ -659,16 +651,16 @@ static int stm32_fmc2_nfc_bch_decode(int eccsize, u8 *dat, u32 *ecc_sta) if (unlikely(bchdsr0 & FMC2_BCHDSR0_DUE)) return -EBADMSG; - pos[0] = bchdsr1 & FMC2_BCHDSR1_EBP1_MASK; - pos[1] = (bchdsr1 & FMC2_BCHDSR1_EBP2_MASK) >> FMC2_BCHDSR1_EBP2_SHIFT; - pos[2] = bchdsr2 & FMC2_BCHDSR2_EBP3_MASK; - pos[3] = (bchdsr2 & FMC2_BCHDSR2_EBP4_MASK) >> FMC2_BCHDSR2_EBP4_SHIFT; - pos[4] = bchdsr3 & FMC2_BCHDSR3_EBP5_MASK; - pos[5] = (bchdsr3 & FMC2_BCHDSR3_EBP6_MASK) >> FMC2_BCHDSR3_EBP6_SHIFT; - pos[6] = bchdsr4 & FMC2_BCHDSR4_EBP7_MASK; - pos[7] = (bchdsr4 & FMC2_BCHDSR4_EBP8_MASK) >> FMC2_BCHDSR4_EBP8_SHIFT; + pos[0] = FIELD_GET(FMC2_BCHDSR1_EBP1, bchdsr1); + pos[1] = FIELD_GET(FMC2_BCHDSR1_EBP2, bchdsr1); + pos[2] = FIELD_GET(FMC2_BCHDSR2_EBP3, bchdsr2); + pos[3] = FIELD_GET(FMC2_BCHDSR2_EBP4, bchdsr2); + pos[4] = FIELD_GET(FMC2_BCHDSR3_EBP5, bchdsr3); + pos[5] = FIELD_GET(FMC2_BCHDSR3_EBP6, bchdsr3); + pos[6] = FIELD_GET(FMC2_BCHDSR4_EBP7, bchdsr4); + pos[7] = FIELD_GET(FMC2_BCHDSR4_EBP8, bchdsr4); - den = (bchdsr0 & FMC2_BCHDSR0_DEN_MASK) >> FMC2_BCHDSR0_DEN_SHIFT; + den = FIELD_GET(FMC2_BCHDSR0_DEN, bchdsr0); for (i = 0; i < den; i++) { if (pos[i] < eccsize * 8) { change_bit(pos[i], (unsigned long *)dat); @@ -790,11 +782,11 @@ static void stm32_fmc2_nfc_rw_page_init(struct nand_chip *chip, int page, */ csqcfgr1 = FMC2_CSQCFGR1_DMADEN | FMC2_CSQCFGR1_CMD1T; if (write_data) - csqcfgr1 |= FMC2_CSQCFGR1_CMD1(NAND_CMD_SEQIN); + csqcfgr1 |= FIELD_PREP(FMC2_CSQCFGR1_CMD1, NAND_CMD_SEQIN); else - csqcfgr1 |= FMC2_CSQCFGR1_CMD1(NAND_CMD_READ0) | + csqcfgr1 |= FIELD_PREP(FMC2_CSQCFGR1_CMD1, NAND_CMD_READ0) | FMC2_CSQCFGR1_CMD2EN | - FMC2_CSQCFGR1_CMD2(NAND_CMD_READSTART) | + FIELD_PREP(FMC2_CSQCFGR1_CMD2, NAND_CMD_READSTART) | FMC2_CSQCFGR1_CMD2T; /* @@ -804,11 +796,12 @@ static void stm32_fmc2_nfc_rw_page_init(struct nand_chip *chip, int page, * - Set timings */ if (write_data) - csqcfgr2 = FMC2_CSQCFGR2_RCMD1(NAND_CMD_RNDIN); + csqcfgr2 = FIELD_PREP(FMC2_CSQCFGR2_RCMD1, NAND_CMD_RNDIN); else - csqcfgr2 = FMC2_CSQCFGR2_RCMD1(NAND_CMD_RNDOUT) | + csqcfgr2 = FIELD_PREP(FMC2_CSQCFGR2_RCMD1, NAND_CMD_RNDOUT) | FMC2_CSQCFGR2_RCMD2EN | - FMC2_CSQCFGR2_RCMD2(NAND_CMD_RNDOUTSTART) | + FIELD_PREP(FMC2_CSQCFGR2_RCMD2, + NAND_CMD_RNDOUTSTART) | FMC2_CSQCFGR2_RCMD1T | FMC2_CSQCFGR2_RCMD2T; if (!raw) { @@ -820,7 +813,7 @@ static void stm32_fmc2_nfc_rw_page_init(struct nand_chip *chip, int page, * - Set the number of sectors to be written * - Set timings */ - csqcfgr3 = FMC2_CSQCFGR3_SNBR(chip->ecc.steps - 1); + csqcfgr3 = FIELD_PREP(FMC2_CSQCFGR3_SNBR, chip->ecc.steps - 1); if 
(write_data) { csqcfgr3 |= FMC2_CSQCFGR3_RAC2T; if (chip->options & NAND_ROW_ADDR_3) @@ -834,8 +827,8 @@ static void stm32_fmc2_nfc_rw_page_init(struct nand_chip *chip, int page, * Byte 1 and byte 2 => column, we start at 0x0 * Byte 3 and byte 4 => page */ - csqar1 = FMC2_CSQCAR1_ADDC3(page); - csqar1 |= FMC2_CSQCAR1_ADDC4(page >> 8); + csqar1 = FIELD_PREP(FMC2_CSQCAR1_ADDC3, page); + csqar1 |= FIELD_PREP(FMC2_CSQCAR1_ADDC4, page >> 8); /* * - Set chip enable number @@ -843,16 +836,16 @@ static void stm32_fmc2_nfc_rw_page_init(struct nand_chip *chip, int page, * - Calculate the number of address cycles to be issued * - Set byte 5 of address cycle if needed */ - csqar2 = FMC2_CSQCAR2_NANDCEN(nfc->cs_sel); + csqar2 = FIELD_PREP(FMC2_CSQCAR2_NANDCEN, nfc->cs_sel); if (chip->options & NAND_BUSWIDTH_16) - csqar2 |= FMC2_CSQCAR2_SAO(ecc_offset >> 1); + csqar2 |= FIELD_PREP(FMC2_CSQCAR2_SAO, ecc_offset >> 1); else - csqar2 |= FMC2_CSQCAR2_SAO(ecc_offset); + csqar2 |= FIELD_PREP(FMC2_CSQCAR2_SAO, ecc_offset); if (chip->options & NAND_ROW_ADDR_3) { - csqcfgr1 |= FMC2_CSQCFGR1_ACYNBR(5); - csqar2 |= FMC2_CSQCAR2_ADDC5(page >> 16); + csqcfgr1 |= FIELD_PREP(FMC2_CSQCFGR1_ACYNBR, 5); + csqar2 |= FIELD_PREP(FMC2_CSQCAR2_ADDC5, page >> 16); } else { - csqcfgr1 |= FMC2_CSQCFGR1_ACYNBR(4); + csqcfgr1 |= FIELD_PREP(FMC2_CSQCFGR1_ACYNBR, 4); } writel_relaxed(csqcfgr1, nfc->io_base + FMC2_CSQCFGR1); @@ -1393,7 +1386,7 @@ static void stm32_fmc2_nfc_init(struct stm32_fmc2_nfc *nfc) pcr |= FMC2_PCR_PBKEN; /* Set buswidth to 8 bits mode for identification */ - pcr &= ~FMC2_PCR_PWID_MASK; + pcr &= ~FMC2_PCR_PWID; /* ECC logic is disabled */ pcr &= ~FMC2_PCR_ECCEN; @@ -1404,14 +1397,14 @@ static void stm32_fmc2_nfc_init(struct stm32_fmc2_nfc *nfc) pcr &= ~FMC2_PCR_WEN; /* Set default ECC sector size */ - pcr &= ~FMC2_PCR_ECCSS_MASK; - pcr |= FMC2_PCR_ECCSS(FMC2_PCR_ECCSS_2048); + pcr &= ~FMC2_PCR_ECCSS; + pcr |= FIELD_PREP(FMC2_PCR_ECCSS, FMC2_PCR_ECCSS_2048); /* Set default tclr/tar timings */ - pcr &= ~FMC2_PCR_TCLR_MASK; - pcr |= FMC2_PCR_TCLR(FMC2_PCR_TCLR_DEFAULT); - pcr &= ~FMC2_PCR_TAR_MASK; - pcr |= FMC2_PCR_TAR(FMC2_PCR_TAR_DEFAULT); + pcr &= ~FMC2_PCR_TCLR; + pcr |= FIELD_PREP(FMC2_PCR_TCLR, FMC2_PCR_TCLR_DEFAULT); + pcr &= ~FMC2_PCR_TAR; + pcr |= FIELD_PREP(FMC2_PCR_TAR, FMC2_PCR_TAR_DEFAULT); /* Enable FMC2 controller */ bcr1 |= FMC2_BCR1_FMC2EN; -- GitLab From 1f1ec622623fe8e49a1036d4127b895a0f0e2c43 Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Tue, 5 May 2020 12:13:35 +0200 Subject: [PATCH 0653/1971] mtd: rawnand: Propage CS selection to sub operations Some controller using the instruction parse infrastructure might need to know which CS a specific sub-operation is targeting. Let's propagate this information. 
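A brief, hedged sketch of how a controller driver built on the parser infrastructure could consume the new field; apart from subop->cs (the field added by this patch) and the rawnand struct types, the names below are assumptions for illustration only.

static int example_exec_pattern(struct nand_chip *chip,
				const struct nand_subop *subop)
{
	/* hypothetical controller private data and CS-select helper */
	struct example_nfc *nfc = to_example_nfc(chip->controller);

	/* new with this patch: the CS line this sub-operation targets */
	example_select_cs(nfc, subop->cs);

	/* ... issue subop->instrs[0 .. subop->ninstrs - 1] to the hardware ... */
	return 0;
}
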
Signed-off-by: Boris Brezillon Reviewed-by: Miquel Raynal Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200505101353.1776394-2-boris.brezillon@collabora.com --- drivers/mtd/nand/raw/nand_base.c | 3 ++- include/linux/mtd/rawnand.h | 2 ++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 2d2a216af120f..c4d6d5482b318 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -2174,7 +2174,7 @@ static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx) char *prefix = " "; unsigned int i; - pr_debug("executing subop:\n"); + pr_debug("executing subop (CS%d):\n", ctx->subop.cs); for (i = 0; i < ctx->ninstrs; i++) { instr = &ctx->instrs[i]; @@ -2238,6 +2238,7 @@ int nand_op_parser_exec_op(struct nand_chip *chip, const struct nand_operation *op, bool check_only) { struct nand_op_parser_ctx ctx = { + .subop.cs = op->cs, .subop.instrs = op->instrs, .instrs = op->instrs, .ninstrs = op->ninstrs, diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 0f45b6984ad19..a3bac8824937c 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -709,6 +709,7 @@ struct nand_op_instr { /** * struct nand_subop - a sub operation + * @cs: the CS line to select for this NAND sub-operation * @instrs: array of instructions * @ninstrs: length of the @instrs array * @first_instr_start_off: offset to start from for the first instruction @@ -724,6 +725,7 @@ struct nand_op_instr { * controller driver. */ struct nand_subop { + unsigned int cs; const struct nand_op_instr *instrs; unsigned int ninstrs; unsigned int first_instr_start_off; -- GitLab From 0584d025829c5e3be98f41e7b930cd5154b319c5 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Sun, 10 May 2020 23:18:08 +0200 Subject: [PATCH 0654/1971] MAINTAINERS: Remove Piotr Sroka and mark Cadence NFC as orphaned Piotr's address is bouncing, remove him from MAINTAINERS and mark the driver he was maintaining, Cadence's, as orphaned. Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200510211809.15610-1-miquel.raynal@bootlin.com --- MAINTAINERS | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/MAINTAINERS b/MAINTAINERS index 26f281d9f32a4..937785189fc86 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3741,9 +3741,8 @@ F: Documentation/devicetree/bindings/media/cdns,*.txt F: drivers/media/platform/cadence/cdns-csi2* CADENCE NAND DRIVER -M: Piotr Sroka L: linux-mtd@lists.infradead.org -S: Maintained +S: Orphan F: Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt F: drivers/mtd/nand/raw/cadence-nand-controller.c -- GitLab From 2d1b77281f36aa8adce00260918058a4440c889c Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Sun, 10 May 2020 23:18:09 +0200 Subject: [PATCH 0655/1971] MAINTAINERS: Remove Xiaolei Li and mark MTK NFC as orphaned Xiaolei's address is bouncing, remove him from MAINTAINERS and mark the driver he was maintaining, Mediatek's, as orphaned. 
Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200510211809.15610-2-miquel.raynal@bootlin.com --- MAINTAINERS | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/MAINTAINERS b/MAINTAINERS index 937785189fc86..8e4daa03662c8 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -10722,9 +10722,8 @@ F: Documentation/devicetree/bindings/i2c/i2c-mt7621.txt F: drivers/i2c/busses/i2c-mt7621.c MEDIATEK NAND CONTROLLER DRIVER -M: Xiaolei Li L: linux-mtd@lists.infradead.org -S: Maintained +S: Orphan F: Documentation/devicetree/bindings/mtd/mtk-nand.txt F: drivers/mtd/nand/raw/mtk_* -- GitLab From 130bbde4809b011faf64f99dddc14b4b01f440c3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=81lvaro=20Fern=C3=A1ndez=20Rojas?= Date: Tue, 12 May 2020 09:57:32 +0200 Subject: [PATCH 0656/1971] mtd: rawnand: brcmnand: fix hamming oob layout MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit First 2 bytes are used in large-page nand. Fixes: ef5eeea6e911 ("mtd: nand: brcm: switch to mtd_ooblayout_ops") Cc: stable@vger.kernel.org Signed-off-by: Álvaro Fernández Rojas Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200512075733.745374-2-noltari@gmail.com --- drivers/mtd/nand/raw/brcmnand/brcmnand.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c index fe7cd3aa0cd6c..cfd22023cfcb6 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c @@ -1115,11 +1115,14 @@ static int brcmnand_hamming_ooblayout_free(struct mtd_info *mtd, int section, if (!section) { /* * Small-page NAND use byte 6 for BBI while large-page - * NAND use byte 0. + * NAND use bytes 0 and 1. 
*/ - if (cfg->page_size > 512) - oobregion->offset++; - oobregion->length--; + if (cfg->page_size > 512) { + oobregion->offset += 2; + oobregion->length -= 2; + } else { + oobregion->length--; + } } } -- GitLab From d00358d7a1c50718232799e1ee10955bcd73795a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=81lvaro=20Fern=C3=A1ndez=20Rojas?= Date: Tue, 12 May 2020 09:57:33 +0200 Subject: [PATCH 0657/1971] mtd: rawnand: brcmnand: improve hamming oob layout MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The current code generates 8 oob sections: S1 1-5 ECC 6-8 S2 9-15 S3 16-21 ECC 22-24 S4 25-31 S5 32-37 ECC 38-40 S6 41-47 S7 48-53 ECC 54-56 S8 57-63 Change it by merging continuous sections: S1 1-5 ECC 6-8 S2 9-21 ECC 22-24 S3 25-37 ECC 38-40 S4 41-53 ECC 54-56 S5 57-63 Signed-off-by: Álvaro Fernández Rojas Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200512075733.745374-3-noltari@gmail.com --- drivers/mtd/nand/raw/brcmnand/brcmnand.c | 35 +++++++++++------------- 1 file changed, 16 insertions(+), 19 deletions(-) diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c index cfd22023cfcb6..ede7cb95fe715 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c @@ -1099,33 +1099,30 @@ static int brcmnand_hamming_ooblayout_free(struct mtd_info *mtd, int section, struct brcmnand_cfg *cfg = &host->hwcfg; int sas = cfg->spare_area_size << cfg->sector_size_1k; int sectors = cfg->page_size / (512 << cfg->sector_size_1k); + u32 next; - if (section >= sectors * 2) + if (section > sectors) return -ERANGE; - oobregion->offset = (section / 2) * sas; + next = (section * sas); + if (section < sectors) + next += 6; - if (section & 1) { - oobregion->offset += 9; - oobregion->length = 7; + if (section) { + oobregion->offset = ((section - 1) * sas) + 9; } else { - oobregion->length = 6; - - /* First sector of each page may have BBI */ - if (!section) { - /* - * Small-page NAND use byte 6 for BBI while large-page - * NAND use bytes 0 and 1. - */ - if (cfg->page_size > 512) { - oobregion->offset += 2; - oobregion->length -= 2; - } else { - oobregion->length--; - } + if (cfg->page_size > 512) { + /* Large page NAND uses first 2 bytes for BBI */ + oobregion->offset = 2; + } else { + /* Small page NAND uses last byte before ECC for BBI */ + oobregion->offset = 0; + next--; } } + oobregion->length = next - oobregion->offset; + return 0; } -- GitLab From dcb351c03f2fa6a599de1061b174167e03ee312b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=81lvaro=20Fern=C3=A1ndez=20Rojas?= Date: Tue, 12 May 2020 10:24:51 +0200 Subject: [PATCH 0658/1971] mtd: rawnand: brcmnand: correctly verify erased pages MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The current code checks that the whole OOB area is erased. This is a problem when JFFS2 cleanmarkers are added to the OOB, since it will fail due to the usable OOB bytes not being 0xff. Correct this by only checking that data and ECC bytes aren't 0xff. 
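As background for the fix below: nand_check_erased_ecc_chunk() counts the zero bits in a data/ECC chunk, returns the bitflip count if the chunk can still be considered erased (at most 'threshold' flips), and -EBADMSG otherwise. A simplified outline of the per-step loop, with the driver-specific details elided (the real hunk follows):

for (i = 0; i < chip->ecc.steps; i++) {
	void *data = buf + chip->ecc.size * i;
	struct mtd_oob_region ecc;

	mtd_ooblayout_ecc(mtd, i, &ecc);	/* ECC bytes of step i only */
	ret = nand_check_erased_ecc_chunk(data, chip->ecc.size,
					  chip->oob_poi + ecc.offset,
					  ecc.length, NULL, 0,
					  chip->ecc.strength);
	if (ret < 0)
		return ret;			/* genuinely not erased */

	bitflips = max(bitflips, ret);
}
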
Fixes: 02b88eea9f9c ("mtd: brcmnand: Add check for erased page bitflips") Signed-off-by: Álvaro Fernández Rojas Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200512082451.771212-1-noltari@gmail.com --- drivers/mtd/nand/raw/brcmnand/brcmnand.c | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c index ede7cb95fe715..30a9a8dbcc050 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c @@ -2017,28 +2017,31 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip, static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd, struct nand_chip *chip, void *buf, u64 addr) { - int i, sas; - void *oob = chip->oob_poi; + struct mtd_oob_region ecc; + int i; int bitflips = 0; int page = addr >> chip->page_shift; int ret; + void *ecc_bytes; void *ecc_chunk; if (!buf) buf = nand_get_data_buf(chip); - sas = mtd->oobsize / chip->ecc.steps; - /* read without ecc for verification */ ret = chip->ecc.read_page_raw(chip, buf, true, page); if (ret) return ret; - for (i = 0; i < chip->ecc.steps; i++, oob += sas) { + for (i = 0; i < chip->ecc.steps; i++) { ecc_chunk = buf + chip->ecc.size * i; - ret = nand_check_erased_ecc_chunk(ecc_chunk, - chip->ecc.size, - oob, sas, NULL, 0, + + mtd_ooblayout_ecc(mtd, i, &ecc); + ecc_bytes = chip->oob_poi + ecc.offset; + + ret = nand_check_erased_ecc_chunk(ecc_chunk, chip->ecc.size, + ecc_bytes, ecc.length, + NULL, 0, chip->ecc.strength); if (ret < 0) return ret; -- GitLab From 3626fdcf0904c9c6bb1cc2b4446bc016a8af1435 Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Wed, 13 May 2020 19:22:45 +0200 Subject: [PATCH 0659/1971] mtd: rawnand: davinci: Inherit from nand_controller Let's not rely on the dummy_controller embedded in nand_chip.legacy and explicitly inherit from nand_controller instead. Signed-off-by: Boris Brezillon Reviewed-by: Miquel Raynal Tested-by: Bartosz Golaszewski Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200513172248.141402-1-boris.brezillon@collabora.com --- drivers/mtd/nand/raw/davinci_nand.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/davinci_nand.c b/drivers/mtd/nand/raw/davinci_nand.c index 25c185bea50cc..0312c632d86a9 100644 --- a/drivers/mtd/nand/raw/davinci_nand.c +++ b/drivers/mtd/nand/raw/davinci_nand.c @@ -38,6 +38,7 @@ * outputs in a "wire-AND" configuration, with no per-chip signals. */ struct davinci_nand_info { + struct nand_controller controller; struct nand_chip chip; struct platform_device *pdev; @@ -788,7 +789,9 @@ static int nand_davinci_probe(struct platform_device *pdev) spin_unlock_irq(&davinci_nand_lock); /* Scan to find existence of the device(s) */ - info->chip.legacy.dummy_controller.ops = &davinci_nand_controller_ops; + nand_controller_init(&info->controller); + info->controller.ops = &davinci_nand_controller_ops; + info->chip.controller = &info->controller; ret = nand_scan(&info->chip, pdata->mask_chipsel ? 2 : 1); if (ret < 0) { dev_dbg(&pdev->dev, "no NAND chip(s) found\n"); -- GitLab From 4f426e6e070fb29974f97a15876aa09501e56e09 Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Wed, 13 May 2020 19:22:46 +0200 Subject: [PATCH 0660/1971] mtd: rawnand: davinci: Stop using nand_chip.legacy.IO_ADDR_{R, W} We can use info->current_cs directly instead of doing this weird IO_ADDR_{R,W} re-assignment dance. 
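Sketched under the assumption of a driver-private info structure, the pattern this change adopts is: embed a struct nand_controller, initialize it, attach the driver's ops, and point the chip at it (everything except the rawnand API names is illustrative).

struct example_nand_info {
	struct nand_controller controller;	/* explicit, not the legacy dummy */
	struct nand_chip chip;
	/* ... */
};

/* in probe(): */
nand_controller_init(&info->controller);
info->controller.ops = &example_nand_controller_ops;
info->chip.controller = &info->controller;
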
Signed-off-by: Boris Brezillon Reviewed-by: Miquel Raynal Tested-by: Bartosz Golaszewski Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200513172248.141402-2-boris.brezillon@collabora.com --- drivers/mtd/nand/raw/davinci_nand.c | 36 ++++++++++++----------------- 1 file changed, 15 insertions(+), 21 deletions(-) diff --git a/drivers/mtd/nand/raw/davinci_nand.c b/drivers/mtd/nand/raw/davinci_nand.c index 0312c632d86a9..779f708c791f6 100644 --- a/drivers/mtd/nand/raw/davinci_nand.c +++ b/drivers/mtd/nand/raw/davinci_nand.c @@ -91,18 +91,13 @@ static void nand_davinci_hwcontrol(struct nand_chip *nand, int cmd, struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(nand)); void __iomem *addr = info->current_cs; - /* Did the control lines change? */ - if (ctrl & NAND_CTRL_CHANGE) { - if ((ctrl & NAND_CTRL_CLE) == NAND_CTRL_CLE) - addr += info->mask_cle; - else if ((ctrl & NAND_CTRL_ALE) == NAND_CTRL_ALE) - addr += info->mask_ale; - - nand->legacy.IO_ADDR_W = addr; - } + if (ctrl & NAND_CTRL_CLE) + addr += info->mask_cle; + else if (ctrl & NAND_CTRL_ALE) + addr += info->mask_ale; if (cmd != NAND_CMD_NONE) - iowrite8(cmd, nand->legacy.IO_ADDR_W); + iowrite8(cmd, addr); } static void nand_davinci_select_chip(struct nand_chip *nand, int chip) @@ -114,9 +109,6 @@ static void nand_davinci_select_chip(struct nand_chip *nand, int chip) /* maybe kick in a second chipselect */ if (chip > 0) info->current_cs += info->mask_chipsel; - - info->chip.legacy.IO_ADDR_W = info->current_cs; - info->chip.legacy.IO_ADDR_R = info->chip.legacy.IO_ADDR_W; } /*----------------------------------------------------------------------*/ @@ -425,23 +417,27 @@ correct: static void nand_davinci_read_buf(struct nand_chip *chip, uint8_t *buf, int len) { + struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip)); + if ((0x03 & ((uintptr_t)buf)) == 0 && (0x03 & len) == 0) - ioread32_rep(chip->legacy.IO_ADDR_R, buf, len >> 2); + ioread32_rep(info->current_cs, buf, len >> 2); else if ((0x01 & ((uintptr_t)buf)) == 0 && (0x01 & len) == 0) - ioread16_rep(chip->legacy.IO_ADDR_R, buf, len >> 1); + ioread16_rep(info->current_cs, buf, len >> 1); else - ioread8_rep(chip->legacy.IO_ADDR_R, buf, len); + ioread8_rep(info->current_cs, buf, len); } static void nand_davinci_write_buf(struct nand_chip *chip, const uint8_t *buf, int len) { + struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip)); + if ((0x03 & ((uintptr_t)buf)) == 0 && (0x03 & len) == 0) - iowrite32_rep(chip->legacy.IO_ADDR_R, buf, len >> 2); + iowrite32_rep(info->current_cs, buf, len >> 2); else if ((0x01 & ((uintptr_t)buf)) == 0 && (0x01 & len) == 0) - iowrite16_rep(chip->legacy.IO_ADDR_R, buf, len >> 1); + iowrite16_rep(info->current_cs, buf, len >> 1); else - iowrite8_rep(chip->legacy.IO_ADDR_R, buf, len); + iowrite8_rep(info->current_cs, buf, len); } /* @@ -747,8 +743,6 @@ static int nand_davinci_probe(struct platform_device *pdev) mtd->dev.parent = &pdev->dev; nand_set_flash_node(&info->chip, pdev->dev.of_node); - info->chip.legacy.IO_ADDR_R = vaddr; - info->chip.legacy.IO_ADDR_W = vaddr; info->chip.legacy.chip_delay = 0; info->chip.legacy.select_chip = nand_davinci_select_chip; -- GitLab From 547aa7c262a4883ce45d1ea1e3acbff942041e45 Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Wed, 13 May 2020 19:22:47 +0200 Subject: [PATCH 0661/1971] mtd: rawnand: davinci: Implement exec_op() Implement exec_op() so we can later get rid of the legacy interface implementation. 
Signed-off-by: Boris Brezillon Tested-by: Bartosz Golaszewski Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200513172248.141402-3-boris.brezillon@collabora.com --- drivers/mtd/nand/raw/davinci_nand.c | 102 +++++++++++++++++++++++++++- 1 file changed, 101 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/davinci_nand.c b/drivers/mtd/nand/raw/davinci_nand.c index 779f708c791f6..0eeb30c7fc4e8 100644 --- a/drivers/mtd/nand/raw/davinci_nand.c +++ b/drivers/mtd/nand/raw/davinci_nand.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include #include #include @@ -678,8 +678,108 @@ static int davinci_nand_attach_chip(struct nand_chip *chip) return ret; } +static void nand_davinci_data_in(struct davinci_nand_info *info, void *buf, + unsigned int len, bool force_8bit) +{ + u32 alignment = ((uintptr_t)buf | len) & 3; + + if (force_8bit || (alignment & 1)) + ioread8_rep(info->current_cs, buf, len); + else if (alignment & 3) + ioread16_rep(info->current_cs, buf, len >> 1); + else + ioread32_rep(info->current_cs, buf, len >> 2); +} + +static void nand_davinci_data_out(struct davinci_nand_info *info, + const void *buf, unsigned int len, + bool force_8bit) +{ + u32 alignment = ((uintptr_t)buf | len) & 3; + + if (force_8bit || (alignment & 1)) + iowrite8_rep(info->current_cs, buf, len); + else if (alignment & 3) + iowrite16_rep(info->current_cs, buf, len >> 1); + else + iowrite32_rep(info->current_cs, buf, len >> 2); +} + +static int davinci_nand_exec_instr(struct davinci_nand_info *info, + const struct nand_op_instr *instr) +{ + unsigned int i, timeout_us; + u32 status; + int ret; + + switch (instr->type) { + case NAND_OP_CMD_INSTR: + iowrite8(instr->ctx.cmd.opcode, + info->current_cs + info->mask_cle); + break; + + case NAND_OP_ADDR_INSTR: + for (i = 0; i < instr->ctx.addr.naddrs; i++) { + iowrite8(instr->ctx.addr.addrs[i], + info->current_cs + info->mask_ale); + } + break; + + case NAND_OP_DATA_IN_INSTR: + nand_davinci_data_in(info, instr->ctx.data.buf.in, + instr->ctx.data.len, + instr->ctx.data.force_8bit); + break; + + case NAND_OP_DATA_OUT_INSTR: + nand_davinci_data_out(info, instr->ctx.data.buf.out, + instr->ctx.data.len, + instr->ctx.data.force_8bit); + break; + + case NAND_OP_WAITRDY_INSTR: + timeout_us = instr->ctx.waitrdy.timeout_ms * 1000; + ret = readl_relaxed_poll_timeout(info->base + NANDFSR_OFFSET, + status, status & BIT(0), 100, + timeout_us); + if (ret) + return ret; + + break; + } + + if (instr->delay_ns) + ndelay(instr->delay_ns); + + return 0; +} + +static int davinci_nand_exec_op(struct nand_chip *chip, + const struct nand_operation *op, + bool check_only) +{ + struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip)); + unsigned int i; + + if (check_only) + return 0; + + info->current_cs = info->vaddr + (op->cs * info->mask_chipsel); + + for (i = 0; i < op->ninstrs; i++) { + int ret; + + ret = davinci_nand_exec_instr(info, &op->instrs[i]); + if (ret) + return ret; + } + + return 0; +} + static const struct nand_controller_ops davinci_nand_controller_ops = { .attach_chip = davinci_nand_attach_chip, + .exec_op = davinci_nand_exec_op, }; static int nand_davinci_probe(struct platform_device *pdev) -- GitLab From dbf15080ff2d0656304acb9595ecd8397f17b36d Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Wed, 13 May 2020 19:22:48 +0200 Subject: [PATCH 0662/1971] mtd: rawnand: davinci: Get rid of the legacy interface implementation Now that exec_op() is implemented we can get rid of the legacy interface implementation. 
Signed-off-by: Boris Brezillon Reviewed-by: Miquel Raynal Tested-by: Bartosz Golaszewski Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200513172248.141402-4-boris.brezillon@collabora.com --- drivers/mtd/nand/raw/davinci_nand.c | 91 ----------------------------- 1 file changed, 91 deletions(-) diff --git a/drivers/mtd/nand/raw/davinci_nand.c b/drivers/mtd/nand/raw/davinci_nand.c index 0eeb30c7fc4e8..d8aa61a6928a5 100644 --- a/drivers/mtd/nand/raw/davinci_nand.c +++ b/drivers/mtd/nand/raw/davinci_nand.c @@ -81,38 +81,6 @@ static inline void davinci_nand_writel(struct davinci_nand_info *info, /*----------------------------------------------------------------------*/ -/* - * Access to hardware control lines: ALE, CLE, secondary chipselect. - */ - -static void nand_davinci_hwcontrol(struct nand_chip *nand, int cmd, - unsigned int ctrl) -{ - struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(nand)); - void __iomem *addr = info->current_cs; - - if (ctrl & NAND_CTRL_CLE) - addr += info->mask_cle; - else if (ctrl & NAND_CTRL_ALE) - addr += info->mask_ale; - - if (cmd != NAND_CMD_NONE) - iowrite8(cmd, addr); -} - -static void nand_davinci_select_chip(struct nand_chip *nand, int chip) -{ - struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(nand)); - - info->current_cs = info->vaddr; - - /* maybe kick in a second chipselect */ - if (chip > 0) - info->current_cs += info->mask_chipsel; -} - -/*----------------------------------------------------------------------*/ - /* * 1-bit hardware ECC ... context maintained for each core chipselect */ @@ -405,54 +373,6 @@ correct: /*----------------------------------------------------------------------*/ -/* - * NOTE: NAND boot requires ALE == EM_A[1], CLE == EM_A[2], so that's - * how these chips are normally wired. This translates to both 8 and 16 - * bit busses using ALE == BIT(3) in byte addresses, and CLE == BIT(4). - * - * For now we assume that configuration, or any other one which ignores - * the two LSBs for NAND access ... so we can issue 32-bit reads/writes - * and have that transparently morphed into multiple NAND operations. - */ -static void nand_davinci_read_buf(struct nand_chip *chip, uint8_t *buf, - int len) -{ - struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip)); - - if ((0x03 & ((uintptr_t)buf)) == 0 && (0x03 & len) == 0) - ioread32_rep(info->current_cs, buf, len >> 2); - else if ((0x01 & ((uintptr_t)buf)) == 0 && (0x01 & len) == 0) - ioread16_rep(info->current_cs, buf, len >> 1); - else - ioread8_rep(info->current_cs, buf, len); -} - -static void nand_davinci_write_buf(struct nand_chip *chip, const uint8_t *buf, - int len) -{ - struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip)); - - if ((0x03 & ((uintptr_t)buf)) == 0 && (0x03 & len) == 0) - iowrite32_rep(info->current_cs, buf, len >> 2); - else if ((0x01 & ((uintptr_t)buf)) == 0 && (0x01 & len) == 0) - iowrite16_rep(info->current_cs, buf, len >> 1); - else - iowrite8_rep(info->current_cs, buf, len); -} - -/* - * Check hardware register for wait status. Returns 1 if device is ready, - * 0 if it is still busy. 
- */ -static int nand_davinci_dev_ready(struct nand_chip *chip) -{ - struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip)); - - return davinci_nand_readl(info, NANDFSR_OFFSET) & BIT(0); -} - -/*----------------------------------------------------------------------*/ - /* An ECC layout for using 4-bit ECC with small-page flash, storing * ten ECC bytes plus the manufacturer's bad block marker byte, and * and not overlapping the default BBT markers. @@ -843,9 +763,6 @@ static int nand_davinci_probe(struct platform_device *pdev) mtd->dev.parent = &pdev->dev; nand_set_flash_node(&info->chip, pdev->dev.of_node); - info->chip.legacy.chip_delay = 0; - info->chip.legacy.select_chip = nand_davinci_select_chip; - /* options such as NAND_BBT_USE_FLASH */ info->chip.bbt_options = pdata->bbt_options; /* options such as 16-bit widths */ @@ -862,14 +779,6 @@ static int nand_davinci_probe(struct platform_device *pdev) info->mask_ale = pdata->mask_ale ? : MASK_ALE; info->mask_cle = pdata->mask_cle ? : MASK_CLE; - /* Set address of hardware control function */ - info->chip.legacy.cmd_ctrl = nand_davinci_hwcontrol; - info->chip.legacy.dev_ready = nand_davinci_dev_ready; - - /* Speed up buffer I/O */ - info->chip.legacy.read_buf = nand_davinci_read_buf; - info->chip.legacy.write_buf = nand_davinci_write_buf; - /* Use board-specific ECC config */ info->chip.ecc.mode = pdata->ecc_mode; -- GitLab From e45a4b652dbd2f8b5a3b8e97e89f602a58cb28aa Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Mon, 18 May 2020 17:52:37 +0200 Subject: [PATCH 0663/1971] mtd: rawnand: Fix nand_gpio_waitrdy() Mimic what's done in nand_soft_waitrdy() and add one to the jiffies timeout so we don't end up waiting less than actually required. Reported-by: Tudor Ambarus Fixes: b0e137ad24b6c ("mtd: rawnand: Provide helper for polling GPIO R/B pin") Cc: Signed-off-by: Boris Brezillon Reviewed-by: Tudor Ambarus Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200518155237.297549-1-boris.brezillon@collabora.com --- drivers/mtd/nand/raw/nand_base.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index c4d6d5482b318..79e8052bdff91 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -790,8 +790,14 @@ EXPORT_SYMBOL_GPL(nand_soft_waitrdy); int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod, unsigned long timeout_ms) { - /* Wait until R/B pin indicates chip is ready or timeout occurs */ - timeout_ms = jiffies + msecs_to_jiffies(timeout_ms); + + /* + * Wait until R/B pin indicates chip is ready or timeout occurs. + * +1 below is necessary because if we are now in the last fraction + * of jiffy and msecs_to_jiffies is 1 then we will wait only that + * small jiffy fraction - possibly leading to false timeout. + */ + timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1; do { if (gpiod_get_value_cansleep(gpiod)) return 0; -- GitLab From 767727b927aa48e429c6fa92d4b55cc235fffe95 Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Mon, 18 May 2020 18:33:00 +0200 Subject: [PATCH 0664/1971] mtd: rawnand: Remove the cmx270 NAND controller driver The CM-X270 board has been removed, we can remove the custom NAND driver as well. 
Signed-off-by: Boris Brezillon Acked-by: Robert Jarzmik Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200518163300.304732-1-boris.brezillon@collabora.com --- drivers/mtd/nand/raw/Kconfig | 4 - drivers/mtd/nand/raw/Makefile | 1 - drivers/mtd/nand/raw/cmx270_nand.c | 236 ----------------------------- 3 files changed, 241 deletions(-) delete mode 100644 drivers/mtd/nand/raw/cmx270_nand.c diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig index a80a46bb5b8bc..9b08a58ae88e6 100644 --- a/drivers/mtd/nand/raw/Kconfig +++ b/drivers/mtd/nand/raw/Kconfig @@ -213,10 +213,6 @@ config MTD_NAND_MLC_LPC32XX Please check the actual NAND chip connected and its support by the MLC NAND controller. -config MTD_NAND_CM_X270 - tristate "CM-X270 modules NAND controller" - depends on MACH_ARMCORE - config MTD_NAND_PASEMI tristate "PA Semi PWRficient NAND controller" depends on PPC_PASEMI diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile index 2d136b158fb7e..a817052286c7e 100644 --- a/drivers/mtd/nand/raw/Makefile +++ b/drivers/mtd/nand/raw/Makefile @@ -25,7 +25,6 @@ obj-$(CONFIG_MTD_NAND_GPIO) += gpio.o omap2_nand-objs := omap2.o obj-$(CONFIG_MTD_NAND_OMAP2) += omap2_nand.o obj-$(CONFIG_MTD_NAND_OMAP_BCH_BUILD) += omap_elm.o -obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o obj-$(CONFIG_MTD_NAND_MARVELL) += marvell_nand.o obj-$(CONFIG_MTD_NAND_TMIO) += tmio_nand.o obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o diff --git a/drivers/mtd/nand/raw/cmx270_nand.c b/drivers/mtd/nand/raw/cmx270_nand.c deleted file mode 100644 index 045b6175ae79c..0000000000000 --- a/drivers/mtd/nand/raw/cmx270_nand.c +++ /dev/null @@ -1,236 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2006 Compulab, Ltd. - * Mike Rapoport - * - * Derived from drivers/mtd/nand/h1910.c (removed in v3.10) - * Copyright (C) 2002 Marius Gröger (mag@sysgo.de) - * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de) - * - * Overview: - * This is a device driver for the NAND flash device found on the - * CM-X270 board. 
- */ - -#include -#include -#include -#include -#include - -#include -#include -#include - -#include - -#define GPIO_NAND_CS (11) -#define GPIO_NAND_RB (89) - -/* MTD structure for CM-X270 board */ -static struct mtd_info *cmx270_nand_mtd; - -/* remaped IO address of the device */ -static void __iomem *cmx270_nand_io; - -/* - * Define static partitions for flash device - */ -static const struct mtd_partition partition_info[] = { - [0] = { - .name = "cmx270-0", - .offset = 0, - .size = MTDPART_SIZ_FULL - } -}; -#define NUM_PARTITIONS (ARRAY_SIZE(partition_info)) - -static u_char cmx270_read_byte(struct nand_chip *this) -{ - return (readl(this->legacy.IO_ADDR_R) >> 16); -} - -static void cmx270_write_buf(struct nand_chip *this, const u_char *buf, - int len) -{ - int i; - - for (i=0; ilegacy.IO_ADDR_W); -} - -static void cmx270_read_buf(struct nand_chip *this, u_char *buf, int len) -{ - int i; - - for (i=0; ilegacy.IO_ADDR_R) >> 16; -} - -static inline void nand_cs_on(void) -{ - gpio_set_value(GPIO_NAND_CS, 0); -} - -static void nand_cs_off(void) -{ - dsb(); - - gpio_set_value(GPIO_NAND_CS, 1); -} - -/* - * hardware specific access to control-lines - */ -static void cmx270_hwcontrol(struct nand_chip *this, int dat, - unsigned int ctrl) -{ - unsigned int nandaddr = (unsigned int)this->legacy.IO_ADDR_W; - - dsb(); - - if (ctrl & NAND_CTRL_CHANGE) { - if ( ctrl & NAND_ALE ) - nandaddr |= (1 << 3); - else - nandaddr &= ~(1 << 3); - if ( ctrl & NAND_CLE ) - nandaddr |= (1 << 2); - else - nandaddr &= ~(1 << 2); - if ( ctrl & NAND_NCE ) - nand_cs_on(); - else - nand_cs_off(); - } - - dsb(); - this->legacy.IO_ADDR_W = (void __iomem*)nandaddr; - if (dat != NAND_CMD_NONE) - writel((dat << 16), this->legacy.IO_ADDR_W); - - dsb(); -} - -/* - * read device ready pin - */ -static int cmx270_device_ready(struct nand_chip *this) -{ - dsb(); - - return (gpio_get_value(GPIO_NAND_RB)); -} - -/* - * Main initialization routine - */ -static int __init cmx270_init(void) -{ - struct nand_chip *this; - int ret; - - if (!(machine_is_armcore() && cpu_is_pxa27x())) - return -ENODEV; - - ret = gpio_request(GPIO_NAND_CS, "NAND CS"); - if (ret) { - pr_warn("CM-X270: failed to request NAND CS gpio\n"); - return ret; - } - - gpio_direction_output(GPIO_NAND_CS, 1); - - ret = gpio_request(GPIO_NAND_RB, "NAND R/B"); - if (ret) { - pr_warn("CM-X270: failed to request NAND R/B gpio\n"); - goto err_gpio_request; - } - - gpio_direction_input(GPIO_NAND_RB); - - /* Allocate memory for MTD device structure and private data */ - this = kzalloc(sizeof(struct nand_chip), GFP_KERNEL); - if (!this) { - ret = -ENOMEM; - goto err_kzalloc; - } - - cmx270_nand_io = ioremap(PXA_CS1_PHYS, 12); - if (!cmx270_nand_io) { - pr_debug("Unable to ioremap NAND device\n"); - ret = -EINVAL; - goto err_ioremap; - } - - cmx270_nand_mtd = nand_to_mtd(this); - - /* Link the private data with the MTD structure */ - cmx270_nand_mtd->owner = THIS_MODULE; - - /* insert callbacks */ - this->legacy.IO_ADDR_R = cmx270_nand_io; - this->legacy.IO_ADDR_W = cmx270_nand_io; - this->legacy.cmd_ctrl = cmx270_hwcontrol; - this->legacy.dev_ready = cmx270_device_ready; - - /* 15 us command delay time */ - this->legacy.chip_delay = 20; - this->ecc.mode = NAND_ECC_SOFT; - this->ecc.algo = NAND_ECC_HAMMING; - - /* read/write functions */ - this->legacy.read_byte = cmx270_read_byte; - this->legacy.read_buf = cmx270_read_buf; - this->legacy.write_buf = cmx270_write_buf; - - /* Scan to find existence of the device */ - ret = nand_scan(this, 1); - if (ret) { - pr_notice("No NAND 
device\n"); - goto err_scan; - } - - /* Register the partitions */ - ret = mtd_device_register(cmx270_nand_mtd, partition_info, - NUM_PARTITIONS); - if (ret) - goto err_scan; - - /* Return happy */ - return 0; - -err_scan: - iounmap(cmx270_nand_io); -err_ioremap: - kfree(this); -err_kzalloc: - gpio_free(GPIO_NAND_RB); -err_gpio_request: - gpio_free(GPIO_NAND_CS); - - return ret; - -} -module_init(cmx270_init); - -/* - * Clean up routine - */ -static void __exit cmx270_cleanup(void) -{ - /* Release resources, unregister device */ - nand_release(mtd_to_nand(cmx270_nand_mtd)); - - gpio_free(GPIO_NAND_RB); - gpio_free(GPIO_NAND_CS); - - iounmap(cmx270_nand_io); - - kfree(mtd_to_nand(cmx270_nand_mtd)); -} -module_exit(cmx270_cleanup); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Mike Rapoport "); -MODULE_DESCRIPTION("NAND flash driver for Compulab CM-X270 Module"); -- GitLab From f0689802850beaf02dab79029a60eafdc617697f Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 15:08:34 +0200 Subject: [PATCH 0665/1971] mtd: rawnand: micron: Adapt the PAGE READ flow to constraint controllers There are controllers not able to just read data cycles on the bus. There are controllers not able to do a change column. If we want to support both, we need to check which operation is supported first. This is the exact same mechanism that is in use for parameter page reads (ONFI/JEDEC) as the same problem occurs. Speed testing does not show any throughput penalty so we do not optimize more than that. However it is likely that, in the future, a more robust and exhaustive test will run at boot time to avoid re-checking what is supported and what is not at every call. Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200519130834.2918-1-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nand_micron.c | 61 +++++++++++++++++++++++++----- 1 file changed, 52 insertions(+), 9 deletions(-) diff --git a/drivers/mtd/nand/raw/nand_micron.c b/drivers/mtd/nand/raw/nand_micron.c index b2b047b245f41..3589b4fce0d48 100644 --- a/drivers/mtd/nand/raw/nand_micron.c +++ b/drivers/mtd/nand/raw/nand_micron.c @@ -192,6 +192,7 @@ static int micron_nand_on_die_ecc_status_4(struct nand_chip *chip, u8 status, struct micron_nand *micron = nand_get_manufacturer_data(chip); struct mtd_info *mtd = nand_to_mtd(chip); unsigned int step, max_bitflips = 0; + bool use_datain = false; int ret; if (!(status & NAND_ECC_STATUS_WRITE_RECOMMENDED)) { @@ -211,8 +212,27 @@ static int micron_nand_on_die_ecc_status_4(struct nand_chip *chip, u8 status, * in non-raw mode, even if the user did not request those bytes. */ if (!oob_required) { - ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, - false, false); + /* + * We first check which operation is supported by the controller + * before running it. This trick makes it possible to support + * all controllers, even the most constraints, without almost + * any performance hit. + * + * TODO: could be enhanced to avoid repeating the same check + * over and over in the fast path. 
+ */ + if (!nand_has_exec_op(chip) || + !nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false, + true)) + use_datain = true; + + if (use_datain) + ret = nand_read_data_op(chip, chip->oob_poi, + mtd->oobsize, false, false); + else + ret = nand_change_read_column_op(chip, mtd->writesize, + chip->oob_poi, + mtd->oobsize, false); if (ret) return ret; } @@ -285,6 +305,7 @@ micron_nand_read_page_on_die_ecc(struct nand_chip *chip, uint8_t *buf, int oob_required, int page) { struct mtd_info *mtd = nand_to_mtd(chip); + bool use_datain = false; u8 status; int ret, max_bitflips = 0; @@ -300,14 +321,36 @@ micron_nand_read_page_on_die_ecc(struct nand_chip *chip, uint8_t *buf, if (ret) goto out; - ret = nand_exit_status_op(chip); - if (ret) - goto out; + /* + * We first check which operation is supported by the controller before + * running it. This trick makes it possible to support all controllers, + * even the most constraints, without almost any performance hit. + * + * TODO: could be enhanced to avoid repeating the same check over and + * over in the fast path. + */ + if (!nand_has_exec_op(chip) || + !nand_read_data_op(chip, buf, mtd->writesize, false, true)) + use_datain = true; - ret = nand_read_data_op(chip, buf, mtd->writesize, false, false); - if (!ret && oob_required) - ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, - false, false); + if (use_datain) { + ret = nand_exit_status_op(chip); + if (ret) + goto out; + + ret = nand_read_data_op(chip, buf, mtd->writesize, false, + false); + if (!ret && oob_required) + ret = nand_read_data_op(chip, chip->oob_poi, + mtd->oobsize, false, false); + } else { + ret = nand_change_read_column_op(chip, 0, buf, mtd->writesize, + false); + if (!ret && oob_required) + ret = nand_change_read_column_op(chip, mtd->writesize, + chip->oob_poi, + mtd->oobsize, false); + } if (chip->ecc.strength == 4) max_bitflips = micron_nand_on_die_ecc_status_4(chip, status, -- GitLab From c8ae3f744ddca0da164bcacee42d1d4b6fe7027d Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 09:45:42 +0200 Subject: [PATCH 0666/1971] lib/bch: Rework a little bit the exported function names There are four exported functions, all suffixed by _bch, which is clearly not the norm. Let's rename them by prefixing them with bch_ instead. 
This is a mechanical change: init_bch -> bch_init free_bch -> bch_free encode_bch -> bch_encode decode_bch -> bch_decode Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200519074549.23673-2-miquel.raynal@bootlin.com --- drivers/mtd/devices/docg3.c | 10 +++--- drivers/mtd/nand/raw/nand_bch.c | 10 +++--- include/linux/bch.h | 8 ++--- lib/bch.c | 64 ++++++++++++++++----------------- 4 files changed, 46 insertions(+), 46 deletions(-) diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c index eb0f4600efd17..799df8d033573 100644 --- a/drivers/mtd/devices/docg3.c +++ b/drivers/mtd/devices/docg3.c @@ -647,7 +647,7 @@ static int doc_ecc_bch_fix_data(struct docg3 *docg3, void *buf, u8 *hwecc) for (i = 0; i < DOC_ECC_BCH_SIZE; i++) ecc[i] = bitrev8(hwecc[i]); - numerrs = decode_bch(docg3->cascade->bch, NULL, + numerrs = bch_decode(docg3->cascade->bch, NULL, DOC_ECC_BCH_COVERED_BYTES, NULL, ecc, NULL, errorpos); BUG_ON(numerrs == -EINVAL); @@ -1984,8 +1984,8 @@ static int __init docg3_probe(struct platform_device *pdev) return ret; cascade->base = base; mutex_init(&cascade->lock); - cascade->bch = init_bch(DOC_ECC_BCH_M, DOC_ECC_BCH_T, - DOC_ECC_BCH_PRIMPOLY); + cascade->bch = bch_init(DOC_ECC_BCH_M, DOC_ECC_BCH_T, + DOC_ECC_BCH_PRIMPOLY); if (!cascade->bch) return ret; @@ -2021,7 +2021,7 @@ notfound: ret = -ENODEV; dev_info(dev, "No supported DiskOnChip found\n"); err_probe: - free_bch(cascade->bch); + bch_free(cascade->bch); for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) if (cascade->floors[floor]) doc_release_device(cascade->floors[floor]); @@ -2045,7 +2045,7 @@ static int docg3_release(struct platform_device *pdev) if (cascade->floors[floor]) doc_release_device(cascade->floors[floor]); - free_bch(docg3->cascade->bch); + bch_free(docg3->cascade->bch); return 0; } diff --git a/drivers/mtd/nand/raw/nand_bch.c b/drivers/mtd/nand/raw/nand_bch.c index 17527310c3a1d..d95fcc7358e97 100644 --- a/drivers/mtd/nand/raw/nand_bch.c +++ b/drivers/mtd/nand/raw/nand_bch.c @@ -41,7 +41,7 @@ int nand_bch_calculate_ecc(struct nand_chip *chip, const unsigned char *buf, unsigned int i; memset(code, 0, chip->ecc.bytes); - encode_bch(nbc->bch, buf, chip->ecc.size, code); + bch_encode(nbc->bch, buf, chip->ecc.size, code); /* apply mask so that an erased page is a valid codeword */ for (i = 0; i < chip->ecc.bytes; i++) @@ -67,7 +67,7 @@ int nand_bch_correct_data(struct nand_chip *chip, unsigned char *buf, unsigned int *errloc = nbc->errloc; int i, count; - count = decode_bch(nbc->bch, NULL, chip->ecc.size, read_ecc, calc_ecc, + count = bch_decode(nbc->bch, NULL, chip->ecc.size, read_ecc, calc_ecc, NULL, errloc); if (count > 0) { for (i = 0; i < count; i++) { @@ -130,7 +130,7 @@ struct nand_bch_control *nand_bch_init(struct mtd_info *mtd) if (!nbc) goto fail; - nbc->bch = init_bch(m, t, 0); + nbc->bch = bch_init(m, t, 0); if (!nbc->bch) goto fail; @@ -182,7 +182,7 @@ struct nand_bch_control *nand_bch_init(struct mtd_info *mtd) goto fail; memset(erased_page, 0xff, eccsize); - encode_bch(nbc->bch, erased_page, eccsize, nbc->eccmask); + bch_encode(nbc->bch, erased_page, eccsize, nbc->eccmask); kfree(erased_page); for (i = 0; i < eccbytes; i++) @@ -205,7 +205,7 @@ EXPORT_SYMBOL(nand_bch_init); void nand_bch_free(struct nand_bch_control *nbc) { if (nbc) { - free_bch(nbc->bch); + bch_free(nbc->bch); kfree(nbc->errloc); kfree(nbc->eccmask); kfree(nbc); diff --git a/include/linux/bch.h b/include/linux/bch.h index aa765af85c382..9c35e7cd58909 100644 --- 
a/include/linux/bch.h +++ b/include/linux/bch.h @@ -53,14 +53,14 @@ struct bch_control { struct gf_poly *poly_2t[4]; }; -struct bch_control *init_bch(int m, int t, unsigned int prim_poly); +struct bch_control *bch_init(int m, int t, unsigned int prim_poly); -void free_bch(struct bch_control *bch); +void bch_free(struct bch_control *bch); -void encode_bch(struct bch_control *bch, const uint8_t *data, +void bch_encode(struct bch_control *bch, const uint8_t *data, unsigned int len, uint8_t *ecc); -int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len, +int bch_decode(struct bch_control *bch, const uint8_t *data, unsigned int len, const uint8_t *recv_ecc, const uint8_t *calc_ecc, const unsigned int *syn, unsigned int *errloc); diff --git a/lib/bch.c b/lib/bch.c index 052d3fb753a08..1091841ac7160 100644 --- a/lib/bch.c +++ b/lib/bch.c @@ -23,15 +23,15 @@ * This library provides runtime configurable encoding/decoding of binary * Bose-Chaudhuri-Hocquenghem (BCH) codes. * - * Call init_bch to get a pointer to a newly allocated bch_control structure for + * Call bch_init to get a pointer to a newly allocated bch_control structure for * the given m (Galois field order), t (error correction capability) and * (optional) primitive polynomial parameters. * - * Call encode_bch to compute and store ecc parity bytes to a given buffer. - * Call decode_bch to detect and locate errors in received data. + * Call bch_encode to compute and store ecc parity bytes to a given buffer. + * Call bch_decode to detect and locate errors in received data. * * On systems supporting hw BCH features, intermediate results may be provided - * to decode_bch in order to skip certain steps. See decode_bch() documentation + * to bch_decode in order to skip certain steps. See bch_decode() documentation * for details. * * Option CONFIG_BCH_CONST_PARAMS can be used to force fixed values of @@ -115,9 +115,9 @@ struct gf_poly_deg1 { }; /* - * same as encode_bch(), but process input data one byte at a time + * same as bch_encode(), but process input data one byte at a time */ -static void encode_bch_unaligned(struct bch_control *bch, +static void bch_encode_unaligned(struct bch_control *bch, const unsigned char *data, unsigned int len, uint32_t *ecc) { @@ -174,7 +174,7 @@ static void store_ecc8(struct bch_control *bch, uint8_t *dst, } /** - * encode_bch - calculate BCH ecc parity of data + * bch_encode - calculate BCH ecc parity of data * @bch: BCH control structure * @data: data to encode * @len: data length in bytes @@ -187,7 +187,7 @@ static void store_ecc8(struct bch_control *bch, uint8_t *dst, * The exact number of computed ecc parity bits is given by member @ecc_bits of * @bch; it may be less than m*t for large values of t. */ -void encode_bch(struct bch_control *bch, const uint8_t *data, +void bch_encode(struct bch_control *bch, const uint8_t *data, unsigned int len, uint8_t *ecc) { const unsigned int l = BCH_ECC_WORDS(bch)-1; @@ -215,7 +215,7 @@ void encode_bch(struct bch_control *bch, const uint8_t *data, m = ((unsigned long)data) & 3; if (m) { mlen = (len < (4-m)) ? 
len : 4-m; - encode_bch_unaligned(bch, data, mlen, bch->ecc_buf); + bch_encode_unaligned(bch, data, mlen, bch->ecc_buf); data += mlen; len -= mlen; } @@ -255,13 +255,13 @@ void encode_bch(struct bch_control *bch, const uint8_t *data, /* process last unaligned bytes */ if (len) - encode_bch_unaligned(bch, data, len, bch->ecc_buf); + bch_encode_unaligned(bch, data, len, bch->ecc_buf); /* store ecc parity bytes into original parity buffer */ if (ecc) store_ecc8(bch, ecc, bch->ecc_buf); } -EXPORT_SYMBOL_GPL(encode_bch); +EXPORT_SYMBOL_GPL(bch_encode); static inline int modulo(struct bch_control *bch, unsigned int v) { @@ -952,7 +952,7 @@ static int chien_search(struct bch_control *bch, unsigned int len, #endif /* USE_CHIEN_SEARCH */ /** - * decode_bch - decode received codeword and find bit error locations + * bch_decode - decode received codeword and find bit error locations * @bch: BCH control structure * @data: received data, ignored if @calc_ecc is provided * @len: data length in bytes, must always be provided @@ -966,22 +966,22 @@ static int chien_search(struct bch_control *bch, unsigned int len, * invalid parameters were provided * * Depending on the available hw BCH support and the need to compute @calc_ecc - * separately (using encode_bch()), this function should be called with one of + * separately (using bch_encode()), this function should be called with one of * the following parameter configurations - * * by providing @data and @recv_ecc only: - * decode_bch(@bch, @data, @len, @recv_ecc, NULL, NULL, @errloc) + * bch_decode(@bch, @data, @len, @recv_ecc, NULL, NULL, @errloc) * * by providing @recv_ecc and @calc_ecc: - * decode_bch(@bch, NULL, @len, @recv_ecc, @calc_ecc, NULL, @errloc) + * bch_decode(@bch, NULL, @len, @recv_ecc, @calc_ecc, NULL, @errloc) * * by providing ecc = recv_ecc XOR calc_ecc: - * decode_bch(@bch, NULL, @len, NULL, ecc, NULL, @errloc) + * bch_decode(@bch, NULL, @len, NULL, ecc, NULL, @errloc) * * by providing syndrome results @syn: - * decode_bch(@bch, NULL, @len, NULL, NULL, @syn, @errloc) + * bch_decode(@bch, NULL, @len, NULL, NULL, @syn, @errloc) * - * Once decode_bch() has successfully returned with a positive value, error + * Once bch_decode() has successfully returned with a positive value, error * locations returned in array @errloc should be interpreted as follows - * * if (errloc[n] >= 8*len), then n-th error is located in ecc (no need for @@ -993,7 +993,7 @@ static int chien_search(struct bch_control *bch, unsigned int len, * Note that this function does not perform any data correction by itself, it * merely indicates error locations. */ -int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len, +int bch_decode(struct bch_control *bch, const uint8_t *data, unsigned int len, const uint8_t *recv_ecc, const uint8_t *calc_ecc, const unsigned int *syn, unsigned int *errloc) { @@ -1012,7 +1012,7 @@ int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len, /* compute received data ecc into an internal buffer */ if (!data || !recv_ecc) return -EINVAL; - encode_bch(bch, data, len, NULL); + bch_encode(bch, data, len, NULL); } else { /* load provided calculated ecc */ load_ecc8(bch, bch->ecc_buf, calc_ecc); @@ -1053,7 +1053,7 @@ int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len, } return (err >= 0) ? 
err : -EBADMSG; } -EXPORT_SYMBOL_GPL(decode_bch); +EXPORT_SYMBOL_GPL(bch_decode); /* * generate Galois field lookup tables @@ -1236,7 +1236,7 @@ finish: } /** - * init_bch - initialize a BCH encoder/decoder + * bch_init - initialize a BCH encoder/decoder * @m: Galois field order, should be in the range 5-15 * @t: maximum error correction capability, in bits * @prim_poly: user-provided primitive polynomial (or 0 to use default) @@ -1246,17 +1246,17 @@ finish: * * This initialization can take some time, as lookup tables are built for fast * encoding/decoding; make sure not to call this function from a time critical - * path. Usually, init_bch() should be called on module/driver init and - * free_bch() should be called to release memory on exit. + * path. Usually, bch_init() should be called on module/driver init and + * bch_free() should be called to release memory on exit. * * You may provide your own primitive polynomial of degree @m in argument - * @prim_poly, or let init_bch() use its default polynomial. + * @prim_poly, or let bch_init() use its default polynomial. * - * Once init_bch() has successfully returned a pointer to a newly allocated + * Once bch_init() has successfully returned a pointer to a newly allocated * BCH control structure, ecc length in bytes is given by member @ecc_bytes of * the structure. */ -struct bch_control *init_bch(int m, int t, unsigned int prim_poly) +struct bch_control *bch_init(int m, int t, unsigned int prim_poly) { int err = 0; unsigned int i, words; @@ -1347,16 +1347,16 @@ struct bch_control *init_bch(int m, int t, unsigned int prim_poly) return bch; fail: - free_bch(bch); + bch_free(bch); return NULL; } -EXPORT_SYMBOL_GPL(init_bch); +EXPORT_SYMBOL_GPL(bch_init); /** - * free_bch - free the BCH control structure + * bch_free - free the BCH control structure * @bch: BCH control structure to release */ -void free_bch(struct bch_control *bch) +void bch_free(struct bch_control *bch) { unsigned int i; @@ -1377,7 +1377,7 @@ void free_bch(struct bch_control *bch) kfree(bch); } } -EXPORT_SYMBOL_GPL(free_bch); +EXPORT_SYMBOL_GPL(bch_free); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Ivan Djelic "); -- GitLab From 1759279ad138cb0a903224a89f4bf40f69c417e8 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 09:45:43 +0200 Subject: [PATCH 0667/1971] lib/bch: Allow easy bit swapping It seems that several hardware ECC engine use a swapped representation of bytes compared to software. This might having to do with how the ECC engine is wired to the NAND controller or the order the bits are passed to the hardware BCH logic. This means that when the software BCH engine is working in conjunction with data generated with hardware, sometimes we might need to swap the bits inside bytes, eg: 0x0A = b0000_1010 -> b0101_0000 = 0x50 Make it possible by adding a boolean to the BCH initialization routine. Regarding the implementation itself, this is a rather simple approach that can probably be enhanced in the future by preparing the ->a_{mod,pow}_tab tables with the swapping in mind. 
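For reference, the per-byte transformation is a plain bit reversal, equivalent to bitrev8() (a minimal illustration, not the table-based implementation used here):

	static u8 swap_bits_example(u8 in)
	{
		u8 out = 0;
		int i;

		/* Mirror bit i into bit 7 - i: 0x0a (b0000_1010) -> 0x50 (b0101_0000) */
		for (i = 0; i < 8; i++)
			out |= ((in >> i) & 1) << (7 - i);

		return out;
	}

A user needing this behaviour simply opts in at initialization time, e.g. (parameters made up for the example):

	bch = bch_init(14, 4, 0, true);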
Suggested-by: Boris Brezillon Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200519074549.23673-3-miquel.raynal@bootlin.com --- drivers/mtd/devices/docg3.c | 2 +- drivers/mtd/nand/raw/nand_bch.c | 2 +- include/linux/bch.h | 5 +- lib/bch.c | 90 ++++++++++++++++++++++++++++----- 4 files changed, 82 insertions(+), 17 deletions(-) diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c index 799df8d033573..a030792115bc2 100644 --- a/drivers/mtd/devices/docg3.c +++ b/drivers/mtd/devices/docg3.c @@ -1985,7 +1985,7 @@ static int __init docg3_probe(struct platform_device *pdev) cascade->base = base; mutex_init(&cascade->lock); cascade->bch = bch_init(DOC_ECC_BCH_M, DOC_ECC_BCH_T, - DOC_ECC_BCH_PRIMPOLY); + DOC_ECC_BCH_PRIMPOLY, false); if (!cascade->bch) return ret; diff --git a/drivers/mtd/nand/raw/nand_bch.c b/drivers/mtd/nand/raw/nand_bch.c index d95fcc7358e97..d5af8c5fd02f7 100644 --- a/drivers/mtd/nand/raw/nand_bch.c +++ b/drivers/mtd/nand/raw/nand_bch.c @@ -130,7 +130,7 @@ struct nand_bch_control *nand_bch_init(struct mtd_info *mtd) if (!nbc) goto fail; - nbc->bch = bch_init(m, t, 0); + nbc->bch = bch_init(m, t, 0, false); if (!nbc->bch) goto fail; diff --git a/include/linux/bch.h b/include/linux/bch.h index 9c35e7cd58909..85fdce83d4e21 100644 --- a/include/linux/bch.h +++ b/include/linux/bch.h @@ -33,6 +33,7 @@ * @cache: log-based polynomial representation buffer * @elp: error locator polynomial * @poly_2t: temporary polynomials of degree 2t + * @swap_bits: swap bits within data and syndrome bytes */ struct bch_control { unsigned int m; @@ -51,9 +52,11 @@ struct bch_control { int *cache; struct gf_poly *elp; struct gf_poly *poly_2t[4]; + bool swap_bits; }; -struct bch_control *bch_init(int m, int t, unsigned int prim_poly); +struct bch_control *bch_init(int m, int t, unsigned int prim_poly, + bool swap_bits); void bch_free(struct bch_control *bch); diff --git a/lib/bch.c b/lib/bch.c index 1091841ac7160..7c031ee8b93b2 100644 --- a/lib/bch.c +++ b/lib/bch.c @@ -114,6 +114,49 @@ struct gf_poly_deg1 { unsigned int c[2]; }; +static u8 swap_bits_table[] = { + 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, + 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0, + 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8, + 0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8, + 0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4, + 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4, + 0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec, + 0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc, + 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2, + 0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2, + 0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea, + 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa, + 0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6, + 0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6, + 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee, + 0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe, + 0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1, + 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1, + 0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9, + 0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9, + 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5, + 0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5, + 0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed, + 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd, + 0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3, + 0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3, + 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb, + 0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb, 
+ 0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7, + 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7, + 0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef, + 0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff, +}; + +static u8 swap_bits(struct bch_control *bch, u8 in) +{ + if (!bch->swap_bits) + return in; + + return swap_bits_table[in]; +} + /* * same as bch_encode(), but process input data one byte at a time */ @@ -126,7 +169,9 @@ static void bch_encode_unaligned(struct bch_control *bch, const int l = BCH_ECC_WORDS(bch)-1; while (len--) { - p = bch->mod8_tab + (l+1)*(((ecc[0] >> 24)^(*data++)) & 0xff); + u8 tmp = swap_bits(bch, *data++); + + p = bch->mod8_tab + (l+1)*(((ecc[0] >> 24)^(tmp)) & 0xff); for (i = 0; i < l; i++) ecc[i] = ((ecc[i] << 8)|(ecc[i+1] >> 24))^(*p++); @@ -145,10 +190,16 @@ static void load_ecc8(struct bch_control *bch, uint32_t *dst, unsigned int i, nwords = BCH_ECC_WORDS(bch)-1; for (i = 0; i < nwords; i++, src += 4) - dst[i] = (src[0] << 24)|(src[1] << 16)|(src[2] << 8)|src[3]; + dst[i] = ((u32)swap_bits(bch, src[0]) << 24) | + ((u32)swap_bits(bch, src[1]) << 16) | + ((u32)swap_bits(bch, src[2]) << 8) | + swap_bits(bch, src[3]); memcpy(pad, src, BCH_ECC_BYTES(bch)-4*nwords); - dst[nwords] = (pad[0] << 24)|(pad[1] << 16)|(pad[2] << 8)|pad[3]; + dst[nwords] = ((u32)swap_bits(bch, pad[0]) << 24) | + ((u32)swap_bits(bch, pad[1]) << 16) | + ((u32)swap_bits(bch, pad[2]) << 8) | + swap_bits(bch, pad[3]); } /* @@ -161,15 +212,15 @@ static void store_ecc8(struct bch_control *bch, uint8_t *dst, unsigned int i, nwords = BCH_ECC_WORDS(bch)-1; for (i = 0; i < nwords; i++) { - *dst++ = (src[i] >> 24); - *dst++ = (src[i] >> 16) & 0xff; - *dst++ = (src[i] >> 8) & 0xff; - *dst++ = (src[i] >> 0) & 0xff; + *dst++ = swap_bits(bch, src[i] >> 24); + *dst++ = swap_bits(bch, src[i] >> 16); + *dst++ = swap_bits(bch, src[i] >> 8); + *dst++ = swap_bits(bch, src[i]); } - pad[0] = (src[nwords] >> 24); - pad[1] = (src[nwords] >> 16) & 0xff; - pad[2] = (src[nwords] >> 8) & 0xff; - pad[3] = (src[nwords] >> 0) & 0xff; + pad[0] = swap_bits(bch, src[nwords] >> 24); + pad[1] = swap_bits(bch, src[nwords] >> 16); + pad[2] = swap_bits(bch, src[nwords] >> 8); + pad[3] = swap_bits(bch, src[nwords]); memcpy(dst, pad, BCH_ECC_BYTES(bch)-4*nwords); } @@ -240,7 +291,13 @@ void bch_encode(struct bch_control *bch, const uint8_t *data, */ while (mlen--) { /* input data is read in big-endian format */ - w = r[0]^cpu_to_be32(*pdata++); + w = cpu_to_be32(*pdata++); + if (bch->swap_bits) + w = (u32)swap_bits(bch, w) | + ((u32)swap_bits(bch, w >> 8) << 8) | + ((u32)swap_bits(bch, w >> 16) << 16) | + ((u32)swap_bits(bch, w >> 24) << 24); + w ^= r[0]; p0 = tab0 + (l+1)*((w >> 0) & 0xff); p1 = tab1 + (l+1)*((w >> 8) & 0xff); p2 = tab2 + (l+1)*((w >> 16) & 0xff); @@ -1048,7 +1105,9 @@ int bch_decode(struct bch_control *bch, const uint8_t *data, unsigned int len, break; } errloc[i] = nbits-1-errloc[i]; - errloc[i] = (errloc[i] & ~7)|(7-(errloc[i] & 7)); + if (!bch->swap_bits) + errloc[i] = (errloc[i] & ~7) | + (7-(errloc[i] & 7)); } } return (err >= 0) ? 
err : -EBADMSG; @@ -1240,6 +1299,7 @@ finish: * @m: Galois field order, should be in the range 5-15 * @t: maximum error correction capability, in bits * @prim_poly: user-provided primitive polynomial (or 0 to use default) + * @swap_bits: swap bits within data and syndrome bytes * * Returns: * a newly allocated BCH control structure if successful, NULL otherwise @@ -1256,7 +1316,8 @@ finish: * BCH control structure, ecc length in bytes is given by member @ecc_bytes of * the structure. */ -struct bch_control *bch_init(int m, int t, unsigned int prim_poly) +struct bch_control *bch_init(int m, int t, unsigned int prim_poly, + bool swap_bits) { int err = 0; unsigned int i, words; @@ -1321,6 +1382,7 @@ struct bch_control *bch_init(int m, int t, unsigned int prim_poly) bch->syn = bch_alloc(2*t*sizeof(*bch->syn), &err); bch->cache = bch_alloc(2*t*sizeof(*bch->cache), &err); bch->elp = bch_alloc((t+1)*sizeof(struct gf_poly_deg1), &err); + bch->swap_bits = swap_bits; for (i = 0; i < ARRAY_SIZE(bch->poly_2t); i++) bch->poly_2t[i] = bch_alloc(GF_POLY_SZ(2*t), &err); -- GitLab From 0651ed5082bc5af45be9f79cabcbc32c1cf4e599 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 09:45:44 +0200 Subject: [PATCH 0668/1971] mtd: rawnand: Ensure the number of bitflips is consistent The main NAND read page function can loop over "page reads" many times in if the reading reports uncorrectable error(s) and if the chip supports the read_retry feature. In this case, the number of bitflips is summarized between attempts. Fix this by re-initializing the entire mtd_ecc_stats object each time we retry. Suggested-by: Boris Brezillon Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200519074549.23673-4-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nand_base.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 79e8052bdff91..475647ea69485 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -3295,7 +3295,7 @@ static int nand_do_read_ops(struct nand_chip *chip, loff_t from, oob_required = oob ? 1 : 0; while (1) { - unsigned int ecc_failures = mtd->ecc_stats.failed; + struct mtd_ecc_stats ecc_stats = mtd->ecc_stats; bytes = min(mtd->writesize - col, readlen); aligned = (bytes == mtd->writesize); @@ -3346,7 +3346,7 @@ read_retry: */ if (use_bounce_buf) { if (!NAND_HAS_SUBPAGE_READ(chip) && !oob && - !(mtd->ecc_stats.failed - ecc_failures) && + !(mtd->ecc_stats.failed - ecc_stats.failed) && (ops->mode != MTD_OPS_RAW)) { chip->pagecache.page = realpage; chip->pagecache.bitflips = ret; @@ -3369,7 +3369,7 @@ read_retry: nand_wait_readrdy(chip); - if (mtd->ecc_stats.failed - ecc_failures) { + if (mtd->ecc_stats.failed - ecc_stats.failed) { if (retry_mode + 1 < chip->read_retries) { retry_mode++; ret = nand_setup_read_retry(chip, @@ -3377,8 +3377,8 @@ read_retry: if (ret < 0) break; - /* Reset failures; retry */ - mtd->ecc_stats.failed = ecc_failures; + /* Reset ecc_stats; retry */ + mtd->ecc_stats = ecc_stats; goto read_retry; } else { /* No more retry modes; real failure */ -- GitLab From d7904619ea0636411f40fb1f34057288c0783ecf Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 09:45:45 +0200 Subject: [PATCH 0669/1971] mtd: rawnand: Add nand_extract_bits() There are cases where ECC bytes are not byte-aligned. Indeed, BCH implies using a number of ECC bits, which are not always a multiple of 8. 
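For example (numbers picked purely for illustration), a BCH code built over GF(2^13) with a correction strength of t = 12 needs up to 13 * 12 = 156 ECC bits per step, i.e. 19 bytes plus 4 bits, so syndromes packed back to back start and end at arbitrary bit offsets within the ECC bytes.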
We then need a helper like nand_extract_bits() to extract these syndromes from a buffer. Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200519074549.23673-5-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nand_base.c | 44 ++++++++++++++++++++++++++++++++ include/linux/mtd/rawnand.h | 4 +++ 2 files changed, 48 insertions(+) diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 475647ea69485..6a6a0a36b3fd5 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -274,6 +274,50 @@ static int check_offs_len(struct nand_chip *chip, loff_t ofs, uint64_t len) return ret; } +/** + * nand_extract_bits - Copy unaligned bits from one buffer to another one + * @dst: destination buffer + * @dst_off: bit offset at which the writing starts + * @src: source buffer + * @src_off: bit offset at which the reading starts + * @nbits: number of bits to copy from @src to @dst + * + * Copy bits from one memory region to another (overlap authorized). + */ +void nand_extract_bits(u8 *dst, unsigned int dst_off, const u8 *src, + unsigned int src_off, unsigned int nbits) +{ + unsigned int tmp, n; + + dst += dst_off / 8; + dst_off %= 8; + src += src_off / 8; + src_off %= 8; + + while (nbits) { + n = min3(8 - dst_off, 8 - src_off, nbits); + + tmp = (*src >> src_off) & GENMASK(n - 1, 0); + *dst &= ~GENMASK(n - 1 + dst_off, dst_off); + *dst |= tmp << dst_off; + + dst_off += n; + if (dst_off >= 8) { + dst++; + dst_off -= 8; + } + + src_off += n; + if (src_off >= 8) { + src++; + src_off -= 8; + } + + nbits -= n; + } +} +EXPORT_SYMBOL_GPL(nand_extract_bits); + /** * nand_select_target() - Select a NAND target (A.K.A. die) * @chip: NAND chip object diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index a3bac8824937c..2804c13e56621 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -1414,6 +1414,10 @@ int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod, void nand_select_target(struct nand_chip *chip, unsigned int cs); void nand_deselect_target(struct nand_chip *chip); +/* Bitops */ +void nand_extract_bits(u8 *dst, unsigned int dst_off, const u8 *src, + unsigned int src_off, unsigned int nbits); + /** * nand_get_data_buf() - Get the internal page buffer * @chip: NAND chip object -- GitLab From ce33bd4c8c3f2ce063ce6f946cd5f6fa9693776a Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 09:45:46 +0200 Subject: [PATCH 0670/1971] MAINTAINERS: Add Arasan NAND controller and bindings Fill a new entry for the Arasan NAND controller. 
Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200519074549.23673-6-miquel.raynal@bootlin.com --- MAINTAINERS | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index 8e4daa03662c8..0f9237f6bb266 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1284,6 +1284,13 @@ S: Supported W: http://www.aquantia.com F: drivers/net/ethernet/aquantia/atlantic/aq_ptp* +ARASAN NAND CONTROLLER DRIVER +M: Naga Sureshkumar Relli +L: linux-mtd@lists.infradead.org +S: Maintained +F: Documentation/devicetree/bindings/mtd/arasan,nand-controller.yaml +F: drivers/mtd/nand/raw/arasan-nand-controller.c + ARC FRAMEBUFFER DRIVER M: Jaya Kumar S: Maintained -- GitLab From 8201c579ec781a46b406158966030aba987d2dca Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 09:45:47 +0200 Subject: [PATCH 0671/1971] dt-bindings: mtd: Document ARASAN NAND bindings Document the Arasan NAND controller bindings. Signed-off-by: Miquel Raynal Reviewed-by: Rob Herring Link: https://lore.kernel.org/linux-mtd/20200519074549.23673-7-miquel.raynal@bootlin.com --- .../bindings/mtd/arasan,nand-controller.yaml | 63 +++++++++++++++++++ 1 file changed, 63 insertions(+) create mode 100644 Documentation/devicetree/bindings/mtd/arasan,nand-controller.yaml diff --git a/Documentation/devicetree/bindings/mtd/arasan,nand-controller.yaml b/Documentation/devicetree/bindings/mtd/arasan,nand-controller.yaml new file mode 100644 index 0000000000000..db8f115a13ec2 --- /dev/null +++ b/Documentation/devicetree/bindings/mtd/arasan,nand-controller.yaml @@ -0,0 +1,63 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/mtd/arasan,nand-controller.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Arasan NAND Flash Controller with ONFI 3.1 support device tree bindings + +allOf: + - $ref: "nand-controller.yaml" + +maintainers: + - Naga Sureshkumar Relli + +properties: + compatible: + oneOf: + - items: + - enum: + - xlnx,zynqmp-nand-controller + - enum: + - arasan,nfc-v3p10 + + reg: + maxItems: 1 + + clocks: + items: + - description: Controller clock + - description: NAND bus clock + + clock-names: + items: + - const: controller + - const: bus + + interrupts: + maxItems: 1 + + "#address-cells": true + "#size-cells": true + +required: + - compatible + - reg + - clocks + - clock-names + - interrupts + +additionalProperties: true + +examples: + - | + nfc: nand-controller@ff100000 { + compatible = "xlnx,zynqmp-nand-controller", "arasan,nfc-v3p10"; + reg = <0x0 0xff100000 0x0 0x1000>; + clock-names = "controller", "bus"; + clocks = <&clk200>, <&clk100>; + interrupt-parent = <&gic>; + interrupts = <0 14 4>; + #address-cells = <1>; + #size-cells = <0>; + }; -- GitLab From 197b88fecc50ee3c7a22415db81eae0b9126f20e Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 09:45:48 +0200 Subject: [PATCH 0672/1971] mtd: rawnand: arasan: Add new Arasan NAND controller Add the Arasan NAND controller driver. This brings only NAND controller support. The ECC engine being a bit subtle, hardware ECC support will be added in a second time. This work is based on contributions from Naga Sureshkumar Relli. 
Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200519074549.23673-8-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/Kconfig | 7 + drivers/mtd/nand/raw/Makefile | 1 + drivers/mtd/nand/raw/arasan-nand-controller.c | 955 ++++++++++++++++++ 3 files changed, 963 insertions(+) create mode 100644 drivers/mtd/nand/raw/arasan-nand-controller.c diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig index 9b08a58ae88e6..e2bc87779bf96 100644 --- a/drivers/mtd/nand/raw/Kconfig +++ b/drivers/mtd/nand/raw/Kconfig @@ -453,6 +453,13 @@ config MTD_NAND_CADENCE Enable the driver for NAND flash on platforms using a Cadence NAND controller. +config MTD_NAND_ARASAN + tristate "Support for Arasan NAND flash controller" + depends on HAS_IOMEM && HAS_DMA + help + Enables the driver for the Arasan NAND flash controller on + Zynq Ultrascale+ MPSoC. + comment "Misc" config MTD_SM_COMMON diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile index a817052286c7e..2930f5b9015d4 100644 --- a/drivers/mtd/nand/raw/Makefile +++ b/drivers/mtd/nand/raw/Makefile @@ -57,6 +57,7 @@ obj-$(CONFIG_MTD_NAND_TEGRA) += tegra_nand.o obj-$(CONFIG_MTD_NAND_STM32_FMC2) += stm32_fmc2_nand.o obj-$(CONFIG_MTD_NAND_MESON) += meson_nand.o obj-$(CONFIG_MTD_NAND_CADENCE) += cadence-nand-controller.o +obj-$(CONFIG_MTD_NAND_ARASAN) += arasan-nand-controller.o nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o nand-objs += nand_onfi.o diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c new file mode 100644 index 0000000000000..03d95ee1488ba --- /dev/null +++ b/drivers/mtd/nand/raw/arasan-nand-controller.c @@ -0,0 +1,955 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Arasan NAND Flash Controller Driver + * + * Copyright (C) 2014 - 2020 Xilinx, Inc. 
+ * Author: + * Miquel Raynal + * Original work (fully rewritten): + * Punnaiah Choudary Kalluri + * Naga Sureshkumar Relli + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define PKT_REG 0x00 +#define PKT_SIZE(x) FIELD_PREP(GENMASK(10, 0), (x)) +#define PKT_STEPS(x) FIELD_PREP(GENMASK(23, 12), (x)) + +#define MEM_ADDR1_REG 0x04 + +#define MEM_ADDR2_REG 0x08 +#define ADDR2_STRENGTH(x) FIELD_PREP(GENMASK(27, 25), (x)) +#define ADDR2_CS(x) FIELD_PREP(GENMASK(31, 30), (x)) + +#define CMD_REG 0x0C +#define CMD_1(x) FIELD_PREP(GENMASK(7, 0), (x)) +#define CMD_2(x) FIELD_PREP(GENMASK(15, 8), (x)) +#define CMD_PAGE_SIZE(x) FIELD_PREP(GENMASK(25, 23), (x)) +#define CMD_DMA_ENABLE BIT(27) +#define CMD_NADDRS(x) FIELD_PREP(GENMASK(30, 28), (x)) +#define CMD_ECC_ENABLE BIT(31) + +#define PROG_REG 0x10 +#define PROG_PGRD BIT(0) +#define PROG_ERASE BIT(2) +#define PROG_STATUS BIT(3) +#define PROG_PGPROG BIT(4) +#define PROG_RDID BIT(6) +#define PROG_RDPARAM BIT(7) +#define PROG_RST BIT(8) +#define PROG_GET_FEATURE BIT(9) +#define PROG_SET_FEATURE BIT(10) + +#define INTR_STS_EN_REG 0x14 +#define INTR_SIG_EN_REG 0x18 +#define INTR_STS_REG 0x1C +#define WRITE_READY BIT(0) +#define READ_READY BIT(1) +#define XFER_COMPLETE BIT(2) +#define DMA_BOUNDARY BIT(6) +#define EVENT_MASK GENMASK(7, 0) + +#define READY_STS_REG 0x20 + +#define DMA_ADDR0_REG 0x50 +#define DMA_ADDR1_REG 0x24 + +#define FLASH_STS_REG 0x28 + +#define DATA_PORT_REG 0x30 + +#define ECC_CONF_REG 0x34 +#define ECC_CONF_COL(x) FIELD_PREP(GENMASK(15, 0), (x)) +#define ECC_CONF_LEN(x) FIELD_PREP(GENMASK(26, 16), (x)) +#define ECC_CONF_BCH_EN BIT(27) + +#define ECC_ERR_CNT_REG 0x38 +#define GET_PKT_ERR_CNT(x) FIELD_GET(GENMASK(7, 0), (x)) +#define GET_PAGE_ERR_CNT(x) FIELD_GET(GENMASK(16, 8), (x)) + +#define ECC_SP_REG 0x3C +#define ECC_SP_CMD1(x) FIELD_PREP(GENMASK(7, 0), (x)) +#define ECC_SP_CMD2(x) FIELD_PREP(GENMASK(15, 8), (x)) +#define ECC_SP_ADDRS(x) FIELD_PREP(GENMASK(30, 28), (x)) + +#define ECC_1ERR_CNT_REG 0x40 +#define ECC_2ERR_CNT_REG 0x44 + +#define DATA_INTERFACE_REG 0x6C +#define DIFACE_SDR_MODE(x) FIELD_PREP(GENMASK(2, 0), (x)) +#define DIFACE_DDR_MODE(x) FIELD_PREP(GENMASK(5, 3), (X)) +#define DIFACE_SDR 0 +#define DIFACE_NVDDR BIT(9) + +#define ANFC_MAX_CS 2 +#define ANFC_DFLT_TIMEOUT_US 1000000 +#define ANFC_MAX_CHUNK_SIZE SZ_1M +#define ANFC_MAX_PARAM_SIZE SZ_4K +#define ANFC_MAX_STEPS SZ_2K +#define ANFC_MAX_PKT_SIZE (SZ_2K - 1) +#define ANFC_MAX_ADDR_CYC 5U +#define ANFC_RSVD_ECC_BYTES 21 + +#define ANFC_XLNX_SDR_DFLT_CORE_CLK 100000000 +#define ANFC_XLNX_SDR_HS_CORE_CLK 80000000 + +/** + * struct anfc_op - Defines how to execute an operation + * @pkt_reg: Packet register + * @addr1_reg: Memory address 1 register + * @addr2_reg: Memory address 2 register + * @cmd_reg: Command register + * @prog_reg: Program register + * @steps: Number of "packets" to read/write + * @rdy_timeout_ms: Timeout for waits on Ready/Busy pin + * @len: Data transfer length + * @read: Data transfer direction from the controller point of view + */ +struct anfc_op { + u32 pkt_reg; + u32 addr1_reg; + u32 addr2_reg; + u32 cmd_reg; + u32 prog_reg; + int steps; + unsigned int rdy_timeout_ms; + unsigned int len; + bool read; + u8 *buf; +}; + +/** + * struct anand - Defines the NAND chip related information + * @node: Used to store NAND chips into a list + * @chip: NAND chip information structure + * @cs: Chip select line + * @rb: Ready-busy line + * @page_sz: Register 
value of the page_sz field to use + * @clk: Expected clock frequency to use + * @timings: Data interface timing mode to use + * @ecc_conf: Hardware ECC configuration value + * @strength: Register value of the ECC strength + * @raddr_cycles: Row address cycle information + * @caddr_cycles: Column address cycle information + */ +struct anand { + struct list_head node; + struct nand_chip chip; + unsigned int cs; + unsigned int rb; + unsigned int page_sz; + unsigned long clk; + u32 timings; + u32 ecc_conf; + u32 strength; + u16 raddr_cycles; + u16 caddr_cycles; +}; + +/** + * struct arasan_nfc - Defines the Arasan NAND flash controller driver instance + * @dev: Pointer to the device structure + * @base: Remapped register area + * @controller_clk: Pointer to the system clock + * @bus_clk: Pointer to the flash clock + * @controller: Base controller structure + * @chips: List of all NAND chips attached to the controller + * @assigned_cs: Bitmask describing already assigned CS lines + * @cur_clk: Current clock rate + */ +struct arasan_nfc { + struct device *dev; + void __iomem *base; + struct clk *controller_clk; + struct clk *bus_clk; + struct nand_controller controller; + struct list_head chips; + unsigned long assigned_cs; + unsigned int cur_clk; +}; + +static struct anand *to_anand(struct nand_chip *nand) +{ + return container_of(nand, struct anand, chip); +} + +static struct arasan_nfc *to_anfc(struct nand_controller *ctrl) +{ + return container_of(ctrl, struct arasan_nfc, controller); +} + +static int anfc_wait_for_event(struct arasan_nfc *nfc, unsigned int event) +{ + u32 val; + int ret; + + ret = readl_relaxed_poll_timeout(nfc->base + INTR_STS_REG, val, + val & event, 0, + ANFC_DFLT_TIMEOUT_US); + if (ret) { + dev_err(nfc->dev, "Timeout waiting for event 0x%x\n", event); + return -ETIMEDOUT; + } + + writel_relaxed(event, nfc->base + INTR_STS_REG); + + return 0; +} + +static int anfc_wait_for_rb(struct arasan_nfc *nfc, struct nand_chip *chip, + unsigned int timeout_ms) +{ + struct anand *anand = to_anand(chip); + u32 val; + int ret; + + /* There is no R/B interrupt, we must poll a register */ + ret = readl_relaxed_poll_timeout(nfc->base + READY_STS_REG, val, + val & BIT(anand->rb), + 1, timeout_ms * 1000); + if (ret) { + dev_err(nfc->dev, "Timeout waiting for R/B 0x%x\n", + readl_relaxed(nfc->base + READY_STS_REG)); + return -ETIMEDOUT; + } + + return 0; +} + +static void anfc_trigger_op(struct arasan_nfc *nfc, struct anfc_op *nfc_op) +{ + writel_relaxed(nfc_op->pkt_reg, nfc->base + PKT_REG); + writel_relaxed(nfc_op->addr1_reg, nfc->base + MEM_ADDR1_REG); + writel_relaxed(nfc_op->addr2_reg, nfc->base + MEM_ADDR2_REG); + writel_relaxed(nfc_op->cmd_reg, nfc->base + CMD_REG); + writel_relaxed(nfc_op->prog_reg, nfc->base + PROG_REG); +} + +static int anfc_pkt_len_config(unsigned int len, unsigned int *steps, + unsigned int *pktsize) +{ + unsigned int nb, sz; + + for (nb = 1; nb < ANFC_MAX_STEPS; nb *= 2) { + sz = len / nb; + if (sz <= ANFC_MAX_PKT_SIZE) + break; + } + + if (sz * nb != len) + return -ENOTSUPP; + + if (steps) + *steps = nb; + + if (pktsize) + *pktsize = sz; + + return 0; +} + +/* NAND framework ->exec_op() hooks and related helpers */ +static int anfc_parse_instructions(struct nand_chip *chip, + const struct nand_subop *subop, + struct anfc_op *nfc_op) +{ + struct anand *anand = to_anand(chip); + const struct nand_op_instr *instr = NULL; + bool first_cmd = true; + unsigned int op_id; + int ret, i; + + memset(nfc_op, 0, sizeof(*nfc_op)); + nfc_op->addr2_reg = ADDR2_CS(anand->cs); 
+ nfc_op->cmd_reg = CMD_PAGE_SIZE(anand->page_sz); + + for (op_id = 0; op_id < subop->ninstrs; op_id++) { + unsigned int offset, naddrs, pktsize; + const u8 *addrs; + u8 *buf; + + instr = &subop->instrs[op_id]; + + switch (instr->type) { + case NAND_OP_CMD_INSTR: + if (first_cmd) + nfc_op->cmd_reg |= CMD_1(instr->ctx.cmd.opcode); + else + nfc_op->cmd_reg |= CMD_2(instr->ctx.cmd.opcode); + + first_cmd = false; + break; + + case NAND_OP_ADDR_INSTR: + offset = nand_subop_get_addr_start_off(subop, op_id); + naddrs = nand_subop_get_num_addr_cyc(subop, op_id); + addrs = &instr->ctx.addr.addrs[offset]; + nfc_op->cmd_reg |= CMD_NADDRS(naddrs); + + for (i = 0; i < min(ANFC_MAX_ADDR_CYC, naddrs); i++) { + if (i < 4) + nfc_op->addr1_reg |= (u32)addrs[i] << i * 8; + else + nfc_op->addr2_reg |= addrs[i]; + } + + break; + case NAND_OP_DATA_IN_INSTR: + nfc_op->read = true; + fallthrough; + case NAND_OP_DATA_OUT_INSTR: + offset = nand_subop_get_data_start_off(subop, op_id); + buf = instr->ctx.data.buf.in; + nfc_op->buf = &buf[offset]; + nfc_op->len = nand_subop_get_data_len(subop, op_id); + ret = anfc_pkt_len_config(nfc_op->len, &nfc_op->steps, + &pktsize); + if (ret) + return ret; + + /* + * Number of DATA cycles must be aligned on 4, this + * means the controller might read/write more than + * requested. This is harmless most of the time as extra + * DATA are discarded in the write path and read pointer + * adjusted in the read path. + * + * FIXME: The core should mark operations where + * reading/writing more is allowed so the exec_op() + * implementation can take the right decision when the + * alignment constraint is not met: adjust the number of + * DATA cycles when it's allowed, reject the operation + * otherwise. + */ + nfc_op->pkt_reg |= PKT_SIZE(round_up(pktsize, 4)) | + PKT_STEPS(nfc_op->steps); + break; + case NAND_OP_WAITRDY_INSTR: + nfc_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms; + break; + } + } + + return 0; +} + +static int anfc_rw_pio_op(struct arasan_nfc *nfc, struct anfc_op *nfc_op) +{ + unsigned int dwords = (nfc_op->len / 4) / nfc_op->steps; + unsigned int last_len = nfc_op->len % 4; + unsigned int offset, dir; + u8 *buf = nfc_op->buf; + int ret, i; + + for (i = 0; i < nfc_op->steps; i++) { + dir = nfc_op->read ? READ_READY : WRITE_READY; + ret = anfc_wait_for_event(nfc, dir); + if (ret) { + dev_err(nfc->dev, "PIO %s ready signal not received\n", + nfc_op->read ? 
"Read" : "Write"); + return ret; + } + + offset = i * (dwords * 4); + if (nfc_op->read) + ioread32_rep(nfc->base + DATA_PORT_REG, &buf[offset], + dwords); + else + iowrite32_rep(nfc->base + DATA_PORT_REG, &buf[offset], + dwords); + } + + if (last_len) { + u32 remainder; + + offset = nfc_op->len - last_len; + + if (nfc_op->read) { + remainder = readl_relaxed(nfc->base + DATA_PORT_REG); + memcpy(&buf[offset], &remainder, last_len); + } else { + memcpy(&remainder, &buf[offset], last_len); + writel_relaxed(remainder, nfc->base + DATA_PORT_REG); + } + } + + return anfc_wait_for_event(nfc, XFER_COMPLETE); +} + +static int anfc_misc_data_type_exec(struct nand_chip *chip, + const struct nand_subop *subop, + u32 prog_reg) +{ + struct arasan_nfc *nfc = to_anfc(chip->controller); + struct anfc_op nfc_op = {}; + int ret; + + ret = anfc_parse_instructions(chip, subop, &nfc_op); + if (ret) + return ret; + + nfc_op.prog_reg = prog_reg; + anfc_trigger_op(nfc, &nfc_op); + + if (nfc_op.rdy_timeout_ms) { + ret = anfc_wait_for_rb(nfc, chip, nfc_op.rdy_timeout_ms); + if (ret) + return ret; + } + + return anfc_rw_pio_op(nfc, &nfc_op); +} + +static int anfc_param_read_type_exec(struct nand_chip *chip, + const struct nand_subop *subop) +{ + return anfc_misc_data_type_exec(chip, subop, PROG_RDPARAM); +} + +static int anfc_data_read_type_exec(struct nand_chip *chip, + const struct nand_subop *subop) +{ + return anfc_misc_data_type_exec(chip, subop, PROG_PGRD); +} + +static int anfc_param_write_type_exec(struct nand_chip *chip, + const struct nand_subop *subop) +{ + return anfc_misc_data_type_exec(chip, subop, PROG_SET_FEATURE); +} + +static int anfc_data_write_type_exec(struct nand_chip *chip, + const struct nand_subop *subop) +{ + return anfc_misc_data_type_exec(chip, subop, PROG_PGPROG); +} + +static int anfc_misc_zerolen_type_exec(struct nand_chip *chip, + const struct nand_subop *subop, + u32 prog_reg) +{ + struct arasan_nfc *nfc = to_anfc(chip->controller); + struct anfc_op nfc_op = {}; + int ret; + + ret = anfc_parse_instructions(chip, subop, &nfc_op); + if (ret) + return ret; + + nfc_op.prog_reg = prog_reg; + anfc_trigger_op(nfc, &nfc_op); + + ret = anfc_wait_for_event(nfc, XFER_COMPLETE); + if (ret) + return ret; + + if (nfc_op.rdy_timeout_ms) + ret = anfc_wait_for_rb(nfc, chip, nfc_op.rdy_timeout_ms); + + return ret; +} + +static int anfc_status_type_exec(struct nand_chip *chip, + const struct nand_subop *subop) +{ + struct arasan_nfc *nfc = to_anfc(chip->controller); + u32 tmp; + int ret; + + /* See anfc_check_op() for details about this constraint */ + if (subop->instrs[0].ctx.cmd.opcode != NAND_CMD_STATUS) + return -ENOTSUPP; + + ret = anfc_misc_zerolen_type_exec(chip, subop, PROG_STATUS); + if (ret) + return ret; + + tmp = readl_relaxed(nfc->base + FLASH_STS_REG); + memcpy(subop->instrs[1].ctx.data.buf.in, &tmp, 1); + + return 0; +} + +static int anfc_reset_type_exec(struct nand_chip *chip, + const struct nand_subop *subop) +{ + return anfc_misc_zerolen_type_exec(chip, subop, PROG_RST); +} + +static int anfc_erase_type_exec(struct nand_chip *chip, + const struct nand_subop *subop) +{ + return anfc_misc_zerolen_type_exec(chip, subop, PROG_ERASE); +} + +static int anfc_wait_type_exec(struct nand_chip *chip, + const struct nand_subop *subop) +{ + struct arasan_nfc *nfc = to_anfc(chip->controller); + struct anfc_op nfc_op = {}; + int ret; + + ret = anfc_parse_instructions(chip, subop, &nfc_op); + if (ret) + return ret; + + return anfc_wait_for_rb(nfc, chip, nfc_op.rdy_timeout_ms); +} + +static const struct 
nand_op_parser anfc_op_parser = NAND_OP_PARSER( + NAND_OP_PARSER_PATTERN( + anfc_param_read_type_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(true), + NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, ANFC_MAX_CHUNK_SIZE)), + NAND_OP_PARSER_PATTERN( + anfc_param_write_type_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC), + NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, ANFC_MAX_PARAM_SIZE)), + NAND_OP_PARSER_PATTERN( + anfc_data_read_type_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC), + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(true), + NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, ANFC_MAX_CHUNK_SIZE)), + NAND_OP_PARSER_PATTERN( + anfc_data_write_type_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC), + NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, ANFC_MAX_CHUNK_SIZE), + NAND_OP_PARSER_PAT_CMD_ELEM(false)), + NAND_OP_PARSER_PATTERN( + anfc_reset_type_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)), + NAND_OP_PARSER_PATTERN( + anfc_erase_type_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC), + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)), + NAND_OP_PARSER_PATTERN( + anfc_status_type_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, ANFC_MAX_CHUNK_SIZE)), + NAND_OP_PARSER_PATTERN( + anfc_wait_type_exec, + NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)), + ); + +static int anfc_select_target(struct nand_chip *chip, int target) +{ + struct anand *anand = to_anand(chip); + struct arasan_nfc *nfc = to_anfc(chip->controller); + int ret; + + /* Update the controller timings and the potential ECC configuration */ + writel_relaxed(anand->timings, nfc->base + DATA_INTERFACE_REG); + + /* Update clock frequency */ + if (nfc->cur_clk != anand->clk) { + clk_disable_unprepare(nfc->controller_clk); + ret = clk_set_rate(nfc->controller_clk, anand->clk); + if (ret) { + dev_err(nfc->dev, "Failed to change clock rate\n"); + return ret; + } + + ret = clk_prepare_enable(nfc->controller_clk); + if (ret) { + dev_err(nfc->dev, + "Failed to re-enable the controller clock\n"); + return ret; + } + + nfc->cur_clk = anand->clk; + } + + return 0; +} + +static int anfc_check_op(struct nand_chip *chip, + const struct nand_operation *op) +{ + const struct nand_op_instr *instr; + int op_id; + + /* + * The controller abstracts all the NAND operations and do not support + * data only operations. + * + * TODO: The nand_op_parser framework should be extended to + * support custom checks on DATA instructions. + */ + for (op_id = 0; op_id < op->ninstrs; op_id++) { + instr = &op->instrs[op_id]; + + switch (instr->type) { + case NAND_OP_ADDR_INSTR: + if (instr->ctx.addr.naddrs > ANFC_MAX_ADDR_CYC) + return -ENOTSUPP; + + break; + case NAND_OP_DATA_IN_INSTR: + case NAND_OP_DATA_OUT_INSTR: + if (instr->ctx.data.len > ANFC_MAX_CHUNK_SIZE) + return -ENOTSUPP; + + if (anfc_pkt_len_config(instr->ctx.data.len, 0, 0)) + return -ENOTSUPP; + + break; + default: + break; + } + } + + /* + * The controller does not allow to proceed with a CMD+DATA_IN cycle + * manually on the bus by reading data from the data register. Instead, + * the controller abstract a status read operation with its own status + * register after ordering a read status operation. 
Hence, we cannot + * support any CMD+DATA_IN operation other than a READ STATUS. + * + * TODO: The nand_op_parser() framework should be extended to describe + * fixed patterns instead of open-coding this check here. + */ + if (op->ninstrs == 2 && + op->instrs[0].type == NAND_OP_CMD_INSTR && + op->instrs[0].ctx.cmd.opcode != NAND_CMD_STATUS && + op->instrs[1].type == NAND_OP_DATA_IN_INSTR) + return -ENOTSUPP; + + return nand_op_parser_exec_op(chip, &anfc_op_parser, op, true); +} + +static int anfc_exec_op(struct nand_chip *chip, + const struct nand_operation *op, + bool check_only) +{ + int ret; + + if (check_only) + return anfc_check_op(chip, op); + + ret = anfc_select_target(chip, op->cs); + if (ret) + return ret; + + return nand_op_parser_exec_op(chip, &anfc_op_parser, op, check_only); +} + +static int anfc_setup_data_interface(struct nand_chip *chip, int target, + const struct nand_data_interface *conf) +{ + struct anand *anand = to_anand(chip); + struct arasan_nfc *nfc = to_anfc(chip->controller); + struct device_node *np = nfc->dev->of_node; + + if (target < 0) + return 0; + + anand->timings = DIFACE_SDR | DIFACE_SDR_MODE(conf->timings.mode); + anand->clk = ANFC_XLNX_SDR_DFLT_CORE_CLK; + + /* + * Due to a hardware bug in the ZynqMP SoC, SDR timing modes 0-1 work + * with f > 90MHz (default clock is 100MHz) but signals are unstable + * with higher modes. Hence we decrease a little bit the clock rate to + * 80MHz when using modes 2-5 with this SoC. + */ + if (of_device_is_compatible(np, "xlnx,zynqmp-nand-controller") && + conf->timings.mode >= 2) + anand->clk = ANFC_XLNX_SDR_HS_CORE_CLK; + + return 0; +} + +static int anfc_attach_chip(struct nand_chip *chip) +{ + struct anand *anand = to_anand(chip); + struct arasan_nfc *nfc = to_anfc(chip->controller); + struct mtd_info *mtd = nand_to_mtd(chip); + int ret = 0; + + if (mtd->writesize <= SZ_512) + anand->caddr_cycles = 1; + else + anand->caddr_cycles = 2; + + if (chip->options & NAND_ROW_ADDR_3) + anand->raddr_cycles = 3; + else + anand->raddr_cycles = 2; + + switch (mtd->writesize) { + case 512: + anand->page_sz = 0; + break; + case 1024: + anand->page_sz = 5; + break; + case 2048: + anand->page_sz = 1; + break; + case 4096: + anand->page_sz = 2; + break; + case 8192: + anand->page_sz = 3; + break; + case 16384: + anand->page_sz = 4; + break; + default: + return -EINVAL; + } + + /* These hooks are valid for all ECC providers */ + chip->ecc.read_page_raw = nand_monolithic_read_page_raw; + chip->ecc.write_page_raw = nand_monolithic_write_page_raw; + + switch (chip->ecc.mode) { + case NAND_ECC_NONE: + case NAND_ECC_SOFT: + case NAND_ECC_ON_DIE: + break; + case NAND_ECC_HW: + default: + dev_err(nfc->dev, "Unsupported ECC mode: %d\n", + chip->ecc.mode); + return -EINVAL; + } + + return ret; +} + +static const struct nand_controller_ops anfc_ops = { + .exec_op = anfc_exec_op, + .setup_data_interface = anfc_setup_data_interface, + .attach_chip = anfc_attach_chip, +}; + +static int anfc_chip_init(struct arasan_nfc *nfc, struct device_node *np) +{ + struct anand *anand; + struct nand_chip *chip; + struct mtd_info *mtd; + int cs, rb, ret; + + anand = devm_kzalloc(nfc->dev, sizeof(*anand), GFP_KERNEL); + if (!anand) + return -ENOMEM; + + /* We do not support multiple CS per chip yet */ + if (of_property_count_elems_of_size(np, "reg", sizeof(u32)) != 1) { + dev_err(nfc->dev, "Invalid reg property\n"); + return -EINVAL; + } + + ret = of_property_read_u32(np, "reg", &cs); + if (ret) + return ret; + + ret = of_property_read_u32(np, "nand-rb", &rb); + 
if (ret) + return ret; + + if (cs >= ANFC_MAX_CS || rb >= ANFC_MAX_CS) { + dev_err(nfc->dev, "Wrong CS %d or RB %d\n", cs, rb); + return -EINVAL; + } + + if (test_and_set_bit(cs, &nfc->assigned_cs)) { + dev_err(nfc->dev, "Already assigned CS %d\n", cs); + return -EINVAL; + } + + anand->cs = cs; + anand->rb = rb; + + chip = &anand->chip; + mtd = nand_to_mtd(chip); + mtd->dev.parent = nfc->dev; + chip->controller = &nfc->controller; + chip->options = NAND_BUSWIDTH_AUTO | NAND_NO_SUBPAGE_WRITE | + NAND_USES_DMA; + + nand_set_flash_node(chip, np); + if (!mtd->name) { + dev_err(nfc->dev, "NAND label property is mandatory\n"); + return -EINVAL; + } + + ret = nand_scan(chip, 1); + if (ret) { + dev_err(nfc->dev, "Scan operation failed\n"); + return ret; + } + + ret = mtd_device_register(mtd, NULL, 0); + if (ret) { + nand_cleanup(chip); + return ret; + } + + list_add_tail(&anand->node, &nfc->chips); + + return 0; +} + +static void anfc_chips_cleanup(struct arasan_nfc *nfc) +{ + struct anand *anand, *tmp; + struct nand_chip *chip; + int ret; + + list_for_each_entry_safe(anand, tmp, &nfc->chips, node) { + chip = &anand->chip; + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); + list_del(&anand->node); + } +} + +static int anfc_chips_init(struct arasan_nfc *nfc) +{ + struct device_node *np = nfc->dev->of_node, *nand_np; + int nchips = of_get_child_count(np); + int ret; + + if (!nchips || nchips > ANFC_MAX_CS) { + dev_err(nfc->dev, "Incorrect number of NAND chips (%d)\n", + nchips); + return -EINVAL; + } + + for_each_child_of_node(np, nand_np) { + ret = anfc_chip_init(nfc, nand_np); + if (ret) { + of_node_put(nand_np); + anfc_chips_cleanup(nfc); + break; + } + } + + return ret; +} + +static void anfc_reset(struct arasan_nfc *nfc) +{ + /* Disable interrupt signals */ + writel_relaxed(0, nfc->base + INTR_SIG_EN_REG); + + /* Enable interrupt status */ + writel_relaxed(EVENT_MASK, nfc->base + INTR_STS_EN_REG); +} + +static int anfc_probe(struct platform_device *pdev) +{ + struct arasan_nfc *nfc; + int ret; + + nfc = devm_kzalloc(&pdev->dev, sizeof(*nfc), GFP_KERNEL); + if (!nfc) + return -ENOMEM; + + nfc->dev = &pdev->dev; + nand_controller_init(&nfc->controller); + nfc->controller.ops = &anfc_ops; + INIT_LIST_HEAD(&nfc->chips); + + nfc->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(nfc->base)) + return PTR_ERR(nfc->base); + + anfc_reset(nfc); + + nfc->controller_clk = devm_clk_get(&pdev->dev, "controller"); + if (IS_ERR(nfc->controller_clk)) + return PTR_ERR(nfc->controller_clk); + + nfc->bus_clk = devm_clk_get(&pdev->dev, "bus"); + if (IS_ERR(nfc->bus_clk)) + return PTR_ERR(nfc->bus_clk); + + ret = clk_prepare_enable(nfc->controller_clk); + if (ret) + return ret; + + ret = clk_prepare_enable(nfc->bus_clk); + if (ret) + goto disable_controller_clk; + + ret = anfc_chips_init(nfc); + if (ret) + goto disable_bus_clk; + + platform_set_drvdata(pdev, nfc); + + return 0; + +disable_bus_clk: + clk_disable_unprepare(nfc->bus_clk); + +disable_controller_clk: + clk_disable_unprepare(nfc->controller_clk); + + return ret; +} + +static int anfc_remove(struct platform_device *pdev) +{ + struct arasan_nfc *nfc = platform_get_drvdata(pdev); + + anfc_chips_cleanup(nfc); + + clk_disable_unprepare(nfc->bus_clk); + clk_disable_unprepare(nfc->controller_clk); + + return 0; +} + +static const struct of_device_id anfc_ids[] = { + { + .compatible = "xlnx,zynqmp-nand-controller", + }, + { + .compatible = "arasan,nfc-v3p10", + }, + {} +}; +MODULE_DEVICE_TABLE(of, anfc_ids); + 
+static struct platform_driver anfc_driver = { + .driver = { + .name = "arasan-nand-controller", + .of_match_table = anfc_ids, + }, + .probe = anfc_probe, + .remove = anfc_remove, +}; +module_platform_driver(anfc_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Punnaiah Choudary Kalluri "); +MODULE_AUTHOR("Naga Sureshkumar Relli "); +MODULE_AUTHOR("Miquel Raynal "); +MODULE_DESCRIPTION("Arasan NAND Flash Controller Driver"); -- GitLab From c3f4ec050f56eeab7c1f290321f9b762c95bd332 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sat, 18 Apr 2020 09:07:51 +0200 Subject: [PATCH 0673/1971] m68k/PCI: Fix a memory leak in an error handling path If 'ioremap' fails, we must free 'bridge', as done in other error handling path bellow. Fixes: 19cc4c843f40 ("m68k/PCI: Replace pci_fixup_irqs() call with host bridge IRQ mapping hooks") Signed-off-by: Christophe JAILLET Reviewed-by: Geert Uytterhoeven Signed-off-by: Greg Ungerer --- arch/m68k/coldfire/pci.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/m68k/coldfire/pci.c b/arch/m68k/coldfire/pci.c index 62b0eb6cf69a3..84eab0f5e00af 100644 --- a/arch/m68k/coldfire/pci.c +++ b/arch/m68k/coldfire/pci.c @@ -216,8 +216,10 @@ static int __init mcf_pci_init(void) /* Keep a virtual mapping to IO/config space active */ iospace = (unsigned long) ioremap(PCI_IO_PA, PCI_IO_SIZE); - if (iospace == 0) + if (iospace == 0) { + pci_free_host_bridge(bridge); return -ENODEV; + } pr_info("Coldfire: PCI IO/config window mapped to 0x%x\n", (u32) iospace); -- GitLab From 6d7c865c2714b122a940774990cfb1d87b57294a Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Mon, 18 May 2020 18:00:33 -0700 Subject: [PATCH 0674/1971] f2fs: avoid inifinite loop to wait for flushing node pages at cp_error Shutdown test is somtimes hung, since it keeps trying to flush dirty node pages in an inifinite loop. Let's drop dirty pages at umount in that case. Signed-off-by: Jaegeuk Kim --- fs/f2fs/node.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index e632de10aedab..e0bb0f7e0506e 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -1520,8 +1520,15 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted, trace_f2fs_writepage(page, NODE); - if (unlikely(f2fs_cp_error(sbi))) + if (unlikely(f2fs_cp_error(sbi))) { + if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) { + ClearPageUptodate(page); + dec_page_count(sbi, F2FS_DIRTY_NODES); + unlock_page(page); + return 0; + } goto redirty_out; + } if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) goto redirty_out; -- GitLab From 140fd4ac78d385e6c8e6a5757585f6c707085f87 Mon Sep 17 00:00:00 2001 From: Hill Ma Date: Sat, 25 Apr 2020 13:06:41 -0700 Subject: [PATCH 0675/1971] x86/reboot/quirks: Add MacBook6,1 reboot quirk On MacBook6,1 reboot would hang unless parameter reboot=pci is added. Make it automatic. 
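Until a kernel carrying this quirk is installed, the same behaviour can
be obtained manually by appending the parameter to the kernel command
line, e.g. (bootloader entry shown only as an illustration):

    linux /boot/vmlinuz root=/dev/sda2 ro quiet reboot=pci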
Signed-off-by: Hill Ma Signed-off-by: Borislav Petkov Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/20200425200641.GA1554@cslab.localdomain --- arch/x86/kernel/reboot.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 3ca43be4f9cf0..8b8cebfd32989 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -197,6 +197,14 @@ static const struct dmi_system_id reboot_dmi_table[] __initconst = { DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5"), }, }, + { /* Handle problems with rebooting on Apple MacBook6,1 */ + .callback = set_pci_reboot, + .ident = "Apple MacBook6,1", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBook6,1"), + }, + }, { /* Handle problems with rebooting on Apple MacBookPro5 */ .callback = set_pci_reboot, .ident = "Apple MacBookPro5", -- GitLab From 11399346ac39a26ade2a90303d38ad318163c665 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Thu, 7 May 2020 14:00:33 -0500 Subject: [PATCH 0676/1971] mtd: Replace zero-length array with flexible-array The current codebase makes use of the zero-length array language extension to the C90 standard, but the preferred mechanism to declare variable-length types such as these ones is a flexible array member[1][2], introduced in C99: struct foo { int stuff; struct boo array[]; }; By making use of the mechanism above, we will get a compiler warning in case the flexible array does not occur last in the structure, which will help us prevent some kind of undefined behavior bugs from being inadvertently introduced[3] to the codebase from now on. Also, notice that, dynamic memory allocations won't be affected by this change: "Flexible array members have incomplete type, and so the sizeof operator may not be applied. As a quirk of the original implementation of zero-length arrays, sizeof evaluates to zero."[1] sizeof(flexible-array-member) triggers a warning because flexible array members have incomplete type[1]. There are some instances of code in which the sizeof operator is being incorrectly/erroneously applied to zero-length arrays and the result is zero. Such instances may be hiding some bugs. So, this work (flexible-array member conversions) will also help to get completely rid of those sorts of issues. This issue was found with the help of Coccinelle. [1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html [2] https://github.com/KSPP/linux/issues/21 [3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour") Signed-off-by: Gustavo A. R. 
Silva Acked-by: Miquel Raynal Signed-off-by: Vignesh Raghavendra Link: https://lore.kernel.org/r/20200507190033.GA15215@embeddedor --- include/linux/mtd/cfi.h | 6 +++--- include/linux/mtd/qinfo.h | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h index c98a211086880..fd1ecb8211060 100644 --- a/include/linux/mtd/cfi.h +++ b/include/linux/mtd/cfi.h @@ -138,7 +138,7 @@ struct cfi_ident { uint16_t InterfaceDesc; uint16_t MaxBufWriteSize; uint8_t NumEraseRegions; - uint32_t EraseRegionInfo[0]; /* Not host ordered */ + uint32_t EraseRegionInfo[]; /* Not host ordered */ } __packed; /* Extended Query Structure for both PRI and ALT */ @@ -165,7 +165,7 @@ struct cfi_pri_intelext { uint16_t ProtRegAddr; uint8_t FactProtRegSize; uint8_t UserProtRegSize; - uint8_t extra[0]; + uint8_t extra[]; } __packed; struct cfi_intelext_otpinfo { @@ -286,7 +286,7 @@ struct cfi_private { map_word sector_erase_cmd; unsigned long chipshift; /* Because they're of the same type */ const char *im_name; /* inter_module name for cmdset_setup */ - struct flchip chips[0]; /* per-chip data structure for each chip */ + struct flchip chips[]; /* per-chip data structure for each chip */ }; uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs, diff --git a/include/linux/mtd/qinfo.h b/include/linux/mtd/qinfo.h index df5b9fddea164..2e3f43788d483 100644 --- a/include/linux/mtd/qinfo.h +++ b/include/linux/mtd/qinfo.h @@ -24,7 +24,7 @@ struct lpddr_private { struct qinfo_chip *qinfo; int numchips; unsigned long chipshift; - struct flchip chips[0]; + struct flchip chips[]; }; /* qinfo_query_info structure contains request information for -- GitLab From de308d1815c9e8fe602a958c5c76142ff6501d75 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Mon, 25 May 2020 12:38:39 +0200 Subject: [PATCH 0677/1971] x86/apic: Make TSC deadline timer detection message visible The commit c84cb3735fd5 ("x86/apic: Move TSC deadline timer debug printk") removed the message which said that the deadline timer was enabled. It added a pr_debug() message which is issued when deadline timer validation succeeds. Well, issued only when CONFIG_DYNAMIC_DEBUG is enabled - otherwise pr_debug() calls get optimized away if DEBUG is not defined in the compilation unit. Therefore, make the above message pr_info() so that it is visible in dmesg. Signed-off-by: Borislav Petkov Link: https://lkml.kernel.org/r/20200525104218.27018-1-bp@alien8.de --- arch/x86/kernel/apic/apic.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index e53dda210cd73..21d2f1de10578 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -2093,7 +2093,7 @@ void __init init_apic_mappings(void) unsigned int new_apicid; if (apic_validate_deadline_timer()) - pr_debug("TSC deadline timer available\n"); + pr_info("TSC deadline timer available\n"); if (x2apic_mode) { boot_cpu_physical_apicid = read_apic_id(); -- GitLab From 88ffef1b65cf989b2dfea6374caff804fca6ec32 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 09:45:49 +0200 Subject: [PATCH 0678/1971] mtd: rawnand: arasan: Support the hardware BCH ECC engine Add support for the hardware ECC BCH engine. Please mind that this engine has an important limitation: BCH implementation does not inform the user when an uncorrectable ECC error occurs. 
To workaround this, we avoid using the hardware engine in the read path and do the computation with the software BCH implementation, which is faster than mixing hardware (for correction) and software (for verification). Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200519074549.23673-9-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/Kconfig | 1 + drivers/mtd/nand/raw/arasan-nand-controller.c | 342 ++++++++++++++++++ 2 files changed, 343 insertions(+) diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig index e2bc87779bf96..113f610522692 100644 --- a/drivers/mtd/nand/raw/Kconfig +++ b/drivers/mtd/nand/raw/Kconfig @@ -456,6 +456,7 @@ config MTD_NAND_CADENCE config MTD_NAND_ARASAN tristate "Support for Arasan NAND flash controller" depends on HAS_IOMEM && HAS_DMA + select BCH help Enables the driver for the Arasan NAND flash controller on Zynq Ultrascale+ MPSoC. diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c index 03d95ee1488ba..7141dcccba3c2 100644 --- a/drivers/mtd/nand/raw/arasan-nand-controller.c +++ b/drivers/mtd/nand/raw/arasan-nand-controller.c @@ -10,6 +10,7 @@ * Naga Sureshkumar Relli */ +#include #include #include #include @@ -144,6 +145,11 @@ struct anfc_op { * @strength: Register value of the ECC strength * @raddr_cycles: Row address cycle information * @caddr_cycles: Column address cycle information + * @ecc_bits: Exact number of ECC bits per syndrome + * @ecc_total: Total number of ECC bytes + * @errloc: Array of errors located with soft BCH + * @hw_ecc: Buffer to store syndromes computed by hardware + * @bch: BCH structure */ struct anand { struct list_head node; @@ -157,6 +163,11 @@ struct anand { u32 strength; u16 raddr_cycles; u16 caddr_cycles; + unsigned int ecc_bits; + unsigned int ecc_total; + unsigned int *errloc; + u8 *hw_ecc; + struct bch_control *bch; }; /** @@ -261,6 +272,194 @@ static int anfc_pkt_len_config(unsigned int len, unsigned int *steps, return 0; } +/* + * When using the embedded hardware ECC engine, the controller is in charge of + * feeding the engine with, first, the ECC residue present in the data array. + * A typical read operation is: + * 1/ Assert the read operation by sending the relevant command/address cycles + * but targeting the column of the first ECC bytes in the OOB area instead of + * the main data directly. + * 2/ After having read the relevant number of ECC bytes, the controller uses + * the RNDOUT/RNDSTART commands which are set into the "ECC Spare Command + * Register" to move the pointer back at the beginning of the main data. + * 3/ It will read the content of the main area for a given size (pktsize) and + * will feed the ECC engine with this buffer again. + * 4/ The ECC engine derives the ECC bytes for the given data and compare them + * with the ones already received. It eventually trigger status flags and + * then set the "Buffer Read Ready" flag. + * 5/ The corrected data is then available for reading from the data port + * register. + * + * The hardware BCH ECC engine is known to be inconstent in BCH mode and never + * reports uncorrectable errors. Because of this bug, we have to use the + * software BCH implementation in the read path. 
+ */ +static int anfc_read_page_hw_ecc(struct nand_chip *chip, u8 *buf, + int oob_required, int page) +{ + struct arasan_nfc *nfc = to_anfc(chip->controller); + struct mtd_info *mtd = nand_to_mtd(chip); + struct anand *anand = to_anand(chip); + unsigned int len = mtd->writesize + (oob_required ? mtd->oobsize : 0); + unsigned int max_bitflips = 0; + dma_addr_t dma_addr; + int step, ret; + struct anfc_op nfc_op = { + .pkt_reg = + PKT_SIZE(chip->ecc.size) | + PKT_STEPS(chip->ecc.steps), + .addr1_reg = + (page & 0xFF) << (8 * (anand->caddr_cycles)) | + (((page >> 8) & 0xFF) << (8 * (1 + anand->caddr_cycles))), + .addr2_reg = + ((page >> 16) & 0xFF) | + ADDR2_STRENGTH(anand->strength) | + ADDR2_CS(anand->cs), + .cmd_reg = + CMD_1(NAND_CMD_READ0) | + CMD_2(NAND_CMD_READSTART) | + CMD_PAGE_SIZE(anand->page_sz) | + CMD_DMA_ENABLE | + CMD_NADDRS(anand->caddr_cycles + + anand->raddr_cycles), + .prog_reg = PROG_PGRD, + }; + + dma_addr = dma_map_single(nfc->dev, (void *)buf, len, DMA_FROM_DEVICE); + if (dma_mapping_error(nfc->dev, dma_addr)) { + dev_err(nfc->dev, "Buffer mapping error"); + return -EIO; + } + + writel_relaxed(lower_32_bits(dma_addr), nfc->base + DMA_ADDR0_REG); + writel_relaxed(upper_32_bits(dma_addr), nfc->base + DMA_ADDR1_REG); + + anfc_trigger_op(nfc, &nfc_op); + + ret = anfc_wait_for_event(nfc, XFER_COMPLETE); + dma_unmap_single(nfc->dev, dma_addr, len, DMA_FROM_DEVICE); + if (ret) { + dev_err(nfc->dev, "Error reading page %d\n", page); + return ret; + } + + /* Store the raw OOB bytes as well */ + ret = nand_change_read_column_op(chip, mtd->writesize, chip->oob_poi, + mtd->oobsize, 0); + if (ret) + return ret; + + /* + * For each step, compute by softare the BCH syndrome over the raw data. + * Compare the theoretical amount of errors and compare with the + * hardware engine feedback. + */ + for (step = 0; step < chip->ecc.steps; step++) { + u8 *raw_buf = &buf[step * chip->ecc.size]; + unsigned int bit, byte; + int bf, i; + + /* Extract the syndrome, it is not necessarily aligned */ + memset(anand->hw_ecc, 0, chip->ecc.bytes); + nand_extract_bits(anand->hw_ecc, 0, + &chip->oob_poi[mtd->oobsize - anand->ecc_total], + anand->ecc_bits * step, anand->ecc_bits); + + bf = bch_decode(anand->bch, raw_buf, chip->ecc.size, + anand->hw_ecc, NULL, NULL, anand->errloc); + if (!bf) { + continue; + } else if (bf > 0) { + for (i = 0; i < bf; i++) { + /* Only correct the data, not the syndrome */ + if (anand->errloc[i] < (chip->ecc.size * 8)) { + bit = BIT(anand->errloc[i] & 7); + byte = anand->errloc[i] >> 3; + raw_buf[byte] ^= bit; + } + } + + mtd->ecc_stats.corrected += bf; + max_bitflips = max_t(unsigned int, max_bitflips, bf); + + continue; + } + + bf = nand_check_erased_ecc_chunk(raw_buf, chip->ecc.size, + NULL, 0, NULL, 0, + chip->ecc.strength); + if (bf > 0) { + mtd->ecc_stats.corrected += bf; + max_bitflips = max_t(unsigned int, max_bitflips, bf); + memset(raw_buf, 0xFF, chip->ecc.size); + } else if (bf < 0) { + mtd->ecc_stats.failed++; + } + } + + return 0; +} + +static int anfc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf, + int oob_required, int page) +{ + struct anand *anand = to_anand(chip); + struct arasan_nfc *nfc = to_anfc(chip->controller); + struct mtd_info *mtd = nand_to_mtd(chip); + unsigned int len = mtd->writesize + (oob_required ? 
mtd->oobsize : 0); + dma_addr_t dma_addr; + int ret; + struct anfc_op nfc_op = { + .pkt_reg = + PKT_SIZE(chip->ecc.size) | + PKT_STEPS(chip->ecc.steps), + .addr1_reg = + (page & 0xFF) << (8 * (anand->caddr_cycles)) | + (((page >> 8) & 0xFF) << (8 * (1 + anand->caddr_cycles))), + .addr2_reg = + ((page >> 16) & 0xFF) | + ADDR2_STRENGTH(anand->strength) | + ADDR2_CS(anand->cs), + .cmd_reg = + CMD_1(NAND_CMD_SEQIN) | + CMD_2(NAND_CMD_PAGEPROG) | + CMD_PAGE_SIZE(anand->page_sz) | + CMD_DMA_ENABLE | + CMD_NADDRS(anand->caddr_cycles + + anand->raddr_cycles) | + CMD_ECC_ENABLE, + .prog_reg = PROG_PGPROG, + }; + + writel_relaxed(anand->ecc_conf, nfc->base + ECC_CONF_REG); + writel_relaxed(ECC_SP_CMD1(NAND_CMD_RNDIN) | + ECC_SP_ADDRS(anand->caddr_cycles), + nfc->base + ECC_SP_REG); + + dma_addr = dma_map_single(nfc->dev, (void *)buf, len, DMA_TO_DEVICE); + if (dma_mapping_error(nfc->dev, dma_addr)) { + dev_err(nfc->dev, "Buffer mapping error"); + return -EIO; + } + + writel_relaxed(lower_32_bits(dma_addr), nfc->base + DMA_ADDR0_REG); + writel_relaxed(upper_32_bits(dma_addr), nfc->base + DMA_ADDR1_REG); + + anfc_trigger_op(nfc, &nfc_op); + ret = anfc_wait_for_event(nfc, XFER_COMPLETE); + dma_unmap_single(nfc->dev, dma_addr, len, DMA_TO_DEVICE); + if (ret) { + dev_err(nfc->dev, "Error writing page %d\n", page); + return ret; + } + + /* Spare data is not protected */ + if (oob_required) + ret = nand_write_oob_std(chip, page); + + return ret; +} + /* NAND framework ->exec_op() hooks and related helpers */ static int anfc_parse_instructions(struct nand_chip *chip, const struct nand_subop *subop, @@ -681,6 +880,138 @@ static int anfc_setup_data_interface(struct nand_chip *chip, int target, return 0; } +static int anfc_calc_hw_ecc_bytes(int step_size, int strength) +{ + unsigned int bch_gf_mag, ecc_bits; + + switch (step_size) { + case SZ_512: + bch_gf_mag = 13; + break; + case SZ_1K: + bch_gf_mag = 14; + break; + default: + return -EINVAL; + } + + ecc_bits = bch_gf_mag * strength; + + return DIV_ROUND_UP(ecc_bits, 8); +} + +static const int anfc_hw_ecc_512_strengths[] = {4, 8, 12}; + +static const int anfc_hw_ecc_1024_strengths[] = {24}; + +static const struct nand_ecc_step_info anfc_hw_ecc_step_infos[] = { + { + .stepsize = SZ_512, + .strengths = anfc_hw_ecc_512_strengths, + .nstrengths = ARRAY_SIZE(anfc_hw_ecc_512_strengths), + }, + { + .stepsize = SZ_1K, + .strengths = anfc_hw_ecc_1024_strengths, + .nstrengths = ARRAY_SIZE(anfc_hw_ecc_1024_strengths), + }, +}; + +static const struct nand_ecc_caps anfc_hw_ecc_caps = { + .stepinfos = anfc_hw_ecc_step_infos, + .nstepinfos = ARRAY_SIZE(anfc_hw_ecc_step_infos), + .calc_ecc_bytes = anfc_calc_hw_ecc_bytes, +}; + +static int anfc_init_hw_ecc_controller(struct arasan_nfc *nfc, + struct nand_chip *chip) +{ + struct anand *anand = to_anand(chip); + struct mtd_info *mtd = nand_to_mtd(chip); + struct nand_ecc_ctrl *ecc = &chip->ecc; + unsigned int bch_prim_poly = 0, bch_gf_mag = 0, ecc_offset; + int ret; + + switch (mtd->writesize) { + case SZ_512: + case SZ_2K: + case SZ_4K: + case SZ_8K: + case SZ_16K: + break; + default: + dev_err(nfc->dev, "Unsupported page size %d\n", mtd->writesize); + return -EINVAL; + } + + ret = nand_ecc_choose_conf(chip, &anfc_hw_ecc_caps, mtd->oobsize); + if (ret) + return ret; + + switch (ecc->strength) { + case 12: + anand->strength = 0x1; + break; + case 8: + anand->strength = 0x2; + break; + case 4: + anand->strength = 0x3; + break; + case 24: + anand->strength = 0x4; + break; + default: + dev_err(nfc->dev, "Unsupported strength 
%d\n", ecc->strength); + return -EINVAL; + } + + switch (ecc->size) { + case SZ_512: + bch_gf_mag = 13; + bch_prim_poly = 0x201b; + break; + case SZ_1K: + bch_gf_mag = 14; + bch_prim_poly = 0x4443; + break; + default: + dev_err(nfc->dev, "Unsupported step size %d\n", ecc->strength); + return -EINVAL; + } + + mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops); + + ecc->steps = mtd->writesize / ecc->size; + ecc->algo = NAND_ECC_BCH; + anand->ecc_bits = bch_gf_mag * ecc->strength; + ecc->bytes = DIV_ROUND_UP(anand->ecc_bits, 8); + anand->ecc_total = DIV_ROUND_UP(anand->ecc_bits * ecc->steps, 8); + ecc_offset = mtd->writesize + mtd->oobsize - anand->ecc_total; + anand->ecc_conf = ECC_CONF_COL(ecc_offset) | + ECC_CONF_LEN(anand->ecc_total) | + ECC_CONF_BCH_EN; + + anand->errloc = devm_kmalloc_array(nfc->dev, ecc->strength, + sizeof(*anand->errloc), GFP_KERNEL); + if (!anand->errloc) + return -ENOMEM; + + anand->hw_ecc = devm_kmalloc(nfc->dev, ecc->bytes, GFP_KERNEL); + if (!anand->hw_ecc) + return -ENOMEM; + + /* Enforce bit swapping to fit the hardware */ + anand->bch = bch_init(bch_gf_mag, ecc->strength, bch_prim_poly, true); + if (!anand->bch) + return -EINVAL; + + ecc->read_page = anfc_read_page_hw_ecc; + ecc->write_page = anfc_write_page_hw_ecc; + + return 0; +} + static int anfc_attach_chip(struct nand_chip *chip) { struct anand *anand = to_anand(chip); @@ -731,6 +1062,8 @@ static int anfc_attach_chip(struct nand_chip *chip) case NAND_ECC_ON_DIE: break; case NAND_ECC_HW: + ret = anfc_init_hw_ecc_controller(nfc, chip); + break; default: dev_err(nfc->dev, "Unsupported ECC mode: %d\n", chip->ecc.mode); @@ -740,10 +1073,19 @@ static int anfc_attach_chip(struct nand_chip *chip) return ret; } +static void anfc_detach_chip(struct nand_chip *chip) +{ + struct anand *anand = to_anand(chip); + + if (anand->bch) + bch_free(anand->bch); +} + static const struct nand_controller_ops anfc_ops = { .exec_op = anfc_exec_op, .setup_data_interface = anfc_setup_data_interface, .attach_chip = anfc_attach_chip, + .detach_chip = anfc_detach_chip, }; static int anfc_chip_init(struct arasan_nfc *nfc, struct device_node *np) -- GitLab From 08f25cd767e1086266453fb2f4a3ded05b9cc8a7 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 14:59:34 +0200 Subject: [PATCH 0679/1971] mtd: rawnand: ams-delta: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. 
Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-2-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/ams-delta.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/ams-delta.c b/drivers/mtd/nand/raw/ams-delta.c index d66dab25df203..3711e7a0436cd 100644 --- a/drivers/mtd/nand/raw/ams-delta.c +++ b/drivers/mtd/nand/raw/ams-delta.c @@ -387,12 +387,15 @@ static int gpio_nand_remove(struct platform_device *pdev) { struct gpio_nand *priv = platform_get_drvdata(pdev); struct mtd_info *mtd = nand_to_mtd(&priv->nand_chip); + int ret; /* Apply write protection */ gpiod_set_value(priv->gpiod_nwp, 1); /* Unregister device */ - nand_release(mtd_to_nand(mtd)); + ret = mtd_device_unregister(mtd); + WARN_ON(ret); + nand_cleanup(mtd_to_nand(mtd)); return 0; } -- GitLab From f376c43bec4f8ee8d1ba5c5c4cfbd6e84fb279cb Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Sat, 16 May 2020 01:08:06 -0700 Subject: [PATCH 0680/1971] clk: bcm2835: Fix return type of bcm2835_register_gate bcm2835_register_gate is used as a callback for the clk_register member of bcm2835_clk_desc, which expects a struct clk_hw * return type but bcm2835_register_gate returns a struct clk *. This discrepancy is hidden by the fact that bcm2835_register_gate is cast to the typedef bcm2835_clk_register by the _REGISTER macro. This turns out to be a control flow integrity violation, which is how this was noticed. Change the return type of bcm2835_register_gate to be struct clk_hw * and use clk_hw_register_gate to do so. This should be a non-functional change as clk_register_gate calls clk_hw_register_gate anyways but this is needed to avoid issues with further changes. Fixes: b19f009d4510 ("clk: bcm2835: Migrate to clk_hw based registration and OF APIs") Link: https://github.com/ClangBuiltLinux/linux/issues/1028 Signed-off-by: Nathan Chancellor Link: https://lkml.kernel.org/r/20200516080806.1459784-1-natechancellor@gmail.com Signed-off-by: Stephen Boyd --- drivers/clk/bcm/clk-bcm2835.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c index ded13ccf768e1..7c845c293af00 100644 --- a/drivers/clk/bcm/clk-bcm2835.c +++ b/drivers/clk/bcm/clk-bcm2835.c @@ -1448,13 +1448,13 @@ static struct clk_hw *bcm2835_register_clock(struct bcm2835_cprman *cprman, return &clock->hw; } -static struct clk *bcm2835_register_gate(struct bcm2835_cprman *cprman, +static struct clk_hw *bcm2835_register_gate(struct bcm2835_cprman *cprman, const struct bcm2835_gate_data *data) { - return clk_register_gate(cprman->dev, data->name, data->parent, - CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE, - cprman->regs + data->ctl_reg, - CM_GATE_BIT, 0, &cprman->regs_lock); + return clk_hw_register_gate(cprman->dev, data->name, data->parent, + CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE, + cprman->regs + data->ctl_reg, + CM_GATE_BIT, 0, &cprman->regs_lock); } typedef struct clk_hw *(*bcm2835_clk_register)(struct bcm2835_cprman *cprman, -- GitLab From 99a1ae29360980e79fa2d616819a6fe7411e4eda Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Sat, 16 May 2020 01:08:07 -0700 Subject: [PATCH 0681/1971] clk: bcm2835: Remove casting to bcm2835_clk_register There are four different callback functions that are used for the clk_register callback that all have different second parameter types. 
bcm2835_register_pll -> struct bcm2835_pll_data bcm2835_register_pll_divider -> struct bcm2835_pll_divider_data bcm2835_register_clock -> struct bcm2835_clock_data bcm2835_register_date -> struct bcm2835_gate_data These callbacks are cast to bcm2835_clk_register so that there is no error about incompatible pointer types. Unfortunately, this is a control flow integrity violation, which verifies that the callback function's types match the prototypes exactly before jumping. [ 0.857913] CFI failure (target: 0xffffff9334a81820): [ 0.857977] WARNING: CPU: 3 PID: 35 at kernel/cfi.c:29 __cfi_check_fail+0x50/0x58 [ 0.857985] Modules linked in: [ 0.858007] CPU: 3 PID: 35 Comm: kworker/3:1 Not tainted 4.19.123-v8-01301-gdbb48f16956e4-dirty #1 [ 0.858015] Hardware name: Raspberry Pi 3 Model B Rev 1.2 (DT) [ 0.858031] Workqueue: events 0xffffff9334a925c8 [ 0.858046] pstate: 60000005 (nZCv daif -PAN -UAO) [ 0.858058] pc : __cfi_check_fail+0x50/0x58 [ 0.858070] lr : __cfi_check_fail+0x50/0x58 [ 0.858078] sp : ffffff800814ba90 [ 0.858086] x29: ffffff800814ba90 x28: 000fffffffdfff3d [ 0.858101] x27: 00000000002000c2 x26: ffffff93355fdb18 [ 0.858116] x25: 0000000000000000 x24: ffffff9334a81820 [ 0.858131] x23: ffffff93357f3580 x22: ffffff9334af1000 [ 0.858146] x21: a79b57e88f8ebc81 x20: ffffff93357f3580 [ 0.858161] x19: ffffff9334a81820 x18: fffffff679769070 [ 0.858175] x17: 0000000000000000 x16: 0000000000000000 [ 0.858190] x15: 0000000000000004 x14: 000000000000003c [ 0.858205] x13: 0000000000003044 x12: 0000000000000000 [ 0.858220] x11: b57e91cd641bae00 x10: b57e91cd641bae00 [ 0.858235] x9 : b57e91cd641bae00 x8 : b57e91cd641bae00 [ 0.858250] x7 : 0000000000000000 x6 : ffffff933591d4e5 [ 0.858264] x5 : 0000000000000000 x4 : 0000000000000000 [ 0.858279] x3 : ffffff800814b718 x2 : ffffff9334a84818 [ 0.858293] x1 : ffffff9334bba66c x0 : 0000000000000029 [ 0.858308] Call trace: [ 0.858321] __cfi_check_fail+0x50/0x58 [ 0.858337] __cfi_check+0x3ab3c/0x4467c [ 0.858351] bcm2835_clk_probe+0x210/0x2dc [ 0.858369] platform_drv_probe+0xb0/0xfc [ 0.858380] really_probe+0x4a0/0x5a8 [ 0.858391] driver_probe_device+0x68/0x104 [ 0.858403] __device_attach_driver+0x100/0x148 [ 0.858418] bus_for_each_drv+0xb0/0x12c [ 0.858431] __device_attach.llvm.17225159516306086099+0xc0/0x168 [ 0.858443] bus_probe_device+0x44/0xfc [ 0.858455] deferred_probe_work_func+0xa0/0xe0 [ 0.858472] process_one_work+0x210/0x538 [ 0.858485] worker_thread+0x2e8/0x478 [ 0.858500] kthread+0x154/0x164 [ 0.858515] ret_from_fork+0x10/0x18 To fix this, change the second parameter of all functions void * and use a local variable with the correct type so that everything works properly. With this, the only use of bcm2835_clk_register is in struct bcm2835_clk_desc so we can just remove it and use the type directly. 
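Reduced to a minimal sketch (illustrative function name, not the actual
driver code; see the hunks below), the change for each callback looks
like:

    /* Callback type expected by struct bcm2835_clk_desc. */
    struct clk_hw *(*clk_register)(struct bcm2835_cprman *cprman,
                                   const void *data);

    /*
     * The implementation now matches that prototype exactly; the
     * narrowing to the specific data structure happens inside the
     * body instead of through a cast of the function pointer.
     */
    static struct clk_hw *register_example(struct bcm2835_cprman *cprman,
                                           const void *data)
    {
            const struct bcm2835_gate_data *gate_data = data;

            /* ... use gate_data->name, gate_data->ctl_reg, ... */
            return NULL;
    }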
Fixes: 56eb3a2ed972 ("clk: bcm2835: remove use of BCM2835_CLOCK_COUNT in driver") Link: https://github.com/ClangBuiltLinux/linux/issues/1028 Signed-off-by: Nathan Chancellor Link: https://lkml.kernel.org/r/20200516080806.1459784-2-natechancellor@gmail.com Signed-off-by: Stephen Boyd --- drivers/clk/bcm/clk-bcm2835.c | 68 +++++++++++++++++++---------------- 1 file changed, 37 insertions(+), 31 deletions(-) diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c index 7c845c293af00..0d0eeb3b0dd52 100644 --- a/drivers/clk/bcm/clk-bcm2835.c +++ b/drivers/clk/bcm/clk-bcm2835.c @@ -1296,8 +1296,9 @@ static const struct clk_ops bcm2835_vpu_clock_clk_ops = { }; static struct clk_hw *bcm2835_register_pll(struct bcm2835_cprman *cprman, - const struct bcm2835_pll_data *data) + const void *data) { + const struct bcm2835_pll_data *pll_data = data; struct bcm2835_pll *pll; struct clk_init_data init; int ret; @@ -1307,7 +1308,7 @@ static struct clk_hw *bcm2835_register_pll(struct bcm2835_cprman *cprman, /* All of the PLLs derive from the external oscillator. */ init.parent_names = &cprman->real_parent_names[0]; init.num_parents = 1; - init.name = data->name; + init.name = pll_data->name; init.ops = &bcm2835_pll_clk_ops; init.flags = CLK_IGNORE_UNUSED; @@ -1316,7 +1317,7 @@ static struct clk_hw *bcm2835_register_pll(struct bcm2835_cprman *cprman, return NULL; pll->cprman = cprman; - pll->data = data; + pll->data = pll_data; pll->hw.init = &init; ret = devm_clk_hw_register(cprman->dev, &pll->hw); @@ -1327,35 +1328,36 @@ static struct clk_hw *bcm2835_register_pll(struct bcm2835_cprman *cprman, static struct clk_hw * bcm2835_register_pll_divider(struct bcm2835_cprman *cprman, - const struct bcm2835_pll_divider_data *data) + const void *data) { + const struct bcm2835_pll_divider_data *divider_data = data; struct bcm2835_pll_divider *divider; struct clk_init_data init; const char *divider_name; int ret; - if (data->fixed_divider != 1) { + if (divider_data->fixed_divider != 1) { divider_name = devm_kasprintf(cprman->dev, GFP_KERNEL, - "%s_prediv", data->name); + "%s_prediv", divider_data->name); if (!divider_name) return NULL; } else { - divider_name = data->name; + divider_name = divider_data->name; } memset(&init, 0, sizeof(init)); - init.parent_names = &data->source_pll; + init.parent_names = ÷r_data->source_pll; init.num_parents = 1; init.name = divider_name; init.ops = &bcm2835_pll_divider_clk_ops; - init.flags = data->flags | CLK_IGNORE_UNUSED; + init.flags = divider_data->flags | CLK_IGNORE_UNUSED; divider = devm_kzalloc(cprman->dev, sizeof(*divider), GFP_KERNEL); if (!divider) return NULL; - divider->div.reg = cprman->regs + data->a2w_reg; + divider->div.reg = cprman->regs + divider_data->a2w_reg; divider->div.shift = A2W_PLL_DIV_SHIFT; divider->div.width = A2W_PLL_DIV_BITS; divider->div.flags = CLK_DIVIDER_MAX_AT_ZERO; @@ -1364,7 +1366,7 @@ bcm2835_register_pll_divider(struct bcm2835_cprman *cprman, divider->div.table = NULL; divider->cprman = cprman; - divider->data = data; + divider->data = divider_data; ret = devm_clk_hw_register(cprman->dev, ÷r->div.hw); if (ret) @@ -1374,20 +1376,22 @@ bcm2835_register_pll_divider(struct bcm2835_cprman *cprman, * PLLH's channels have a fixed divide by 10 afterwards, which * is what our consumers are actually using. 
*/ - if (data->fixed_divider != 1) { - return clk_hw_register_fixed_factor(cprman->dev, data->name, + if (divider_data->fixed_divider != 1) { + return clk_hw_register_fixed_factor(cprman->dev, + divider_data->name, divider_name, CLK_SET_RATE_PARENT, 1, - data->fixed_divider); + divider_data->fixed_divider); } return ÷r->div.hw; } static struct clk_hw *bcm2835_register_clock(struct bcm2835_cprman *cprman, - const struct bcm2835_clock_data *data) + const void *data) { + const struct bcm2835_clock_data *clock_data = data; struct bcm2835_clock *clock; struct clk_init_data init; const char *parents[1 << CM_SRC_BITS]; @@ -1398,8 +1402,8 @@ static struct clk_hw *bcm2835_register_clock(struct bcm2835_cprman *cprman, * Replace our strings referencing parent clocks with the * actual clock-output-name of the parent. */ - for (i = 0; i < data->num_mux_parents; i++) { - parents[i] = data->parents[i]; + for (i = 0; i < clock_data->num_mux_parents; i++) { + parents[i] = clock_data->parents[i]; ret = match_string(cprman_parent_names, ARRAY_SIZE(cprman_parent_names), @@ -1410,18 +1414,18 @@ static struct clk_hw *bcm2835_register_clock(struct bcm2835_cprman *cprman, memset(&init, 0, sizeof(init)); init.parent_names = parents; - init.num_parents = data->num_mux_parents; - init.name = data->name; - init.flags = data->flags | CLK_IGNORE_UNUSED; + init.num_parents = clock_data->num_mux_parents; + init.name = clock_data->name; + init.flags = clock_data->flags | CLK_IGNORE_UNUSED; /* * Pass the CLK_SET_RATE_PARENT flag if we are allowed to propagate * rate changes on at least of the parents. */ - if (data->set_rate_parent) + if (clock_data->set_rate_parent) init.flags |= CLK_SET_RATE_PARENT; - if (data->is_vpu_clock) { + if (clock_data->is_vpu_clock) { init.ops = &bcm2835_vpu_clock_clk_ops; } else { init.ops = &bcm2835_clock_clk_ops; @@ -1430,7 +1434,7 @@ static struct clk_hw *bcm2835_register_clock(struct bcm2835_cprman *cprman, /* If the clock wasn't actually enabled at boot, it's not * critical. */ - if (!(cprman_read(cprman, data->ctl_reg) & CM_ENABLE)) + if (!(cprman_read(cprman, clock_data->ctl_reg) & CM_ENABLE)) init.flags &= ~CLK_IS_CRITICAL; } @@ -1439,7 +1443,7 @@ static struct clk_hw *bcm2835_register_clock(struct bcm2835_cprman *cprman, return NULL; clock->cprman = cprman; - clock->data = data; + clock->data = clock_data; clock->hw.init = &init; ret = devm_clk_hw_register(cprman->dev, &clock->hw); @@ -1449,24 +1453,26 @@ static struct clk_hw *bcm2835_register_clock(struct bcm2835_cprman *cprman, } static struct clk_hw *bcm2835_register_gate(struct bcm2835_cprman *cprman, - const struct bcm2835_gate_data *data) + const void *data) { - return clk_hw_register_gate(cprman->dev, data->name, data->parent, + const struct bcm2835_gate_data *gate_data = data; + + return clk_hw_register_gate(cprman->dev, gate_data->name, + gate_data->parent, CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE, - cprman->regs + data->ctl_reg, + cprman->regs + gate_data->ctl_reg, CM_GATE_BIT, 0, &cprman->regs_lock); } -typedef struct clk_hw *(*bcm2835_clk_register)(struct bcm2835_cprman *cprman, - const void *data); struct bcm2835_clk_desc { - bcm2835_clk_register clk_register; + struct clk_hw *(*clk_register)(struct bcm2835_cprman *cprman, + const void *data); unsigned int supported; const void *data; }; /* assignment helper macros for different clock types */ -#define _REGISTER(f, s, ...) { .clk_register = (bcm2835_clk_register)f, \ +#define _REGISTER(f, s, ...) 
{ .clk_register = f, \ .supported = s, \ .data = __VA_ARGS__ } #define REGISTER_PLL(s, ...) _REGISTER(&bcm2835_register_pll, \ -- GitLab From 0541e0217ac8daef32650ba8477ff0a55f9a94b1 Mon Sep 17 00:00:00 2001 From: Rajan Vaja Date: Mon, 2 Mar 2020 13:50:40 -0800 Subject: [PATCH 0682/1971] clk: zynqmp: Limit bestdiv with maxdiv Clock divider value should not be greater than maximum divider value. So use minimum of best divider or maximum divider value. Signed-off-by: Rajan Vaja Signed-off-by: Jolly Shah Link: https://lkml.kernel.org/r/1583185843-20707-2-git-send-email-jolly.shah@xilinx.com Signed-off-by: Stephen Boyd --- drivers/clk/zynqmp/divider.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/clk/zynqmp/divider.c b/drivers/clk/zynqmp/divider.c index 4be2cc76aa2e7..5c41ddbee6462 100644 --- a/drivers/clk/zynqmp/divider.c +++ b/drivers/clk/zynqmp/divider.c @@ -197,6 +197,8 @@ static long zynqmp_clk_divider_round_rate(struct clk_hw *hw, if ((clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) && divider->is_frac) bestdiv = rate % *prate ? 1 : bestdiv; + + bestdiv = min_t(u32, bestdiv, divider->max_div); *prate = rate * bestdiv; return rate; -- GitLab From b8c1049c68d634a412ed5980ae666ed7c8839305 Mon Sep 17 00:00:00 2001 From: Tejas Patel Date: Mon, 2 Mar 2020 13:50:41 -0800 Subject: [PATCH 0683/1971] clk: zynqmp: Fix divider2 calculation zynqmp_get_divider2_val() calculates, divider value of type DIV2 clock, considering best possible combination of DIV1 and DIV2. To find best possible values of DIV1 and DIV2, DIV1's parent rate should be consider and not DIV2's parent rate since it would rate of div1 clock. Consider a below topology, out_clk->div2_clk->div1_clk->fixed_parent where out_clk = (fixed_parent/div1_clk) / div2_clk, so parent clock of div1_clk (i.e. out_clk) should be divided by div1_clk and div2_clk. Existing code divides parent rate of div2_clk's clock instead of div1_clk's parent rate, which is wrong. Fix the same by considering div1's parent clock rate. 
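A made-up numerical example: with fixed_parent at 1000 MHz and div1_clk
currently dividing by 4, div2_clk's parent rate is 250 MHz. Searching
the DIV1/DIV2 combinations against 250 MHz applies the DIV1 division a
second time, so ((parent_rate / div1) / div2) no longer corresponds to
the real out_clk = (fixed_parent / div1) / div2. Starting the search
from div1_clk's parent rate (1000 MHz) keeps the two expressions
identical.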
Fixes: 4ebd92d2e228 ("clk: zynqmp: Fix divider calculation") Signed-off-by: Tejas Patel Signed-off-by: Jolly Shah Link: https://lkml.kernel.org/r/1583185843-20707-3-git-send-email-jolly.shah@xilinx.com Signed-off-by: Stephen Boyd --- drivers/clk/zynqmp/divider.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/drivers/clk/zynqmp/divider.c b/drivers/clk/zynqmp/divider.c index 5c41ddbee6462..f7b35459ad0bb 100644 --- a/drivers/clk/zynqmp/divider.c +++ b/drivers/clk/zynqmp/divider.c @@ -111,23 +111,30 @@ static unsigned long zynqmp_clk_divider_recalc_rate(struct clk_hw *hw, static void zynqmp_get_divider2_val(struct clk_hw *hw, unsigned long rate, - unsigned long parent_rate, struct zynqmp_clk_divider *divider, int *bestdiv) { int div1; int div2; long error = LONG_MAX; - struct clk_hw *parent_hw = clk_hw_get_parent(hw); - struct zynqmp_clk_divider *pdivider = to_zynqmp_clk_divider(parent_hw); + unsigned long div1_prate; + struct clk_hw *div1_parent_hw; + struct clk_hw *div2_parent_hw = clk_hw_get_parent(hw); + struct zynqmp_clk_divider *pdivider = + to_zynqmp_clk_divider(div2_parent_hw); if (!pdivider) return; + div1_parent_hw = clk_hw_get_parent(div2_parent_hw); + if (!div1_parent_hw) + return; + + div1_prate = clk_hw_get_rate(div1_parent_hw); *bestdiv = 1; for (div1 = 1; div1 <= pdivider->max_div;) { for (div2 = 1; div2 <= divider->max_div;) { - long new_error = ((parent_rate / div1) / div2) - rate; + long new_error = ((div1_prate / div1) / div2) - rate; if (abs(new_error) < abs(error)) { *bestdiv = div2; @@ -192,7 +199,7 @@ static long zynqmp_clk_divider_round_rate(struct clk_hw *hw, */ if (div_type == TYPE_DIV2 && (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) { - zynqmp_get_divider2_val(hw, rate, *prate, divider, &bestdiv); + zynqmp_get_divider2_val(hw, rate, divider, &bestdiv); } if ((clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) && divider->is_frac) -- GitLab From 5268aa1c561f64c5af0daaaee87425ef9ce210d3 Mon Sep 17 00:00:00 2001 From: Rajan Vaja Date: Mon, 2 Mar 2020 13:50:42 -0800 Subject: [PATCH 0684/1971] clk: zynqmp: Fix invalid clock name queries The clock driver makes EEMI call to get the name of invalid clk when executing versal_get_clock_info() function. This results in error messages. Added check for validating clock before saving clock attribute and calling zynqmp_pm_clock_get_name() in versal_get_clock_info() function. Signed-off-by: Rajan Vaja Signed-off-by: Tejas Patel Signed-off-by: Jolly Shah Link: https://lkml.kernel.org/r/1583185843-20707-4-git-send-email-jolly.shah@xilinx.com Signed-off-by: Stephen Boyd --- drivers/clk/zynqmp/clkc.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/clk/zynqmp/clkc.c b/drivers/clk/zynqmp/clkc.c index 10e89f23880ba..3e83c51d65a63 100644 --- a/drivers/clk/zynqmp/clkc.c +++ b/drivers/clk/zynqmp/clkc.c @@ -663,6 +663,11 @@ static void zynqmp_get_clock_info(void) continue; clock[i].valid = FIELD_GET(CLK_ATTR_VALID, attr.attr[0]); + /* skip query for Invalid clock */ + ret = zynqmp_is_valid_clock(i); + if (ret != CLK_ATTR_VALID) + continue; + clock[i].type = FIELD_GET(CLK_ATTR_TYPE, attr.attr[0]) ? 
CLK_TYPE_EXTERNAL : CLK_TYPE_OUTPUT; -- GitLab From 58b0fb86260063f86afecaebf4056c876fff2a19 Mon Sep 17 00:00:00 2001 From: Quanyang Wang Date: Mon, 2 Mar 2020 13:50:43 -0800 Subject: [PATCH 0685/1971] clk: zynqmp: fix memory leak in zynqmp_register_clocks This is detected by kmemleak running on zcu102 board: unreferenced object 0xffffffc877e48180 (size 128): comm "swapper/0", pid 1, jiffies 4294892909 (age 315.436s) hex dump (first 32 bytes): 64 70 5f 76 69 64 65 6f 5f 72 65 66 5f 64 69 76 dp_video_ref_div 31 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 1............... backtrace: [<00000000c9be883b>] __kmalloc_track_caller+0x200/0x380 [<00000000f02c3809>] kvasprintf+0x7c/0x100 [<00000000e51dde4d>] kasprintf+0x60/0x80 [<0000000092298b05>] zynqmp_register_clocks+0x29c/0x398 [<00000000faaff182>] zynqmp_clock_probe+0x3cc/0x4c0 [<000000005f5986f0>] platform_drv_probe+0x58/0xa8 [<00000000d5810136>] really_probe+0xd8/0x2a8 [<00000000f5b671be>] driver_probe_device+0x5c/0x100 [<0000000038f91fcf>] __device_attach_driver+0x98/0xb8 [<000000008a3f2ac2>] bus_for_each_drv+0x74/0xd8 [<000000001cb2783d>] __device_attach+0xe0/0x140 [<00000000c268031b>] device_initial_probe+0x24/0x30 [<000000006998de4b>] bus_probe_device+0x9c/0xa8 [<00000000647ae6ff>] device_add+0x3c0/0x610 [<0000000071c14bb8>] of_device_add+0x40/0x50 [<000000004bb5d132>] of_platform_device_create_pdata+0xbc/0x138 This is because that when num_nodes is larger than 1, clk_out is allocated using kasprintf for these nodes but only the last node's clk_out is freed. Signed-off-by: Quanyang Wang Signed-off-by: Michal Simek Signed-off-by: Tejas Patel Signed-off-by: Jolly Shah Link: https://lkml.kernel.org/r/1583185843-20707-5-git-send-email-jolly.shah@xilinx.com Signed-off-by: Stephen Boyd --- drivers/clk/zynqmp/clkc.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/drivers/clk/zynqmp/clkc.c b/drivers/clk/zynqmp/clkc.c index 3e83c51d65a63..e8b2cf2053770 100644 --- a/drivers/clk/zynqmp/clkc.c +++ b/drivers/clk/zynqmp/clkc.c @@ -558,7 +558,7 @@ static struct clk_hw *zynqmp_register_clk_topology(int clk_id, char *clk_name, { int j; u32 num_nodes, clk_dev_id; - char *clk_out = NULL; + char *clk_out[MAX_NODES]; struct clock_topology *nodes; struct clk_hw *hw = NULL; @@ -572,16 +572,16 @@ static struct clk_hw *zynqmp_register_clk_topology(int clk_id, char *clk_name, * Intermediate clock names are postfixed with type of clock. */ if (j != (num_nodes - 1)) { - clk_out = kasprintf(GFP_KERNEL, "%s%s", clk_name, + clk_out[j] = kasprintf(GFP_KERNEL, "%s%s", clk_name, clk_type_postfix[nodes[j].type]); } else { - clk_out = kasprintf(GFP_KERNEL, "%s", clk_name); + clk_out[j] = kasprintf(GFP_KERNEL, "%s", clk_name); } if (!clk_topology[nodes[j].type]) continue; - hw = (*clk_topology[nodes[j].type])(clk_out, clk_dev_id, + hw = (*clk_topology[nodes[j].type])(clk_out[j], clk_dev_id, parent_names, num_parents, &nodes[j]); @@ -590,9 +590,12 @@ static struct clk_hw *zynqmp_register_clk_topology(int clk_id, char *clk_name, __func__, clk_dev_id, clk_name, PTR_ERR(hw)); - parent_names[0] = clk_out; + parent_names[0] = clk_out[j]; } - kfree(clk_out); + + for (j = 0; j < num_nodes; j++) + kfree(clk_out[j]); + return hw; } -- GitLab From e605fa9c4a0c1218e5604b42bef59de0a3a4f813 Mon Sep 17 00:00:00 2001 From: Rajan Vaja Date: Thu, 12 Mar 2020 14:31:38 -0700 Subject: [PATCH 0686/1971] clk: zynqmp: Add support for custom type flags Store extra custom type flags received from firmware. 
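As a rough standalone sketch of what is stored here, the 32-bit topology response word can be unpacked into its fields as shown below; the struct and helper names are made up for the example, and the bit ranges follow the CLK_TOPOLOGY_* masks added in the diff that follows:

/*
 * Illustrative decode of one topology response word: type in bits 3:0,
 * custom type flags in bits 7:4, flags in bits 23:8, type flags in
 * bits 31:24. Names here are examples only, not the driver's API.
 */
#include <stdint.h>
#include <stdio.h>

struct topo_node {
	uint32_t type;             /* bits 3:0  */
	uint32_t custom_type_flag; /* bits 7:4  */
	uint32_t flag;             /* bits 23:8 */
	uint32_t type_flag;        /* bits 31:24 */
};

static struct topo_node decode_topology_word(uint32_t word)
{
	struct topo_node node = {
		.type             = word & 0xf,
		.custom_type_flag = (word >> 4) & 0xf,
		.flag             = (word >> 8) & 0xffff,
		.type_flag        = (word >> 24) & 0xff,
	};
	return node;
}

int main(void)
{
	struct topo_node n = decode_topology_word(0x04000014); /* example word */

	printf("type=%u custom=%u flag=0x%x type_flag=0x%x\n",
	       (unsigned)n.type, (unsigned)n.custom_type_flag,
	       (unsigned)n.flag, (unsigned)n.type_flag);
	return 0;
}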
Signed-off-by: Rajan Vaja Signed-off-by: Tejas Patel Signed-off-by: Jolly Shah Link: https://lkml.kernel.org/r/1584048699-24186-2-git-send-email-jolly.shah@xilinx.com Signed-off-by: Stephen Boyd --- drivers/clk/zynqmp/clk-zynqmp.h | 1 + drivers/clk/zynqmp/clkc.c | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/drivers/clk/zynqmp/clk-zynqmp.h b/drivers/clk/zynqmp/clk-zynqmp.h index fec9a15c87862..5beeb41b29fa1 100644 --- a/drivers/clk/zynqmp/clk-zynqmp.h +++ b/drivers/clk/zynqmp/clk-zynqmp.h @@ -30,6 +30,7 @@ struct clock_topology { u32 type; u32 flag; u32 type_flag; + u8 custom_type_flag; }; struct clk_hw *zynqmp_clk_register_pll(const char *name, u32 clk_id, diff --git a/drivers/clk/zynqmp/clkc.c b/drivers/clk/zynqmp/clkc.c index e8b2cf2053770..bfc1e7dd0f587 100644 --- a/drivers/clk/zynqmp/clkc.c +++ b/drivers/clk/zynqmp/clkc.c @@ -84,6 +84,7 @@ struct name_resp { struct topology_resp { #define CLK_TOPOLOGY_TYPE GENMASK(3, 0) +#define CLK_TOPOLOGY_CUSTOM_TYPE_FLAGS GENMASK(7, 4) #define CLK_TOPOLOGY_FLAGS GENMASK(23, 8) #define CLK_TOPOLOGY_TYPE_FLAGS GENMASK(31, 24) u32 topology[CLK_GET_TOPOLOGY_RESP_WORDS]; @@ -396,6 +397,9 @@ static int __zynqmp_clock_get_topology(struct clock_topology *topology, topology[*nnodes].type_flag = FIELD_GET(CLK_TOPOLOGY_TYPE_FLAGS, response->topology[i]); + topology[*nnodes].custom_type_flag = + FIELD_GET(CLK_TOPOLOGY_CUSTOM_TYPE_FLAGS, + response->topology[i]); (*nnodes)++; } -- GitLab From 2ce7e495dab4647055f6cf300bc66870dc8a7cab Mon Sep 17 00:00:00 2001 From: Tejas Patel Date: Thu, 12 Mar 2020 14:31:39 -0700 Subject: [PATCH 0687/1971] clk: zynqmp: Update fraction clock check from custom type flags Older firmware versions set BIT(13) in clkflag to mark a divider as a fractional divider. Updated firmware versions set BIT(4) in the type flags to mark a divider as a fractional divider, since BIT(13) is defined as CLK_DUTY_CYCLE_PARENT in the common clk framework flags. To support both old and new firmware versions, check BIT(13) of clkflag and BIT(4) of type_flag to determine whether a divider is fractional. To maintain compatibility, BIT(13) of clkflag will not be used by firmware for any purpose in the future and will be marked as unused.
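A minimal sketch of the resulting combined check; the macro and function names are illustrative stand-ins for the CLK_FRAC/CUSTOM_FLAG_CLK_FRAC handling shown in the diff below:

/*
 * Combined fractional-divider check: honour the legacy BIT(13) in the
 * firmware clkflag as well as BIT(0) of the extracted custom type
 * flags. Standalone example with illustrative names.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LEGACY_CLK_FRAC      (1u << 13) /* old firmware: clkflag bit */
#define CUSTOM_FLAG_CLK_FRAC (1u << 0)  /* new firmware: custom type flag bit */

static bool divider_is_frac(uint32_t clkflag, uint32_t custom_type_flag)
{
	return (clkflag & LEGACY_CLK_FRAC) ||
	       (custom_type_flag & CUSTOM_FLAG_CLK_FRAC);
}

int main(void)
{
	printf("old fw: %d\n", divider_is_frac(1u << 13, 0)); /* prints 1 */
	printf("new fw: %d\n", divider_is_frac(0, 1u << 0));  /* prints 1 */
	printf("plain:  %d\n", divider_is_frac(0, 0));        /* prints 0 */
	return 0;
}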
Signed-off-by: Tejas Patel Signed-off-by: Rajan Vaja Signed-off-by: Jolly Shah Link: https://lkml.kernel.org/r/1584048699-24186-3-git-send-email-jolly.shah@xilinx.com Signed-off-by: Stephen Boyd --- drivers/clk/zynqmp/divider.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/clk/zynqmp/divider.c b/drivers/clk/zynqmp/divider.c index f7b35459ad0bb..79a294488e845 100644 --- a/drivers/clk/zynqmp/divider.c +++ b/drivers/clk/zynqmp/divider.c @@ -25,7 +25,8 @@ #define to_zynqmp_clk_divider(_hw) \ container_of(_hw, struct zynqmp_clk_divider, hw) -#define CLK_FRAC BIT(13) /* has a fractional parent */ +#define CLK_FRAC BIT(13) /* has a fractional parent */ +#define CUSTOM_FLAG_CLK_FRAC BIT(0) /* has a fractional parent in custom type flag */ /** * struct zynqmp_clk_divider - adjustable divider clock @@ -320,7 +321,8 @@ struct clk_hw *zynqmp_clk_register_divider(const char *name, init.num_parents = 1; /* struct clk_divider assignments */ - div->is_frac = !!(nodes->flag & CLK_FRAC); + div->is_frac = !!((nodes->flag & CLK_FRAC) | + (nodes->custom_type_flag & CUSTOM_FLAG_CLK_FRAC)); div->flags = nodes->type_flag; div->hw.init = &init; div->clk_id = clk_id; -- GitLab From 32e594f9a63f6bc7fdd396d9bc0a77b18208332b Mon Sep 17 00:00:00 2001 From: Chen Zhou Date: Wed, 1 Apr 2020 21:09:03 +0800 Subject: [PATCH 0688/1971] KVM: PPC: Book3S HV: Remove redundant NULL check The free function kfree() already performs the NULL check, so the additional check is unnecessary; just remove it. Signed-off-by: Chen Zhou Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s_hv_nested.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c index dc97e5be76f61..cad324312040b 100644 --- a/arch/powerpc/kvm/book3s_hv_nested.c +++ b/arch/powerpc/kvm/book3s_hv_nested.c @@ -1416,8 +1416,7 @@ static long int __kvmhv_nested_page_fault(struct kvm_run *run, rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; ret = kvmppc_create_pte(kvm, gp->shadow_pgtable, pte, n_gpa, level, mmu_seq, gp->shadow_lpid, rmapp, &n_rmap); - if (n_rmap) - kfree(n_rmap); + kfree(n_rmap); if (ret == -EAGAIN) ret = RESUME_GUEST; /* Let the guest try again */ -- GitLab From 512721d2fcdb1bcfd87a0ffa7822c0ea37e3c65f Mon Sep 17 00:00:00 2001 From: Laurent Dufour Date: Thu, 16 Apr 2020 18:27:15 +0200 Subject: [PATCH 0689/1971] KVM: PPC: Book3S HV: Read ibm,secure-memory nodes The newly introduced ibm,secure-memory nodes supersede the ibm,uv-firmware secure-memory-ranges property. Firmware will no longer expose the secure-memory-ranges property, so read the new nodes first and, if none are found, fall back to the older property. Signed-off-by: Laurent Dufour Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s_hv_uvmem.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c index 76d05c71fb1ff..e2cb3ce4931ca 100644 --- a/arch/powerpc/kvm/book3s_hv_uvmem.c +++ b/arch/powerpc/kvm/book3s_hv_uvmem.c @@ -749,6 +749,20 @@ static u64 kvmppc_get_secmem_size(void) const __be32 *prop; u64 size = 0; + /* + * First try the new ibm,secure-memory nodes which supersede the + * secure-memory-ranges property. + * If we found some, no need to read the deprecated ones.
+ */ + for_each_compatible_node(np, NULL, "ibm,secure-memory") { + prop = of_get_property(np, "reg", &len); + if (!prop) + continue; + size += of_read_number(prop + 2, 2); + } + if (size) + return size; + np = of_find_compatible_node(NULL, NULL, "ibm,uv-firmware"); if (!np) goto out; -- GitLab From 2610a57f64d55b5d09340c2b716cf20922b88605 Mon Sep 17 00:00:00 2001 From: Tianjia Zhang Date: Mon, 27 Apr 2020 12:35:10 +0800 Subject: [PATCH 0690/1971] KVM: PPC: Remove redundant kvm_run from vcpu_arch The 'kvm_run' field already exists in the 'vcpu' structure, which is the same structure as the 'kvm_run' in the 'vcpu_arch' and should be deleted. Signed-off-by: Tianjia Zhang Reviewed-by: Vitaly Kuznetsov Reviewed-by: Paul Mackerras Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/kvm_host.h | 1 - arch/powerpc/kvm/book3s_hv.c | 6 ++---- arch/powerpc/kvm/book3s_hv_nested.c | 3 +-- 3 files changed, 3 insertions(+), 7 deletions(-) diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 337047ba4a566..7e2d061d04451 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -795,7 +795,6 @@ struct kvm_vcpu_arch { struct mmio_hpte_cache_entry *pgfault_cache; struct task_struct *run_task; - struct kvm_run *kvm_run; spinlock_t vpa_update_lock; struct kvmppc_vpa vpa; diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 7f59c47a5b9d3..50fbfc1fc34b2 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -2932,7 +2932,7 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master) ret = RESUME_GUEST; if (vcpu->arch.trap) - ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu, + ret = kvmppc_handle_exit_hv(vcpu->run, vcpu, vcpu->arch.run_task); vcpu->arch.ret = ret; @@ -3917,7 +3917,6 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) spin_lock(&vc->lock); vcpu->arch.ceded = 0; vcpu->arch.run_task = current; - vcpu->arch.kvm_run = kvm_run; vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb()); vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; vcpu->arch.busy_preempt = TB_NIL; @@ -3970,7 +3969,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) if (signal_pending(v->arch.run_task)) { kvmppc_remove_runnable(vc, v); v->stat.signal_exits++; - v->arch.kvm_run->exit_reason = KVM_EXIT_INTR; + v->run->exit_reason = KVM_EXIT_INTR; v->arch.ret = -EINTR; wake_up(&v->arch.cpu_run); } @@ -4046,7 +4045,6 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, vc = vcpu->arch.vcore; vcpu->arch.ceded = 0; vcpu->arch.run_task = current; - vcpu->arch.kvm_run = kvm_run; vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb()); vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; vcpu->arch.busy_preempt = TB_NIL; diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c index cad324312040b..d31c2e5c8b2ae 100644 --- a/arch/powerpc/kvm/book3s_hv_nested.c +++ b/arch/powerpc/kvm/book3s_hv_nested.c @@ -290,8 +290,7 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu) r = RESUME_HOST; break; } - r = kvmhv_run_single_vcpu(vcpu->arch.kvm_run, vcpu, hdec_exp, - lpcr); + r = kvmhv_run_single_vcpu(vcpu->run, vcpu, hdec_exp, lpcr); } while (is_kvmppc_resume_guest(r)); /* save L2 state for return */ -- GitLab From 8c99d34578628b50233210dae5fc9600eea20b8e Mon Sep 17 00:00:00 2001 From: Tianjia Zhang Date: Mon, 27 Apr 2020 12:35:11 +0800 Subject: [PATCH 0691/1971] KVM: PPC: Clean up redundant 'kvm_run' parameters In the current kvm 
version, 'kvm_run' has been included in the 'kvm_vcpu' structure. For historical reasons, many kvm-related function parameters retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time. This patch does a unified cleanup of these remaining redundant parameters. Signed-off-by: Tianjia Zhang Reviewed-by: Vitaly Kuznetsov Reviewed-by: Paul Mackerras Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/kvm_book3s.h | 16 +++--- arch/powerpc/include/asm/kvm_ppc.h | 27 +++++---- arch/powerpc/kvm/book3s.c | 4 +- arch/powerpc/kvm/book3s.h | 2 +- arch/powerpc/kvm/book3s_64_mmu_hv.c | 12 ++-- arch/powerpc/kvm/book3s_64_mmu_radix.c | 4 +- arch/powerpc/kvm/book3s_emulate.c | 10 ++-- arch/powerpc/kvm/book3s_hv.c | 60 ++++++++++---------- arch/powerpc/kvm/book3s_hv_nested.c | 11 ++-- arch/powerpc/kvm/book3s_paired_singles.c | 72 ++++++++++++------------ arch/powerpc/kvm/book3s_pr.c | 30 +++++----- arch/powerpc/kvm/booke.c | 36 ++++++------ arch/powerpc/kvm/booke.h | 8 +-- arch/powerpc/kvm/booke_emulate.c | 2 +- arch/powerpc/kvm/e500_emulate.c | 15 +++-- arch/powerpc/kvm/emulate.c | 10 ++-- arch/powerpc/kvm/emulate_loadstore.c | 32 +++++------ arch/powerpc/kvm/powerpc.c | 72 ++++++++++++------------ arch/powerpc/kvm/trace_hv.h | 6 +- 19 files changed, 212 insertions(+), 217 deletions(-) diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index 6e5d85ba588d5..e1772e4bf7108 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -155,12 +155,11 @@ extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte) extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr); extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size); extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu); -extern int kvmppc_book3s_hv_page_fault(struct kvm_run *run, - struct kvm_vcpu *vcpu, unsigned long addr, - unsigned long status); +extern int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu, + unsigned long addr, unsigned long status); extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v, unsigned long valid); -extern int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu, +extern int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu, unsigned long gpa, gva_t ea, int is_store); extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte); @@ -174,8 +173,7 @@ extern void kvmppc_mmu_hpte_sysexit(void); extern int kvmppc_mmu_hv_init(void); extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc); -extern int kvmppc_book3s_radix_page_fault(struct kvm_run *run, - struct kvm_vcpu *vcpu, +extern int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu, unsigned long ea, unsigned long dsisr); extern unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid, gva_t eaddr, void *to, void *from, @@ -234,7 +232,7 @@ extern void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac); extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper, u32 val); extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr); -extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu); +extern int kvmppc_emulate_paired_single(struct kvm_vcpu *vcpu); extern kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing, bool *writable); extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev, @@ -300,12 +298,12 
@@ void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1); void kvmhv_release_all_nested(struct kvm *kvm); long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu); long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu); -int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu, +int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr); void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr); void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu, struct hv_guest_state *hr); -long int kvmhv_nested_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu); +long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu); void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac); diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 94f5a32acaf1b..ccf66b3a4c1db 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -58,28 +58,28 @@ enum xlate_readwrite { XLATE_WRITE /* check for write permissions */ }; -extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); -extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); +extern int kvmppc_vcpu_run(struct kvm_vcpu *vcpu); +extern int __kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu); extern void kvmppc_handler_highmem(void); extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu); -extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, +extern int kvmppc_handle_load(struct kvm_vcpu *vcpu, unsigned int rt, unsigned int bytes, int is_default_endian); -extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, +extern int kvmppc_handle_loads(struct kvm_vcpu *vcpu, unsigned int rt, unsigned int bytes, int is_default_endian); -extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu, +extern int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu, unsigned int rt, unsigned int bytes, int is_default_endian, int mmio_sign_extend); -extern int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu, +extern int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu, unsigned int rt, unsigned int bytes, int is_default_endian); -extern int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu, +extern int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu, unsigned int rs, unsigned int bytes, int is_default_endian); -extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, +extern int kvmppc_handle_store(struct kvm_vcpu *vcpu, u64 val, unsigned int bytes, int is_default_endian); -extern int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu, +extern int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu, int rs, unsigned int bytes, int is_default_endian); @@ -90,10 +90,9 @@ extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data); extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data); -extern int kvmppc_emulate_instruction(struct kvm_run *run, - struct kvm_vcpu *vcpu); +extern int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu); extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu); -extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu); +extern int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu); extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu); extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb); extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu); @@ -267,7 +266,7 @@ struct kvmppc_ops { void 
(*vcpu_put)(struct kvm_vcpu *vcpu); void (*inject_interrupt)(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags); void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr); - int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu); + int (*vcpu_run)(struct kvm_vcpu *vcpu); int (*vcpu_create)(struct kvm_vcpu *vcpu); void (*vcpu_free)(struct kvm_vcpu *vcpu); int (*check_requests)(struct kvm_vcpu *vcpu); @@ -291,7 +290,7 @@ struct kvmppc_ops { int (*init_vm)(struct kvm *kvm); void (*destroy_vm)(struct kvm *kvm); int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info); - int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu, + int (*emulate_op)(struct kvm_vcpu *vcpu, unsigned int inst, int *advance); int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val); int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val); diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 37508a356f283..41fedec69ac35 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -755,9 +755,9 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr) } EXPORT_SYMBOL_GPL(kvmppc_set_msr); -int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) +int kvmppc_vcpu_run(struct kvm_vcpu *vcpu) { - return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu); + return vcpu->kvm->arch.kvm_ops->vcpu_run(vcpu); } int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, diff --git a/arch/powerpc/kvm/book3s.h b/arch/powerpc/kvm/book3s.h index eae259ee49af6..9b6323ec8e607 100644 --- a/arch/powerpc/kvm/book3s.h +++ b/arch/powerpc/kvm/book3s.h @@ -18,7 +18,7 @@ extern void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte); extern int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu); extern void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu); -extern int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, +extern int kvmppc_core_emulate_op_pr(struct kvm_vcpu *vcpu, unsigned int inst, int *advance); extern int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val); diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 2b35f9bcf8928..36a07656ebbb3 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -413,7 +413,7 @@ static int instruction_is_store(unsigned int instr) return (instr & mask) != 0; } -int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu, +int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu, unsigned long gpa, gva_t ea, int is_store) { u32 last_inst; @@ -473,10 +473,10 @@ int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu, vcpu->arch.paddr_accessed = gpa; vcpu->arch.vaddr_accessed = ea; - return kvmppc_emulate_mmio(run, vcpu); + return kvmppc_emulate_mmio(vcpu); } -int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, +int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu, unsigned long ea, unsigned long dsisr) { struct kvm *kvm = vcpu->kvm; @@ -499,7 +499,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, pte_t pte, *ptep; if (kvm_is_radix(kvm)) - return kvmppc_book3s_radix_page_fault(run, vcpu, ea, dsisr); + return kvmppc_book3s_radix_page_fault(vcpu, ea, dsisr); /* * Real-mode code has already searched the HPT and found the @@ -519,7 +519,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, gpa_base = r & HPTE_R_RPN & ~(psize - 1); gfn_base = gpa_base >> PAGE_SHIFT; gpa = gpa_base | (ea & (psize - 1)); - return 
kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, + return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, dsisr & DSISR_ISSTORE); } } @@ -555,7 +555,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, /* No memslot means it's an emulated MMIO region */ if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) - return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, + return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, dsisr & DSISR_ISSTORE); /* diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index aa12cd4078b32..16c947bd5e87c 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -887,7 +887,7 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu, return ret; } -int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, +int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu, unsigned long ea, unsigned long dsisr) { struct kvm *kvm = vcpu->kvm; @@ -933,7 +933,7 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, kvmppc_core_queue_data_storage(vcpu, ea, dsisr); return RESUME_GUEST; } - return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, writing); + return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, writing); } if (memslot->flags & KVM_MEM_READONLY) { diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c index dad71d276b914..0effd48c8f4d1 100644 --- a/arch/powerpc/kvm/book3s_emulate.c +++ b/arch/powerpc/kvm/book3s_emulate.c @@ -235,7 +235,7 @@ void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val) #endif -int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, +int kvmppc_core_emulate_op_pr(struct kvm_vcpu *vcpu, unsigned int inst, int *advance) { int emulated = EMULATE_DONE; @@ -371,13 +371,13 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) break; - run->papr_hcall.nr = cmd; + vcpu->run->papr_hcall.nr = cmd; for (i = 0; i < 9; ++i) { ulong gpr = kvmppc_get_gpr(vcpu, 4 + i); - run->papr_hcall.args[i] = gpr; + vcpu->run->papr_hcall.args[i] = gpr; } - run->exit_reason = KVM_EXIT_PAPR_HCALL; + vcpu->run->exit_reason = KVM_EXIT_PAPR_HCALL; vcpu->arch.hcall_needed = 1; emulated = EMULATE_EXIT_USER; break; @@ -629,7 +629,7 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, } if (emulated == EMULATE_FAIL) - emulated = kvmppc_emulate_paired_single(run, vcpu); + emulated = kvmppc_emulate_paired_single(vcpu); return emulated; } diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 50fbfc1fc34b2..e9aa5555d6901 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -1154,8 +1154,7 @@ static int kvmppc_hcall_impl_hv(unsigned long cmd) return kvmppc_hcall_impl_hv_realmode(cmd); } -static int kvmppc_emulate_debug_inst(struct kvm_run *run, - struct kvm_vcpu *vcpu) +static int kvmppc_emulate_debug_inst(struct kvm_vcpu *vcpu) { u32 last_inst; @@ -1169,8 +1168,8 @@ static int kvmppc_emulate_debug_inst(struct kvm_run *run, } if (last_inst == KVMPPC_INST_SW_BREAKPOINT) { - run->exit_reason = KVM_EXIT_DEBUG; - run->debug.arch.address = kvmppc_get_pc(vcpu); + vcpu->run->exit_reason = KVM_EXIT_DEBUG; + vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu); return RESUME_HOST; } else { kvmppc_core_queue_program(vcpu, SRR1_PROGILL); @@ -1271,9 +1270,10 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu) return RESUME_GUEST; } -static int kvmppc_handle_exit_hv(struct 
kvm_run *run, struct kvm_vcpu *vcpu, +static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu, struct task_struct *tsk) { + struct kvm_run *run = vcpu->run; int r = RESUME_HOST; vcpu->stat.sum_exits++; @@ -1408,7 +1408,7 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, swab32(vcpu->arch.emul_inst) : vcpu->arch.emul_inst; if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) { - r = kvmppc_emulate_debug_inst(run, vcpu); + r = kvmppc_emulate_debug_inst(vcpu); } else { kvmppc_core_queue_program(vcpu, SRR1_PROGILL); r = RESUME_GUEST; @@ -1460,7 +1460,7 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, return r; } -static int kvmppc_handle_nested_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) +static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu) { int r; int srcu_idx; @@ -1518,7 +1518,7 @@ static int kvmppc_handle_nested_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) */ case BOOK3S_INTERRUPT_H_DATA_STORAGE: srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); - r = kvmhv_nested_page_fault(run, vcpu); + r = kvmhv_nested_page_fault(vcpu); srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); break; case BOOK3S_INTERRUPT_H_INST_STORAGE: @@ -1528,7 +1528,7 @@ static int kvmppc_handle_nested_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE) vcpu->arch.fault_dsisr |= DSISR_ISSTORE; srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); - r = kvmhv_nested_page_fault(run, vcpu); + r = kvmhv_nested_page_fault(vcpu); srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); break; @@ -2932,7 +2932,7 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master) ret = RESUME_GUEST; if (vcpu->arch.trap) - ret = kvmppc_handle_exit_hv(vcpu->run, vcpu, + ret = kvmppc_handle_exit_hv(vcpu, vcpu->arch.run_task); vcpu->arch.ret = ret; @@ -3897,15 +3897,16 @@ static int kvmhv_setup_mmu(struct kvm_vcpu *vcpu) return r; } -static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) +static int kvmppc_run_vcpu(struct kvm_vcpu *vcpu) { + struct kvm_run *run = vcpu->run; int n_ceded, i, r; struct kvmppc_vcore *vc; struct kvm_vcpu *v; trace_kvmppc_run_vcpu_enter(vcpu); - kvm_run->exit_reason = 0; + run->exit_reason = 0; vcpu->arch.ret = RESUME_GUEST; vcpu->arch.trap = 0; kvmppc_update_vpas(vcpu); @@ -3949,8 +3950,8 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) r = kvmhv_setup_mmu(vcpu); spin_lock(&vc->lock); if (r) { - kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; - kvm_run->fail_entry. + run->exit_reason = KVM_EXIT_FAIL_ENTRY; + run->fail_entry. 
hardware_entry_failure_reason = 0; vcpu->arch.ret = r; break; @@ -4010,7 +4011,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { kvmppc_remove_runnable(vc, vcpu); vcpu->stat.signal_exits++; - kvm_run->exit_reason = KVM_EXIT_INTR; + run->exit_reason = KVM_EXIT_INTR; vcpu->arch.ret = -EINTR; } @@ -4021,15 +4022,15 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) wake_up(&v->arch.cpu_run); } - trace_kvmppc_run_vcpu_exit(vcpu, kvm_run); + trace_kvmppc_run_vcpu_exit(vcpu); spin_unlock(&vc->lock); return vcpu->arch.ret; } -int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, - struct kvm_vcpu *vcpu, u64 time_limit, +int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr) { + struct kvm_run *run = vcpu->run; int trap, r, pcpu; int srcu_idx, lpid; struct kvmppc_vcore *vc; @@ -4038,7 +4039,7 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, trace_kvmppc_run_vcpu_enter(vcpu); - kvm_run->exit_reason = 0; + run->exit_reason = 0; vcpu->arch.ret = RESUME_GUEST; vcpu->arch.trap = 0; @@ -4162,9 +4163,9 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, r = RESUME_GUEST; if (trap) { if (!nested) - r = kvmppc_handle_exit_hv(kvm_run, vcpu, current); + r = kvmppc_handle_exit_hv(vcpu, current); else - r = kvmppc_handle_nested_exit(kvm_run, vcpu); + r = kvmppc_handle_nested_exit(vcpu); } vcpu->arch.ret = r; @@ -4174,7 +4175,7 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, while (vcpu->arch.ceded && !kvmppc_vcpu_woken(vcpu)) { if (signal_pending(current)) { vcpu->stat.signal_exits++; - kvm_run->exit_reason = KVM_EXIT_INTR; + run->exit_reason = KVM_EXIT_INTR; vcpu->arch.ret = -EINTR; break; } @@ -4190,13 +4191,13 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, done: kvmppc_remove_runnable(vc, vcpu); - trace_kvmppc_run_vcpu_exit(vcpu, kvm_run); + trace_kvmppc_run_vcpu_exit(vcpu); return vcpu->arch.ret; sigpend: vcpu->stat.signal_exits++; - kvm_run->exit_reason = KVM_EXIT_INTR; + run->exit_reason = KVM_EXIT_INTR; vcpu->arch.ret = -EINTR; out: local_irq_enable(); @@ -4204,8 +4205,9 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, goto done; } -static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) +static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu) { + struct kvm_run *run = vcpu->run; int r; int srcu_idx; unsigned long ebb_regs[3] = {}; /* shut up GCC */ @@ -4289,10 +4291,10 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) */ if (kvm->arch.threads_indep && kvm_is_radix(kvm) && !no_mixing_hpt_and_radix) - r = kvmhv_run_single_vcpu(run, vcpu, ~(u64)0, + r = kvmhv_run_single_vcpu(vcpu, ~(u64)0, vcpu->arch.vcore->lpcr); else - r = kvmppc_run_vcpu(run, vcpu); + r = kvmppc_run_vcpu(vcpu); if (run->exit_reason == KVM_EXIT_PAPR_HCALL && !(vcpu->arch.shregs.msr & MSR_PR)) { @@ -4302,7 +4304,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) kvmppc_core_prepare_to_enter(vcpu); } else if (r == RESUME_PAGE_FAULT) { srcu_idx = srcu_read_lock(&kvm->srcu); - r = kvmppc_book3s_hv_page_fault(run, vcpu, + r = kvmppc_book3s_hv_page_fault(vcpu, vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); srcu_read_unlock(&kvm->srcu, srcu_idx); } else if (r == RESUME_PASSTHROUGH) { @@ -4976,7 +4978,7 @@ static void kvmppc_core_destroy_vm_hv(struct kvm *kvm) } /* We don't need to emulate any privileged instructions or dcbz */ -static int kvmppc_core_emulate_op_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, +static int 
kvmppc_core_emulate_op_hv(struct kvm_vcpu *vcpu, unsigned int inst, int *advance) { return EMULATE_FAIL; diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c index d31c2e5c8b2ae..2fda52b969dc5 100644 --- a/arch/powerpc/kvm/book3s_hv_nested.c +++ b/arch/powerpc/kvm/book3s_hv_nested.c @@ -290,7 +290,7 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu) r = RESUME_HOST; break; } - r = kvmhv_run_single_vcpu(vcpu->run, vcpu, hdec_exp, lpcr); + r = kvmhv_run_single_vcpu(vcpu, hdec_exp, lpcr); } while (is_kvmppc_resume_guest(r)); /* save L2 state for return */ @@ -1256,8 +1256,7 @@ static inline int kvmppc_radix_shift_to_level(int shift) } /* called with gp->tlb_lock held */ -static long int __kvmhv_nested_page_fault(struct kvm_run *run, - struct kvm_vcpu *vcpu, +static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu, struct kvm_nested_guest *gp) { struct kvm *kvm = vcpu->kvm; @@ -1340,7 +1339,7 @@ static long int __kvmhv_nested_page_fault(struct kvm_run *run, } /* passthrough of emulated MMIO case */ - return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, writing); + return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, writing); } if (memslot->flags & KVM_MEM_READONLY) { if (writing) { @@ -1426,13 +1425,13 @@ static long int __kvmhv_nested_page_fault(struct kvm_run *run, return RESUME_GUEST; } -long int kvmhv_nested_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu) +long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu) { struct kvm_nested_guest *gp = vcpu->arch.nested; long int ret; mutex_lock(&gp->tlb_lock); - ret = __kvmhv_nested_page_fault(run, vcpu, gp); + ret = __kvmhv_nested_page_fault(vcpu, gp); mutex_unlock(&gp->tlb_lock); return ret; } diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c index bf0282775e37a..a11436720a8ce 100644 --- a/arch/powerpc/kvm/book3s_paired_singles.c +++ b/arch/powerpc/kvm/book3s_paired_singles.c @@ -169,7 +169,7 @@ static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store) kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE); } -static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu, +static int kvmppc_emulate_fpr_load(struct kvm_vcpu *vcpu, int rs, ulong addr, int ls_type) { int emulated = EMULATE_FAIL; @@ -188,7 +188,7 @@ static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu, kvmppc_inject_pf(vcpu, addr, false); goto done_load; } else if (r == EMULATE_DO_MMIO) { - emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FPR | rs, + emulated = kvmppc_handle_load(vcpu, KVM_MMIO_REG_FPR | rs, len, 1); goto done_load; } @@ -213,7 +213,7 @@ done_load: return emulated; } -static int kvmppc_emulate_fpr_store(struct kvm_run *run, struct kvm_vcpu *vcpu, +static int kvmppc_emulate_fpr_store(struct kvm_vcpu *vcpu, int rs, ulong addr, int ls_type) { int emulated = EMULATE_FAIL; @@ -248,7 +248,7 @@ static int kvmppc_emulate_fpr_store(struct kvm_run *run, struct kvm_vcpu *vcpu, if (r < 0) { kvmppc_inject_pf(vcpu, addr, true); } else if (r == EMULATE_DO_MMIO) { - emulated = kvmppc_handle_store(run, vcpu, val, len, 1); + emulated = kvmppc_handle_store(vcpu, val, len, 1); } else { emulated = EMULATE_DONE; } @@ -259,7 +259,7 @@ static int kvmppc_emulate_fpr_store(struct kvm_run *run, struct kvm_vcpu *vcpu, return emulated; } -static int kvmppc_emulate_psq_load(struct kvm_run *run, struct kvm_vcpu *vcpu, +static int kvmppc_emulate_psq_load(struct kvm_vcpu *vcpu, int rs, ulong addr, bool w, int i) { int emulated = 
EMULATE_FAIL; @@ -279,12 +279,12 @@ static int kvmppc_emulate_psq_load(struct kvm_run *run, struct kvm_vcpu *vcpu, kvmppc_inject_pf(vcpu, addr, false); goto done_load; } else if ((r == EMULATE_DO_MMIO) && w) { - emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FPR | rs, + emulated = kvmppc_handle_load(vcpu, KVM_MMIO_REG_FPR | rs, 4, 1); vcpu->arch.qpr[rs] = tmp[1]; goto done_load; } else if (r == EMULATE_DO_MMIO) { - emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FQPR | rs, + emulated = kvmppc_handle_load(vcpu, KVM_MMIO_REG_FQPR | rs, 8, 1); goto done_load; } @@ -302,7 +302,7 @@ done_load: return emulated; } -static int kvmppc_emulate_psq_store(struct kvm_run *run, struct kvm_vcpu *vcpu, +static int kvmppc_emulate_psq_store(struct kvm_vcpu *vcpu, int rs, ulong addr, bool w, int i) { int emulated = EMULATE_FAIL; @@ -318,10 +318,10 @@ static int kvmppc_emulate_psq_store(struct kvm_run *run, struct kvm_vcpu *vcpu, if (r < 0) { kvmppc_inject_pf(vcpu, addr, true); } else if ((r == EMULATE_DO_MMIO) && w) { - emulated = kvmppc_handle_store(run, vcpu, tmp[0], 4, 1); + emulated = kvmppc_handle_store(vcpu, tmp[0], 4, 1); } else if (r == EMULATE_DO_MMIO) { u64 val = ((u64)tmp[0] << 32) | tmp[1]; - emulated = kvmppc_handle_store(run, vcpu, val, 8, 1); + emulated = kvmppc_handle_store(vcpu, val, 8, 1); } else { emulated = EMULATE_DONE; } @@ -618,7 +618,7 @@ static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc, return EMULATE_DONE; } -int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) +int kvmppc_emulate_paired_single(struct kvm_vcpu *vcpu) { u32 inst; enum emulation_result emulated = EMULATE_DONE; @@ -680,7 +680,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) int i = inst_get_field(inst, 17, 19); addr += get_d_signext(inst); - emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i); + emulated = kvmppc_emulate_psq_load(vcpu, ax_rd, addr, w, i); break; } case OP_PSQ_LU: @@ -690,7 +690,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) int i = inst_get_field(inst, 17, 19); addr += get_d_signext(inst); - emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i); + emulated = kvmppc_emulate_psq_load(vcpu, ax_rd, addr, w, i); if (emulated == EMULATE_DONE) kvmppc_set_gpr(vcpu, ax_ra, addr); @@ -703,7 +703,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) int i = inst_get_field(inst, 17, 19); addr += get_d_signext(inst); - emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i); + emulated = kvmppc_emulate_psq_store(vcpu, ax_rd, addr, w, i); break; } case OP_PSQ_STU: @@ -713,7 +713,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) int i = inst_get_field(inst, 17, 19); addr += get_d_signext(inst); - emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i); + emulated = kvmppc_emulate_psq_store(vcpu, ax_rd, addr, w, i); if (emulated == EMULATE_DONE) kvmppc_set_gpr(vcpu, ax_ra, addr); @@ -733,7 +733,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) int i = inst_get_field(inst, 22, 24); addr += kvmppc_get_gpr(vcpu, ax_rb); - emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i); + emulated = kvmppc_emulate_psq_load(vcpu, ax_rd, addr, w, i); break; } case OP_4X_PS_CMPO0: @@ -747,7 +747,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) int i = inst_get_field(inst, 22, 24); addr += kvmppc_get_gpr(vcpu, ax_rb); - emulated = 
kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i); + emulated = kvmppc_emulate_psq_load(vcpu, ax_rd, addr, w, i); if (emulated == EMULATE_DONE) kvmppc_set_gpr(vcpu, ax_ra, addr); @@ -824,7 +824,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) int i = inst_get_field(inst, 22, 24); addr += kvmppc_get_gpr(vcpu, ax_rb); - emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i); + emulated = kvmppc_emulate_psq_store(vcpu, ax_rd, addr, w, i); break; } case OP_4XW_PSQ_STUX: @@ -834,7 +834,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) int i = inst_get_field(inst, 22, 24); addr += kvmppc_get_gpr(vcpu, ax_rb); - emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i); + emulated = kvmppc_emulate_psq_store(vcpu, ax_rd, addr, w, i); if (emulated == EMULATE_DONE) kvmppc_set_gpr(vcpu, ax_ra, addr); @@ -922,7 +922,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) { ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d; - emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr, + emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd, addr, FPU_LS_SINGLE); break; } @@ -930,7 +930,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) { ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d; - emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr, + emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd, addr, FPU_LS_SINGLE); if (emulated == EMULATE_DONE) @@ -941,7 +941,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) { ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d; - emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr, + emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd, addr, FPU_LS_DOUBLE); break; } @@ -949,7 +949,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) { ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d; - emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr, + emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd, addr, FPU_LS_DOUBLE); if (emulated == EMULATE_DONE) @@ -960,7 +960,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) { ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d; - emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr, + emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd, addr, FPU_LS_SINGLE); break; } @@ -968,7 +968,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) { ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d; - emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr, + emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd, addr, FPU_LS_SINGLE); if (emulated == EMULATE_DONE) @@ -979,7 +979,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) { ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d; - emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr, + emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd, addr, FPU_LS_DOUBLE); break; } @@ -987,7 +987,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) { ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d; - emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr, + emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd, addr, FPU_LS_DOUBLE); if (emulated == EMULATE_DONE) @@ -1001,7 +1001,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) ulong addr = ax_ra ? 
kvmppc_get_gpr(vcpu, ax_ra) : 0; addr += kvmppc_get_gpr(vcpu, ax_rb); - emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, + emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd, addr, FPU_LS_SINGLE); break; } @@ -1010,7 +1010,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + kvmppc_get_gpr(vcpu, ax_rb); - emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, + emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd, addr, FPU_LS_SINGLE); if (emulated == EMULATE_DONE) @@ -1022,7 +1022,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + kvmppc_get_gpr(vcpu, ax_rb); - emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, + emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd, addr, FPU_LS_DOUBLE); break; } @@ -1031,7 +1031,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + kvmppc_get_gpr(vcpu, ax_rb); - emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, + emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd, addr, FPU_LS_DOUBLE); if (emulated == EMULATE_DONE) @@ -1043,7 +1043,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + kvmppc_get_gpr(vcpu, ax_rb); - emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, + emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd, addr, FPU_LS_SINGLE); break; } @@ -1052,7 +1052,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + kvmppc_get_gpr(vcpu, ax_rb); - emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, + emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd, addr, FPU_LS_SINGLE); if (emulated == EMULATE_DONE) @@ -1064,7 +1064,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + kvmppc_get_gpr(vcpu, ax_rb); - emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, + emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd, addr, FPU_LS_DOUBLE); break; } @@ -1073,7 +1073,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + kvmppc_get_gpr(vcpu, ax_rb); - emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, + emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd, addr, FPU_LS_DOUBLE); if (emulated == EMULATE_DONE) @@ -1085,7 +1085,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + kvmppc_get_gpr(vcpu, ax_rb); - emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, + emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd, addr, FPU_LS_SINGLE_LOW); break; diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index a0f6813f4560e..ef54f917bdaf8 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -700,7 +700,7 @@ static bool kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa) return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT); } -int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, +static int kvmppc_handle_pagefault(struct kvm_vcpu *vcpu, ulong eaddr, int vec) { bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE); @@ -795,7 +795,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, /* The guest's PTE is not mapped yet. 
Map on the host */ if (kvmppc_mmu_map_page(vcpu, &pte, iswrite) == -EIO) { /* Exit KVM if mapping failed */ - run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; return RESUME_HOST; } if (data) @@ -808,7 +808,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, vcpu->stat.mmio_exits++; vcpu->arch.paddr_accessed = pte.raddr; vcpu->arch.vaddr_accessed = pte.eaddr; - r = kvmppc_emulate_mmio(run, vcpu); + r = kvmppc_emulate_mmio(vcpu); if ( r == RESUME_HOST_NV ) r = RESUME_HOST; } @@ -992,7 +992,7 @@ static void kvmppc_emulate_fac(struct kvm_vcpu *vcpu, ulong fac) enum emulation_result er = EMULATE_FAIL; if (!(kvmppc_get_msr(vcpu) & MSR_PR)) - er = kvmppc_emulate_instruction(vcpu->run, vcpu); + er = kvmppc_emulate_instruction(vcpu); if ((er != EMULATE_DONE) && (er != EMULATE_AGAIN)) { /* Couldn't emulate, trigger interrupt in guest */ @@ -1089,8 +1089,7 @@ static void kvmppc_clear_debug(struct kvm_vcpu *vcpu) } } -static int kvmppc_exit_pr_progint(struct kvm_run *run, struct kvm_vcpu *vcpu, - unsigned int exit_nr) +static int kvmppc_exit_pr_progint(struct kvm_vcpu *vcpu, unsigned int exit_nr) { enum emulation_result er; ulong flags; @@ -1124,7 +1123,7 @@ static int kvmppc_exit_pr_progint(struct kvm_run *run, struct kvm_vcpu *vcpu, } vcpu->stat.emulated_inst_exits++; - er = kvmppc_emulate_instruction(run, vcpu); + er = kvmppc_emulate_instruction(vcpu); switch (er) { case EMULATE_DONE: r = RESUME_GUEST_NV; @@ -1139,7 +1138,7 @@ static int kvmppc_exit_pr_progint(struct kvm_run *run, struct kvm_vcpu *vcpu, r = RESUME_GUEST; break; case EMULATE_DO_MMIO: - run->exit_reason = KVM_EXIT_MMIO; + vcpu->run->exit_reason = KVM_EXIT_MMIO; r = RESUME_HOST_NV; break; case EMULATE_EXIT_USER: @@ -1198,7 +1197,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, /* only care about PTEG not found errors, but leave NX alone */ if (shadow_srr1 & 0x40000000) { int idx = srcu_read_lock(&vcpu->kvm->srcu); - r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr); + r = kvmppc_handle_pagefault(vcpu, kvmppc_get_pc(vcpu), exit_nr); srcu_read_unlock(&vcpu->kvm->srcu, idx); vcpu->stat.sp_instruc++; } else if (vcpu->arch.mmu.is_dcbz32(vcpu) && @@ -1248,7 +1247,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, */ if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) { int idx = srcu_read_lock(&vcpu->kvm->srcu); - r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr); + r = kvmppc_handle_pagefault(vcpu, dar, exit_nr); srcu_read_unlock(&vcpu->kvm->srcu, idx); } else { kvmppc_core_queue_data_storage(vcpu, dar, fault_dsisr); @@ -1292,7 +1291,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, break; case BOOK3S_INTERRUPT_PROGRAM: case BOOK3S_INTERRUPT_H_EMUL_ASSIST: - r = kvmppc_exit_pr_progint(run, vcpu, exit_nr); + r = kvmppc_exit_pr_progint(vcpu, exit_nr); break; case BOOK3S_INTERRUPT_SYSCALL: { @@ -1370,7 +1369,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst); if (emul == EMULATE_DONE) - r = kvmppc_exit_pr_progint(run, vcpu, exit_nr); + r = kvmppc_exit_pr_progint(vcpu, exit_nr); else r = RESUME_GUEST; @@ -1825,8 +1824,9 @@ static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu) vfree(vcpu_book3s); } -static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) +static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu) { + struct kvm_run *run = vcpu->run; int ret; #ifdef 
CONFIG_ALTIVEC unsigned long uninitialized_var(vrsave); @@ -1834,7 +1834,7 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) /* Check if we can run the vcpu at all */ if (!vcpu->arch.sane) { - kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; ret = -EINVAL; goto out; } @@ -1861,7 +1861,7 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) kvmppc_fix_ee_before_entry(); - ret = __kvmppc_vcpu_run(kvm_run, vcpu); + ret = __kvmppc_vcpu_run(run, vcpu); kvmppc_clear_debug(vcpu); diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 888afe8d35cc8..c0d62a917e205 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -729,13 +729,14 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu) return r; } -int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) +int kvmppc_vcpu_run(struct kvm_vcpu *vcpu) { + struct kvm_run *run = vcpu->run; int ret, s; struct debug_reg debug; if (!vcpu->arch.sane) { - kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; return -EINVAL; } @@ -777,7 +778,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) vcpu->arch.pgdir = vcpu->kvm->mm->pgd; kvmppc_fix_ee_before_entry(); - ret = __kvmppc_vcpu_run(kvm_run, vcpu); + ret = __kvmppc_vcpu_run(run, vcpu); /* No need for guest_exit. It's done in handle_exit. We also get here with interrupts enabled. */ @@ -799,11 +800,11 @@ out: return ret; } -static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) +static int emulation_exit(struct kvm_vcpu *vcpu) { enum emulation_result er; - er = kvmppc_emulate_instruction(run, vcpu); + er = kvmppc_emulate_instruction(vcpu); switch (er) { case EMULATE_DONE: /* don't overwrite subtypes, just account kvm_stats */ @@ -820,8 +821,8 @@ static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) __func__, vcpu->arch.regs.nip, vcpu->arch.last_inst); /* For debugging, encode the failing instruction and * report it to userspace. */ - run->hw.hardware_exit_reason = ~0ULL << 32; - run->hw.hardware_exit_reason |= vcpu->arch.last_inst; + vcpu->run->hw.hardware_exit_reason = ~0ULL << 32; + vcpu->run->hw.hardware_exit_reason |= vcpu->arch.last_inst; kvmppc_core_queue_program(vcpu, ESR_PIL); return RESUME_HOST; @@ -833,8 +834,9 @@ static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) } } -static int kvmppc_handle_debug(struct kvm_run *run, struct kvm_vcpu *vcpu) +static int kvmppc_handle_debug(struct kvm_vcpu *vcpu) { + struct kvm_run *run = vcpu->run; struct debug_reg *dbg_reg = &(vcpu->arch.dbg_reg); u32 dbsr = vcpu->arch.dbsr; @@ -953,7 +955,7 @@ static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu, } } -static int kvmppc_resume_inst_load(struct kvm_run *run, struct kvm_vcpu *vcpu, +static int kvmppc_resume_inst_load(struct kvm_vcpu *vcpu, enum emulation_result emulated, u32 last_inst) { switch (emulated) { @@ -965,8 +967,8 @@ static int kvmppc_resume_inst_load(struct kvm_run *run, struct kvm_vcpu *vcpu, __func__, vcpu->arch.regs.nip); /* For debugging, encode the failing instruction and * report it to userspace. 
*/ - run->hw.hardware_exit_reason = ~0ULL << 32; - run->hw.hardware_exit_reason |= last_inst; + vcpu->run->hw.hardware_exit_reason = ~0ULL << 32; + vcpu->run->hw.hardware_exit_reason |= last_inst; kvmppc_core_queue_program(vcpu, ESR_PIL); return RESUME_HOST; @@ -1023,7 +1025,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, run->ready_for_interrupt_injection = 1; if (emulated != EMULATE_DONE) { - r = kvmppc_resume_inst_load(run, vcpu, emulated, last_inst); + r = kvmppc_resume_inst_load(vcpu, emulated, last_inst); goto out; } @@ -1083,7 +1085,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, break; case BOOKE_INTERRUPT_HV_PRIV: - r = emulation_exit(run, vcpu); + r = emulation_exit(vcpu); break; case BOOKE_INTERRUPT_PROGRAM: @@ -1093,7 +1095,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, * We are here because of an SW breakpoint instr, * so lets return to host to handle. */ - r = kvmppc_handle_debug(run, vcpu); + r = kvmppc_handle_debug(vcpu); run->exit_reason = KVM_EXIT_DEBUG; kvmppc_account_exit(vcpu, DEBUG_EXITS); break; @@ -1114,7 +1116,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, break; } - r = emulation_exit(run, vcpu); + r = emulation_exit(vcpu); break; case BOOKE_INTERRUPT_FP_UNAVAIL: @@ -1281,7 +1283,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, * actually RAM. */ vcpu->arch.paddr_accessed = gpaddr; vcpu->arch.vaddr_accessed = eaddr; - r = kvmppc_emulate_mmio(run, vcpu); + r = kvmppc_emulate_mmio(vcpu); kvmppc_account_exit(vcpu, MMIO_EXITS); } @@ -1332,7 +1334,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, } case BOOKE_INTERRUPT_DEBUG: { - r = kvmppc_handle_debug(run, vcpu); + r = kvmppc_handle_debug(vcpu); if (r == RESUME_HOST) run->exit_reason = KVM_EXIT_DEBUG; kvmppc_account_exit(vcpu, DEBUG_EXITS); diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h index 65b4d337d337f..be9da96d9f060 100644 --- a/arch/powerpc/kvm/booke.h +++ b/arch/powerpc/kvm/booke.h @@ -70,7 +70,7 @@ void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr); void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits); void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits); -int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, +int kvmppc_booke_emulate_op(struct kvm_vcpu *vcpu, unsigned int inst, int *advance); int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val); int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val); @@ -94,16 +94,12 @@ enum int_class { void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type); -extern int kvmppc_core_emulate_op_e500(struct kvm_run *run, - struct kvm_vcpu *vcpu, +extern int kvmppc_core_emulate_op_e500(struct kvm_vcpu *vcpu, unsigned int inst, int *advance); extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_val); extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val); -extern int kvmppc_core_emulate_op_e500(struct kvm_run *run, - struct kvm_vcpu *vcpu, - unsigned int inst, int *advance); extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_val); extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c index 689ff5f90e9ef..d8d38aca71bd3 100644 --- a/arch/powerpc/kvm/booke_emulate.c +++ b/arch/powerpc/kvm/booke_emulate.c @@ 
-39,7 +39,7 @@ static void kvmppc_emul_rfci(struct kvm_vcpu *vcpu) kvmppc_set_msr(vcpu, vcpu->arch.csrr1); } -int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, +int kvmppc_booke_emulate_op(struct kvm_vcpu *vcpu, unsigned int inst, int *advance) { int emulated = EMULATE_DONE; diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c index 3d0d3ec5be966..64eb833e9f023 100644 --- a/arch/powerpc/kvm/e500_emulate.c +++ b/arch/powerpc/kvm/e500_emulate.c @@ -83,16 +83,16 @@ static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb) } #endif -static int kvmppc_e500_emul_ehpriv(struct kvm_run *run, struct kvm_vcpu *vcpu, +static int kvmppc_e500_emul_ehpriv(struct kvm_vcpu *vcpu, unsigned int inst, int *advance) { int emulated = EMULATE_DONE; switch (get_oc(inst)) { case EHPRIV_OC_DEBUG: - run->exit_reason = KVM_EXIT_DEBUG; - run->debug.arch.address = vcpu->arch.regs.nip; - run->debug.arch.status = 0; + vcpu->run->exit_reason = KVM_EXIT_DEBUG; + vcpu->run->debug.arch.address = vcpu->arch.regs.nip; + vcpu->run->debug.arch.status = 0; kvmppc_account_exit(vcpu, DEBUG_EXITS); emulated = EMULATE_EXIT_USER; *advance = 0; @@ -125,7 +125,7 @@ static int kvmppc_e500_emul_mftmr(struct kvm_vcpu *vcpu, unsigned int inst, return EMULATE_FAIL; } -int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu, +int kvmppc_core_emulate_op_e500(struct kvm_vcpu *vcpu, unsigned int inst, int *advance) { int emulated = EMULATE_DONE; @@ -182,8 +182,7 @@ int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu, break; case XOP_EHPRIV: - emulated = kvmppc_e500_emul_ehpriv(run, vcpu, inst, - advance); + emulated = kvmppc_e500_emul_ehpriv(vcpu, inst, advance); break; default: @@ -197,7 +196,7 @@ int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu, } if (emulated == EMULATE_FAIL) - emulated = kvmppc_booke_emulate_op(run, vcpu, inst, advance); + emulated = kvmppc_booke_emulate_op(vcpu, inst, advance); return emulated; } diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c index 6fca38ca791ff..ee1147c98cd8f 100644 --- a/arch/powerpc/kvm/emulate.c +++ b/arch/powerpc/kvm/emulate.c @@ -191,7 +191,7 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) /* XXX Should probably auto-generate instruction decoding for a particular core * from opcode tables in the future. */ -int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) +int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu) { u32 inst; int rs, rt, sprn; @@ -270,9 +270,9 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) * these are illegal instructions. 
*/ if (inst == KVMPPC_INST_SW_BREAKPOINT) { - run->exit_reason = KVM_EXIT_DEBUG; - run->debug.arch.status = 0; - run->debug.arch.address = kvmppc_get_pc(vcpu); + vcpu->run->exit_reason = KVM_EXIT_DEBUG; + vcpu->run->debug.arch.status = 0; + vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu); emulated = EMULATE_EXIT_USER; advance = 0; } else @@ -285,7 +285,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) } if (emulated == EMULATE_FAIL) { - emulated = vcpu->kvm->arch.kvm_ops->emulate_op(run, vcpu, inst, + emulated = vcpu->kvm->arch.kvm_ops->emulate_op(vcpu, inst, &advance); if (emulated == EMULATE_AGAIN) { advance = 0; diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c index 1139bc56e0045..e8a47c84d77d4 100644 --- a/arch/powerpc/kvm/emulate_loadstore.c +++ b/arch/powerpc/kvm/emulate_loadstore.c @@ -71,7 +71,6 @@ static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu) */ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) { - struct kvm_run *run = vcpu->run; u32 inst; enum emulation_result emulated = EMULATE_FAIL; int advance = 1; @@ -104,10 +103,10 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) int instr_byte_swap = op.type & BYTEREV; if (op.type & SIGNEXT) - emulated = kvmppc_handle_loads(run, vcpu, + emulated = kvmppc_handle_loads(vcpu, op.reg, size, !instr_byte_swap); else - emulated = kvmppc_handle_load(run, vcpu, + emulated = kvmppc_handle_load(vcpu, op.reg, size, !instr_byte_swap); if ((op.type & UPDATE) && (emulated != EMULATE_FAIL)) @@ -124,10 +123,10 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) vcpu->arch.mmio_sp64_extend = 1; if (op.type & SIGNEXT) - emulated = kvmppc_handle_loads(run, vcpu, + emulated = kvmppc_handle_loads(vcpu, KVM_MMIO_REG_FPR|op.reg, size, 1); else - emulated = kvmppc_handle_load(run, vcpu, + emulated = kvmppc_handle_load(vcpu, KVM_MMIO_REG_FPR|op.reg, size, 1); if ((op.type & UPDATE) && (emulated != EMULATE_FAIL)) @@ -164,12 +163,12 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) if (size == 16) { vcpu->arch.mmio_vmx_copy_nums = 2; - emulated = kvmppc_handle_vmx_load(run, - vcpu, KVM_MMIO_REG_VMX|op.reg, + emulated = kvmppc_handle_vmx_load(vcpu, + KVM_MMIO_REG_VMX|op.reg, 8, 1); } else { vcpu->arch.mmio_vmx_copy_nums = 1; - emulated = kvmppc_handle_vmx_load(run, vcpu, + emulated = kvmppc_handle_vmx_load(vcpu, KVM_MMIO_REG_VMX|op.reg, size, 1); } @@ -217,7 +216,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) io_size_each = op.element_size; } - emulated = kvmppc_handle_vsx_load(run, vcpu, + emulated = kvmppc_handle_vsx_load(vcpu, KVM_MMIO_REG_VSX|op.reg, io_size_each, 1, op.type & SIGNEXT); break; @@ -227,8 +226,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) /* if need byte reverse, op.val has been reversed by * analyse_instr(). 
*/ - emulated = kvmppc_handle_store(run, vcpu, op.val, - size, 1); + emulated = kvmppc_handle_store(vcpu, op.val, size, 1); if ((op.type & UPDATE) && (emulated != EMULATE_FAIL)) kvmppc_set_gpr(vcpu, op.update_reg, op.ea); @@ -250,7 +248,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) if (op.type & FPCONV) vcpu->arch.mmio_sp64_extend = 1; - emulated = kvmppc_handle_store(run, vcpu, + emulated = kvmppc_handle_store(vcpu, VCPU_FPR(vcpu, op.reg), size, 1); if ((op.type & UPDATE) && (emulated != EMULATE_FAIL)) @@ -290,12 +288,12 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) if (size == 16) { vcpu->arch.mmio_vmx_copy_nums = 2; - emulated = kvmppc_handle_vmx_store(run, - vcpu, op.reg, 8, 1); + emulated = kvmppc_handle_vmx_store(vcpu, + op.reg, 8, 1); } else { vcpu->arch.mmio_vmx_copy_nums = 1; - emulated = kvmppc_handle_vmx_store(run, - vcpu, op.reg, size, 1); + emulated = kvmppc_handle_vmx_store(vcpu, + op.reg, size, 1); } break; @@ -338,7 +336,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) io_size_each = op.element_size; } - emulated = kvmppc_handle_vsx_store(run, vcpu, + emulated = kvmppc_handle_vsx_store(vcpu, op.reg, io_size_each, 1); break; } diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 27ccff612903e..dd7d141e33e8c 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -279,7 +279,7 @@ out: } EXPORT_SYMBOL_GPL(kvmppc_sanity_check); -int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu) +int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu) { enum emulation_result er; int r; @@ -295,7 +295,7 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu) r = RESUME_GUEST; break; case EMULATE_DO_MMIO: - run->exit_reason = KVM_EXIT_MMIO; + vcpu->run->exit_reason = KVM_EXIT_MMIO; /* We must reload nonvolatiles because "update" load/store * instructions modify register state. 
*/ /* Future optimization: only reload non-volatiles if they were @@ -1107,9 +1107,9 @@ static inline u32 dp_to_sp(u64 fprd) #define dp_to_sp(x) (x) #endif /* CONFIG_PPC_FPU */ -static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, - struct kvm_run *run) +static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu) { + struct kvm_run *run = vcpu->run; u64 uninitialized_var(gpr); if (run->mmio.len > sizeof(gpr)) { @@ -1219,10 +1219,11 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, } } -static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, +static int __kvmppc_handle_load(struct kvm_vcpu *vcpu, unsigned int rt, unsigned int bytes, int is_default_endian, int sign_extend) { + struct kvm_run *run = vcpu->run; int idx, ret; bool host_swabbed; @@ -1256,7 +1257,7 @@ static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, srcu_read_unlock(&vcpu->kvm->srcu, idx); if (!ret) { - kvmppc_complete_mmio_load(vcpu, run); + kvmppc_complete_mmio_load(vcpu); vcpu->mmio_needed = 0; return EMULATE_DONE; } @@ -1264,24 +1265,24 @@ static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, return EMULATE_DO_MMIO; } -int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, +int kvmppc_handle_load(struct kvm_vcpu *vcpu, unsigned int rt, unsigned int bytes, int is_default_endian) { - return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0); + return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 0); } EXPORT_SYMBOL_GPL(kvmppc_handle_load); /* Same as above, but sign extends */ -int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, +int kvmppc_handle_loads(struct kvm_vcpu *vcpu, unsigned int rt, unsigned int bytes, int is_default_endian) { - return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1); + return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 1); } #ifdef CONFIG_VSX -int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu, +int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu, unsigned int rt, unsigned int bytes, int is_default_endian, int mmio_sign_extend) { @@ -1292,13 +1293,13 @@ int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu, return EMULATE_FAIL; while (vcpu->arch.mmio_vsx_copy_nums) { - emulated = __kvmppc_handle_load(run, vcpu, rt, bytes, + emulated = __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, mmio_sign_extend); if (emulated != EMULATE_DONE) break; - vcpu->arch.paddr_accessed += run->mmio.len; + vcpu->arch.paddr_accessed += vcpu->run->mmio.len; vcpu->arch.mmio_vsx_copy_nums--; vcpu->arch.mmio_vsx_offset++; @@ -1307,9 +1308,10 @@ int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu, } #endif /* CONFIG_VSX */ -int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, +int kvmppc_handle_store(struct kvm_vcpu *vcpu, u64 val, unsigned int bytes, int is_default_endian) { + struct kvm_run *run = vcpu->run; void *data = run->mmio.data; int idx, ret; bool host_swabbed; @@ -1423,7 +1425,7 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val) return result; } -int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu, +int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu, int rs, unsigned int bytes, int is_default_endian) { u64 val; @@ -1439,13 +1441,13 @@ int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu, if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1) return EMULATE_FAIL; - emulated = kvmppc_handle_store(run, vcpu, + emulated 
= kvmppc_handle_store(vcpu, val, bytes, is_default_endian); if (emulated != EMULATE_DONE) break; - vcpu->arch.paddr_accessed += run->mmio.len; + vcpu->arch.paddr_accessed += vcpu->run->mmio.len; vcpu->arch.mmio_vsx_copy_nums--; vcpu->arch.mmio_vsx_offset++; @@ -1454,19 +1456,19 @@ int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu, return emulated; } -static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu, - struct kvm_run *run) +static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu) { + struct kvm_run *run = vcpu->run; enum emulation_result emulated = EMULATE_FAIL; int r; vcpu->arch.paddr_accessed += run->mmio.len; if (!vcpu->mmio_is_write) { - emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr, + emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr, run->mmio.len, 1, vcpu->arch.mmio_sign_extend); } else { - emulated = kvmppc_handle_vsx_store(run, vcpu, + emulated = kvmppc_handle_vsx_store(vcpu, vcpu->arch.io_gpr, run->mmio.len, 1); } @@ -1490,7 +1492,7 @@ static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu, #endif /* CONFIG_VSX */ #ifdef CONFIG_ALTIVEC -int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu, +int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu, unsigned int rt, unsigned int bytes, int is_default_endian) { enum emulation_result emulated = EMULATE_DONE; @@ -1499,13 +1501,13 @@ int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu, return EMULATE_FAIL; while (vcpu->arch.mmio_vmx_copy_nums) { - emulated = __kvmppc_handle_load(run, vcpu, rt, bytes, + emulated = __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 0); if (emulated != EMULATE_DONE) break; - vcpu->arch.paddr_accessed += run->mmio.len; + vcpu->arch.paddr_accessed += vcpu->run->mmio.len; vcpu->arch.mmio_vmx_copy_nums--; vcpu->arch.mmio_vmx_offset++; } @@ -1585,7 +1587,7 @@ int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val) return result; } -int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu, +int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu, unsigned int rs, unsigned int bytes, int is_default_endian) { u64 val = 0; @@ -1620,12 +1622,12 @@ int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu, return EMULATE_FAIL; } - emulated = kvmppc_handle_store(run, vcpu, val, bytes, + emulated = kvmppc_handle_store(vcpu, val, bytes, is_default_endian); if (emulated != EMULATE_DONE) break; - vcpu->arch.paddr_accessed += run->mmio.len; + vcpu->arch.paddr_accessed += vcpu->run->mmio.len; vcpu->arch.mmio_vmx_copy_nums--; vcpu->arch.mmio_vmx_offset++; } @@ -1633,19 +1635,19 @@ int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu, return emulated; } -static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu, - struct kvm_run *run) +static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu) { + struct kvm_run *run = vcpu->run; enum emulation_result emulated = EMULATE_FAIL; int r; vcpu->arch.paddr_accessed += run->mmio.len; if (!vcpu->mmio_is_write) { - emulated = kvmppc_handle_vmx_load(run, vcpu, + emulated = kvmppc_handle_vmx_load(vcpu, vcpu->arch.io_gpr, run->mmio.len, 1); } else { - emulated = kvmppc_handle_vmx_store(run, vcpu, + emulated = kvmppc_handle_vmx_store(vcpu, vcpu->arch.io_gpr, run->mmio.len, 1); } @@ -1775,7 +1777,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) if (vcpu->mmio_needed) { vcpu->mmio_needed = 0; if (!vcpu->mmio_is_write) - kvmppc_complete_mmio_load(vcpu, run); + kvmppc_complete_mmio_load(vcpu); #ifdef 
CONFIG_VSX if (vcpu->arch.mmio_vsx_copy_nums > 0) { vcpu->arch.mmio_vsx_copy_nums--; @@ -1783,7 +1785,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) } if (vcpu->arch.mmio_vsx_copy_nums > 0) { - r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run); + r = kvmppc_emulate_mmio_vsx_loadstore(vcpu); if (r == RESUME_HOST) { vcpu->mmio_needed = 1; goto out; @@ -1797,7 +1799,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) } if (vcpu->arch.mmio_vmx_copy_nums > 0) { - r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run); + r = kvmppc_emulate_mmio_vmx_loadstore(vcpu); if (r == RESUME_HOST) { vcpu->mmio_needed = 1; goto out; @@ -1830,7 +1832,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) if (run->immediate_exit) r = -EINTR; else - r = kvmppc_vcpu_run(run, vcpu); + r = kvmppc_vcpu_run(vcpu); kvm_sigset_deactivate(vcpu); diff --git a/arch/powerpc/kvm/trace_hv.h b/arch/powerpc/kvm/trace_hv.h index 8a1e3b0047f19..4a61a971c34e1 100644 --- a/arch/powerpc/kvm/trace_hv.h +++ b/arch/powerpc/kvm/trace_hv.h @@ -472,9 +472,9 @@ TRACE_EVENT(kvmppc_run_vcpu_enter, ); TRACE_EVENT(kvmppc_run_vcpu_exit, - TP_PROTO(struct kvm_vcpu *vcpu, struct kvm_run *run), + TP_PROTO(struct kvm_vcpu *vcpu), - TP_ARGS(vcpu, run), + TP_ARGS(vcpu), TP_STRUCT__entry( __field(int, vcpu_id) @@ -484,7 +484,7 @@ TRACE_EVENT(kvmppc_run_vcpu_exit, TP_fast_assign( __entry->vcpu_id = vcpu->vcpu_id; - __entry->exit = run->exit_reason; + __entry->exit = vcpu->run->exit_reason; __entry->ret = vcpu->arch.ret; ), -- GitLab From 0aca8a5575544bd21b3363058afb8f1e81505150 Mon Sep 17 00:00:00 2001 From: Qian Cai Date: Wed, 13 May 2020 09:39:15 -0400 Subject: [PATCH 0692/1971] KVM: PPC: Book3S HV: Ignore kmemleak false positives kvmppc_pmd_alloc() and kvmppc_pte_alloc() allocate some memory but then pud_populate() and pmd_populate() will use __pa() to reference the newly allocated memory. Since kmemleak is unable to track the physical memory resulting in false positives, silence those by using kmemleak_ignore(). unreferenced object 0xc000201c382a1000 (size 4096): comm "qemu-kvm", pid 124828, jiffies 4295733767 (age 341.250s) hex dump (first 32 bytes): c0 00 20 09 f4 60 03 87 c0 00 20 10 72 a0 03 87 .. ..`.... .r... c0 00 20 0e 13 a0 03 87 c0 00 20 1b dc c0 03 87 .. ....... ..... backtrace: [<000000004cc2790f>] kvmppc_create_pte+0x838/0xd20 [kvm_hv] kvmppc_pmd_alloc at arch/powerpc/kvm/book3s_64_mmu_radix.c:366 (inlined by) kvmppc_create_pte at arch/powerpc/kvm/book3s_64_mmu_radix.c:590 [<00000000d123c49a>] kvmppc_book3s_instantiate_page+0x2e0/0x8c0 [kvm_hv] [<00000000bb549087>] kvmppc_book3s_radix_page_fault+0x1b4/0x2b0 [kvm_hv] [<0000000086dddc0e>] kvmppc_book3s_hv_page_fault+0x214/0x12a0 [kvm_hv] [<000000005ae9ccc2>] kvmppc_vcpu_run_hv+0xc5c/0x15f0 [kvm_hv] [<00000000d22162ff>] kvmppc_vcpu_run+0x34/0x48 [kvm] [<00000000d6953bc4>] kvm_arch_vcpu_ioctl_run+0x314/0x420 [kvm] [<000000002543dd54>] kvm_vcpu_ioctl+0x33c/0x950 [kvm] [<0000000048155cd6>] ksys_ioctl+0xd8/0x130 [<0000000041ffeaa7>] sys_ioctl+0x28/0x40 [<000000004afc4310>] system_call_exception+0x114/0x1e0 [<00000000fb70a873>] system_call_common+0xf0/0x278 unreferenced object 0xc0002001f0c03900 (size 256): comm "qemu-kvm", pid 124830, jiffies 4295735235 (age 326.570s) hex dump (first 32 bytes): c0 00 20 10 fa a0 03 87 c0 00 20 10 fa a1 03 87 .. ....... ..... c0 00 20 10 fa a2 03 87 c0 00 20 10 fa a3 03 87 .. ....... ..... 
backtrace: [<0000000023f675b8>] kvmppc_create_pte+0x854/0xd20 [kvm_hv] kvmppc_pte_alloc at arch/powerpc/kvm/book3s_64_mmu_radix.c:356 (inlined by) kvmppc_create_pte at arch/powerpc/kvm/book3s_64_mmu_radix.c:593 [<00000000d123c49a>] kvmppc_book3s_instantiate_page+0x2e0/0x8c0 [kvm_hv] [<00000000bb549087>] kvmppc_book3s_radix_page_fault+0x1b4/0x2b0 [kvm_hv] [<0000000086dddc0e>] kvmppc_book3s_hv_page_fault+0x214/0x12a0 [kvm_hv] [<000000005ae9ccc2>] kvmppc_vcpu_run_hv+0xc5c/0x15f0 [kvm_hv] [<00000000d22162ff>] kvmppc_vcpu_run+0x34/0x48 [kvm] [<00000000d6953bc4>] kvm_arch_vcpu_ioctl_run+0x314/0x420 [kvm] [<000000002543dd54>] kvm_vcpu_ioctl+0x33c/0x950 [kvm] [<0000000048155cd6>] ksys_ioctl+0xd8/0x130 [<0000000041ffeaa7>] sys_ioctl+0x28/0x40 [<000000004afc4310>] system_call_exception+0x114/0x1e0 [<00000000fb70a873>] system_call_common+0xf0/0x278 Signed-off-by: Qian Cai Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s_64_mmu_radix.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index 16c947bd5e87c..97b45eaa7014a 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -353,7 +353,13 @@ static struct kmem_cache *kvm_pmd_cache; static pte_t *kvmppc_pte_alloc(void) { - return kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL); + pte_t *pte; + + pte = kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL); + /* pmd_populate() will only reference _pa(pte). */ + kmemleak_ignore(pte); + + return pte; } static void kvmppc_pte_free(pte_t *ptep) @@ -363,7 +369,13 @@ static void kvmppc_pte_free(pte_t *ptep) static pmd_t *kvmppc_pmd_alloc(void) { - return kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL); + pmd_t *pmd; + + pmd = kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL); + /* pud_populate() will only reference _pa(pmd). */ + kmemleak_ignore(pmd); + + return pmd; } static void kvmppc_pmd_free(pmd_t *pmdp) -- GitLab From ab8b65be183180c3eef405d449163964ecc4b571 Mon Sep 17 00:00:00 2001 From: Qian Cai Date: Sun, 10 May 2020 01:18:34 -0400 Subject: [PATCH 0693/1971] KVM: PPC: Book3S: Fix some RCU-list locks It is unsafe to traverse kvm->arch.spapr_tce_tables and stt->iommu_tables without the RCU read lock held. Also, add cond_resched_rcu() in places with the RCU read lock held that could take a while to finish. arch/powerpc/kvm/book3s_64_vio.c:76 RCU-list traversed in non-reader section!! other info that might help us debug this: rcu_scheduler_active = 2, debug_locks = 1 no locks held by qemu-kvm/4265. 
stack backtrace: CPU: 96 PID: 4265 Comm: qemu-kvm Not tainted 5.7.0-rc4-next-20200508+ #2 Call Trace: [c000201a8690f720] [c000000000715948] dump_stack+0xfc/0x174 (unreliable) [c000201a8690f770] [c0000000001d9470] lockdep_rcu_suspicious+0x140/0x164 [c000201a8690f7f0] [c008000010b9fb48] kvm_spapr_tce_release_iommu_group+0x1f0/0x220 [kvm] [c000201a8690f870] [c008000010b8462c] kvm_spapr_tce_release_vfio_group+0x54/0xb0 [kvm] [c000201a8690f8a0] [c008000010b84710] kvm_vfio_destroy+0x88/0x140 [kvm] [c000201a8690f8f0] [c008000010b7d488] kvm_put_kvm+0x370/0x600 [kvm] [c000201a8690f990] [c008000010b7e3c0] kvm_vm_release+0x38/0x60 [kvm] [c000201a8690f9c0] [c0000000005223f4] __fput+0x124/0x330 [c000201a8690fa20] [c000000000151cd8] task_work_run+0xb8/0x130 [c000201a8690fa70] [c0000000001197e8] do_exit+0x4e8/0xfa0 [c000201a8690fb70] [c00000000011a374] do_group_exit+0x64/0xd0 [c000201a8690fbb0] [c000000000132c90] get_signal+0x1f0/0x1200 [c000201a8690fcc0] [c000000000020690] do_notify_resume+0x130/0x3c0 [c000201a8690fda0] [c000000000038d64] syscall_exit_prepare+0x1a4/0x280 [c000201a8690fe20] [c00000000000c8f8] system_call_common+0xf8/0x278 ==== arch/powerpc/kvm/book3s_64_vio.c:368 RCU-list traversed in non-reader section!! other info that might help us debug this: rcu_scheduler_active = 2, debug_locks = 1 2 locks held by qemu-kvm/4264: #0: c000201ae2d000d8 (&vcpu->mutex){+.+.}-{3:3}, at: kvm_vcpu_ioctl+0xdc/0x950 [kvm] #1: c000200c9ed0c468 (&kvm->srcu){....}-{0:0}, at: kvmppc_h_put_tce+0x88/0x340 [kvm] ==== arch/powerpc/kvm/book3s_64_vio.c:108 RCU-list traversed in non-reader section!! other info that might help us debug this: rcu_scheduler_active = 2, debug_locks = 1 1 lock held by qemu-kvm/4257: #0: c000200b1b363a40 (&kv->lock){+.+.}-{3:3}, at: kvm_vfio_set_attr+0x598/0x6c0 [kvm] ==== arch/powerpc/kvm/book3s_64_vio.c:146 RCU-list traversed in non-reader section!! 
other info that might help us debug this: rcu_scheduler_active = 2, debug_locks = 1 1 lock held by qemu-kvm/4257: #0: c000200b1b363a40 (&kv->lock){+.+.}-{3:3}, at: kvm_vfio_set_attr+0x598/0x6c0 [kvm] Signed-off-by: Qian Cai Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s_64_vio.c | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index 50555ad1db932..1a529df0ab442 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c @@ -73,6 +73,7 @@ extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm, struct kvmppc_spapr_tce_iommu_table *stit, *tmp; struct iommu_table_group *table_group = NULL; + rcu_read_lock(); list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) { table_group = iommu_group_get_iommudata(grp); @@ -87,7 +88,9 @@ extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm, kref_put(&stit->kref, kvm_spapr_tce_liobn_put); } } + cond_resched_rcu(); } + rcu_read_unlock(); } extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd, @@ -105,12 +108,14 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd, if (!f.file) return -EBADF; + rcu_read_lock(); list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) { if (stt == f.file->private_data) { found = true; break; } } + rcu_read_unlock(); fdput(f); @@ -143,6 +148,7 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd, if (!tbl) return -EINVAL; + rcu_read_lock(); list_for_each_entry_rcu(stit, &stt->iommu_tables, next) { if (tbl != stit->tbl) continue; @@ -150,14 +156,17 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd, if (!kref_get_unless_zero(&stit->kref)) { /* stit is being destroyed */ iommu_tce_table_put(tbl); + rcu_read_unlock(); return -ENOTTY; } /* * The table is already known to this KVM, we just increased * its KVM reference counter and can return. */ + rcu_read_unlock(); return 0; } + rcu_read_unlock(); stit = kzalloc(sizeof(*stit), GFP_KERNEL); if (!stit) { @@ -365,18 +374,19 @@ static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, if (kvmppc_tce_to_ua(stt->kvm, tce, &ua)) return H_TOO_HARD; + rcu_read_lock(); list_for_each_entry_rcu(stit, &stt->iommu_tables, next) { unsigned long hpa = 0; struct mm_iommu_table_group_mem_t *mem; long shift = stit->tbl->it_page_shift; mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift); - if (!mem) - return H_TOO_HARD; - - if (mm_iommu_ua_to_hpa(mem, ua, shift, &hpa)) + if (!mem || mm_iommu_ua_to_hpa(mem, ua, shift, &hpa)) { + rcu_read_unlock(); return H_TOO_HARD; + } } + rcu_read_unlock(); return H_SUCCESS; } -- GitLab From e3326ae3d59e443a379367c6936941d6ab55d316 Mon Sep 17 00:00:00 2001 From: Laurent Dufour Date: Wed, 20 May 2020 19:43:08 +0200 Subject: [PATCH 0694/1971] KVM: PPC: Book3S HV: Relax check on H_SVM_INIT_ABORT The commit 8c47b6ff29e3 ("KVM: PPC: Book3S HV: Check caller of H_SVM_* Hcalls") added checks of secure bit of SRR1 to filter out the Hcall reserved to the Ultravisor. However, the Hcall H_SVM_INIT_ABORT is made by the Ultravisor passing the context of the VM calling UV_ESM. This allows the Hypervisor to return to the guest without going through the Ultravisor. Thus the Secure bit of SRR1 is not set in that particular case. In the case a regular VM is calling H_SVM_INIT_ABORT, this hcall will be filtered out in kvmppc_h_svm_init_abort() because kvm->arch.secure_guest is not set in that case. 
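The check that actually rejects a regular VM therefore lives in the helper itself rather than in the hcall dispatcher. Roughly, as a simplified sketch (the real body is in book3s_hv_uvmem.c and does more work; shown here only to illustrate the reasoning above):

	unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm)
	{
		/* A normal VM never went through H_SVM_INIT_START. */
		if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
			return H_UNSUPPORTED;

		/* ... undo the partial transition and fail the UV_ESM ... */
		return H_PARAMETER;
	}
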
Fixes: 8c47b6ff29e3 ("KVM: PPC: Book3S HV: Check caller of H_SVM_* Hcalls") Signed-off-by: Laurent Dufour Reviewed-by: Greg Kurz Reviewed-by: Ram Pai Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s_hv.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index e9aa5555d6901..1b84806abef68 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -1097,9 +1097,14 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) ret = kvmppc_h_svm_init_done(vcpu->kvm); break; case H_SVM_INIT_ABORT: - ret = H_UNSUPPORTED; - if (kvmppc_get_srr1(vcpu) & MSR_S) - ret = kvmppc_h_svm_init_abort(vcpu->kvm); + /* + * Even if that call is made by the Ultravisor, the SSR1 value + * is the guest context one, with the secure bit clear as it has + * not yet been secured. So we can't check it here. + * Instead the kvm->arch.secure_guest flag is checked inside + * kvmppc_h_svm_init_abort(). + */ + ret = kvmppc_h_svm_init_abort(vcpu->kvm); break; default: -- GitLab From 9d66e85784f196fb7442193a1d7f3896ed418806 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 3 Apr 2020 16:30:40 +0800 Subject: [PATCH 0695/1971] clk: zynqmp: Make zynqmp_clk_get_max_divisor static Fix sparse warning: drivers/clk/zynqmp/divider.c:259:5: warning: symbol 'zynqmp_clk_get_max_divisor' was not declared. Should it be static? Reported-by: Hulk Robot Signed-off-by: YueHaibing Link: https://lkml.kernel.org/r/20200403083040.37748-1-yuehaibing@huawei.com Reviewed-by: Michal Simek Signed-off-by: Stephen Boyd --- drivers/clk/zynqmp/divider.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/clk/zynqmp/divider.c b/drivers/clk/zynqmp/divider.c index 79a294488e845..b22ca33e40c05 100644 --- a/drivers/clk/zynqmp/divider.c +++ b/drivers/clk/zynqmp/divider.c @@ -266,7 +266,7 @@ static const struct clk_ops zynqmp_clk_divider_ops = { * Return: Maximum divisor of a clock if query data is successful * U16_MAX in case of query data is not success */ -u32 zynqmp_clk_get_max_divisor(u32 clk_id, u32 type) +static u32 zynqmp_clk_get_max_divisor(u32 clk_id, u32 type) { const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops(); struct zynqmp_pm_query_data qdata = {0}; -- GitLab From e2266f4c3881d7dfc0eceeb0066831fefcbdbba2 Mon Sep 17 00:00:00 2001 From: Rahul Tanwar Date: Fri, 17 Apr 2020 13:54:46 +0800 Subject: [PATCH 0696/1971] dt-bindings: clk: intel: Add bindings document & header file for CGU Clock generation unit(CGU) is a clock controller IP of Intel's Lightning Mountain(LGM) SoC. Add DT bindings include file and document for CGU clock controller driver of LGM. 
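From a consumer driver's point of view these macros are only ever seen through the device tree: the peripheral node references the CGU with one of the IDs, and the driver simply takes whatever "clocks" phandle it was given. A hypothetical consumer (names invented for illustration, not part of this patch) needs no more than:

	/* DT: clocks = <&cgu LGM_GCLK_ASC0>; */
	struct clk *clk = devm_clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return clk_prepare_enable(clk);
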
Reviewed-by: Rob Herring Signed-off-by: Rahul Tanwar Link: https://lkml.kernel.org/r/8dce2be13195aab20c6b11fca6af0fffe22d5241.1587102634.git.rahul.tanwar@linux.intel.com Signed-off-by: Stephen Boyd --- .../bindings/clock/intel,cgu-lgm.yaml | 44 +++++ include/dt-bindings/clock/intel,lgm-clk.h | 165 ++++++++++++++++++ 2 files changed, 209 insertions(+) create mode 100644 Documentation/devicetree/bindings/clock/intel,cgu-lgm.yaml create mode 100644 include/dt-bindings/clock/intel,lgm-clk.h diff --git a/Documentation/devicetree/bindings/clock/intel,cgu-lgm.yaml b/Documentation/devicetree/bindings/clock/intel,cgu-lgm.yaml new file mode 100644 index 0000000000000..6dc1414bfb7f6 --- /dev/null +++ b/Documentation/devicetree/bindings/clock/intel,cgu-lgm.yaml @@ -0,0 +1,44 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/clock/intel,cgu-lgm.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Intel Lightning Mountain SoC's Clock Controller(CGU) Binding + +maintainers: + - Rahul Tanwar + +description: | + Lightning Mountain(LGM) SoC's Clock Generation Unit(CGU) driver provides + all means to access the CGU hardware module in order to generate a series + of clocks for the whole system and individual peripherals. + + Please refer to include/dt-bindings/clock/intel,lgm-clk.h header file, it + defines all available clocks as macros. These macros can be used in device + tree sources. + +properties: + compatible: + const: intel,cgu-lgm + + reg: + maxItems: 1 + + '#clock-cells': + const: 1 + +required: + - compatible + - reg + - '#clock-cells' + +examples: + - | + cgu: clock-controller@e0200000 { + compatible = "intel,cgu-lgm"; + reg = <0xe0200000 0x33c>; + #clock-cells = <1>; + }; + +... diff --git a/include/dt-bindings/clock/intel,lgm-clk.h b/include/dt-bindings/clock/intel,lgm-clk.h new file mode 100644 index 0000000000000..92f5be6490bb5 --- /dev/null +++ b/include/dt-bindings/clock/intel,lgm-clk.h @@ -0,0 +1,165 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (C) 2020 Intel Corporation. 
+ * Lei Chuanhua + * Zhu Yixin + */ +#ifndef __INTEL_LGM_CLK_H +#define __INTEL_LGM_CLK_H + +/* PLL clocks */ +#define LGM_CLK_OSC 1 +#define LGM_CLK_PLLPP 2 +#define LGM_CLK_PLL2 3 +#define LGM_CLK_PLL0CZ 4 +#define LGM_CLK_PLL0B 5 +#define LGM_CLK_PLL1 6 +#define LGM_CLK_LJPLL3 7 +#define LGM_CLK_LJPLL4 8 +#define LGM_CLK_PLL0CM0 9 +#define LGM_CLK_PLL0CM1 10 + +/* clocks from PLLs */ + +/* ROPLL clocks */ +#define LGM_CLK_PP_HW 15 +#define LGM_CLK_PP_UC 16 +#define LGM_CLK_PP_FXD 17 +#define LGM_CLK_PP_TBM 18 + +/* PLL2 clocks */ +#define LGM_CLK_DDR 20 + +/* PLL0CZ */ +#define LGM_CLK_CM 25 +#define LGM_CLK_IC 26 +#define LGM_CLK_SDXC3 27 + +/* PLL0B */ +#define LGM_CLK_NGI 30 +#define LGM_CLK_NOC4 31 +#define LGM_CLK_SW 32 +#define LGM_CLK_QSPI 33 +#define LGM_CLK_CQEM LGM_CLK_SW +#define LGM_CLK_EMMC5 LGM_CLK_NOC4 + +/* PLL1 */ +#define LGM_CLK_CT 35 +#define LGM_CLK_DSP 36 +#define LGM_CLK_VIF 37 + +/* LJPLL3 */ +#define LGM_CLK_CML 40 +#define LGM_CLK_SERDES 41 +#define LGM_CLK_POOL 42 +#define LGM_CLK_PTP 43 + +/* LJPLL4 */ +#define LGM_CLK_PCIE 45 +#define LGM_CLK_SATA LGM_CLK_PCIE + +/* PLL0CM0 */ +#define LGM_CLK_CPU0 50 + +/* PLL0CM1 */ +#define LGM_CLK_CPU1 55 + +/* Miscellaneous clocks */ +#define LGM_CLK_EMMC4 60 +#define LGM_CLK_SDXC2 61 +#define LGM_CLK_EMMC 62 +#define LGM_CLK_SDXC 63 +#define LGM_CLK_SLIC 64 +#define LGM_CLK_DCL 65 +#define LGM_CLK_DOCSIS 66 +#define LGM_CLK_PCM 67 +#define LGM_CLK_DDR_PHY 68 +#define LGM_CLK_PONDEF 69 +#define LGM_CLK_PL25M 70 +#define LGM_CLK_PL10M 71 +#define LGM_CLK_PL1544K 72 +#define LGM_CLK_PL2048K 73 +#define LGM_CLK_PL8K 74 +#define LGM_CLK_PON_NTR 75 +#define LGM_CLK_SYNC0 76 +#define LGM_CLK_SYNC1 77 +#define LGM_CLK_PROGDIV 78 +#define LGM_CLK_OD0 79 +#define LGM_CLK_OD1 80 +#define LGM_CLK_CBPHY0 81 +#define LGM_CLK_CBPHY1 82 +#define LGM_CLK_CBPHY2 83 +#define LGM_CLK_CBPHY3 84 + +/* Gate clocks */ +/* Gate CLK0 */ +#define LGM_GCLK_C55 100 +#define LGM_GCLK_QSPI 101 +#define LGM_GCLK_EIP197 102 +#define LGM_GCLK_VAULT 103 +#define LGM_GCLK_TOE 104 +#define LGM_GCLK_SDXC 105 +#define LGM_GCLK_EMMC 106 +#define LGM_GCLK_SPI_DBG 107 +#define LGM_GCLK_DMA3 108 + +/* Gate CLK1 */ +#define LGM_GCLK_DMA0 120 +#define LGM_GCLK_LEDC0 121 +#define LGM_GCLK_LEDC1 122 +#define LGM_GCLK_I2S0 123 +#define LGM_GCLK_I2S1 124 +#define LGM_GCLK_EBU 125 +#define LGM_GCLK_PWM 126 +#define LGM_GCLK_I2C0 127 +#define LGM_GCLK_I2C1 128 +#define LGM_GCLK_I2C2 129 +#define LGM_GCLK_I2C3 130 +#define LGM_GCLK_SSC0 131 +#define LGM_GCLK_SSC1 132 +#define LGM_GCLK_SSC2 133 +#define LGM_GCLK_SSC3 134 +#define LGM_GCLK_GPTC0 135 +#define LGM_GCLK_GPTC1 136 +#define LGM_GCLK_GPTC2 137 +#define LGM_GCLK_GPTC3 138 +#define LGM_GCLK_ASC0 139 +#define LGM_GCLK_ASC1 140 +#define LGM_GCLK_ASC2 141 +#define LGM_GCLK_ASC3 142 +#define LGM_GCLK_PCM0 143 +#define LGM_GCLK_PCM1 144 +#define LGM_GCLK_PCM2 145 + +/* Gate CLK2 */ +#define LGM_GCLK_PCIE10 150 +#define LGM_GCLK_PCIE11 151 +#define LGM_GCLK_PCIE30 152 +#define LGM_GCLK_PCIE31 153 +#define LGM_GCLK_PCIE20 154 +#define LGM_GCLK_PCIE21 155 +#define LGM_GCLK_PCIE40 156 +#define LGM_GCLK_PCIE41 157 +#define LGM_GCLK_XPCS0 158 +#define LGM_GCLK_XPCS1 159 +#define LGM_GCLK_XPCS2 160 +#define LGM_GCLK_XPCS3 161 +#define LGM_GCLK_SATA0 162 +#define LGM_GCLK_SATA1 163 +#define LGM_GCLK_SATA2 164 +#define LGM_GCLK_SATA3 165 + +/* Gate CLK3 */ +#define LGM_GCLK_ARCEM4 170 +#define LGM_GCLK_IDMAR1 171 +#define LGM_GCLK_IDMAT0 172 +#define LGM_GCLK_IDMAT1 173 +#define LGM_GCLK_IDMAT2 174 +#define LGM_GCLK_PPV4 175 
+#define LGM_GCLK_GSWIPO 176 +#define LGM_GCLK_CQEM 177 +#define LGM_GCLK_XPCS5 178 +#define LGM_GCLK_USB1 179 +#define LGM_GCLK_USB2 180 + +#endif /* __INTEL_LGM_CLK_H */ -- GitLab From 195f406543e555611330b98ea8c21f113a192fc3 Mon Sep 17 00:00:00 2001 From: Chengguang Xu Date: Tue, 26 May 2020 17:05:43 +0800 Subject: [PATCH 0697/1971] f2fs: code cleanup by removing ifdef macro surrounding Define f2fs_listxattr and to NULL when CONFIG_F2FS_FS_XATTR is not enabled, then we can remove many ugly ifdef macros in the code. Signed-off-by: Chengguang Xu Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/file.c | 2 -- fs/f2fs/namei.c | 8 -------- fs/f2fs/xattr.h | 6 +----- 3 files changed, 1 insertion(+), 15 deletions(-) diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index f7de2a1da5283..67fd1c900eb49 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -979,9 +979,7 @@ const struct inode_operations f2fs_file_inode_operations = { .setattr = f2fs_setattr, .get_acl = f2fs_get_acl, .set_acl = f2fs_set_acl, -#ifdef CONFIG_F2FS_FS_XATTR .listxattr = f2fs_listxattr, -#endif .fiemap = f2fs_fiemap, }; diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index 63b34a161cf48..8f55201441f78 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c @@ -1287,9 +1287,7 @@ const struct inode_operations f2fs_encrypted_symlink_inode_operations = { .get_link = f2fs_encrypted_get_link, .getattr = f2fs_getattr, .setattr = f2fs_setattr, -#ifdef CONFIG_F2FS_FS_XATTR .listxattr = f2fs_listxattr, -#endif }; const struct inode_operations f2fs_dir_inode_operations = { @@ -1307,9 +1305,7 @@ const struct inode_operations f2fs_dir_inode_operations = { .setattr = f2fs_setattr, .get_acl = f2fs_get_acl, .set_acl = f2fs_set_acl, -#ifdef CONFIG_F2FS_FS_XATTR .listxattr = f2fs_listxattr, -#endif .fiemap = f2fs_fiemap, }; @@ -1317,9 +1313,7 @@ const struct inode_operations f2fs_symlink_inode_operations = { .get_link = f2fs_get_link, .getattr = f2fs_getattr, .setattr = f2fs_setattr, -#ifdef CONFIG_F2FS_FS_XATTR .listxattr = f2fs_listxattr, -#endif }; const struct inode_operations f2fs_special_inode_operations = { @@ -1327,7 +1321,5 @@ const struct inode_operations f2fs_special_inode_operations = { .setattr = f2fs_setattr, .get_acl = f2fs_get_acl, .set_acl = f2fs_set_acl, -#ifdef CONFIG_F2FS_FS_XATTR .listxattr = f2fs_listxattr, -#endif }; diff --git a/fs/f2fs/xattr.h b/fs/f2fs/xattr.h index 6a192e6c7a9eb..416d652774a33 100644 --- a/fs/f2fs/xattr.h +++ b/fs/f2fs/xattr.h @@ -136,6 +136,7 @@ extern void f2fs_destroy_xattr_caches(struct f2fs_sb_info *); #else #define f2fs_xattr_handlers NULL +#define f2fs_listxattr NULL static inline int f2fs_setxattr(struct inode *inode, int index, const char *name, const void *value, size_t size, struct page *page, int flags) @@ -148,11 +149,6 @@ static inline int f2fs_getxattr(struct inode *inode, int index, { return -EOPNOTSUPP; } -static inline ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, - size_t buffer_size) -{ - return -EOPNOTSUPP; -} static inline int f2fs_init_xattr_caches(struct f2fs_sb_info *sbi) { return 0; } static inline void f2fs_destroy_xattr_caches(struct f2fs_sb_info *sbi) { } #endif -- GitLab From 762d961aee4042282b83db557edff305eb8a1713 Mon Sep 17 00:00:00 2001 From: Dinh Nguyen Date: Tue, 12 May 2020 13:16:43 -0500 Subject: [PATCH 0698/1971] clk: socfpga: stratix10: use new parent data scheme Convert, where possible, the stratix10 clock driver to the new parent data scheme by specifying the parent data for clocks that have multiple parents. 
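The gain of the clk_parent_data form is that each parent can be matched either through the consumer's "clocks"/"clock-names" properties (.fw_name) or, as a fallback, by the legacy global clock name (.name). A minimal registration sketch using the same pattern (illustrative only, not taken from this patch):

	static const struct clk_parent_data example_mux[] = {
		{ .fw_name = "osc1", .name = "osc1" },
		{ .fw_name = "f2s-free-clk", .name = "f2s-free-clk" },
	};

	static const struct clk_init_data init = {
		.name		= "example_free_clk",
		.ops		= &clk_mux_ops,		/* whichever ops the clock uses */
		.parent_data	= example_mux,		/* instead of .parent_names */
		.num_parents	= ARRAY_SIZE(example_mux),
	};
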
Signed-off-by: Dinh Nguyen Link: https://lkml.kernel.org/r/20200512181647.5071-1-dinguyen@kernel.org Signed-off-by: Stephen Boyd --- drivers/clk/socfpga/clk-gate-s10.c | 5 +- drivers/clk/socfpga/clk-periph-s10.c | 10 +- drivers/clk/socfpga/clk-pll-s10.c | 4 +- drivers/clk/socfpga/clk-s10.c | 160 ++++++++++++++++++++++----- drivers/clk/socfpga/stratix10-clk.h | 8 +- 5 files changed, 146 insertions(+), 41 deletions(-) diff --git a/drivers/clk/socfpga/clk-gate-s10.c b/drivers/clk/socfpga/clk-gate-s10.c index 8be4722f60649..083b2ec21fdd6 100644 --- a/drivers/clk/socfpga/clk-gate-s10.c +++ b/drivers/clk/socfpga/clk-gate-s10.c @@ -70,7 +70,6 @@ struct clk *s10_register_gate(const struct stratix10_gate_clock *clks, void __io struct clk *clk; struct socfpga_gate_clk *socfpga_clk; struct clk_init_data init; - const char * const *parent_names = clks->parent_names; const char *parent_name = clks->parent_name; socfpga_clk = kzalloc(sizeof(*socfpga_clk), GFP_KERNEL); @@ -108,7 +107,9 @@ struct clk *s10_register_gate(const struct stratix10_gate_clock *clks, void __io init.flags = clks->flags; init.num_parents = clks->num_parents; - init.parent_names = parent_names ? parent_names : &parent_name; + init.parent_names = parent_name ? &parent_name : NULL; + if (init.parent_names == NULL) + init.parent_data = clks->parent_data; socfpga_clk->hw.hw.init = &init; clk = clk_register(NULL, &socfpga_clk->hw.hw); diff --git a/drivers/clk/socfpga/clk-periph-s10.c b/drivers/clk/socfpga/clk-periph-s10.c index dd6d4056e9de1..397b77b89b166 100644 --- a/drivers/clk/socfpga/clk-periph-s10.c +++ b/drivers/clk/socfpga/clk-periph-s10.c @@ -81,7 +81,6 @@ struct clk *s10_register_periph(const struct stratix10_perip_c_clock *clks, struct clk_init_data init; const char *name = clks->name; const char *parent_name = clks->parent_name; - const char * const *parent_names = clks->parent_names; periph_clk = kzalloc(sizeof(*periph_clk), GFP_KERNEL); if (WARN_ON(!periph_clk)) @@ -94,7 +93,9 @@ struct clk *s10_register_periph(const struct stratix10_perip_c_clock *clks, init.flags = clks->flags; init.num_parents = clks->num_parents; - init.parent_names = parent_names ? parent_names : &parent_name; + init.parent_names = parent_name ? &parent_name : NULL; + if (init.parent_names == NULL) + init.parent_data = clks->parent_data; periph_clk->hw.hw.init = &init; @@ -114,7 +115,6 @@ struct clk *s10_register_cnt_periph(const struct stratix10_perip_cnt_clock *clks struct clk_init_data init; const char *name = clks->name; const char *parent_name = clks->parent_name; - const char * const *parent_names = clks->parent_names; periph_clk = kzalloc(sizeof(*periph_clk), GFP_KERNEL); if (WARN_ON(!periph_clk)) @@ -137,7 +137,9 @@ struct clk *s10_register_cnt_periph(const struct stratix10_perip_cnt_clock *clks init.flags = clks->flags; init.num_parents = clks->num_parents; - init.parent_names = parent_names ? parent_names : &parent_name; + init.parent_names = parent_name ? 
&parent_name : NULL; + if (init.parent_names == NULL) + init.parent_data = clks->parent_data; periph_clk->hw.hw.init = &init; diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c index a301bb22f36c0..bcd3f14e9145f 100644 --- a/drivers/clk/socfpga/clk-pll-s10.c +++ b/drivers/clk/socfpga/clk-pll-s10.c @@ -117,7 +117,6 @@ struct clk *s10_register_pll(const struct stratix10_pll_clock *clks, struct socfpga_pll *pll_clk; struct clk_init_data init; const char *name = clks->name; - const char * const *parent_names = clks->parent_names; pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL); if (WARN_ON(!pll_clk)) @@ -134,7 +133,8 @@ struct clk *s10_register_pll(const struct stratix10_pll_clock *clks, init.flags = clks->flags; init.num_parents = clks->num_parents; - init.parent_names = parent_names; + init.parent_names = NULL; + init.parent_data = clks->parent_data; pll_clk->hw.hw.init = &init; pll_clk->hw.bit_idx = SOCFPGA_PLL_POWER; diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c index dea7c6c7d2698..c1dfc9b34e4e9 100644 --- a/drivers/clk/socfpga/clk-s10.c +++ b/drivers/clk/socfpga/clk-s10.c @@ -12,35 +12,137 @@ #include "stratix10-clk.h" -static const char * const pll_mux[] = { "osc1", "cb-intosc-hs-div2-clk", - "f2s-free-clk",}; -static const char * const cntr_mux[] = { "main_pll", "periph_pll", - "osc1", "cb-intosc-hs-div2-clk", - "f2s-free-clk"}; -static const char * const boot_mux[] = { "osc1", "cb-intosc-hs-div2-clk",}; - -static const char * const noc_free_mux[] = {"main_noc_base_clk", - "peri_noc_base_clk", - "osc1", "cb-intosc-hs-div2-clk", - "f2s-free-clk"}; - -static const char * const emaca_free_mux[] = {"peri_emaca_clk", "boot_clk"}; -static const char * const emacb_free_mux[] = {"peri_emacb_clk", "boot_clk"}; -static const char * const emac_ptp_free_mux[] = {"peri_emac_ptp_clk", "boot_clk"}; -static const char * const gpio_db_free_mux[] = {"peri_gpio_db_clk", "boot_clk"}; -static const char * const sdmmc_free_mux[] = {"main_sdmmc_clk", "boot_clk"}; -static const char * const s2f_usr1_free_mux[] = {"peri_s2f_usr1_clk", "boot_clk"}; -static const char * const psi_ref_free_mux[] = {"peri_psi_ref_clk", "boot_clk"}; -static const char * const mpu_mux[] = { "mpu_free_clk", "boot_clk",}; - -static const char * const s2f_usr0_mux[] = {"f2s-free-clk", "boot_clk"}; -static const char * const emac_mux[] = {"emaca_free_clk", "emacb_free_clk"}; -static const char * const noc_mux[] = {"noc_free_clk", "boot_clk"}; - -static const char * const mpu_free_mux[] = {"main_mpu_base_clk", - "peri_mpu_base_clk", - "osc1", "cb-intosc-hs-div2-clk", - "f2s-free-clk"}; +static const struct clk_parent_data pll_mux[] = { + { .fw_name = "osc1", + .name = "osc1" }, + { .fw_name = "cb-intosc-hs-div2-clk", + .name = "cb-intosc-hs-div2-clk" }, + { .fw_name = "f2s-free-clk", + .name = "f2s-free-clk" }, +}; + +static const struct clk_parent_data cntr_mux[] = { + { .fw_name = "main_pll", + .name = "main_pll", }, + { .fw_name = "periph_pll", + .name = "periph_pll", }, + { .fw_name = "osc1", + .name = "osc1", }, + { .fw_name = "cb-intosc-hs-div2-clk", + .name = "cb-intosc-hs-div2-clk", }, + { .fw_name = "f2s-free-clk", + .name = "f2s-free-clk", }, +}; + +static const struct clk_parent_data boot_mux[] = { + { .fw_name = "osc1", + .name = "osc1" }, + { .fw_name = "cb-intosc-hs-div2-clk", + .name = "cb-intosc-hs-div2-clk" }, +}; + +static const struct clk_parent_data noc_free_mux[] = { + { .fw_name = "main_noc_base_clk", + .name = "main_noc_base_clk", }, + { .fw_name = 
"peri_noc_base_clk", + .name = "peri_noc_base_clk", }, + { .fw_name = "osc1", + .name = "osc1", }, + { .fw_name = "cb-intosc-hs-div2-clk", + .name = "cb-intosc-hs-div2-clk", }, + { .fw_name = "f2s-free-clk", + .name = "f2s-free-clk", }, +}; + +static const struct clk_parent_data emaca_free_mux[] = { + { .fw_name = "peri_emaca_clk", + .name = "peri_emaca_clk", }, + { .fw_name = "boot_clk", + .name = "boot_clk", }, +}; + +static const struct clk_parent_data emacb_free_mux[] = { + { .fw_name = "peri_emacb_clk", + .name = "peri_emacb_clk", }, + { .fw_name = "boot_clk", + .name = "boot_clk", }, +}; + +static const struct clk_parent_data emac_ptp_free_mux[] = { + { .fw_name = "peri_emac_ptp_clk", + .name = "peri_emac_ptp_clk", }, + { .fw_name = "boot_clk", + .name = "boot_clk", }, +}; + +static const struct clk_parent_data gpio_db_free_mux[] = { + { .fw_name = "peri_gpio_db_clk", + .name = "peri_gpio_db_clk", }, + { .fw_name = "boot_clk", + .name = "boot_clk", }, +}; + +static const struct clk_parent_data sdmmc_free_mux[] = { + { .fw_name = "main_sdmmc_clk", + .name = "main_sdmmc_clk", }, + { .fw_name = "boot_clk", + .name = "boot_clk", }, +}; + +static const struct clk_parent_data s2f_usr1_free_mux[] = { + { .fw_name = "peri_s2f_usr1_clk", + .name = "peri_s2f_usr1_clk", }, + { .fw_name = "boot_clk", + .name = "boot_clk", }, +}; + +static const struct clk_parent_data psi_ref_free_mux[] = { + { .fw_name = "peri_psi_ref_clk", + .name = "peri_psi_ref_clk", }, + { .fw_name = "boot_clk", + .name = "boot_clk", }, +}; + +static const struct clk_parent_data mpu_mux[] = { + { .fw_name = "mpu_free_clk", + .name = "mpu_free_clk", }, + { .fw_name = "boot_clk", + .name = "boot_clk", }, +}; + +static const struct clk_parent_data s2f_usr0_mux[] = { + { .fw_name = "f2s-free-clk", + .name = "f2s-free-clk", }, + { .fw_name = "boot_clk", + .name = "boot_clk", }, +}; + +static const struct clk_parent_data emac_mux[] = { + { .fw_name = "emaca_free_clk", + .name = "emaca_free_clk", }, + { .fw_name = "emacb_free_clk", + .name = "emacb_free_clk", }, +}; + +static const struct clk_parent_data noc_mux[] = { + { .fw_name = "noc_free_clk", + .name = "noc_free_clk", }, + { .fw_name = "boot_clk", + .name = "boot_clk", }, +}; + +static const struct clk_parent_data mpu_free_mux[] = { + { .fw_name = "main_mpu_base_clk", + .name = "main_mpu_base_clk", }, + { .fw_name = "peri_mpu_base_clk", + .name = "peri_mpu_base_clk", }, + { .fw_name = "osc1", + .name = "osc1", }, + { .fw_name = "cb-intosc-hs-div2-clk", + .name = "cb-intosc-hs-div2-clk", }, + { .fw_name = "f2s-free-clk", + .name = "f2s-free-clk", }, +}; /* clocks in AO (always on) controller */ static const struct stratix10_pll_clock s10_pll_clks[] = { diff --git a/drivers/clk/socfpga/stratix10-clk.h b/drivers/clk/socfpga/stratix10-clk.h index fcabef42249c1..ffbd1fb2c8efb 100644 --- a/drivers/clk/socfpga/stratix10-clk.h +++ b/drivers/clk/socfpga/stratix10-clk.h @@ -14,7 +14,7 @@ struct stratix10_clock_data { struct stratix10_pll_clock { unsigned int id; const char *name; - const char *const *parent_names; + const struct clk_parent_data *parent_data; u8 num_parents; unsigned long flags; unsigned long offset; @@ -24,7 +24,7 @@ struct stratix10_perip_c_clock { unsigned int id; const char *name; const char *parent_name; - const char *const *parent_names; + const struct clk_parent_data *parent_data; u8 num_parents; unsigned long flags; unsigned long offset; @@ -34,7 +34,7 @@ struct stratix10_perip_cnt_clock { unsigned int id; const char *name; const char *parent_name; - const char 
*const *parent_names; + const struct clk_parent_data *parent_data; u8 num_parents; unsigned long flags; unsigned long offset; @@ -47,7 +47,7 @@ struct stratix10_gate_clock { unsigned int id; const char *name; const char *parent_name; - const char *const *parent_names; + const struct clk_parent_data *parent_data; u8 num_parents; unsigned long flags; unsigned long gate_reg; -- GitLab From 535d936f6103caee3daa0fb2be180aaed322a8b9 Mon Sep 17 00:00:00 2001 From: Dinh Nguyen Date: Tue, 12 May 2020 13:16:44 -0500 Subject: [PATCH 0699/1971] clk: socfpga: remove clk_ops enable/disable methods The enable/disable clock ops are already defined in the standard clock ops, so we don't need to assign them. Signed-off-by: Dinh Nguyen Link: https://lkml.kernel.org/r/20200512181647.5071-2-dinguyen@kernel.org Signed-off-by: Stephen Boyd --- drivers/clk/socfpga/clk-pll-a10.c | 2 -- drivers/clk/socfpga/clk-pll-s10.c | 2 -- drivers/clk/socfpga/clk-pll.c | 2 -- 3 files changed, 6 deletions(-) diff --git a/drivers/clk/socfpga/clk-pll-a10.c b/drivers/clk/socfpga/clk-pll-a10.c index 3816fc04b2745..6d9395106c0c9 100644 --- a/drivers/clk/socfpga/clk-pll-a10.c +++ b/drivers/clk/socfpga/clk-pll-a10.c @@ -102,8 +102,6 @@ static struct clk * __init __socfpga_pll_init(struct device_node *node, pll_clk->hw.hw.init = &init; pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA; - clk_pll_ops.enable = clk_gate_ops.enable; - clk_pll_ops.disable = clk_gate_ops.disable; clk = clk_register(NULL, &pll_clk->hw.hw); if (WARN_ON(IS_ERR(clk))) { diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c index bcd3f14e9145f..9faa80ff3b53e 100644 --- a/drivers/clk/socfpga/clk-pll-s10.c +++ b/drivers/clk/socfpga/clk-pll-s10.c @@ -138,8 +138,6 @@ struct clk *s10_register_pll(const struct stratix10_pll_clock *clks, pll_clk->hw.hw.init = &init; pll_clk->hw.bit_idx = SOCFPGA_PLL_POWER; - clk_pll_ops.enable = clk_gate_ops.enable; - clk_pll_ops.disable = clk_gate_ops.disable; clk = clk_register(NULL, &pll_clk->hw.hw); if (WARN_ON(IS_ERR(clk))) { diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c index dc65cc0fd3bd1..a001641b2f421 100644 --- a/drivers/clk/socfpga/clk-pll.c +++ b/drivers/clk/socfpga/clk-pll.c @@ -105,8 +105,6 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node, pll_clk->hw.hw.init = &init; pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA; - clk_pll_ops.enable = clk_gate_ops.enable; - clk_pll_ops.disable = clk_gate_ops.disable; clk = clk_register(NULL, &pll_clk->hw.hw); if (WARN_ON(IS_ERR(clk))) { -- GitLab From d52579ce21e0f8b915cb62501789716677f9264a Mon Sep 17 00:00:00 2001 From: Dinh Nguyen Date: Tue, 12 May 2020 13:16:45 -0500 Subject: [PATCH 0700/1971] clk: socfpga: add const to _ops data structures All the static clk_ops data structure need a const. 
Signed-off-by: Dinh Nguyen Link: https://lkml.kernel.org/r/20200512181647.5071-3-dinguyen@kernel.org Signed-off-by: Stephen Boyd --- drivers/clk/socfpga/clk-pll-a10.c | 2 +- drivers/clk/socfpga/clk-pll-s10.c | 4 ++-- drivers/clk/socfpga/clk-pll.c | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/clk/socfpga/clk-pll-a10.c b/drivers/clk/socfpga/clk-pll-a10.c index 6d9395106c0c9..db54f7d806a09 100644 --- a/drivers/clk/socfpga/clk-pll-a10.c +++ b/drivers/clk/socfpga/clk-pll-a10.c @@ -58,7 +58,7 @@ static u8 clk_pll_get_parent(struct clk_hw *hwclk) CLK_MGR_PLL_CLK_SRC_MASK; } -static struct clk_ops clk_pll_ops = { +static const struct clk_ops clk_pll_ops = { .recalc_rate = clk_pll_recalc_rate, .get_parent = clk_pll_get_parent, }; diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c index 9faa80ff3b53e..5c3e1ee44f6b5 100644 --- a/drivers/clk/socfpga/clk-pll-s10.c +++ b/drivers/clk/socfpga/clk-pll-s10.c @@ -98,13 +98,13 @@ static int clk_pll_prepare(struct clk_hw *hwclk) return 0; } -static struct clk_ops clk_pll_ops = { +static const struct clk_ops clk_pll_ops = { .recalc_rate = clk_pll_recalc_rate, .get_parent = clk_pll_get_parent, .prepare = clk_pll_prepare, }; -static struct clk_ops clk_boot_ops = { +static const struct clk_ops clk_boot_ops = { .recalc_rate = clk_boot_clk_recalc_rate, .get_parent = clk_boot_get_parent, .prepare = clk_pll_prepare, diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c index a001641b2f421..e5fb786843f39 100644 --- a/drivers/clk/socfpga/clk-pll.c +++ b/drivers/clk/socfpga/clk-pll.c @@ -65,7 +65,7 @@ static u8 clk_pll_get_parent(struct clk_hw *hwclk) CLK_MGR_PLL_CLK_SRC_MASK; } -static struct clk_ops clk_pll_ops = { +static const struct clk_ops clk_pll_ops = { .recalc_rate = clk_pll_recalc_rate, .get_parent = clk_pll_get_parent, }; -- GitLab From 6b3c59780ed301f132e3f10ceb49a9984c22da01 Mon Sep 17 00:00:00 2001 From: Dinh Nguyen Date: Tue, 12 May 2020 13:16:46 -0500 Subject: [PATCH 0701/1971] dt-bindings: documentation: add clock bindings information for Agilex Document the Agilex clock bindings, and add the clock header file. The clock header is an enumeration of all the different clocks on the Agilex platform. Signed-off-by: Dinh Nguyen Reviewed-by: Rob Herring Link: https://lkml.kernel.org/r/20200512181647.5071-4-dinguyen@kernel.org Signed-off-by: Stephen Boyd --- .../bindings/clock/intel,agilex.yaml | 46 ++++++++++++ include/dt-bindings/clock/agilex-clock.h | 70 +++++++++++++++++++ 2 files changed, 116 insertions(+) create mode 100644 Documentation/devicetree/bindings/clock/intel,agilex.yaml create mode 100644 include/dt-bindings/clock/agilex-clock.h diff --git a/Documentation/devicetree/bindings/clock/intel,agilex.yaml b/Documentation/devicetree/bindings/clock/intel,agilex.yaml new file mode 100644 index 0000000000000..cf5a9eb803e62 --- /dev/null +++ b/Documentation/devicetree/bindings/clock/intel,agilex.yaml @@ -0,0 +1,46 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/clock/intel,agilex.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Intel SoCFPGA Agilex platform clock controller binding + +maintainers: + - Dinh Nguyen + +description: + The Intel Agilex Clock controller is an integrated clock controller, which + generates and supplies to all modules. 
+ +properties: + compatible: + const: intel,agilex-clkmgr + + '#clock-cells': + const: 1 + + reg: + maxItems: 1 + + clocks: + maxItems: 1 + +required: + - compatible + - reg + - clocks + - '#clock-cells' + +additionalProperties: false + +examples: + # Clock controller node: + - | + clkmgr: clock-controller@ffd10000 { + compatible = "intel,agilex-clkmgr"; + reg = <0xffd10000 0x1000>; + clocks = <&osc1>; + #clock-cells = <1>; + }; +... diff --git a/include/dt-bindings/clock/agilex-clock.h b/include/dt-bindings/clock/agilex-clock.h new file mode 100644 index 0000000000000..f19cf8ccbdd2e --- /dev/null +++ b/include/dt-bindings/clock/agilex-clock.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019, Intel Corporation + */ + +#ifndef __AGILEX_CLOCK_H +#define __AGILEX_CLOCK_H + +/* fixed rate clocks */ +#define AGILEX_OSC1 0 +#define AGILEX_CB_INTOSC_HS_DIV2_CLK 1 +#define AGILEX_CB_INTOSC_LS_CLK 2 +#define AGILEX_L4_SYS_FREE_CLK 3 +#define AGILEX_F2S_FREE_CLK 4 + +/* PLL clocks */ +#define AGILEX_MAIN_PLL_CLK 5 +#define AGILEX_MAIN_PLL_C0_CLK 6 +#define AGILEX_MAIN_PLL_C1_CLK 7 +#define AGILEX_MAIN_PLL_C2_CLK 8 +#define AGILEX_MAIN_PLL_C3_CLK 9 +#define AGILEX_PERIPH_PLL_CLK 10 +#define AGILEX_PERIPH_PLL_C0_CLK 11 +#define AGILEX_PERIPH_PLL_C1_CLK 12 +#define AGILEX_PERIPH_PLL_C2_CLK 13 +#define AGILEX_PERIPH_PLL_C3_CLK 14 +#define AGILEX_MPU_FREE_CLK 15 +#define AGILEX_MPU_CCU_CLK 16 +#define AGILEX_BOOT_CLK 17 + +/* fixed factor clocks */ +#define AGILEX_L3_MAIN_FREE_CLK 18 +#define AGILEX_NOC_FREE_CLK 19 +#define AGILEX_S2F_USR0_CLK 20 +#define AGILEX_NOC_CLK 21 +#define AGILEX_EMAC_A_FREE_CLK 22 +#define AGILEX_EMAC_B_FREE_CLK 23 +#define AGILEX_EMAC_PTP_FREE_CLK 24 +#define AGILEX_GPIO_DB_FREE_CLK 25 +#define AGILEX_SDMMC_FREE_CLK 26 +#define AGILEX_S2F_USER0_FREE_CLK 27 +#define AGILEX_S2F_USER1_FREE_CLK 28 +#define AGILEX_PSI_REF_FREE_CLK 29 + +/* Gate clocks */ +#define AGILEX_MPU_CLK 30 +#define AGILEX_MPU_L2RAM_CLK 31 +#define AGILEX_MPU_PERIPH_CLK 32 +#define AGILEX_L4_MAIN_CLK 33 +#define AGILEX_L4_MP_CLK 34 +#define AGILEX_L4_SP_CLK 35 +#define AGILEX_CS_AT_CLK 36 +#define AGILEX_CS_TRACE_CLK 37 +#define AGILEX_CS_PDBG_CLK 38 +#define AGILEX_CS_TIMER_CLK 39 +#define AGILEX_S2F_USER0_CLK 40 +#define AGILEX_EMAC0_CLK 41 +#define AGILEX_EMAC1_CLK 43 +#define AGILEX_EMAC2_CLK 44 +#define AGILEX_EMAC_PTP_CLK 45 +#define AGILEX_GPIO_DB_CLK 46 +#define AGILEX_NAND_CLK 47 +#define AGILEX_PSI_REF_CLK 48 +#define AGILEX_S2F_USER1_CLK 49 +#define AGILEX_SDMMC_CLK 50 +#define AGILEX_SPI_M_CLK 51 +#define AGILEX_USB_CLK 52 +#define AGILEX_NUM_CLKS 53 + +#endif /* __AGILEX_CLOCK_H */ -- GitLab From 80c6b7a0894ffdf3c781f047479752015e5d5b27 Mon Sep 17 00:00:00 2001 From: Dinh Nguyen Date: Tue, 12 May 2020 13:16:47 -0500 Subject: [PATCH 0702/1971] clk: socfpga: agilex: add clock driver for the Agilex platform For the most part the Agilex clock structure is very similar to Stratix10, so we re-use most of the Stratix10 clock driver. 
Signed-off-by: Dinh Nguyen Link: https://lkml.kernel.org/r/20200512181647.5071-5-dinguyen@kernel.org Signed-off-by: Stephen Boyd --- drivers/clk/Makefile | 3 +- drivers/clk/socfpga/Makefile | 2 + drivers/clk/socfpga/clk-agilex.c | 454 ++++++++++++++++++++++++++++ drivers/clk/socfpga/clk-pll-s10.c | 68 +++++ drivers/clk/socfpga/stratix10-clk.h | 2 + 5 files changed, 528 insertions(+), 1 deletion(-) create mode 100644 drivers/clk/socfpga/clk-agilex.c diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile index f4169cc2fd318..a178e4b6001f3 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile @@ -104,10 +104,11 @@ obj-$(CONFIG_COMMON_CLK_SAMSUNG) += samsung/ obj-$(CONFIG_CLK_SIFIVE) += sifive/ obj-$(CONFIG_ARCH_SIRF) += sirf/ obj-$(CONFIG_ARCH_SOCFPGA) += socfpga/ +obj-$(CONFIG_ARCH_AGILEX) += socfpga/ +obj-$(CONFIG_ARCH_STRATIX10) += socfpga/ obj-$(CONFIG_PLAT_SPEAR) += spear/ obj-$(CONFIG_ARCH_SPRD) += sprd/ obj-$(CONFIG_ARCH_STI) += st/ -obj-$(CONFIG_ARCH_STRATIX10) += socfpga/ obj-$(CONFIG_ARCH_SUNXI) += sunxi/ obj-$(CONFIG_SUNXI_CCU) += sunxi-ng/ obj-$(CONFIG_ARCH_TEGRA) += tegra/ diff --git a/drivers/clk/socfpga/Makefile b/drivers/clk/socfpga/Makefile index ce5aa7802eb8a..bf736f8d201ab 100644 --- a/drivers/clk/socfpga/Makefile +++ b/drivers/clk/socfpga/Makefile @@ -3,3 +3,5 @@ obj-$(CONFIG_ARCH_SOCFPGA) += clk.o clk-gate.o clk-pll.o clk-periph.o obj-$(CONFIG_ARCH_SOCFPGA) += clk-pll-a10.o clk-periph-a10.o clk-gate-a10.o obj-$(CONFIG_ARCH_STRATIX10) += clk-s10.o obj-$(CONFIG_ARCH_STRATIX10) += clk-pll-s10.o clk-periph-s10.o clk-gate-s10.o +obj-$(CONFIG_ARCH_AGILEX) += clk-agilex.o +obj-$(CONFIG_ARCH_AGILEX) += clk-pll-s10.o clk-periph-s10.o clk-gate-s10.o diff --git a/drivers/clk/socfpga/clk-agilex.c b/drivers/clk/socfpga/clk-agilex.c new file mode 100644 index 0000000000000..699527f7e7643 --- /dev/null +++ b/drivers/clk/socfpga/clk-agilex.c @@ -0,0 +1,454 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019, Intel Corporation + */ +#include +#include +#include +#include +#include + +#include + +#include "stratix10-clk.h" + +static const struct clk_parent_data pll_mux[] = { + { .fw_name = "osc1", + .name = "osc1", }, + { .fw_name = "cb-intosc-hs-div2-clk", + .name = "cb-intosc-hs-div2-clk", }, + { .fw_name = "f2s-free-clk", + .name = "f2s-free-clk", }, +}; + +static const struct clk_parent_data cntr_mux[] = { + { .fw_name = "main_pll", + .name = "main_pll", }, + { .fw_name = "periph_pll", + .name = "periph_pll", }, + { .fw_name = "osc1", + .name = "osc1", }, + { .fw_name = "cb-intosc-hs-div2-clk", + .name = "cb-intosc-hs-div2-clk", }, + { .fw_name = "f2s-free-clk", + .name = "f2s-free-clk", }, +}; + +static const struct clk_parent_data boot_mux[] = { + { .fw_name = "osc1", + .name = "osc1", }, + { .fw_name = "cb-intosc-hs-div2-clk", + .name = "cb-intosc-hs-div2-clk", }, +}; + +static const struct clk_parent_data mpu_free_mux[] = { + { .fw_name = "main_pll_c0", + .name = "main_pll_c0", }, + { .fw_name = "peri_pll_c0", + .name = "peri_pll_c0", }, + { .fw_name = "osc1", + .name = "osc1", }, + { .fw_name = "cb-intosc-hs-div2-clk", + .name = "cb-intosc-hs-div2-clk", }, + { .fw_name = "f2s-free-clk", + .name = "f2s-free-clk", }, +}; + +static const struct clk_parent_data noc_free_mux[] = { + { .fw_name = "main_pll_c1", + .name = "main_pll_c1", }, + { .fw_name = "peri_pll_c1", + .name = "peri_pll_c1", }, + { .fw_name = "osc1", + .name = "osc1", }, + { .fw_name = "cb-intosc-hs-div2-clk", + .name = "cb-intosc-hs-div2-clk", }, + { .fw_name = "f2s-free-clk", + .name = 
"f2s-free-clk", }, +}; + +static const struct clk_parent_data emaca_free_mux[] = { + { .fw_name = "main_pll_c2", + .name = "main_pll_c2", }, + { .fw_name = "peri_pll_c2", + .name = "peri_pll_c2", }, + { .fw_name = "osc1", + .name = "osc1", }, + { .fw_name = "cb-intosc-hs-div2-clk", + .name = "cb-intosc-hs-div2-clk", }, + { .fw_name = "f2s-free-clk", + .name = "f2s-free-clk", }, +}; + +static const struct clk_parent_data emacb_free_mux[] = { + { .fw_name = "main_pll_c3", + .name = "main_pll_c3", }, + { .fw_name = "peri_pll_c3", + .name = "peri_pll_c3", }, + { .fw_name = "osc1", + .name = "osc1", }, + { .fw_name = "cb-intosc-hs-div2-clk", + .name = "cb-intosc-hs-div2-clk", }, + { .fw_name = "f2s-free-clk", + .name = "f2s-free-clk", }, +}; + +static const struct clk_parent_data emac_ptp_free_mux[] = { + { .fw_name = "main_pll_c3", + .name = "main_pll_c3", }, + { .fw_name = "peri_pll_c3", + .name = "peri_pll_c3", }, + { .fw_name = "osc1", + .name = "osc1", }, + { .fw_name = "cb-intosc-hs-div2-clk", + .name = "cb-intosc-hs-div2-clk", }, + { .fw_name = "f2s-free-clk", + .name = "f2s-free-clk", }, +}; + +static const struct clk_parent_data gpio_db_free_mux[] = { + { .fw_name = "main_pll_c3", + .name = "main_pll_c3", }, + { .fw_name = "peri_pll_c3", + .name = "peri_pll_c3", }, + { .fw_name = "osc1", + .name = "osc1", }, + { .fw_name = "cb-intosc-hs-div2-clk", + .name = "cb-intosc-hs-div2-clk", }, + { .fw_name = "f2s-free-clk", + .name = "f2s-free-clk", }, +}; + +static const struct clk_parent_data psi_ref_free_mux[] = { + { .fw_name = "main_pll_c3", + .name = "main_pll_c3", }, + { .fw_name = "peri_pll_c3", + .name = "peri_pll_c3", }, + { .fw_name = "osc1", + .name = "osc1", }, + { .fw_name = "cb-intosc-hs-div2-clk", + .name = "cb-intosc-hs-div2-clk", }, + { .fw_name = "f2s-free-clk", + .name = "f2s-free-clk", }, +}; + +static const struct clk_parent_data sdmmc_free_mux[] = { + { .fw_name = "main_pll_c3", + .name = "main_pll_c3", }, + { .fw_name = "peri_pll_c3", + .name = "peri_pll_c3", }, + { .fw_name = "osc1", + .name = "osc1", }, + { .fw_name = "cb-intosc-hs-div2-clk", + .name = "cb-intosc-hs-div2-clk", }, + { .fw_name = "f2s-free-clk", + .name = "f2s-free-clk", }, +}; + +static const struct clk_parent_data s2f_usr0_free_mux[] = { + { .fw_name = "main_pll_c2", + .name = "main_pll_c2", }, + { .fw_name = "peri_pll_c2", + .name = "peri_pll_c2", }, + { .fw_name = "osc1", + .name = "osc1", }, + { .fw_name = "cb-intosc-hs-div2-clk", + .name = "cb-intosc-hs-div2-clk", }, + { .fw_name = "f2s-free-clk", + .name = "f2s-free-clk", }, +}; + +static const struct clk_parent_data s2f_usr1_free_mux[] = { + { .fw_name = "main_pll_c2", + .name = "main_pll_c2", }, + { .fw_name = "peri_pll_c2", + .name = "peri_pll_c2", }, + { .fw_name = "osc1", + .name = "osc1", }, + { .fw_name = "cb-intosc-hs-div2-clk", + .name = "cb-intosc-hs-div2-clk", }, + { .fw_name = "f2s-free-clk", + .name = "f2s-free-clk", }, +}; + +static const struct clk_parent_data mpu_mux[] = { + { .fw_name = "mpu_free_clk", + .name = "mpu_free_clk", }, + { .fw_name = "boot_clk", + .name = "boot_clk", }, +}; + +static const struct clk_parent_data s2f_usr0_mux[] = { + { .fw_name = "f2s-free-clk", + .name = "f2s-free-clk", }, + { .fw_name = "boot_clk", + .name = "boot_clk", }, +}; + +static const struct clk_parent_data emac_mux[] = { + { .fw_name = "emaca_free_clk", + .name = "emaca_free_clk", }, + { .fw_name = "emacb_free_clk", + .name = "emacb_free_clk", }, +}; + +static const struct clk_parent_data noc_mux[] = { + { .fw_name = "noc_free_clk", + .name = 
"noc_free_clk", }, + { .fw_name = "boot_clk", + .name = "boot_clk", }, +}; + +/* clocks in AO (always on) controller */ +static const struct stratix10_pll_clock agilex_pll_clks[] = { + { AGILEX_BOOT_CLK, "boot_clk", boot_mux, ARRAY_SIZE(boot_mux), 0, + 0x0}, + { AGILEX_MAIN_PLL_CLK, "main_pll", pll_mux, ARRAY_SIZE(pll_mux), + 0, 0x48}, + { AGILEX_PERIPH_PLL_CLK, "periph_pll", pll_mux, ARRAY_SIZE(pll_mux), + 0, 0x9c}, +}; + +static const struct stratix10_perip_c_clock agilex_main_perip_c_clks[] = { + { AGILEX_MAIN_PLL_C0_CLK, "main_pll_c0", "main_pll", NULL, 1, 0, 0x58}, + { AGILEX_MAIN_PLL_C1_CLK, "main_pll_c1", "main_pll", NULL, 1, 0, 0x5C}, + { AGILEX_MAIN_PLL_C2_CLK, "main_pll_c2", "main_pll", NULL, 1, 0, 0x64}, + { AGILEX_MAIN_PLL_C3_CLK, "main_pll_c3", "main_pll", NULL, 1, 0, 0x68}, + { AGILEX_PERIPH_PLL_C0_CLK, "peri_pll_c0", "periph_pll", NULL, 1, 0, 0xAC}, + { AGILEX_PERIPH_PLL_C1_CLK, "peri_pll_c1", "periph_pll", NULL, 1, 0, 0xB0}, + { AGILEX_PERIPH_PLL_C2_CLK, "peri_pll_c2", "periph_pll", NULL, 1, 0, 0xB8}, + { AGILEX_PERIPH_PLL_C3_CLK, "peri_pll_c3", "periph_pll", NULL, 1, 0, 0xBC}, +}; + +static const struct stratix10_perip_cnt_clock agilex_main_perip_cnt_clks[] = { + { AGILEX_MPU_FREE_CLK, "mpu_free_clk", NULL, mpu_free_mux, ARRAY_SIZE(mpu_free_mux), + 0, 0x3C, 0, 0, 0}, + { AGILEX_NOC_FREE_CLK, "noc_free_clk", NULL, noc_free_mux, ARRAY_SIZE(noc_free_mux), + 0, 0x40, 0, 0, 1}, + { AGILEX_L4_SYS_FREE_CLK, "l4_sys_free_clk", "noc_free_clk", NULL, 1, 0, + 0, 4, 0, 0}, + { AGILEX_NOC_CLK, "noc_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), + 0, 0, 0, 0x30, 1}, + { AGILEX_EMAC_A_FREE_CLK, "emaca_free_clk", NULL, emaca_free_mux, ARRAY_SIZE(emaca_free_mux), + 0, 0xD4, 0, 0x88, 0}, + { AGILEX_EMAC_B_FREE_CLK, "emacb_free_clk", NULL, emacb_free_mux, ARRAY_SIZE(emacb_free_mux), + 0, 0xD8, 0, 0x88, 1}, + { AGILEX_EMAC_PTP_FREE_CLK, "emac_ptp_free_clk", NULL, emac_ptp_free_mux, + ARRAY_SIZE(emac_ptp_free_mux), 0, 0xDC, 0, 0x88, 2}, + { AGILEX_GPIO_DB_FREE_CLK, "gpio_db_free_clk", NULL, gpio_db_free_mux, + ARRAY_SIZE(gpio_db_free_mux), 0, 0xE0, 0, 0x88, 3}, + { AGILEX_SDMMC_FREE_CLK, "sdmmc_free_clk", NULL, sdmmc_free_mux, + ARRAY_SIZE(sdmmc_free_mux), 0, 0xE4, 0, 0x88, 4}, + { AGILEX_S2F_USER0_FREE_CLK, "s2f_user0_free_clk", NULL, s2f_usr0_free_mux, + ARRAY_SIZE(s2f_usr0_free_mux), 0, 0xE8, 0, 0, 0}, + { AGILEX_S2F_USER1_FREE_CLK, "s2f_user1_free_clk", NULL, s2f_usr1_free_mux, + ARRAY_SIZE(s2f_usr1_free_mux), 0, 0xEC, 0, 0x88, 5}, + { AGILEX_PSI_REF_FREE_CLK, "psi_ref_free_clk", NULL, psi_ref_free_mux, + ARRAY_SIZE(psi_ref_free_mux), 0, 0xF0, 0, 0x88, 6}, +}; + +static const struct stratix10_gate_clock agilex_gate_clks[] = { + { AGILEX_MPU_CLK, "mpu_clk", NULL, mpu_mux, ARRAY_SIZE(mpu_mux), 0, 0x24, + 0, 0, 0, 0, 0x30, 0, 0}, + { AGILEX_MPU_PERIPH_CLK, "mpu_periph_clk", "mpu_clk", NULL, 1, 0, 0x24, + 0, 0, 0, 0, 0, 0, 4}, + { AGILEX_MPU_L2RAM_CLK, "mpu_l2ram_clk", "mpu_clk", NULL, 1, 0, 0x24, + 0, 0, 0, 0, 0, 0, 2}, + { AGILEX_L4_MAIN_CLK, "l4_main_clk", "noc_clk", NULL, 1, 0, 0x24, + 1, 0x44, 0, 2, 0, 0, 0}, + { AGILEX_L4_MP_CLK, "l4_mp_clk", "noc_clk", NULL, 1, 0, 0x24, + 2, 0x44, 8, 2, 0, 0, 0}, + /* + * The l4_sp_clk feeds a 100 MHz clock to various peripherals, one of them + * being the SP timers, thus cannot get gated. 
+ */ + { AGILEX_L4_SP_CLK, "l4_sp_clk", "noc_clk", NULL, 1, CLK_IS_CRITICAL, 0x24, + 3, 0x44, 16, 2, 0, 0, 0}, + { AGILEX_CS_AT_CLK, "cs_at_clk", "noc_clk", NULL, 1, 0, 0x24, + 4, 0x44, 24, 2, 0, 0, 0}, + { AGILEX_CS_TRACE_CLK, "cs_trace_clk", "noc_clk", NULL, 1, 0, 0x24, + 4, 0x44, 26, 2, 0, 0, 0}, + { AGILEX_CS_PDBG_CLK, "cs_pdbg_clk", "cs_at_clk", NULL, 1, 0, 0x24, + 4, 0x44, 28, 1, 0, 0, 0}, + { AGILEX_CS_TIMER_CLK, "cs_timer_clk", "noc_clk", NULL, 1, 0, 0x24, + 5, 0, 0, 0, 0, 0, 0}, + { AGILEX_S2F_USER0_CLK, "s2f_user0_clk", NULL, s2f_usr0_mux, ARRAY_SIZE(s2f_usr0_mux), 0, 0x24, + 6, 0, 0, 0, 0, 0, 0}, + { AGILEX_EMAC0_CLK, "emac0_clk", NULL, emac_mux, ARRAY_SIZE(emac_mux), 0, 0x7C, + 0, 0, 0, 0, 0x94, 26, 0}, + { AGILEX_EMAC1_CLK, "emac1_clk", NULL, emac_mux, ARRAY_SIZE(emac_mux), 0, 0x7C, + 1, 0, 0, 0, 0x94, 27, 0}, + { AGILEX_EMAC2_CLK, "emac2_clk", NULL, emac_mux, ARRAY_SIZE(emac_mux), 0, 0x7C, + 2, 0, 0, 0, 0x94, 28, 0}, + { AGILEX_EMAC_PTP_CLK, "emac_ptp_clk", "emac_ptp_free_clk", NULL, 1, 0, 0x7C, + 3, 0, 0, 0, 0, 0, 0}, + { AGILEX_GPIO_DB_CLK, "gpio_db_clk", "gpio_db_free_clk", NULL, 1, 0, 0x7C, + 4, 0x98, 0, 16, 0, 0, 0}, + { AGILEX_SDMMC_CLK, "sdmmc_clk", "sdmmc_free_clk", NULL, 1, 0, 0x7C, + 5, 0, 0, 0, 0, 0, 4}, + { AGILEX_S2F_USER1_CLK, "s2f_user1_clk", "s2f_user1_free_clk", NULL, 1, 0, 0x7C, + 6, 0, 0, 0, 0, 0, 0}, + { AGILEX_PSI_REF_CLK, "psi_ref_clk", "psi_ref_free_clk", NULL, 1, 0, 0x7C, + 7, 0, 0, 0, 0, 0, 0}, + { AGILEX_USB_CLK, "usb_clk", "l4_mp_clk", NULL, 1, 0, 0x7C, + 8, 0, 0, 0, 0, 0, 0}, + { AGILEX_SPI_M_CLK, "spi_m_clk", "l4_mp_clk", NULL, 1, 0, 0x7C, + 9, 0, 0, 0, 0, 0, 0}, + { AGILEX_NAND_CLK, "nand_clk", "l4_main_clk", NULL, 1, 0, 0x7C, + 10, 0, 0, 0, 0, 0, 0}, +}; + +static int agilex_clk_register_c_perip(const struct stratix10_perip_c_clock *clks, + int nums, struct stratix10_clock_data *data) +{ + struct clk *clk; + void __iomem *base = data->base; + int i; + + for (i = 0; i < nums; i++) { + clk = s10_register_periph(&clks[i], base); + if (IS_ERR(clk)) { + pr_err("%s: failed to register clock %s\n", + __func__, clks[i].name); + continue; + } + data->clk_data.clks[clks[i].id] = clk; + } + return 0; +} + +static int agilex_clk_register_cnt_perip(const struct stratix10_perip_cnt_clock *clks, + int nums, struct stratix10_clock_data *data) +{ + struct clk *clk; + void __iomem *base = data->base; + int i; + + for (i = 0; i < nums; i++) { + clk = s10_register_cnt_periph(&clks[i], base); + if (IS_ERR(clk)) { + pr_err("%s: failed to register clock %s\n", + __func__, clks[i].name); + continue; + } + data->clk_data.clks[clks[i].id] = clk; + } + + return 0; +} + +static int agilex_clk_register_gate(const struct stratix10_gate_clock *clks, int nums, struct stratix10_clock_data *data) +{ + struct clk *clk; + void __iomem *base = data->base; + int i; + + for (i = 0; i < nums; i++) { + clk = s10_register_gate(&clks[i], base); + if (IS_ERR(clk)) { + pr_err("%s: failed to register clock %s\n", + __func__, clks[i].name); + continue; + } + data->clk_data.clks[clks[i].id] = clk; + } + + return 0; +} + +static int agilex_clk_register_pll(const struct stratix10_pll_clock *clks, + int nums, struct stratix10_clock_data *data) +{ + struct clk *clk; + void __iomem *base = data->base; + int i; + + for (i = 0; i < nums; i++) { + clk = agilex_register_pll(&clks[i], base); + if (IS_ERR(clk)) { + pr_err("%s: failed to register clock %s\n", + __func__, clks[i].name); + continue; + } + data->clk_data.clks[clks[i].id] = clk; + } + + return 0; +} + +static struct stratix10_clock_data 
*__socfpga_agilex_clk_init(struct platform_device *pdev, + int nr_clks) +{ + struct device_node *np = pdev->dev.of_node; + struct device *dev = &pdev->dev; + struct stratix10_clock_data *clk_data; + struct clk **clk_table; + struct resource *res; + void __iomem *base; + int ret; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(dev, res); + if (IS_ERR(base)) + return ERR_CAST(base); + + clk_data = devm_kzalloc(dev, sizeof(*clk_data), GFP_KERNEL); + if (!clk_data) + return ERR_PTR(-ENOMEM); + + clk_data->base = base; + clk_table = devm_kcalloc(dev, nr_clks, sizeof(*clk_table), GFP_KERNEL); + if (!clk_table) + return ERR_PTR(-ENOMEM); + + clk_data->clk_data.clks = clk_table; + clk_data->clk_data.clk_num = nr_clks; + ret = of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data->clk_data); + if (ret) + return ERR_PTR(ret); + + return clk_data; +} + +static int agilex_clkmgr_probe(struct platform_device *pdev) +{ + struct stratix10_clock_data *clk_data; + + clk_data = __socfpga_agilex_clk_init(pdev, AGILEX_NUM_CLKS); + if (IS_ERR(clk_data)) + return PTR_ERR(clk_data); + + agilex_clk_register_pll(agilex_pll_clks, ARRAY_SIZE(agilex_pll_clks), clk_data); + + agilex_clk_register_c_perip(agilex_main_perip_c_clks, + ARRAY_SIZE(agilex_main_perip_c_clks), clk_data); + + agilex_clk_register_cnt_perip(agilex_main_perip_cnt_clks, + ARRAY_SIZE(agilex_main_perip_cnt_clks), + clk_data); + + agilex_clk_register_gate(agilex_gate_clks, ARRAY_SIZE(agilex_gate_clks), + clk_data); + return 0; +} + +static const struct of_device_id agilex_clkmgr_match_table[] = { + { .compatible = "intel,agilex-clkmgr", + .data = agilex_clkmgr_probe }, + { } +}; + +static struct platform_driver agilex_clkmgr_driver = { + .probe = agilex_clkmgr_probe, + .driver = { + .name = "agilex-clkmgr", + .suppress_bind_attrs = true, + .of_match_table = agilex_clkmgr_match_table, + }, +}; + +static int __init agilex_clk_init(void) +{ + return platform_driver_register(&agilex_clkmgr_driver); +} +core_initcall(agilex_clk_init); diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c index 5c3e1ee44f6b5..4e268953b7da2 100644 --- a/drivers/clk/socfpga/clk-pll-s10.c +++ b/drivers/clk/socfpga/clk-pll-s10.c @@ -18,8 +18,12 @@ #define SOCFPGA_PLL_RESET_MASK 0x2 #define SOCFPGA_PLL_REFDIV_MASK 0x00003F00 #define SOCFPGA_PLL_REFDIV_SHIFT 8 +#define SOCFPGA_PLL_AREFDIV_MASK 0x00000F00 +#define SOCFPGA_PLL_DREFDIV_MASK 0x00003000 +#define SOCFPGA_PLL_DREFDIV_SHIFT 12 #define SOCFPGA_PLL_MDIV_MASK 0xFF000000 #define SOCFPGA_PLL_MDIV_SHIFT 24 +#define SOCFPGA_AGILEX_PLL_MDIV_MASK 0x000003FF #define SWCTRLBTCLKSEL_MASK 0x200 #define SWCTRLBTCLKSEL_SHIFT 9 @@ -27,6 +31,27 @@ #define to_socfpga_clk(p) container_of(p, struct socfpga_pll, hw.hw) +static unsigned long agilex_clk_pll_recalc_rate(struct clk_hw *hwclk, + unsigned long parent_rate) +{ + struct socfpga_pll *socfpgaclk = to_socfpga_clk(hwclk); + unsigned long arefdiv, reg, mdiv; + unsigned long long vco_freq; + + /* read VCO1 reg for numerator and denominator */ + reg = readl(socfpgaclk->hw.reg); + arefdiv = (reg & SOCFPGA_PLL_AREFDIV_MASK) >> SOCFPGA_PLL_REFDIV_SHIFT; + + vco_freq = (unsigned long long)parent_rate / arefdiv; + + /* Read mdiv and fdiv from the fdbck register */ + reg = readl(socfpgaclk->hw.reg + 0x24); + mdiv = reg & SOCFPGA_AGILEX_PLL_MDIV_MASK; + + vco_freq = (unsigned long long)vco_freq * mdiv; + return (unsigned long)vco_freq; +} + static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk, unsigned long 
parent_rate) { @@ -98,6 +123,12 @@ static int clk_pll_prepare(struct clk_hw *hwclk) return 0; } +static const struct clk_ops agilex_clk_pll_ops = { + .recalc_rate = agilex_clk_pll_recalc_rate, + .get_parent = clk_pll_get_parent, + .prepare = clk_pll_prepare, +}; + static const struct clk_ops clk_pll_ops = { .recalc_rate = clk_pll_recalc_rate, .get_parent = clk_pll_get_parent, @@ -146,3 +177,40 @@ struct clk *s10_register_pll(const struct stratix10_pll_clock *clks, } return clk; } + +struct clk *agilex_register_pll(const struct stratix10_pll_clock *clks, + void __iomem *reg) +{ + struct clk *clk; + struct socfpga_pll *pll_clk; + struct clk_init_data init; + const char *name = clks->name; + + pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL); + if (WARN_ON(!pll_clk)) + return NULL; + + pll_clk->hw.reg = reg + clks->offset; + + if (streq(name, SOCFPGA_BOOT_CLK)) + init.ops = &clk_boot_ops; + else + init.ops = &agilex_clk_pll_ops; + + init.name = name; + init.flags = clks->flags; + + init.num_parents = clks->num_parents; + init.parent_names = NULL; + init.parent_data = clks->parent_data; + pll_clk->hw.hw.init = &init; + + pll_clk->hw.bit_idx = SOCFPGA_PLL_POWER; + + clk = clk_register(NULL, &pll_clk->hw.hw); + if (WARN_ON(IS_ERR(clk))) { + kfree(pll_clk); + return NULL; + } + return clk; +} diff --git a/drivers/clk/socfpga/stratix10-clk.h b/drivers/clk/socfpga/stratix10-clk.h index ffbd1fb2c8efb..f9d5d724c6949 100644 --- a/drivers/clk/socfpga/stratix10-clk.h +++ b/drivers/clk/socfpga/stratix10-clk.h @@ -62,6 +62,8 @@ struct stratix10_gate_clock { struct clk *s10_register_pll(const struct stratix10_pll_clock *, void __iomem *); +struct clk *agilex_register_pll(const struct stratix10_pll_clock *, + void __iomem *); struct clk *s10_register_periph(const struct stratix10_perip_c_clock *, void __iomem *); struct clk *s10_register_cnt_periph(const struct stratix10_perip_cnt_clock *, -- GitLab From 1b70061f5939ff1cacd728821b4f378cb0fb7961 Mon Sep 17 00:00:00 2001 From: Taniya Das Date: Sun, 17 May 2020 15:34:19 +0530 Subject: [PATCH 0703/1971] clk: qcom: gcc: Add support for a new frequency for SC7180 There is a requirement to support 51.2MHz from GPLL6 for qup clocks, thus update the frequency table and parent data/map to use the GPLL6 source PLL. 
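For reference, with the M/N counter unused (m = n = 0) an RCG2 output is simply the selected source divided by the half-integer pre-divider, so the new F(51200000, P_GPLL6_OUT_MAIN, 7.5, 0, 0) row implies that the GPLL6 branch feeding these RCGs runs at 51.2 MHz x 7.5 = 384 MHz. A small illustrative check (not part of the patch), expressing the 7.5 divider with integer math:

/* Illustrative only: the new 51.2 MHz table entry as integer arithmetic. */
static unsigned long qup_rate_from_gpll6(unsigned long gpll6_branch_hz)
{
	/* source / 7.5 == source * 2 / 15; 384000000 * 2 / 15 == 51200000 */
	return gpll6_branch_hz * 2 / 15;
}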
Fixes: 17269568f7267 ("clk: qcom: Add Global Clock controller (GCC) driver for SC7180") Signed-off-by: Taniya Das Link: https://lkml.kernel.org/r/1589709861-27580-2-git-send-email-tdas@codeaurora.org Signed-off-by: Stephen Boyd --- drivers/clk/qcom/gcc-sc7180.c | 73 ++++++++++++++++++----------------- 1 file changed, 37 insertions(+), 36 deletions(-) diff --git a/drivers/clk/qcom/gcc-sc7180.c b/drivers/clk/qcom/gcc-sc7180.c index 6a51b5b5fc19e..73380525cb091 100644 --- a/drivers/clk/qcom/gcc-sc7180.c +++ b/drivers/clk/qcom/gcc-sc7180.c @@ -390,6 +390,7 @@ static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = { F(29491200, P_GPLL0_OUT_EVEN, 1, 1536, 15625), F(32000000, P_GPLL0_OUT_EVEN, 1, 8, 75), F(48000000, P_GPLL0_OUT_EVEN, 1, 4, 25), + F(51200000, P_GPLL6_OUT_MAIN, 7.5, 0, 0), F(64000000, P_GPLL0_OUT_EVEN, 1, 16, 75), F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0), F(80000000, P_GPLL0_OUT_EVEN, 1, 4, 15), @@ -405,8 +406,8 @@ static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = { static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = { .name = "gcc_qupv3_wrap0_s0_clk_src", - .parent_data = gcc_parent_data_0, - .num_parents = 4, + .parent_data = gcc_parent_data_1, + .num_parents = ARRAY_SIZE(gcc_parent_data_1), .ops = &clk_rcg2_ops, }; @@ -414,15 +415,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = { .cmd_rcgr = 0x17034, .mnd_width = 16, .hid_width = 5, - .parent_map = gcc_parent_map_0, + .parent_map = gcc_parent_map_1, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init, }; static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = { .name = "gcc_qupv3_wrap0_s1_clk_src", - .parent_data = gcc_parent_data_0, - .num_parents = 4, + .parent_data = gcc_parent_data_1, + .num_parents = ARRAY_SIZE(gcc_parent_data_1), .ops = &clk_rcg2_ops, }; @@ -430,15 +431,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = { .cmd_rcgr = 0x17164, .mnd_width = 16, .hid_width = 5, - .parent_map = gcc_parent_map_0, + .parent_map = gcc_parent_map_1, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init, }; static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = { .name = "gcc_qupv3_wrap0_s2_clk_src", - .parent_data = gcc_parent_data_0, - .num_parents = 4, + .parent_data = gcc_parent_data_1, + .num_parents = ARRAY_SIZE(gcc_parent_data_1), .ops = &clk_rcg2_ops, }; @@ -446,15 +447,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = { .cmd_rcgr = 0x17294, .mnd_width = 16, .hid_width = 5, - .parent_map = gcc_parent_map_0, + .parent_map = gcc_parent_map_1, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init, }; static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = { .name = "gcc_qupv3_wrap0_s3_clk_src", - .parent_data = gcc_parent_data_0, - .num_parents = 4, + .parent_data = gcc_parent_data_1, + .num_parents = ARRAY_SIZE(gcc_parent_data_1), .ops = &clk_rcg2_ops, }; @@ -462,15 +463,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = { .cmd_rcgr = 0x173c4, .mnd_width = 16, .hid_width = 5, - .parent_map = gcc_parent_map_0, + .parent_map = gcc_parent_map_1, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init, }; static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = { .name = "gcc_qupv3_wrap0_s4_clk_src", - .parent_data = gcc_parent_data_0, - .num_parents = 4, + .parent_data = gcc_parent_data_1, + .num_parents = ARRAY_SIZE(gcc_parent_data_1), .ops = &clk_rcg2_ops, }; @@ -478,15 +479,15 @@ 
static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = { .cmd_rcgr = 0x174f4, .mnd_width = 16, .hid_width = 5, - .parent_map = gcc_parent_map_0, + .parent_map = gcc_parent_map_1, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init, }; static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = { .name = "gcc_qupv3_wrap0_s5_clk_src", - .parent_data = gcc_parent_data_0, - .num_parents = 4, + .parent_data = gcc_parent_data_1, + .num_parents = ARRAY_SIZE(gcc_parent_data_1), .ops = &clk_rcg2_ops, }; @@ -494,15 +495,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = { .cmd_rcgr = 0x17624, .mnd_width = 16, .hid_width = 5, - .parent_map = gcc_parent_map_0, + .parent_map = gcc_parent_map_1, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init, }; static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = { .name = "gcc_qupv3_wrap1_s0_clk_src", - .parent_data = gcc_parent_data_0, - .num_parents = 4, + .parent_data = gcc_parent_data_1, + .num_parents = ARRAY_SIZE(gcc_parent_data_1), .ops = &clk_rcg2_ops, }; @@ -510,15 +511,15 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = { .cmd_rcgr = 0x18018, .mnd_width = 16, .hid_width = 5, - .parent_map = gcc_parent_map_0, + .parent_map = gcc_parent_map_1, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init, }; static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = { .name = "gcc_qupv3_wrap1_s1_clk_src", - .parent_data = gcc_parent_data_0, - .num_parents = 4, + .parent_data = gcc_parent_data_1, + .num_parents = ARRAY_SIZE(gcc_parent_data_1), .ops = &clk_rcg2_ops, }; @@ -526,15 +527,15 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = { .cmd_rcgr = 0x18148, .mnd_width = 16, .hid_width = 5, - .parent_map = gcc_parent_map_0, + .parent_map = gcc_parent_map_1, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init, }; static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = { .name = "gcc_qupv3_wrap1_s2_clk_src", - .parent_data = gcc_parent_data_0, - .num_parents = 4, + .parent_data = gcc_parent_data_1, + .num_parents = ARRAY_SIZE(gcc_parent_data_1), .ops = &clk_rcg2_ops, }; @@ -542,15 +543,15 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = { .cmd_rcgr = 0x18278, .mnd_width = 16, .hid_width = 5, - .parent_map = gcc_parent_map_0, + .parent_map = gcc_parent_map_1, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, .clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init, }; static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = { .name = "gcc_qupv3_wrap1_s3_clk_src", - .parent_data = gcc_parent_data_0, - .num_parents = 4, + .parent_data = gcc_parent_data_1, + .num_parents = ARRAY_SIZE(gcc_parent_data_1), .ops = &clk_rcg2_ops, }; @@ -558,15 +559,15 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = { .cmd_rcgr = 0x183a8, .mnd_width = 16, .hid_width = 5, - .parent_map = gcc_parent_map_0, + .parent_map = gcc_parent_map_1, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init, }; static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = { .name = "gcc_qupv3_wrap1_s4_clk_src", - .parent_data = gcc_parent_data_0, - .num_parents = 4, + .parent_data = gcc_parent_data_1, + .num_parents = ARRAY_SIZE(gcc_parent_data_1), .ops = &clk_rcg2_ops, }; @@ -574,15 +575,15 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = { .cmd_rcgr = 0x184d8, .mnd_width = 16, .hid_width = 5, - .parent_map = gcc_parent_map_0, + .parent_map = gcc_parent_map_1, 
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init, }; static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = { .name = "gcc_qupv3_wrap1_s5_clk_src", - .parent_data = gcc_parent_data_0, - .num_parents = 4, + .parent_data = gcc_parent_data_1, + .num_parents = ARRAY_SIZE(gcc_parent_data_1), .ops = &clk_rcg2_ops, }; @@ -590,7 +591,7 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = { .cmd_rcgr = 0x18608, .mnd_width = 16, .hid_width = 5, - .parent_map = gcc_parent_map_0, + .parent_map = gcc_parent_map_1, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init, }; -- GitLab From 3005b17c5e2f426576530e3a69af2f7df16efb60 Mon Sep 17 00:00:00 2001 From: Taniya Das Date: Sun, 17 May 2020 15:34:20 +0530 Subject: [PATCH 0704/1971] dt-bindings: clock: Add gcc_sec_ctrl_clk_src clock ID The gcc_sec_ctrl_clk_src clock is required to be controlled by the secure controller driver. Signed-off-by: Taniya Das Link: https://lkml.kernel.org/r/1589709861-27580-3-git-send-email-tdas@codeaurora.org Signed-off-by: Stephen Boyd --- include/dt-bindings/clock/qcom,gcc-sc7180.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/dt-bindings/clock/qcom,gcc-sc7180.h b/include/dt-bindings/clock/qcom,gcc-sc7180.h index 1258fd05db688..992b67b7e5e4d 100644 --- a/include/dt-bindings/clock/qcom,gcc-sc7180.h +++ b/include/dt-bindings/clock/qcom,gcc-sc7180.h @@ -137,6 +137,7 @@ #define GCC_MSS_NAV_AXI_CLK 127 #define GCC_MSS_Q6_MEMNOC_AXI_CLK 128 #define GCC_MSS_SNOC_AXI_CLK 129 +#define GCC_SEC_CTRL_CLK_SRC 130 /* GCC resets */ #define GCC_QUSB2PHY_PRIM_BCR 0 -- GitLab From bd4bb225eb3a149d37098cf0e17cd5170d863a37 Mon Sep 17 00:00:00 2001 From: Taniya Das Date: Sun, 17 May 2020 15:34:21 +0530 Subject: [PATCH 0705/1971] clk: qcom: gcc: Add support for Secure control source clock The secure controller driver requires to request for various frequencies on the source clock, thus add support for the same. 
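As an illustration of how that request would look from the consumer side (not part of this patch), the secure controller driver would pick one of the rates in the new frequency table, 4.8 MHz or 19.2 MHz, both derived from BI_TCXO, through the standard clk API. The "sec_ctrl" clock-names string and the helper below are assumptions made for the example:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/*
 * Hypothetical secure-controller consumer: request a supported rate on the
 * new gcc_sec_ctrl_clk_src and enable it.
 */
static int sec_ctrl_clk_setup(struct device *dev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(dev, "sec_ctrl");	/* assumed clock-names entry */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_set_rate(clk, 19200000);	/* one of the two table rates */
	if (ret)
		return ret;

	return clk_prepare_enable(clk);
}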
Signed-off-by: Taniya Das Link: https://lkml.kernel.org/r/1589709861-27580-4-git-send-email-tdas@codeaurora.org Signed-off-by: Stephen Boyd --- drivers/clk/qcom/gcc-sc7180.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/drivers/clk/qcom/gcc-sc7180.c b/drivers/clk/qcom/gcc-sc7180.c index 73380525cb091..ca4383e3a02a7 100644 --- a/drivers/clk/qcom/gcc-sc7180.c +++ b/drivers/clk/qcom/gcc-sc7180.c @@ -817,6 +817,26 @@ static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = { }, }; +static const struct freq_tbl ftbl_gcc_sec_ctrl_clk_src[] = { + F(4800000, P_BI_TCXO, 4, 0, 0), + F(19200000, P_BI_TCXO, 1, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_sec_ctrl_clk_src = { + .cmd_rcgr = 0x3d030, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_3, + .freq_tbl = ftbl_gcc_sec_ctrl_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_sec_ctrl_clk_src", + .parent_data = gcc_parent_data_3, + .num_parents = ARRAY_SIZE(gcc_parent_data_3), + .ops = &clk_rcg2_ops, + }, +}; + static struct clk_branch gcc_aggre_ufs_phy_axi_clk = { .halt_reg = 0x82024, .halt_check = BRANCH_HALT_DELAY, @@ -2407,6 +2427,7 @@ static struct clk_regmap *gcc_sc7180_clocks[] = { [GCC_MSS_NAV_AXI_CLK] = &gcc_mss_nav_axi_clk.clkr, [GCC_MSS_Q6_MEMNOC_AXI_CLK] = &gcc_mss_q6_memnoc_axi_clk.clkr, [GCC_MSS_SNOC_AXI_CLK] = &gcc_mss_snoc_axi_clk.clkr, + [GCC_SEC_CTRL_CLK_SRC] = &gcc_sec_ctrl_clk_src.clkr, }; static const struct qcom_reset_map gcc_sc7180_resets[] = { -- GitLab From 1664014e467923992f6f1cdb73b85fe1bb9a5f01 Mon Sep 17 00:00:00 2001 From: Bryan O'Donoghue Date: Sun, 17 May 2020 14:13:48 +0100 Subject: [PATCH 0706/1971] clk: qcom: gcc-msm8939: Add MSM8939 Generic Clock Controller This patch adds support for the MSM8939 GCC. The MSM8939 is based on the MSM8916. MSM8939 is compatible in several ways with MSM8916 but, has additional functional blocks added which require additional PLL sources. In some cases functional blocks from the MSM8916 have different clock sources or different supported frequencies. Cc: Andy Gross Cc: Bjorn Andersson Cc: Michael Turquette Cc: Stephen Boyd Cc: Rob Herring Cc: Philipp Zabel Cc: linux-arm-msm@vger.kernel.org Cc: linux-clk@vger.kernel.org Cc: linux-kernel@vger.kernel.org Cc: devicetree@vger.kernel.org Co-developed-by: Shawn Guo Signed-off-by: Shawn Guo Tested-by: Vincent Knecht Signed-off-by: Bryan O'Donoghue Link: https://lkml.kernel.org/r/20200517131348.688405-3-bryan.odonoghue@linaro.org Tested-by: Konrad Dybcio [sboyd@kernel.org: Drop ret in probe function to remove unused variable] Signed-off-by: Stephen Boyd --- drivers/clk/qcom/Kconfig | 8 + drivers/clk/qcom/Makefile | 1 + drivers/clk/qcom/gcc-msm8939.c | 3988 ++++++++++++++++++++++++++++++++ 3 files changed, 3997 insertions(+) create mode 100644 drivers/clk/qcom/gcc-msm8939.c diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig index 11ec6f4664678..54c4e3a026365 100644 --- a/drivers/clk/qcom/Kconfig +++ b/drivers/clk/qcom/Kconfig @@ -142,6 +142,14 @@ config MSM_GCC_8916 Say Y if you want to use devices such as UART, SPI i2c, USB, SD/eMMC, display, graphics, camera etc. +config MSM_GCC_8939 + tristate "MSM8939 Global Clock Controller" + select QCOM_GDSC + help + Support for the global clock controller on msm8939 devices. + Say Y if you want to use devices such as UART, SPI i2c, USB, + SD/eMMC, display, graphics, camera etc. 
+ config MSM_GCC_8960 tristate "APQ8064/MSM8960 Global Clock Controller" help diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile index 691efbf7e81f0..7ec8561a1270b 100644 --- a/drivers/clk/qcom/Makefile +++ b/drivers/clk/qcom/Makefile @@ -28,6 +28,7 @@ obj-$(CONFIG_MDM_GCC_9615) += gcc-mdm9615.o obj-$(CONFIG_MDM_LCC_9615) += lcc-mdm9615.o obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o obj-$(CONFIG_MSM_GCC_8916) += gcc-msm8916.o +obj-$(CONFIG_MSM_GCC_8939) += gcc-msm8939.o obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o obj-$(CONFIG_MSM_GCC_8994) += gcc-msm8994.o diff --git a/drivers/clk/qcom/gcc-msm8939.c b/drivers/clk/qcom/gcc-msm8939.c new file mode 100644 index 0000000000000..778354f82b1e3 --- /dev/null +++ b/drivers/clk/qcom/gcc-msm8939.c @@ -0,0 +1,3988 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2020 Linaro Limited + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "common.h" +#include "clk-regmap.h" +#include "clk-pll.h" +#include "clk-rcg.h" +#include "clk-branch.h" +#include "reset.h" +#include "gdsc.h" + +enum { + P_XO, + P_GPLL0, + P_GPLL0_AUX, + P_BIMC, + P_GPLL1, + P_GPLL1_AUX, + P_GPLL2, + P_GPLL2_AUX, + P_GPLL3, + P_GPLL3_AUX, + P_GPLL4, + P_GPLL5, + P_GPLL5_AUX, + P_GPLL5_EARLY, + P_GPLL6, + P_GPLL6_AUX, + P_SLEEP_CLK, + P_DSI0_PHYPLL_BYTE, + P_DSI0_PHYPLL_DSI, + P_EXT_PRI_I2S, + P_EXT_SEC_I2S, + P_EXT_MCLK, +}; + +static struct clk_pll gpll0 = { + .l_reg = 0x21004, + .m_reg = 0x21008, + .n_reg = 0x2100c, + .config_reg = 0x21010, + .mode_reg = 0x21000, + .status_reg = 0x2101c, + .status_bit = 17, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gpll0", + .parent_data = &(const struct clk_parent_data) { + .fw_name = "xo", + }, + .num_parents = 1, + .ops = &clk_pll_ops, + }, +}; + +static struct clk_regmap gpll0_vote = { + .enable_reg = 0x45000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpll0_vote", + .parent_data = &(const struct clk_parent_data) { + .hw = &gpll0.clkr.hw, + }, + .num_parents = 1, + .ops = &clk_pll_vote_ops, + }, +}; + +static struct clk_pll gpll1 = { + .l_reg = 0x20004, + .m_reg = 0x20008, + .n_reg = 0x2000c, + .config_reg = 0x20010, + .mode_reg = 0x20000, + .status_reg = 0x2001c, + .status_bit = 17, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gpll1", + .parent_data = &(const struct clk_parent_data) { + .fw_name = "xo", + }, + .num_parents = 1, + .ops = &clk_pll_ops, + }, +}; + +static struct clk_regmap gpll1_vote = { + .enable_reg = 0x45000, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gpll1_vote", + .parent_data = &(const struct clk_parent_data) { + .hw = &gpll1.clkr.hw, + }, + .num_parents = 1, + .ops = &clk_pll_vote_ops, + }, +}; + +static struct clk_pll gpll2 = { + .l_reg = 0x4a004, + .m_reg = 0x4a008, + .n_reg = 0x4a00c, + .config_reg = 0x4a010, + .mode_reg = 0x4a000, + .status_reg = 0x4a01c, + .status_bit = 17, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gpll2", + .parent_data = &(const struct clk_parent_data) { + .fw_name = "xo", + }, + .num_parents = 1, + .ops = &clk_pll_ops, + }, +}; + +static struct clk_regmap gpll2_vote = { + .enable_reg = 0x45000, + .enable_mask = BIT(2), + .hw.init = &(struct clk_init_data){ + .name = "gpll2_vote", + .parent_data = &(const struct clk_parent_data) { + .hw = &gpll2.clkr.hw, + }, + .num_parents = 1, + .ops = &clk_pll_vote_ops, + }, +}; + +static struct clk_pll bimc_pll = 
{ + .l_reg = 0x23004, + .m_reg = 0x23008, + .n_reg = 0x2300c, + .config_reg = 0x23010, + .mode_reg = 0x23000, + .status_reg = 0x2301c, + .status_bit = 17, + .clkr.hw.init = &(struct clk_init_data){ + .name = "bimc_pll", + .parent_data = &(const struct clk_parent_data) { + .fw_name = "xo", + }, + .num_parents = 1, + .ops = &clk_pll_ops, + }, +}; + +static struct clk_regmap bimc_pll_vote = { + .enable_reg = 0x45000, + .enable_mask = BIT(3), + .hw.init = &(struct clk_init_data){ + .name = "bimc_pll_vote", + .parent_data = &(const struct clk_parent_data) { + .hw = &bimc_pll.clkr.hw, + }, + .num_parents = 1, + .ops = &clk_pll_vote_ops, + }, +}; + +static struct clk_pll gpll3 = { + .l_reg = 0x22004, + .m_reg = 0x22008, + .n_reg = 0x2200c, + .config_reg = 0x22010, + .mode_reg = 0x22000, + .status_reg = 0x2201c, + .status_bit = 17, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gpll3", + .parent_data = &(const struct clk_parent_data) { + .fw_name = "xo", + }, + .num_parents = 1, + .ops = &clk_pll_ops, + }, +}; + +static struct clk_regmap gpll3_vote = { + .enable_reg = 0x45000, + .enable_mask = BIT(4), + .hw.init = &(struct clk_init_data){ + .name = "gpll3_vote", + .parent_data = &(const struct clk_parent_data) { + .hw = &gpll3.clkr.hw, + }, + .num_parents = 1, + .ops = &clk_pll_vote_ops, + }, +}; + +/* GPLL3 at 1100 MHz, main output enabled. */ +static const struct pll_config gpll3_config = { + .l = 57, + .m = 7, + .n = 24, + .vco_val = 0x0, + .vco_mask = BIT(20), + .pre_div_val = 0x0, + .pre_div_mask = BIT(12), + .post_div_val = 0x0, + .post_div_mask = BIT(9) | BIT(8), + .mn_ena_mask = BIT(24), + .main_output_mask = BIT(0), + .aux_output_mask = BIT(1), +}; + +static struct clk_pll gpll4 = { + .l_reg = 0x24004, + .m_reg = 0x24008, + .n_reg = 0x2400c, + .config_reg = 0x24010, + .mode_reg = 0x24000, + .status_reg = 0x2401c, + .status_bit = 17, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gpll4", + .parent_data = &(const struct clk_parent_data) { + .fw_name = "xo", + }, + .num_parents = 1, + .ops = &clk_pll_ops, + }, +}; + +static struct clk_regmap gpll4_vote = { + .enable_reg = 0x45000, + .enable_mask = BIT(5), + .hw.init = &(struct clk_init_data){ + .name = "gpll4_vote", + .parent_data = &(const struct clk_parent_data) { + .hw = &gpll4.clkr.hw, + }, + .num_parents = 1, + .ops = &clk_pll_vote_ops, + }, +}; + +/* GPLL4 at 1200 MHz, main output enabled. 
*/ +static struct pll_config gpll4_config = { + .l = 62, + .m = 1, + .n = 2, + .vco_val = 0x0, + .vco_mask = BIT(20), + .pre_div_val = 0x0, + .pre_div_mask = BIT(12), + .post_div_val = 0x0, + .post_div_mask = BIT(9) | BIT(8), + .mn_ena_mask = BIT(24), + .main_output_mask = BIT(0), +}; + +static struct clk_pll gpll5 = { + .l_reg = 0x25004, + .m_reg = 0x25008, + .n_reg = 0x2500c, + .config_reg = 0x25010, + .mode_reg = 0x25000, + .status_reg = 0x2501c, + .status_bit = 17, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gpll5", + .parent_data = &(const struct clk_parent_data) { + .fw_name = "xo", + }, + .num_parents = 1, + .ops = &clk_pll_ops, + }, +}; + +static struct clk_regmap gpll5_vote = { + .enable_reg = 0x45000, + .enable_mask = BIT(6), + .hw.init = &(struct clk_init_data){ + .name = "gpll5_vote", + .parent_data = &(const struct clk_parent_data) { + .hw = &gpll5.clkr.hw, + }, + .num_parents = 1, + .ops = &clk_pll_vote_ops, + }, +}; + +static struct clk_pll gpll6 = { + .l_reg = 0x37004, + .m_reg = 0x37008, + .n_reg = 0x3700c, + .config_reg = 0x37010, + .mode_reg = 0x37000, + .status_reg = 0x3701c, + .status_bit = 17, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gpll6", + .parent_data = &(const struct clk_parent_data) { + .fw_name = "xo", + }, + .num_parents = 1, + .ops = &clk_pll_ops, + }, +}; + +static struct clk_regmap gpll6_vote = { + .enable_reg = 0x45000, + .enable_mask = BIT(7), + .hw.init = &(struct clk_init_data){ + .name = "gpll6_vote", + .parent_data = &(const struct clk_parent_data) { + .hw = &gpll6.clkr.hw, + }, + .num_parents = 1, + .ops = &clk_pll_vote_ops, + }, +}; + +static const struct parent_map gcc_xo_gpll0_map[] = { + { P_XO, 0 }, + { P_GPLL0, 1 }, +}; + +static const struct clk_parent_data gcc_xo_gpll0_parent_data[] = { + { .fw_name = "xo" }, + { .hw = &gpll0_vote.hw }, +}; + +static const struct parent_map gcc_xo_gpll0_bimc_map[] = { + { P_XO, 0 }, + { P_GPLL0, 1 }, + { P_BIMC, 2 }, +}; + +static const struct clk_parent_data gcc_xo_gpll0_bimc_parent_data[] = { + { .fw_name = "xo" }, + { .hw = &gpll0_vote.hw }, + { .hw = &bimc_pll_vote.hw }, +}; + +static const struct parent_map gcc_xo_gpll0_gpll6a_map[] = { + { P_XO, 0 }, + { P_GPLL0, 1 }, + { P_GPLL6_AUX, 2 }, +}; + +static const struct clk_parent_data gcc_xo_gpll0_gpll6a_parent_data[] = { + { .fw_name = "xo" }, + { .hw = &gpll0_vote.hw }, + { .hw = &gpll6_vote.hw }, +}; + +static const struct parent_map gcc_xo_gpll0_gpll2a_gpll3_gpll6a_map[] = { + { P_XO, 0 }, + { P_GPLL0, 1 }, + { P_GPLL2_AUX, 4 }, + { P_GPLL3, 2 }, + { P_GPLL6_AUX, 3 }, +}; + +static const struct clk_parent_data gcc_xo_gpll0_gpll2a_gpll3_gpll6a_parent_data[] = { + { .fw_name = "xo" }, + { .hw = &gpll0_vote.hw }, + { .hw = &gpll2_vote.hw }, + { .hw = &gpll3_vote.hw }, + { .hw = &gpll6_vote.hw }, +}; + +static const struct parent_map gcc_xo_gpll0_gpll2_map[] = { + { P_XO, 0 }, + { P_GPLL0, 1 }, + { P_GPLL2, 2 }, +}; + +static const struct clk_parent_data gcc_xo_gpll0_gpll2_parent_data[] = { + { .fw_name = "xo" }, + { .hw = &gpll0_vote.hw }, + { .hw = &gpll2_vote.hw }, +}; + +static const struct parent_map gcc_xo_gpll0_gpll2_gpll4_map[] = { + { P_XO, 0 }, + { P_GPLL0, 1 }, + { P_GPLL2, 3 }, + { P_GPLL4, 2 }, +}; + +static const struct clk_parent_data gcc_xo_gpll0_gpll2_gpll4_parent_data[] = { + { .fw_name = "xo" }, + { .hw = &gpll0_vote.hw }, + { .hw = &gpll2_vote.hw }, + { .hw = &gpll4_vote.hw }, +}; + +static const struct parent_map gcc_xo_gpll0a_map[] = { + { P_XO, 0 }, + { P_GPLL0_AUX, 2 }, +}; + +static const struct 
clk_parent_data gcc_xo_gpll0a_parent_data[] = { + { .fw_name = "xo" }, + { .hw = &gpll0_vote.hw }, +}; + +static const struct parent_map gcc_xo_gpll0_gpll1a_sleep_map[] = { + { P_XO, 0 }, + { P_GPLL0, 1 }, + { P_GPLL1_AUX, 2 }, + { P_SLEEP_CLK, 6 }, +}; + +static const struct clk_parent_data gcc_xo_gpll0_gpll1a_sleep_parent_data[] = { + { .fw_name = "xo" }, + { .hw = &gpll0_vote.hw }, + { .hw = &gpll1_vote.hw }, + { .fw_name = "sleep_clk", .name = "sleep_clk" }, +}; + +static const struct parent_map gcc_xo_gpll0_gpll1a_gpll6_sleep_map[] = { + { P_XO, 0 }, + { P_GPLL0, 1 }, + { P_GPLL1_AUX, 2 }, + { P_GPLL6, 2 }, + { P_SLEEP_CLK, 6 }, +}; + +static const struct clk_parent_data gcc_xo_gpll0_gpll1a_gpll6_sleep_parent_data[] = { + { .fw_name = "xo" }, + { .hw = &gpll0_vote.hw }, + { .hw = &gpll1_vote.hw }, + { .hw = &gpll6_vote.hw }, + { .fw_name = "sleep_clk", .name = "sleep_clk" }, +}; + +static const struct parent_map gcc_xo_gpll0_gpll1a_map[] = { + { P_XO, 0 }, + { P_GPLL0, 1 }, + { P_GPLL1_AUX, 2 }, +}; + +static const struct clk_parent_data gcc_xo_gpll0_gpll1a_parent_data[] = { + { .fw_name = "xo" }, + { .hw = &gpll0_vote.hw }, + { .hw = &gpll1_vote.hw }, +}; + +static const struct parent_map gcc_xo_dsibyte_map[] = { + { P_XO, 0, }, + { P_DSI0_PHYPLL_BYTE, 2 }, +}; + +static const struct clk_parent_data gcc_xo_dsibyte_parent_data[] = { + { .fw_name = "xo" }, + { .fw_name = "dsi0pllbyte", .name = "dsi0pllbyte" }, +}; + +static const struct parent_map gcc_xo_gpll0a_dsibyte_map[] = { + { P_XO, 0 }, + { P_GPLL0_AUX, 2 }, + { P_DSI0_PHYPLL_BYTE, 1 }, +}; + +static const struct clk_parent_data gcc_xo_gpll0a_dsibyte_parent_data[] = { + { .fw_name = "xo" }, + { .hw = &gpll0_vote.hw }, + { .fw_name = "dsi0pllbyte", .name = "dsi0pllbyte" }, +}; + +static const struct parent_map gcc_xo_gpll1_dsiphy_gpll6_gpll3a_gpll0a_map[] = { + { P_XO, 0 }, + { P_GPLL1, 1 }, + { P_DSI0_PHYPLL_DSI, 2 }, + { P_GPLL6, 3 }, + { P_GPLL3_AUX, 4 }, + { P_GPLL0_AUX, 5 }, +}; + +static const struct clk_parent_data gcc_xo_gpll1_dsiphy_gpll6_gpll3a_gpll0a_parent_data[] = { + { .fw_name = "xo" }, + { .hw = &gpll1_vote.hw }, + { .fw_name = "dsi0pll", .name = "dsi0pll" }, + { .hw = &gpll6_vote.hw }, + { .hw = &gpll3_vote.hw }, + { .hw = &gpll0_vote.hw }, +}; + +static const struct parent_map gcc_xo_gpll0a_dsiphy_map[] = { + { P_XO, 0 }, + { P_GPLL0_AUX, 2 }, + { P_DSI0_PHYPLL_DSI, 1 }, +}; + +static const struct clk_parent_data gcc_xo_gpll0a_dsiphy_parent_data[] = { + { .fw_name = "xo" }, + { .hw = &gpll0_vote.hw }, + { .fw_name = "dsi0pll", .name = "dsi0pll" }, +}; + +static const struct parent_map gcc_xo_gpll0_gpll5a_gpll6_bimc_map[] = { + { P_XO, 0 }, + { P_GPLL0, 1 }, + { P_GPLL5_AUX, 3 }, + { P_GPLL6, 2 }, + { P_BIMC, 4 }, +}; + +static const struct clk_parent_data gcc_xo_gpll0_gpll5a_gpll6_bimc_parent_data[] = { + { .fw_name = "xo" }, + { .hw = &gpll0_vote.hw }, + { .hw = &gpll5_vote.hw }, + { .hw = &gpll6_vote.hw }, + { .hw = &bimc_pll_vote.hw }, +}; + +static const struct parent_map gcc_xo_gpll0_gpll1_sleep_map[] = { + { P_XO, 0 }, + { P_GPLL0, 1 }, + { P_GPLL1, 2 }, + { P_SLEEP_CLK, 6 } +}; + +static const struct clk_parent_data gcc_xo_gpll0_gpll1_sleep_parent_data[] = { + { .fw_name = "xo" }, + { .hw = &gpll0_vote.hw }, + { .hw = &gpll1_vote.hw }, + { .fw_name = "sleep_clk", .name = "sleep_clk" }, +}; + +static const struct parent_map gcc_xo_gpll1_epi2s_emclk_sleep_map[] = { + { P_XO, 0 }, + { P_GPLL1, 1 }, + { P_EXT_PRI_I2S, 2 }, + { P_EXT_MCLK, 3 }, + { P_SLEEP_CLK, 6 } +}; + +static const struct clk_parent_data 
gcc_xo_gpll1_epi2s_emclk_sleep_parent_data[] = { + { .fw_name = "xo" }, + { .hw = &gpll0_vote.hw }, + { .fw_name = "ext_pri_i2s", .name = "ext_pri_i2s" }, + { .fw_name = "ext_mclk", .name = "ext_mclk" }, + { .fw_name = "sleep_clk", .name = "sleep_clk" }, +}; + +static const struct parent_map gcc_xo_gpll1_esi2s_emclk_sleep_map[] = { + { P_XO, 0 }, + { P_GPLL1, 1 }, + { P_EXT_SEC_I2S, 2 }, + { P_EXT_MCLK, 3 }, + { P_SLEEP_CLK, 6 } +}; + +static const struct clk_parent_data gcc_xo_gpll1_esi2s_emclk_sleep_parent_data[] = { + { .fw_name = "xo" }, + { .hw = &gpll1_vote.hw }, + { .fw_name = "ext_sec_i2s", .name = "ext_sec_i2s" }, + { .fw_name = "ext_mclk", .name = "ext_mclk" }, + { .fw_name = "sleep_clk", .name = "sleep_clk" }, +}; + +static const struct parent_map gcc_xo_sleep_map[] = { + { P_XO, 0 }, + { P_SLEEP_CLK, 6 } +}; + +static const struct clk_parent_data gcc_xo_sleep_parent_data[] = { + { .fw_name = "xo" }, + { .fw_name = "sleep_clk", .name = "sleep_clk" }, +}; + +static const struct parent_map gcc_xo_gpll1_emclk_sleep_map[] = { + { P_XO, 0 }, + { P_GPLL1, 1 }, + { P_EXT_MCLK, 2 }, + { P_SLEEP_CLK, 6 } +}; + +static const struct clk_parent_data gcc_xo_gpll1_emclk_sleep_parent_data[] = { + { .fw_name = "xo" }, + { .hw = &gpll1_vote.hw }, + { .fw_name = "ext_mclk", .name = "ext_mclk" }, + { .fw_name = "sleep_clk", .name = "sleep_clk" }, +}; + +static const struct parent_map gcc_xo_gpll6_gpll0_map[] = { + { P_XO, 0 }, + { P_GPLL6, 1 }, + { P_GPLL0, 2 }, +}; + +static const struct clk_parent_data gcc_xo_gpll6_gpll0_parent_data[] = { + { .fw_name = "xo" }, + { .hw = &gpll6_vote.hw }, + { .hw = &gpll0_vote.hw }, +}; + +static const struct parent_map gcc_xo_gpll6_gpll0a_map[] = { + { P_XO, 0 }, + { P_GPLL6, 1 }, + { P_GPLL0_AUX, 2 }, +}; + +static const struct clk_parent_data gcc_xo_gpll6_gpll0a_parent_data[] = { + { .fw_name = "xo" }, + { .hw = &gpll6_vote.hw }, + { .hw = &gpll0_vote.hw }, +}; + +static struct clk_rcg2 pcnoc_bfdcd_clk_src = { + .cmd_rcgr = 0x27000, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .clkr.hw.init = &(struct clk_init_data){ + .name = "pcnoc_bfdcd_clk_src", + .parent_data = gcc_xo_gpll0_parent_data, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 system_noc_bfdcd_clk_src = { + .cmd_rcgr = 0x26004, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_gpll6a_map, + .clkr.hw.init = &(struct clk_init_data){ + .name = "system_noc_bfdcd_clk_src", + .parent_data = gcc_xo_gpll0_gpll6a_parent_data, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 bimc_ddr_clk_src = { + .cmd_rcgr = 0x32004, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_bimc_map, + .clkr.hw.init = &(struct clk_init_data){ + .name = "bimc_ddr_clk_src", + .parent_data = gcc_xo_gpll0_bimc_parent_data, + .num_parents = 3, + .ops = &clk_rcg2_ops, + .flags = CLK_GET_RATE_NOCACHE, + }, +}; + +static const struct freq_tbl ftbl_gcc_camss_ahb_clk[] = { + F(40000000, P_GPLL0, 10, 1, 2), + F(80000000, P_GPLL0, 10, 0, 0), + { } +}; + +static struct clk_rcg2 camss_ahb_clk_src = { + .cmd_rcgr = 0x5a000, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_gcc_camss_ahb_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "camss_ahb_clk_src", + .parent_data = gcc_xo_gpll0_parent_data, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_apss_ahb_clk[] = { + F(19200000, P_XO, 1, 0, 0), + F(50000000, P_GPLL0, 16, 0, 0), + F(100000000, P_GPLL0, 8, 0, 0), + F(133330000, P_GPLL0, 6, 0, 0), + 
{ } +}; + +static struct clk_rcg2 apss_ahb_clk_src = { + .cmd_rcgr = 0x46000, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_apss_ahb_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "apss_ahb_clk_src", + .parent_data = gcc_xo_gpll0_parent_data, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_camss_csi0_1_clk[] = { + F(100000000, P_GPLL0, 8, 0, 0), + F(200000000, P_GPLL0, 4, 0, 0), + { } +}; + +static struct clk_rcg2 csi0_clk_src = { + .cmd_rcgr = 0x4e020, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_gcc_camss_csi0_1_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "csi0_clk_src", + .parent_data = gcc_xo_gpll0_parent_data, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 csi1_clk_src = { + .cmd_rcgr = 0x4f020, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_gcc_camss_csi0_1_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "csi1_clk_src", + .parent_data = gcc_xo_gpll0_parent_data, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_oxili_gfx3d_clk[] = { + F(19200000, P_XO, 1, 0, 0), + F(50000000, P_GPLL0, 16, 0, 0), + F(80000000, P_GPLL0, 10, 0, 0), + F(100000000, P_GPLL0, 8, 0, 0), + F(160000000, P_GPLL0, 5, 0, 0), + F(200000000, P_GPLL0, 4, 0, 0), + F(220000000, P_GPLL3, 5, 0, 0), + F(266670000, P_GPLL0, 3, 0, 0), + F(310000000, P_GPLL2_AUX, 3, 0, 0), + F(400000000, P_GPLL0, 2, 0, 0), + F(465000000, P_GPLL2_AUX, 2, 0, 0), + F(550000000, P_GPLL3, 2, 0, 0), + { } +}; + +static struct clk_rcg2 gfx3d_clk_src = { + .cmd_rcgr = 0x59000, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_gpll2a_gpll3_gpll6a_map, + .freq_tbl = ftbl_gcc_oxili_gfx3d_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gfx3d_clk_src", + .parent_data = gcc_xo_gpll0_gpll2a_gpll3_gpll6a_parent_data, + .num_parents = 5, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_camss_vfe0_clk[] = { + F(50000000, P_GPLL0, 16, 0, 0), + F(80000000, P_GPLL0, 10, 0, 0), + F(100000000, P_GPLL0, 8, 0, 0), + F(160000000, P_GPLL0, 5, 0, 0), + F(177780000, P_GPLL0, 4.5, 0, 0), + F(200000000, P_GPLL0, 4, 0, 0), + F(266670000, P_GPLL0, 3, 0, 0), + F(320000000, P_GPLL0, 2.5, 0, 0), + F(400000000, P_GPLL0, 2, 0, 0), + F(465000000, P_GPLL2, 2, 0, 0), + F(480000000, P_GPLL4, 2.5, 0, 0), + F(600000000, P_GPLL4, 2, 0, 0), + { } +}; + +static struct clk_rcg2 vfe0_clk_src = { + .cmd_rcgr = 0x58000, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_gpll2_gpll4_map, + .freq_tbl = ftbl_gcc_camss_vfe0_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "vfe0_clk_src", + .parent_data = gcc_xo_gpll0_gpll2_gpll4_parent_data, + .num_parents = 4, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_blsp1_qup1_6_i2c_apps_clk[] = { + F(19200000, P_XO, 1, 0, 0), + F(50000000, P_GPLL0, 16, 0, 0), + { } +}; + +static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = { + .cmd_rcgr = 0x0200c, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp1_qup1_i2c_apps_clk_src", + .parent_data = gcc_xo_gpll0_parent_data, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_blsp1_qup1_6_spi_apps_clk[] = { + F(960000, P_XO, 10, 1, 2), + F(4800000, P_XO, 4, 0, 0), + F(9600000, P_XO, 2, 0, 0), + F(16000000, P_GPLL0, 10, 1, 5), + F(19200000, P_XO, 1, 0, 0), + F(25000000, 
P_GPLL0, 16, 1, 2), + F(50000000, P_GPLL0, 16, 0, 0), + { } +}; + +static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = { + .cmd_rcgr = 0x02024, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp1_qup1_spi_apps_clk_src", + .parent_data = gcc_xo_gpll0_parent_data, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = { + .cmd_rcgr = 0x03000, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp1_qup2_i2c_apps_clk_src", + .parent_data = gcc_xo_gpll0_parent_data, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = { + .cmd_rcgr = 0x03014, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp1_qup2_spi_apps_clk_src", + .parent_data = gcc_xo_gpll0_parent_data, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = { + .cmd_rcgr = 0x04000, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp1_qup3_i2c_apps_clk_src", + .parent_data = gcc_xo_gpll0_parent_data, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = { + .cmd_rcgr = 0x04024, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp1_qup3_spi_apps_clk_src", + .parent_data = gcc_xo_gpll0_parent_data, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = { + .cmd_rcgr = 0x05000, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp1_qup4_i2c_apps_clk_src", + .parent_data = gcc_xo_gpll0_parent_data, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = { + .cmd_rcgr = 0x05024, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp1_qup4_spi_apps_clk_src", + .parent_data = gcc_xo_gpll0_parent_data, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp1_qup5_i2c_apps_clk_src = { + .cmd_rcgr = 0x06000, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp1_qup5_i2c_apps_clk_src", + .parent_data = gcc_xo_gpll0_parent_data, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp1_qup5_spi_apps_clk_src = { + .cmd_rcgr = 0x06024, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp1_qup5_spi_apps_clk_src", + .parent_data = gcc_xo_gpll0_parent_data, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp1_qup6_i2c_apps_clk_src = { + .cmd_rcgr = 0x07000, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, 
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp1_qup6_i2c_apps_clk_src", + .parent_data = gcc_xo_gpll0_parent_data, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp1_qup6_spi_apps_clk_src = { + .cmd_rcgr = 0x07024, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp1_qup6_spi_apps_clk_src", + .parent_data = gcc_xo_gpll0_parent_data, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_blsp1_uart1_6_apps_clk[] = { + F(3686400, P_GPLL0, 1, 72, 15625), + F(7372800, P_GPLL0, 1, 144, 15625), + F(14745600, P_GPLL0, 1, 288, 15625), + F(16000000, P_GPLL0, 10, 1, 5), + F(19200000, P_XO, 1, 0, 0), + F(24000000, P_GPLL0, 1, 3, 100), + F(25000000, P_GPLL0, 16, 1, 2), + F(32000000, P_GPLL0, 1, 1, 25), + F(40000000, P_GPLL0, 1, 1, 20), + F(46400000, P_GPLL0, 1, 29, 500), + F(48000000, P_GPLL0, 1, 3, 50), + F(51200000, P_GPLL0, 1, 8, 125), + F(56000000, P_GPLL0, 1, 7, 100), + F(58982400, P_GPLL0, 1, 1152, 15625), + F(60000000, P_GPLL0, 1, 3, 40), + { } +}; + +static struct clk_rcg2 blsp1_uart1_apps_clk_src = { + .cmd_rcgr = 0x02044, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_gcc_blsp1_uart1_6_apps_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp1_uart1_apps_clk_src", + .parent_data = gcc_xo_gpll0_parent_data, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp1_uart2_apps_clk_src = { + .cmd_rcgr = 0x03034, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_gcc_blsp1_uart1_6_apps_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp1_uart2_apps_clk_src", + .parent_data = gcc_xo_gpll0_parent_data, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_camss_cci_clk[] = { + F(19200000, P_XO, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cci_clk_src = { + .cmd_rcgr = 0x51000, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_xo_gpll0a_map, + .freq_tbl = ftbl_gcc_camss_cci_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cci_clk_src", + .parent_data = gcc_xo_gpll0a_parent_data, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_camss_gp0_1_clk[] = { + F(100000000, P_GPLL0, 8, 0, 0), + F(200000000, P_GPLL0, 4, 0, 0), + { } +}; + +static struct clk_rcg2 camss_gp0_clk_src = { + .cmd_rcgr = 0x54000, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_gpll1a_sleep_map, + .freq_tbl = ftbl_gcc_camss_gp0_1_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "camss_gp0_clk_src", + .parent_data = gcc_xo_gpll0_gpll1a_sleep_parent_data, + .num_parents = 4, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 camss_gp1_clk_src = { + .cmd_rcgr = 0x55000, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_gpll1a_sleep_map, + .freq_tbl = ftbl_gcc_camss_gp0_1_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "camss_gp1_clk_src", + .parent_data = gcc_xo_gpll0_gpll1a_sleep_parent_data, + .num_parents = 4, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_camss_jpeg0_clk[] = { + F(133330000, P_GPLL0, 6, 0, 0), + F(266670000, P_GPLL0, 3, 0, 0), + F(320000000, P_GPLL0, 2.5, 0, 0), + { } +}; + +static struct clk_rcg2 jpeg0_clk_src = { + .cmd_rcgr = 0x57000, + 
.hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_gcc_camss_jpeg0_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "jpeg0_clk_src", + .parent_data = gcc_xo_gpll0_parent_data, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_camss_mclk0_1_clk[] = { + F(24000000, P_GPLL0, 1, 1, 45), + F(66670000, P_GPLL0, 12, 0, 0), + { } +}; + +static struct clk_rcg2 mclk0_clk_src = { + .cmd_rcgr = 0x52000, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_gpll1a_gpll6_sleep_map, + .freq_tbl = ftbl_gcc_camss_mclk0_1_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "mclk0_clk_src", + .parent_data = gcc_xo_gpll0_gpll1a_gpll6_sleep_parent_data, + .num_parents = 5, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 mclk1_clk_src = { + .cmd_rcgr = 0x53000, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_gpll1a_gpll6_sleep_map, + .freq_tbl = ftbl_gcc_camss_mclk0_1_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "mclk1_clk_src", + .parent_data = gcc_xo_gpll0_gpll1a_gpll6_sleep_parent_data, + .num_parents = 5, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_camss_csi0_1phytimer_clk[] = { + F(100000000, P_GPLL0, 8, 0, 0), + F(200000000, P_GPLL0, 4, 0, 0), + { } +}; + +static struct clk_rcg2 csi0phytimer_clk_src = { + .cmd_rcgr = 0x4e000, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_gpll1a_map, + .freq_tbl = ftbl_gcc_camss_csi0_1phytimer_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "csi0phytimer_clk_src", + .parent_data = gcc_xo_gpll0_gpll1a_parent_data, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 csi1phytimer_clk_src = { + .cmd_rcgr = 0x4f000, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_gpll1a_map, + .freq_tbl = ftbl_gcc_camss_csi0_1phytimer_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "csi1phytimer_clk_src", + .parent_data = gcc_xo_gpll0_gpll1a_parent_data, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_camss_cpp_clk[] = { + F(160000000, P_GPLL0, 5, 0, 0), + F(320000000, P_GPLL0, 2.5, 0, 0), + F(465000000, P_GPLL2, 2, 0, 0), + { } +}; + +static struct clk_rcg2 cpp_clk_src = { + .cmd_rcgr = 0x58018, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_gpll2_map, + .freq_tbl = ftbl_gcc_camss_cpp_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cpp_clk_src", + .parent_data = gcc_xo_gpll0_gpll2_parent_data, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_crypto_clk[] = { + F(50000000, P_GPLL0, 16, 0, 0), + F(80000000, P_GPLL0, 10, 0, 0), + F(100000000, P_GPLL0, 8, 0, 0), + F(160000000, P_GPLL0, 5, 0, 0), + { } +}; + +/* This is not in the documentation but is in the downstream driver */ +static struct clk_rcg2 crypto_clk_src = { + .cmd_rcgr = 0x16004, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_gcc_crypto_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "crypto_clk_src", + .parent_data = gcc_xo_gpll0_parent_data, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_gp1_3_clk[] = { + F(19200000, P_XO, 1, 0, 0), + { } +}; + +static struct clk_rcg2 gp1_clk_src = { + .cmd_rcgr = 0x08004, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_gpll1a_sleep_map, + .freq_tbl = ftbl_gcc_gp1_3_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gp1_clk_src", + .parent_data = gcc_xo_gpll0_gpll1a_sleep_parent_data, + 
.num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gp2_clk_src = { + .cmd_rcgr = 0x09004, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_gpll1a_sleep_map, + .freq_tbl = ftbl_gcc_gp1_3_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gp2_clk_src", + .parent_data = gcc_xo_gpll0_gpll1a_sleep_parent_data, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gp3_clk_src = { + .cmd_rcgr = 0x0a004, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_gpll1a_sleep_map, + .freq_tbl = ftbl_gcc_gp1_3_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gp3_clk_src", + .parent_data = gcc_xo_gpll0_gpll1a_sleep_parent_data, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 byte0_clk_src = { + .cmd_rcgr = 0x4d044, + .hid_width = 5, + .parent_map = gcc_xo_gpll0a_dsibyte_map, + .clkr.hw.init = &(struct clk_init_data){ + .name = "byte0_clk_src", + .parent_data = gcc_xo_gpll0a_dsibyte_parent_data, + .num_parents = 3, + .ops = &clk_byte2_ops, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_rcg2 byte1_clk_src = { + .cmd_rcgr = 0x4d0b0, + .hid_width = 5, + .parent_map = gcc_xo_gpll0a_dsibyte_map, + .clkr.hw.init = &(struct clk_init_data){ + .name = "byte1_clk_src", + .parent_data = gcc_xo_gpll0a_dsibyte_parent_data, + .num_parents = 3, + .ops = &clk_byte2_ops, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static const struct freq_tbl ftbl_gcc_mdss_esc_clk[] = { + F(19200000, P_XO, 1, 0, 0), + { } +}; + +static struct clk_rcg2 esc0_clk_src = { + .cmd_rcgr = 0x4d060, + .hid_width = 5, + .parent_map = gcc_xo_dsibyte_map, + .freq_tbl = ftbl_gcc_mdss_esc_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "esc0_clk_src", + .parent_data = gcc_xo_dsibyte_parent_data, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 esc1_clk_src = { + .cmd_rcgr = 0x4d0a8, + .hid_width = 5, + .parent_map = gcc_xo_dsibyte_map, + .freq_tbl = ftbl_gcc_mdss_esc_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "esc1_clk_src", + .parent_data = gcc_xo_dsibyte_parent_data, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_mdss_mdp_clk[] = { + F(50000000, P_GPLL0_AUX, 16, 0, 0), + F(80000000, P_GPLL0_AUX, 10, 0, 0), + F(100000000, P_GPLL0_AUX, 8, 0, 0), + F(160000000, P_GPLL0_AUX, 5, 0, 0), + F(177780000, P_GPLL0_AUX, 4.5, 0, 0), + F(200000000, P_GPLL0_AUX, 4, 0, 0), + F(266670000, P_GPLL0_AUX, 3, 0, 0), + F(307200000, P_GPLL1, 2, 0, 0), + F(366670000, P_GPLL3_AUX, 3, 0, 0), + { } +}; + +static struct clk_rcg2 mdp_clk_src = { + .cmd_rcgr = 0x4d014, + .hid_width = 5, + .parent_map = gcc_xo_gpll1_dsiphy_gpll6_gpll3a_gpll0a_map, + .freq_tbl = ftbl_gcc_mdss_mdp_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "mdp_clk_src", + .parent_data = gcc_xo_gpll1_dsiphy_gpll6_gpll3a_gpll0a_parent_data, + .num_parents = 6, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 pclk0_clk_src = { + .cmd_rcgr = 0x4d000, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_xo_gpll0a_dsiphy_map, + .clkr.hw.init = &(struct clk_init_data){ + .name = "pclk0_clk_src", + .parent_data = gcc_xo_gpll0a_dsiphy_parent_data, + .num_parents = 3, + .ops = &clk_pixel_ops, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_rcg2 pclk1_clk_src = { + .cmd_rcgr = 0x4d0b8, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_xo_gpll0a_dsiphy_map, + .clkr.hw.init = &(struct clk_init_data){ + .name = "pclk1_clk_src", + .parent_data = 
gcc_xo_gpll0a_dsiphy_parent_data, + .num_parents = 3, + .ops = &clk_pixel_ops, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static const struct freq_tbl ftbl_gcc_mdss_vsync_clk[] = { + F(19200000, P_XO, 1, 0, 0), + { } +}; + +static struct clk_rcg2 vsync_clk_src = { + .cmd_rcgr = 0x4d02c, + .hid_width = 5, + .parent_map = gcc_xo_gpll0a_map, + .freq_tbl = ftbl_gcc_mdss_vsync_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "vsync_clk_src", + .parent_data = gcc_xo_gpll0a_parent_data, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_pdm2_clk[] = { + F(64000000, P_GPLL0, 12.5, 0, 0), + { } +}; + +/* This is not in the documentation but is in the downstream driver */ +static struct clk_rcg2 pdm2_clk_src = { + .cmd_rcgr = 0x44010, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_gcc_pdm2_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "pdm2_clk_src", + .parent_data = gcc_xo_gpll0_parent_data, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_sdcc_apps_clk[] = { + F(144000, P_XO, 16, 3, 25), + F(400000, P_XO, 12, 1, 4), + F(20000000, P_GPLL0, 10, 1, 4), + F(25000000, P_GPLL0, 16, 1, 2), + F(50000000, P_GPLL0, 16, 0, 0), + F(100000000, P_GPLL0, 8, 0, 0), + F(177770000, P_GPLL0, 4.5, 0, 0), + F(200000000, P_GPLL0, 4, 0, 0), + { } +}; + +static struct clk_rcg2 sdcc1_apps_clk_src = { + .cmd_rcgr = 0x42004, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_gcc_sdcc_apps_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "sdcc1_apps_clk_src", + .parent_data = gcc_xo_gpll0_parent_data, + .num_parents = 2, + .ops = &clk_rcg2_floor_ops, + }, +}; + +static struct clk_rcg2 sdcc2_apps_clk_src = { + .cmd_rcgr = 0x43004, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_gcc_sdcc_apps_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "sdcc2_apps_clk_src", + .parent_data = gcc_xo_gpll0_parent_data, + .num_parents = 2, + .ops = &clk_rcg2_floor_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_apss_tcu_clk[] = { + F(154285000, P_GPLL6, 7, 0, 0), + F(320000000, P_GPLL0, 2.5, 0, 0), + F(400000000, P_GPLL0, 2, 0, 0), + { } +}; + +static struct clk_rcg2 apss_tcu_clk_src = { + .cmd_rcgr = 0x1207c, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_gpll5a_gpll6_bimc_map, + .freq_tbl = ftbl_gcc_apss_tcu_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "apss_tcu_clk_src", + .parent_data = gcc_xo_gpll0_gpll5a_gpll6_bimc_parent_data, + .num_parents = 5, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_bimc_gpu_clk[] = { + F(19200000, P_XO, 1, 0, 0), + F(100000000, P_GPLL0, 8, 0, 0), + F(200000000, P_GPLL0, 4, 0, 0), + F(266500000, P_BIMC, 4, 0, 0), + F(400000000, P_GPLL0, 2, 0, 0), + F(533000000, P_BIMC, 2, 0, 0), + { } +}; + +static struct clk_rcg2 bimc_gpu_clk_src = { + .cmd_rcgr = 0x31028, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_gpll5a_gpll6_bimc_map, + .freq_tbl = ftbl_gcc_bimc_gpu_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "bimc_gpu_clk_src", + .parent_data = gcc_xo_gpll0_gpll5a_gpll6_bimc_parent_data, + .num_parents = 5, + .flags = CLK_GET_RATE_NOCACHE, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_usb_hs_system_clk[] = { + F(80000000, P_GPLL0, 10, 0, 0), + { } +}; + +static struct clk_rcg2 usb_hs_system_clk_src = { + .cmd_rcgr = 0x41010, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = 
ftbl_gcc_usb_hs_system_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "usb_hs_system_clk_src", + .parent_data = gcc_xo_gpll0_parent_data, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_usb_fs_system_clk[] = { + F(64000000, P_GPLL0, 12.5, 0, 0), + { } +}; + +static struct clk_rcg2 usb_fs_system_clk_src = { + .cmd_rcgr = 0x3f010, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_gcc_usb_fs_system_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "usb_fs_system_clk_src", + .parent_data = gcc_xo_gpll6_gpll0_parent_data, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_usb_fs_ic_clk[] = { + F(60000000, P_GPLL6, 1, 1, 18), + { } +}; + +static struct clk_rcg2 usb_fs_ic_clk_src = { + .cmd_rcgr = 0x3f034, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_gcc_usb_fs_ic_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "usb_fs_ic_clk_src", + .parent_data = gcc_xo_gpll6_gpll0a_parent_data, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_ultaudio_ahb_clk[] = { + F(3200000, P_XO, 6, 0, 0), + F(6400000, P_XO, 3, 0, 0), + F(9600000, P_XO, 2, 0, 0), + F(19200000, P_XO, 1, 0, 0), + F(40000000, P_GPLL0, 10, 1, 2), + F(66670000, P_GPLL0, 12, 0, 0), + F(80000000, P_GPLL0, 10, 0, 0), + F(100000000, P_GPLL0, 8, 0, 0), + { } +}; + +static struct clk_rcg2 ultaudio_ahbfabric_clk_src = { + .cmd_rcgr = 0x1c010, + .hid_width = 5, + .mnd_width = 8, + .parent_map = gcc_xo_gpll0_gpll1_sleep_map, + .freq_tbl = ftbl_gcc_ultaudio_ahb_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "ultaudio_ahbfabric_clk_src", + .parent_data = gcc_xo_gpll0_gpll1_sleep_parent_data, + .num_parents = 4, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_branch gcc_ultaudio_ahbfabric_ixfabric_clk = { + .halt_reg = 0x1c028, + .clkr = { + .enable_reg = 0x1c028, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ultaudio_ahbfabric_ixfabric_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &ultaudio_ahbfabric_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ultaudio_ahbfabric_ixfabric_lpm_clk = { + .halt_reg = 0x1c024, + .clkr = { + .enable_reg = 0x1c024, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ultaudio_ahbfabric_ixfabric_lpm_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &ultaudio_ahbfabric_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static const struct freq_tbl ftbl_gcc_ultaudio_lpaif_i2s_clk[] = { + F(128000, P_XO, 10, 1, 15), + F(256000, P_XO, 5, 1, 15), + F(384000, P_XO, 5, 1, 10), + F(512000, P_XO, 5, 2, 15), + F(576000, P_XO, 5, 3, 20), + F(705600, P_GPLL1, 16, 1, 80), + F(768000, P_XO, 5, 1, 5), + F(800000, P_XO, 5, 5, 24), + F(1024000, P_XO, 5, 4, 15), + F(1152000, P_XO, 1, 3, 50), + F(1411200, P_GPLL1, 16, 1, 40), + F(1536000, P_XO, 1, 2, 25), + F(1600000, P_XO, 12, 0, 0), + F(1728000, P_XO, 5, 9, 20), + F(2048000, P_XO, 5, 8, 15), + F(2304000, P_XO, 5, 3, 5), + F(2400000, P_XO, 8, 0, 0), + F(2822400, P_GPLL1, 16, 1, 20), + F(3072000, P_XO, 5, 4, 5), + F(4096000, P_GPLL1, 9, 2, 49), + F(4800000, P_XO, 4, 0, 0), + F(5644800, P_GPLL1, 16, 1, 10), + F(6144000, P_GPLL1, 7, 1, 21), + F(8192000, P_GPLL1, 9, 4, 49), + F(9600000, P_XO, 2, 0, 0), + F(11289600, P_GPLL1, 
16, 1, 5), + F(12288000, P_GPLL1, 7, 2, 21), + { } +}; + +static struct clk_rcg2 ultaudio_lpaif_pri_i2s_clk_src = { + .cmd_rcgr = 0x1c054, + .hid_width = 5, + .mnd_width = 8, + .parent_map = gcc_xo_gpll1_epi2s_emclk_sleep_map, + .freq_tbl = ftbl_gcc_ultaudio_lpaif_i2s_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "ultaudio_lpaif_pri_i2s_clk_src", + .parent_data = gcc_xo_gpll1_epi2s_emclk_sleep_parent_data, + .num_parents = 5, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_branch gcc_ultaudio_lpaif_pri_i2s_clk = { + .halt_reg = 0x1c068, + .clkr = { + .enable_reg = 0x1c068, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ultaudio_lpaif_pri_i2s_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &ultaudio_lpaif_pri_i2s_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_rcg2 ultaudio_lpaif_sec_i2s_clk_src = { + .cmd_rcgr = 0x1c06c, + .hid_width = 5, + .mnd_width = 8, + .parent_map = gcc_xo_gpll1_esi2s_emclk_sleep_map, + .freq_tbl = ftbl_gcc_ultaudio_lpaif_i2s_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "ultaudio_lpaif_sec_i2s_clk_src", + .parent_data = gcc_xo_gpll1_esi2s_emclk_sleep_parent_data, + .num_parents = 5, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_branch gcc_ultaudio_lpaif_sec_i2s_clk = { + .halt_reg = 0x1c080, + .clkr = { + .enable_reg = 0x1c080, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ultaudio_lpaif_sec_i2s_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &ultaudio_lpaif_sec_i2s_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_rcg2 ultaudio_lpaif_aux_i2s_clk_src = { + .cmd_rcgr = 0x1c084, + .hid_width = 5, + .mnd_width = 8, + .parent_map = gcc_xo_gpll1_emclk_sleep_map, + .freq_tbl = ftbl_gcc_ultaudio_lpaif_i2s_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "ultaudio_lpaif_aux_i2s_clk_src", + .parent_data = gcc_xo_gpll1_esi2s_emclk_sleep_parent_data, + .num_parents = 5, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_branch gcc_ultaudio_lpaif_aux_i2s_clk = { + .halt_reg = 0x1c098, + .clkr = { + .enable_reg = 0x1c098, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ultaudio_lpaif_aux_i2s_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &ultaudio_lpaif_aux_i2s_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static const struct freq_tbl ftbl_gcc_ultaudio_xo_clk[] = { + F(19200000, P_XO, 1, 0, 0), + { } +}; + +static struct clk_rcg2 ultaudio_xo_clk_src = { + .cmd_rcgr = 0x1c034, + .hid_width = 5, + .parent_map = gcc_xo_sleep_map, + .freq_tbl = ftbl_gcc_ultaudio_xo_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "ultaudio_xo_clk_src", + .parent_data = gcc_xo_sleep_parent_data, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_branch gcc_ultaudio_avsync_xo_clk = { + .halt_reg = 0x1c04c, + .clkr = { + .enable_reg = 0x1c04c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ultaudio_avsync_xo_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &ultaudio_xo_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ultaudio_stc_xo_clk = { + .halt_reg = 0x1c050, + .clkr = { + .enable_reg = 0x1c050, 
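+		/*
+		 * Like the other gcc_* branches, this clock only gates its
+		 * parent: clk_branch2_ops sets enable_mask in enable_reg and
+		 * then polls halt_reg to confirm the branch actually started
+		 * (or stopped) before returning.
+		 */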
+ .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ultaudio_stc_xo_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &ultaudio_xo_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static const struct freq_tbl ftbl_codec_clk[] = { + F(9600000, P_XO, 2, 0, 0), + F(12288000, P_XO, 1, 16, 25), + F(19200000, P_XO, 1, 0, 0), + F(11289600, P_EXT_MCLK, 1, 0, 0), + { } +}; + +static struct clk_rcg2 codec_digcodec_clk_src = { + .cmd_rcgr = 0x1c09c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_xo_gpll1_emclk_sleep_map, + .freq_tbl = ftbl_codec_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "codec_digcodec_clk_src", + .parent_data = gcc_xo_gpll1_emclk_sleep_parent_data, + .num_parents = 4, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_branch gcc_codec_digcodec_clk = { + .halt_reg = 0x1c0b0, + .clkr = { + .enable_reg = 0x1c0b0, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ultaudio_codec_digcodec_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &codec_digcodec_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ultaudio_pcnoc_mport_clk = { + .halt_reg = 0x1c000, + .clkr = { + .enable_reg = 0x1c000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ultaudio_pcnoc_mport_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &pcnoc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ultaudio_pcnoc_sway_clk = { + .halt_reg = 0x1c004, + .clkr = { + .enable_reg = 0x1c004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ultaudio_pcnoc_sway_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &pcnoc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +}; + +static const struct freq_tbl ftbl_gcc_venus0_vcodec0_clk[] = { + F(100000000, P_GPLL0, 8, 0, 0), + F(160000000, P_GPLL0, 5, 0, 0), + F(228570000, P_GPLL0, 3.5, 0, 0), + { } +}; + +static struct clk_rcg2 vcodec0_clk_src = { + .cmd_rcgr = 0x4C000, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_gcc_venus0_vcodec0_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "vcodec0_clk_src", + .parent_data = gcc_xo_gpll0_parent_data, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_branch gcc_blsp1_ahb_clk = { + .halt_reg = 0x01008, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x45004, + .enable_mask = BIT(10), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_ahb_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &pcnoc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_sleep_clk = { + .halt_reg = 0x01004, + .clkr = { + .enable_reg = 0x01004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_sleep_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = { + .halt_reg = 0x02008, + .clkr = { + .enable_reg = 0x02008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup1_i2c_apps_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &blsp1_qup1_i2c_apps_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = 
CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = { + .halt_reg = 0x02004, + .clkr = { + .enable_reg = 0x02004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup1_spi_apps_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &blsp1_qup1_spi_apps_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = { + .halt_reg = 0x03010, + .clkr = { + .enable_reg = 0x03010, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup2_i2c_apps_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &blsp1_qup2_i2c_apps_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = { + .halt_reg = 0x0300c, + .clkr = { + .enable_reg = 0x0300c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup2_spi_apps_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &blsp1_qup2_spi_apps_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = { + .halt_reg = 0x04020, + .clkr = { + .enable_reg = 0x04020, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup3_i2c_apps_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &blsp1_qup3_i2c_apps_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = { + .halt_reg = 0x0401c, + .clkr = { + .enable_reg = 0x0401c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup3_spi_apps_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &blsp1_qup3_spi_apps_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = { + .halt_reg = 0x05020, + .clkr = { + .enable_reg = 0x05020, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup4_i2c_apps_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &blsp1_qup4_i2c_apps_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = { + .halt_reg = 0x0501c, + .clkr = { + .enable_reg = 0x0501c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup4_spi_apps_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &blsp1_qup4_spi_apps_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup5_i2c_apps_clk = { + .halt_reg = 0x06020, + .clkr = { + .enable_reg = 0x06020, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup5_i2c_apps_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &blsp1_qup5_i2c_apps_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup5_spi_apps_clk = { + .halt_reg = 0x0601c, + .clkr = { + .enable_reg = 0x0601c, + .enable_mask = BIT(0), + 
.hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup5_spi_apps_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &blsp1_qup5_spi_apps_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup6_i2c_apps_clk = { + .halt_reg = 0x07020, + .clkr = { + .enable_reg = 0x07020, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup6_i2c_apps_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &blsp1_qup6_i2c_apps_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup6_spi_apps_clk = { + .halt_reg = 0x0701c, + .clkr = { + .enable_reg = 0x0701c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup6_spi_apps_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &blsp1_qup6_spi_apps_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_uart1_apps_clk = { + .halt_reg = 0x0203c, + .clkr = { + .enable_reg = 0x0203c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_uart1_apps_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &blsp1_uart1_apps_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_uart2_apps_clk = { + .halt_reg = 0x0302c, + .clkr = { + .enable_reg = 0x0302c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_uart2_apps_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &blsp1_uart2_apps_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_boot_rom_ahb_clk = { + .halt_reg = 0x1300c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x45004, + .enable_mask = BIT(7), + .hw.init = &(struct clk_init_data){ + .name = "gcc_boot_rom_ahb_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &pcnoc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_cci_ahb_clk = { + .halt_reg = 0x5101c, + .clkr = { + .enable_reg = 0x5101c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_cci_ahb_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &camss_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_cci_clk = { + .halt_reg = 0x51018, + .clkr = { + .enable_reg = 0x51018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_cci_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &cci_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_csi0_ahb_clk = { + .halt_reg = 0x4e040, + .clkr = { + .enable_reg = 0x4e040, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_csi0_ahb_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &camss_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_csi0_clk = { + 
.halt_reg = 0x4e03c, + .clkr = { + .enable_reg = 0x4e03c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_csi0_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &csi0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_csi0phy_clk = { + .halt_reg = 0x4e048, + .clkr = { + .enable_reg = 0x4e048, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_csi0phy_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &csi0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_csi0pix_clk = { + .halt_reg = 0x4e058, + .clkr = { + .enable_reg = 0x4e058, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_csi0pix_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &csi0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_csi0rdi_clk = { + .halt_reg = 0x4e050, + .clkr = { + .enable_reg = 0x4e050, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_csi0rdi_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &csi0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_csi1_ahb_clk = { + .halt_reg = 0x4f040, + .clkr = { + .enable_reg = 0x4f040, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_csi1_ahb_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &camss_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_csi1_clk = { + .halt_reg = 0x4f03c, + .clkr = { + .enable_reg = 0x4f03c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_csi1_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &csi1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_csi1phy_clk = { + .halt_reg = 0x4f048, + .clkr = { + .enable_reg = 0x4f048, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_csi1phy_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &csi1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_csi1pix_clk = { + .halt_reg = 0x4f058, + .clkr = { + .enable_reg = 0x4f058, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_csi1pix_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &csi1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_csi1rdi_clk = { + .halt_reg = 0x4f050, + .clkr = { + .enable_reg = 0x4f050, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_csi1rdi_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &csi1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_csi_vfe0_clk = { + .halt_reg = 0x58050, + .clkr = { + 
.enable_reg = 0x58050, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_csi_vfe0_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &vfe0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_gp0_clk = { + .halt_reg = 0x54018, + .clkr = { + .enable_reg = 0x54018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_gp0_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &camss_gp0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_gp1_clk = { + .halt_reg = 0x55018, + .clkr = { + .enable_reg = 0x55018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_gp1_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &camss_gp1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_ispif_ahb_clk = { + .halt_reg = 0x50004, + .clkr = { + .enable_reg = 0x50004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_ispif_ahb_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &camss_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_jpeg0_clk = { + .halt_reg = 0x57020, + .clkr = { + .enable_reg = 0x57020, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_jpeg0_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &jpeg0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_jpeg_ahb_clk = { + .halt_reg = 0x57024, + .clkr = { + .enable_reg = 0x57024, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_jpeg_ahb_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &camss_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_jpeg_axi_clk = { + .halt_reg = 0x57028, + .clkr = { + .enable_reg = 0x57028, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_jpeg_axi_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &system_noc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_mclk0_clk = { + .halt_reg = 0x52018, + .clkr = { + .enable_reg = 0x52018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_mclk0_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &mclk0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_mclk1_clk = { + .halt_reg = 0x53018, + .clkr = { + .enable_reg = 0x53018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_mclk1_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &mclk1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_micro_ahb_clk = { + .halt_reg = 0x5600c, + .clkr = { + .enable_reg = 
0x5600c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_micro_ahb_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &camss_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_csi0phytimer_clk = { + .halt_reg = 0x4e01c, + .clkr = { + .enable_reg = 0x4e01c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_csi0phytimer_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &csi0phytimer_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_csi1phytimer_clk = { + .halt_reg = 0x4f01c, + .clkr = { + .enable_reg = 0x4f01c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_csi1phytimer_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &csi1phytimer_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_ahb_clk = { + .halt_reg = 0x5a014, + .clkr = { + .enable_reg = 0x5a014, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_ahb_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &camss_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_top_ahb_clk = { + .halt_reg = 0x56004, + .clkr = { + .enable_reg = 0x56004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_top_ahb_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &pcnoc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_cpp_ahb_clk = { + .halt_reg = 0x58040, + .clkr = { + .enable_reg = 0x58040, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_cpp_ahb_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &camss_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_cpp_clk = { + .halt_reg = 0x5803c, + .clkr = { + .enable_reg = 0x5803c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_cpp_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &cpp_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_vfe0_clk = { + .halt_reg = 0x58038, + .clkr = { + .enable_reg = 0x58038, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_vfe0_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &vfe0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_vfe_ahb_clk = { + .halt_reg = 0x58044, + .clkr = { + .enable_reg = 0x58044, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_vfe_ahb_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &camss_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_vfe_axi_clk = { + .halt_reg = 0x58048, + .clkr = { + 
.enable_reg = 0x58048, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_vfe_axi_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &system_noc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_crypto_ahb_clk = { + .halt_reg = 0x16024, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x45004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_crypto_ahb_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &pcnoc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_crypto_axi_clk = { + .halt_reg = 0x16020, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x45004, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gcc_crypto_axi_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &pcnoc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_crypto_clk = { + .halt_reg = 0x1601c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x45004, + .enable_mask = BIT(2), + .hw.init = &(struct clk_init_data){ + .name = "gcc_crypto_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &crypto_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_oxili_gmem_clk = { + .halt_reg = 0x59024, + .clkr = { + .enable_reg = 0x59024, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_oxili_gmem_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gfx3d_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gp1_clk = { + .halt_reg = 0x08000, + .clkr = { + .enable_reg = 0x08000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gp1_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gp1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gp2_clk = { + .halt_reg = 0x09000, + .clkr = { + .enable_reg = 0x09000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gp2_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gp2_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gp3_clk = { + .halt_reg = 0x0a000, + .clkr = { + .enable_reg = 0x0a000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gp3_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gp3_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mdss_ahb_clk = { + .halt_reg = 0x4d07c, + .clkr = { + .enable_reg = 0x4d07c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mdss_ahb_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &pcnoc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mdss_axi_clk = { + .halt_reg = 0x4d080, + .clkr = { + 
.enable_reg = 0x4d080, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mdss_axi_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &system_noc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mdss_byte0_clk = { + .halt_reg = 0x4d094, + .clkr = { + .enable_reg = 0x4d094, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mdss_byte0_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &byte0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mdss_byte1_clk = { + .halt_reg = 0x4d0a0, + .clkr = { + .enable_reg = 0x4d0a0, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mdss_byte1_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &byte1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mdss_esc0_clk = { + .halt_reg = 0x4d098, + .clkr = { + .enable_reg = 0x4d098, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mdss_esc0_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &esc0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mdss_esc1_clk = { + .halt_reg = 0x4d09c, + .clkr = { + .enable_reg = 0x4d09c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mdss_esc1_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &esc1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mdss_mdp_clk = { + .halt_reg = 0x4D088, + .clkr = { + .enable_reg = 0x4D088, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mdss_mdp_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &mdp_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mdss_pclk0_clk = { + .halt_reg = 0x4d084, + .clkr = { + .enable_reg = 0x4d084, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mdss_pclk0_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &pclk0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mdss_pclk1_clk = { + .halt_reg = 0x4d0a4, + .clkr = { + .enable_reg = 0x4d0a4, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mdss_pclk1_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &pclk1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mdss_vsync_clk = { + .halt_reg = 0x4d090, + .clkr = { + .enable_reg = 0x4d090, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mdss_vsync_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &vsync_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mss_cfg_ahb_clk = { + .halt_reg = 0x49000, + .clkr = { + .enable_reg = 0x49000, + .enable_mask = BIT(0), + .hw.init = &(struct 
clk_init_data){ + .name = "gcc_mss_cfg_ahb_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &pcnoc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mss_q6_bimc_axi_clk = { + .halt_reg = 0x49004, + .clkr = { + .enable_reg = 0x49004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mss_q6_bimc_axi_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &bimc_ddr_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_oxili_ahb_clk = { + .halt_reg = 0x59028, + .clkr = { + .enable_reg = 0x59028, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_oxili_ahb_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &pcnoc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_oxili_gfx3d_clk = { + .halt_reg = 0x59020, + .clkr = { + .enable_reg = 0x59020, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_oxili_gfx3d_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gfx3d_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pdm2_clk = { + .halt_reg = 0x4400c, + .clkr = { + .enable_reg = 0x4400c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pdm2_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &pdm2_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pdm_ahb_clk = { + .halt_reg = 0x44004, + .clkr = { + .enable_reg = 0x44004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pdm_ahb_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &pcnoc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_prng_ahb_clk = { + .halt_reg = 0x13004, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x45004, + .enable_mask = BIT(8), + .hw.init = &(struct clk_init_data){ + .name = "gcc_prng_ahb_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &pcnoc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc1_ahb_clk = { + .halt_reg = 0x4201c, + .clkr = { + .enable_reg = 0x4201c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc1_ahb_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &pcnoc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc1_apps_clk = { + .halt_reg = 0x42018, + .clkr = { + .enable_reg = 0x42018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc1_apps_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &sdcc1_apps_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc2_ahb_clk = { + .halt_reg = 0x4301c, + .clkr = { + .enable_reg = 0x4301c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc2_ahb_clk", + 
.parent_data = &(const struct clk_parent_data){ + .hw = &pcnoc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc2_apps_clk = { + .halt_reg = 0x43018, + .clkr = { + .enable_reg = 0x43018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc2_apps_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &sdcc2_apps_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_apss_tcu_clk = { + .halt_reg = 0x12018, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4500c, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gcc_apss_tcu_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &bimc_ddr_clk_src.clkr.hw, + }, + .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gfx_tcu_clk = { + .halt_reg = 0x12020, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4500c, + .enable_mask = BIT(2), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gfx_tcu_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &bimc_ddr_clk_src.clkr.hw, + }, + .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gfx_tbu_clk = { + .halt_reg = 0x12010, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4500c, + .enable_mask = BIT(3), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gfx_tbu_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &bimc_ddr_clk_src.clkr.hw, + }, + .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mdp_tbu_clk = { + .halt_reg = 0x1201c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4500c, + .enable_mask = BIT(4), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mdp_tbu_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &system_noc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_venus_tbu_clk = { + .halt_reg = 0x12014, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4500c, + .enable_mask = BIT(5), + .hw.init = &(struct clk_init_data){ + .name = "gcc_venus_tbu_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &system_noc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_vfe_tbu_clk = { + .halt_reg = 0x1203c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4500c, + .enable_mask = BIT(9), + .hw.init = &(struct clk_init_data){ + .name = "gcc_vfe_tbu_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &system_noc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_jpeg_tbu_clk = { + .halt_reg = 0x12034, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4500c, + .enable_mask = BIT(10), + .hw.init = &(struct clk_init_data){ + .name = "gcc_jpeg_tbu_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &system_noc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_smmu_cfg_clk = { + .halt_reg = 0x12038, + .halt_check = BRANCH_HALT_VOTED, + 
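+	/*
+	 * BRANCH_HALT_VOTED: this clock is enabled through the shared voting
+	 * registers (0x45004/0x4500c) rather than its own CBCR, so another
+	 * master may keep it running; on disable the branch code only delays
+	 * instead of waiting for the halt bit to assert.
+	 */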
.clkr = { + .enable_reg = 0x4500c, + .enable_mask = BIT(12), + .hw.init = &(struct clk_init_data){ + .name = "gcc_smmu_cfg_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &pcnoc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gtcu_ahb_clk = { + .halt_reg = 0x12044, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4500c, + .enable_mask = BIT(13), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gtcu_ahb_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &pcnoc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_cpp_tbu_clk = { + .halt_reg = 0x12040, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4500c, + .enable_mask = BIT(14), + .hw.init = &(struct clk_init_data){ + .name = "gcc_cpp_tbu_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &pcnoc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mdp_rt_tbu_clk = { + .halt_reg = 0x1201c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4500c, + .enable_mask = BIT(15), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mdp_rt_tbu_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &pcnoc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_bimc_gfx_clk = { + .halt_reg = 0x31024, + .clkr = { + .enable_reg = 0x31024, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_bimc_gfx_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &bimc_gpu_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_bimc_gpu_clk = { + .halt_reg = 0x31040, + .clkr = { + .enable_reg = 0x31040, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_bimc_gpu_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &bimc_gpu_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb2a_phy_sleep_clk = { + .halt_reg = 0x4102c, + .clkr = { + .enable_reg = 0x4102c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb2a_phy_sleep_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb_fs_ahb_clk = { + .halt_reg = 0x3f008, + .clkr = { + .enable_reg = 0x3f008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb_fs_ahb_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &pcnoc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb_fs_ic_clk = { + .halt_reg = 0x3f030, + .clkr = { + .enable_reg = 0x3f030, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb_fs_ic_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &usb_fs_ic_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb_fs_system_clk = { + .halt_reg = 0x3f004, + .clkr = { + .enable_reg = 0x3f004, + .enable_mask = BIT(0), + .hw.init = 
&(struct clk_init_data){ + .name = "gcc_usb_fs_system_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &usb_fs_system_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb_hs_ahb_clk = { + .halt_reg = 0x41008, + .clkr = { + .enable_reg = 0x41008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb_hs_ahb_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &pcnoc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb_hs_system_clk = { + .halt_reg = 0x41004, + .clkr = { + .enable_reg = 0x41004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb_hs_system_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &usb_hs_system_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_venus0_ahb_clk = { + .halt_reg = 0x4c020, + .clkr = { + .enable_reg = 0x4c020, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_venus0_ahb_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &pcnoc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_venus0_axi_clk = { + .halt_reg = 0x4c024, + .clkr = { + .enable_reg = 0x4c024, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_venus0_axi_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &system_noc_bfdcd_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_venus0_vcodec0_clk = { + .halt_reg = 0x4c01c, + .clkr = { + .enable_reg = 0x4c01c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_venus0_vcodec0_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &vcodec0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_venus0_core0_vcodec0_clk = { + .halt_reg = 0x4c02c, + .clkr = { + .enable_reg = 0x4c02c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_venus0_core0_vcodec0_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &vcodec0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_venus0_core1_vcodec0_clk = { + .halt_reg = 0x4c034, + .clkr = { + .enable_reg = 0x4c034, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_venus0_core1_vcodec0_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &vcodec0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_oxili_timer_clk = { + .halt_reg = 0x59040, + .clkr = { + .enable_reg = 0x59040, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_oxili_timer_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct gdsc venus_gdsc = { + .gdscr = 0x4c018, + .pd = { + .name = "venus", + }, + .pwrsts = PWRSTS_OFF_ON, +}; + +static struct gdsc mdss_gdsc = { + .gdscr = 0x4d078, + .pd = { + .name = "mdss", + }, + .pwrsts = PWRSTS_OFF_ON, +}; + +static struct gdsc 
jpeg_gdsc = { + .gdscr = 0x5701c, + .pd = { + .name = "jpeg", + }, + .pwrsts = PWRSTS_OFF_ON, +}; + +static struct gdsc vfe_gdsc = { + .gdscr = 0x58034, + .pd = { + .name = "vfe", + }, + .pwrsts = PWRSTS_OFF_ON, +}; + +static struct gdsc oxili_gdsc = { + .gdscr = 0x5901c, + .pd = { + .name = "oxili", + }, + .pwrsts = PWRSTS_OFF_ON, +}; + +static struct gdsc venus_core0_gdsc = { + .gdscr = 0x4c028, + .pd = { + .name = "venus_core0", + }, + .pwrsts = PWRSTS_OFF_ON, +}; + +static struct gdsc venus_core1_gdsc = { + .gdscr = 0x4c030, + .pd = { + .name = "venus_core1", + }, + .pwrsts = PWRSTS_OFF_ON, +}; + +static struct clk_regmap *gcc_msm8939_clocks[] = { + [GPLL0] = &gpll0.clkr, + [GPLL0_VOTE] = &gpll0_vote, + [BIMC_PLL] = &bimc_pll.clkr, + [BIMC_PLL_VOTE] = &bimc_pll_vote, + [GPLL1] = &gpll1.clkr, + [GPLL1_VOTE] = &gpll1_vote, + [GPLL2] = &gpll2.clkr, + [GPLL2_VOTE] = &gpll2_vote, + [PCNOC_BFDCD_CLK_SRC] = &pcnoc_bfdcd_clk_src.clkr, + [SYSTEM_NOC_BFDCD_CLK_SRC] = &system_noc_bfdcd_clk_src.clkr, + [CAMSS_AHB_CLK_SRC] = &camss_ahb_clk_src.clkr, + [APSS_AHB_CLK_SRC] = &apss_ahb_clk_src.clkr, + [CSI0_CLK_SRC] = &csi0_clk_src.clkr, + [CSI1_CLK_SRC] = &csi1_clk_src.clkr, + [GFX3D_CLK_SRC] = &gfx3d_clk_src.clkr, + [VFE0_CLK_SRC] = &vfe0_clk_src.clkr, + [BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr, + [BLSP1_QUP1_SPI_APPS_CLK_SRC] = &blsp1_qup1_spi_apps_clk_src.clkr, + [BLSP1_QUP2_I2C_APPS_CLK_SRC] = &blsp1_qup2_i2c_apps_clk_src.clkr, + [BLSP1_QUP2_SPI_APPS_CLK_SRC] = &blsp1_qup2_spi_apps_clk_src.clkr, + [BLSP1_QUP3_I2C_APPS_CLK_SRC] = &blsp1_qup3_i2c_apps_clk_src.clkr, + [BLSP1_QUP3_SPI_APPS_CLK_SRC] = &blsp1_qup3_spi_apps_clk_src.clkr, + [BLSP1_QUP4_I2C_APPS_CLK_SRC] = &blsp1_qup4_i2c_apps_clk_src.clkr, + [BLSP1_QUP4_SPI_APPS_CLK_SRC] = &blsp1_qup4_spi_apps_clk_src.clkr, + [BLSP1_QUP5_I2C_APPS_CLK_SRC] = &blsp1_qup5_i2c_apps_clk_src.clkr, + [BLSP1_QUP5_SPI_APPS_CLK_SRC] = &blsp1_qup5_spi_apps_clk_src.clkr, + [BLSP1_QUP6_I2C_APPS_CLK_SRC] = &blsp1_qup6_i2c_apps_clk_src.clkr, + [BLSP1_QUP6_SPI_APPS_CLK_SRC] = &blsp1_qup6_spi_apps_clk_src.clkr, + [BLSP1_UART1_APPS_CLK_SRC] = &blsp1_uart1_apps_clk_src.clkr, + [BLSP1_UART2_APPS_CLK_SRC] = &blsp1_uart2_apps_clk_src.clkr, + [CCI_CLK_SRC] = &cci_clk_src.clkr, + [CAMSS_GP0_CLK_SRC] = &camss_gp0_clk_src.clkr, + [CAMSS_GP1_CLK_SRC] = &camss_gp1_clk_src.clkr, + [JPEG0_CLK_SRC] = &jpeg0_clk_src.clkr, + [MCLK0_CLK_SRC] = &mclk0_clk_src.clkr, + [MCLK1_CLK_SRC] = &mclk1_clk_src.clkr, + [CSI0PHYTIMER_CLK_SRC] = &csi0phytimer_clk_src.clkr, + [CSI1PHYTIMER_CLK_SRC] = &csi1phytimer_clk_src.clkr, + [CPP_CLK_SRC] = &cpp_clk_src.clkr, + [CRYPTO_CLK_SRC] = &crypto_clk_src.clkr, + [GP1_CLK_SRC] = &gp1_clk_src.clkr, + [GP2_CLK_SRC] = &gp2_clk_src.clkr, + [GP3_CLK_SRC] = &gp3_clk_src.clkr, + [BYTE0_CLK_SRC] = &byte0_clk_src.clkr, + [ESC0_CLK_SRC] = &esc0_clk_src.clkr, + [MDP_CLK_SRC] = &mdp_clk_src.clkr, + [PCLK0_CLK_SRC] = &pclk0_clk_src.clkr, + [VSYNC_CLK_SRC] = &vsync_clk_src.clkr, + [PDM2_CLK_SRC] = &pdm2_clk_src.clkr, + [SDCC1_APPS_CLK_SRC] = &sdcc1_apps_clk_src.clkr, + [SDCC2_APPS_CLK_SRC] = &sdcc2_apps_clk_src.clkr, + [APSS_TCU_CLK_SRC] = &apss_tcu_clk_src.clkr, + [USB_HS_SYSTEM_CLK_SRC] = &usb_hs_system_clk_src.clkr, + [VCODEC0_CLK_SRC] = &vcodec0_clk_src.clkr, + [GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr, + [GCC_BLSP1_SLEEP_CLK] = &gcc_blsp1_sleep_clk.clkr, + [GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr, + [GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr, + [GCC_BLSP1_QUP2_I2C_APPS_CLK] = 
&gcc_blsp1_qup2_i2c_apps_clk.clkr, + [GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr, + [GCC_BLSP1_QUP3_I2C_APPS_CLK] = &gcc_blsp1_qup3_i2c_apps_clk.clkr, + [GCC_BLSP1_QUP3_SPI_APPS_CLK] = &gcc_blsp1_qup3_spi_apps_clk.clkr, + [GCC_BLSP1_QUP4_I2C_APPS_CLK] = &gcc_blsp1_qup4_i2c_apps_clk.clkr, + [GCC_BLSP1_QUP4_SPI_APPS_CLK] = &gcc_blsp1_qup4_spi_apps_clk.clkr, + [GCC_BLSP1_QUP5_I2C_APPS_CLK] = &gcc_blsp1_qup5_i2c_apps_clk.clkr, + [GCC_BLSP1_QUP5_SPI_APPS_CLK] = &gcc_blsp1_qup5_spi_apps_clk.clkr, + [GCC_BLSP1_QUP6_I2C_APPS_CLK] = &gcc_blsp1_qup6_i2c_apps_clk.clkr, + [GCC_BLSP1_QUP6_SPI_APPS_CLK] = &gcc_blsp1_qup6_spi_apps_clk.clkr, + [GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr, + [GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr, + [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr, + [GCC_CAMSS_CCI_AHB_CLK] = &gcc_camss_cci_ahb_clk.clkr, + [GCC_CAMSS_CCI_CLK] = &gcc_camss_cci_clk.clkr, + [GCC_CAMSS_CSI0_AHB_CLK] = &gcc_camss_csi0_ahb_clk.clkr, + [GCC_CAMSS_CSI0_CLK] = &gcc_camss_csi0_clk.clkr, + [GCC_CAMSS_CSI0PHY_CLK] = &gcc_camss_csi0phy_clk.clkr, + [GCC_CAMSS_CSI0PIX_CLK] = &gcc_camss_csi0pix_clk.clkr, + [GCC_CAMSS_CSI0RDI_CLK] = &gcc_camss_csi0rdi_clk.clkr, + [GCC_CAMSS_CSI1_AHB_CLK] = &gcc_camss_csi1_ahb_clk.clkr, + [GCC_CAMSS_CSI1_CLK] = &gcc_camss_csi1_clk.clkr, + [GCC_CAMSS_CSI1PHY_CLK] = &gcc_camss_csi1phy_clk.clkr, + [GCC_CAMSS_CSI1PIX_CLK] = &gcc_camss_csi1pix_clk.clkr, + [GCC_CAMSS_CSI1RDI_CLK] = &gcc_camss_csi1rdi_clk.clkr, + [GCC_CAMSS_CSI_VFE0_CLK] = &gcc_camss_csi_vfe0_clk.clkr, + [GCC_CAMSS_GP0_CLK] = &gcc_camss_gp0_clk.clkr, + [GCC_CAMSS_GP1_CLK] = &gcc_camss_gp1_clk.clkr, + [GCC_CAMSS_ISPIF_AHB_CLK] = &gcc_camss_ispif_ahb_clk.clkr, + [GCC_CAMSS_JPEG0_CLK] = &gcc_camss_jpeg0_clk.clkr, + [GCC_CAMSS_JPEG_AHB_CLK] = &gcc_camss_jpeg_ahb_clk.clkr, + [GCC_CAMSS_JPEG_AXI_CLK] = &gcc_camss_jpeg_axi_clk.clkr, + [GCC_CAMSS_MCLK0_CLK] = &gcc_camss_mclk0_clk.clkr, + [GCC_CAMSS_MCLK1_CLK] = &gcc_camss_mclk1_clk.clkr, + [GCC_CAMSS_MICRO_AHB_CLK] = &gcc_camss_micro_ahb_clk.clkr, + [GCC_CAMSS_CSI0PHYTIMER_CLK] = &gcc_camss_csi0phytimer_clk.clkr, + [GCC_CAMSS_CSI1PHYTIMER_CLK] = &gcc_camss_csi1phytimer_clk.clkr, + [GCC_CAMSS_AHB_CLK] = &gcc_camss_ahb_clk.clkr, + [GCC_CAMSS_TOP_AHB_CLK] = &gcc_camss_top_ahb_clk.clkr, + [GCC_CAMSS_CPP_AHB_CLK] = &gcc_camss_cpp_ahb_clk.clkr, + [GCC_CAMSS_CPP_CLK] = &gcc_camss_cpp_clk.clkr, + [GCC_CAMSS_VFE0_CLK] = &gcc_camss_vfe0_clk.clkr, + [GCC_CAMSS_VFE_AHB_CLK] = &gcc_camss_vfe_ahb_clk.clkr, + [GCC_CAMSS_VFE_AXI_CLK] = &gcc_camss_vfe_axi_clk.clkr, + [GCC_CRYPTO_AHB_CLK] = &gcc_crypto_ahb_clk.clkr, + [GCC_CRYPTO_AXI_CLK] = &gcc_crypto_axi_clk.clkr, + [GCC_CRYPTO_CLK] = &gcc_crypto_clk.clkr, + [GCC_OXILI_GMEM_CLK] = &gcc_oxili_gmem_clk.clkr, + [GCC_GP1_CLK] = &gcc_gp1_clk.clkr, + [GCC_GP2_CLK] = &gcc_gp2_clk.clkr, + [GCC_GP3_CLK] = &gcc_gp3_clk.clkr, + [GCC_MDSS_AHB_CLK] = &gcc_mdss_ahb_clk.clkr, + [GCC_MDSS_AXI_CLK] = &gcc_mdss_axi_clk.clkr, + [GCC_MDSS_BYTE0_CLK] = &gcc_mdss_byte0_clk.clkr, + [GCC_MDSS_ESC0_CLK] = &gcc_mdss_esc0_clk.clkr, + [GCC_MDSS_MDP_CLK] = &gcc_mdss_mdp_clk.clkr, + [GCC_MDSS_PCLK0_CLK] = &gcc_mdss_pclk0_clk.clkr, + [GCC_MDSS_VSYNC_CLK] = &gcc_mdss_vsync_clk.clkr, + [GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr, + [GCC_OXILI_AHB_CLK] = &gcc_oxili_ahb_clk.clkr, + [GCC_OXILI_GFX3D_CLK] = &gcc_oxili_gfx3d_clk.clkr, + [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr, + [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr, + [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr, + [GCC_SDCC1_AHB_CLK] = 
&gcc_sdcc1_ahb_clk.clkr, + [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr, + [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr, + [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr, + [GCC_GTCU_AHB_CLK] = &gcc_gtcu_ahb_clk.clkr, + [GCC_JPEG_TBU_CLK] = &gcc_jpeg_tbu_clk.clkr, + [GCC_MDP_TBU_CLK] = &gcc_mdp_tbu_clk.clkr, + [GCC_SMMU_CFG_CLK] = &gcc_smmu_cfg_clk.clkr, + [GCC_VENUS_TBU_CLK] = &gcc_venus_tbu_clk.clkr, + [GCC_VFE_TBU_CLK] = &gcc_vfe_tbu_clk.clkr, + [GCC_USB2A_PHY_SLEEP_CLK] = &gcc_usb2a_phy_sleep_clk.clkr, + [GCC_USB_HS_AHB_CLK] = &gcc_usb_hs_ahb_clk.clkr, + [GCC_USB_HS_SYSTEM_CLK] = &gcc_usb_hs_system_clk.clkr, + [GCC_VENUS0_AHB_CLK] = &gcc_venus0_ahb_clk.clkr, + [GCC_VENUS0_AXI_CLK] = &gcc_venus0_axi_clk.clkr, + [GCC_VENUS0_VCODEC0_CLK] = &gcc_venus0_vcodec0_clk.clkr, + [BIMC_DDR_CLK_SRC] = &bimc_ddr_clk_src.clkr, + [GCC_APSS_TCU_CLK] = &gcc_apss_tcu_clk.clkr, + [GCC_GFX_TCU_CLK] = &gcc_gfx_tcu_clk.clkr, + [BIMC_GPU_CLK_SRC] = &bimc_gpu_clk_src.clkr, + [GCC_BIMC_GFX_CLK] = &gcc_bimc_gfx_clk.clkr, + [GCC_BIMC_GPU_CLK] = &gcc_bimc_gpu_clk.clkr, + [ULTAUDIO_AHBFABRIC_CLK_SRC] = &ultaudio_ahbfabric_clk_src.clkr, + [ULTAUDIO_LPAIF_PRI_I2S_CLK_SRC] = &ultaudio_lpaif_pri_i2s_clk_src.clkr, + [ULTAUDIO_LPAIF_SEC_I2S_CLK_SRC] = &ultaudio_lpaif_sec_i2s_clk_src.clkr, + [ULTAUDIO_LPAIF_AUX_I2S_CLK_SRC] = &ultaudio_lpaif_aux_i2s_clk_src.clkr, + [ULTAUDIO_XO_CLK_SRC] = &ultaudio_xo_clk_src.clkr, + [CODEC_DIGCODEC_CLK_SRC] = &codec_digcodec_clk_src.clkr, + [GCC_ULTAUDIO_PCNOC_MPORT_CLK] = &gcc_ultaudio_pcnoc_mport_clk.clkr, + [GCC_ULTAUDIO_PCNOC_SWAY_CLK] = &gcc_ultaudio_pcnoc_sway_clk.clkr, + [GCC_ULTAUDIO_AVSYNC_XO_CLK] = &gcc_ultaudio_avsync_xo_clk.clkr, + [GCC_ULTAUDIO_STC_XO_CLK] = &gcc_ultaudio_stc_xo_clk.clkr, + [GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_CLK] = &gcc_ultaudio_ahbfabric_ixfabric_clk.clkr, + [GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_LPM_CLK] = &gcc_ultaudio_ahbfabric_ixfabric_lpm_clk.clkr, + [GCC_ULTAUDIO_LPAIF_PRI_I2S_CLK] = &gcc_ultaudio_lpaif_pri_i2s_clk.clkr, + [GCC_ULTAUDIO_LPAIF_SEC_I2S_CLK] = &gcc_ultaudio_lpaif_sec_i2s_clk.clkr, + [GCC_ULTAUDIO_LPAIF_AUX_I2S_CLK] = &gcc_ultaudio_lpaif_aux_i2s_clk.clkr, + [GCC_CODEC_DIGCODEC_CLK] = &gcc_codec_digcodec_clk.clkr, + [GCC_MSS_Q6_BIMC_AXI_CLK] = &gcc_mss_q6_bimc_axi_clk.clkr, + [GPLL3] = &gpll3.clkr, + [GPLL3_VOTE] = &gpll3_vote, + [GPLL4] = &gpll4.clkr, + [GPLL4_VOTE] = &gpll4_vote, + [GPLL5] = &gpll5.clkr, + [GPLL5_VOTE] = &gpll5_vote, + [GPLL6] = &gpll6.clkr, + [GPLL6_VOTE] = &gpll6_vote, + [BYTE1_CLK_SRC] = &byte1_clk_src.clkr, + [GCC_MDSS_BYTE1_CLK] = &gcc_mdss_byte1_clk.clkr, + [ESC1_CLK_SRC] = &esc1_clk_src.clkr, + [GCC_MDSS_ESC1_CLK] = &gcc_mdss_esc1_clk.clkr, + [PCLK1_CLK_SRC] = &pclk1_clk_src.clkr, + [GCC_MDSS_PCLK1_CLK] = &gcc_mdss_pclk1_clk.clkr, + [GCC_GFX_TBU_CLK] = &gcc_gfx_tbu_clk.clkr, + [GCC_CPP_TBU_CLK] = &gcc_cpp_tbu_clk.clkr, + [GCC_MDP_RT_TBU_CLK] = &gcc_mdp_rt_tbu_clk.clkr, + [USB_FS_SYSTEM_CLK_SRC] = &usb_fs_system_clk_src.clkr, + [USB_FS_IC_CLK_SRC] = &usb_fs_ic_clk_src.clkr, + [GCC_USB_FS_AHB_CLK] = &gcc_usb_fs_ahb_clk.clkr, + [GCC_USB_FS_IC_CLK] = &gcc_usb_fs_ic_clk.clkr, + [GCC_USB_FS_SYSTEM_CLK] = &gcc_usb_fs_system_clk.clkr, + [GCC_VENUS0_CORE0_VCODEC0_CLK] = &gcc_venus0_core0_vcodec0_clk.clkr, + [GCC_VENUS0_CORE1_VCODEC0_CLK] = &gcc_venus0_core1_vcodec0_clk.clkr, + [GCC_OXILI_TIMER_CLK] = &gcc_oxili_timer_clk.clkr, +}; + +static struct gdsc *gcc_msm8939_gdscs[] = { + [VENUS_GDSC] = &venus_gdsc, + [MDSS_GDSC] = &mdss_gdsc, + [JPEG_GDSC] = &jpeg_gdsc, + [VFE_GDSC] = &vfe_gdsc, + [OXILI_GDSC] = &oxili_gdsc, + 
[VENUS_CORE0_GDSC] = &venus_core0_gdsc, + [VENUS_CORE1_GDSC] = &venus_core1_gdsc, +}; + +static const struct qcom_reset_map gcc_msm8939_resets[] = { + [GCC_BLSP1_BCR] = { 0x01000 }, + [GCC_BLSP1_QUP1_BCR] = { 0x02000 }, + [GCC_BLSP1_UART1_BCR] = { 0x02038 }, + [GCC_BLSP1_QUP2_BCR] = { 0x03008 }, + [GCC_BLSP1_UART2_BCR] = { 0x03028 }, + [GCC_BLSP1_QUP3_BCR] = { 0x04018 }, + [GCC_BLSP1_UART3_BCR] = { 0x04038 }, + [GCC_BLSP1_QUP4_BCR] = { 0x05018 }, + [GCC_BLSP1_QUP5_BCR] = { 0x06018 }, + [GCC_BLSP1_QUP6_BCR] = { 0x07018 }, + [GCC_IMEM_BCR] = { 0x0e000 }, + [GCC_SMMU_BCR] = { 0x12000 }, + [GCC_APSS_TCU_BCR] = { 0x12050 }, + [GCC_SMMU_XPU_BCR] = { 0x12054 }, + [GCC_PCNOC_TBU_BCR] = { 0x12058 }, + [GCC_PRNG_BCR] = { 0x13000 }, + [GCC_BOOT_ROM_BCR] = { 0x13008 }, + [GCC_CRYPTO_BCR] = { 0x16000 }, + [GCC_SEC_CTRL_BCR] = { 0x1a000 }, + [GCC_AUDIO_CORE_BCR] = { 0x1c008 }, + [GCC_ULT_AUDIO_BCR] = { 0x1c0b4 }, + [GCC_DEHR_BCR] = { 0x1f000 }, + [GCC_SYSTEM_NOC_BCR] = { 0x26000 }, + [GCC_PCNOC_BCR] = { 0x27018 }, + [GCC_TCSR_BCR] = { 0x28000 }, + [GCC_QDSS_BCR] = { 0x29000 }, + [GCC_DCD_BCR] = { 0x2a000 }, + [GCC_MSG_RAM_BCR] = { 0x2b000 }, + [GCC_MPM_BCR] = { 0x2c000 }, + [GCC_SPMI_BCR] = { 0x2e000 }, + [GCC_SPDM_BCR] = { 0x2f000 }, + [GCC_MM_SPDM_BCR] = { 0x2f024 }, + [GCC_BIMC_BCR] = { 0x31000 }, + [GCC_RBCPR_BCR] = { 0x33000 }, + [GCC_TLMM_BCR] = { 0x34000 }, + [GCC_CAMSS_CSI2_BCR] = { 0x3c038 }, + [GCC_CAMSS_CSI2PHY_BCR] = { 0x3c044 }, + [GCC_CAMSS_CSI2RDI_BCR] = { 0x3c04c }, + [GCC_CAMSS_CSI2PIX_BCR] = { 0x3c054 }, + [GCC_USB_FS_BCR] = { 0x3f000 }, + [GCC_USB_HS_BCR] = { 0x41000 }, + [GCC_USB2A_PHY_BCR] = { 0x41028 }, + [GCC_SDCC1_BCR] = { 0x42000 }, + [GCC_SDCC2_BCR] = { 0x43000 }, + [GCC_PDM_BCR] = { 0x44000 }, + [GCC_SNOC_BUS_TIMEOUT0_BCR] = { 0x47000 }, + [GCC_PCNOC_BUS_TIMEOUT0_BCR] = { 0x48000 }, + [GCC_PCNOC_BUS_TIMEOUT1_BCR] = { 0x48008 }, + [GCC_PCNOC_BUS_TIMEOUT2_BCR] = { 0x48010 }, + [GCC_PCNOC_BUS_TIMEOUT3_BCR] = { 0x48018 }, + [GCC_PCNOC_BUS_TIMEOUT4_BCR] = { 0x48020 }, + [GCC_PCNOC_BUS_TIMEOUT5_BCR] = { 0x48028 }, + [GCC_PCNOC_BUS_TIMEOUT6_BCR] = { 0x48030 }, + [GCC_PCNOC_BUS_TIMEOUT7_BCR] = { 0x48038 }, + [GCC_PCNOC_BUS_TIMEOUT8_BCR] = { 0x48040 }, + [GCC_PCNOC_BUS_TIMEOUT9_BCR] = { 0x48048 }, + [GCC_MMSS_BCR] = { 0x4b000 }, + [GCC_VENUS0_BCR] = { 0x4c014 }, + [GCC_MDSS_BCR] = { 0x4d074 }, + [GCC_CAMSS_PHY0_BCR] = { 0x4e018 }, + [GCC_CAMSS_CSI0_BCR] = { 0x4e038 }, + [GCC_CAMSS_CSI0PHY_BCR] = { 0x4e044 }, + [GCC_CAMSS_CSI0RDI_BCR] = { 0x4e04c }, + [GCC_CAMSS_CSI0PIX_BCR] = { 0x4e054 }, + [GCC_CAMSS_PHY1_BCR] = { 0x4f018 }, + [GCC_CAMSS_CSI1_BCR] = { 0x4f038 }, + [GCC_CAMSS_CSI1PHY_BCR] = { 0x4f044 }, + [GCC_CAMSS_CSI1RDI_BCR] = { 0x4f04c }, + [GCC_CAMSS_CSI1PIX_BCR] = { 0x4f054 }, + [GCC_CAMSS_ISPIF_BCR] = { 0x50000 }, + [GCC_BLSP1_QUP4_SPI_APPS_CBCR] = { 0x0501c }, + [GCC_CAMSS_CCI_BCR] = { 0x51014 }, + [GCC_CAMSS_MCLK0_BCR] = { 0x52014 }, + [GCC_CAMSS_MCLK1_BCR] = { 0x53014 }, + [GCC_CAMSS_GP0_BCR] = { 0x54014 }, + [GCC_CAMSS_GP1_BCR] = { 0x55014 }, + [GCC_CAMSS_TOP_BCR] = { 0x56000 }, + [GCC_CAMSS_MICRO_BCR] = { 0x56008 }, + [GCC_CAMSS_JPEG_BCR] = { 0x57018 }, + [GCC_CAMSS_VFE_BCR] = { 0x58030 }, + [GCC_CAMSS_CSI_VFE0_BCR] = { 0x5804c }, + [GCC_OXILI_BCR] = { 0x59018 }, + [GCC_GMEM_BCR] = { 0x5902c }, + [GCC_CAMSS_AHB_BCR] = { 0x5a018 }, + [GCC_CAMSS_MCLK2_BCR] = { 0x5c014 }, + [GCC_MDP_TBU_BCR] = { 0x62000 }, + [GCC_GFX_TBU_BCR] = { 0x63000 }, + [GCC_GFX_TCU_BCR] = { 0x64000 }, + [GCC_MSS_TBU_AXI_BCR] = { 0x65000 }, + [GCC_MSS_TBU_GSS_AXI_BCR] = { 0x66000 }, + 
[GCC_MSS_TBU_Q6_AXI_BCR] = { 0x67000 }, + [GCC_GTCU_AHB_BCR] = { 0x68000 }, + [GCC_SMMU_CFG_BCR] = { 0x69000 }, + [GCC_VFE_TBU_BCR] = { 0x6a000 }, + [GCC_VENUS_TBU_BCR] = { 0x6b000 }, + [GCC_JPEG_TBU_BCR] = { 0x6c000 }, + [GCC_PRONTO_TBU_BCR] = { 0x6d000 }, + [GCC_CPP_TBU_BCR] = { 0x6e000 }, + [GCC_MDP_RT_TBU_BCR] = { 0x6f000 }, + [GCC_SMMU_CATS_BCR] = { 0x7c000 }, +}; + +static const struct regmap_config gcc_msm8939_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x80000, + .fast_io = true, +}; + +static const struct qcom_cc_desc gcc_msm8939_desc = { + .config = &gcc_msm8939_regmap_config, + .clks = gcc_msm8939_clocks, + .num_clks = ARRAY_SIZE(gcc_msm8939_clocks), + .resets = gcc_msm8939_resets, + .num_resets = ARRAY_SIZE(gcc_msm8939_resets), + .gdscs = gcc_msm8939_gdscs, + .num_gdscs = ARRAY_SIZE(gcc_msm8939_gdscs), +}; + +static const struct of_device_id gcc_msm8939_match_table[] = { + { .compatible = "qcom,gcc-msm8939" }, + { } +}; +MODULE_DEVICE_TABLE(of, gcc_msm8939_match_table); + +static int gcc_msm8939_probe(struct platform_device *pdev) +{ + struct regmap *regmap; + + regmap = qcom_cc_map(pdev, &gcc_msm8939_desc); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + clk_pll_configure_sr_hpm_lp(&gpll3, regmap, &gpll3_config, true); + clk_pll_configure_sr_hpm_lp(&gpll4, regmap, &gpll4_config, true); + + return qcom_cc_really_probe(pdev, &gcc_msm8939_desc, regmap); +} + +static struct platform_driver gcc_msm8939_driver = { + .probe = gcc_msm8939_probe, + .driver = { + .name = "gcc-msm8939", + .of_match_table = gcc_msm8939_match_table, + }, +}; + +static int __init gcc_msm8939_init(void) +{ + return platform_driver_register(&gcc_msm8939_driver); +} +core_initcall(gcc_msm8939_init); + +static void __exit gcc_msm8939_exit(void) +{ + platform_driver_unregister(&gcc_msm8939_driver); +} +module_exit(gcc_msm8939_exit); + +MODULE_DESCRIPTION("Qualcomm GCC MSM8939 Driver"); +MODULE_LICENSE("GPL v2"); -- GitLab From d33b7eb8506a056a866461f83e5ef4c65d0ab9b9 Mon Sep 17 00:00:00 2001 From: Sivaprakash Murugesan Date: Mon, 4 May 2020 12:00:03 +0530 Subject: [PATCH 0707/1971] dt-bindings: clock: Add YAML schemas for QCOM A53 PLL This patch adds schema for primary CPU PLL found on few Qualcomm platforms. Signed-off-by: Sivaprakash Murugesan Link: https://lkml.kernel.org/r/1588573803-3823-1-git-send-email-sivaprak@codeaurora.org Reviewed-by: Rob Herring Signed-off-by: Stephen Boyd --- .../devicetree/bindings/clock/qcom,a53pll.txt | 22 ---------- .../bindings/clock/qcom,a53pll.yaml | 40 +++++++++++++++++++ 2 files changed, 40 insertions(+), 22 deletions(-) delete mode 100644 Documentation/devicetree/bindings/clock/qcom,a53pll.txt create mode 100644 Documentation/devicetree/bindings/clock/qcom,a53pll.yaml diff --git a/Documentation/devicetree/bindings/clock/qcom,a53pll.txt b/Documentation/devicetree/bindings/clock/qcom,a53pll.txt deleted file mode 100644 index e3fa8118eaee1..0000000000000 --- a/Documentation/devicetree/bindings/clock/qcom,a53pll.txt +++ /dev/null @@ -1,22 +0,0 @@ -Qualcomm MSM8916 A53 PLL Binding --------------------------------- -The A53 PLL on MSM8916 platforms is the main CPU PLL used used for frequencies -above 1GHz. 
- -Required properties : -- compatible : Shall contain only one of the following: - - "qcom,msm8916-a53pll" - -- reg : shall contain base register location and length - -- #clock-cells : must be set to <0> - -Example: - - a53pll: clock@b016000 { - compatible = "qcom,msm8916-a53pll"; - reg = <0xb016000 0x40>; - #clock-cells = <0>; - }; - diff --git a/Documentation/devicetree/bindings/clock/qcom,a53pll.yaml b/Documentation/devicetree/bindings/clock/qcom,a53pll.yaml new file mode 100644 index 0000000000000..20d2638b4cd23 --- /dev/null +++ b/Documentation/devicetree/bindings/clock/qcom,a53pll.yaml @@ -0,0 +1,40 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/clock/qcom,a53pll.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Qualcomm A53 PLL Binding + +maintainers: + - Sivaprakash Murugesan + +description: + The A53 PLL on a few Qualcomm platforms is the main CPU PLL used for + frequencies above 1GHz. + +properties: + compatible: + const: qcom,msm8916-a53pll + + reg: + maxItems: 1 + + '#clock-cells': + const: 0 + +required: + - compatible + - reg + - '#clock-cells' + +additionalProperties: false + +examples: + #Example 1 - A53 PLL found on MSM8916 devices + - | + a53pll: clock@b016000 { + compatible = "qcom,msm8916-a53pll"; + reg = <0xb016000 0x40>; + #clock-cells = <0>; + }; -- GitLab From d058fd9e8984cd9f18564f7fec38e07ce671c8b8 Mon Sep 17 00:00:00 2001 From: Rahul Tanwar Date: Fri, 17 Apr 2020 13:54:47 +0800 Subject: [PATCH 0708/1971] clk: intel: Add CGU clock driver for a new SoC Clock Generation Unit (CGU) is a new clock controller IP of a forthcoming Intel network processor SoC named Lightning Mountain (LGM). It provides programming interfaces to control & configure all CPU & peripheral clocks. Add a common clock framework based clock controller driver for the CGU.
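Since the CGU clocks are exported through the common clock framework, peripheral drivers on LGM consume them with the generic clk API rather than touching CGU registers directly. Below is a minimal consumer-side sketch; the device, the "bus" clock-name and the probe function are hypothetical examples for illustration, not part of this patch:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Hypothetical LGM peripheral driver; names are illustrative only. */
static int lgm_demo_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	/* Resolved via the consumer's clocks/clock-names DT properties,
	 * which reference the CGU provider node. */
	clk = devm_clk_get(&pdev->dev, "bus");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* Ungates the clock; for a CGU gate clock this reaches
	 * lgm_clk_gate_enable() through the clk framework. */
	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	dev_info(&pdev->dev, "bus clk: %lu Hz\n", clk_get_rate(clk));
	return 0;
}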
Signed-off-by: Rahul Tanwar Link: https://lkml.kernel.org/r/42a4f71847714df482bacffdcd84341a4052800b.1587102634.git.rahul.tanwar@linux.intel.com [sboyd@kernel.org: Kill init function to alloc and cleanup newline] Signed-off-by: Stephen Boyd --- drivers/clk/Kconfig | 1 + drivers/clk/x86/Kconfig | 8 + drivers/clk/x86/Makefile | 1 + drivers/clk/x86/clk-cgu-pll.c | 156 +++++++++ drivers/clk/x86/clk-cgu.c | 636 ++++++++++++++++++++++++++++++++++ drivers/clk/x86/clk-cgu.h | 335 ++++++++++++++++++ drivers/clk/x86/clk-lgm.c | 475 +++++++++++++++++++++++++ 7 files changed, 1612 insertions(+) create mode 100644 drivers/clk/x86/Kconfig create mode 100644 drivers/clk/x86/clk-cgu-pll.c create mode 100644 drivers/clk/x86/clk-cgu.c create mode 100644 drivers/clk/x86/clk-cgu.h create mode 100644 drivers/clk/x86/clk-lgm.c diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index bcb257baed06d..43dab257e7aa3 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -360,6 +360,7 @@ source "drivers/clk/sunxi-ng/Kconfig" source "drivers/clk/tegra/Kconfig" source "drivers/clk/ti/Kconfig" source "drivers/clk/uniphier/Kconfig" +source "drivers/clk/x86/Kconfig" source "drivers/clk/zynqmp/Kconfig" endmenu diff --git a/drivers/clk/x86/Kconfig b/drivers/clk/x86/Kconfig new file mode 100644 index 0000000000000..69642e15fcc1f --- /dev/null +++ b/drivers/clk/x86/Kconfig @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0-only +config CLK_LGM_CGU + depends on OF && HAS_IOMEM && (X86 || COMPILE_TEST) + select OF_EARLY_FLATTREE + bool "Clock driver for Lightning Mountain(LGM) platform" + help + Clock Generation Unit(CGU) driver for Intel Lightning Mountain(LGM) + network processor SoC. diff --git a/drivers/clk/x86/Makefile b/drivers/clk/x86/Makefile index e3ec81e2a1c2a..7c774ea7ddebf 100644 --- a/drivers/clk/x86/Makefile +++ b/drivers/clk/x86/Makefile @@ -3,3 +3,4 @@ obj-$(CONFIG_PMC_ATOM) += clk-pmc-atom.o obj-$(CONFIG_X86_AMD_PLATFORM_DEVICE) += clk-st.o clk-x86-lpss-objs := clk-lpt.o obj-$(CONFIG_X86_INTEL_LPSS) += clk-x86-lpss.o +obj-$(CONFIG_CLK_LGM_CGU) += clk-cgu.o clk-cgu-pll.o clk-lgm.o diff --git a/drivers/clk/x86/clk-cgu-pll.c b/drivers/clk/x86/clk-cgu-pll.c new file mode 100644 index 0000000000000..c03cc6b85b9f1 --- /dev/null +++ b/drivers/clk/x86/clk-cgu-pll.c @@ -0,0 +1,156 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020 Intel Corporation. 
+ * Zhu YiXin + * Rahul Tanwar + */ + +#include +#include +#include +#include +#include + +#include "clk-cgu.h" + +#define to_lgm_clk_pll(_hw) container_of(_hw, struct lgm_clk_pll, hw) +#define PLL_REF_DIV(x) ((x) + 0x08) + +/* + * Calculate formula: + * rate = (prate * mult + (prate * frac) / frac_div) / div + */ +static unsigned long +lgm_pll_calc_rate(unsigned long prate, unsigned int mult, + unsigned int div, unsigned int frac, unsigned int frac_div) +{ + u64 crate, frate, rate64; + + rate64 = prate; + crate = rate64 * mult; + frate = rate64 * frac; + do_div(frate, frac_div); + crate += frate; + do_div(crate, div); + + return crate; +} + +static unsigned long lgm_pll_recalc_rate(struct clk_hw *hw, unsigned long prate) +{ + struct lgm_clk_pll *pll = to_lgm_clk_pll(hw); + unsigned int div, mult, frac; + unsigned long flags; + + spin_lock_irqsave(&pll->lock, flags); + mult = lgm_get_clk_val(pll->membase, PLL_REF_DIV(pll->reg), 0, 12); + div = lgm_get_clk_val(pll->membase, PLL_REF_DIV(pll->reg), 18, 6); + frac = lgm_get_clk_val(pll->membase, pll->reg, 2, 24); + spin_unlock_irqrestore(&pll->lock, flags); + + if (pll->type == TYPE_LJPLL) + div *= 4; + + return lgm_pll_calc_rate(prate, mult, div, frac, BIT(24)); +} + +static int lgm_pll_is_enabled(struct clk_hw *hw) +{ + struct lgm_clk_pll *pll = to_lgm_clk_pll(hw); + unsigned long flags; + unsigned int ret; + + spin_lock_irqsave(&pll->lock, flags); + ret = lgm_get_clk_val(pll->membase, pll->reg, 0, 1); + spin_unlock_irqrestore(&pll->lock, flags); + + return ret; +} + +static int lgm_pll_enable(struct clk_hw *hw) +{ + struct lgm_clk_pll *pll = to_lgm_clk_pll(hw); + unsigned long flags; + u32 val; + int ret; + + spin_lock_irqsave(&pll->lock, flags); + lgm_set_clk_val(pll->membase, pll->reg, 0, 1, 1); + ret = readl_poll_timeout_atomic(pll->membase + pll->reg, + val, (val & 0x1), 1, 100); + spin_unlock_irqrestore(&pll->lock, flags); + + return ret; +} + +static void lgm_pll_disable(struct clk_hw *hw) +{ + struct lgm_clk_pll *pll = to_lgm_clk_pll(hw); + unsigned long flags; + + spin_lock_irqsave(&pll->lock, flags); + lgm_set_clk_val(pll->membase, pll->reg, 0, 1, 0); + spin_unlock_irqrestore(&pll->lock, flags); +} + +static const struct clk_ops lgm_pll_ops = { + .recalc_rate = lgm_pll_recalc_rate, + .is_enabled = lgm_pll_is_enabled, + .enable = lgm_pll_enable, + .disable = lgm_pll_disable, +}; + +static struct clk_hw * +lgm_clk_register_pll(struct lgm_clk_provider *ctx, + const struct lgm_pll_clk_data *list) +{ + struct clk_init_data init = {}; + struct lgm_clk_pll *pll; + struct device *dev = ctx->dev; + struct clk_hw *hw; + int ret; + + init.ops = &lgm_pll_ops; + init.name = list->name; + init.flags = list->flags; + init.parent_data = list->parent_data; + init.num_parents = list->num_parents; + + pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL); + if (!pll) + return ERR_PTR(-ENOMEM); + + pll->membase = ctx->membase; + pll->lock = ctx->lock; + pll->reg = list->reg; + pll->flags = list->flags; + pll->type = list->type; + pll->hw.init = &init; + + hw = &pll->hw; + ret = clk_hw_register(dev, hw); + if (ret) + return ERR_PTR(ret); + + return hw; +} + +int lgm_clk_register_plls(struct lgm_clk_provider *ctx, + const struct lgm_pll_clk_data *list, + unsigned int nr_clk) +{ + struct clk_hw *hw; + int i; + + for (i = 0; i < nr_clk; i++, list++) { + hw = lgm_clk_register_pll(ctx, list); + if (IS_ERR(hw)) { + dev_err(ctx->dev, "failed to register pll: %s\n", + list->name); + return PTR_ERR(hw); + } + ctx->clk_data.hws[list->id] = hw; + } + + return 0; +} 
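The recalc path above reduces to the formula in the comment, rate = (prate * mult + (prate * frac) / frac_div) / div, with frac_div fixed at 2^24 and the divider further multiplied by 4 for LJPLL-type PLLs. The following standalone arithmetic sketch uses made-up field values, not real CGU register contents, to show how the fractional term contributes:

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as lgm_pll_calc_rate(); example values only. */
static uint64_t pll_rate(uint64_t prate, unsigned int mult, unsigned int div,
			 unsigned int frac, uint64_t frac_div)
{
	return (prate * mult + (prate * frac) / frac_div) / div;
}

int main(void)
{
	/* 40 MHz reference, integer multiplier 60, no fractional part: 2.4 GHz */
	printf("%llu\n", (unsigned long long)pll_rate(40000000, 60, 1, 0, 1ULL << 24));
	/* frac = 0x800000 adds 0.5 * prate before the post-divider of 2 */
	printf("%llu\n", (unsigned long long)pll_rate(40000000, 60, 2, 0x800000, 1ULL << 24));
	return 0;
}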
diff --git a/drivers/clk/x86/clk-cgu.c b/drivers/clk/x86/clk-cgu.c new file mode 100644 index 0000000000000..802a7fa88535f --- /dev/null +++ b/drivers/clk/x86/clk-cgu.c @@ -0,0 +1,636 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020 Intel Corporation. + * Zhu YiXin + * Rahul Tanwar + */ +#include +#include +#include + +#include "clk-cgu.h" + +#define GATE_HW_REG_STAT(reg) ((reg) + 0x0) +#define GATE_HW_REG_EN(reg) ((reg) + 0x4) +#define GATE_HW_REG_DIS(reg) ((reg) + 0x8) +#define MAX_DDIV_REG 8 +#define MAX_DIVIDER_VAL 64 + +#define to_lgm_clk_mux(_hw) container_of(_hw, struct lgm_clk_mux, hw) +#define to_lgm_clk_divider(_hw) container_of(_hw, struct lgm_clk_divider, hw) +#define to_lgm_clk_gate(_hw) container_of(_hw, struct lgm_clk_gate, hw) +#define to_lgm_clk_ddiv(_hw) container_of(_hw, struct lgm_clk_ddiv, hw) + +static struct clk_hw *lgm_clk_register_fixed(struct lgm_clk_provider *ctx, + const struct lgm_clk_branch *list) +{ + unsigned long flags; + + if (list->div_flags & CLOCK_FLAG_VAL_INIT) { + spin_lock_irqsave(&ctx->lock, flags); + lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift, + list->div_width, list->div_val); + spin_unlock_irqrestore(&ctx->lock, flags); + } + + return clk_hw_register_fixed_rate(NULL, list->name, + list->parent_data[0].name, + list->flags, list->mux_flags); +} + +static u8 lgm_clk_mux_get_parent(struct clk_hw *hw) +{ + struct lgm_clk_mux *mux = to_lgm_clk_mux(hw); + unsigned long flags; + u32 val; + + spin_lock_irqsave(&mux->lock, flags); + if (mux->flags & MUX_CLK_SW) + val = mux->reg; + else + val = lgm_get_clk_val(mux->membase, mux->reg, mux->shift, + mux->width); + spin_unlock_irqrestore(&mux->lock, flags); + return clk_mux_val_to_index(hw, NULL, mux->flags, val); +} + +static int lgm_clk_mux_set_parent(struct clk_hw *hw, u8 index) +{ + struct lgm_clk_mux *mux = to_lgm_clk_mux(hw); + unsigned long flags; + u32 val; + + val = clk_mux_index_to_val(NULL, mux->flags, index); + spin_lock_irqsave(&mux->lock, flags); + if (mux->flags & MUX_CLK_SW) + mux->reg = val; + else + lgm_set_clk_val(mux->membase, mux->reg, mux->shift, + mux->width, val); + spin_unlock_irqrestore(&mux->lock, flags); + + return 0; +} + +static int lgm_clk_mux_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct lgm_clk_mux *mux = to_lgm_clk_mux(hw); + + return clk_mux_determine_rate_flags(hw, req, mux->flags); +} + +static const struct clk_ops lgm_clk_mux_ops = { + .get_parent = lgm_clk_mux_get_parent, + .set_parent = lgm_clk_mux_set_parent, + .determine_rate = lgm_clk_mux_determine_rate, +}; + +static struct clk_hw * +lgm_clk_register_mux(struct lgm_clk_provider *ctx, + const struct lgm_clk_branch *list) +{ + unsigned long flags, cflags = list->mux_flags; + struct device *dev = ctx->dev; + u8 shift = list->mux_shift; + u8 width = list->mux_width; + struct clk_init_data init = {}; + struct lgm_clk_mux *mux; + u32 reg = list->mux_off; + struct clk_hw *hw; + int ret; + + mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL); + if (!mux) + return ERR_PTR(-ENOMEM); + + init.name = list->name; + init.ops = &lgm_clk_mux_ops; + init.flags = list->flags; + init.parent_data = list->parent_data; + init.num_parents = list->num_parents; + + mux->membase = ctx->membase; + mux->lock = ctx->lock; + mux->reg = reg; + mux->shift = shift; + mux->width = width; + mux->flags = cflags; + mux->hw.init = &init; + + hw = &mux->hw; + ret = clk_hw_register(dev, hw); + if (ret) + return ERR_PTR(ret); + + if (cflags & CLOCK_FLAG_VAL_INIT) { + 
spin_lock_irqsave(&mux->lock, flags); + lgm_set_clk_val(mux->membase, reg, shift, width, list->mux_val); + spin_unlock_irqrestore(&mux->lock, flags); + } + + return hw; +} + +static unsigned long +lgm_clk_divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) +{ + struct lgm_clk_divider *divider = to_lgm_clk_divider(hw); + unsigned long flags; + unsigned int val; + + spin_lock_irqsave(÷r->lock, flags); + val = lgm_get_clk_val(divider->membase, divider->reg, + divider->shift, divider->width); + spin_unlock_irqrestore(÷r->lock, flags); + + return divider_recalc_rate(hw, parent_rate, val, divider->table, + divider->flags, divider->width); +} + +static long +lgm_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + struct lgm_clk_divider *divider = to_lgm_clk_divider(hw); + + return divider_round_rate(hw, rate, prate, divider->table, + divider->width, divider->flags); +} + +static int +lgm_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long prate) +{ + struct lgm_clk_divider *divider = to_lgm_clk_divider(hw); + unsigned long flags; + int value; + + value = divider_get_val(rate, prate, divider->table, + divider->width, divider->flags); + if (value < 0) + return value; + + spin_lock_irqsave(÷r->lock, flags); + lgm_set_clk_val(divider->membase, divider->reg, + divider->shift, divider->width, value); + spin_unlock_irqrestore(÷r->lock, flags); + + return 0; +} + +static int lgm_clk_divider_enable_disable(struct clk_hw *hw, int enable) +{ + struct lgm_clk_divider *div = to_lgm_clk_divider(hw); + unsigned long flags; + + spin_lock_irqsave(&div->lock, flags); + lgm_set_clk_val(div->membase, div->reg, div->shift_gate, + div->width_gate, enable); + spin_unlock_irqrestore(&div->lock, flags); + return 0; +} + +static int lgm_clk_divider_enable(struct clk_hw *hw) +{ + return lgm_clk_divider_enable_disable(hw, 1); +} + +static void lgm_clk_divider_disable(struct clk_hw *hw) +{ + lgm_clk_divider_enable_disable(hw, 0); +} + +static const struct clk_ops lgm_clk_divider_ops = { + .recalc_rate = lgm_clk_divider_recalc_rate, + .round_rate = lgm_clk_divider_round_rate, + .set_rate = lgm_clk_divider_set_rate, + .enable = lgm_clk_divider_enable, + .disable = lgm_clk_divider_disable, +}; + +static struct clk_hw * +lgm_clk_register_divider(struct lgm_clk_provider *ctx, + const struct lgm_clk_branch *list) +{ + unsigned long flags, cflags = list->div_flags; + struct device *dev = ctx->dev; + struct lgm_clk_divider *div; + struct clk_init_data init = {}; + u8 shift = list->div_shift; + u8 width = list->div_width; + u8 shift_gate = list->div_shift_gate; + u8 width_gate = list->div_width_gate; + u32 reg = list->div_off; + struct clk_hw *hw; + int ret; + + div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL); + if (!div) + return ERR_PTR(-ENOMEM); + + init.name = list->name; + init.ops = &lgm_clk_divider_ops; + init.flags = list->flags; + init.parent_data = list->parent_data; + init.num_parents = 1; + + div->membase = ctx->membase; + div->lock = ctx->lock; + div->reg = reg; + div->shift = shift; + div->width = width; + div->shift_gate = shift_gate; + div->width_gate = width_gate; + div->flags = cflags; + div->table = list->div_table; + div->hw.init = &init; + + hw = &div->hw; + ret = clk_hw_register(dev, hw); + if (ret) + return ERR_PTR(ret); + + if (cflags & CLOCK_FLAG_VAL_INIT) { + spin_lock_irqsave(&div->lock, flags); + lgm_set_clk_val(div->membase, reg, shift, width, list->div_val); + spin_unlock_irqrestore(&div->lock, flags); + } + + return hw; +} + 
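The mux, divider and gate paths above all funnel through the lgm_get_clk_val()/lgm_set_clk_val() helpers (defined in clk-cgu.h later in this patch), which read-modify-write a field of 'width' bits starting at bit 'shift'. A self-contained sketch of that field convention on a plain 32-bit word, with the register I/O replaced by a variable and arbitrary example values:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the shift/width convention of lgm_get_clk_val()/lgm_set_clk_val();
 * assumes width < 32, as all CGU fields are. */
static uint32_t field_get(uint32_t reg, unsigned int shift, unsigned int width)
{
	uint32_t mask = ((1u << width) - 1) << shift;

	return (reg & mask) >> shift;
}

static uint32_t field_set(uint32_t reg, unsigned int shift, unsigned int width,
			  uint32_t val)
{
	uint32_t mask = ((1u << width) - 1) << shift;

	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t reg = 0;

	/* e.g. program a 4-bit divider field at bit 8 to the value 5 */
	reg = field_set(reg, 8, 4, 5);
	printf("reg=0x%08x div=%u\n", reg, field_get(reg, 8, 4));
	return 0;
}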
+static struct clk_hw * +lgm_clk_register_fixed_factor(struct lgm_clk_provider *ctx, + const struct lgm_clk_branch *list) +{ + unsigned long flags; + struct clk_hw *hw; + + hw = clk_hw_register_fixed_factor(ctx->dev, list->name, + list->parent_data[0].name, list->flags, + list->mult, list->div); + if (IS_ERR(hw)) + return ERR_CAST(hw); + + if (list->div_flags & CLOCK_FLAG_VAL_INIT) { + spin_lock_irqsave(&ctx->lock, flags); + lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift, + list->div_width, list->div_val); + spin_unlock_irqrestore(&ctx->lock, flags); + } + + return hw; +} + +static int lgm_clk_gate_enable(struct clk_hw *hw) +{ + struct lgm_clk_gate *gate = to_lgm_clk_gate(hw); + unsigned long flags; + unsigned int reg; + + spin_lock_irqsave(&gate->lock, flags); + reg = GATE_HW_REG_EN(gate->reg); + lgm_set_clk_val(gate->membase, reg, gate->shift, 1, 1); + spin_unlock_irqrestore(&gate->lock, flags); + + return 0; +} + +static void lgm_clk_gate_disable(struct clk_hw *hw) +{ + struct lgm_clk_gate *gate = to_lgm_clk_gate(hw); + unsigned long flags; + unsigned int reg; + + spin_lock_irqsave(&gate->lock, flags); + reg = GATE_HW_REG_DIS(gate->reg); + lgm_set_clk_val(gate->membase, reg, gate->shift, 1, 1); + spin_unlock_irqrestore(&gate->lock, flags); +} + +static int lgm_clk_gate_is_enabled(struct clk_hw *hw) +{ + struct lgm_clk_gate *gate = to_lgm_clk_gate(hw); + unsigned int reg, ret; + unsigned long flags; + + spin_lock_irqsave(&gate->lock, flags); + reg = GATE_HW_REG_STAT(gate->reg); + ret = lgm_get_clk_val(gate->membase, reg, gate->shift, 1); + spin_unlock_irqrestore(&gate->lock, flags); + + return ret; +} + +static const struct clk_ops lgm_clk_gate_ops = { + .enable = lgm_clk_gate_enable, + .disable = lgm_clk_gate_disable, + .is_enabled = lgm_clk_gate_is_enabled, +}; + +static struct clk_hw * +lgm_clk_register_gate(struct lgm_clk_provider *ctx, + const struct lgm_clk_branch *list) +{ + unsigned long flags, cflags = list->gate_flags; + const char *pname = list->parent_data[0].name; + struct device *dev = ctx->dev; + u8 shift = list->gate_shift; + struct clk_init_data init = {}; + struct lgm_clk_gate *gate; + u32 reg = list->gate_off; + struct clk_hw *hw; + int ret; + + gate = devm_kzalloc(dev, sizeof(*gate), GFP_KERNEL); + if (!gate) + return ERR_PTR(-ENOMEM); + + init.name = list->name; + init.ops = &lgm_clk_gate_ops; + init.flags = list->flags; + init.parent_names = pname ? &pname : NULL; + init.num_parents = pname ? 
1 : 0; + + gate->membase = ctx->membase; + gate->lock = ctx->lock; + gate->reg = reg; + gate->shift = shift; + gate->flags = cflags; + gate->hw.init = &init; + + hw = &gate->hw; + ret = clk_hw_register(dev, hw); + if (ret) + return ERR_PTR(ret); + + if (cflags & CLOCK_FLAG_VAL_INIT) { + spin_lock_irqsave(&gate->lock, flags); + lgm_set_clk_val(gate->membase, reg, shift, 1, list->gate_val); + spin_unlock_irqrestore(&gate->lock, flags); + } + + return hw; +} + +int lgm_clk_register_branches(struct lgm_clk_provider *ctx, + const struct lgm_clk_branch *list, + unsigned int nr_clk) +{ + struct clk_hw *hw; + unsigned int idx; + + for (idx = 0; idx < nr_clk; idx++, list++) { + switch (list->type) { + case CLK_TYPE_FIXED: + hw = lgm_clk_register_fixed(ctx, list); + break; + case CLK_TYPE_MUX: + hw = lgm_clk_register_mux(ctx, list); + break; + case CLK_TYPE_DIVIDER: + hw = lgm_clk_register_divider(ctx, list); + break; + case CLK_TYPE_FIXED_FACTOR: + hw = lgm_clk_register_fixed_factor(ctx, list); + break; + case CLK_TYPE_GATE: + hw = lgm_clk_register_gate(ctx, list); + break; + default: + dev_err(ctx->dev, "invalid clk type\n"); + return -EINVAL; + } + + if (IS_ERR(hw)) { + dev_err(ctx->dev, + "register clk: %s, type: %u failed!\n", + list->name, list->type); + return -EIO; + } + ctx->clk_data.hws[list->id] = hw; + } + + return 0; +} + +static unsigned long +lgm_clk_ddiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) +{ + struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw); + unsigned int div0, div1, exdiv; + unsigned long flags; + u64 prate; + + spin_lock_irqsave(&ddiv->lock, flags); + div0 = lgm_get_clk_val(ddiv->membase, ddiv->reg, + ddiv->shift0, ddiv->width0) + 1; + div1 = lgm_get_clk_val(ddiv->membase, ddiv->reg, + ddiv->shift1, ddiv->width1) + 1; + exdiv = lgm_get_clk_val(ddiv->membase, ddiv->reg, + ddiv->shift2, ddiv->width2); + spin_unlock_irqrestore(&ddiv->lock, flags); + + prate = (u64)parent_rate; + do_div(prate, div0); + do_div(prate, div1); + + if (exdiv) { + do_div(prate, ddiv->div); + prate *= ddiv->mult; + } + + return prate; +} + +static int lgm_clk_ddiv_enable(struct clk_hw *hw) +{ + struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw); + unsigned long flags; + + spin_lock_irqsave(&ddiv->lock, flags); + lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate, + ddiv->width_gate, 1); + spin_unlock_irqrestore(&ddiv->lock, flags); + return 0; +} + +static void lgm_clk_ddiv_disable(struct clk_hw *hw) +{ + struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw); + unsigned long flags; + + spin_lock_irqsave(&ddiv->lock, flags); + lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate, + ddiv->width_gate, 0); + spin_unlock_irqrestore(&ddiv->lock, flags); +} + +static int +lgm_clk_get_ddiv_val(u32 div, u32 *ddiv1, u32 *ddiv2) +{ + u32 idx, temp; + + *ddiv1 = 1; + *ddiv2 = 1; + + if (div > MAX_DIVIDER_VAL) + div = MAX_DIVIDER_VAL; + + if (div > 1) { + for (idx = 2; idx <= MAX_DDIV_REG; idx++) { + temp = DIV_ROUND_UP_ULL((u64)div, idx); + if (div % idx == 0 && temp <= MAX_DDIV_REG) + break; + } + + if (idx > MAX_DDIV_REG) + return -EINVAL; + + *ddiv1 = temp; + *ddiv2 = idx; + } + + return 0; +} + +static int +lgm_clk_ddiv_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long prate) +{ + struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw); + u32 div, ddiv1, ddiv2; + unsigned long flags; + + div = DIV_ROUND_CLOSEST_ULL((u64)prate, rate); + + spin_lock_irqsave(&ddiv->lock, flags); + if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) { + div = DIV_ROUND_CLOSEST_ULL((u64)div, 
5); + div = div * 2; + } + + if (div <= 0) { + spin_unlock_irqrestore(&ddiv->lock, flags); + return -EINVAL; + } + + if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2)) { + spin_unlock_irqrestore(&ddiv->lock, flags); + return -EINVAL; + } + + lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift0, ddiv->width0, + ddiv1 - 1); + + lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift1, ddiv->width1, + ddiv2 - 1); + spin_unlock_irqrestore(&ddiv->lock, flags); + + return 0; +} + +static long +lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw); + u32 div, ddiv1, ddiv2; + unsigned long flags; + u64 rate64 = rate; + + div = DIV_ROUND_CLOSEST_ULL((u64)*prate, rate); + + /* if predivide bit is enabled, modify div by factor of 2.5 */ + spin_lock_irqsave(&ddiv->lock, flags); + if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) { + div = div * 2; + div = DIV_ROUND_CLOSEST_ULL((u64)div, 5); + } + + if (div <= 0) { + spin_unlock_irqrestore(&ddiv->lock, flags); + return *prate; + } + + if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2) != 0) { + if (lgm_clk_get_ddiv_val(div + 1, &ddiv1, &ddiv2) != 0) { + spin_unlock_irqrestore(&ddiv->lock, flags); + return -EINVAL; + } + } + + rate64 = *prate; + do_div(rate64, ddiv1); + do_div(rate64, ddiv2); + + /* if predivide bit is enabled, modify rounded rate by factor of 2.5 */ + if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) { + rate64 = rate64 * 2; + rate64 = DIV_ROUND_CLOSEST_ULL(rate64, 5); + } + spin_unlock_irqrestore(&ddiv->lock, flags); + + return rate64; +} + +static const struct clk_ops lgm_clk_ddiv_ops = { + .recalc_rate = lgm_clk_ddiv_recalc_rate, + .enable = lgm_clk_ddiv_enable, + .disable = lgm_clk_ddiv_disable, + .set_rate = lgm_clk_ddiv_set_rate, + .round_rate = lgm_clk_ddiv_round_rate, +}; + +int lgm_clk_register_ddiv(struct lgm_clk_provider *ctx, + const struct lgm_clk_ddiv_data *list, + unsigned int nr_clk) +{ + struct device *dev = ctx->dev; + struct clk_init_data init = {}; + struct lgm_clk_ddiv *ddiv; + struct clk_hw *hw; + unsigned int idx; + int ret; + + for (idx = 0; idx < nr_clk; idx++, list++) { + ddiv = NULL; + ddiv = devm_kzalloc(dev, sizeof(*ddiv), GFP_KERNEL); + if (!ddiv) + return -ENOMEM; + + memset(&init, 0, sizeof(init)); + init.name = list->name; + init.ops = &lgm_clk_ddiv_ops; + init.flags = list->flags; + init.parent_data = list->parent_data; + init.num_parents = 1; + + ddiv->membase = ctx->membase; + ddiv->lock = ctx->lock; + ddiv->reg = list->reg; + ddiv->shift0 = list->shift0; + ddiv->width0 = list->width0; + ddiv->shift1 = list->shift1; + ddiv->width1 = list->width1; + ddiv->shift_gate = list->shift_gate; + ddiv->width_gate = list->width_gate; + ddiv->shift2 = list->ex_shift; + ddiv->width2 = list->ex_width; + ddiv->flags = list->div_flags; + ddiv->mult = 2; + ddiv->div = 5; + ddiv->hw.init = &init; + + hw = &ddiv->hw; + ret = clk_hw_register(dev, hw); + if (ret) { + dev_err(dev, "register clk: %s failed!\n", list->name); + return ret; + } + ctx->clk_data.hws[list->id] = hw; + } + + return 0; +} diff --git a/drivers/clk/x86/clk-cgu.h b/drivers/clk/x86/clk-cgu.h new file mode 100644 index 0000000000000..4e22bfb223128 --- /dev/null +++ b/drivers/clk/x86/clk-cgu.h @@ -0,0 +1,335 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright(c) 2020 Intel Corporation. 
+ * Zhu YiXin + * Rahul Tanwar + */ + +#ifndef __CLK_CGU_H +#define __CLK_CGU_H + +#include + +struct lgm_clk_mux { + struct clk_hw hw; + void __iomem *membase; + unsigned int reg; + u8 shift; + u8 width; + unsigned long flags; + spinlock_t lock; +}; + +struct lgm_clk_divider { + struct clk_hw hw; + void __iomem *membase; + unsigned int reg; + u8 shift; + u8 width; + u8 shift_gate; + u8 width_gate; + unsigned long flags; + const struct clk_div_table *table; + spinlock_t lock; +}; + +struct lgm_clk_ddiv { + struct clk_hw hw; + void __iomem *membase; + unsigned int reg; + u8 shift0; + u8 width0; + u8 shift1; + u8 width1; + u8 shift2; + u8 width2; + u8 shift_gate; + u8 width_gate; + unsigned int mult; + unsigned int div; + unsigned long flags; + spinlock_t lock; +}; + +struct lgm_clk_gate { + struct clk_hw hw; + void __iomem *membase; + unsigned int reg; + u8 shift; + unsigned long flags; + spinlock_t lock; +}; + +enum lgm_clk_type { + CLK_TYPE_FIXED, + CLK_TYPE_MUX, + CLK_TYPE_DIVIDER, + CLK_TYPE_FIXED_FACTOR, + CLK_TYPE_GATE, + CLK_TYPE_NONE, +}; + +/** + * struct lgm_clk_provider + * @membase: IO mem base address for CGU. + * @np: device node + * @dev: device + * @clk_data: array of hw clocks and clk number. + */ +struct lgm_clk_provider { + void __iomem *membase; + struct device_node *np; + struct device *dev; + struct clk_hw_onecell_data clk_data; + spinlock_t lock; +}; + +enum pll_type { + TYPE_ROPLL, + TYPE_LJPLL, + TYPE_NONE, +}; + +struct lgm_clk_pll { + struct clk_hw hw; + void __iomem *membase; + unsigned int reg; + unsigned long flags; + enum pll_type type; + spinlock_t lock; +}; + +/** + * struct lgm_pll_clk_data + * @id: platform specific id of the clock. + * @name: name of this pll clock. + * @parent_data: parent clock data. + * @num_parents: number of parents. + * @flags: optional flags for basic clock. + * @type: platform type of pll. + * @reg: offset of the register. 
+ */ +struct lgm_pll_clk_data { + unsigned int id; + const char *name; + const struct clk_parent_data *parent_data; + u8 num_parents; + unsigned long flags; + enum pll_type type; + int reg; +}; + +#define LGM_PLL(_id, _name, _pdata, _flags, \ + _reg, _type) \ + { \ + .id = _id, \ + .name = _name, \ + .parent_data = _pdata, \ + .num_parents = ARRAY_SIZE(_pdata), \ + .flags = _flags, \ + .reg = _reg, \ + .type = _type, \ + } + +struct lgm_clk_ddiv_data { + unsigned int id; + const char *name; + const struct clk_parent_data *parent_data; + u8 flags; + unsigned long div_flags; + unsigned int reg; + u8 shift0; + u8 width0; + u8 shift1; + u8 width1; + u8 shift_gate; + u8 width_gate; + u8 ex_shift; + u8 ex_width; +}; + +#define LGM_DDIV(_id, _name, _pname, _flags, _reg, \ + _shft0, _wdth0, _shft1, _wdth1, \ + _shft_gate, _wdth_gate, _xshft, _df) \ + { \ + .id = _id, \ + .name = _name, \ + .parent_data = &(const struct clk_parent_data){ \ + .fw_name = _pname, \ + .name = _pname, \ + }, \ + .flags = _flags, \ + .reg = _reg, \ + .shift0 = _shft0, \ + .width0 = _wdth0, \ + .shift1 = _shft1, \ + .width1 = _wdth1, \ + .shift_gate = _shft_gate, \ + .width_gate = _wdth_gate, \ + .ex_shift = _xshft, \ + .ex_width = 1, \ + .div_flags = _df, \ + } + +struct lgm_clk_branch { + unsigned int id; + enum lgm_clk_type type; + const char *name; + const struct clk_parent_data *parent_data; + u8 num_parents; + unsigned long flags; + unsigned int mux_off; + u8 mux_shift; + u8 mux_width; + unsigned long mux_flags; + unsigned int mux_val; + unsigned int div_off; + u8 div_shift; + u8 div_width; + u8 div_shift_gate; + u8 div_width_gate; + unsigned long div_flags; + unsigned int div_val; + const struct clk_div_table *div_table; + unsigned int gate_off; + u8 gate_shift; + unsigned long gate_flags; + unsigned int gate_val; + unsigned int mult; + unsigned int div; +}; + +/* clock flags definition */ +#define CLOCK_FLAG_VAL_INIT BIT(16) +#define MUX_CLK_SW BIT(17) + +#define LGM_MUX(_id, _name, _pdata, _f, _reg, \ + _shift, _width, _cf, _v) \ + { \ + .id = _id, \ + .type = CLK_TYPE_MUX, \ + .name = _name, \ + .parent_data = _pdata, \ + .num_parents = ARRAY_SIZE(_pdata), \ + .flags = _f, \ + .mux_off = _reg, \ + .mux_shift = _shift, \ + .mux_width = _width, \ + .mux_flags = _cf, \ + .mux_val = _v, \ + } + +#define LGM_DIV(_id, _name, _pname, _f, _reg, _shift, _width, \ + _shift_gate, _width_gate, _cf, _v, _dtable) \ + { \ + .id = _id, \ + .type = CLK_TYPE_DIVIDER, \ + .name = _name, \ + .parent_data = &(const struct clk_parent_data){ \ + .fw_name = _pname, \ + .name = _pname, \ + }, \ + .num_parents = 1, \ + .flags = _f, \ + .div_off = _reg, \ + .div_shift = _shift, \ + .div_width = _width, \ + .div_shift_gate = _shift_gate, \ + .div_width_gate = _width_gate, \ + .div_flags = _cf, \ + .div_val = _v, \ + .div_table = _dtable, \ + } + +#define LGM_GATE(_id, _name, _pname, _f, _reg, \ + _shift, _cf, _v) \ + { \ + .id = _id, \ + .type = CLK_TYPE_GATE, \ + .name = _name, \ + .parent_data = &(const struct clk_parent_data){ \ + .fw_name = _pname, \ + .name = _pname, \ + }, \ + .num_parents = !_pname ? 0 : 1, \ + .flags = _f, \ + .gate_off = _reg, \ + .gate_shift = _shift, \ + .gate_flags = _cf, \ + .gate_val = _v, \ + } + +#define LGM_FIXED(_id, _name, _pname, _f, _reg, \ + _shift, _width, _cf, _freq, _v) \ + { \ + .id = _id, \ + .type = CLK_TYPE_FIXED, \ + .name = _name, \ + .parent_data = &(const struct clk_parent_data){ \ + .fw_name = _pname, \ + .name = _pname, \ + }, \ + .num_parents = !_pname ? 
0 : 1, \ + .flags = _f, \ + .div_off = _reg, \ + .div_shift = _shift, \ + .div_width = _width, \ + .div_flags = _cf, \ + .div_val = _v, \ + .mux_flags = _freq, \ + } + +#define LGM_FIXED_FACTOR(_id, _name, _pname, _f, _reg, \ + _shift, _width, _cf, _v, _m, _d) \ + { \ + .id = _id, \ + .type = CLK_TYPE_FIXED_FACTOR, \ + .name = _name, \ + .parent_data = &(const struct clk_parent_data){ \ + .fw_name = _pname, \ + .name = _pname, \ + }, \ + .num_parents = 1, \ + .flags = _f, \ + .div_off = _reg, \ + .div_shift = _shift, \ + .div_width = _width, \ + .div_flags = _cf, \ + .div_val = _v, \ + .mult = _m, \ + .div = _d, \ + } + +static inline void lgm_set_clk_val(void __iomem *membase, u32 reg, + u8 shift, u8 width, u32 set_val) +{ + u32 mask = (GENMASK(width - 1, 0) << shift); + u32 regval; + + regval = readl(membase + reg); + regval = (regval & ~mask) | ((set_val << shift) & mask); + writel(regval, membase + reg); +} + +static inline u32 lgm_get_clk_val(void __iomem *membase, u32 reg, + u8 shift, u8 width) +{ + u32 mask = (GENMASK(width - 1, 0) << shift); + u32 val; + + val = readl(membase + reg); + val = (val & mask) >> shift; + + return val; +} + +int lgm_clk_register_branches(struct lgm_clk_provider *ctx, + const struct lgm_clk_branch *list, + unsigned int nr_clk); +int lgm_clk_register_plls(struct lgm_clk_provider *ctx, + const struct lgm_pll_clk_data *list, + unsigned int nr_clk); +int lgm_clk_register_ddiv(struct lgm_clk_provider *ctx, + const struct lgm_clk_ddiv_data *list, + unsigned int nr_clk); +#endif /* __CLK_CGU_H */ diff --git a/drivers/clk/x86/clk-lgm.c b/drivers/clk/x86/clk-lgm.c new file mode 100644 index 0000000000000..020f4e83a5ccb --- /dev/null +++ b/drivers/clk/x86/clk-lgm.c @@ -0,0 +1,475 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020 Intel Corporation. 
+ * Zhu YiXin + * Rahul Tanwar + */ +#include +#include +#include +#include +#include "clk-cgu.h" + +#define PLL_DIV_WIDTH 4 +#define PLL_DDIV_WIDTH 3 + +/* Gate0 clock shift */ +#define G_C55_SHIFT 7 +#define G_QSPI_SHIFT 9 +#define G_EIP197_SHIFT 11 +#define G_VAULT130_SHIFT 12 +#define G_TOE_SHIFT 13 +#define G_SDXC_SHIFT 14 +#define G_EMMC_SHIFT 15 +#define G_SPIDBG_SHIFT 17 +#define G_DMA3_SHIFT 28 + +/* Gate1 clock shift */ +#define G_DMA0_SHIFT 0 +#define G_LEDC0_SHIFT 1 +#define G_LEDC1_SHIFT 2 +#define G_I2S0_SHIFT 3 +#define G_I2S1_SHIFT 4 +#define G_EBU_SHIFT 5 +#define G_PWM_SHIFT 6 +#define G_I2C0_SHIFT 7 +#define G_I2C1_SHIFT 8 +#define G_I2C2_SHIFT 9 +#define G_I2C3_SHIFT 10 + +#define G_SSC0_SHIFT 12 +#define G_SSC1_SHIFT 13 +#define G_SSC2_SHIFT 14 +#define G_SSC3_SHIFT 15 + +#define G_GPTC0_SHIFT 17 +#define G_GPTC1_SHIFT 18 +#define G_GPTC2_SHIFT 19 +#define G_GPTC3_SHIFT 20 + +#define G_ASC0_SHIFT 22 +#define G_ASC1_SHIFT 23 +#define G_ASC2_SHIFT 24 +#define G_ASC3_SHIFT 25 + +#define G_PCM0_SHIFT 27 +#define G_PCM1_SHIFT 28 +#define G_PCM2_SHIFT 29 + +/* Gate2 clock shift */ +#define G_PCIE10_SHIFT 1 +#define G_PCIE11_SHIFT 2 +#define G_PCIE30_SHIFT 3 +#define G_PCIE31_SHIFT 4 +#define G_PCIE20_SHIFT 5 +#define G_PCIE21_SHIFT 6 +#define G_PCIE40_SHIFT 7 +#define G_PCIE41_SHIFT 8 + +#define G_XPCS0_SHIFT 10 +#define G_XPCS1_SHIFT 11 +#define G_XPCS2_SHIFT 12 +#define G_XPCS3_SHIFT 13 +#define G_SATA0_SHIFT 14 +#define G_SATA1_SHIFT 15 +#define G_SATA2_SHIFT 16 +#define G_SATA3_SHIFT 17 + +/* Gate3 clock shift */ +#define G_ARCEM4_SHIFT 0 +#define G_IDMAR1_SHIFT 2 +#define G_IDMAT0_SHIFT 3 +#define G_IDMAT1_SHIFT 4 +#define G_IDMAT2_SHIFT 5 + +#define G_PPV4_SHIFT 8 +#define G_GSWIPO_SHIFT 9 +#define G_CQEM_SHIFT 10 +#define G_XPCS5_SHIFT 14 +#define G_USB1_SHIFT 25 +#define G_USB2_SHIFT 26 + + +/* Register definition */ +#define CGU_PLL0CZ_CFG0 0x000 +#define CGU_PLL0CM0_CFG0 0x020 +#define CGU_PLL0CM1_CFG0 0x040 +#define CGU_PLL0B_CFG0 0x060 +#define CGU_PLL1_CFG0 0x080 +#define CGU_PLL2_CFG0 0x0A0 +#define CGU_PLLPP_CFG0 0x0C0 +#define CGU_LJPLL3_CFG0 0x0E0 +#define CGU_LJPLL4_CFG0 0x100 +#define CGU_C55_PCMCR 0x18C +#define CGU_PCMCR 0x190 +#define CGU_IF_CLK1 0x1A0 +#define CGU_IF_CLK2 0x1A4 +#define CGU_GATE0 0x300 +#define CGU_GATE1 0x310 +#define CGU_GATE2 0x320 +#define CGU_GATE3 0x310 + +#define PLL_DIV(x) ((x) + 0x04) +#define PLL_SSC(x) ((x) + 0x10) + +#define CLK_NR_CLKS (LGM_GCLK_USB2 + 1) + +/* + * Below table defines the pair's of regval & effective dividers. + * It's more efficient to provide an explicit table due to non-linear + * relation between values. 
+ */ +static const struct clk_div_table pll_div[] = { + { .val = 0, .div = 1 }, + { .val = 1, .div = 2 }, + { .val = 2, .div = 3 }, + { .val = 3, .div = 4 }, + { .val = 4, .div = 5 }, + { .val = 5, .div = 6 }, + { .val = 6, .div = 8 }, + { .val = 7, .div = 10 }, + { .val = 8, .div = 12 }, + { .val = 9, .div = 16 }, + { .val = 10, .div = 20 }, + { .val = 11, .div = 24 }, + { .val = 12, .div = 32 }, + { .val = 13, .div = 40 }, + { .val = 14, .div = 48 }, + { .val = 15, .div = 64 }, + {} +}; + +static const struct clk_div_table dcl_div[] = { + { .val = 0, .div = 6 }, + { .val = 1, .div = 12 }, + { .val = 2, .div = 24 }, + { .val = 3, .div = 32 }, + { .val = 4, .div = 48 }, + { .val = 5, .div = 96 }, + {} +}; + +static const struct clk_parent_data pll_p[] = { + { .fw_name = "osc", .name = "osc" }, +}; +static const struct clk_parent_data pllcm_p[] = { + { .fw_name = "cpu_cm", .name = "cpu_cm" }, +}; +static const struct clk_parent_data emmc_p[] = { + { .fw_name = "emmc4", .name = "emmc4" }, + { .fw_name = "noc4", .name = "noc4" }, +}; +static const struct clk_parent_data sdxc_p[] = { + { .fw_name = "sdxc3", .name = "sdxc3" }, + { .fw_name = "sdxc2", .name = "sdxc2" }, +}; +static const struct clk_parent_data pcm_p[] = { + { .fw_name = "v_docsis", .name = "v_docsis" }, + { .fw_name = "dcl", .name = "dcl" }, +}; +static const struct clk_parent_data cbphy_p[] = { + { .fw_name = "dd_serdes", .name = "dd_serdes" }, + { .fw_name = "dd_pcie", .name = "dd_pcie" }, +}; + +static const struct lgm_pll_clk_data lgm_pll_clks[] = { + LGM_PLL(LGM_CLK_PLL0CZ, "pll0cz", pll_p, CLK_IGNORE_UNUSED, + CGU_PLL0CZ_CFG0, TYPE_ROPLL), + LGM_PLL(LGM_CLK_PLL0CM0, "pll0cm0", pllcm_p, CLK_IGNORE_UNUSED, + CGU_PLL0CM0_CFG0, TYPE_ROPLL), + LGM_PLL(LGM_CLK_PLL0CM1, "pll0cm1", pllcm_p, CLK_IGNORE_UNUSED, + CGU_PLL0CM1_CFG0, TYPE_ROPLL), + LGM_PLL(LGM_CLK_PLL0B, "pll0b", pll_p, CLK_IGNORE_UNUSED, + CGU_PLL0B_CFG0, TYPE_ROPLL), + LGM_PLL(LGM_CLK_PLL1, "pll1", pll_p, 0, CGU_PLL1_CFG0, TYPE_ROPLL), + LGM_PLL(LGM_CLK_PLL2, "pll2", pll_p, CLK_IGNORE_UNUSED, + CGU_PLL2_CFG0, TYPE_ROPLL), + LGM_PLL(LGM_CLK_PLLPP, "pllpp", pll_p, 0, CGU_PLLPP_CFG0, TYPE_ROPLL), + LGM_PLL(LGM_CLK_LJPLL3, "ljpll3", pll_p, 0, CGU_LJPLL3_CFG0, TYPE_LJPLL), + LGM_PLL(LGM_CLK_LJPLL4, "ljpll4", pll_p, 0, CGU_LJPLL4_CFG0, TYPE_LJPLL), +}; + +static const struct lgm_clk_branch lgm_branch_clks[] = { + LGM_DIV(LGM_CLK_PP_HW, "pp_hw", "pllpp", 0, PLL_DIV(CGU_PLLPP_CFG0), + 0, PLL_DIV_WIDTH, 24, 1, 0, 0, pll_div), + LGM_DIV(LGM_CLK_PP_UC, "pp_uc", "pllpp", 0, PLL_DIV(CGU_PLLPP_CFG0), + 4, PLL_DIV_WIDTH, 25, 1, 0, 0, pll_div), + LGM_DIV(LGM_CLK_PP_FXD, "pp_fxd", "pllpp", 0, PLL_DIV(CGU_PLLPP_CFG0), + 8, PLL_DIV_WIDTH, 26, 1, 0, 0, pll_div), + LGM_DIV(LGM_CLK_PP_TBM, "pp_tbm", "pllpp", 0, PLL_DIV(CGU_PLLPP_CFG0), + 12, PLL_DIV_WIDTH, 27, 1, 0, 0, pll_div), + LGM_DIV(LGM_CLK_DDR, "ddr", "pll2", CLK_IGNORE_UNUSED, + PLL_DIV(CGU_PLL2_CFG0), 0, PLL_DIV_WIDTH, 24, 1, 0, 0, + pll_div), + LGM_DIV(LGM_CLK_CM, "cpu_cm", "pll0cz", 0, PLL_DIV(CGU_PLL0CZ_CFG0), + 0, PLL_DIV_WIDTH, 24, 1, 0, 0, pll_div), + + LGM_DIV(LGM_CLK_IC, "cpu_ic", "pll0cz", CLK_IGNORE_UNUSED, + PLL_DIV(CGU_PLL0CZ_CFG0), 4, PLL_DIV_WIDTH, 25, + 1, 0, 0, pll_div), + + LGM_DIV(LGM_CLK_SDXC3, "sdxc3", "pll0cz", 0, PLL_DIV(CGU_PLL0CZ_CFG0), + 8, PLL_DIV_WIDTH, 26, 1, 0, 0, pll_div), + + LGM_DIV(LGM_CLK_CPU0, "cm0", "pll0cm0", + CLK_IGNORE_UNUSED, PLL_DIV(CGU_PLL0CM0_CFG0), + 0, PLL_DIV_WIDTH, 24, 1, 0, 0, pll_div), + LGM_DIV(LGM_CLK_CPU1, "cm1", "pll0cm1", + CLK_IGNORE_UNUSED, PLL_DIV(CGU_PLL0CM1_CFG0), + 0, 
PLL_DIV_WIDTH, 24, 1, 0, 0, pll_div), + + /* + * Marking ngi_clk (next generation interconnect) and noc_clk + * (network on chip peripheral clk) as critical clocks because + * these are shared parent clock sources for many different + * peripherals. + */ + LGM_DIV(LGM_CLK_NGI, "ngi", "pll0b", + (CLK_IGNORE_UNUSED|CLK_IS_CRITICAL), PLL_DIV(CGU_PLL0B_CFG0), + 0, PLL_DIV_WIDTH, 24, 1, 0, 0, pll_div), + LGM_DIV(LGM_CLK_NOC4, "noc4", "pll0b", + (CLK_IGNORE_UNUSED|CLK_IS_CRITICAL), PLL_DIV(CGU_PLL0B_CFG0), + 4, PLL_DIV_WIDTH, 25, 1, 0, 0, pll_div), + LGM_DIV(LGM_CLK_SW, "switch", "pll0b", 0, PLL_DIV(CGU_PLL0B_CFG0), + 8, PLL_DIV_WIDTH, 26, 1, 0, 0, pll_div), + LGM_DIV(LGM_CLK_QSPI, "qspi", "pll0b", 0, PLL_DIV(CGU_PLL0B_CFG0), + 12, PLL_DIV_WIDTH, 27, 1, 0, 0, pll_div), + LGM_DIV(LGM_CLK_CT, "v_ct", "pll1", 0, PLL_DIV(CGU_PLL1_CFG0), + 0, PLL_DIV_WIDTH, 24, 1, 0, 0, pll_div), + LGM_DIV(LGM_CLK_DSP, "v_dsp", "pll1", 0, PLL_DIV(CGU_PLL1_CFG0), + 8, PLL_DIV_WIDTH, 26, 1, 0, 0, pll_div), + LGM_DIV(LGM_CLK_VIF, "v_ifclk", "pll1", 0, PLL_DIV(CGU_PLL1_CFG0), + 12, PLL_DIV_WIDTH, 27, 1, 0, 0, pll_div), + + LGM_FIXED_FACTOR(LGM_CLK_EMMC4, "emmc4", "sdxc3", 0, 0, + 0, 0, 0, 0, 1, 4), + LGM_FIXED_FACTOR(LGM_CLK_SDXC2, "sdxc2", "noc4", 0, 0, + 0, 0, 0, 0, 1, 4), + LGM_MUX(LGM_CLK_EMMC, "emmc", emmc_p, 0, CGU_IF_CLK1, + 0, 1, CLK_MUX_ROUND_CLOSEST, 0), + LGM_MUX(LGM_CLK_SDXC, "sdxc", sdxc_p, 0, CGU_IF_CLK1, + 1, 1, CLK_MUX_ROUND_CLOSEST, 0), + LGM_FIXED(LGM_CLK_OSC, "osc", NULL, 0, 0, 0, 0, 0, 40000000, 0), + LGM_FIXED(LGM_CLK_SLIC, "slic", NULL, 0, CGU_IF_CLK1, + 8, 2, CLOCK_FLAG_VAL_INIT, 8192000, 2), + LGM_FIXED(LGM_CLK_DOCSIS, "v_docsis", NULL, 0, 0, 0, 0, 0, 16000000, 0), + LGM_DIV(LGM_CLK_DCL, "dcl", "v_ifclk", 0, CGU_PCMCR, + 25, 3, 0, 0, 0, 0, dcl_div), + LGM_MUX(LGM_CLK_PCM, "pcm", pcm_p, 0, CGU_C55_PCMCR, + 0, 1, CLK_MUX_ROUND_CLOSEST, 0), + LGM_FIXED_FACTOR(LGM_CLK_DDR_PHY, "ddr_phy", "ddr", + CLK_IGNORE_UNUSED, 0, + 0, 0, 0, 0, 2, 1), + LGM_FIXED_FACTOR(LGM_CLK_PONDEF, "pondef", "dd_pool", + CLK_SET_RATE_PARENT, 0, 0, 0, 0, 0, 1, 2), + LGM_MUX(LGM_CLK_CBPHY0, "cbphy0", cbphy_p, 0, 0, + 0, 0, MUX_CLK_SW | CLK_MUX_ROUND_CLOSEST, 0), + LGM_MUX(LGM_CLK_CBPHY1, "cbphy1", cbphy_p, 0, 0, + 0, 0, MUX_CLK_SW | CLK_MUX_ROUND_CLOSEST, 0), + LGM_MUX(LGM_CLK_CBPHY2, "cbphy2", cbphy_p, 0, 0, + 0, 0, MUX_CLK_SW | CLK_MUX_ROUND_CLOSEST, 0), + LGM_MUX(LGM_CLK_CBPHY3, "cbphy3", cbphy_p, 0, 0, + 0, 0, MUX_CLK_SW | CLK_MUX_ROUND_CLOSEST, 0), + + LGM_GATE(LGM_GCLK_C55, "g_c55", NULL, 0, CGU_GATE0, + G_C55_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_QSPI, "g_qspi", "qspi", 0, CGU_GATE0, + G_QSPI_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_EIP197, "g_eip197", NULL, 0, CGU_GATE0, + G_EIP197_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_VAULT, "g_vault130", NULL, 0, CGU_GATE0, + G_VAULT130_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_TOE, "g_toe", NULL, 0, CGU_GATE0, + G_TOE_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_SDXC, "g_sdxc", "sdxc", 0, CGU_GATE0, + G_SDXC_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_EMMC, "g_emmc", "emmc", 0, CGU_GATE0, + G_EMMC_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_SPI_DBG, "g_spidbg", NULL, 0, CGU_GATE0, + G_SPIDBG_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_DMA3, "g_dma3", NULL, 0, CGU_GATE0, + G_DMA3_SHIFT, 0, 0), + + LGM_GATE(LGM_GCLK_DMA0, "g_dma0", NULL, 0, CGU_GATE1, + G_DMA0_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_LEDC0, "g_ledc0", NULL, 0, CGU_GATE1, + G_LEDC0_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_LEDC1, "g_ledc1", NULL, 0, CGU_GATE1, + G_LEDC1_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_I2S0, "g_i2s0", NULL, 0, CGU_GATE1, + G_I2S0_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_I2S1, "g_i2s1", 
NULL, 0, CGU_GATE1, + G_I2S1_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_EBU, "g_ebu", NULL, 0, CGU_GATE1, + G_EBU_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_PWM, "g_pwm", NULL, 0, CGU_GATE1, + G_PWM_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_I2C0, "g_i2c0", NULL, 0, CGU_GATE1, + G_I2C0_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_I2C1, "g_i2c1", NULL, 0, CGU_GATE1, + G_I2C1_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_I2C2, "g_i2c2", NULL, 0, CGU_GATE1, + G_I2C2_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_I2C3, "g_i2c3", NULL, 0, CGU_GATE1, + G_I2C3_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_SSC0, "g_ssc0", "noc4", 0, CGU_GATE1, + G_SSC0_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_SSC1, "g_ssc1", "noc4", 0, CGU_GATE1, + G_SSC1_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_SSC2, "g_ssc2", "noc4", 0, CGU_GATE1, + G_SSC2_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_SSC3, "g_ssc3", "noc4", 0, CGU_GATE1, + G_SSC3_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_GPTC0, "g_gptc0", "noc4", 0, CGU_GATE1, + G_GPTC0_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_GPTC1, "g_gptc1", "noc4", 0, CGU_GATE1, + G_GPTC1_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_GPTC2, "g_gptc2", "noc4", 0, CGU_GATE1, + G_GPTC2_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_GPTC3, "g_gptc3", "osc", 0, CGU_GATE1, + G_GPTC3_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_ASC0, "g_asc0", "noc4", 0, CGU_GATE1, + G_ASC0_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_ASC1, "g_asc1", "noc4", 0, CGU_GATE1, + G_ASC1_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_ASC2, "g_asc2", "noc4", 0, CGU_GATE1, + G_ASC2_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_ASC3, "g_asc3", "osc", 0, CGU_GATE1, + G_ASC3_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_PCM0, "g_pcm0", NULL, 0, CGU_GATE1, + G_PCM0_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_PCM1, "g_pcm1", NULL, 0, CGU_GATE1, + G_PCM1_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_PCM2, "g_pcm2", NULL, 0, CGU_GATE1, + G_PCM2_SHIFT, 0, 0), + + LGM_GATE(LGM_GCLK_PCIE10, "g_pcie10", NULL, 0, CGU_GATE2, + G_PCIE10_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_PCIE11, "g_pcie11", NULL, 0, CGU_GATE2, + G_PCIE11_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_PCIE30, "g_pcie30", NULL, 0, CGU_GATE2, + G_PCIE30_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_PCIE31, "g_pcie31", NULL, 0, CGU_GATE2, + G_PCIE31_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_PCIE20, "g_pcie20", NULL, 0, CGU_GATE2, + G_PCIE20_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_PCIE21, "g_pcie21", NULL, 0, CGU_GATE2, + G_PCIE21_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_PCIE40, "g_pcie40", NULL, 0, CGU_GATE2, + G_PCIE40_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_PCIE41, "g_pcie41", NULL, 0, CGU_GATE2, + G_PCIE41_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_XPCS0, "g_xpcs0", NULL, 0, CGU_GATE2, + G_XPCS0_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_XPCS1, "g_xpcs1", NULL, 0, CGU_GATE2, + G_XPCS1_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_XPCS2, "g_xpcs2", NULL, 0, CGU_GATE2, + G_XPCS2_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_XPCS3, "g_xpcs3", NULL, 0, CGU_GATE2, + G_XPCS3_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_SATA0, "g_sata0", NULL, 0, CGU_GATE2, + G_SATA0_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_SATA1, "g_sata1", NULL, 0, CGU_GATE2, + G_SATA1_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_SATA2, "g_sata2", NULL, 0, CGU_GATE2, + G_SATA2_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_SATA3, "g_sata3", NULL, 0, CGU_GATE2, + G_SATA3_SHIFT, 0, 0), + + LGM_GATE(LGM_GCLK_ARCEM4, "g_arcem4", NULL, 0, CGU_GATE3, + G_ARCEM4_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_IDMAR1, "g_idmar1", NULL, 0, CGU_GATE3, + G_IDMAR1_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_IDMAT0, "g_idmat0", NULL, 0, CGU_GATE3, + G_IDMAT0_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_IDMAT1, "g_idmat1", NULL, 0, CGU_GATE3, + G_IDMAT1_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_IDMAT2, "g_idmat2", NULL, 0, CGU_GATE3, + G_IDMAT2_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_PPV4, "g_ppv4", NULL, 0, CGU_GATE3, + 
G_PPV4_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_GSWIPO, "g_gswipo", "switch", 0, CGU_GATE3, + G_GSWIPO_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_CQEM, "g_cqem", "switch", 0, CGU_GATE3, + G_CQEM_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_XPCS5, "g_xpcs5", NULL, 0, CGU_GATE3, + G_XPCS5_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_USB1, "g_usb1", NULL, 0, CGU_GATE3, + G_USB1_SHIFT, 0, 0), + LGM_GATE(LGM_GCLK_USB2, "g_usb2", NULL, 0, CGU_GATE3, + G_USB2_SHIFT, 0, 0), +}; + + +static const struct lgm_clk_ddiv_data lgm_ddiv_clks[] = { + LGM_DDIV(LGM_CLK_CML, "dd_cml", "ljpll3", 0, + PLL_DIV(CGU_LJPLL3_CFG0), 0, PLL_DDIV_WIDTH, + 3, PLL_DDIV_WIDTH, 24, 1, 29, 0), + LGM_DDIV(LGM_CLK_SERDES, "dd_serdes", "ljpll3", 0, + PLL_DIV(CGU_LJPLL3_CFG0), 6, PLL_DDIV_WIDTH, + 9, PLL_DDIV_WIDTH, 25, 1, 28, 0), + LGM_DDIV(LGM_CLK_POOL, "dd_pool", "ljpll3", 0, + PLL_DIV(CGU_LJPLL3_CFG0), 12, PLL_DDIV_WIDTH, + 15, PLL_DDIV_WIDTH, 26, 1, 28, 0), + LGM_DDIV(LGM_CLK_PTP, "dd_ptp", "ljpll3", 0, + PLL_DIV(CGU_LJPLL3_CFG0), 18, PLL_DDIV_WIDTH, + 21, PLL_DDIV_WIDTH, 27, 1, 28, 0), + LGM_DDIV(LGM_CLK_PCIE, "dd_pcie", "ljpll4", 0, + PLL_DIV(CGU_LJPLL4_CFG0), 0, PLL_DDIV_WIDTH, + 3, PLL_DDIV_WIDTH, 24, 1, 29, 0), +}; + +static int lgm_cgu_probe(struct platform_device *pdev) +{ + struct lgm_clk_provider *ctx; + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + int ret; + + ctx = devm_kzalloc(dev, struct_size(ctx, clk_data.hws, CLK_NR_CLKS), + GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->clk_data.num = CLK_NR_CLKS; + + ctx->membase = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(ctx->membase)) + return PTR_ERR(ctx->membase); + + ctx->np = np; + ctx->dev = dev; + spin_lock_init(&ctx->lock); + + ret = lgm_clk_register_plls(ctx, lgm_pll_clks, + ARRAY_SIZE(lgm_pll_clks)); + if (ret) + return ret; + + ret = lgm_clk_register_branches(ctx, lgm_branch_clks, + ARRAY_SIZE(lgm_branch_clks)); + if (ret) + return ret; + + ret = lgm_clk_register_ddiv(ctx, lgm_ddiv_clks, + ARRAY_SIZE(lgm_ddiv_clks)); + if (ret) + return ret; + + return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, + &ctx->clk_data); +} + +static const struct of_device_id of_lgm_cgu_match[] = { + { .compatible = "intel,cgu-lgm" }, + {} +}; + +static struct platform_driver lgm_cgu_driver = { + .probe = lgm_cgu_probe, + .driver = { + .name = "cgu-lgm", + .of_match_table = of_lgm_cgu_match, + }, +}; +builtin_platform_driver(lgm_cgu_driver); -- GitLab From 3aff9b5492475621e13ca83bdbf3dc8b823fc5a3 Mon Sep 17 00:00:00 2001 From: Chunyan Zhang Date: Tue, 19 May 2020 11:00:35 +0800 Subject: [PATCH 0709/1971] clk: sprd: mark the local clock symbols static There's a few pll gate clocks which were not marked with static, and those clock are used only in the current file, so add static key word for them. 
Fixes: 0e4b8a2349f3 ("clk: sprd: add clocks support for SC9863A") Signed-off-by: Chunyan Zhang Link: https://lkml.kernel.org/r/20200519030036.1785-1-zhang.lyra@gmail.com Reviewed-by: Baolin Wang Signed-off-by: Stephen Boyd --- drivers/clk/sprd/sc9863a-clk.c | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/drivers/clk/sprd/sc9863a-clk.c b/drivers/clk/sprd/sc9863a-clk.c index a0631f7756cfc..5dec0faa62392 100644 --- a/drivers/clk/sprd/sc9863a-clk.c +++ b/drivers/clk/sprd/sc9863a-clk.c @@ -23,22 +23,22 @@ #include "pll.h" /* mpll*_gate clocks control cpu cores, they were enabled by default */ -SPRD_PLL_SC_GATE_CLK_FW_NAME(mpll0_gate, "mpll0-gate", "ext-26m", 0x94, - 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240); -SPRD_PLL_SC_GATE_CLK_FW_NAME(dpll0_gate, "dpll0-gate", "ext-26m", 0x98, - 0x1000, BIT(0), 0, 0, 240); -SPRD_PLL_SC_GATE_CLK_FW_NAME(lpll_gate, "lpll-gate", "ext-26m", 0x9c, - 0x1000, BIT(0), 0, 0, 240); -SPRD_PLL_SC_GATE_CLK_FW_NAME(gpll_gate, "gpll-gate", "ext-26m", 0xa8, - 0x1000, BIT(0), 0, 0, 240); -SPRD_PLL_SC_GATE_CLK_FW_NAME(dpll1_gate, "dpll1-gate", "ext-26m", 0x1dc, - 0x1000, BIT(0), 0, 0, 240); -SPRD_PLL_SC_GATE_CLK_FW_NAME(mpll1_gate, "mpll1-gate", "ext-26m", 0x1e0, - 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240); -SPRD_PLL_SC_GATE_CLK_FW_NAME(mpll2_gate, "mpll2-gate", "ext-26m", 0x1e4, - 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240); -SPRD_PLL_SC_GATE_CLK_FW_NAME(isppll_gate, "isppll-gate", "ext-26m", 0x1e8, - 0x1000, BIT(0), 0, 0, 240); +static SPRD_PLL_SC_GATE_CLK_FW_NAME(mpll0_gate, "mpll0-gate", "ext-26m", 0x94, + 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240); +static SPRD_PLL_SC_GATE_CLK_FW_NAME(dpll0_gate, "dpll0-gate", "ext-26m", 0x98, + 0x1000, BIT(0), 0, 0, 240); +static SPRD_PLL_SC_GATE_CLK_FW_NAME(lpll_gate, "lpll-gate", "ext-26m", 0x9c, + 0x1000, BIT(0), 0, 0, 240); +static SPRD_PLL_SC_GATE_CLK_FW_NAME(gpll_gate, "gpll-gate", "ext-26m", 0xa8, + 0x1000, BIT(0), 0, 0, 240); +static SPRD_PLL_SC_GATE_CLK_FW_NAME(dpll1_gate, "dpll1-gate", "ext-26m", 0x1dc, + 0x1000, BIT(0), 0, 0, 240); +static SPRD_PLL_SC_GATE_CLK_FW_NAME(mpll1_gate, "mpll1-gate", "ext-26m", 0x1e0, + 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240); +static SPRD_PLL_SC_GATE_CLK_FW_NAME(mpll2_gate, "mpll2-gate", "ext-26m", 0x1e4, + 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240); +static SPRD_PLL_SC_GATE_CLK_FW_NAME(isppll_gate, "isppll-gate", "ext-26m", + 0x1e8, 0x1000, BIT(0), 0, 0, 240); static struct sprd_clk_common *sc9863a_pmu_gate_clks[] = { /* address base is 0x402b0000 */ -- GitLab From c2f30986d418f26abefc2eec90ebf06716c970d2 Mon Sep 17 00:00:00 2001 From: Chunyan Zhang Date: Tue, 19 May 2020 11:00:36 +0800 Subject: [PATCH 0710/1971] clk: sprd: return correct type of value for _sprd_pll_recalc_rate The function _sprd_pll_recalc_rate() defines return value to unsigned long, but it would return a negative value when malloc fail, changing to return its parent_rate makes more sense, since if the callback .recalc_rate() is not set, the framework returns the parent_rate as well. 
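For illustration, the error-handling pattern this applies, as a minimal sketch with hypothetical names rather than the driver's exact code (.recalc_rate() returns unsigned long, so returning an errno would be misread as an absurdly large rate):

	static unsigned long example_recalc_rate(struct clk_hw *hw,
						 unsigned long parent_rate)
	{
		unsigned long rate = parent_rate;
		u32 *cfg;

		cfg = kcalloc(4, sizeof(*cfg), GFP_KERNEL);	/* hypothetical register count */
		if (!cfg)
			return parent_rate;	/* not -ENOMEM */

		/* ... read the PLL registers into cfg[] and derive rate ... */

		kfree(cfg);
		return rate;
	}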
Fixes: 3e37b005580b ("clk: sprd: add adjustable pll support") Signed-off-by: Chunyan Zhang Link: https://lkml.kernel.org/r/20200519030036.1785-2-zhang.lyra@gmail.com Reviewed-by: Baolin Wang Signed-off-by: Stephen Boyd --- drivers/clk/sprd/pll.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/clk/sprd/pll.c b/drivers/clk/sprd/pll.c index 15791484388fa..13a322b2535ac 100644 --- a/drivers/clk/sprd/pll.c +++ b/drivers/clk/sprd/pll.c @@ -106,7 +106,7 @@ static unsigned long _sprd_pll_recalc_rate(const struct sprd_pll *pll, cfg = kcalloc(regs_num, sizeof(*cfg), GFP_KERNEL); if (!cfg) - return -ENOMEM; + return parent_rate; for (i = 0; i < regs_num; i++) cfg[i] = sprd_pll_read(pll, i); -- GitLab From 99e107439eea0dcd993f6d92f3db530772979861 Mon Sep 17 00:00:00 2001 From: Codrin Ciubotariu Date: Fri, 15 May 2020 17:27:20 +0300 Subject: [PATCH 0711/1971] clk: at91: Add peripheral clock for PTC PMC generates the peripheral clock for the PTC. Signed-off-by: Codrin Ciubotariu Link: https://lkml.kernel.org/r/20200515142720.290206-1-codrin.ciubotariu@microchip.com Signed-off-by: Stephen Boyd --- drivers/clk/at91/sama5d2.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c index ff7e3f727082e..73b287fbecb8c 100644 --- a/drivers/clk/at91/sama5d2.c +++ b/drivers/clk/at91/sama5d2.c @@ -89,6 +89,7 @@ static const struct { { .n = "i2s1_clk", .id = 55, .r = { .min = 0, .max = 83000000 }, }, { .n = "can0_clk", .id = 56, .r = { .min = 0, .max = 83000000 }, }, { .n = "can1_clk", .id = 57, .r = { .min = 0, .max = 83000000 }, }, + { .n = "ptc_clk", .id = 58, .r = { .min = 0, .max = 83000000 }, }, { .n = "classd_clk", .id = 59, .r = { .min = 0, .max = 83000000 }, }, }; -- GitLab From f6363c437dc6e287259c05849286bf0faefb0fdd Mon Sep 17 00:00:00 2001 From: Claudiu Beznea Date: Sun, 3 May 2020 15:18:08 +0300 Subject: [PATCH 0712/1971] clk: at91: pmc: do not continue if compatible not located pmc_register_ops() is called for all AT91 devices. Return -ENODEV in case of_find_matching_node() returns NULL. Signed-off-by: Claudiu Beznea Link: https://lkml.kernel.org/r/1588508289-10140-1-git-send-email-claudiu.beznea@microchip.com Signed-off-by: Stephen Boyd --- drivers/clk/at91/pmc.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c index b71515acdec1f..bf0570e66fc1f 100644 --- a/drivers/clk/at91/pmc.c +++ b/drivers/clk/at91/pmc.c @@ -274,6 +274,8 @@ static int __init pmc_register_ops(void) struct device_node *np; np = of_find_matching_node(NULL, sama5d2_pmc_dt_ids); + if (!np) + return -ENODEV; pmcreg = device_node_to_regmap(np); if (IS_ERR(pmcreg)) -- GitLab From e218325ff9c31de191d4a617f8fddd3cf78b5550 Mon Sep 17 00:00:00 2001 From: Claudiu Beznea Date: Sun, 3 May 2020 15:18:09 +0300 Subject: [PATCH 0713/1971] clk: at91: pmc: decrement node's refcount of_find_matching_node() increment node's refcount. Call of_node_put() to decrement it after it was used. 
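The resulting sequence in pmc_register_ops(), sketched together with the check added in the previous patch (error handling abridged):

	np = of_find_matching_node(NULL, sama5d2_pmc_dt_ids);
	if (!np)
		return -ENODEV;

	pmcreg = device_node_to_regmap(np);
	of_node_put(np);	/* drop the reference taken by of_find_matching_node() */
	if (IS_ERR(pmcreg))
		return PTR_ERR(pmcreg);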
Signed-off-by: Claudiu Beznea Link: https://lkml.kernel.org/r/1588508289-10140-2-git-send-email-claudiu.beznea@microchip.com Signed-off-by: Stephen Boyd --- drivers/clk/at91/pmc.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c index bf0570e66fc1f..e164069c81bd9 100644 --- a/drivers/clk/at91/pmc.c +++ b/drivers/clk/at91/pmc.c @@ -278,6 +278,7 @@ static int __init pmc_register_ops(void) return -ENODEV; pmcreg = device_node_to_regmap(np); + of_node_put(np); if (IS_ERR(pmcreg)) return PTR_ERR(pmcreg); -- GitLab From 7425f246f725e51c8a64802851303d1e2c25abd1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Miros=C5=82aw?= Date: Tue, 5 May 2020 00:37:56 +0200 Subject: [PATCH 0714/1971] clk: at91: optimize pmc data allocation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Alloc whole data structure in one block. This makes the code shorter, more efficient and easier to extend in following patch. Signed-off-by: Michał Mirosław Link: https://lkml.kernel.org/r/fc6f6d67b8cee0beace4a9d9cca7431e5efa769d.1588630999.git.mirq-linux@rere.qmqm.pl Acked-by: Alexandre Belloni Signed-off-by: Stephen Boyd --- drivers/clk/at91/at91rm9200.c | 2 +- drivers/clk/at91/at91sam9260.c | 2 +- drivers/clk/at91/at91sam9g45.c | 2 +- drivers/clk/at91/at91sam9n12.c | 2 +- drivers/clk/at91/at91sam9rl.c | 2 +- drivers/clk/at91/at91sam9x5.c | 2 +- drivers/clk/at91/pmc.c | 34 ++++++++-------------------------- drivers/clk/at91/pmc.h | 3 ++- drivers/clk/at91/sam9x60.c | 2 +- drivers/clk/at91/sama5d2.c | 2 +- drivers/clk/at91/sama5d3.c | 2 +- drivers/clk/at91/sama5d4.c | 2 +- 12 files changed, 20 insertions(+), 37 deletions(-) diff --git a/drivers/clk/at91/at91rm9200.c b/drivers/clk/at91/at91rm9200.c index c44a431b6c972..6f4e1151553d2 100644 --- a/drivers/clk/at91/at91rm9200.c +++ b/drivers/clk/at91/at91rm9200.c @@ -187,7 +187,7 @@ static void __init at91rm9200_pmc_setup(struct device_node *np) return; err_free: - pmc_data_free(at91rm9200_pmc); + kfree(at91rm9200_pmc); } /* * While the TCB can be used as the clocksource, the system timer is most likely diff --git a/drivers/clk/at91/at91sam9260.c b/drivers/clk/at91/at91sam9260.c index a9d4234758d7d..946f03a09858c 100644 --- a/drivers/clk/at91/at91sam9260.c +++ b/drivers/clk/at91/at91sam9260.c @@ -462,7 +462,7 @@ static void __init at91sam926x_pmc_setup(struct device_node *np, return; err_free: - pmc_data_free(at91sam9260_pmc); + kfree(at91sam9260_pmc); } static void __init at91sam9260_pmc_setup(struct device_node *np) diff --git a/drivers/clk/at91/at91sam9g45.c b/drivers/clk/at91/at91sam9g45.c index 38a7d2d2df0c7..53e8252b8a63b 100644 --- a/drivers/clk/at91/at91sam9g45.c +++ b/drivers/clk/at91/at91sam9g45.c @@ -210,7 +210,7 @@ static void __init at91sam9g45_pmc_setup(struct device_node *np) return; err_free: - pmc_data_free(at91sam9g45_pmc); + kfree(at91sam9g45_pmc); } /* * The TCB is used as the clocksource so its clock is needed early. This means diff --git a/drivers/clk/at91/at91sam9n12.c b/drivers/clk/at91/at91sam9n12.c index 8bb39d2ba84b7..f3ae1cd3cb8d7 100644 --- a/drivers/clk/at91/at91sam9n12.c +++ b/drivers/clk/at91/at91sam9n12.c @@ -228,7 +228,7 @@ static void __init at91sam9n12_pmc_setup(struct device_node *np) return; err_free: - pmc_data_free(at91sam9n12_pmc); + kfree(at91sam9n12_pmc); } /* * The TCB is used as the clocksource so its clock is needed early. 
This means diff --git a/drivers/clk/at91/at91sam9rl.c b/drivers/clk/at91/at91sam9rl.c index 77fe83a73bf48..cc739d214ae34 100644 --- a/drivers/clk/at91/at91sam9rl.c +++ b/drivers/clk/at91/at91sam9rl.c @@ -166,6 +166,6 @@ static void __init at91sam9rl_pmc_setup(struct device_node *np) return; err_free: - pmc_data_free(at91sam9rl_pmc); + kfree(at91sam9rl_pmc); } CLK_OF_DECLARE_DRIVER(at91sam9rl_pmc, "atmel,at91sam9rl-pmc", at91sam9rl_pmc_setup); diff --git a/drivers/clk/at91/at91sam9x5.c b/drivers/clk/at91/at91sam9x5.c index 086cf0b4955c2..aac99d6995689 100644 --- a/drivers/clk/at91/at91sam9x5.c +++ b/drivers/clk/at91/at91sam9x5.c @@ -278,7 +278,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np, return; err_free: - pmc_data_free(at91sam9x5_pmc); + kfree(at91sam9x5_pmc); } static void __init at91sam9g15_pmc_setup(struct device_node *np) diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c index e164069c81bd9..ac8a76ca0266b 100644 --- a/drivers/clk/at91/pmc.c +++ b/drivers/clk/at91/pmc.c @@ -76,48 +76,30 @@ struct clk_hw *of_clk_hw_pmc_get(struct of_phandle_args *clkspec, void *data) return ERR_PTR(-EINVAL); } -void pmc_data_free(struct pmc_data *pmc_data) -{ - kfree(pmc_data->chws); - kfree(pmc_data->shws); - kfree(pmc_data->phws); - kfree(pmc_data->ghws); -} - struct pmc_data *pmc_data_allocate(unsigned int ncore, unsigned int nsystem, unsigned int nperiph, unsigned int ngck) { - struct pmc_data *pmc_data = kzalloc(sizeof(*pmc_data), GFP_KERNEL); + unsigned int num_clks = ncore + nsystem + nperiph + ngck; + struct pmc_data *pmc_data; + pmc_data = kzalloc(struct_size(pmc_data, hwtable, num_clks), + GFP_KERNEL); if (!pmc_data) return NULL; pmc_data->ncore = ncore; - pmc_data->chws = kcalloc(ncore, sizeof(struct clk_hw *), GFP_KERNEL); - if (!pmc_data->chws) - goto err; + pmc_data->chws = pmc_data->hwtable; pmc_data->nsystem = nsystem; - pmc_data->shws = kcalloc(nsystem, sizeof(struct clk_hw *), GFP_KERNEL); - if (!pmc_data->shws) - goto err; + pmc_data->shws = pmc_data->chws + ncore; pmc_data->nperiph = nperiph; - pmc_data->phws = kcalloc(nperiph, sizeof(struct clk_hw *), GFP_KERNEL); - if (!pmc_data->phws) - goto err; + pmc_data->phws = pmc_data->shws + nsystem; pmc_data->ngck = ngck; - pmc_data->ghws = kcalloc(ngck, sizeof(struct clk_hw *), GFP_KERNEL); - if (!pmc_data->ghws) - goto err; + pmc_data->ghws = pmc_data->phws + nperiph; return pmc_data; - -err: - pmc_data_free(pmc_data); - - return NULL; } #ifdef CONFIG_PM diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h index 9b8db9cdcda53..fc3ef772b9d9f 100644 --- a/drivers/clk/at91/pmc.h +++ b/drivers/clk/at91/pmc.h @@ -24,6 +24,8 @@ struct pmc_data { struct clk_hw **phws; unsigned int ngck; struct clk_hw **ghws; + + struct clk_hw *hwtable[]; }; struct clk_range { @@ -95,7 +97,6 @@ struct clk_pcr_layout { #define nck(a) (a[ARRAY_SIZE(a) - 1].id + 1) struct pmc_data *pmc_data_allocate(unsigned int ncore, unsigned int nsystem, unsigned int nperiph, unsigned int ngck); -void pmc_data_free(struct pmc_data *pmc_data); int of_at91_get_clk_range(struct device_node *np, const char *propname, struct clk_range *range); diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c index cc19e8fb83bec..a7d4f648db264 100644 --- a/drivers/clk/at91/sam9x60.c +++ b/drivers/clk/at91/sam9x60.c @@ -299,7 +299,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np) return; err_free: - pmc_data_free(sam9x60_pmc); + kfree(sam9x60_pmc); } /* Some clks are used for a clocksource */ CLK_OF_DECLARE(sam9x60_pmc, 
"microchip,sam9x60-pmc", sam9x60_pmc_setup); diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c index 73b287fbecb8c..a86b42e40aef8 100644 --- a/drivers/clk/at91/sama5d2.c +++ b/drivers/clk/at91/sama5d2.c @@ -351,6 +351,6 @@ static void __init sama5d2_pmc_setup(struct device_node *np) return; err_free: - pmc_data_free(sama5d2_pmc); + kfree(sama5d2_pmc); } CLK_OF_DECLARE_DRIVER(sama5d2_pmc, "atmel,sama5d2-pmc", sama5d2_pmc_setup); diff --git a/drivers/clk/at91/sama5d3.c b/drivers/clk/at91/sama5d3.c index 88506f909c080..914e6f225510b 100644 --- a/drivers/clk/at91/sama5d3.c +++ b/drivers/clk/at91/sama5d3.c @@ -231,7 +231,7 @@ static void __init sama5d3_pmc_setup(struct device_node *np) return; err_free: - pmc_data_free(sama5d3_pmc); + kfree(sama5d3_pmc); } /* * The TCB is used as the clocksource so its clock is needed early. This means diff --git a/drivers/clk/at91/sama5d4.c b/drivers/clk/at91/sama5d4.c index a6dee4a3b6e48..4ca9a46195006 100644 --- a/drivers/clk/at91/sama5d4.c +++ b/drivers/clk/at91/sama5d4.c @@ -267,6 +267,6 @@ static void __init sama5d4_pmc_setup(struct device_node *np) return; err_free: - pmc_data_free(sama5d4_pmc); + kfree(sama5d4_pmc); } CLK_OF_DECLARE_DRIVER(sama5d4_pmc, "atmel,sama5d4-pmc", sama5d4_pmc_setup); -- GitLab From 99767cd4406fd620d33fa7f820f50764453dc897 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Miros=C5=82aw?= Date: Tue, 5 May 2020 00:37:56 +0200 Subject: [PATCH 0715/1971] clk: at91: allow setting PCKx parent via DT MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This exposes PROGx clocks for use in assigned-clocks DeviceTree property for selecting PCKx parent clock. Signed-off-by: Michał Mirosław Link: https://lkml.kernel.org/r/0054532c00163ddf405dad658b32f0d7d97fcc8e.1588630999.git.mirq-linux@rere.qmqm.pl Acked-by: Alexandre Belloni Signed-off-by: Stephen Boyd --- drivers/clk/at91/at91rm9200.c | 4 +++- drivers/clk/at91/at91sam9260.c | 5 ++++- drivers/clk/at91/at91sam9g45.c | 4 +++- drivers/clk/at91/at91sam9n12.c | 4 +++- drivers/clk/at91/at91sam9rl.c | 4 +++- drivers/clk/at91/at91sam9x5.c | 4 +++- drivers/clk/at91/pmc.c | 12 ++++++++++-- drivers/clk/at91/pmc.h | 5 ++++- drivers/clk/at91/sam9x60.c | 4 +++- drivers/clk/at91/sama5d2.c | 4 +++- drivers/clk/at91/sama5d3.c | 4 +++- drivers/clk/at91/sama5d4.c | 4 +++- include/dt-bindings/clock/at91.h | 1 + 13 files changed, 46 insertions(+), 13 deletions(-) diff --git a/drivers/clk/at91/at91rm9200.c b/drivers/clk/at91/at91rm9200.c index 6f4e1151553d2..8da88e9a95d82 100644 --- a/drivers/clk/at91/at91rm9200.c +++ b/drivers/clk/at91/at91rm9200.c @@ -100,7 +100,7 @@ static void __init at91rm9200_pmc_setup(struct device_node *np) at91rm9200_pmc = pmc_data_allocate(PMC_MAIN + 1, nck(at91rm9200_systemck), - nck(at91rm9200_periphck), 0); + nck(at91rm9200_periphck), 0, 4); if (!at91rm9200_pmc) return; @@ -159,6 +159,8 @@ static void __init at91rm9200_pmc_setup(struct device_node *np) &at91rm9200_programmable_layout); if (IS_ERR(hw)) goto err_free; + + at91rm9200_pmc->pchws[i] = hw; } for (i = 0; i < ARRAY_SIZE(at91rm9200_systemck); i++) { diff --git a/drivers/clk/at91/at91sam9260.c b/drivers/clk/at91/at91sam9260.c index 946f03a09858c..7e5ff252fffce 100644 --- a/drivers/clk/at91/at91sam9260.c +++ b/drivers/clk/at91/at91sam9260.c @@ -354,7 +354,8 @@ static void __init at91sam926x_pmc_setup(struct device_node *np, at91sam9260_pmc = pmc_data_allocate(PMC_MAIN + 1, ndck(data->sck, data->num_sck), - ndck(data->pck, data->num_pck), 0); + ndck(data->pck, 
data->num_pck), + 0, data->num_progck); if (!at91sam9260_pmc) return; @@ -434,6 +435,8 @@ static void __init at91sam926x_pmc_setup(struct device_node *np, &at91rm9200_programmable_layout); if (IS_ERR(hw)) goto err_free; + + at91sam9260_pmc->pchws[i] = hw; } for (i = 0; i < data->num_sck; i++) { diff --git a/drivers/clk/at91/at91sam9g45.c b/drivers/clk/at91/at91sam9g45.c index 53e8252b8a63b..5d18eb04c2184 100644 --- a/drivers/clk/at91/at91sam9g45.c +++ b/drivers/clk/at91/at91sam9g45.c @@ -117,7 +117,7 @@ static void __init at91sam9g45_pmc_setup(struct device_node *np) at91sam9g45_pmc = pmc_data_allocate(PMC_MAIN + 1, nck(at91sam9g45_systemck), - nck(at91sam9g45_periphck), 0); + nck(at91sam9g45_periphck), 0, 2); if (!at91sam9g45_pmc) return; @@ -182,6 +182,8 @@ static void __init at91sam9g45_pmc_setup(struct device_node *np) &at91sam9g45_programmable_layout); if (IS_ERR(hw)) goto err_free; + + at91sam9g45_pmc->pchws[i] = hw; } for (i = 0; i < ARRAY_SIZE(at91sam9g45_systemck); i++) { diff --git a/drivers/clk/at91/at91sam9n12.c b/drivers/clk/at91/at91sam9n12.c index f3ae1cd3cb8d7..3a2564c2f724d 100644 --- a/drivers/clk/at91/at91sam9n12.c +++ b/drivers/clk/at91/at91sam9n12.c @@ -129,7 +129,7 @@ static void __init at91sam9n12_pmc_setup(struct device_node *np) return; at91sam9n12_pmc = pmc_data_allocate(PMC_MAIN + 1, - nck(at91sam9n12_systemck), 31, 0); + nck(at91sam9n12_systemck), 31, 0, 2); if (!at91sam9n12_pmc) return; @@ -198,6 +198,8 @@ static void __init at91sam9n12_pmc_setup(struct device_node *np) &at91sam9x5_programmable_layout); if (IS_ERR(hw)) goto err_free; + + at91sam9n12_pmc->pchws[i] = hw; } for (i = 0; i < ARRAY_SIZE(at91sam9n12_systemck); i++) { diff --git a/drivers/clk/at91/at91sam9rl.c b/drivers/clk/at91/at91sam9rl.c index cc739d214ae34..bcf07f6a0e0ee 100644 --- a/drivers/clk/at91/at91sam9rl.c +++ b/drivers/clk/at91/at91sam9rl.c @@ -89,7 +89,7 @@ static void __init at91sam9rl_pmc_setup(struct device_node *np) at91sam9rl_pmc = pmc_data_allocate(PMC_MAIN + 1, nck(at91sam9rl_systemck), - nck(at91sam9rl_periphck), 0); + nck(at91sam9rl_periphck), 0, 2); if (!at91sam9rl_pmc) return; @@ -138,6 +138,8 @@ static void __init at91sam9rl_pmc_setup(struct device_node *np) &at91rm9200_programmable_layout); if (IS_ERR(hw)) goto err_free; + + at91sam9rl_pmc->pchws[i] = hw; } for (i = 0; i < ARRAY_SIZE(at91sam9rl_systemck); i++) { diff --git a/drivers/clk/at91/at91sam9x5.c b/drivers/clk/at91/at91sam9x5.c index aac99d6995689..f13756b407e2c 100644 --- a/drivers/clk/at91/at91sam9x5.c +++ b/drivers/clk/at91/at91sam9x5.c @@ -151,7 +151,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np, return; at91sam9x5_pmc = pmc_data_allocate(PMC_MAIN + 1, - nck(at91sam9x5_systemck), 31, 0); + nck(at91sam9x5_systemck), 31, 0, 2); if (!at91sam9x5_pmc) return; @@ -227,6 +227,8 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np, &at91sam9x5_programmable_layout); if (IS_ERR(hw)) goto err_free; + + at91sam9x5_pmc->pchws[i] = hw; } for (i = 0; i < ARRAY_SIZE(at91sam9x5_systemck); i++) { diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c index ac8a76ca0266b..20ee9dccee787 100644 --- a/drivers/clk/at91/pmc.c +++ b/drivers/clk/at91/pmc.c @@ -67,6 +67,10 @@ struct clk_hw *of_clk_hw_pmc_get(struct of_phandle_args *clkspec, void *data) if (idx < pmc_data->ngck) return pmc_data->ghws[idx]; break; + case PMC_TYPE_PROGRAMMABLE: + if (idx < pmc_data->npck) + return pmc_data->pchws[idx]; + break; default: break; } @@ -77,9 +81,10 @@ struct clk_hw *of_clk_hw_pmc_get(struct of_phandle_args 
*clkspec, void *data) } struct pmc_data *pmc_data_allocate(unsigned int ncore, unsigned int nsystem, - unsigned int nperiph, unsigned int ngck) + unsigned int nperiph, unsigned int ngck, + unsigned int npck) { - unsigned int num_clks = ncore + nsystem + nperiph + ngck; + unsigned int num_clks = ncore + nsystem + nperiph + ngck + npck; struct pmc_data *pmc_data; pmc_data = kzalloc(struct_size(pmc_data, hwtable, num_clks), @@ -99,6 +104,9 @@ struct pmc_data *pmc_data_allocate(unsigned int ncore, unsigned int nsystem, pmc_data->ngck = ngck; pmc_data->ghws = pmc_data->phws + nperiph; + pmc_data->npck = npck; + pmc_data->pchws = pmc_data->ghws + ngck; + return pmc_data; } diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h index fc3ef772b9d9f..df616f2937e70 100644 --- a/drivers/clk/at91/pmc.h +++ b/drivers/clk/at91/pmc.h @@ -24,6 +24,8 @@ struct pmc_data { struct clk_hw **phws; unsigned int ngck; struct clk_hw **ghws; + unsigned int npck; + struct clk_hw **pchws; struct clk_hw *hwtable[]; }; @@ -96,7 +98,8 @@ struct clk_pcr_layout { #define ndck(a, s) (a[s - 1].id + 1) #define nck(a) (a[ARRAY_SIZE(a) - 1].id + 1) struct pmc_data *pmc_data_allocate(unsigned int ncore, unsigned int nsystem, - unsigned int nperiph, unsigned int ngck); + unsigned int nperiph, unsigned int ngck, + unsigned int npck); int of_at91_get_clk_range(struct device_node *np, const char *propname, struct clk_range *range); diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c index a7d4f648db264..db14e0427c7fa 100644 --- a/drivers/clk/at91/sam9x60.c +++ b/drivers/clk/at91/sam9x60.c @@ -185,7 +185,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np) sam9x60_pmc = pmc_data_allocate(PMC_MAIN + 1, nck(sam9x60_systemck), nck(sam9x60_periphck), - nck(sam9x60_gck)); + nck(sam9x60_gck), 8); if (!sam9x60_pmc) return; @@ -255,6 +255,8 @@ static void __init sam9x60_pmc_setup(struct device_node *np) &sam9x60_programmable_layout); if (IS_ERR(hw)) goto err_free; + + sam9x60_pmc->pchws[i] = hw; } for (i = 0; i < ARRAY_SIZE(sam9x60_systemck); i++) { diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c index a86b42e40aef8..fa9f6126c1587 100644 --- a/drivers/clk/at91/sama5d2.c +++ b/drivers/clk/at91/sama5d2.c @@ -170,7 +170,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np) sama5d2_pmc = pmc_data_allocate(PMC_I2S1_MUX + 1, nck(sama5d2_systemck), nck(sama5d2_periph32ck), - nck(sama5d2_gck)); + nck(sama5d2_gck), 3); if (!sama5d2_pmc) return; @@ -268,6 +268,8 @@ static void __init sama5d2_pmc_setup(struct device_node *np) &sama5d2_programmable_layout); if (IS_ERR(hw)) goto err_free; + + sama5d2_pmc->pchws[i] = hw; } for (i = 0; i < ARRAY_SIZE(sama5d2_systemck); i++) { diff --git a/drivers/clk/at91/sama5d3.c b/drivers/clk/at91/sama5d3.c index 914e6f225510b..507eef6797f1b 100644 --- a/drivers/clk/at91/sama5d3.c +++ b/drivers/clk/at91/sama5d3.c @@ -127,7 +127,7 @@ static void __init sama5d3_pmc_setup(struct device_node *np) sama5d3_pmc = pmc_data_allocate(PMC_MAIN + 1, nck(sama5d3_systemck), - nck(sama5d3_periphck), 0); + nck(sama5d3_periphck), 0, 3); if (!sama5d3_pmc) return; @@ -201,6 +201,8 @@ static void __init sama5d3_pmc_setup(struct device_node *np) &at91sam9x5_programmable_layout); if (IS_ERR(hw)) goto err_free; + + sama5d3_pmc->pchws[i] = hw; } for (i = 0; i < ARRAY_SIZE(sama5d3_systemck); i++) { diff --git a/drivers/clk/at91/sama5d4.c b/drivers/clk/at91/sama5d4.c index 4ca9a46195006..80692902b4e41 100644 --- a/drivers/clk/at91/sama5d4.c +++ b/drivers/clk/at91/sama5d4.c @@ 
-142,7 +142,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np) sama5d4_pmc = pmc_data_allocate(PMC_MCK2 + 1, nck(sama5d4_systemck), - nck(sama5d4_periph32ck), 0); + nck(sama5d4_periph32ck), 0, 3); if (!sama5d4_pmc) return; @@ -224,6 +224,8 @@ static void __init sama5d4_pmc_setup(struct device_node *np) &at91sam9x5_programmable_layout); if (IS_ERR(hw)) goto err_free; + + sama5d4_pmc->pchws[i] = hw; } for (i = 0; i < ARRAY_SIZE(sama5d4_systemck); i++) { diff --git a/include/dt-bindings/clock/at91.h b/include/dt-bindings/clock/at91.h index 38b5554153c80..c3f4aa6a2d296 100644 --- a/include/dt-bindings/clock/at91.h +++ b/include/dt-bindings/clock/at91.h @@ -12,6 +12,7 @@ #define PMC_TYPE_SYSTEM 1 #define PMC_TYPE_PERIPHERAL 2 #define PMC_TYPE_GCK 3 +#define PMC_TYPE_PROGRAMMABLE 4 #define PMC_SLOW 0 #define PMC_MCK 1 -- GitLab From 03a1ee1dad0e39390ca397fff0cf84a3b1de1beb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Miros=C5=82aw?= Date: Tue, 5 May 2020 00:37:57 +0200 Subject: [PATCH 0716/1971] clk: at91: allow setting all PMC clock parents via DT MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We need to have clocks accessible via phandle to select them as peripheral clock parent using assigned-clock-parents in DT. Add support for PLLACK/PLLBCK/AUDIOPLLCK clocks where available. Signed-off-by: Michał Mirosław Acked-by: Alexandre Belloni Link: https://lkml.kernel.org/r/fa39cc10dab8341ea4bc2b7152be9217b2cd34a5.1588630999.git.mirq-linux@rere.qmqm.pl Signed-off-by: Stephen Boyd --- drivers/clk/at91/at91rm9200.c | 6 +++++- drivers/clk/at91/at91sam9260.c | 6 +++++- drivers/clk/at91/at91sam9g45.c | 4 +++- drivers/clk/at91/at91sam9n12.c | 6 +++++- drivers/clk/at91/at91sam9rl.c | 4 +++- drivers/clk/at91/at91sam9x5.c | 4 +++- drivers/clk/at91/sam9x60.c | 4 +++- drivers/clk/at91/sama5d2.c | 6 +++++- drivers/clk/at91/sama5d3.c | 4 +++- drivers/clk/at91/sama5d4.c | 4 +++- include/dt-bindings/clock/at91.h | 3 +++ 11 files changed, 41 insertions(+), 10 deletions(-) diff --git a/drivers/clk/at91/at91rm9200.c b/drivers/clk/at91/at91rm9200.c index 8da88e9a95d82..38bdb49813158 100644 --- a/drivers/clk/at91/at91rm9200.c +++ b/drivers/clk/at91/at91rm9200.c @@ -98,7 +98,7 @@ static void __init at91rm9200_pmc_setup(struct device_node *np) if (IS_ERR(regmap)) return; - at91rm9200_pmc = pmc_data_allocate(PMC_MAIN + 1, + at91rm9200_pmc = pmc_data_allocate(PMC_PLLBCK + 1, nck(at91rm9200_systemck), nck(at91rm9200_periphck), 0, 4); if (!at91rm9200_pmc) @@ -123,12 +123,16 @@ static void __init at91rm9200_pmc_setup(struct device_node *np) if (IS_ERR(hw)) goto err_free; + at91rm9200_pmc->chws[PMC_PLLACK] = hw; + hw = at91_clk_register_pll(regmap, "pllbck", "mainck", 1, &at91rm9200_pll_layout, &rm9200_pll_characteristics); if (IS_ERR(hw)) goto err_free; + at91rm9200_pmc->chws[PMC_PLLBCK] = hw; + parent_names[0] = slowxtal_name; parent_names[1] = "mainck"; parent_names[2] = "pllack"; diff --git a/drivers/clk/at91/at91sam9260.c b/drivers/clk/at91/at91sam9260.c index 7e5ff252fffce..6d0723aa8b138 100644 --- a/drivers/clk/at91/at91sam9260.c +++ b/drivers/clk/at91/at91sam9260.c @@ -352,7 +352,7 @@ static void __init at91sam926x_pmc_setup(struct device_node *np, if (IS_ERR(regmap)) return; - at91sam9260_pmc = pmc_data_allocate(PMC_MAIN + 1, + at91sam9260_pmc = pmc_data_allocate(PMC_PLLBCK + 1, ndck(data->sck, data->num_sck), ndck(data->pck, data->num_pck), 0, data->num_progck); @@ -399,12 +399,16 @@ static void __init at91sam926x_pmc_setup(struct device_node *np, if 
(IS_ERR(hw)) goto err_free; + at91sam9260_pmc->chws[PMC_PLLACK] = hw; + hw = at91_clk_register_pll(regmap, "pllbck", "mainck", 1, data->pllb_layout, data->pllb_characteristics); if (IS_ERR(hw)) goto err_free; + at91sam9260_pmc->chws[PMC_PLLBCK] = hw; + parent_names[0] = slck_name; parent_names[1] = "mainck"; parent_names[2] = "pllack"; diff --git a/drivers/clk/at91/at91sam9g45.c b/drivers/clk/at91/at91sam9g45.c index 5d18eb04c2184..9873b583c2604 100644 --- a/drivers/clk/at91/at91sam9g45.c +++ b/drivers/clk/at91/at91sam9g45.c @@ -115,7 +115,7 @@ static void __init at91sam9g45_pmc_setup(struct device_node *np) if (IS_ERR(regmap)) return; - at91sam9g45_pmc = pmc_data_allocate(PMC_MAIN + 1, + at91sam9g45_pmc = pmc_data_allocate(PMC_PLLACK + 1, nck(at91sam9g45_systemck), nck(at91sam9g45_periphck), 0, 2); if (!at91sam9g45_pmc) @@ -143,6 +143,8 @@ static void __init at91sam9g45_pmc_setup(struct device_node *np) if (IS_ERR(hw)) goto err_free; + at91sam9g45_pmc->chws[PMC_PLLACK] = hw; + hw = at91_clk_register_utmi(regmap, NULL, "utmick", "mainck"); if (IS_ERR(hw)) goto err_free; diff --git a/drivers/clk/at91/at91sam9n12.c b/drivers/clk/at91/at91sam9n12.c index 3a2564c2f724d..630dc5d87171a 100644 --- a/drivers/clk/at91/at91sam9n12.c +++ b/drivers/clk/at91/at91sam9n12.c @@ -128,7 +128,7 @@ static void __init at91sam9n12_pmc_setup(struct device_node *np) if (IS_ERR(regmap)) return; - at91sam9n12_pmc = pmc_data_allocate(PMC_MAIN + 1, + at91sam9n12_pmc = pmc_data_allocate(PMC_PLLBCK + 1, nck(at91sam9n12_systemck), 31, 0, 2); if (!at91sam9n12_pmc) return; @@ -162,11 +162,15 @@ static void __init at91sam9n12_pmc_setup(struct device_node *np) if (IS_ERR(hw)) goto err_free; + at91sam9n12_pmc->chws[PMC_PLLACK] = hw; + hw = at91_clk_register_pll(regmap, "pllbck", "mainck", 1, &at91rm9200_pll_layout, &pllb_characteristics); if (IS_ERR(hw)) goto err_free; + at91sam9n12_pmc->chws[PMC_PLLBCK] = hw; + parent_names[0] = slck_name; parent_names[1] = "mainck"; parent_names[2] = "plladivck"; diff --git a/drivers/clk/at91/at91sam9rl.c b/drivers/clk/at91/at91sam9rl.c index bcf07f6a0e0ee..0d1cc44b056ff 100644 --- a/drivers/clk/at91/at91sam9rl.c +++ b/drivers/clk/at91/at91sam9rl.c @@ -87,7 +87,7 @@ static void __init at91sam9rl_pmc_setup(struct device_node *np) if (IS_ERR(regmap)) return; - at91sam9rl_pmc = pmc_data_allocate(PMC_MAIN + 1, + at91sam9rl_pmc = pmc_data_allocate(PMC_PLLACK + 1, nck(at91sam9rl_systemck), nck(at91sam9rl_periphck), 0, 2); if (!at91sam9rl_pmc) @@ -105,6 +105,8 @@ static void __init at91sam9rl_pmc_setup(struct device_node *np) if (IS_ERR(hw)) goto err_free; + at91sam9rl_pmc->chws[PMC_PLLACK] = hw; + hw = at91_clk_register_utmi(regmap, NULL, "utmick", "mainck"); if (IS_ERR(hw)) goto err_free; diff --git a/drivers/clk/at91/at91sam9x5.c b/drivers/clk/at91/at91sam9x5.c index f13756b407e2c..0ce3da0802876 100644 --- a/drivers/clk/at91/at91sam9x5.c +++ b/drivers/clk/at91/at91sam9x5.c @@ -150,7 +150,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np, if (IS_ERR(regmap)) return; - at91sam9x5_pmc = pmc_data_allocate(PMC_MAIN + 1, + at91sam9x5_pmc = pmc_data_allocate(PMC_PLLACK + 1, nck(at91sam9x5_systemck), 31, 0, 2); if (!at91sam9x5_pmc) return; @@ -184,6 +184,8 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np, if (IS_ERR(hw)) goto err_free; + at91sam9x5_pmc->chws[PMC_PLLACK] = hw; + hw = at91_clk_register_utmi(regmap, NULL, "utmick", "mainck"); if (IS_ERR(hw)) goto err_free; diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c index 
db14e0427c7fa..3e20aa68259fd 100644 --- a/drivers/clk/at91/sam9x60.c +++ b/drivers/clk/at91/sam9x60.c @@ -182,7 +182,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np) if (IS_ERR(regmap)) return; - sam9x60_pmc = pmc_data_allocate(PMC_MAIN + 1, + sam9x60_pmc = pmc_data_allocate(PMC_PLLACK + 1, nck(sam9x60_systemck), nck(sam9x60_periphck), nck(sam9x60_gck), 8); @@ -214,6 +214,8 @@ static void __init sam9x60_pmc_setup(struct device_node *np) if (IS_ERR(hw)) goto err_free; + sam9x60_pmc->chws[PMC_PLLACK] = hw; + hw = sam9x60_clk_register_pll(regmap, &pmc_pll_lock, "upllck", "main_osc", 1, &upll_characteristics); if (IS_ERR(hw)) diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c index fa9f6126c1587..d69421d71daf0 100644 --- a/drivers/clk/at91/sama5d2.c +++ b/drivers/clk/at91/sama5d2.c @@ -167,7 +167,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np) if (IS_ERR(regmap)) return; - sama5d2_pmc = pmc_data_allocate(PMC_I2S1_MUX + 1, + sama5d2_pmc = pmc_data_allocate(PMC_AUDIOPLLCK + 1, nck(sama5d2_systemck), nck(sama5d2_periph32ck), nck(sama5d2_gck), 3); @@ -203,6 +203,8 @@ static void __init sama5d2_pmc_setup(struct device_node *np) if (IS_ERR(hw)) goto err_free; + sama5d2_pmc->chws[PMC_PLLACK] = hw; + hw = at91_clk_register_audio_pll_frac(regmap, "audiopll_fracck", "mainck"); if (IS_ERR(hw)) @@ -218,6 +220,8 @@ static void __init sama5d2_pmc_setup(struct device_node *np) if (IS_ERR(hw)) goto err_free; + sama5d2_pmc->chws[PMC_AUDIOPLLCK] = hw; + regmap_sfr = syscon_regmap_lookup_by_compatible("atmel,sama5d2-sfr"); if (IS_ERR(regmap_sfr)) regmap_sfr = NULL; diff --git a/drivers/clk/at91/sama5d3.c b/drivers/clk/at91/sama5d3.c index 507eef6797f1b..5e4e44dd4c37c 100644 --- a/drivers/clk/at91/sama5d3.c +++ b/drivers/clk/at91/sama5d3.c @@ -125,7 +125,7 @@ static void __init sama5d3_pmc_setup(struct device_node *np) if (IS_ERR(regmap)) return; - sama5d3_pmc = pmc_data_allocate(PMC_MAIN + 1, + sama5d3_pmc = pmc_data_allocate(PMC_PLLACK + 1, nck(sama5d3_systemck), nck(sama5d3_periphck), 0, 3); if (!sama5d3_pmc) @@ -158,6 +158,8 @@ static void __init sama5d3_pmc_setup(struct device_node *np) if (IS_ERR(hw)) goto err_free; + sama5d3_pmc->chws[PMC_PLLACK] = hw; + hw = at91_clk_register_utmi(regmap, NULL, "utmick", "mainck"); if (IS_ERR(hw)) goto err_free; diff --git a/drivers/clk/at91/sama5d4.c b/drivers/clk/at91/sama5d4.c index 80692902b4e41..662ff5fa6e986 100644 --- a/drivers/clk/at91/sama5d4.c +++ b/drivers/clk/at91/sama5d4.c @@ -140,7 +140,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np) if (IS_ERR(regmap)) return; - sama5d4_pmc = pmc_data_allocate(PMC_MCK2 + 1, + sama5d4_pmc = pmc_data_allocate(PMC_PLLACK + 1, nck(sama5d4_systemck), nck(sama5d4_periph32ck), 0, 3); if (!sama5d4_pmc) @@ -173,6 +173,8 @@ static void __init sama5d4_pmc_setup(struct device_node *np) if (IS_ERR(hw)) goto err_free; + sama5d4_pmc->chws[PMC_PLLACK] = hw; + hw = at91_clk_register_utmi(regmap, NULL, "utmick", "mainck"); if (IS_ERR(hw)) goto err_free; diff --git a/include/dt-bindings/clock/at91.h b/include/dt-bindings/clock/at91.h index c3f4aa6a2d296..eba17106608b3 100644 --- a/include/dt-bindings/clock/at91.h +++ b/include/dt-bindings/clock/at91.h @@ -21,6 +21,9 @@ #define PMC_MCK2 4 #define PMC_I2S0_MUX 5 #define PMC_I2S1_MUX 6 +#define PMC_PLLACK 7 +#define PMC_PLLBCK 8 +#define PMC_AUDIOPLLCK 9 #ifndef AT91_PMC_MOSCS #define AT91_PMC_MOSCS 0 /* MOSCS Flag */ -- GitLab From c5bd76d93e72be90b4682979b2236f56b567885d Mon Sep 17 00:00:00 2001 From: Jason Yan Date: Fri, 
17 Apr 2020 15:35:23 +0800 Subject: [PATCH 0717/1971] clk: ti: dra7: remove two unused symbols MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix the following gcc warning: drivers/clk/ti/clk-7xx.c:320:43: warning: ‘dra7_gpu_sys_clk_data’ defined but not used [-Wunused-const-variable=] static const struct omap_clkctrl_div_data dra7_gpu_sys_clk_data __initconst = { ^~~~~~~~~~~~~~~~~~~~~ drivers/clk/ti/clk-7xx.c:315:27: warning: ‘dra7_gpu_sys_clk_parents’ defined but not used [-Wunused-const-variable=] static const char * const dra7_gpu_sys_clk_parents[] __initconst = { ^~~~~~~~~~~~~~~~~~~~~~~~ Reported-by: Hulk Robot Signed-off-by: Jason Yan Link: https://lkml.kernel.org/r/20200417073523.42520-1-yanaijie@huawei.com Acked-by: Tony Lindgren Signed-off-by: Stephen Boyd --- drivers/clk/ti/clk-7xx.c | 9 --------- 1 file changed, 9 deletions(-) diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c index bf8fced40e426..b4cf578a69e11 100644 --- a/drivers/clk/ti/clk-7xx.c +++ b/drivers/clk/ti/clk-7xx.c @@ -312,15 +312,6 @@ static const char * const dra7_gpu_hyd_mux_parents[] __initconst = { NULL, }; -static const char * const dra7_gpu_sys_clk_parents[] __initconst = { - "sys_clkin", - NULL, -}; - -static const struct omap_clkctrl_div_data dra7_gpu_sys_clk_data __initconst = { - .max_div = 2, -}; - static const struct omap_clkctrl_bit_data dra7_gpu_core_bit_data[] __initconst = { { 24, TI_CLK_MUX, dra7_gpu_core_mux_parents, NULL, }, { 26, TI_CLK_MUX, dra7_gpu_hyd_mux_parents, NULL, }, -- GitLab From 6f4d3c13b71c6ebd4d04d0adca53859131c595eb Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 26 May 2020 23:41:16 +0100 Subject: [PATCH 0718/1971] clk: versatile: remove redundant assignment to pointer clk The pointer clk is being initialized with a value that is never read and is being updated with a new value later on. The initialization is redundant and can be removed. Addresses-Coverity: ("Unused value") Signed-off-by: Colin Ian King Link: https://lkml.kernel.org/r/20200526224116.63549-1-colin.king@canonical.com Reviewed-by: Linus Walleij Signed-off-by: Stephen Boyd --- drivers/clk/versatile/clk-versatile.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/clk/versatile/clk-versatile.c b/drivers/clk/versatile/clk-versatile.c index fd54d5c0251cc..8ed7a179f6519 100644 --- a/drivers/clk/versatile/clk-versatile.c +++ b/drivers/clk/versatile/clk-versatile.c @@ -56,7 +56,7 @@ static const struct clk_icst_desc versatile_auxosc_desc __initconst = { static void __init cm_osc_setup(struct device_node *np, const struct clk_icst_desc *desc) { - struct clk *clk = ERR_PTR(-EINVAL); + struct clk *clk; const char *clk_name = np->name; const char *parent_name; -- GitLab From 8b4f6b8d59c614477ce65da15a4f5ddc31b2c398 Mon Sep 17 00:00:00 2001 From: Chunyan Zhang Date: Wed, 27 May 2020 13:36:35 +0800 Subject: [PATCH 0719/1971] clk: sprd: check its parent status before reading gate clock Some clocks only can be accessed if their parent is enabled. mipi_csi_xx clocks on SC9863A are an examples. We have to ensure the parent clock is enabled when reading those clocks. 
Signed-off-by: Chunyan Zhang Link: https://lkml.kernel.org/r/20200527053638.31439-2-zhang.lyra@gmail.com Signed-off-by: Stephen Boyd --- drivers/clk/sprd/gate.c | 7 +++++++ drivers/clk/sprd/gate.h | 9 +++++++++ 2 files changed, 16 insertions(+) diff --git a/drivers/clk/sprd/gate.c b/drivers/clk/sprd/gate.c index 574cfc116bbcd..56e1714b541ef 100644 --- a/drivers/clk/sprd/gate.c +++ b/drivers/clk/sprd/gate.c @@ -94,8 +94,15 @@ static int sprd_gate_is_enabled(struct clk_hw *hw) { struct sprd_gate *sg = hw_to_sprd_gate(hw); struct sprd_clk_common *common = &sg->common; + struct clk_hw *parent; unsigned int reg; + if (sg->flags & SPRD_GATE_NON_AON) { + parent = clk_hw_get_parent(hw); + if (!parent || !clk_hw_is_enabled(parent)) + return 0; + } + regmap_read(common->regmap, common->reg, ®); if (sg->flags & CLK_GATE_SET_TO_DISABLE) diff --git a/drivers/clk/sprd/gate.h b/drivers/clk/sprd/gate.h index b55817869367f..e738dafa4fe91 100644 --- a/drivers/clk/sprd/gate.h +++ b/drivers/clk/sprd/gate.h @@ -19,6 +19,15 @@ struct sprd_gate { struct sprd_clk_common common; }; +/* + * sprd_gate->flags is used for: + * CLK_GATE_SET_TO_DISABLE BIT(0) + * CLK_GATE_HIWORD_MASK BIT(1) + * CLK_GATE_BIG_ENDIAN BIT(2) + * so we define new flags from BIT(3) + */ +#define SPRD_GATE_NON_AON BIT(3) /* not alway powered on, check before read */ + #define SPRD_SC_GATE_CLK_HW_INIT_FN(_struct, _name, _parent, _reg, \ _sc_offset, _enable_mask, _flags, \ _gate_flags, _udelay, _ops, _fn) \ -- GitLab From 82a4d4a78e59a6604aa1a7f6fe4870c595e5a761 Mon Sep 17 00:00:00 2001 From: Chunyan Zhang Date: Wed, 27 May 2020 13:36:36 +0800 Subject: [PATCH 0720/1971] dt-bindings: clk: sprd: add mipi_csi_xx clocks for SC9863A mipi_csi_xx clocks are used by camera sensors. Signed-off-by: Chunyan Zhang Acked-by: Rob Herring Link: https://lkml.kernel.org/r/20200527053638.31439-3-zhang.lyra@gmail.com Signed-off-by: Stephen Boyd --- Documentation/devicetree/bindings/clock/sprd,sc9863a-clk.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/devicetree/bindings/clock/sprd,sc9863a-clk.yaml b/Documentation/devicetree/bindings/clock/sprd,sc9863a-clk.yaml index bb3a78d8105ee..87e8349a539a4 100644 --- a/Documentation/devicetree/bindings/clock/sprd,sc9863a-clk.yaml +++ b/Documentation/devicetree/bindings/clock/sprd,sc9863a-clk.yaml @@ -28,6 +28,7 @@ properties: - sprd,sc9863a-rpll - sprd,sc9863a-dpll - sprd,sc9863a-mm-gate + - sprd,sc9863a-mm-clk - sprd,sc9863a-apapb-gate clocks: -- GitLab From d7160288f962a63e968d1955fb6a97b79e8317fe Mon Sep 17 00:00:00 2001 From: Chunyan Zhang Date: Wed, 27 May 2020 13:36:37 +0800 Subject: [PATCH 0721/1971] clk: sprd: add dt-bindings include for mipi_csi_xx clocks mipi_csi_xx clocks are used by camera sensors. 
Signed-off-by: Chunyan Zhang Acked-by: Rob Herring Link: https://lkml.kernel.org/r/20200527053638.31439-4-zhang.lyra@gmail.com Signed-off-by: Stephen Boyd --- include/dt-bindings/clock/sprd,sc9863a-clk.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/include/dt-bindings/clock/sprd,sc9863a-clk.h b/include/dt-bindings/clock/sprd,sc9863a-clk.h index 901ba59676c2d..4e030421641f2 100644 --- a/include/dt-bindings/clock/sprd,sc9863a-clk.h +++ b/include/dt-bindings/clock/sprd,sc9863a-clk.h @@ -308,6 +308,11 @@ #define CLK_MCPHY_CFG_EB 14 #define CLK_MM_GATE_NUM (CLK_MCPHY_CFG_EB + 1) +#define CLK_MIPI_CSI 0 +#define CLK_MIPI_CSI_S 1 +#define CLK_MIPI_CSI_M 2 +#define CLK_MM_CLK_NUM (CLK_MIPI_CSI_M + 1) + #define CLK_SIM0_EB 0 #define CLK_IIS0_EB 1 #define CLK_IIS1_EB 2 -- GitLab From 2c1c9696450ff6743f01dc6ae29aecf7b1f8bc54 Mon Sep 17 00:00:00 2001 From: Chunyan Zhang Date: Wed, 27 May 2020 13:36:38 +0800 Subject: [PATCH 0722/1971] clk: sprd: add mipi_csi_xx gate clocks mipi_csi_xx clocks are used by camera sensors. Signed-off-by: Chunyan Zhang Link: https://lkml.kernel.org/r/20200527053638.31439-5-zhang.lyra@gmail.com Signed-off-by: Stephen Boyd --- drivers/clk/sprd/sc9863a-clk.c | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/drivers/clk/sprd/sc9863a-clk.c b/drivers/clk/sprd/sc9863a-clk.c index 5dec0faa62392..c2e4fe719a176 100644 --- a/drivers/clk/sprd/sc9863a-clk.c +++ b/drivers/clk/sprd/sc9863a-clk.c @@ -1615,6 +1615,36 @@ static const struct sprd_clk_desc sc9863a_mm_gate_desc = { .hw_clks = &sc9863a_mm_gate_hws, }; +/* camera sensor clocks */ +static SPRD_GATE_CLK_HW(mipi_csi_clk, "mipi-csi-clk", &mahb_ckg_eb.common.hw, + 0x20, BIT(16), 0, SPRD_GATE_NON_AON); +static SPRD_GATE_CLK_HW(mipi_csi_s_clk, "mipi-csi-s-clk", &mahb_ckg_eb.common.hw, + 0x24, BIT(16), 0, SPRD_GATE_NON_AON); +static SPRD_GATE_CLK_HW(mipi_csi_m_clk, "mipi-csi-m-clk", &mahb_ckg_eb.common.hw, + 0x28, BIT(16), 0, SPRD_GATE_NON_AON); + +static struct sprd_clk_common *sc9863a_mm_clk_clks[] = { + /* address base is 0x60900000 */ + &mipi_csi_clk.common, + &mipi_csi_s_clk.common, + &mipi_csi_m_clk.common, +}; + +static struct clk_hw_onecell_data sc9863a_mm_clk_hws = { + .hws = { + [CLK_MIPI_CSI] = &mipi_csi_clk.common.hw, + [CLK_MIPI_CSI_S] = &mipi_csi_s_clk.common.hw, + [CLK_MIPI_CSI_M] = &mipi_csi_m_clk.common.hw, + }, + .num = CLK_MM_CLK_NUM, +}; + +static const struct sprd_clk_desc sc9863a_mm_clk_desc = { + .clk_clks = sc9863a_mm_clk_clks, + .num_clk_clks = ARRAY_SIZE(sc9863a_mm_clk_clks), + .hw_clks = &sc9863a_mm_clk_hws, +}; + static SPRD_SC_GATE_CLK_FW_NAME(sim0_eb, "sim0-eb", "ext-26m", 0x0, 0x1000, BIT(0), 0, 0); static SPRD_SC_GATE_CLK_FW_NAME(iis0_eb, "iis0-eb", "ext-26m", 0x0, @@ -1737,6 +1767,8 @@ static const struct of_device_id sprd_sc9863a_clk_ids[] = { .data = &sc9863a_aonapb_gate_desc }, { .compatible = "sprd,sc9863a-mm-gate", /* 0x60800000 */ .data = &sc9863a_mm_gate_desc }, + { .compatible = "sprd,sc9863a-mm-clk", /* 0x60900000 */ + .data = &sc9863a_mm_clk_desc }, { .compatible = "sprd,sc9863a-apapb-gate", /* 0x71300000 */ .data = &sc9863a_apapb_gate_desc }, { } -- GitLab From dc543267c7adcad139a439230052b4e609f7018b Mon Sep 17 00:00:00 2001 From: Rikard Falkeborn Date: Sat, 9 May 2020 00:02:38 +0200 Subject: [PATCH 0723/1971] clk: bcm2835: Constify struct debugfs_reg32 bcm2835_debugfs_clock_reg32 is never changed and can therefore be made const. This allows the compiler to put it in the text section instead of the data section. 
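(For the size(1) figures below: adding const moves the table from .data into .rodata, and size accounts read-only data under its "text" column, which is why text grows by exactly the 64 bytes that data shrinks: 26662 - 26598 = 16088 - 16024 = 64.)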
Before: text data bss dec hex filename 26598 16088 64 42750 a6fe drivers/clk/bcm/clk-bcm2835.o After: text data bss dec hex filename 26662 16024 64 42750 a6fe drivers/clk/bcm/clk-bcm2835.o Signed-off-by: Rikard Falkeborn Link: https://lkml.kernel.org/r/20200508220238.4883-1-rikard.falkeborn@gmail.com Signed-off-by: Stephen Boyd --- drivers/clk/bcm/clk-bcm2835.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c index 0d0eeb3b0dd52..6bb7efa12037b 100644 --- a/drivers/clk/bcm/clk-bcm2835.c +++ b/drivers/clk/bcm/clk-bcm2835.c @@ -396,8 +396,8 @@ out: } static void bcm2835_debugfs_regset(struct bcm2835_cprman *cprman, u32 base, - struct debugfs_reg32 *regs, size_t nregs, - struct dentry *dentry) + const struct debugfs_reg32 *regs, + size_t nregs, struct dentry *dentry) { struct debugfs_regset32 *regset; @@ -1240,7 +1240,7 @@ static u8 bcm2835_clock_get_parent(struct clk_hw *hw) return (src & CM_SRC_MASK) >> CM_SRC_SHIFT; } -static struct debugfs_reg32 bcm2835_debugfs_clock_reg32[] = { +static const struct debugfs_reg32 bcm2835_debugfs_clock_reg32[] = { { .name = "ctl", .offset = 0, -- GitLab From a403bbab1a73d798728d76931cab3ff0399b9560 Mon Sep 17 00:00:00 2001 From: Alain Volmat Date: Sun, 22 Mar 2020 15:07:40 +0100 Subject: [PATCH 0724/1971] clk: clk-flexgen: fix clock-critical handling Fixes an issue leading to having all clocks following a critical clocks marked as well as criticals. Fixes: fa6415affe20 ("clk: st: clk-flexgen: Detect critical clocks") Signed-off-by: Alain Volmat Link: https://lkml.kernel.org/r/20200322140740.3970-1-avolmat@me.com Reviewed-by: Patrice Chotard Signed-off-by: Stephen Boyd --- drivers/clk/st/clk-flexgen.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c index 4413b6e04a8ec..55873d4b76032 100644 --- a/drivers/clk/st/clk-flexgen.c +++ b/drivers/clk/st/clk-flexgen.c @@ -375,6 +375,7 @@ static void __init st_of_flexgen_setup(struct device_node *np) break; } + flex_flags &= ~CLK_IS_CRITICAL; of_clk_detect_critical(np, i, &flex_flags); /* -- GitLab From 2d491066ccd4286538450c227fc5094ceb04b494 Mon Sep 17 00:00:00 2001 From: Eddie James Date: Wed, 8 Apr 2020 15:36:16 -0500 Subject: [PATCH 0725/1971] clk: ast2600: Fix AHB clock divider for A1 The latest specs for the AST2600 A1 chip include some different bit definitions for calculating the AHB clock divider. Implement these in order to get the correct AHB clock value in Linux. 
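Reading the new A1 tables in the hunk below, two worked examples (the strap value "val" here is hypothetical): with divbits = (val >> 11) & 0x3 = 2, the AHB divider is ast2600_a1_axi_ahb_div1_tbl[2] = 6 when bit 16 of val is set, or ast2600_a1_axi_ahb_div0_tbl[2] = 3 when it is clear; with divbits = 0 and (val >> 8) & 0x3 = 1, it is ast2600_a1_axi_ahb200_tbl[1] = 4, doubled to 8 when bit 16 is set.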
Signed-off-by: Eddie James Link: https://lkml.kernel.org/r/20200408203616.4031-1-eajames@linux.ibm.com Fixes: d3d04f6c330a ("clk: Add support for AST2600 SoC") Signed-off-by: Stephen Boyd --- drivers/clk/clk-ast2600.c | 31 +++++++++++++++++++++++++------ 1 file changed, 25 insertions(+), 6 deletions(-) diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c index 392d01705b97b..99afc949925f0 100644 --- a/drivers/clk/clk-ast2600.c +++ b/drivers/clk/clk-ast2600.c @@ -642,14 +642,22 @@ static const u32 ast2600_a0_axi_ahb_div_table[] = { 2, 2, 3, 5, }; -static const u32 ast2600_a1_axi_ahb_div_table[] = { - 4, 6, 2, 4, +static const u32 ast2600_a1_axi_ahb_div0_tbl[] = { + 3, 2, 3, 4, +}; + +static const u32 ast2600_a1_axi_ahb_div1_tbl[] = { + 3, 4, 6, 8, +}; + +static const u32 ast2600_a1_axi_ahb200_tbl[] = { + 3, 4, 3, 4, 2, 2, 2, 2, }; static void __init aspeed_g6_cc(struct regmap *map) { struct clk_hw *hw; - u32 val, div, chip_id, axi_div, ahb_div; + u32 val, div, divbits, chip_id, axi_div, ahb_div; clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, 25000000); @@ -679,11 +687,22 @@ static void __init aspeed_g6_cc(struct regmap *map) else axi_div = 2; + divbits = (val >> 11) & 0x3; regmap_read(map, ASPEED_G6_SILICON_REV, &chip_id); - if (chip_id & BIT(16)) - ahb_div = ast2600_a1_axi_ahb_div_table[(val >> 11) & 0x3]; - else + if (chip_id & BIT(16)) { + if (!divbits) { + ahb_div = ast2600_a1_axi_ahb200_tbl[(val >> 8) & 0x3]; + if (val & BIT(16)) + ahb_div *= 2; + } else { + if (val & BIT(16)) + ahb_div = ast2600_a1_axi_ahb_div1_tbl[divbits]; + else + ahb_div = ast2600_a1_axi_ahb_div0_tbl[divbits]; + } + } else { ahb_div = ast2600_a0_axi_ahb_div_table[(val >> 11) & 0x3]; + } hw = clk_hw_register_fixed_factor(NULL, "ahb", "hpll", 0, 1, axi_div * ahb_div); aspeed_g6_clk_data->hws[ASPEED_CLK_AHB] = hw; -- GitLab From dd4f2ca965ecd72f181c81e84c5cb89950405d38 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Tue, 19 May 2020 13:48:09 +0200 Subject: [PATCH 0726/1971] i2c: save a variable in i2c_detect() No need to populate a variable if it is used only in debug output which may get compiled away anyhow. Signed-off-by: Wolfram Sang Signed-off-by: Wolfram Sang --- drivers/i2c/i2c-core-base.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index 9987e72752c4c..d1f278f730118 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -2186,7 +2186,6 @@ static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver) const unsigned short *address_list; struct i2c_client *temp_client; int i, err = 0; - int adap_id = i2c_adapter_id(adapter); address_list = driver->address_list; if (!driver->detect || !address_list) @@ -2214,7 +2213,7 @@ static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver) for (i = 0; address_list[i] != I2C_CLIENT_END; i += 1) { dev_dbg(&adapter->dev, "found normal entry for adapter %d, addr 0x%02x\n", - adap_id, address_list[i]); + i2c_adapter_id(adapter), address_list[i]); temp_client->addr = address_list[i]; err = i2c_detect_address(temp_client, driver); if (unlikely(err)) -- GitLab From ed2ac8116ff25d5b234f983ffbb0c3f5ec2930aa Mon Sep 17 00:00:00 2001 From: Aishwarya Ramakrishnan Date: Tue, 5 May 2020 20:22:30 +0530 Subject: [PATCH 0727/1971] i2c: nvidia-gpu: Use PTR_ERR_OR_ZERO() to simplify code PTR_ERR_OR_ZERO contains if(IS_ERR(...)) + PTR_ERR. 
Generated by: scripts/coccinelle/api/ptr_ret.cocci Signed-off-by: Aishwarya Ramakrishnan Reviewed-by: Ajay Gupta Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-nvidia-gpu.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/i2c/busses/i2c-nvidia-gpu.c b/drivers/i2c/busses/i2c-nvidia-gpu.c index f5d25ce00f03a..f480105000b85 100644 --- a/drivers/i2c/busses/i2c-nvidia-gpu.c +++ b/drivers/i2c/busses/i2c-nvidia-gpu.c @@ -277,10 +277,7 @@ static int gpu_populate_client(struct gpu_i2c_dev *i2cd, int irq) i2cd->gpu_ccgx_ucsi->irq = irq; i2cd->gpu_ccgx_ucsi->properties = ccgx_props; i2cd->ccgx_client = i2c_new_client_device(&i2cd->adapter, i2cd->gpu_ccgx_ucsi); - if (IS_ERR(i2cd->ccgx_client)) - return PTR_ERR(i2cd->ccgx_client); - - return 0; + return PTR_ERR_OR_ZERO(i2cd->ccgx_client); } static int gpu_i2c_probe(struct pci_dev *pdev, const struct pci_device_id *id) -- GitLab From 00a7a00e2d2fc2ebbaf2c3ef25427ae13e0f94da Mon Sep 17 00:00:00 2001 From: Xu Wang Date: Tue, 3 Mar 2020 18:30:52 +0800 Subject: [PATCH 0728/1971] NFS: move dprintk after nfs_alloc_fattr in nfs3_proc_lookup In nfs3_proc_lookup, if nfs_alloc_fattr fails, will only print "NFS call lookup". This may be confusing, move dprintk after nfs_alloc_fattr. Signed-off-by: Xu Wang Signed-off-by: Anna Schumaker --- fs/nfs/nfs3proc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c index a46d1d5d16d8b..2397ceedba8af 100644 --- a/fs/nfs/nfs3proc.c +++ b/fs/nfs/nfs3proc.c @@ -179,11 +179,11 @@ nfs3_proc_lookup(struct inode *dir, struct dentry *dentry, if (nfs_lookup_is_soft_revalidate(dentry)) task_flags |= RPC_TASK_TIMEOUT; - dprintk("NFS call lookup %pd2\n", dentry); res.dir_attr = nfs_alloc_fattr(); if (res.dir_attr == NULL) return -ENOMEM; + dprintk("NFS call lookup %pd2\n", dentry); nfs_fattr_init(fattr); status = rpc_call_sync(NFS_CLIENT(dir), &msg, task_flags); nfs_refresh_inode(dir, res.dir_attr); -- GitLab From 118917d696dc59fd3e1741012c2f9db2294bed6f Mon Sep 17 00:00:00 2001 From: Fedor Tokarev Date: Sat, 28 Mar 2020 14:56:55 +0300 Subject: [PATCH 0729/1971] net: sunrpc: Fix off-by-one issues in 'rpc_ntop6' Fix off-by-one issues in 'rpc_ntop6': - 'snprintf' returns the number of characters which would have been written if enough space had been available, excluding the terminating null byte. Thus, a return value of 'sizeof(scopebuf)' means that the last character was dropped. - 'strcat' adds a terminating null byte to the string, thus if len == buflen, the null byte is written past the end of the buffer. 
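A self-contained illustration of the snprintf() semantics at issue (hypothetical buffer, not the sunrpc code):

	char buf[4];
	int rc = snprintf(buf, sizeof(buf), "%s", "abcd");
	/*
	 * rc == 4 == sizeof(buf), and buf holds "abc" plus the terminating
	 * NUL: the output was truncated even though rc > sizeof(buf) is
	 * false, so truncation must be tested with rc >= sizeof(buf).
	 */

Similarly, strcat() writes its terminating NUL after the appended characters, so a combined length of exactly buflen already overruns the buffer by one byte, hence the second comparison also becomes ">=".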
Signed-off-by: Fedor Tokarev Signed-off-by: Anna Schumaker --- net/sunrpc/addr.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c index 8b4d72b1a0667..010dcb876f9d7 100644 --- a/net/sunrpc/addr.c +++ b/net/sunrpc/addr.c @@ -82,11 +82,11 @@ static size_t rpc_ntop6(const struct sockaddr *sap, rc = snprintf(scopebuf, sizeof(scopebuf), "%c%u", IPV6_SCOPE_DELIMITER, sin6->sin6_scope_id); - if (unlikely((size_t)rc > sizeof(scopebuf))) + if (unlikely((size_t)rc >= sizeof(scopebuf))) return 0; len += rc; - if (unlikely(len > buflen)) + if (unlikely(len >= buflen)) return 0; strcat(buf, scopebuf); -- GitLab From d04659db7b8089531ccbee364c52909fe4670408 Mon Sep 17 00:00:00 2001 From: Zheng Bin Date: Fri, 24 Apr 2020 16:17:23 +0800 Subject: [PATCH 0730/1971] nfs4: Remove unneeded semicolon Fixes coccicheck warning: include/linux/nfs4.h:298:2-3: Unneeded semicolon Reported-by: Hulk Robot Signed-off-by: Zheng Bin Signed-off-by: Anna Schumaker --- include/linux/nfs4.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 82d8fb4220923..4a51db7363a55 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -295,7 +295,7 @@ static inline bool seqid_mutating_err(u32 err) case NFS4ERR_NOFILEHANDLE: case NFS4ERR_MOVED: return false; - }; + } return true; } -- GitLab From 1f9b0f3afb4f804f26d60c373e589eb9b2d247af Mon Sep 17 00:00:00 2001 From: Nishad Kamdar Date: Sat, 25 Apr 2020 18:57:58 +0530 Subject: [PATCH 0731/1971] NFS: Use the correct style for SPDX License Identifier This patch corrects the SPDX License Identifier style in header file related to NFS Client support. For C header files Documentation/process/license-rules.rst mandates C-like comments (opposed to C source files where C++ style should be used). Changes made by using a script provided by Joe Perches here: https://lkml.org/lkml/2019/2/7/46. Suggested-by: Joe Perches Signed-off-by: Nishad Kamdar Signed-off-by: Anna Schumaker --- fs/nfs/sysfs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/nfs/sysfs.h b/fs/nfs/sysfs.h index f1b27411dcc0c..ebcbdc40483b4 100644 --- a/fs/nfs/sysfs.h +++ b/fs/nfs/sysfs.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2019 Hammerspace Inc */ -- GitLab From 1c709b766e73e54d64b1dde1b7cfbcf25bcb15b9 Mon Sep 17 00:00:00 2001 From: Olga Kornievskaia Date: Sun, 26 Apr 2020 11:30:00 -0400 Subject: [PATCH 0732/1971] NFSv4.1 fix rpc_call_done assignment for BIND_CONN_TO_SESSION Fixes: 02a95dee8cf0 ("NFS add callback_ops to nfs4_proc_bind_conn_to_session_callback") Signed-off-by: Olga Kornievskaia Signed-off-by: Anna Schumaker --- fs/nfs/nfs4proc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 9056f3dd380e5..e32717fd1169a 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -7909,7 +7909,7 @@ nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata) } static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = { - .rpc_call_done = &nfs4_bind_one_conn_to_session_done, + .rpc_call_done = nfs4_bind_one_conn_to_session_done, }; /* -- GitLab From 1ad8dd939a9826ff6f8c69ac13e4e1dbba076703 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. 
Silva" Date: Thu, 7 May 2020 14:02:23 -0500 Subject: [PATCH 0733/1971] NFS: Replace zero-length array with flexible-array The current codebase makes use of the zero-length array language extension to the C90 standard, but the preferred mechanism to declare variable-length types such as these ones is a flexible array member[1][2], introduced in C99: struct foo { int stuff; struct boo array[]; }; By making use of the mechanism above, we will get a compiler warning in case the flexible array does not occur last in the structure, which will help us prevent some kind of undefined behavior bugs from being inadvertently introduced[3] to the codebase from now on. Also, notice that, dynamic memory allocations won't be affected by this change: "Flexible array members have incomplete type, and so the sizeof operator may not be applied. As a quirk of the original implementation of zero-length arrays, sizeof evaluates to zero."[1] sizeof(flexible-array-member) triggers a warning because flexible array members have incomplete type[1]. There are some instances of code in which the sizeof operator is being incorrectly/erroneously applied to zero-length arrays and the result is zero. Such instances may be hiding some bugs. So, this work (flexible-array member conversions) will also help to get completely rid of those sorts of issues. This issue was found with the help of Coccinelle. [1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html [2] https://github.com/KSPP/linux/issues/21 [3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour") Signed-off-by: Gustavo A. R. Silva Signed-off-by: Anna Schumaker --- include/linux/nfs4.h | 2 +- include/linux/nfs_xdr.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 4a51db7363a55..4dba3c948932d 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -38,7 +38,7 @@ struct nfs4_ace { struct nfs4_acl { uint32_t naces; - struct nfs4_ace aces[0]; + struct nfs4_ace aces[]; }; #define NFS4_MAXLABELLEN 2048 diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index e5f3e7d8d3d59..5fd0a9ef425fa 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1227,7 +1227,7 @@ struct nfs4_secinfo4 { struct nfs4_secinfo_flavors { unsigned int num_flavors; - struct nfs4_secinfo4 flavors[0]; + struct nfs4_secinfo4 flavors[]; }; struct nfs4_secinfo_arg { -- GitLab From ef31d878b2e7856cb5d2f76f14b37f4b834eb0f3 Mon Sep 17 00:00:00 2001 From: Chen Zhou Date: Fri, 8 May 2020 20:22:41 +0800 Subject: [PATCH 0734/1971] NFS: remove duplicate headers Remove duplicate headers which are included twice. Signed-off-by: Chen Zhou Signed-off-by: Anna Schumaker --- fs/nfs/dns_resolve.c | 1 - 1 file changed, 1 deletion(-) diff --git a/fs/nfs/dns_resolve.c b/fs/nfs/dns_resolve.c index 963800037609e..e87d500ad95aa 100644 --- a/fs/nfs/dns_resolve.c +++ b/fs/nfs/dns_resolve.c @@ -39,7 +39,6 @@ ssize_t nfs_dns_resolve_name(struct net *net, char *name, size_t namelen, #include #include #include -#include #include #include #include -- GitLab From 69049454206ac7f7e6e0683bc7fd43eb61746479 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Wed, 27 May 2020 12:43:25 -0500 Subject: [PATCH 0735/1971] xen-pciback: Use dev_printk() when possible Use dev_printk() when possible to include device and driver information in the conventional format. Add "#define dev_fmt" when needed to preserve DRV_NAME or KBUILD_MODNAME in messages. No functional change intended. 
Signed-off-by: Bjorn Helgaas Link: https://lore.kernel.org/r/20200527174326.254329-2-helgaas@kernel.org Reviewed-by: Boris Ostrovsky Signed-off-by: Boris Ostrovsky --- drivers/xen/xen-pciback/conf_space.c | 16 +++++---- drivers/xen/xen-pciback/conf_space_header.c | 40 +++++++++------------ drivers/xen/xen-pciback/conf_space_quirks.c | 6 ++-- drivers/xen/xen-pciback/pci_stub.c | 38 +++++++++----------- drivers/xen/xen-pciback/pciback_ops.c | 38 ++++++++------------ drivers/xen/xen-pciback/vpci.c | 10 +++--- 6 files changed, 65 insertions(+), 83 deletions(-) diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c index da51a5d34e6e2..f2df4e55fc1be 100644 --- a/drivers/xen/xen-pciback/conf_space.c +++ b/drivers/xen/xen-pciback/conf_space.c @@ -10,6 +10,8 @@ * Author: Ryan Wilson */ +#define dev_fmt(fmt) DRV_NAME ": " fmt + #include #include #include @@ -155,8 +157,8 @@ int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size, u32 value = 0, tmp_val; if (unlikely(verbose_request)) - printk(KERN_DEBUG DRV_NAME ": %s: read %d bytes at 0x%x\n", - pci_name(dev), size, offset); + dev_printk(KERN_DEBUG, &dev->dev, "read %d bytes at 0x%x\n", + size, offset); if (!valid_request(offset, size)) { err = XEN_PCI_ERR_invalid_offset; @@ -196,8 +198,8 @@ int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size, out: if (unlikely(verbose_request)) - printk(KERN_DEBUG DRV_NAME ": %s: read %d bytes at 0x%x = %x\n", - pci_name(dev), size, offset, value); + dev_printk(KERN_DEBUG, &dev->dev, + "read %d bytes at 0x%x = %x\n", size, offset, value); *ret_val = value; return xen_pcibios_err_to_errno(err); @@ -213,9 +215,9 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value) int field_start, field_end; if (unlikely(verbose_request)) - printk(KERN_DEBUG - DRV_NAME ": %s: write request %d bytes at 0x%x = %x\n", - pci_name(dev), size, offset, value); + dev_printk(KERN_DEBUG, &dev->dev, + "write request %d bytes at 0x%x = %x\n", size, + offset, value); if (!valid_request(offset, size)) return XEN_PCI_ERR_invalid_offset; diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c index fb4fccb4aeccf..b277b689f2576 100644 --- a/drivers/xen/xen-pciback/conf_space_header.c +++ b/drivers/xen/xen-pciback/conf_space_header.c @@ -6,6 +6,7 @@ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#define dev_fmt pr_fmt #include #include @@ -68,8 +69,7 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data) dev_data = pci_get_drvdata(dev); if (!pci_is_enabled(dev) && is_enable_cmd(value)) { if (unlikely(verbose_request)) - printk(KERN_DEBUG DRV_NAME ": %s: enable\n", - pci_name(dev)); + dev_printk(KERN_DEBUG, &dev->dev, "enable\n"); err = pci_enable_device(dev); if (err) return err; @@ -77,8 +77,7 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data) dev_data->enable_intx = 1; } else if (pci_is_enabled(dev) && !is_enable_cmd(value)) { if (unlikely(verbose_request)) - printk(KERN_DEBUG DRV_NAME ": %s: disable\n", - pci_name(dev)); + dev_printk(KERN_DEBUG, &dev->dev, "disable\n"); pci_disable_device(dev); if (dev_data) dev_data->enable_intx = 0; @@ -86,34 +85,30 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data) if (!dev->is_busmaster && is_master_cmd(value)) { if (unlikely(verbose_request)) - printk(KERN_DEBUG DRV_NAME ": %s: set bus master\n", - pci_name(dev)); + dev_printk(KERN_DEBUG, &dev->dev, "set bus master\n"); 
pci_set_master(dev); } else if (dev->is_busmaster && !is_master_cmd(value)) { if (unlikely(verbose_request)) - printk(KERN_DEBUG DRV_NAME ": %s: clear bus master\n", - pci_name(dev)); + dev_printk(KERN_DEBUG, &dev->dev, "clear bus master\n"); pci_clear_master(dev); } if (!(cmd->val & PCI_COMMAND_INVALIDATE) && (value & PCI_COMMAND_INVALIDATE)) { if (unlikely(verbose_request)) - printk(KERN_DEBUG - DRV_NAME ": %s: enable memory-write-invalidate\n", - pci_name(dev)); + dev_printk(KERN_DEBUG, &dev->dev, + "enable memory-write-invalidate\n"); err = pci_set_mwi(dev); if (err) { - pr_warn("%s: cannot enable memory-write-invalidate (%d)\n", - pci_name(dev), err); + dev_warn(&dev->dev, "cannot enable memory-write-invalidate (%d)\n", + err); value &= ~PCI_COMMAND_INVALIDATE; } } else if ((cmd->val & PCI_COMMAND_INVALIDATE) && !(value & PCI_COMMAND_INVALIDATE)) { if (unlikely(verbose_request)) - printk(KERN_DEBUG - DRV_NAME ": %s: disable memory-write-invalidate\n", - pci_name(dev)); + dev_printk(KERN_DEBUG, &dev->dev, + "disable memory-write-invalidate\n"); pci_clear_mwi(dev); } @@ -157,8 +152,7 @@ static int rom_write(struct pci_dev *dev, int offset, u32 value, void *data) struct pci_bar_info *bar = data; if (unlikely(!bar)) { - pr_warn(DRV_NAME ": driver data not found for %s\n", - pci_name(dev)); + dev_warn(&dev->dev, "driver data not found\n"); return XEN_PCI_ERR_op_failed; } @@ -194,8 +188,7 @@ static int bar_write(struct pci_dev *dev, int offset, u32 value, void *data) u32 mask; if (unlikely(!bar)) { - pr_warn(DRV_NAME ": driver data not found for %s\n", - pci_name(dev)); + dev_warn(&dev->dev, "driver data not found\n"); return XEN_PCI_ERR_op_failed; } @@ -228,8 +221,7 @@ static int bar_read(struct pci_dev *dev, int offset, u32 * value, void *data) struct pci_bar_info *bar = data; if (unlikely(!bar)) { - pr_warn(DRV_NAME ": driver data not found for %s\n", - pci_name(dev)); + dev_warn(&dev->dev, "driver data not found\n"); return XEN_PCI_ERR_op_failed; } @@ -433,8 +425,8 @@ int xen_pcibk_config_header_add_fields(struct pci_dev *dev) default: err = -EINVAL; - pr_err("%s: Unsupported header type %d!\n", - pci_name(dev), dev->hdr_type); + dev_err(&dev->dev, "Unsupported header type %d!\n", + dev->hdr_type); break; } diff --git a/drivers/xen/xen-pciback/conf_space_quirks.c b/drivers/xen/xen-pciback/conf_space_quirks.c index ed593d1042a6e..7dc2810863024 100644 --- a/drivers/xen/xen-pciback/conf_space_quirks.c +++ b/drivers/xen/xen-pciback/conf_space_quirks.c @@ -6,6 +6,8 @@ * Author: Chris Bookholt */ +#define dev_fmt(fmt) DRV_NAME ": " fmt + #include #include #include "pciback.h" @@ -35,8 +37,8 @@ static struct xen_pcibk_config_quirk *xen_pcibk_find_quirk(struct pci_dev *dev) if (match_one_device(&tmp_quirk->devid, dev) != NULL) goto out; tmp_quirk = NULL; - printk(KERN_DEBUG DRV_NAME - ": quirk didn't match any device known\n"); + dev_printk(KERN_DEBUG, &dev->dev, + "quirk didn't match any device known\n"); out: return tmp_quirk; } diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c index 7af93d65ed51f..e876c3d6dad1f 100644 --- a/drivers/xen/xen-pciback/pci_stub.c +++ b/drivers/xen/xen-pciback/pci_stub.c @@ -6,6 +6,7 @@ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#define dev_fmt pr_fmt #include #include @@ -626,11 +627,11 @@ static void pcistub_remove(struct pci_dev *dev) if (found_psdev->pdev) { int domid = xen_find_device_domain_owner(dev); - pr_warn("****** removing device %s while still in-use by domain %d! 
******\n", + dev_warn(&dev->dev, "****** removing device %s while still in-use by domain %d! ******\n", pci_name(found_psdev->dev), domid); - pr_warn("****** driver domain may still access this device's i/o resources!\n"); - pr_warn("****** shutdown driver domain before binding device\n"); - pr_warn("****** to other drivers or domains\n"); + dev_warn(&dev->dev, "****** driver domain may still access this device's i/o resources!\n"); + dev_warn(&dev->dev, "****** shutdown driver domain before binding device\n"); + dev_warn(&dev->dev, "****** to other drivers or domains\n"); /* N.B. This ends up calling pcistub_put_pci_dev which ends up * doing the FLR. */ @@ -711,14 +712,12 @@ static pci_ers_result_t common_process(struct pcistub_device *psdev, ret = xen_pcibk_get_pcifront_dev(psdev->dev, psdev->pdev, &aer_op->domain, &aer_op->bus, &aer_op->devfn); if (!ret) { - dev_err(&psdev->dev->dev, - DRV_NAME ": failed to get pcifront device\n"); + dev_err(&psdev->dev->dev, "failed to get pcifront device\n"); return PCI_ERS_RESULT_NONE; } wmb(); - dev_dbg(&psdev->dev->dev, - DRV_NAME ": aer_op %x dom %x bus %x devfn %x\n", + dev_dbg(&psdev->dev->dev, "aer_op %x dom %x bus %x devfn %x\n", aer_cmd, aer_op->domain, aer_op->bus, aer_op->devfn); /*local flag to mark there's aer request, xen_pcibk callback will use * this flag to judge whether we need to check pci-front give aer @@ -754,8 +753,7 @@ static pci_ers_result_t common_process(struct pcistub_device *psdev, if (test_bit(_XEN_PCIF_active, (unsigned long *)&sh_info->flags)) { - dev_dbg(&psdev->dev->dev, - "schedule pci_conf service in " DRV_NAME "\n"); + dev_dbg(&psdev->dev->dev, "schedule pci_conf service\n"); xen_pcibk_test_and_schedule_op(psdev->pdev); } @@ -786,13 +784,12 @@ static pci_ers_result_t xen_pcibk_slot_reset(struct pci_dev *dev) PCI_FUNC(dev->devfn)); if (!psdev || !psdev->pdev) { - dev_err(&dev->dev, - DRV_NAME " device is not found/assigned\n"); + dev_err(&dev->dev, "device is not found/assigned\n"); goto end; } if (!psdev->pdev->sh_info) { - dev_err(&dev->dev, DRV_NAME " device is not connected or owned" + dev_err(&dev->dev, "device is not connected or owned" " by HVM, kill it\n"); kill_domain_by_device(psdev); goto end; @@ -844,13 +841,12 @@ static pci_ers_result_t xen_pcibk_mmio_enabled(struct pci_dev *dev) PCI_FUNC(dev->devfn)); if (!psdev || !psdev->pdev) { - dev_err(&dev->dev, - DRV_NAME " device is not found/assigned\n"); + dev_err(&dev->dev, "device is not found/assigned\n"); goto end; } if (!psdev->pdev->sh_info) { - dev_err(&dev->dev, DRV_NAME " device is not connected or owned" + dev_err(&dev->dev, "device is not connected or owned" " by HVM, kill it\n"); kill_domain_by_device(psdev); goto end; @@ -902,13 +898,12 @@ static pci_ers_result_t xen_pcibk_error_detected(struct pci_dev *dev, PCI_FUNC(dev->devfn)); if (!psdev || !psdev->pdev) { - dev_err(&dev->dev, - DRV_NAME " device is not found/assigned\n"); + dev_err(&dev->dev, "device is not found/assigned\n"); goto end; } if (!psdev->pdev->sh_info) { - dev_err(&dev->dev, DRV_NAME " device is not connected or owned" + dev_err(&dev->dev, "device is not connected or owned" " by HVM, kill it\n"); kill_domain_by_device(psdev); goto end; @@ -956,13 +951,12 @@ static void xen_pcibk_error_resume(struct pci_dev *dev) PCI_FUNC(dev->devfn)); if (!psdev || !psdev->pdev) { - dev_err(&dev->dev, - DRV_NAME " device is not found/assigned\n"); + dev_err(&dev->dev, "device is not found/assigned\n"); goto end; } if (!psdev->pdev->sh_info) { - dev_err(&dev->dev, DRV_NAME " device is not 
connected or owned" + dev_err(&dev->dev, "device is not connected or owned" " by HVM, kill it\n"); kill_domain_by_device(psdev); goto end; diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c index 787966f445895..8545ca78c4f1d 100644 --- a/drivers/xen/xen-pciback/pciback_ops.c +++ b/drivers/xen/xen-pciback/pciback_ops.c @@ -6,6 +6,7 @@ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#define dev_fmt pr_fmt #include #include @@ -148,7 +149,7 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev, int status; if (unlikely(verbose_request)) - printk(KERN_DEBUG DRV_NAME ": %s: enable MSI\n", pci_name(dev)); + dev_printk(KERN_DEBUG, &dev->dev, "enable MSI\n"); if (dev->msi_enabled) status = -EALREADY; @@ -158,9 +159,8 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev, status = pci_enable_msi(dev); if (status) { - pr_warn_ratelimited("%s: error enabling MSI for guest %u: err %d\n", - pci_name(dev), pdev->xdev->otherend_id, - status); + dev_warn_ratelimited(&dev->dev, "error enabling MSI for guest %u: err %d\n", + pdev->xdev->otherend_id, status); op->value = 0; return XEN_PCI_ERR_op_failed; } @@ -170,8 +170,7 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev, op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0; if (unlikely(verbose_request)) - printk(KERN_DEBUG DRV_NAME ": %s: MSI: %d\n", pci_name(dev), - op->value); + dev_printk(KERN_DEBUG, &dev->dev, "MSI: %d\n", op->value); dev_data = pci_get_drvdata(dev); if (dev_data) @@ -185,8 +184,7 @@ int xen_pcibk_disable_msi(struct xen_pcibk_device *pdev, struct pci_dev *dev, struct xen_pci_op *op) { if (unlikely(verbose_request)) - printk(KERN_DEBUG DRV_NAME ": %s: disable MSI\n", - pci_name(dev)); + dev_printk(KERN_DEBUG, &dev->dev, "disable MSI\n"); if (dev->msi_enabled) { struct xen_pcibk_dev_data *dev_data; @@ -199,8 +197,7 @@ int xen_pcibk_disable_msi(struct xen_pcibk_device *pdev, } op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0; if (unlikely(verbose_request)) - printk(KERN_DEBUG DRV_NAME ": %s: MSI: %d\n", pci_name(dev), - op->value); + dev_printk(KERN_DEBUG, &dev->dev, "MSI: %d\n", op->value); return 0; } @@ -214,8 +211,7 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev, u16 cmd; if (unlikely(verbose_request)) - printk(KERN_DEBUG DRV_NAME ": %s: enable MSI-X\n", - pci_name(dev)); + dev_printk(KERN_DEBUG, &dev->dev, "enable MSI-X\n"); if (op->value > SH_INFO_MAX_VEC) return -EINVAL; @@ -249,16 +245,14 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev, op->msix_entries[i].vector = xen_pirq_from_irq(entries[i].vector); if (unlikely(verbose_request)) - printk(KERN_DEBUG DRV_NAME ": %s: " \ - "MSI-X[%d]: %d\n", - pci_name(dev), i, + dev_printk(KERN_DEBUG, &dev->dev, + "MSI-X[%d]: %d\n", i, op->msix_entries[i].vector); } } } else - pr_warn_ratelimited("%s: error enabling MSI-X for guest %u: err %d!\n", - pci_name(dev), pdev->xdev->otherend_id, - result); + dev_warn_ratelimited(&dev->dev, "error enabling MSI-X for guest %u: err %d!\n", + pdev->xdev->otherend_id, result); kfree(entries); op->value = result; @@ -274,8 +268,7 @@ int xen_pcibk_disable_msix(struct xen_pcibk_device *pdev, struct pci_dev *dev, struct xen_pci_op *op) { if (unlikely(verbose_request)) - printk(KERN_DEBUG DRV_NAME ": %s: disable MSI-X\n", - pci_name(dev)); + dev_printk(KERN_DEBUG, &dev->dev, "disable MSI-X\n"); if (dev->msix_enabled) { struct xen_pcibk_dev_data *dev_data; @@ -292,8 +285,7 @@ int xen_pcibk_disable_msix(struct xen_pcibk_device *pdev, */ op->value = dev->irq ? 
xen_pirq_from_irq(dev->irq) : 0; if (unlikely(verbose_request)) - printk(KERN_DEBUG DRV_NAME ": %s: MSI-X: %d\n", - pci_name(dev), op->value); + dev_printk(KERN_DEBUG, &dev->dev, "MSI-X: %d\n", op->value); return 0; } #endif @@ -424,7 +416,7 @@ static irqreturn_t xen_pcibk_guest_interrupt(int irq, void *dev_id) dev_data->handled++; if ((dev_data->handled % 1000) == 0) { if (xen_test_irq_shared(irq)) { - pr_info("%s IRQ line is not shared " + dev_info(&dev->dev, "%s IRQ line is not shared " "with other domains. Turning ISR off\n", dev_data->irq_name); dev_data->ack_intr = 0; diff --git a/drivers/xen/xen-pciback/vpci.c b/drivers/xen/xen-pciback/vpci.c index f6ba18191c0f9..5447b5ab7c766 100644 --- a/drivers/xen/xen-pciback/vpci.c +++ b/drivers/xen/xen-pciback/vpci.c @@ -7,6 +7,7 @@ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#define dev_fmt pr_fmt #include #include @@ -105,9 +106,8 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev, struct pci_dev_entry, list); if (match_slot(dev, t->dev)) { - pr_info("vpci: %s: assign to virtual slot %d func %d\n", - pci_name(dev), slot, - PCI_FUNC(dev->devfn)); + dev_info(&dev->dev, "vpci: assign to virtual slot %d func %d\n", + slot, PCI_FUNC(dev->devfn)); list_add_tail(&dev_entry->list, &vpci_dev->dev_list[slot]); func = PCI_FUNC(dev->devfn); @@ -119,8 +119,8 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev, /* Assign to a new slot on the virtual PCI bus */ for (slot = 0; slot < PCI_SLOT_MAX; slot++) { if (list_empty(&vpci_dev->dev_list[slot])) { - pr_info("vpci: %s: assign to virtual slot %d\n", - pci_name(dev), slot); + dev_info(&dev->dev, "vpci: assign to virtual slot %d\n", + slot); list_add_tail(&dev_entry->list, &vpci_dev->dev_list[slot]); func = dev->is_virtfn ? 0 : PCI_FUNC(dev->devfn); -- GitLab From 64b3eaf3715d24e9215e9552779334b0807eb823 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Wed, 27 May 2020 12:43:26 -0500 Subject: [PATCH 0736/1971] xenbus: Use dev_printk() when possible Use dev_printk() when possible to include device and driver information in the conventional format. Add "#define dev_fmt" to preserve KBUILD_MODNAME in messages. No functional change intended. Signed-off-by: Bjorn Helgaas Link: https://lore.kernel.org/r/20200527174326.254329-3-helgaas@kernel.org Reviewed-by: Boris Ostrovsky Signed-off-by: Boris Ostrovsky --- drivers/xen/xenbus/xenbus_probe.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index 8c4d05b687b78..ee9a9170b43a0 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c @@ -31,6 +31,7 @@ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#define dev_fmt pr_fmt #define DPRINTK(fmt, args...) 
\ pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \ @@ -608,7 +609,7 @@ int xenbus_dev_suspend(struct device *dev) if (drv->suspend) err = drv->suspend(xdev); if (err) - pr_warn("suspend %s failed: %i\n", dev_name(dev), err); + dev_warn(dev, "suspend failed: %i\n", err); return 0; } EXPORT_SYMBOL_GPL(xenbus_dev_suspend); @@ -627,8 +628,7 @@ int xenbus_dev_resume(struct device *dev) drv = to_xenbus_driver(dev->driver); err = talk_to_otherend(xdev); if (err) { - pr_warn("resume (talk_to_otherend) %s failed: %i\n", - dev_name(dev), err); + dev_warn(dev, "resume (talk_to_otherend) failed: %i\n", err); return err; } @@ -637,15 +637,14 @@ int xenbus_dev_resume(struct device *dev) if (drv->resume) { err = drv->resume(xdev); if (err) { - pr_warn("resume %s failed: %i\n", dev_name(dev), err); + dev_warn(dev, "resume failed: %i\n", err); return err; } } err = watch_otherend(xdev); if (err) { - pr_warn("resume (watch_otherend) %s failed: %d.\n", - dev_name(dev), err); + dev_warn(dev, "resume (watch_otherend) failed: %d\n", err); return err; } -- GitLab From 3d89c2ef24f0772b7806ed289bef482b24083de6 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Thu, 28 May 2020 10:48:18 +1000 Subject: [PATCH 0737/1971] KVM: PPC: Book3S HV: Remove user-triggerable WARN_ON Although in general we do not expect valid PTEs to be found in kvmppc_create_pte when we are inserting a large page mapping, there is one situation where this can occur. That is when dirty page logging is turned off for a memslot while the VM is running. Because the new memslots are installed before the old memslot is flushed in kvmppc_core_commit_memory_region_hv(), there is a window where a hypervisor page fault can try to install a 2MB (or 1GB) page where there are already small page mappings which were installed while dirty page logging was enabled and which have not yet been flushed. Since we have a situation where valid PTEs can legitimately be found by kvmppc_unmap_free_pte, and which can be triggered by userspace, just remove the WARN_ON_ONCE, since it is undesirable to have userspace able to trigger a kernel warning. Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s_64_mmu_radix.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index 97b45eaa7014a..bc3f795252fdf 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -429,9 +429,13 @@ void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa, * Callers are responsible for flushing the PWC. * * When page tables are being unmapped/freed as part of page fault path - * (full == false), ptes are not expected. There is code to unmap them - * and emit a warning if encountered, but there may already be data - * corruption due to the unexpected mappings. + * (full == false), valid ptes are generally not expected; however, there + * is one situation where they arise, which is when dirty page logging is + * turned off for a memslot while the VM is running. The new memslot + * becomes visible to page faults before the memslot commit function + * gets to flush the memslot, which can lead to a 2MB page mapping being + * installed for a guest physical address where there are already 64kB + * (or 4kB) mappings (of sub-pages of the same 2MB page). 
*/ static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full, unsigned int lpid) @@ -445,7 +449,6 @@ static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full, for (it = 0; it < PTRS_PER_PTE; ++it, ++p) { if (pte_val(*p) == 0) continue; - WARN_ON_ONCE(1); kvmppc_unmap_pte(kvm, p, pte_pfn(*p) << PAGE_SHIFT, PAGE_SHIFT, NULL, lpid); -- GitLab From 06030c4e33babd63b6630d358a04f3dfb34cc29c Mon Sep 17 00:00:00 2001 From: Lubomir Rintel Date: Wed, 20 May 2020 00:41:39 +0200 Subject: [PATCH 0738/1971] clk: mmp: frac: Do not lose last 4 digits of precision While calculating the output rate of a fractional divider clock, the value is divided and multipled by 10000, discarding the least significant digits -- presumably to fit the intermediate value within 32 bits. The precision we're losing is, however, not insignificant for things like I2S clock. Maybe also elsewhere, now that since commit ea56ad60260e ("clk: mmp2: Stop pretending PLL outputs are constant") the parent rates are more precise and no longer rounded to 10000s. Signed-off-by: Lubomir Rintel Link: https://lkml.kernel.org/r/20200519224151.2074597-2-lkundrak@v3.sk Signed-off-by: Stephen Boyd --- drivers/clk/mmp/clk-frac.c | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/drivers/clk/mmp/clk-frac.c b/drivers/clk/mmp/clk-frac.c index fabc09aca6c46..ed9928f5bdc7f 100644 --- a/drivers/clk/mmp/clk-frac.c +++ b/drivers/clk/mmp/clk-frac.c @@ -28,13 +28,15 @@ static long clk_factor_round_rate(struct clk_hw *hw, unsigned long drate, unsigned long *prate) { struct mmp_clk_factor *factor = to_clk_factor(hw); - unsigned long rate = 0, prev_rate; + u64 rate = 0, prev_rate; int i; for (i = 0; i < factor->ftbl_cnt; i++) { prev_rate = rate; - rate = (((*prate / 10000) * factor->ftbl[i].den) / - (factor->ftbl[i].num * factor->masks->factor)) * 10000; + rate = *prate; + rate *= factor->ftbl[i].den; + do_div(rate, factor->ftbl[i].num * factor->masks->factor); + if (rate > drate) break; } @@ -54,6 +56,7 @@ static unsigned long clk_factor_recalc_rate(struct clk_hw *hw, struct mmp_clk_factor *factor = to_clk_factor(hw); struct mmp_clk_factor_masks *masks = factor->masks; unsigned int val, num, den; + u64 rate; val = readl_relaxed(factor->base); @@ -66,8 +69,11 @@ static unsigned long clk_factor_recalc_rate(struct clk_hw *hw, if (!den) return 0; - return (((parent_rate / 10000) * den) / - (num * factor->masks->factor)) * 10000; + rate = parent_rate; + rate *= den; + do_div(rate, num * factor->masks->factor); + + return rate; } /* Configures new clock rate*/ @@ -78,12 +84,14 @@ static int clk_factor_set_rate(struct clk_hw *hw, unsigned long drate, struct mmp_clk_factor_masks *masks = factor->masks; int i; unsigned long val; - unsigned long rate = 0; unsigned long flags = 0; + u64 rate = 0; for (i = 0; i < factor->ftbl_cnt; i++) { - rate = (((prate / 10000) * factor->ftbl[i].den) / - (factor->ftbl[i].num * factor->masks->factor)) * 10000; + rate = prate; + rate *= factor->ftbl[i].den; + do_div(rate, factor->ftbl[i].num * factor->masks->factor); + if (rate > drate) break; } -- GitLab From 5278acc4418bad18ed677952ca7cd56ce312a87d Mon Sep 17 00:00:00 2001 From: Lubomir Rintel Date: Wed, 20 May 2020 00:41:40 +0200 Subject: [PATCH 0739/1971] clk: mmp: frac: Allow setting bits other than the numerator/denominator For the I2S fractional clocks, there are more bits that need to be set for the clock to run. Their actual meaning is unknown. 
Signed-off-by: Lubomir Rintel Link: https://lkml.kernel.org/r/20200519224151.2074597-3-lkundrak@v3.sk Signed-off-by: Stephen Boyd --- drivers/clk/mmp/clk-frac.c | 3 +++ drivers/clk/mmp/clk.h | 1 + 2 files changed, 4 insertions(+) diff --git a/drivers/clk/mmp/clk-frac.c b/drivers/clk/mmp/clk-frac.c index ed9928f5bdc7f..48f592bd633df 100644 --- a/drivers/clk/mmp/clk-frac.c +++ b/drivers/clk/mmp/clk-frac.c @@ -148,7 +148,10 @@ static int clk_factor_init(struct clk_hw *hw) val &= ~(masks->den_mask << masks->den_shift); val |= (factor->ftbl[0].den & masks->den_mask) << masks->den_shift; + } + if (!(val & masks->enable_mask) || i >= factor->ftbl_cnt) { + val |= masks->enable_mask; writel(val, factor->base); } diff --git a/drivers/clk/mmp/clk.h b/drivers/clk/mmp/clk.h index 971b4d6d992fb..0efd5b0b2f01b 100644 --- a/drivers/clk/mmp/clk.h +++ b/drivers/clk/mmp/clk.h @@ -16,6 +16,7 @@ struct mmp_clk_factor_masks { unsigned int den_mask; unsigned int num_shift; unsigned int den_shift; + unsigned int enable_mask; }; struct mmp_clk_factor_tbl { -- GitLab From edcec4a8691966962f3213089516bf00a5dc4f5c Mon Sep 17 00:00:00 2001 From: Lubomir Rintel Date: Wed, 20 May 2020 00:41:41 +0200 Subject: [PATCH 0740/1971] dt-bindings: marvell,mmp2: Add clock id for the I2S clocks There are two of these on a MMP2. Signed-off-by: Lubomir Rintel Acked-by: Rob Herring Link: https://lkml.kernel.org/r/20200519224151.2074597-4-lkundrak@v3.sk Signed-off-by: Stephen Boyd --- include/dt-bindings/clock/marvell,mmp2.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/include/dt-bindings/clock/marvell,mmp2.h b/include/dt-bindings/clock/marvell,mmp2.h index 06bb7fe4c62f4..5b083f42811e5 100644 --- a/include/dt-bindings/clock/marvell,mmp2.h +++ b/include/dt-bindings/clock/marvell,mmp2.h @@ -29,6 +29,8 @@ #define MMP3_CLK_PLL1_P 28 #define MMP3_CLK_PLL2_P 29 #define MMP3_CLK_PLL3 30 +#define MMP2_CLK_I2S0 31 +#define MMP2_CLK_I2S1 32 /* apb periphrals */ #define MMP2_CLK_TWSI0 60 -- GitLab From c227df7a097a13508eb4c3dd6c84da56d8989aa6 Mon Sep 17 00:00:00 2001 From: Lubomir Rintel Date: Wed, 20 May 2020 00:41:42 +0200 Subject: [PATCH 0741/1971] dt-bindings: marvell,mmp2: Add clock id for the Audio clock This clocks the Audio block. Signed-off-by: Lubomir Rintel Acked-by: Rob Herring Link: https://lkml.kernel.org/r/20200519224151.2074597-5-lkundrak@v3.sk Signed-off-by: Stephen Boyd --- include/dt-bindings/clock/marvell,mmp2.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/dt-bindings/clock/marvell,mmp2.h b/include/dt-bindings/clock/marvell,mmp2.h index 5b083f42811e5..87f5ad5df72f4 100644 --- a/include/dt-bindings/clock/marvell,mmp2.h +++ b/include/dt-bindings/clock/marvell,mmp2.h @@ -89,6 +89,7 @@ #define MMP3_CLK_GPU_3D MMP2_CLK_GPU_3D #define MMP3_CLK_GPU_2D 125 #define MMP3_CLK_SDH4 126 +#define MMP2_CLK_AUDIO 127 #define MMP2_NR_CLKS 200 #endif -- GitLab From 8c2427b8f7c814564bc9e4d483b8b00debb32ab5 Mon Sep 17 00:00:00 2001 From: Lubomir Rintel Date: Wed, 20 May 2020 00:41:43 +0200 Subject: [PATCH 0742/1971] clk: mmp2: Move thermal register defines up a bit A trivial change to keep the sorting sane. The APBC registers are happier when they are grouped together, instead of mixed with the APMU ones. 
Signed-off-by: Lubomir Rintel Link: https://lkml.kernel.org/r/20200519224151.2074597-6-lkundrak@v3.sk Signed-off-by: Stephen Boyd --- drivers/clk/mmp/clk-of-mmp2.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c index 52dc8b43acd9a..524574187c17a 100644 --- a/drivers/clk/mmp/clk-of-mmp2.c +++ b/drivers/clk/mmp/clk-of-mmp2.c @@ -45,6 +45,10 @@ #define APBC_SSP1 0x54 #define APBC_SSP2 0x58 #define APBC_SSP3 0x5c +#define APBC_THERMAL0 0x90 +#define APBC_THERMAL1 0x98 +#define APBC_THERMAL2 0x9c +#define APBC_THERMAL3 0xa0 #define APMU_SDH0 0x54 #define APMU_SDH1 0x58 #define APMU_SDH2 0xe8 @@ -55,10 +59,6 @@ #define APMU_DISP1 0x110 #define APMU_CCIC0 0x50 #define APMU_CCIC1 0xf4 -#define APBC_THERMAL0 0x90 -#define APBC_THERMAL1 0x98 -#define APBC_THERMAL2 0x9c -#define APBC_THERMAL3 0xa0 #define APMU_USBHSIC0 0xf8 #define APMU_USBHSIC1 0xfc #define APMU_GPU 0xcc -- GitLab From 2766c198150e33018e2e008c6a3355e8c19e6af4 Mon Sep 17 00:00:00 2001 From: Lubomir Rintel Date: Wed, 20 May 2020 00:41:44 +0200 Subject: [PATCH 0743/1971] clk: mmp2: Rename mmp2_pll_init() to mmp2_main_clk_init() This is a trivial rename for a routine that registers more clock sources than the PLLs -- there's also a XO. Signed-off-by: Lubomir Rintel Link: https://lkml.kernel.org/r/20200519224151.2074597-7-lkundrak@v3.sk Signed-off-by: Stephen Boyd --- drivers/clk/mmp/clk-of-mmp2.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c index 524574187c17a..ac88ea99b7c68 100644 --- a/drivers/clk/mmp/clk-of-mmp2.c +++ b/drivers/clk/mmp/clk-of-mmp2.c @@ -139,7 +139,7 @@ static struct mmp_clk_factor_tbl uart_factor_tbl[] = { {.num = 3521, .den = 689}, /*19.23MHZ */ }; -static void mmp2_pll_init(struct mmp2_clk_unit *pxa_unit) +static void mmp2_main_clk_init(struct mmp2_clk_unit *pxa_unit) { struct clk *clk; struct mmp_clk_unit *unit = &pxa_unit->unit; @@ -456,7 +456,7 @@ static void __init mmp2_clk_init(struct device_node *np) mmp_clk_init(np, &pxa_unit->unit, MMP2_NR_CLKS); - mmp2_pll_init(pxa_unit); + mmp2_main_clk_init(pxa_unit); mmp2_apb_periph_clk_init(pxa_unit); -- GitLab From 71d8254af9d1c84d88523a28e6ab03878612e4a5 Mon Sep 17 00:00:00 2001 From: Lubomir Rintel Date: Wed, 20 May 2020 00:41:45 +0200 Subject: [PATCH 0744/1971] clk: mmp2: Add the I2S clocks A pair of fractional clock sources for PLLs and gates. 
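The rates noted in the factor table below follow from the clk-frac formula,
rate = parent * den / (num * factor), with factor = 2 and pll1_4 as the
parent (assumed here to run at roughly 199333333 Hz, i.e. pll1/4). For the
first table entry, for example:

    rate = 199333333 * 511 / (24868 * 2) ~= 2048000 Hz   /* 2.0480 MHz */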
Signed-off-by: Lubomir Rintel Link: https://lkml.kernel.org/r/20200519224151.2074597-8-lkundrak@v3.sk Signed-off-by: Stephen Boyd --- drivers/clk/mmp/clk-of-mmp2.c | 46 +++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c index ac88ea99b7c68..dcdff06a698ac 100644 --- a/drivers/clk/mmp/clk-of-mmp2.c +++ b/drivers/clk/mmp/clk-of-mmp2.c @@ -67,6 +67,9 @@ #define MPMU_POSR 0x10 #define MPMU_UART_PLL 0x14 #define MPMU_PLL2_CR 0x34 +#define MPMU_I2S0_PLL 0x40 +#define MPMU_I2S1_PLL 0x44 +#define MPMU_ACGR 0x1024 /* MMP3 specific below */ #define MPMU_PLL3_CR 0x50 #define MPMU_PLL3_CTRL1 0x58 @@ -91,6 +94,7 @@ static struct mmp_param_fixed_rate_clk fixed_rate_clks[] = { {MMP2_CLK_CLK32, "clk32", NULL, 0, 32768}, {MMP2_CLK_VCTCXO, "vctcxo", NULL, 0, 26000000}, {MMP2_CLK_USB_PLL, "usb_pll", NULL, 0, 480000000}, + {0, "i2s_pll", NULL, 0, 99666667}, }; static struct mmp_param_pll_clk pll_clks[] = { @@ -139,6 +143,34 @@ static struct mmp_clk_factor_tbl uart_factor_tbl[] = { {.num = 3521, .den = 689}, /*19.23MHZ */ }; +static struct mmp_clk_factor_masks i2s_factor_masks = { + .factor = 2, + .num_mask = 0x7fff, + .den_mask = 0x1fff, + .num_shift = 0, + .den_shift = 15, + .enable_mask = 0xd0000000, +}; + +static struct mmp_clk_factor_tbl i2s_factor_tbl[] = { + {.num = 24868, .den = 511}, /* 2.0480 MHz */ + {.num = 28003, .den = 793}, /* 2.8224 MHz */ + {.num = 24941, .den = 1025}, /* 4.0960 MHz */ + {.num = 28003, .den = 1586}, /* 5.6448 MHz */ + {.num = 31158, .den = 2561}, /* 8.1920 MHz */ + {.num = 16288, .den = 1845}, /* 11.2896 MHz */ + {.num = 20772, .den = 2561}, /* 12.2880 MHz */ + {.num = 8144, .den = 1845}, /* 22.5792 MHz */ + {.num = 10386, .den = 2561}, /* 24.5760 MHz */ +}; + +static DEFINE_SPINLOCK(acgr_lock); + +static struct mmp_param_gate_clk mpmu_gate_clks[] = { + {MMP2_CLK_I2S0, "i2s0_clk", "i2s0_pll", CLK_SET_RATE_PARENT, MPMU_ACGR, 0x200000, 0x200000, 0x0, 0, &acgr_lock}, + {MMP2_CLK_I2S1, "i2s1_clk", "i2s1_pll", CLK_SET_RATE_PARENT, MPMU_ACGR, 0x100000, 0x100000, 0x0, 0, &acgr_lock}, +}; + static void mmp2_main_clk_init(struct mmp2_clk_unit *pxa_unit) { struct clk *clk; @@ -166,6 +198,20 @@ static void mmp2_main_clk_init(struct mmp2_clk_unit *pxa_unit) &uart_factor_masks, uart_factor_tbl, ARRAY_SIZE(uart_factor_tbl), NULL); mmp_clk_add(unit, MMP2_CLK_UART_PLL, clk); + + mmp_clk_register_factor("i2s0_pll", "pll1_4", + CLK_SET_RATE_PARENT, + pxa_unit->mpmu_base + MPMU_I2S0_PLL, + &i2s_factor_masks, i2s_factor_tbl, + ARRAY_SIZE(i2s_factor_tbl), NULL); + mmp_clk_register_factor("i2s1_pll", "pll1_4", + CLK_SET_RATE_PARENT, + pxa_unit->mpmu_base + MPMU_I2S1_PLL, + &i2s_factor_masks, i2s_factor_tbl, + ARRAY_SIZE(i2s_factor_tbl), NULL); + + mmp_register_gate_clks(unit, mpmu_gate_clks, pxa_unit->mpmu_base, + ARRAY_SIZE(mpmu_gate_clks)); } static DEFINE_SPINLOCK(uart0_lock); -- GitLab From 232a3134353bbdc7e6a777f408b18488607bcf20 Mon Sep 17 00:00:00 2001 From: Lubomir Rintel Date: Wed, 20 May 2020 00:41:46 +0200 Subject: [PATCH 0745/1971] clk: mmp2: Add the audio clock This clocks the Audio block. 
Signed-off-by: Lubomir Rintel Link: https://lkml.kernel.org/r/20200519224151.2074597-9-lkundrak@v3.sk Signed-off-by: Stephen Boyd --- drivers/clk/mmp/clk-of-mmp2.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c index dcdff06a698ac..c686c16fca82b 100644 --- a/drivers/clk/mmp/clk-of-mmp2.c +++ b/drivers/clk/mmp/clk-of-mmp2.c @@ -62,6 +62,7 @@ #define APMU_USBHSIC0 0xf8 #define APMU_USBHSIC1 0xfc #define APMU_GPU 0xcc +#define APMU_AUDIO 0x10c #define MPMU_FCCR 0x8 #define MPMU_POSR 0x10 @@ -317,6 +318,8 @@ static u32 mmp2_gpu_bus_parent_table[] = { 0x0000, 0x0020, 0x0030, static const char * const mmp3_gpu_bus_parent_names[] = {"pll1_4", "pll1_6", "pll1_2", "pll2_2"}; static const char * const mmp3_gpu_gc_parent_names[] = {"pll1", "pll2", "pll1_p", "pll2_p"}; +static DEFINE_SPINLOCK(audio_lock); + static struct mmp_clk_mix_config ccic0_mix_config = { .reg_info = DEFINE_MIX_REG_INFO(4, 17, 2, 6, 32), }; @@ -372,6 +375,7 @@ static struct mmp_param_gate_clk apmu_gate_clks[] = { {MMP2_CLK_CCIC1_PHY, "ccic1_phy_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x24, 0x24, 0x0, 0, &ccic1_lock}, {MMP2_CLK_CCIC1_SPHY, "ccic1_sphy_clk", "ccic1_sphy_div", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x300, 0x300, 0x0, 0, &ccic1_lock}, {MMP2_CLK_GPU_BUS, "gpu_bus_clk", "gpu_bus_mux", CLK_SET_RATE_PARENT, APMU_GPU, 0xa, 0xa, 0x0, MMP_CLK_GATE_NEED_DELAY, &gpu_lock}, + {MMP2_CLK_AUDIO, "audio_clk", "audio_mix_clk", CLK_SET_RATE_PARENT, APMU_AUDIO, 0x12, 0x12, 0x0, 0, &audio_lock}, }; static struct mmp_param_gate_clk mmp2_apmu_gate_clks[] = { -- GitLab From ec6bbddef634224f1e6cdd8a225d7f87d53cb1b1 Mon Sep 17 00:00:00 2001 From: Lubomir Rintel Date: Wed, 20 May 2020 00:41:47 +0200 Subject: [PATCH 0746/1971] dt-bindings: clock: Make marvell,mmp2-clock a power controller This is a binding for the MMP2 power management units. As such apart from providing the clocks, they also manage the power islands. Signed-off-by: Lubomir Rintel Link: https://lkml.kernel.org/r/20200519224151.2074597-10-lkundrak@v3.sk Acked-by: Rob Herring Signed-off-by: Stephen Boyd --- .../devicetree/bindings/clock/marvell,mmp2-clock.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/Documentation/devicetree/bindings/clock/marvell,mmp2-clock.yaml b/Documentation/devicetree/bindings/clock/marvell,mmp2-clock.yaml index e2b6ac96bbcb0..d68f0d196e7d3 100644 --- a/Documentation/devicetree/bindings/clock/marvell,mmp2-clock.yaml +++ b/Documentation/devicetree/bindings/clock/marvell,mmp2-clock.yaml @@ -42,12 +42,16 @@ properties: '#reset-cells': const: 1 + '#power-domain-cells': + const: 1 + required: - compatible - reg - reg-names - '#clock-cells' - '#reset-cells' + - '#power-domain-cells' additionalProperties: false @@ -61,4 +65,5 @@ examples: reg-names = "mpmu", "apmu", "apbc"; #clock-cells = <1>; #reset-cells = <1>; + #power-domain-cells = <1>; }; -- GitLab From 17d43046fd4c939448576480f02423686524adb0 Mon Sep 17 00:00:00 2001 From: Lubomir Rintel Date: Wed, 20 May 2020 00:41:48 +0200 Subject: [PATCH 0747/1971] dt-bindings: marvell,mmp2: Add ids for the power domains On MMP2 the audio and GPU blocks are on separate power islands. On MMP3 the camera block's power is also controlled separately. Add the numbers that we could use to refer to the power domains for respective power islands from the device tree. 
Signed-off-by: Lubomir Rintel Acked-by: Rob Herring Link: https://lkml.kernel.org/r/20200519224151.2074597-11-lkundrak@v3.sk Signed-off-by: Stephen Boyd --- include/dt-bindings/power/marvell,mmp2.h | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 include/dt-bindings/power/marvell,mmp2.h diff --git a/include/dt-bindings/power/marvell,mmp2.h b/include/dt-bindings/power/marvell,mmp2.h new file mode 100644 index 0000000000000..c53d2b3e10574 --- /dev/null +++ b/include/dt-bindings/power/marvell,mmp2.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DTS_MARVELL_MMP2_POWER_H +#define __DTS_MARVELL_MMP2_POWER_H + +#define MMP2_POWER_DOMAIN_GPU 0 +#define MMP2_POWER_DOMAIN_AUDIO 1 +#define MMP3_POWER_DOMAIN_CAMERA 2 + +#define MMP2_NR_POWER_DOMAINS 3 + +#endif -- GitLab From ee4df2363439c80bef693a2255ede06f5bc42ce6 Mon Sep 17 00:00:00 2001 From: Lubomir Rintel Date: Wed, 20 May 2020 00:41:49 +0200 Subject: [PATCH 0748/1971] clk: mmp2: Add support for power islands Apart from the clocks and resets, the PMU hardware also controls power to peripherals that are on separate power islands. On MMP2, that's the GC860 GPU and the SSPA audio interface, while on MMP3 also the camera interface is on a separate island, along with the pair of GC2000 and GC300 GPUs and the SSPA. Signed-off-by: Lubomir Rintel Link: https://lkml.kernel.org/r/20200519224151.2074597-12-lkundrak@v3.sk Signed-off-by: Stephen Boyd --- arch/arm/mach-mmp/Kconfig | 2 + drivers/clk/mmp/Makefile | 2 +- drivers/clk/mmp/clk-of-mmp2.c | 42 +++++++++++++ drivers/clk/mmp/clk.h | 10 +++ drivers/clk/mmp/pwr-island.c | 115 ++++++++++++++++++++++++++++++++++ 5 files changed, 170 insertions(+), 1 deletion(-) create mode 100644 drivers/clk/mmp/pwr-island.c diff --git a/arch/arm/mach-mmp/Kconfig b/arch/arm/mach-mmp/Kconfig index b58a03b18bdef..8a1519e6be6f9 100644 --- a/arch/arm/mach-mmp/Kconfig +++ b/arch/arm/mach-mmp/Kconfig @@ -125,6 +125,8 @@ config MACH_MMP2_DT select PINCTRL_SINGLE select ARCH_HAS_RESET_CONTROLLER select CPU_PJ4 + select PM_GENERIC_DOMAINS if PM + select PM_GENERIC_DOMAINS_OF if PM && OF help Include support for Marvell MMP2 based platforms using the device tree. 
diff --git a/drivers/clk/mmp/Makefile b/drivers/clk/mmp/Makefile index 14dc8a8a9d087..f9fab883a13b2 100644 --- a/drivers/clk/mmp/Makefile +++ b/drivers/clk/mmp/Makefile @@ -8,7 +8,7 @@ obj-y += clk-apbc.o clk-apmu.o clk-frac.o clk-mix.o clk-gate.o clk.o obj-$(CONFIG_RESET_CONTROLLER) += reset.o obj-$(CONFIG_MACH_MMP_DT) += clk-of-pxa168.o clk-of-pxa910.o -obj-$(CONFIG_COMMON_CLK_MMP2) += clk-of-mmp2.o clk-pll.o +obj-$(CONFIG_COMMON_CLK_MMP2) += clk-of-mmp2.o clk-pll.o pwr-island.o obj-$(CONFIG_CPU_PXA168) += clk-pxa168.o obj-$(CONFIG_CPU_PXA910) += clk-pxa910.o diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c index c686c16fca82b..67208aea94c5c 100644 --- a/drivers/clk/mmp/clk-of-mmp2.c +++ b/drivers/clk/mmp/clk-of-mmp2.c @@ -17,8 +17,10 @@ #include #include #include +#include #include +#include #include "clk.h" #include "reset.h" @@ -63,6 +65,7 @@ #define APMU_USBHSIC1 0xfc #define APMU_GPU 0xcc #define APMU_AUDIO 0x10c +#define APMU_CAMERA 0x1fc #define MPMU_FCCR 0x8 #define MPMU_POSR 0x10 @@ -86,6 +89,8 @@ enum mmp2_clk_model { struct mmp2_clk_unit { struct mmp_clk_unit unit; enum mmp2_clk_model model; + struct genpd_onecell_data pd_data; + struct generic_pm_domain *pm_domains[MMP2_NR_POWER_DOMAINS]; void __iomem *mpmu_base; void __iomem *apmu_base; void __iomem *apbc_base; @@ -473,6 +478,41 @@ static void mmp2_clk_reset_init(struct device_node *np, mmp_clk_reset_register(np, cells, nr_resets); } +static void mmp2_pm_domain_init(struct device_node *np, + struct mmp2_clk_unit *pxa_unit) +{ + if (pxa_unit->model == CLK_MODEL_MMP3) { + pxa_unit->pm_domains[MMP2_POWER_DOMAIN_GPU] + = mmp_pm_domain_register("gpu", + pxa_unit->apmu_base + APMU_GPU, + 0x0600, 0x40003, 0x18000c, 0, &gpu_lock); + } else { + pxa_unit->pm_domains[MMP2_POWER_DOMAIN_GPU] + = mmp_pm_domain_register("gpu", + pxa_unit->apmu_base + APMU_GPU, + 0x8600, 0x00003, 0x00000c, + MMP_PM_DOMAIN_NO_DISABLE, &gpu_lock); + } + pxa_unit->pd_data.num_domains++; + + pxa_unit->pm_domains[MMP2_POWER_DOMAIN_AUDIO] + = mmp_pm_domain_register("audio", + pxa_unit->apmu_base + APMU_AUDIO, + 0x600, 0x2, 0, 0, &audio_lock); + pxa_unit->pd_data.num_domains++; + + if (pxa_unit->model == CLK_MODEL_MMP3) { + pxa_unit->pm_domains[MMP3_POWER_DOMAIN_CAMERA] + = mmp_pm_domain_register("camera", + pxa_unit->apmu_base + APMU_CAMERA, + 0x600, 0, 0, 0, NULL); + pxa_unit->pd_data.num_domains++; + } + + pxa_unit->pd_data.domains = pxa_unit->pm_domains; + of_genpd_add_provider_onecell(np, &pxa_unit->pd_data); +} + static void __init mmp2_clk_init(struct device_node *np) { struct mmp2_clk_unit *pxa_unit; @@ -504,6 +544,8 @@ static void __init mmp2_clk_init(struct device_node *np) goto unmap_apmu_region; } + mmp2_pm_domain_init(np, pxa_unit); + mmp_clk_init(np, &pxa_unit->unit, MMP2_NR_CLKS); mmp2_main_clk_init(pxa_unit); diff --git a/drivers/clk/mmp/clk.h b/drivers/clk/mmp/clk.h index 0efd5b0b2f01b..bfa2adc24a7cc 100644 --- a/drivers/clk/mmp/clk.h +++ b/drivers/clk/mmp/clk.h @@ -3,6 +3,7 @@ #define __MACH_MMP_CLK_H #include +#include #include #define APBC_NO_BUS_CTRL BIT(0) @@ -259,4 +260,13 @@ void mmp_clk_init(struct device_node *np, struct mmp_clk_unit *unit, int nr_clks); void mmp_clk_add(struct mmp_clk_unit *unit, unsigned int id, struct clk *clk); + +/* Power islands */ +#define MMP_PM_DOMAIN_NO_DISABLE BIT(0) + +struct generic_pm_domain *mmp_pm_domain_register(const char *name, + void __iomem *reg, + u32 power_on, u32 reset, u32 clock_enable, + unsigned int flags, spinlock_t *lock); + #endif diff --git 
a/drivers/clk/mmp/pwr-island.c b/drivers/clk/mmp/pwr-island.c new file mode 100644 index 0000000000000..ab57c0e995c1d --- /dev/null +++ b/drivers/clk/mmp/pwr-island.c @@ -0,0 +1,115 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * MMP PMU power island support + * + * Copyright (C) 2020 Lubomir Rintel + */ + +#include +#include +#include + +#include "clk.h" + +#define to_mmp_pm_domain(genpd) container_of(genpd, struct mmp_pm_domain, genpd) + +struct mmp_pm_domain { + struct generic_pm_domain genpd; + void __iomem *reg; + spinlock_t *lock; + u32 power_on; + u32 reset; + u32 clock_enable; + unsigned int flags; +}; + +static int mmp_pm_domain_power_on(struct generic_pm_domain *genpd) +{ + struct mmp_pm_domain *pm_domain = to_mmp_pm_domain(genpd); + unsigned long flags = 0; + u32 val; + + if (pm_domain->lock) + spin_lock_irqsave(pm_domain->lock, flags); + + val = readl(pm_domain->reg); + + /* Turn on the power island */ + val |= pm_domain->power_on; + writel(val, pm_domain->reg); + + /* Disable isolation */ + val |= 0x100; + writel(val, pm_domain->reg); + + /* Some blocks need to be reset after a power up */ + if (pm_domain->reset || pm_domain->clock_enable) { + u32 after_power_on = val; + + val &= ~pm_domain->reset; + writel(val, pm_domain->reg); + + val |= pm_domain->clock_enable; + writel(val, pm_domain->reg); + + val |= pm_domain->reset; + writel(val, pm_domain->reg); + + writel(after_power_on, pm_domain->reg); + } + + if (pm_domain->lock) + spin_unlock_irqrestore(pm_domain->lock, flags); + + return 0; +} + +static int mmp_pm_domain_power_off(struct generic_pm_domain *genpd) +{ + struct mmp_pm_domain *pm_domain = to_mmp_pm_domain(genpd); + unsigned long flags = 0; + u32 val; + + if (pm_domain->flags & MMP_PM_DOMAIN_NO_DISABLE) + return 0; + + if (pm_domain->lock) + spin_lock_irqsave(pm_domain->lock, flags); + + /* Turn off and isolate the the power island. */ + val = readl(pm_domain->reg); + val &= ~pm_domain->power_on; + val &= ~0x100; + writel(val, pm_domain->reg); + + if (pm_domain->lock) + spin_unlock_irqrestore(pm_domain->lock, flags); + + return 0; +} + +struct generic_pm_domain *mmp_pm_domain_register(const char *name, + void __iomem *reg, + u32 power_on, u32 reset, u32 clock_enable, + unsigned int flags, spinlock_t *lock) +{ + struct mmp_pm_domain *pm_domain; + + pm_domain = kzalloc(sizeof(*pm_domain), GFP_KERNEL); + if (!pm_domain) + return ERR_PTR(-ENOMEM); + + pm_domain->reg = reg; + pm_domain->power_on = power_on; + pm_domain->reset = reset; + pm_domain->clock_enable = clock_enable; + pm_domain->flags = flags; + pm_domain->lock = lock; + + pm_genpd_init(&pm_domain->genpd, NULL, true); + pm_domain->genpd.name = name; + pm_domain->genpd.power_on = mmp_pm_domain_power_on; + pm_domain->genpd.power_off = mmp_pm_domain_power_off; + + return &pm_domain->genpd; +} -- GitLab From e787c5b72517428cd1159637a454c64c3b0c90fb Mon Sep 17 00:00:00 2001 From: Lubomir Rintel Date: Wed, 20 May 2020 00:41:50 +0200 Subject: [PATCH 0749/1971] dt-bindings: clock: Add Marvell MMP Audio Clock Controller binding This describes the bindings for a controller that generates master and bit clocks for the I2S interface. 
Signed-off-by: Lubomir Rintel Link: https://lkml.kernel.org/r/20200519224151.2074597-13-lkundrak@v3.sk Reviewed-by: Rob Herring Signed-off-by: Stephen Boyd --- .../clock/marvell,mmp2-audio-clock.yaml | 74 +++++++++++++++++++ .../dt-bindings/clock/marvell,mmp2-audio.h | 10 +++ 2 files changed, 84 insertions(+) create mode 100644 Documentation/devicetree/bindings/clock/marvell,mmp2-audio-clock.yaml create mode 100644 include/dt-bindings/clock/marvell,mmp2-audio.h diff --git a/Documentation/devicetree/bindings/clock/marvell,mmp2-audio-clock.yaml b/Documentation/devicetree/bindings/clock/marvell,mmp2-audio-clock.yaml new file mode 100644 index 0000000000000..ab6e82d1d3a9e --- /dev/null +++ b/Documentation/devicetree/bindings/clock/marvell,mmp2-audio-clock.yaml @@ -0,0 +1,74 @@ +# SPDX-License-Identifier: (GPL-2.0+ OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/clock/marvell,mmp2-audio-clock.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Marvell MMP2 Audio Clock Controller + +maintainers: + - Lubomir Rintel + +description: | + The audio clock controller generates and supplies the clocks to the audio + codec. + + Each clock is assigned an identifier and client nodes use this identifier + to specify the clock which they consume. + + All these identifiers could be found in + . + +properties: + compatible: + enum: + - marvell,mmp2-audio-clock + + reg: + maxItems: 1 + + clocks: + items: + - description: Audio subsystem clock + - description: The crystal oscillator clock + - description: First I2S clock + - description: Second I2S clock + + clock-names: + items: + - const: audio + - const: vctcxo + - const: i2s0 + - const: i2s1 + + '#clock-cells': + const: 1 + + power-domains: + maxItems: 1 + +required: + - compatible + - reg + - clocks + - clock-names + - '#clock-cells' + +additionalProperties: false + +examples: + - | + #include + #include + + clock-controller@d42a0c30 { + compatible = "marvell,mmp2-audio-clock"; + reg = <0xd42a0c30 0x10>; + clock-names = "audio", "vctcxo", "i2s0", "i2s1"; + clocks = <&soc_clocks MMP2_CLK_AUDIO>, + <&soc_clocks MMP2_CLK_VCTCXO>, + <&soc_clocks MMP2_CLK_I2S0>, + <&soc_clocks MMP2_CLK_I2S1>; + power-domains = <&soc_clocks MMP2_POWER_DOMAIN_AUDIO>; + #clock-cells = <1>; + }; diff --git a/include/dt-bindings/clock/marvell,mmp2-audio.h b/include/dt-bindings/clock/marvell,mmp2-audio.h new file mode 100644 index 0000000000000..20664776f4973 --- /dev/null +++ b/include/dt-bindings/clock/marvell,mmp2-audio.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-2-Clause) */ +#ifndef __DT_BINDINGS_CLOCK_MARVELL_MMP2_AUDIO_H +#define __DT_BINDINGS_CLOCK_MARVELL_MMP2_AUDIO_H + +#define MMP2_CLK_AUDIO_SYSCLK 0 +#define MMP2_CLK_AUDIO_SSPA0 1 +#define MMP2_CLK_AUDIO_SSPA1 2 + +#define MMP2_CLK_AUDIO_NR_CLKS 3 +#endif -- GitLab From 725262d29139cc8dd0c7dddbbd097c02361d0e5e Mon Sep 17 00:00:00 2001 From: Lubomir Rintel Date: Wed, 20 May 2020 00:41:51 +0200 Subject: [PATCH 0750/1971] clk: mmp2: Add audio clock controller driver This is a driver for a block that generates master and bit clocks for the I2S interface. It's separate from the PMUs that generate clocks for the peripherals. 
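The clock topology registered below is roughly the following (the i2s0/i2s1
inputs are the PMU clocks added earlier in this series; the sspa1 branch
mirrors sspa0):

    vctcxo --> audio_pll --+--> sspa_mux (audio_pll/i2s0) --+--> sys_div   --> sys_clk
                           |                                +--> sspa0_div --> sspa0_clk
                           +--> sspa1_mux (audio_pll/i2s1) ----> sspa1_div --> sspa1_clk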
Signed-off-by: Lubomir Rintel Link: https://lkml.kernel.org/r/20200519224151.2074597-14-lkundrak@v3.sk Signed-off-by: Stephen Boyd --- drivers/clk/Kconfig | 6 + drivers/clk/mmp/Makefile | 1 + drivers/clk/mmp/clk-audio.c | 443 ++++++++++++++++++++++++++++++++++++ 3 files changed, 450 insertions(+) create mode 100644 drivers/clk/mmp/clk-audio.c diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index bcb257baed06d..a28cf98ffe683 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -326,6 +326,12 @@ config COMMON_CLK_MMP2 help Support for Marvell MMP2 and MMP3 SoC clocks +config COMMON_CLK_MMP2_AUDIO + tristate "Clock driver for MMP2 Audio subsystem" + depends on COMMON_CLK_MMP2 || COMPILE_TEST + help + This driver supports clocks for Audio subsystem on MMP2 SoC. + config COMMON_CLK_BD718XX tristate "Clock driver for 32K clk gates on ROHM PMICs" depends on MFD_ROHM_BD718XX || MFD_ROHM_BD70528 || MFD_ROHM_BD71828 diff --git a/drivers/clk/mmp/Makefile b/drivers/clk/mmp/Makefile index f9fab883a13b2..cbcc2f8430a2b 100644 --- a/drivers/clk/mmp/Makefile +++ b/drivers/clk/mmp/Makefile @@ -9,6 +9,7 @@ obj-$(CONFIG_RESET_CONTROLLER) += reset.o obj-$(CONFIG_MACH_MMP_DT) += clk-of-pxa168.o clk-of-pxa910.o obj-$(CONFIG_COMMON_CLK_MMP2) += clk-of-mmp2.o clk-pll.o pwr-island.o +obj-$(CONFIG_COMMON_CLK_MMP2_AUDIO) += clk-audio.o obj-$(CONFIG_CPU_PXA168) += clk-pxa168.o obj-$(CONFIG_CPU_PXA910) += clk-pxa910.o diff --git a/drivers/clk/mmp/clk-audio.c b/drivers/clk/mmp/clk-audio.c new file mode 100644 index 0000000000000..eea69d498bd27 --- /dev/null +++ b/drivers/clk/mmp/clk-audio.c @@ -0,0 +1,443 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * MMP Audio Clock Controller driver + * + * Copyright (C) 2020 Lubomir Rintel + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +/* Audio Controller Registers */ +#define SSPA_AUD_CTRL 0x04 +#define SSPA_AUD_PLL_CTRL0 0x08 +#define SSPA_AUD_PLL_CTRL1 0x0c + +/* SSPA Audio Control Register */ +#define SSPA_AUD_CTRL_SYSCLK_SHIFT 0 +#define SSPA_AUD_CTRL_SYSCLK_DIV_SHIFT 1 +#define SSPA_AUD_CTRL_SSPA0_MUX_SHIFT 7 +#define SSPA_AUD_CTRL_SSPA0_SHIFT 8 +#define SSPA_AUD_CTRL_SSPA0_DIV_SHIFT 9 +#define SSPA_AUD_CTRL_SSPA1_SHIFT 16 +#define SSPA_AUD_CTRL_SSPA1_DIV_SHIFT 17 +#define SSPA_AUD_CTRL_SSPA1_MUX_SHIFT 23 +#define SSPA_AUD_CTRL_DIV_MASK 0x7e + +/* SSPA Audio PLL Control 0 Register */ +#define SSPA_AUD_PLL_CTRL0_DIV_OCLK_MODULO_MASK (0x7 << 28) +#define SSPA_AUD_PLL_CTRL0_DIV_OCLK_MODULO(x) ((x) << 28) +#define SSPA_AUD_PLL_CTRL0_FRACT_MASK (0xfffff << 8) +#define SSPA_AUD_PLL_CTRL0_FRACT(x) ((x) << 8) +#define SSPA_AUD_PLL_CTRL0_ENA_DITHER (1 << 7) +#define SSPA_AUD_PLL_CTRL0_ICP_2UA (0 << 5) +#define SSPA_AUD_PLL_CTRL0_ICP_5UA (1 << 5) +#define SSPA_AUD_PLL_CTRL0_ICP_7UA (2 << 5) +#define SSPA_AUD_PLL_CTRL0_ICP_10UA (3 << 5) +#define SSPA_AUD_PLL_CTRL0_DIV_FBCCLK_MASK (0x3 << 3) +#define SSPA_AUD_PLL_CTRL0_DIV_FBCCLK(x) ((x) << 3) +#define SSPA_AUD_PLL_CTRL0_DIV_MCLK_MASK (0x1 << 2) +#define SSPA_AUD_PLL_CTRL0_DIV_MCLK(x) ((x) << 2) +#define SSPA_AUD_PLL_CTRL0_PD_OVPROT_DIS (1 << 1) +#define SSPA_AUD_PLL_CTRL0_PU (1 << 0) + +/* SSPA Audio PLL Control 1 Register */ +#define SSPA_AUD_PLL_CTRL1_SEL_FAST_CLK (1 << 24) +#define SSPA_AUD_PLL_CTRL1_CLK_SEL_MASK (1 << 11) +#define SSPA_AUD_PLL_CTRL1_CLK_SEL_AUDIO_PLL (1 << 11) +#define SSPA_AUD_PLL_CTRL1_CLK_SEL_VCXO (0 << 11) +#define SSPA_AUD_PLL_CTRL1_DIV_OCLK_PATTERN_MASK (0x7ff << 0) +#define SSPA_AUD_PLL_CTRL1_DIV_OCLK_PATTERN(x) ((x) << 0) + +struct 
mmp2_audio_clk { + void __iomem *mmio_base; + + struct clk_hw audio_pll_hw; + struct clk_mux sspa_mux; + struct clk_mux sspa1_mux; + struct clk_divider sysclk_div; + struct clk_divider sspa0_div; + struct clk_divider sspa1_div; + struct clk_gate sysclk_gate; + struct clk_gate sspa0_gate; + struct clk_gate sspa1_gate; + + u32 aud_ctrl; + u32 aud_pll_ctrl0; + u32 aud_pll_ctrl1; + + spinlock_t lock; + + /* Must be last */ + struct clk_hw_onecell_data clk_data; +}; + +static const struct { + unsigned long parent_rate; + unsigned long freq_vco; + unsigned char mclk; + unsigned char fbcclk; + unsigned short fract; +} predivs[] = { + { 26000000, 135475200, 0, 0, 0x8a18 }, + { 26000000, 147456000, 0, 1, 0x0da1 }, + { 38400000, 135475200, 1, 2, 0x8208 }, + { 38400000, 147456000, 1, 3, 0xaaaa }, +}; + +static const struct { + unsigned char divisor; + unsigned char modulo; + unsigned char pattern; +} postdivs[] = { + { 1, 3, 0, }, + { 2, 5, 0, }, + { 4, 0, 0, }, + { 6, 1, 1, }, + { 8, 1, 0, }, + { 9, 1, 2, }, + { 12, 2, 1, }, + { 16, 2, 0, }, + { 18, 2, 2, }, + { 24, 4, 1, }, + { 36, 4, 2, }, + { 48, 6, 1, }, + { 72, 6, 2, }, +}; + +static unsigned long audio_pll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct mmp2_audio_clk *priv = container_of(hw, struct mmp2_audio_clk, audio_pll_hw); + unsigned int prediv; + unsigned int postdiv; + u32 aud_pll_ctrl0; + u32 aud_pll_ctrl1; + + aud_pll_ctrl0 = readl(priv->mmio_base + SSPA_AUD_PLL_CTRL0); + aud_pll_ctrl0 &= SSPA_AUD_PLL_CTRL0_DIV_OCLK_MODULO_MASK | + SSPA_AUD_PLL_CTRL0_FRACT_MASK | + SSPA_AUD_PLL_CTRL0_ENA_DITHER | + SSPA_AUD_PLL_CTRL0_DIV_FBCCLK_MASK | + SSPA_AUD_PLL_CTRL0_DIV_MCLK_MASK | + SSPA_AUD_PLL_CTRL0_PU; + + aud_pll_ctrl1 = readl(priv->mmio_base + SSPA_AUD_PLL_CTRL1); + aud_pll_ctrl1 &= SSPA_AUD_PLL_CTRL1_CLK_SEL_MASK | + SSPA_AUD_PLL_CTRL1_DIV_OCLK_PATTERN_MASK; + + for (prediv = 0; prediv < ARRAY_SIZE(predivs); prediv++) { + if (predivs[prediv].parent_rate != parent_rate) + continue; + for (postdiv = 0; postdiv < ARRAY_SIZE(postdivs); postdiv++) { + unsigned long freq; + u32 val; + + val = SSPA_AUD_PLL_CTRL0_ENA_DITHER; + val |= SSPA_AUD_PLL_CTRL0_PU; + val |= SSPA_AUD_PLL_CTRL0_DIV_OCLK_MODULO(postdivs[postdiv].modulo); + val |= SSPA_AUD_PLL_CTRL0_FRACT(predivs[prediv].fract); + val |= SSPA_AUD_PLL_CTRL0_DIV_FBCCLK(predivs[prediv].fbcclk); + val |= SSPA_AUD_PLL_CTRL0_DIV_MCLK(predivs[prediv].mclk); + if (val != aud_pll_ctrl0) + continue; + + val = SSPA_AUD_PLL_CTRL1_CLK_SEL_AUDIO_PLL; + val |= SSPA_AUD_PLL_CTRL1_DIV_OCLK_PATTERN(postdivs[postdiv].pattern); + if (val != aud_pll_ctrl1) + continue; + + freq = predivs[prediv].freq_vco; + freq /= postdivs[postdiv].divisor; + return freq; + } + } + + return 0; +} + +static long audio_pll_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + unsigned int prediv; + unsigned int postdiv; + long rounded = 0; + + for (prediv = 0; prediv < ARRAY_SIZE(predivs); prediv++) { + if (predivs[prediv].parent_rate != *parent_rate) + continue; + for (postdiv = 0; postdiv < ARRAY_SIZE(postdivs); postdiv++) { + long freq = predivs[prediv].freq_vco; + + freq /= postdivs[postdiv].divisor; + if (freq == rate) + return rate; + if (freq < rate) + continue; + if (rounded && freq > rounded) + continue; + rounded = freq; + } + } + + return rounded; +} + +static int audio_pll_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct mmp2_audio_clk *priv = container_of(hw, struct mmp2_audio_clk, audio_pll_hw); + unsigned int prediv; + 
unsigned int postdiv; + unsigned long val; + + for (prediv = 0; prediv < ARRAY_SIZE(predivs); prediv++) { + if (predivs[prediv].parent_rate != parent_rate) + continue; + + for (postdiv = 0; postdiv < ARRAY_SIZE(postdivs); postdiv++) { + if (rate * postdivs[postdiv].divisor != predivs[prediv].freq_vco) + continue; + + val = SSPA_AUD_PLL_CTRL0_ENA_DITHER; + val |= SSPA_AUD_PLL_CTRL0_PU; + val |= SSPA_AUD_PLL_CTRL0_DIV_OCLK_MODULO(postdivs[postdiv].modulo); + val |= SSPA_AUD_PLL_CTRL0_FRACT(predivs[prediv].fract); + val |= SSPA_AUD_PLL_CTRL0_DIV_FBCCLK(predivs[prediv].fbcclk); + val |= SSPA_AUD_PLL_CTRL0_DIV_MCLK(predivs[prediv].mclk); + writel(val, priv->mmio_base + SSPA_AUD_PLL_CTRL0); + + val = SSPA_AUD_PLL_CTRL1_CLK_SEL_AUDIO_PLL; + val |= SSPA_AUD_PLL_CTRL1_DIV_OCLK_PATTERN(postdivs[postdiv].pattern); + writel(val, priv->mmio_base + SSPA_AUD_PLL_CTRL1); + + return 0; + } + } + + return -ERANGE; +} + +static const struct clk_ops audio_pll_ops = { + .recalc_rate = audio_pll_recalc_rate, + .round_rate = audio_pll_round_rate, + .set_rate = audio_pll_set_rate, +}; + +static int register_clocks(struct mmp2_audio_clk *priv, struct device *dev) +{ + const struct clk_parent_data sspa_mux_parents[] = { + { .hw = &priv->audio_pll_hw }, + { .fw_name = "i2s0" }, + }; + const struct clk_parent_data sspa1_mux_parents[] = { + { .hw = &priv->audio_pll_hw }, + { .fw_name = "i2s1" }, + }; + int ret; + + priv->audio_pll_hw.init = CLK_HW_INIT_FW_NAME("audio_pll", + "vctcxo", &audio_pll_ops, + CLK_SET_RATE_PARENT); + ret = devm_clk_hw_register(dev, &priv->audio_pll_hw); + if (ret) + return ret; + + priv->sspa_mux.hw.init = CLK_HW_INIT_PARENTS_DATA("sspa_mux", + sspa_mux_parents, &clk_mux_ops, + CLK_SET_RATE_PARENT); + priv->sspa_mux.reg = priv->mmio_base + SSPA_AUD_CTRL; + priv->sspa_mux.mask = 1; + priv->sspa_mux.shift = SSPA_AUD_CTRL_SSPA0_MUX_SHIFT; + ret = devm_clk_hw_register(dev, &priv->sspa_mux.hw); + if (ret) + return ret; + + priv->sysclk_div.hw.init = CLK_HW_INIT_HW("sys_div", + &priv->sspa_mux.hw, &clk_divider_ops, + CLK_SET_RATE_PARENT); + priv->sysclk_div.reg = priv->mmio_base + SSPA_AUD_CTRL; + priv->sysclk_div.shift = SSPA_AUD_CTRL_SYSCLK_DIV_SHIFT; + priv->sysclk_div.width = 6; + priv->sysclk_div.flags = CLK_DIVIDER_ONE_BASED; + priv->sysclk_div.flags |= CLK_DIVIDER_ROUND_CLOSEST; + priv->sysclk_div.flags |= CLK_DIVIDER_ALLOW_ZERO; + ret = devm_clk_hw_register(dev, &priv->sysclk_div.hw); + if (ret) + return ret; + + priv->sysclk_gate.hw.init = CLK_HW_INIT_HW("sys_clk", + &priv->sysclk_div.hw, &clk_gate_ops, + CLK_SET_RATE_PARENT); + priv->sysclk_gate.reg = priv->mmio_base + SSPA_AUD_CTRL; + priv->sysclk_gate.bit_idx = SSPA_AUD_CTRL_SYSCLK_SHIFT; + ret = devm_clk_hw_register(dev, &priv->sysclk_gate.hw); + if (ret) + return ret; + + priv->sspa0_div.hw.init = CLK_HW_INIT_HW("sspa0_div", + &priv->sspa_mux.hw, &clk_divider_ops, 0); + priv->sspa0_div.reg = priv->mmio_base + SSPA_AUD_CTRL; + priv->sspa0_div.shift = SSPA_AUD_CTRL_SSPA0_DIV_SHIFT; + priv->sspa0_div.width = 6; + priv->sspa0_div.flags = CLK_DIVIDER_ONE_BASED; + priv->sspa0_div.flags |= CLK_DIVIDER_ROUND_CLOSEST; + priv->sspa0_div.flags |= CLK_DIVIDER_ALLOW_ZERO; + ret = devm_clk_hw_register(dev, &priv->sspa0_div.hw); + if (ret) + return ret; + + priv->sspa0_gate.hw.init = CLK_HW_INIT_HW("sspa0_clk", + &priv->sspa0_div.hw, &clk_gate_ops, + CLK_SET_RATE_PARENT); + priv->sspa0_gate.reg = priv->mmio_base + SSPA_AUD_CTRL; + priv->sspa0_gate.bit_idx = SSPA_AUD_CTRL_SSPA0_SHIFT; + ret = devm_clk_hw_register(dev, &priv->sspa0_gate.hw); + if 
(ret) + return ret; + + priv->sspa1_mux.hw.init = CLK_HW_INIT_PARENTS_DATA("sspa1_mux", + sspa1_mux_parents, &clk_mux_ops, + CLK_SET_RATE_PARENT); + priv->sspa1_mux.reg = priv->mmio_base + SSPA_AUD_CTRL; + priv->sspa1_mux.mask = 1; + priv->sspa1_mux.shift = SSPA_AUD_CTRL_SSPA1_MUX_SHIFT; + ret = devm_clk_hw_register(dev, &priv->sspa1_mux.hw); + if (ret) + return ret; + + priv->sspa1_div.hw.init = CLK_HW_INIT_HW("sspa1_div", + &priv->sspa1_mux.hw, &clk_divider_ops, 0); + priv->sspa1_div.reg = priv->mmio_base + SSPA_AUD_CTRL; + priv->sspa1_div.shift = SSPA_AUD_CTRL_SSPA1_DIV_SHIFT; + priv->sspa1_div.width = 6; + priv->sspa1_div.flags = CLK_DIVIDER_ONE_BASED; + priv->sspa1_div.flags |= CLK_DIVIDER_ROUND_CLOSEST; + priv->sspa1_div.flags |= CLK_DIVIDER_ALLOW_ZERO; + ret = devm_clk_hw_register(dev, &priv->sspa1_div.hw); + if (ret) + return ret; + + priv->sspa1_gate.hw.init = CLK_HW_INIT_HW("sspa1_clk", + &priv->sspa1_div.hw, &clk_gate_ops, + CLK_SET_RATE_PARENT); + priv->sspa1_gate.reg = priv->mmio_base + SSPA_AUD_CTRL; + priv->sspa1_gate.bit_idx = SSPA_AUD_CTRL_SSPA1_SHIFT; + ret = devm_clk_hw_register(dev, &priv->sspa1_gate.hw); + if (ret) + return ret; + + priv->clk_data.hws[MMP2_CLK_AUDIO_SYSCLK] = &priv->sysclk_gate.hw; + priv->clk_data.hws[MMP2_CLK_AUDIO_SSPA0] = &priv->sspa0_gate.hw; + priv->clk_data.hws[MMP2_CLK_AUDIO_SSPA1] = &priv->sspa1_gate.hw; + priv->clk_data.num = MMP2_CLK_AUDIO_NR_CLKS; + + return of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get, + &priv->clk_data); +} + +static int mmp2_audio_clk_probe(struct platform_device *pdev) +{ + struct mmp2_audio_clk *priv; + int ret; + + priv = devm_kzalloc(&pdev->dev, + struct_size(priv, clk_data.hws, + MMP2_CLK_AUDIO_NR_CLKS), + GFP_KERNEL); + if (!priv) + return -ENOMEM; + + spin_lock_init(&priv->lock); + platform_set_drvdata(pdev, priv); + + priv->mmio_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->mmio_base)) + return PTR_ERR(priv->mmio_base); + + pm_runtime_enable(&pdev->dev); + ret = pm_clk_create(&pdev->dev); + if (ret) + goto disable_pm_runtime; + + ret = pm_clk_add(&pdev->dev, "audio"); + if (ret) + goto destroy_pm_clk; + + ret = register_clocks(priv, &pdev->dev); + if (ret) + goto destroy_pm_clk; + + return 0; + +destroy_pm_clk: + pm_clk_destroy(&pdev->dev); +disable_pm_runtime: + pm_runtime_disable(&pdev->dev); + + return ret; +} + +static int mmp2_audio_clk_remove(struct platform_device *pdev) +{ + pm_clk_destroy(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + return 0; +} + +static int __maybe_unused mmp2_audio_clk_suspend(struct device *dev) +{ + struct mmp2_audio_clk *priv = dev_get_drvdata(dev); + + priv->aud_ctrl = readl(priv->mmio_base + SSPA_AUD_CTRL); + priv->aud_pll_ctrl0 = readl(priv->mmio_base + SSPA_AUD_PLL_CTRL0); + priv->aud_pll_ctrl1 = readl(priv->mmio_base + SSPA_AUD_PLL_CTRL1); + pm_clk_suspend(dev); + + return 0; +} + +static int __maybe_unused mmp2_audio_clk_resume(struct device *dev) +{ + struct mmp2_audio_clk *priv = dev_get_drvdata(dev); + + pm_clk_resume(dev); + writel(priv->aud_ctrl, priv->mmio_base + SSPA_AUD_CTRL); + writel(priv->aud_pll_ctrl0, priv->mmio_base + SSPA_AUD_PLL_CTRL0); + writel(priv->aud_pll_ctrl1, priv->mmio_base + SSPA_AUD_PLL_CTRL1); + + return 0; +} + +static const struct dev_pm_ops mmp2_audio_clk_pm_ops = { + SET_RUNTIME_PM_OPS(mmp2_audio_clk_suspend, mmp2_audio_clk_resume, NULL) +}; + +static const struct of_device_id mmp2_audio_clk_of_match[] = { + { .compatible = "marvell,mmp2-audio-clock" }, + {} +}; + +MODULE_DEVICE_TABLE(of, 
mmp2_audio_clk_of_match); + +static struct platform_driver mmp2_audio_clk_driver = { + .driver = { + .name = "mmp2-audio-clock", + .of_match_table = of_match_ptr(mmp2_audio_clk_of_match), + .pm = &mmp2_audio_clk_pm_ops, + }, + .probe = mmp2_audio_clk_probe, + .remove = mmp2_audio_clk_remove, +}; +module_platform_driver(mmp2_audio_clk_driver); + +MODULE_AUTHOR("Lubomir Rintel "); +MODULE_DESCRIPTION("Clock driver for MMP2 Audio subsystem"); +MODULE_LICENSE("GPL"); -- GitLab From 11362b1befeadaae4d159a8cddcdaf6b8afe08f9 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Thu, 28 May 2020 10:56:42 +1000 Subject: [PATCH 0751/1971] KVM: PPC: Book3S HV: Close race with page faults around memslot flushes There is a potential race condition between hypervisor page faults and flushing a memslot. It is possible for a page fault to read the memslot before a memslot is updated and then write a PTE to the partition-scoped page tables after kvmppc_radix_flush_memslot has completed. (Note that this race has never been explicitly observed.) To close this race, it is sufficient to increment the MMU sequence number while the kvm->mmu_lock is held. That will cause mmu_notifier_retry() to return true, and the page fault will then return to the guest without inserting a PTE. Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s_64_mmu_radix.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index bc3f795252fdf..aa41183d2a975 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -1130,6 +1130,11 @@ void kvmppc_radix_flush_memslot(struct kvm *kvm, kvm->arch.lpid); gpa += PAGE_SIZE; } + /* + * Increase the mmu notifier sequence number to prevent any page + * fault that read the memslot earlier from writing a PTE. + */ + kvm->mmu_notifier_seq++; spin_unlock(&kvm->mmu_lock); } -- GitLab From 2140d68d69d45e4a74ec09ca381d2c8879165d2d Mon Sep 17 00:00:00 2001 From: Serge Semin Date: Tue, 26 May 2020 16:51:00 +0300 Subject: [PATCH 0752/1971] dt-bindings: power: reset: Unrequire regmap property in syscon-reboot node Since normally syscon-reboot block is supposed to be a part of a system controller, lets mark the regmap property as deprecated and recommend the syscon-reboot node to be a sub-node of SYSCON. Signed-off-by: Serge Semin Reviewed-by: Rob Herring Cc: Alexey Malahov Cc: Thomas Bogendoerfer Signed-off-by: Sebastian Reichel --- .../bindings/power/reset/syscon-reboot.yaml | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/Documentation/devicetree/bindings/power/reset/syscon-reboot.yaml b/Documentation/devicetree/bindings/power/reset/syscon-reboot.yaml index b80772cb9f062..da25097248128 100644 --- a/Documentation/devicetree/bindings/power/reset/syscon-reboot.yaml +++ b/Documentation/devicetree/bindings/power/reset/syscon-reboot.yaml @@ -12,9 +12,12 @@ maintainers: description: |+ This is a generic reset driver using syscon to map the reset register. The reset is generally performed with a write to the reset register - defined by the register map pointed by syscon reference plus the offset - with the value and mask defined in the reboot node. - Default will be little endian mode, 32 bit access only. + defined by the SYSCON register map base plus the offset with the value and + mask defined in the reboot node. Default will be little endian mode, 32 bit + access only. The SYSCON registers map is normally retrieved from the + parental dt-node. 
So the SYSCON reboot node should be represented as a + sub-node of a "syscon", "simple-mfd" node. Though the regmap property + pointing to the system controller node is also supported. properties: compatible: @@ -30,7 +33,10 @@ properties: regmap: $ref: /schemas/types.yaml#/definitions/phandle - description: Phandle to the register map node. + deprecated: true + description: | + Phandle to the register map node. This property is deprecated in favor of + the syscon-reboot node been a child of a system controller node. value: $ref: /schemas/types.yaml#/definitions/uint32 @@ -38,7 +44,6 @@ properties: required: - compatible - - regmap - offset additionalProperties: false -- GitLab From c3d80522237b71480109fc397975fe4146ed2550 Mon Sep 17 00:00:00 2001 From: Serge Semin Date: Tue, 26 May 2020 16:51:01 +0300 Subject: [PATCH 0753/1971] power: reset: syscon-reboot: Add parental syscon support Since normally syscon-reboot block is supposed to be a part of a system controller, lets look for the syscon regmap in a parental DT node if regmap property isn't specified. DT binding from now considers the regmap property as deprecated. Signed-off-by: Serge Semin Cc: Alexey Malahov Cc: Thomas Bogendoerfer Signed-off-by: Sebastian Reichel --- drivers/power/reset/syscon-reboot.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/power/reset/syscon-reboot.c b/drivers/power/reset/syscon-reboot.c index 62fbba0df9710..510e363381ca2 100644 --- a/drivers/power/reset/syscon-reboot.c +++ b/drivers/power/reset/syscon-reboot.c @@ -51,8 +51,11 @@ static int syscon_reboot_probe(struct platform_device *pdev) return -ENOMEM; ctx->map = syscon_regmap_lookup_by_phandle(dev->of_node, "regmap"); - if (IS_ERR(ctx->map)) - return PTR_ERR(ctx->map); + if (IS_ERR(ctx->map)) { + ctx->map = syscon_node_to_regmap(dev->parent->of_node); + if (IS_ERR(ctx->map)) + return PTR_ERR(ctx->map); + } if (of_property_read_u32(pdev->dev.of_node, "offset", &ctx->offset)) return -EINVAL; -- GitLab From 5587fa489747a8e6cbd0558890458c862b797485 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Mon, 20 Apr 2020 22:13:58 +0300 Subject: [PATCH 0754/1971] mtd: spi-nor: spansion: fix writes on S25FS512S Spansion S25FS-S family has an issue in the Basic Flash Parameter Table (BFPT): Dword-11 bits 7:4 specify a page size of 512 bytes. Actually this is configurable in the vendor unique register (CR3V) and even the factory default setting is to "wrap at 256 bytes", so blindly relying on BFPT breaks the page writes on these chips. 
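For illustration, JESD216 encodes that field as a power of two, so the SFDP parser effectively computes something like the sketch below (helper name and masking are illustrative, not the literal core code):

#include <linux/bits.h>
#include <linux/types.h>

/* BFPT DWORD-11 bits 7:4 hold N, with page size = 2^N bytes. */
static unsigned int example_bfpt_page_size(u32 bfpt_dword11)
{
	return 1U << ((bfpt_dword11 & GENMASK(7, 4)) >> 4);
}

which yields 512 for these parts even though their write buffer keeps wrapping at 256 bytes.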
Add the post-BFPT fixup which restores the default page size of 256 bytes -- to properly read CR3V this early is quite intrusive and should better be done as a new feature; Alexander Sverdlin had the patch doing that: https://patchwork.ozlabs.org/project/linux-mtd/patch/20200227123657.26030-1-alexander.sverdlin@nokia.com/ Fixes: dfd2b74530e ("mtd: spi-nor: add Spansion S25FS512S ID") Signed-off-by: Sergei Shtylyov Reviewed-by: Alexander Sverdlin Tested-by: Kuldeep Singh Signed-off-by: Tudor Ambarus --- drivers/mtd/spi-nor/spansion.c | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c index 88183eba8ac11..0d4080ef0e447 100644 --- a/drivers/mtd/spi-nor/spansion.c +++ b/drivers/mtd/spi-nor/spansion.c @@ -8,6 +8,27 @@ #include "core.h" +static int +s25fs_s_post_bfpt_fixups(struct spi_nor *nor, + const struct sfdp_parameter_header *bfpt_header, + const struct sfdp_bfpt *bfpt, + struct spi_nor_flash_parameter *params) +{ + /* + * The S25FS-S chip family reports 512-byte pages in BFPT but + * in reality the write buffer still wraps at the safe default + * of 256 bytes. Overwrite the page size advertised by BFPT + * to get the writes working. + */ + params->page_size = 256; + + return 0; +} + +static struct spi_nor_fixups s25fs_s_fixups = { + .post_bfpt = s25fs_s_post_bfpt_fixups, +}; + static const struct flash_info spansion_parts[] = { /* Spansion/Cypress -- single (large) sector size only, at least * for the chips listed here (without boot sectors). @@ -32,8 +53,8 @@ static const struct flash_info spansion_parts[] = { SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | USE_CLSR) }, { "s25fs512s", INFO6(0x010220, 0x4d0081, 256 * 1024, 256, - SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | - USE_CLSR) }, + SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) + .fixups = &s25fs_s_fixups, }, { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) }, { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) }, { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) }, -- GitLab From fac1cd327b627a0761d10bb93a2b0d0075452ff9 Mon Sep 17 00:00:00 2001 From: Marek Szyprowski Date: Fri, 22 May 2020 12:24:48 +0200 Subject: [PATCH 0755/1971] power: charger: max14577: Add proper dt-compatible strings Add device tree compatible strings and create proper modalias structures to let this driver load automatically if compiled as module, because max14577 MFD driver creates MFD cells with such compatible strings. 
Signed-off-by: Marek Szyprowski Signed-off-by: Sebastian Reichel --- drivers/power/supply/max14577_charger.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/power/supply/max14577_charger.c b/drivers/power/supply/max14577_charger.c index 8a59feac6468f..dcedae18d7be0 100644 --- a/drivers/power/supply/max14577_charger.c +++ b/drivers/power/supply/max14577_charger.c @@ -623,9 +623,19 @@ static const struct platform_device_id max14577_charger_id[] = { }; MODULE_DEVICE_TABLE(platform, max14577_charger_id); +static const struct of_device_id of_max14577_charger_dt_match[] = { + { .compatible = "maxim,max14577-charger", + .data = (void *)MAXIM_DEVICE_TYPE_MAX14577, }, + { .compatible = "maxim,max77836-charger", + .data = (void *)MAXIM_DEVICE_TYPE_MAX77836, }, + { }, +}; +MODULE_DEVICE_TABLE(of, of_max14577_charger_dt_match); + static struct platform_driver max14577_charger_driver = { .driver = { .name = "max14577-charger", + .of_match_table = of_max14577_charger_dt_match, }, .probe = max14577_charger_probe, .remove = max14577_charger_remove, -- GitLab From c26d0d87f175b8283b8606d2122603232028cbf2 Mon Sep 17 00:00:00 2001 From: Yicong Yang Date: Wed, 22 Apr 2020 17:13:29 +0800 Subject: [PATCH 0756/1971] mtd: spi-nor: Add support for s25fs128s1 Add support for Cypress s25fs128s1 flash. Previously the flash is decoded as s25fl129p1 by mistake. Add it in the flash info list to correctly decode. The flash also needs a fixup for s25fs-s family. Further capability of the flash will be parsed from bfpt. The flash has been tested under SPI/DUAL/QUAD mode on hisi-sfc-v3xx controller, all the write/read/erase works well. Signed-off-by: Yicong Yang Signed-off-by: Tudor Ambarus --- drivers/mtd/spi-nor/spansion.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c index 0d4080ef0e447..5c5a16ad5e802 100644 --- a/drivers/mtd/spi-nor/spansion.c +++ b/drivers/mtd/spi-nor/spansion.c @@ -52,6 +52,9 @@ static const struct flash_info spansion_parts[] = { { "s25fl512s", INFO6(0x010220, 0x4d0080, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | USE_CLSR) }, + { "s25fs128s1", INFO6(0x012018, 0x4d0181, 64 * 1024, 256, + SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) + .fixups = &s25fs_s_fixups, }, { "s25fs512s", INFO6(0x010220, 0x4d0081, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) .fixups = &s25fs_s_fixups, }, -- GitLab From 075fd6dff24acf51997597669e4642db6a919014 Mon Sep 17 00:00:00 2001 From: Tudor Ambarus Date: Wed, 29 Apr 2020 07:11:01 +0000 Subject: [PATCH 0757/1971] mtd: spi-nor: spansion: Differentiate between s25fl256s and s25fs256s s25fs256s was identified as s25fl256s. Differentiate between them by the Family ID using the INFO6 macro. 
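Spelled out, the six ID bytes now being matched are (taken from the INFO6() arguments below):

  s25fl256s1: 01 02 19 4d 01 80
  s25fs256s1: 01 02 19 4d 01 81

Only the last byte -- the family ID, 0x80 for the FL-S family versus 0x81 for FS-S -- differs, which is why the previous five-byte INFO() entries could not tell the parts apart.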
Fixes: b199489d37b2 ("mtd: spi-nor: add the framework for SPI NOR") Signed-off-by: Tudor Ambarus Reviewed-by: Alexander Sverdlin --- drivers/mtd/spi-nor/spansion.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c index 5c5a16ad5e802..11772d3597ab1 100644 --- a/drivers/mtd/spi-nor/spansion.c +++ b/drivers/mtd/spi-nor/spansion.c @@ -43,18 +43,24 @@ static const struct flash_info spansion_parts[] = { { "s25fl128s1", INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) }, - { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, - SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | - USE_CLSR) }, - { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, - SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | - USE_CLSR) }, + { "s25fl256s0", INFO6(0x010219, 0x4d0080, 256 * 1024, 128, + SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + USE_CLSR) }, + { "s25fl256s1", INFO6(0x010219, 0x4d0180, 64 * 1024, 512, + SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + USE_CLSR) }, { "s25fl512s", INFO6(0x010220, 0x4d0080, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | USE_CLSR) }, { "s25fs128s1", INFO6(0x012018, 0x4d0181, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) .fixups = &s25fs_s_fixups, }, + { "s25fs256s0", INFO6(0x010219, 0x4d0081, 256 * 1024, 128, + SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + USE_CLSR) }, + { "s25fs256s1", INFO6(0x010219, 0x4d0181, 64 * 1024, 512, + SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + USE_CLSR) }, { "s25fs512s", INFO6(0x010220, 0x4d0081, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) .fixups = &s25fs_s_fixups, }, -- GitLab From 1ac71ec0130cce5bed3ec11ffc88651097a24173 Mon Sep 17 00:00:00 2001 From: Tudor Ambarus Date: Tue, 28 Apr 2020 08:47:43 +0000 Subject: [PATCH 0758/1971] mtd: spi-nor: Fix SPI NOR acronym The correct terminology is serial NOR flash or SPI NOR. s/SPI-NOR/SPI NOR and s/spi-nor/SPI NOR across the subsystem. Signed-off-by: Tudor Ambarus Reviewed-by: Sergei Shtylyov --- drivers/mtd/spi-nor/Kconfig | 4 ++-- drivers/mtd/spi-nor/controllers/Kconfig | 4 ++-- drivers/mtd/spi-nor/controllers/aspeed-smc.c | 2 +- drivers/mtd/spi-nor/controllers/hisi-sfc.c | 2 +- drivers/mtd/spi-nor/controllers/nxp-spifi.c | 2 +- drivers/mtd/spi-nor/core.c | 4 ++-- include/linux/mtd/spi-nor.h | 8 ++++---- 7 files changed, 13 insertions(+), 13 deletions(-) diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig index 6e816eafb312a..ffc4b380f2b16 100644 --- a/drivers/mtd/spi-nor/Kconfig +++ b/drivers/mtd/spi-nor/Kconfig @@ -1,12 +1,12 @@ # SPDX-License-Identifier: GPL-2.0-only menuconfig MTD_SPI_NOR - tristate "SPI-NOR device support" + tristate "SPI NOR device support" depends on MTD depends on MTD && SPI_MASTER select SPI_MEM help This is the framework for the SPI NOR which can be used by the SPI - device drivers and the SPI-NOR device driver. + device drivers and the SPI NOR device driver. if MTD_SPI_NOR diff --git a/drivers/mtd/spi-nor/controllers/Kconfig b/drivers/mtd/spi-nor/controllers/Kconfig index 10b86660b8214..d89a5ea9446a1 100644 --- a/drivers/mtd/spi-nor/controllers/Kconfig +++ b/drivers/mtd/spi-nor/controllers/Kconfig @@ -21,11 +21,11 @@ config SPI_CADENCE_QUADSPI Flash as an MTD device. 
config SPI_HISI_SFC - tristate "Hisilicon FMC SPI-NOR Flash Controller(SFC)" + tristate "Hisilicon FMC SPI NOR Flash Controller(SFC)" depends on ARCH_HISI || COMPILE_TEST depends on HAS_IOMEM help - This enables support for HiSilicon FMC SPI-NOR flash controller. + This enables support for HiSilicon FMC SPI NOR flash controller. config SPI_NXP_SPIFI tristate "NXP SPI Flash Interface (SPIFI)" diff --git a/drivers/mtd/spi-nor/controllers/aspeed-smc.c b/drivers/mtd/spi-nor/controllers/aspeed-smc.c index ae85e4c0e1143..7225870e8b18c 100644 --- a/drivers/mtd/spi-nor/controllers/aspeed-smc.c +++ b/drivers/mtd/spi-nor/controllers/aspeed-smc.c @@ -727,7 +727,7 @@ static int aspeed_smc_chip_setup_finish(struct aspeed_smc_chip *chip) /* * TODO: Adjust clocks if fast read is supported and interpret - * SPI-NOR flags to adjust controller settings. + * SPI NOR flags to adjust controller settings. */ if (chip->nor.read_proto == SNOR_PROTO_1_1_1) { if (chip->nor.read_dummy == 0) diff --git a/drivers/mtd/spi-nor/controllers/hisi-sfc.c b/drivers/mtd/spi-nor/controllers/hisi-sfc.c index 6c7a4118752ea..95c502173cbda 100644 --- a/drivers/mtd/spi-nor/controllers/hisi-sfc.c +++ b/drivers/mtd/spi-nor/controllers/hisi-sfc.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * HiSilicon FMC SPI-NOR flash controller driver + * HiSilicon FMC SPI NOR flash controller driver * * Copyright (c) 2015-2016 HiSilicon Technologies Co., Ltd. */ diff --git a/drivers/mtd/spi-nor/controllers/nxp-spifi.c b/drivers/mtd/spi-nor/controllers/nxp-spifi.c index 9a5b1a7c636a0..5703e83139803 100644 --- a/drivers/mtd/spi-nor/controllers/nxp-spifi.c +++ b/drivers/mtd/spi-nor/controllers/nxp-spifi.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * SPI-NOR driver for NXP SPI Flash Interface (SPIFI) + * SPI NOR driver for NXP SPI Flash Interface (SPIFI) * * Copyright (C) 2015 Joachim Eastwood * diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c index 1ab4386a099a9..0369d98b2d12e 100644 --- a/drivers/mtd/spi-nor/core.c +++ b/drivers/mtd/spi-nor/core.c @@ -2469,7 +2469,7 @@ static int spi_nor_select_read(struct spi_nor *nor, nor->read_proto = read->proto; /* - * In the spi-nor framework, we don't need to make the difference + * In the SPI NOR framework, we don't need to make the difference * between mode clock cycles and wait state clock cycles. * Indeed, the value of the mode clock cycles is used by a QSPI * flash memory to know whether it should enter or leave its 0-4-4 @@ -3126,7 +3126,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, /* * Make sure the XSR_RDY flag is set before calling * spi_nor_wait_till_ready(). Xilinx S3AN share MFR - * with Atmel spi-nor + * with Atmel SPI NOR. */ if (info->flags & SPI_NOR_XSR_RDY) nor->flags |= SNOR_F_READY_XSR_RDY; diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index bebff2729c189..60bac2c0ec456 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -302,7 +302,7 @@ struct spi_nor; * @read: read data from the SPI NOR. * @write: write data to the SPI NOR. * @erase: erase a sector of the SPI NOR at the offset @offs; if - * not provided by the driver, spi-nor will send the erase + * not provided by the driver, SPI NOR will send the erase * opcode via write_reg(). 
*/ struct spi_nor_controller_ops { @@ -336,7 +336,7 @@ struct spi_nor_flash_parameter; * layer is not DMA-able * @bouncebuf_size: size of the bounce buffer * @info: SPI NOR part JEDEC MFR ID and other info - * @manufacturer: spi-nor manufacturer + * @manufacturer: SPI NOR manufacturer * @page_size: the page size of the SPI NOR * @addr_width: number of address bytes * @erase_opcode: the opcode for erasing a sector @@ -344,12 +344,12 @@ struct spi_nor_flash_parameter; * @read_dummy: the dummy needed by the read operation * @program_opcode: the program opcode * @sst_write_second: used by the SST write operation - * @flags: flag options for the current SPI-NOR (SNOR_F_*) + * @flags: flag options for the current SPI NOR (SNOR_F_*) * @read_proto: the SPI protocol for read operations * @write_proto: the SPI protocol for write operations * @reg_proto: the SPI protocol for read_reg/write_reg/erase operations * @controller_ops: SPI NOR controller driver specific operations. - * @params: [FLASH-SPECIFIC] SPI-NOR flash parameters and settings. + * @params: [FLASH-SPECIFIC] SPI NOR flash parameters and settings. * The structure includes legacy flash parameters and * settings that can be overwritten by the spi_nor_fixups * hooks, or dynamically when parsing the SFDP tables. -- GitLab From 8a2644d5f3608822925c9204a3d19a8e3025fd4a Mon Sep 17 00:00:00 2001 From: Sascha Hauer Date: Fri, 24 Apr 2020 08:56:26 +0200 Subject: [PATCH 0759/1971] mtd: spi-nor: Add support for Cypress cy15x104q The Cypress cy15b104q and cy15v104q are 4Mbit serial SPI F-RAM devices. Add support for them to the spi-nor driver. The actual Device ID of this chip is 7f 7f 7f 7f 7f 7f c2 2c 04. That is six times the continuation code 7f followed by c2 for Ramtron. Unfortunately the chip sends the Device ID in reversed order, so the continuation code is not at the beginning, but instead at the end. Even more unfortunate is that when reading further the chip sends more 7f codes which means we are not even able to count the continuation codes. We can only hope that this reversed Device ID will never match any other devices ID. Collisions are improbable as of now, the solution from above is good enough. In case of future collisions one can introduce an INFO9 macro, with the downsize that struct flash_info would grow and we have lots of flashes. A more elegant solution would be to introduce dedicated flash ID tables for each bank in JESP106BA. Signed-off-by: Sascha Hauer [tudor.ambarus@microchip.com: amend commit description with possible future solutions in case collisions occur.] 
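[Illustration of the resulting match: because of the reversed order, the ID read returns 04 2c c2 7f 7f 7f ..., so the INFO6(0x042cc2, 0x7f7f7f, ...) entry added below matches on the first six bytes while the trailing run of 7f continuation codes goes unchecked.]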
Signed-off-by: Tudor Ambarus --- drivers/mtd/spi-nor/spansion.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c index 11772d3597ab1..e550cd5c9d3a5 100644 --- a/drivers/mtd/spi-nor/spansion.c +++ b/drivers/mtd/spi-nor/spansion.c @@ -102,6 +102,8 @@ static const struct flash_info spansion_parts[] = { { "s25fl256l", INFO(0x016019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) }, + { "cy15x104q", INFO6(0x042cc2, 0x7f7f7f, 512 * 1024, 1, + SPI_NOR_NO_ERASE) }, }; static void spansion_post_sfdp_fixups(struct spi_nor *nor) -- GitLab From 3e34b5135b39c4d530494e08d56b12f5b1396aa6 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 28 May 2020 17:18:58 +0300 Subject: [PATCH 0760/1971] i2c: acpi: Drop double check for ACPI companion device acpi_dev_get_resources() does perform the NULL pointer check against ACPI companion device which is given as function parameter. Thus, there is no need to duplicate this check in the caller. Signed-off-by: Andy Shevchenko Signed-off-by: Wolfram Sang --- drivers/i2c/i2c-core-acpi.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c index c8f42f2037cbe..2ade99b105b91 100644 --- a/drivers/i2c/i2c-core-acpi.c +++ b/drivers/i2c/i2c-core-acpi.c @@ -468,16 +468,12 @@ struct notifier_block i2c_acpi_notifier = { struct i2c_client *i2c_acpi_new_device(struct device *dev, int index, struct i2c_board_info *info) { + struct acpi_device *adev = ACPI_COMPANION(dev); struct i2c_acpi_lookup lookup; struct i2c_adapter *adapter; - struct acpi_device *adev; LIST_HEAD(resource_list); int ret; - adev = ACPI_COMPANION(dev); - if (!adev) - return ERR_PTR(-EINVAL); - memset(&lookup, 0, sizeof(lookup)); lookup.info = info; lookup.device_handle = acpi_device_handle(adev); -- GitLab From bee0d92c899b8600102eedf5e6e5e88a43c0ffdf Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Mon, 25 May 2020 11:59:33 +0200 Subject: [PATCH 0761/1971] i2c: slave-eeprom: skip useless initialization We have a kzalloced struct, no need to init to 0. Signed-off-by: Wolfram Sang Signed-off-by: Wolfram Sang --- drivers/i2c/i2c-slave-eeprom.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/i2c/i2c-slave-eeprom.c b/drivers/i2c/i2c-slave-eeprom.c index 787fdb7f332f0..6684852818acd 100644 --- a/drivers/i2c/i2c-slave-eeprom.c +++ b/drivers/i2c/i2c-slave-eeprom.c @@ -152,7 +152,6 @@ static int i2c_slave_eeprom_probe(struct i2c_client *client, const struct i2c_de if (!eeprom) return -ENOMEM; - eeprom->idx_write_cnt = 0; eeprom->num_address_bytes = flag_addr16 ? 2 : 1; eeprom->address_mask = size - 1; eeprom->read_only = FIELD_GET(I2C_SLAVE_FLAG_RO, id->driver_data); -- GitLab From 468ed57fd29918305d5c3f3953dcd81be6710aa7 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Mon, 25 May 2020 11:59:34 +0200 Subject: [PATCH 0762/1971] i2c: slave-eeprom: update documentation to recent changes Support for 16-bit addresses has been added, so remove it from the todo list. Also, in the introductory sentence, may clear we talk about "slave IP cores" to make reading easier. 
Fixes: 82d514815441 ("i2c-eeprom_slave: Add support for more eeprom models") Signed-off-by: Wolfram Sang Signed-off-by: Wolfram Sang --- drivers/i2c/i2c-slave-eeprom.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/i2c/i2c-slave-eeprom.c b/drivers/i2c/i2c-slave-eeprom.c index 6684852818acd..593f2fd39d17d 100644 --- a/drivers/i2c/i2c-slave-eeprom.c +++ b/drivers/i2c/i2c-slave-eeprom.c @@ -5,10 +5,9 @@ * Copyright (C) 2014 by Wolfram Sang, Sang Engineering * Copyright (C) 2014 by Renesas Electronics Corporation * - * Because most IP blocks can only detect one I2C slave address anyhow, this - * driver does not support simulating EEPROM types which take more than one - * address. It is prepared to simulate bigger EEPROMs with an internal 16 bit - * pointer, yet implementation is deferred until the need actually arises. + * Because most slave IP cores can only detect one I2C slave address anyhow, + * this driver does not support simulating EEPROM types which take more than + * one address. */ /* -- GitLab From 7df915e540ec51ce9ac5f2d2d9801d0356b617da Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Mon, 25 May 2020 14:05:04 +0200 Subject: [PATCH 0763/1971] i2c: avoid confusing naming in header i2c_client pointers are usually named 'client'. Use it here to get rid of the ambiguity of 'dev->dev'. Signed-off-by: Wolfram Sang --- include/linux/i2c.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 49d29054e6571..c10617bb980ab 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -351,14 +351,14 @@ static inline struct i2c_client *kobj_to_i2c_client(struct kobject *kobj) return to_i2c_client(dev); } -static inline void *i2c_get_clientdata(const struct i2c_client *dev) +static inline void *i2c_get_clientdata(const struct i2c_client *client) { - return dev_get_drvdata(&dev->dev); + return dev_get_drvdata(&client->dev); } -static inline void i2c_set_clientdata(struct i2c_client *dev, void *data) +static inline void i2c_set_clientdata(struct i2c_client *client, void *data) { - dev_set_drvdata(&dev->dev, data); + dev_set_drvdata(&client->dev, data); } /* I2C slave support */ -- GitLab From 8e923a2168afd221ea26e3d9716f21e9578b5c4d Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Mon, 11 May 2020 22:36:27 +0900 Subject: [PATCH 0764/1971] selftests/ftrace: Use printf for backslash included command Since the built-in echo has different behavior in POSIX shell (dash) and bash, kprobe_syntax_errors.tc can fail on dash which interpret backslash escape automatically. To fix this issue, we explicitly use printf "%s" (not interpret backslash escapes) if the command string can include backslash. Reported-by: Liu Yiding Suggested-by: Xiao Yang Signed-off-by: Masami Hiramatsu Signed-off-by: Shuah Khan --- tools/testing/selftests/ftrace/test.d/functions | 8 +++++--- .../ftrace/test.d/kprobe/kprobe_syntax_errors.tc | 4 +++- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/ftrace/test.d/functions b/tools/testing/selftests/ftrace/test.d/functions index 61a3c7e2634df..697c77ef2e2bf 100644 --- a/tools/testing/selftests/ftrace/test.d/functions +++ b/tools/testing/selftests/ftrace/test.d/functions @@ -119,12 +119,14 @@ yield() { ping $LOCALHOST -c 1 || sleep .001 || usleep 1 || sleep 1 } +# Since probe event command may include backslash, explicitly use printf "%s" +# to NOT interpret it. 
ftrace_errlog_check() { # err-prefix command-with-error-pos-by-^ command-file - pos=$(echo -n "${2%^*}" | wc -c) # error position - command=$(echo "$2" | tr -d ^) + pos=$(printf "%s" "${2%^*}" | wc -c) # error position + command=$(printf "%s" "$2" | tr -d ^) echo "Test command: $command" echo > error_log - (! echo "$command" >> "$3" ) 2> /dev/null + (! printf "%s" "$command" >> "$3" ) 2> /dev/null grep "$1: error:" -A 3 error_log N=$(tail -n 1 error_log | wc -c) # " Command: " and "^\n" => 13 diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc index ef1e9bafb0984..eb0f4ab4e070a 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc @@ -91,7 +91,9 @@ esac if grep -q "Create/append/" README && grep -q "imm-value" README; then echo 'p:kprobes/testevent _do_fork' > kprobe_events check_error '^r:kprobes/testevent do_exit' # DIFF_PROBE_TYPE -echo 'p:kprobes/testevent _do_fork abcd=\1' > kprobe_events + +# Explicitly use printf "%s" to not interpret \1 +printf "%s" 'p:kprobes/testevent _do_fork abcd=\1' > kprobe_events check_error 'p:kprobes/testevent _do_fork ^bcd=\1' # DIFF_ARG_TYPE check_error 'p:kprobes/testevent _do_fork ^abcd=\1:u8' # DIFF_ARG_TYPE check_error 'p:kprobes/testevent _do_fork ^abcd=\"foo"' # DIFF_ARG_TYPE -- GitLab From 619ee76f5c9f6a1d601d1a056a454d62bf676ae4 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Mon, 25 May 2020 19:20:57 +0900 Subject: [PATCH 0765/1971] selftests/ftrace: Return unsupported if no error_log file Check whether error_log file exists in tracing/error_log testcase and return UNSUPPORTED if no error_log file. This can happen if we run the ftracetest on the older stable kernel. Fixes: 4eab1cc461a6 ("selftests/ftrace: Add tracing/error_log testcase") Cc: stable@vger.kernel.org Signed-off-by: Masami Hiramatsu Signed-off-by: Shuah Khan --- .../testing/selftests/ftrace/test.d/ftrace/tracing-error-log.tc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/tracing-error-log.tc b/tools/testing/selftests/ftrace/test.d/ftrace/tracing-error-log.tc index 021c03fd885db..23465823532b9 100644 --- a/tools/testing/selftests/ftrace/test.d/ftrace/tracing-error-log.tc +++ b/tools/testing/selftests/ftrace/test.d/ftrace/tracing-error-log.tc @@ -14,6 +14,8 @@ if [ ! -f set_event ]; then exit_unsupported fi +[ -f error_log ] || exit_unsupported + ftrace_errlog_check 'event filter parse error' '((sig >= 10 && sig < 15) || dsig ^== 17) && comm != bash' 'events/signal/signal_generate/filter' exit 0 -- GitLab From 2e9bcaccef40e81f052e4f228df91feaedd7b26c Mon Sep 17 00:00:00 2001 From: Samuel Zou Date: Mon, 11 May 2020 19:12:57 +0800 Subject: [PATCH 0766/1971] power: supply: Make bd9995x_chip_reset static Fix the following sparse warning: drivers/power/supply/bd99954-charger.c:1028:6: warning: symbol 'bd9995x_chip_reset' was not declared. 
The bd9995x_chip_reset() has only one call site within bd99954-charger.c It should be static Fixes: 0902f8366491 ("power: supply: Support ROHM bd99954 charger") Reported-by: Hulk Robot Signed-off-by: Samuel Zou Reviewed-by: Matti Vaittinen Signed-off-by: Sebastian Reichel --- drivers/power/supply/bd99954-charger.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/power/supply/bd99954-charger.c b/drivers/power/supply/bd99954-charger.c index 3da39c7293e36..ffd8bfa08179c 100644 --- a/drivers/power/supply/bd99954-charger.c +++ b/drivers/power/supply/bd99954-charger.c @@ -1025,7 +1025,7 @@ static int bd9995x_fw_probe(struct bd9995x_device *bd) return 0; } -void bd9995x_chip_reset(void *bd) +static void bd9995x_chip_reset(void *bd) { __bd9995x_chip_reset(bd); } -- GitLab From 655078f5f5286fe0ab38e90c4695001a0e15e9dd Mon Sep 17 00:00:00 2001 From: Sebastian Reichel Date: Wed, 13 May 2020 20:55:57 +0200 Subject: [PATCH 0767/1971] kobject: increase allowed number of uevent variables SBS battery driver exposes 32 power supply properties now, which will result in uevent failure on (un)plugging the battery. Other drivers (e.g. bq27xxx) are also coming close to this limit, so increase it. Acked-by: Greg Kroah-Hartman Reviewed-by: Emil Velikov Signed-off-by: Sebastian Reichel --- include/linux/kobject.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/kobject.h b/include/linux/kobject.h index e2ca0a292e210..75e822569e394 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h @@ -29,7 +29,7 @@ #include #define UEVENT_HELPER_PATH_LEN 256 -#define UEVENT_NUM_ENVP 32 /* number of env pointers */ +#define UEVENT_NUM_ENVP 64 /* number of env pointers */ #define UEVENT_BUFFER_SIZE 2048 /* buffer for the variables */ #ifdef CONFIG_UEVENT_HELPER -- GitLab From bac705abcf345c28e419157cfcd1c44032cc9db2 Mon Sep 17 00:00:00 2001 From: Sebastian Reichel Date: Wed, 13 May 2020 20:55:58 +0200 Subject: [PATCH 0768/1971] power: supply: core: add capacity error margin property Add a property for reporting the error margin expected by fuel gauge chips. Signed-off-by: Sebastian Reichel --- Documentation/ABI/testing/sysfs-class-power | 15 +++++++++++++++ drivers/power/supply/power_supply_sysfs.c | 1 + include/linux/power_supply.h | 1 + 3 files changed, 17 insertions(+) diff --git a/Documentation/ABI/testing/sysfs-class-power b/Documentation/ABI/testing/sysfs-class-power index bf3b48f022dc1..2f896555ae237 100644 --- a/Documentation/ABI/testing/sysfs-class-power +++ b/Documentation/ABI/testing/sysfs-class-power @@ -74,6 +74,21 @@ Description: Access: Read, Write Valid values: 0 - 100 (percent) +What: /sys/class/power_supply//capacity_error_margin +Date: April 2019 +Contact: linux-pm@vger.kernel.org +Description: + Battery capacity measurement becomes unreliable without + recalibration. This values provides the maximum error + margin expected to exist by the fuel gauge in percent. + Values close to 0% will be returned after (re-)calibration + has happened. Over time the error margin will increase. + 100% means, that the capacity related values are basically + completely useless. 
+ + Access: Read + Valid values: 0 - 100 (percent) + What: /sys/class/power_supply//capacity_level Date: June 2009 Contact: linux-pm@vger.kernel.org diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c index d21b4e0edf385..e664774a2d1ef 100644 --- a/drivers/power/supply/power_supply_sysfs.c +++ b/drivers/power/supply/power_supply_sysfs.c @@ -178,6 +178,7 @@ static struct power_supply_attr power_supply_attrs[] = { POWER_SUPPLY_ATTR(CAPACITY), POWER_SUPPLY_ATTR(CAPACITY_ALERT_MIN), POWER_SUPPLY_ATTR(CAPACITY_ALERT_MAX), + POWER_SUPPLY_ATTR(CAPACITY_ERROR_MARGIN), POWER_SUPPLY_ENUM_ATTR(CAPACITY_LEVEL), POWER_SUPPLY_ATTR(TEMP), POWER_SUPPLY_ATTR(TEMP_MAX), diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 1f60731ec7fe2..453a85f256354 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -139,6 +139,7 @@ enum power_supply_property { POWER_SUPPLY_PROP_CAPACITY, /* in percents! */ POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN, /* in percents! */ POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX, /* in percents! */ + POWER_SUPPLY_PROP_CAPACITY_ERROR_MARGIN, /* in percents! */ POWER_SUPPLY_PROP_CAPACITY_LEVEL, POWER_SUPPLY_PROP_TEMP, POWER_SUPPLY_PROP_TEMP_MAX, -- GitLab From feabe49e46bb556b8d43e28d4a0d459940f7a5cb Mon Sep 17 00:00:00 2001 From: Sebastian Reichel Date: Wed, 13 May 2020 20:55:59 +0200 Subject: [PATCH 0769/1971] power: supply: core: add manufacture date properties Some smart batteries store their manufacture date, which is useful to identify the battery and/or to know about the cell quality. Signed-off-by: Sebastian Reichel --- Documentation/ABI/testing/sysfs-class-power | 28 +++++++++++++++++++++ drivers/power/supply/power_supply_sysfs.c | 3 +++ include/linux/power_supply.h | 3 +++ 3 files changed, 34 insertions(+) diff --git a/Documentation/ABI/testing/sysfs-class-power b/Documentation/ABI/testing/sysfs-class-power index 2f896555ae237..e6d7348766b23 100644 --- a/Documentation/ABI/testing/sysfs-class-power +++ b/Documentation/ABI/testing/sysfs-class-power @@ -680,3 +680,31 @@ Description: Valid values: - 1: enabled - 0: disabled + +What: /sys/class/power_supply//manufacture_year +Date: January 2020 +Contact: linux-pm@vger.kernel.org +Description: + Reports the year (following Gregorian calendar) when the device has been + manufactured. + + Access: Read + Valid values: Reported as integer + +What: /sys/class/power_supply//manufacture_month +Date: January 2020 +Contact: linux-pm@vger.kernel.org +Description: + Reports the month when the device has been manufactured. + + Access: Read + Valid values: 1-12 + +What: /sys/class/power_supply//manufacture_day +Date: January 2020 +Contact: linux-pm@vger.kernel.org +Description: + Reports the day of month when the device has been manufactured. 
+ + Access: Read + Valid values: 1-31 diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c index e664774a2d1ef..78d5382e69f11 100644 --- a/drivers/power/supply/power_supply_sysfs.c +++ b/drivers/power/supply/power_supply_sysfs.c @@ -198,6 +198,9 @@ static struct power_supply_attr power_supply_attrs[] = { POWER_SUPPLY_ATTR(PRECHARGE_CURRENT), POWER_SUPPLY_ATTR(CHARGE_TERM_CURRENT), POWER_SUPPLY_ATTR(CALIBRATE), + POWER_SUPPLY_ATTR(MANUFACTURE_YEAR), + POWER_SUPPLY_ATTR(MANUFACTURE_MONTH), + POWER_SUPPLY_ATTR(MANUFACTURE_DAY), /* Properties of type `const char *' */ POWER_SUPPLY_ATTR(MODEL_NAME), POWER_SUPPLY_ATTR(MANUFACTURER), diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 453a85f256354..63ffe2a0a87b2 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -159,6 +159,9 @@ enum power_supply_property { POWER_SUPPLY_PROP_PRECHARGE_CURRENT, POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT, POWER_SUPPLY_PROP_CALIBRATE, + POWER_SUPPLY_PROP_MANUFACTURE_YEAR, + POWER_SUPPLY_PROP_MANUFACTURE_MONTH, + POWER_SUPPLY_PROP_MANUFACTURE_DAY, /* Properties of type `const char *' */ POWER_SUPPLY_PROP_MODEL_NAME, POWER_SUPPLY_PROP_MANUFACTURER, -- GitLab From 601c2a543f02da484362b3ff9074b2cfe08750de Mon Sep 17 00:00:00 2001 From: Sebastian Reichel Date: Wed, 13 May 2020 20:56:00 +0200 Subject: [PATCH 0770/1971] power: supply: core: add POWER_SUPPLY_HEALTH_CALIBRATION_REQUIRED Some battery fuel gauges know when the battery needs to be recalibrated before providing usable values. This should be reported via the health property. Signed-off-by: Sebastian Reichel --- Documentation/ABI/testing/sysfs-class-power | 2 +- drivers/power/supply/power_supply_sysfs.c | 1 + include/linux/power_supply.h | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/Documentation/ABI/testing/sysfs-class-power b/Documentation/ABI/testing/sysfs-class-power index e6d7348766b23..216d61a22f1e7 100644 --- a/Documentation/ABI/testing/sysfs-class-power +++ b/Documentation/ABI/testing/sysfs-class-power @@ -205,7 +205,7 @@ Description: Valid values: "Unknown", "Good", "Overheat", "Dead", "Over voltage", "Unspecified failure", "Cold", "Watchdog timer expire", "Safety timer expire", - "Over current" + "Over current", "Calibration required" What: /sys/class/power_supply//precharge_current Date: June 2017 diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c index 78d5382e69f11..bc79560229b51 100644 --- a/drivers/power/supply/power_supply_sysfs.c +++ b/drivers/power/supply/power_supply_sysfs.c @@ -100,6 +100,7 @@ static const char * const POWER_SUPPLY_HEALTH_TEXT[] = { [POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE] = "Watchdog timer expire", [POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE] = "Safety timer expire", [POWER_SUPPLY_HEALTH_OVERCURRENT] = "Over current", + [POWER_SUPPLY_HEALTH_CALIBRATION_REQUIRED] = "Calibration required", }; static const char * const POWER_SUPPLY_TECHNOLOGY_TEXT[] = { diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 63ffe2a0a87b2..ac1345a48ad05 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -61,6 +61,7 @@ enum { POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE, POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE, POWER_SUPPLY_HEALTH_OVERCURRENT, + POWER_SUPPLY_HEALTH_CALIBRATION_REQUIRED, }; enum { -- GitLab From 0ff969158ac7167d19a0e86e365086c093836544 Mon Sep 17 00:00:00 2001 From: Sebastian Reichel Date: Wed, 13 May 2020 20:56:01 
+0200 Subject: [PATCH 0771/1971] power: supply: sbs-battery: Add TI BQ20Z65 support Add support for BQ20Z65 manufacturer data to the sbs-battery driver. Implementation has been verified using the public TRM available from [0] and tested using a GE Flex 3S2P battery. [0] http://www.ti.com/lit/pdf/sluu386 Acked-by: Rob Herring Signed-off-by: Sebastian Reichel --- .../bindings/power/supply/sbs_sbs-battery.txt | 1 + drivers/power/supply/sbs-battery.c | 15 ++++++++++----- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/Documentation/devicetree/bindings/power/supply/sbs_sbs-battery.txt b/Documentation/devicetree/bindings/power/supply/sbs_sbs-battery.txt index 4e78e51018ebd..fa5a8b516dbfd 100644 --- a/Documentation/devicetree/bindings/power/supply/sbs_sbs-battery.txt +++ b/Documentation/devicetree/bindings/power/supply/sbs_sbs-battery.txt @@ -6,6 +6,7 @@ Required properties : part number compatible string might be used in order to take care of vendor specific registers. Known ,: + ti,bq20z65 ti,bq20z75 Optional properties : diff --git a/drivers/power/supply/sbs-battery.c b/drivers/power/supply/sbs-battery.c index 6acd242eed488..a15783802ef82 100644 --- a/drivers/power/supply/sbs-battery.c +++ b/drivers/power/supply/sbs-battery.c @@ -149,8 +149,8 @@ static enum power_supply_property sbs_properties[] = { POWER_SUPPLY_PROP_MODEL_NAME }; -/* Supports special manufacturer commands from TI BQ20Z75 IC. */ -#define SBS_FLAGS_TI_BQ20Z75 BIT(0) +/* Supports special manufacturer commands from TI BQ20Z65 and BQ20Z75 IC. */ +#define SBS_FLAGS_TI_BQ20ZX5 BIT(0) struct sbs_info { struct i2c_client *client; @@ -626,7 +626,7 @@ static int sbs_get_property(struct power_supply *psy, switch (psp) { case POWER_SUPPLY_PROP_PRESENT: case POWER_SUPPLY_PROP_HEALTH: - if (chip->flags & SBS_FLAGS_TI_BQ20Z75) + if (chip->flags & SBS_FLAGS_TI_BQ20ZX5) ret = sbs_get_ti_battery_presence_and_health(client, psp, val); else @@ -950,7 +950,7 @@ static int sbs_suspend(struct device *dev) if (chip->poll_time > 0) cancel_delayed_work_sync(&chip->work); - if (chip->flags & SBS_FLAGS_TI_BQ20Z75) { + if (chip->flags & SBS_FLAGS_TI_BQ20ZX5) { /* Write to manufacturer access with sleep command. */ ret = sbs_write_word_data(client, sbs_data[REG_MANUFACTURER_DATA].addr, @@ -970,6 +970,7 @@ static SIMPLE_DEV_PM_OPS(sbs_pm_ops, sbs_suspend, NULL); #endif static const struct i2c_device_id sbs_id[] = { + { "bq20z65", 0 }, { "bq20z75", 0 }, { "sbs-battery", 1 }, {} @@ -978,9 +979,13 @@ MODULE_DEVICE_TABLE(i2c, sbs_id); static const struct of_device_id sbs_dt_ids[] = { { .compatible = "sbs,sbs-battery" }, + { + .compatible = "ti,bq20z65", + .data = (void *)SBS_FLAGS_TI_BQ20ZX5, + }, { .compatible = "ti,bq20z75", - .data = (void *)SBS_FLAGS_TI_BQ20Z75, + .data = (void *)SBS_FLAGS_TI_BQ20ZX5, }, { } }; -- GitLab From fd6126484ceaa9d94db196931c10454090f3d677 Mon Sep 17 00:00:00 2001 From: Daeho Jeong Date: Wed, 27 May 2020 13:02:31 +0900 Subject: [PATCH 0772/1971] f2fs: protect new segment allocation in expand_inode_data Found a new segemnt allocation without f2fs_lock_op() in expand_inode_data(). So, when we do fallocate() for a pinned file and trigger checkpoint very frequently and simultaneously. F2FS gets stuck in the below code of do_checkpoint() forever. f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO); /* Wait for all dirty meta pages to be submitted for IO */ <= if fallocate() here, f2fs_wait_on_all_pages(sbi, F2FS_DIRTY_META); <= it'll wait forever. 
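The fix below therefore brackets the allocation with f2fs_lock_op()/f2fs_unlock_op(), which at the time of writing boil down to taking sbi->cp_rwsem for read while the checkpoint path holds it for write:

	f2fs_lock_op(sbi);	/* roughly: down_read(&sbi->cp_rwsem) */
	f2fs_allocate_new_segments(sbi, CURSEG_COLD_DATA);
	f2fs_unlock_op(sbi);	/* roughly: up_read(&sbi->cp_rwsem) */

so a pinned-file fallocate() can no longer allocate a new segment (and keep dirtying meta pages) in the middle of an in-flight checkpoint.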
Signed-off-by: Daeho Jeong Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/file.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 67fd1c900eb49..dfa1ac2d751a0 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -1658,7 +1658,11 @@ next_alloc: down_write(&sbi->pin_sem); map.m_seg_type = CURSEG_COLD_DATA_PINNED; + + f2fs_lock_op(sbi); f2fs_allocate_new_segments(sbi, CURSEG_COLD_DATA); + f2fs_unlock_op(sbi); + err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO); up_write(&sbi->pin_sem); -- GitLab From 84597b1f9b051ff75a3471c0331b6875e94b1f7e Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Wed, 27 May 2020 18:27:51 +0800 Subject: [PATCH 0773/1971] f2fs: fix wrong value of tracepoint parameter In f2fs_lookup(), we should set @err correctly before printing it in tracepoint. Signed-off-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/namei.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index 8f55201441f78..e94e02c6580ac 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c @@ -504,6 +504,7 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry, err = PTR_ERR(page); goto out; } + err = -ENOENT; goto out_splice; } @@ -549,7 +550,7 @@ out_splice: #endif new = d_splice_alias(inode, dentry); err = PTR_ERR_OR_ZERO(new); - trace_f2fs_lookup_end(dir, dentry, ino, err); + trace_f2fs_lookup_end(dir, dentry, ino, !new ? -ENOENT : err); return new; out_iput: iput(inode); -- GitLab From 47d0d7d76437ca9f48314844bc44dd08615302a1 Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Wed, 27 May 2020 18:27:52 +0800 Subject: [PATCH 0774/1971] f2fs: remove unneeded return value of __insert_discard_tree() We never use return value of __insert_discard_tree(), so remove it. 
Signed-off-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/segment.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 1c48ec866b8cd..ebbadde6cbced 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -1221,7 +1221,7 @@ submit: return err; } -static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi, +static void __insert_discard_tree(struct f2fs_sb_info *sbi, struct block_device *bdev, block_t lstart, block_t start, block_t len, struct rb_node **insert_p, @@ -1230,7 +1230,6 @@ static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi, struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; struct rb_node **p; struct rb_node *parent = NULL; - struct discard_cmd *dc = NULL; bool leftmost = true; if (insert_p && insert_parent) { @@ -1242,12 +1241,8 @@ static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi, p = f2fs_lookup_rb_tree_for_insert(sbi, &dcc->root, &parent, lstart, &leftmost); do_insert: - dc = __attach_discard_cmd(sbi, bdev, lstart, start, len, parent, + __attach_discard_cmd(sbi, bdev, lstart, start, len, parent, p, leftmost); - if (!dc) - return NULL; - - return dc; } static void __relocate_discard_cmd(struct discard_cmd_control *dcc, -- GitLab From dc35d73a42291b2a68f5b9a12b2b095b82194c1f Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Tue, 26 May 2020 09:55:02 +0800 Subject: [PATCH 0775/1971] f2fs: compress: don't compress any datas after cp stop While compressed data writeback, we need to drop dirty pages like we did for non-compressed pages if cp stops, however it's not needed to compress any data in such case, so let's detect cp stop condition in cluster_may_compress() to avoid redundant compressing and let following f2fs_write_raw_pages() drops dirty pages correctly. Signed-off-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/compress.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c index bf152c0d79fe8..a53578a892112 100644 --- a/fs/f2fs/compress.c +++ b/fs/f2fs/compress.c @@ -849,6 +849,8 @@ static bool cluster_may_compress(struct compress_ctx *cc) return false; if (!f2fs_cluster_is_full(cc)) return false; + if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode)))) + return false; return __cluster_may_compress(cc); } -- GitLab From 429ac8b75a0b1c3478ffd584de8a63075cbe25e7 Mon Sep 17 00:00:00 2001 From: Fenghua Yu Date: Thu, 30 Apr 2020 16:46:35 -0700 Subject: [PATCH 0776/1971] x86/split_lock: Add Icelake microserver and Tigerlake CPU models Icelake microserver CPU supports split lock detection while it doesn't have the split lock enumeration bit in IA32_CORE_CAPABILITIES. Tigerlake CPUs do enumerate the MSR. [ bp: Merge the two model-adding patches into one. 
] Signed-off-by: Fenghua Yu Signed-off-by: Borislav Petkov Reviewed-by: Tony Luck Link: https://lkml.kernel.org/r/1588290395-2677-1-git-send-email-fenghua.yu@intel.com --- arch/x86/kernel/cpu/intel.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index a19a680542ce7..6abbcc774b821 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -1135,9 +1135,12 @@ void switch_to_sld(unsigned long tifn) static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, 0), X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, 0), + X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, 0), X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, 1), X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, 1), X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, 1), + X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, 1), + X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, 1), {} }; -- GitLab From f45db2b909c7e76f35850e78f017221f30282b8e Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 22 May 2020 12:01:32 +1000 Subject: [PATCH 0777/1971] sunrpc: check that domain table is empty at module unload. The domain table should be empty at module unload. If it isn't there is a bug somewhere. So check and report. Link: https://bugzilla.kernel.org/show_bug.cgi?id=206651 Signed-off-by: NeilBrown Signed-off-by: J. Bruce Fields --- net/sunrpc/sunrpc.h | 1 + net/sunrpc/sunrpc_syms.c | 2 ++ net/sunrpc/svcauth.c | 25 +++++++++++++++++++++++++ 3 files changed, 28 insertions(+) diff --git a/net/sunrpc/sunrpc.h b/net/sunrpc/sunrpc.h index 47a756503d11c..f6fe2e6cd65a1 100644 --- a/net/sunrpc/sunrpc.h +++ b/net/sunrpc/sunrpc.h @@ -52,4 +52,5 @@ static inline int sock_is_loopback(struct sock *sk) int rpc_clients_notifier_register(void); void rpc_clients_notifier_unregister(void); +void auth_domain_cleanup(void); #endif /* _NET_SUNRPC_SUNRPC_H */ diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c index f9edaa9174a43..236fadc4a4399 100644 --- a/net/sunrpc/sunrpc_syms.c +++ b/net/sunrpc/sunrpc_syms.c @@ -23,6 +23,7 @@ #include #include +#include "sunrpc.h" #include "netns.h" unsigned int sunrpc_net_id; @@ -131,6 +132,7 @@ cleanup_sunrpc(void) unregister_rpc_pipefs(); rpc_destroy_mempool(); unregister_pernet_subsys(&sunrpc_net_ops); + auth_domain_cleanup(); #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) rpc_unregister_sysctl(); #endif diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c index 552617e3467bd..998b196b61767 100644 --- a/net/sunrpc/svcauth.c +++ b/net/sunrpc/svcauth.c @@ -21,6 +21,8 @@ #include +#include "sunrpc.h" + #define RPCDBG_FACILITY RPCDBG_AUTH @@ -205,3 +207,26 @@ struct auth_domain *auth_domain_find(char *name) return NULL; } EXPORT_SYMBOL_GPL(auth_domain_find); + +/** + * auth_domain_cleanup - check that the auth_domain table is empty + * + * On module unload the auth_domain_table must be empty. To make it + * easier to catch bugs which don't clean up domains properly, we + * warn if anything remains in the table at cleanup time. + * + * Note that we cannot proactively remove the domains at this stage. + * The ->release() function might be in a module that has already been + * unloaded. 
+ */ + +void auth_domain_cleanup(void) +{ + int h; + struct auth_domain *hp; + + for (h = 0; h < DN_HASHMAX; h++) + hlist_for_each_entry(hp, &auth_domain_table[h], hash) + pr_warn("svc: domain %s still present at module unload.\n", + hp->name); +} -- GitLab From d47a5dc2888fd1b94adf1553068b8dad76cec96c Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 22 May 2020 12:01:33 +1000 Subject: [PATCH 0778/1971] sunrpc: svcauth_gss_register_pseudoflavor must reject duplicate registrations. There is no valid case for supporting duplicate pseudoflavor registrations. Currently the silent acceptance of such registrations is hiding a bug. The rpcsec_gss_krb5 module registers 2 flavours but does not unregister them, so if you load, unload, reload the module, it will happily continue to use the old registration which now has pointers to the memory were the module was originally loaded. This could lead to unexpected results. So disallow duplicate registrations. Link: https://bugzilla.kernel.org/show_bug.cgi?id=206651 Cc: stable@vger.kernel.org (v2.6.12+) Signed-off-by: NeilBrown Signed-off-by: J. Bruce Fields --- net/sunrpc/auth_gss/svcauth_gss.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index 50d93c49ef1af..49bb346a62154 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c @@ -826,9 +826,11 @@ svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name) new->h.flavour = &svcauthops_gss; new->pseudoflavor = pseudoflavor; - stat = 0; test = auth_domain_lookup(name, &new->h); - if (test != &new->h) { /* Duplicate registration */ + if (test != &new->h) { + pr_warn("svc: duplicate registration of gss pseudo flavour %s.\n", + name); + stat = -EADDRINUSE; auth_domain_put(test); kfree(new->h.name); goto out_free_dom; -- GitLab From 24c5efe41c29ee3e55bcf5a1c9f61ca8709622e8 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 22 May 2020 12:01:33 +1000 Subject: [PATCH 0779/1971] sunrpc: clean up properly in gss_mech_unregister() gss_mech_register() calls svcauth_gss_register_pseudoflavor() for each flavour, but gss_mech_unregister() does not call auth_domain_put(). This is unbalanced and makes it impossible to reload the module. Change svcauth_gss_register_pseudoflavor() to return the registered auth_domain, and save it for later release. Cc: stable@vger.kernel.org (v2.6.12+) Link: https://bugzilla.kernel.org/show_bug.cgi?id=206651 Signed-off-by: NeilBrown Signed-off-by: J. 
Bruce Fields --- include/linux/sunrpc/gss_api.h | 1 + include/linux/sunrpc/svcauth_gss.h | 3 ++- net/sunrpc/auth_gss/gss_mech_switch.c | 12 +++++++++--- net/sunrpc/auth_gss/svcauth_gss.c | 12 ++++++------ 4 files changed, 18 insertions(+), 10 deletions(-) diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h index bc07e51f20d1c..bf4ac8a0268c6 100644 --- a/include/linux/sunrpc/gss_api.h +++ b/include/linux/sunrpc/gss_api.h @@ -84,6 +84,7 @@ struct pf_desc { u32 service; char *name; char *auth_domain_name; + struct auth_domain *domain; bool datatouch; }; diff --git a/include/linux/sunrpc/svcauth_gss.h b/include/linux/sunrpc/svcauth_gss.h index ca39a388dc225..f09c82b0a7aef 100644 --- a/include/linux/sunrpc/svcauth_gss.h +++ b/include/linux/sunrpc/svcauth_gss.h @@ -20,7 +20,8 @@ int gss_svc_init(void); void gss_svc_shutdown(void); int gss_svc_init_net(struct net *net); void gss_svc_shutdown_net(struct net *net); -int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name); +struct auth_domain *svcauth_gss_register_pseudoflavor(u32 pseudoflavor, + char *name); u32 svcauth_gss_flavor(struct auth_domain *dom); #endif /* _LINUX_SUNRPC_SVCAUTH_GSS_H */ diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c index 69316ab1b9fac..fae632da1058a 100644 --- a/net/sunrpc/auth_gss/gss_mech_switch.c +++ b/net/sunrpc/auth_gss/gss_mech_switch.c @@ -37,6 +37,8 @@ gss_mech_free(struct gss_api_mech *gm) for (i = 0; i < gm->gm_pf_num; i++) { pf = &gm->gm_pfs[i]; + if (pf->domain) + auth_domain_put(pf->domain); kfree(pf->auth_domain_name); pf->auth_domain_name = NULL; } @@ -59,6 +61,7 @@ make_auth_domain_name(char *name) static int gss_mech_svc_setup(struct gss_api_mech *gm) { + struct auth_domain *dom; struct pf_desc *pf; int i, status; @@ -68,10 +71,13 @@ gss_mech_svc_setup(struct gss_api_mech *gm) status = -ENOMEM; if (pf->auth_domain_name == NULL) goto out; - status = svcauth_gss_register_pseudoflavor(pf->pseudoflavor, - pf->auth_domain_name); - if (status) + dom = svcauth_gss_register_pseudoflavor( + pf->pseudoflavor, pf->auth_domain_name); + if (IS_ERR(dom)) { + status = PTR_ERR(dom); goto out; + } + pf->domain = dom; } return 0; out: diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index 49bb346a62154..46027d0c903f9 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c @@ -809,7 +809,7 @@ u32 svcauth_gss_flavor(struct auth_domain *dom) EXPORT_SYMBOL_GPL(svcauth_gss_flavor); -int +struct auth_domain * svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name) { struct gss_domain *new; @@ -832,17 +832,17 @@ svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name) name); stat = -EADDRINUSE; auth_domain_put(test); - kfree(new->h.name); - goto out_free_dom; + goto out_free_name; } - return 0; + return test; +out_free_name: + kfree(new->h.name); out_free_dom: kfree(new); out: - return stat; + return ERR_PTR(stat); } - EXPORT_SYMBOL_GPL(svcauth_gss_register_pseudoflavor); static inline int -- GitLab From a4abc6b12eb1f7a533c2e7484cfa555454ff0977 Mon Sep 17 00:00:00 2001 From: Xiyu Yang Date: Mon, 25 May 2020 22:15:41 +0800 Subject: [PATCH 0780/1971] nfsd: Fix svc_xprt refcnt leak when setup callback client failed nfsd4_process_cb_update() invokes svc_xprt_get(), which increases the refcount of the "c->cn_xprt". The reference counting issue happens in one exception handling path of nfsd4_process_cb_update(). 
When setup callback client failed, the function forgets to decrease the refcnt increased by svc_xprt_get(), causing a refcnt leak. Fix this issue by calling svc_xprt_put() when setup callback client failed. Signed-off-by: Xiyu Yang Signed-off-by: Xin Tan Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs4callback.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 966ca75418c81..7fbe9840a03e7 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -1306,6 +1306,8 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb) err = setup_callback_client(clp, &conn, ses); if (err) { nfsd4_mark_cb_down(clp, err); + if (c) + svc_xprt_put(c->cn_xprt); return; } } -- GitLab From 037e910b52b04fbd41990f643dff97c7e43b0e47 Mon Sep 17 00:00:00 2001 From: Xiyu Yang Date: Mon, 25 May 2020 22:17:02 +0800 Subject: [PATCH 0781/1971] SUNRPC: Remove unreachable error condition in rpcb_getport_async() rpcb_getport_async() invokes rpcb_call_async(), which return the value of rpc_run_task() to "child". Since rpc_run_task() is impossible to return an ERR pointer, there is no need to add the IS_ERR() condition on "child" here. So we need to remove it. Signed-off-by: Xiyu Yang Signed-off-by: Xin Tan Signed-off-by: J. Bruce Fields --- net/sunrpc/rpcb_clnt.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c index 4a020b6888608..c27123e6ba80c 100644 --- a/net/sunrpc/rpcb_clnt.c +++ b/net/sunrpc/rpcb_clnt.c @@ -795,12 +795,6 @@ void rpcb_getport_async(struct rpc_task *task) child = rpcb_call_async(rpcb_clnt, map, proc); rpc_release_client(rpcb_clnt); - if (IS_ERR(child)) { - /* rpcb_map_release() has freed the arguments */ - dprintk("RPC: %5u %s: rpc_run_task failed\n", - task->tk_pid, __func__); - return; - } xprt->stat.bind_count++; rpc_put_task(child); -- GitLab From d6f56321089203a9f740c7dd7eba4de88f98f993 Mon Sep 17 00:00:00 2001 From: Sebastian Reichel Date: Wed, 13 May 2020 20:56:02 +0200 Subject: [PATCH 0782/1971] power: supply: sbs-battery: add POWER_SUPPLY_PROP_CAPACITY_ERROR_MARGIN support Add support for reporting the MaxError register from battery fuel gauges following the smart battery standard. 
Reviewed-by: Emil Velikov Signed-off-by: Sebastian Reichel --- drivers/power/supply/sbs-battery.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/power/supply/sbs-battery.c b/drivers/power/supply/sbs-battery.c index a15783802ef82..4356fdf25d4af 100644 --- a/drivers/power/supply/sbs-battery.c +++ b/drivers/power/supply/sbs-battery.c @@ -26,6 +26,7 @@ enum { REG_TEMPERATURE, REG_VOLTAGE, REG_CURRENT, + REG_MAX_ERR, REG_CAPACITY, REG_TIME_TO_EMPTY, REG_TIME_TO_FULL, @@ -85,6 +86,8 @@ static const struct chip_data { SBS_DATA(POWER_SUPPLY_PROP_VOLTAGE_NOW, 0x09, 0, 20000), [REG_CURRENT] = SBS_DATA(POWER_SUPPLY_PROP_CURRENT_NOW, 0x0A, -32768, 32767), + [REG_MAX_ERR] = + SBS_DATA(POWER_SUPPLY_PROP_CAPACITY_ERROR_MARGIN, 0x0c, 0, 100), [REG_CAPACITY] = SBS_DATA(POWER_SUPPLY_PROP_CAPACITY, 0x0D, 0, 100), [REG_REMAINING_CAPACITY] = @@ -132,6 +135,7 @@ static enum power_supply_property sbs_properties[] = { POWER_SUPPLY_PROP_VOLTAGE_NOW, POWER_SUPPLY_PROP_CURRENT_NOW, POWER_SUPPLY_PROP_CAPACITY, + POWER_SUPPLY_PROP_CAPACITY_ERROR_MARGIN, POWER_SUPPLY_PROP_TEMP, POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, POWER_SUPPLY_PROP_TIME_TO_FULL_AVG, @@ -676,6 +680,7 @@ static int sbs_get_property(struct power_supply *psy, case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN: case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN: case POWER_SUPPLY_PROP_CAPACITY: + case POWER_SUPPLY_PROP_CAPACITY_ERROR_MARGIN: ret = sbs_get_property_index(client, psp); if (ret < 0) break; -- GitLab From c4b12a2f3f3de670f6be5e96092a2cab0b877f1a Mon Sep 17 00:00:00 2001 From: Sebastian Reichel Date: Wed, 13 May 2020 20:56:03 +0200 Subject: [PATCH 0783/1971] power: supply: sbs-battery: simplify read_read_string_data The SBS battery implements SMBus block reads. Currently the driver "emulates" this by doing an I2C byte read for the length followed by an I2C block read. The I2C subsystem actually provides a proper API for doing SMBus block reads, which can and should be used instead. The current implementation does not properly handle packet error checking (PEC). This change requires, that I2C bus drivers support I2C_M_RECV_LEN or directly provide the SMBus API to access device manufacturer and model name. 
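
For illustration only (not part of this patch), a minimal userspace sketch of the same kind of SMBus block read through the i2c-dev interface is shown below; the bus number and the 0x0b slave address are assumptions for a typical SBS battery, it needs the libi2c helpers shipped with i2c-tools, and, as noted above, the bus driver must support SMBus block reads:

/* build: cc sbs_block_read.c -li2c (assumes i2c-tools' libi2c is installed) */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>
#include <i2c/smbus.h>

int main(void)
{
    __u8 buf[I2C_SMBUS_BLOCK_MAX + 1];
    int fd, len;

    fd = open("/dev/i2c-1", O_RDWR);                /* assumed bus number */
    if (fd < 0 || ioctl(fd, I2C_SLAVE, 0x0b) < 0)   /* 0x0b: usual SBS battery address */
        return 1;

    len = i2c_smbus_read_block_data(fd, 0x20, buf); /* 0x20: ManufacturerName */
    if (len >= 0) {
        buf[len] = '\0';                            /* terminate, as the driver does */
        printf("manufacturer: %s\n", buf);
    }
    close(fd);
    return 0;
}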
Reviewed-by: Emil Velikov Signed-off-by: Sebastian Reichel --- drivers/power/supply/sbs-battery.c | 65 ++++++------------------------ 1 file changed, 12 insertions(+), 53 deletions(-) diff --git a/drivers/power/supply/sbs-battery.c b/drivers/power/supply/sbs-battery.c index 4356fdf25d4af..a9a1d28dabbea 100644 --- a/drivers/power/supply/sbs-battery.c +++ b/drivers/power/supply/sbs-battery.c @@ -202,66 +202,32 @@ static int sbs_read_string_data(struct i2c_client *client, u8 address, char *values) { struct sbs_info *chip = i2c_get_clientdata(client); - s32 ret = 0, block_length = 0; - int retries_length, retries_block; - u8 block_buffer[I2C_SMBUS_BLOCK_MAX + 1]; - - retries_length = chip->i2c_retry_count; - retries_block = chip->i2c_retry_count; + int retries = chip->i2c_retry_count; + s32 ret = 0; - /* Adapter needs to support these two functions */ if (!i2c_check_functionality(client->adapter, - I2C_FUNC_SMBUS_BYTE_DATA | - I2C_FUNC_SMBUS_I2C_BLOCK)){ + I2C_FUNC_SMBUS_READ_BLOCK_DATA)) { return -ENODEV; } - /* Get the length of block data */ - while (retries_length > 0) { - ret = i2c_smbus_read_byte_data(client, address); - if (ret >= 0) - break; - retries_length--; - } - - if (ret < 0) { - dev_dbg(&client->dev, - "%s: i2c read at address 0x%x failed\n", - __func__, address); - return ret; - } - - /* block_length does not include NULL terminator */ - block_length = ret; - if (block_length > I2C_SMBUS_BLOCK_MAX) { - dev_err(&client->dev, - "%s: Returned block_length is longer than 0x%x\n", - __func__, I2C_SMBUS_BLOCK_MAX); - return -EINVAL; - } - /* Get the block data */ - while (retries_block > 0) { - ret = i2c_smbus_read_i2c_block_data( - client, address, - block_length + 1, block_buffer); + while (retries > 0) { + ret = i2c_smbus_read_block_data(client, address, values); if (ret >= 0) break; - retries_block--; + retries--; } if (ret < 0) { - dev_dbg(&client->dev, - "%s: i2c read at address 0x%x failed\n", - __func__, address); + dev_dbg(&client->dev, "%s: failed to read block 0x%x: %d\n", + __func__, address, ret); return ret; } - /* block_buffer[0] == block_length */ - memcpy(values, block_buffer + 1, block_length); - values[block_length] = '\0'; + /* add string termination */ + values[ret] = '\0'; - return ret; + return 0; } static int sbs_write_word_data(struct i2c_client *client, u8 address, @@ -465,14 +431,7 @@ static int sbs_get_battery_property(struct i2c_client *client, static int sbs_get_battery_string_property(struct i2c_client *client, int reg_offset, enum power_supply_property psp, char *val) { - s32 ret; - - ret = sbs_read_string_data(client, sbs_data[reg_offset].addr, val); - - if (ret < 0) - return ret; - - return 0; + return sbs_read_string_data(client, sbs_data[reg_offset].addr, val); } static void sbs_unit_adjustment(struct i2c_client *client, -- GitLab From 79bcd5a4a66076a8a8dacd7f4a3be1952283aef4 Mon Sep 17 00:00:00 2001 From: Sebastian Reichel Date: Wed, 13 May 2020 20:56:04 +0200 Subject: [PATCH 0784/1971] power: supply: sbs-battery: add PEC support SBS batteries optionally have support for PEC. This enables PEC handling based on the implemented SBS version as suggested by the standard. The support for PEC is re-evaluated when the battery is hotplugged into the system, since there might be systems supporting batteries from different SBS generations. 
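
The decision hinges on the version field packed into bits 7:4 of the SpecificationInfo word (register 0x1A). A minimal standalone sketch of that decoding, using an assumed sample reading, looks like this:

#include <stdio.h>
#include <stdint.h>

/* SpecificationInfo (0x1A): version field in bits 7:4 */
#define SPEC_INFO_VERSION_MASK  0xf0
#define SPEC_INFO_VERSION_SHIFT 4

int main(void)
{
    uint16_t spec_info = 0x0031;    /* assumed reading: version = 3, revision = 1 */
    uint8_t version = (spec_info & SPEC_INFO_VERSION_MASK) >> SPEC_INFO_VERSION_SHIFT;

    switch (version) {
    case 1: puts("SBS 1.0 - leave PEC disabled"); break;
    case 2: puts("SBS 1.1 - leave PEC disabled"); break;
    case 3: puts("SBS 1.1 with PEC - enable I2C_CLIENT_PEC"); break;
    default: puts("unknown version - leave PEC disabled"); break;
    }
    return 0;
}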
Reviewed-by: Emil Velikov Signed-off-by: Sebastian Reichel --- drivers/power/supply/sbs-battery.c | 72 ++++++++++++++++++++++++++++-- 1 file changed, 69 insertions(+), 3 deletions(-) diff --git a/drivers/power/supply/sbs-battery.c b/drivers/power/supply/sbs-battery.c index a9a1d28dabbea..ab774d4912697 100644 --- a/drivers/power/supply/sbs-battery.c +++ b/drivers/power/supply/sbs-battery.c @@ -46,6 +46,14 @@ enum { REG_MODEL_NAME, }; +#define REG_ADDR_SPEC_INFO 0x1A +#define SPEC_INFO_VERSION_MASK GENMASK(7, 4) +#define SPEC_INFO_VERSION_SHIFT 4 + +#define SBS_VERSION_1_0 1 +#define SBS_VERSION_1_1 2 +#define SBS_VERSION_1_1_WITH_PEC 3 + /* Battery Mode defines */ #define BATTERY_MODE_OFFSET 0x03 #define BATTERY_MODE_CAPACITY_MASK BIT(15) @@ -175,6 +183,64 @@ static char model_name[I2C_SMBUS_BLOCK_MAX + 1]; static char manufacturer[I2C_SMBUS_BLOCK_MAX + 1]; static bool force_load; +static int sbs_update_presence(struct sbs_info *chip, bool is_present) +{ + struct i2c_client *client = chip->client; + int retries = chip->i2c_retry_count; + s32 ret = 0; + u8 version; + + if (chip->is_present == is_present) + return 0; + + if (!is_present) { + chip->is_present = false; + /* Disable PEC when no device is present */ + client->flags &= ~I2C_CLIENT_PEC; + return 0; + } + + /* Check if device supports packet error checking and use it */ + while (retries > 0) { + ret = i2c_smbus_read_word_data(client, REG_ADDR_SPEC_INFO); + if (ret >= 0) + break; + + /* + * Some batteries trigger the detection pin before the + * I2C bus is properly connected. This works around the + * issue. + */ + msleep(100); + + retries--; + } + + if (ret < 0) { + dev_dbg(&client->dev, "failed to read spec info: %d\n", ret); + + /* fallback to old behaviour */ + client->flags &= ~I2C_CLIENT_PEC; + chip->is_present = true; + + return ret; + } + + version = (ret & SPEC_INFO_VERSION_MASK) >> SPEC_INFO_VERSION_SHIFT; + + if (version == SBS_VERSION_1_1_WITH_PEC) + client->flags |= I2C_CLIENT_PEC; + else + client->flags &= ~I2C_CLIENT_PEC; + + dev_dbg(&client->dev, "PEC: %s\n", (client->flags & I2C_CLIENT_PEC) ? + "enabled" : "disabled"); + + chip->is_present = true; + + return 0; +} + static int sbs_read_word_data(struct i2c_client *client, u8 address) { struct sbs_info *chip = i2c_get_clientdata(client); @@ -579,7 +645,7 @@ static int sbs_get_property(struct power_supply *psy, return ret; if (psp == POWER_SUPPLY_PROP_PRESENT) { val->intval = ret; - chip->is_present = val->intval; + sbs_update_presence(chip, ret); return 0; } if (ret == 0) @@ -678,7 +744,7 @@ static int sbs_get_property(struct power_supply *psy, if (!chip->gpio_detect && chip->is_present != (ret >= 0)) { - chip->is_present = (ret >= 0); + sbs_update_presence(chip, (ret >= 0)); power_supply_changed(chip->power_supply); } @@ -709,7 +775,7 @@ static void sbs_supply_changed(struct sbs_info *chip) ret = gpiod_get_value_cansleep(chip->gpio_detect); if (ret < 0) return; - chip->is_present = ret; + sbs_update_presence(chip, ret); power_supply_changed(battery); } -- GitLab From 8ce6ee43bd6e8ec0dc1f4bcbfeb103e634233d28 Mon Sep 17 00:00:00 2001 From: Sebastian Reichel Date: Wed, 13 May 2020 20:56:05 +0200 Subject: [PATCH 0785/1971] power: supply: sbs-battery: add POWER_SUPPLY_PROP_CURRENT_AVG support Expose averaged current information, which is part of the SBS standard and should be supported by all batteries. 
Reviewed-by: Emil Velikov Signed-off-by: Sebastian Reichel --- drivers/power/supply/sbs-battery.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/drivers/power/supply/sbs-battery.c b/drivers/power/supply/sbs-battery.c index ab774d4912697..611a113854962 100644 --- a/drivers/power/supply/sbs-battery.c +++ b/drivers/power/supply/sbs-battery.c @@ -25,7 +25,8 @@ enum { REG_MANUFACTURER_DATA, REG_TEMPERATURE, REG_VOLTAGE, - REG_CURRENT, + REG_CURRENT_NOW, + REG_CURRENT_AVG, REG_MAX_ERR, REG_CAPACITY, REG_TIME_TO_EMPTY, @@ -92,8 +93,10 @@ static const struct chip_data { SBS_DATA(POWER_SUPPLY_PROP_TEMP, 0x08, 0, 65535), [REG_VOLTAGE] = SBS_DATA(POWER_SUPPLY_PROP_VOLTAGE_NOW, 0x09, 0, 20000), - [REG_CURRENT] = + [REG_CURRENT_NOW] = SBS_DATA(POWER_SUPPLY_PROP_CURRENT_NOW, 0x0A, -32768, 32767), + [REG_CURRENT_AVG] = + SBS_DATA(POWER_SUPPLY_PROP_CURRENT_AVG, 0x0B, -32768, 32767), [REG_MAX_ERR] = SBS_DATA(POWER_SUPPLY_PROP_CAPACITY_ERROR_MARGIN, 0x0c, 0, 100), [REG_CAPACITY] = @@ -142,6 +145,7 @@ static enum power_supply_property sbs_properties[] = { POWER_SUPPLY_PROP_CYCLE_COUNT, POWER_SUPPLY_PROP_VOLTAGE_NOW, POWER_SUPPLY_PROP_CURRENT_NOW, + POWER_SUPPLY_PROP_CURRENT_AVG, POWER_SUPPLY_PROP_CAPACITY, POWER_SUPPLY_PROP_CAPACITY_ERROR_MARGIN, POWER_SUPPLY_PROP_TEMP, @@ -324,7 +328,7 @@ static int sbs_status_correct(struct i2c_client *client, int *intval) { int ret; - ret = sbs_read_word_data(client, sbs_data[REG_CURRENT].addr); + ret = sbs_read_word_data(client, sbs_data[REG_CURRENT_NOW].addr); if (ret < 0) return ret; @@ -521,6 +525,7 @@ static void sbs_unit_adjustment(struct i2c_client *client, case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN: case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN: case POWER_SUPPLY_PROP_CURRENT_NOW: + case POWER_SUPPLY_PROP_CURRENT_AVG: case POWER_SUPPLY_PROP_CHARGE_NOW: case POWER_SUPPLY_PROP_CHARGE_FULL: case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: @@ -699,6 +704,7 @@ static int sbs_get_property(struct power_supply *psy, case POWER_SUPPLY_PROP_CYCLE_COUNT: case POWER_SUPPLY_PROP_VOLTAGE_NOW: case POWER_SUPPLY_PROP_CURRENT_NOW: + case POWER_SUPPLY_PROP_CURRENT_AVG: case POWER_SUPPLY_PROP_TEMP: case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG: case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG: -- GitLab From 3e9544f7a3428a524f8877f6fd99d50848c765ff Mon Sep 17 00:00:00 2001 From: Sebastian Reichel Date: Wed, 13 May 2020 20:56:06 +0200 Subject: [PATCH 0786/1971] power: supply: sbs-battery: Improve POWER_SUPPLY_PROP_TECHNOLOGY support This reads the battery chemistry from the battery chip instead of incorrectly hardcoding the type to be Li-Ion. 
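
For reference, the mapping is based on the DeviceChemistry string (register 0x22); a small standalone sketch of the same comparison logic, using example strings (the "PbAc" entry is only an illustration of the unknown-chemistry fallback), might look like:

#include <stdio.h>
#include <strings.h>

/* Map an SBS DeviceChemistry string (reg 0x22) to a technology name. */
static const char *sbs_technology(const char *chem)
{
    if (!strncasecmp(chem, "LION", 4))
        return "Li-ion";
    if (!strncasecmp(chem, "LiP", 3))
        return "Li-poly";
    if (!strncasecmp(chem, "NiCd", 4))
        return "NiCd";
    if (!strncasecmp(chem, "NiMH", 4))
        return "NiMH";
    return "Unknown";
}

int main(void)
{
    const char *samples[] = { "LION", "LiP", "NiMH", "PbAc" };

    for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
        printf("%s -> %s\n", samples[i], sbs_technology(samples[i]));
    return 0;
}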
Reviewed-by: Emil Velikov Signed-off-by: Sebastian Reichel --- drivers/power/supply/sbs-battery.c | 43 ++++++++++++++++++++++++++++-- 1 file changed, 41 insertions(+), 2 deletions(-) diff --git a/drivers/power/supply/sbs-battery.c b/drivers/power/supply/sbs-battery.c index 611a113854962..b697a42d9ccf8 100644 --- a/drivers/power/supply/sbs-battery.c +++ b/drivers/power/supply/sbs-battery.c @@ -43,6 +43,7 @@ enum { REG_DESIGN_CAPACITY_CHARGE, REG_DESIGN_VOLTAGE_MIN, REG_DESIGN_VOLTAGE_MAX, + REG_CHEMISTRY, REG_MANUFACTURER, REG_MODEL_NAME, }; @@ -133,7 +134,9 @@ static const struct chip_data { [REG_MANUFACTURER] = SBS_DATA(POWER_SUPPLY_PROP_MANUFACTURER, 0x20, 0, 65535), [REG_MODEL_NAME] = - SBS_DATA(POWER_SUPPLY_PROP_MODEL_NAME, 0x21, 0, 65535) + SBS_DATA(POWER_SUPPLY_PROP_MODEL_NAME, 0x21, 0, 65535), + [REG_CHEMISTRY] = + SBS_DATA(POWER_SUPPLY_PROP_TECHNOLOGY, 0x22, 0, 65535) }; static enum power_supply_property sbs_properties[] = { @@ -185,6 +188,7 @@ struct sbs_info { static char model_name[I2C_SMBUS_BLOCK_MAX + 1]; static char manufacturer[I2C_SMBUS_BLOCK_MAX + 1]; +static char chemistry[I2C_SMBUS_BLOCK_MAX + 1]; static bool force_load; static int sbs_update_presence(struct sbs_info *chip, bool is_present) @@ -636,6 +640,38 @@ static int sbs_get_property_index(struct i2c_client *client, return -EINVAL; } +static int sbs_get_chemistry(struct i2c_client *client, + union power_supply_propval *val) +{ + enum power_supply_property psp = POWER_SUPPLY_PROP_TECHNOLOGY; + int ret; + + ret = sbs_get_property_index(client, psp); + if (ret < 0) + return ret; + + ret = sbs_get_battery_string_property(client, ret, psp, + chemistry); + if (ret < 0) + return ret; + + if (!strncasecmp(chemistry, "LION", 4)) + val->intval = POWER_SUPPLY_TECHNOLOGY_LION; + else if (!strncasecmp(chemistry, "LiP", 3)) + val->intval = POWER_SUPPLY_TECHNOLOGY_LIPO; + else if (!strncasecmp(chemistry, "NiCd", 4)) + val->intval = POWER_SUPPLY_TECHNOLOGY_NiCd; + else if (!strncasecmp(chemistry, "NiMH", 4)) + val->intval = POWER_SUPPLY_TECHNOLOGY_NiMH; + else + val->intval = POWER_SUPPLY_TECHNOLOGY_UNKNOWN; + + if (val->intval == POWER_SUPPLY_TECHNOLOGY_UNKNOWN) + dev_warn(&client->dev, "Unknown chemistry: %s\n", chemistry); + + return 0; +} + static int sbs_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) @@ -673,7 +709,10 @@ static int sbs_get_property(struct power_supply *psy, break; case POWER_SUPPLY_PROP_TECHNOLOGY: - val->intval = POWER_SUPPLY_TECHNOLOGY_LION; + ret = sbs_get_chemistry(client, val); + if (ret < 0) + break; + goto done; /* don't trigger power_supply_changed()! */ case POWER_SUPPLY_PROP_ENERGY_NOW: -- GitLab From 787fdbcf5b8bce9eea96315eb618f38bf849a76c Mon Sep 17 00:00:00 2001 From: Sebastian Reichel Date: Wed, 13 May 2020 20:56:07 +0200 Subject: [PATCH 0787/1971] power: supply: sbs-battery: add POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT/VOLTAGE_MAX support Expose maximum charge current/voltage information requested by the battery. 
Reviewed-by: Emil Velikov Signed-off-by: Sebastian Reichel --- drivers/power/supply/sbs-battery.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/drivers/power/supply/sbs-battery.c b/drivers/power/supply/sbs-battery.c index b697a42d9ccf8..e6f61baa9bede 100644 --- a/drivers/power/supply/sbs-battery.c +++ b/drivers/power/supply/sbs-battery.c @@ -46,6 +46,8 @@ enum { REG_CHEMISTRY, REG_MANUFACTURER, REG_MODEL_NAME, + REG_CHARGE_CURRENT, + REG_CHARGE_VOLTAGE, }; #define REG_ADDR_SPEC_INFO 0x1A @@ -114,6 +116,10 @@ static const struct chip_data { SBS_DATA(POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, 0x12, 0, 65535), [REG_TIME_TO_FULL] = SBS_DATA(POWER_SUPPLY_PROP_TIME_TO_FULL_AVG, 0x13, 0, 65535), + [REG_CHARGE_CURRENT] = + SBS_DATA(POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, 0x14, 0, 65535), + [REG_CHARGE_VOLTAGE] = + SBS_DATA(POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX, 0x15, 0, 65535), [REG_STATUS] = SBS_DATA(POWER_SUPPLY_PROP_STATUS, 0x16, 0, 65535), [REG_CAPACITY_LEVEL] = @@ -163,6 +169,8 @@ static enum power_supply_property sbs_properties[] = { POWER_SUPPLY_PROP_CHARGE_NOW, POWER_SUPPLY_PROP_CHARGE_FULL, POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX, /* Properties of type `const char *' */ POWER_SUPPLY_PROP_MANUFACTURER, POWER_SUPPLY_PROP_MODEL_NAME @@ -531,6 +539,8 @@ static void sbs_unit_adjustment(struct i2c_client *client, case POWER_SUPPLY_PROP_CURRENT_NOW: case POWER_SUPPLY_PROP_CURRENT_AVG: case POWER_SUPPLY_PROP_CHARGE_NOW: + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX: + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX: case POWER_SUPPLY_PROP_CHARGE_FULL: case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: val->intval *= BASE_UNIT_CONVERSION; @@ -749,6 +759,8 @@ static int sbs_get_property(struct power_supply *psy, case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG: case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN: case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN: + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX: + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX: case POWER_SUPPLY_PROP_CAPACITY: case POWER_SUPPLY_PROP_CAPACITY_ERROR_MARGIN: ret = sbs_get_property_index(client, psp); -- GitLab From 7721c2fd2668c751f9ba33b35db08b86293869e5 Mon Sep 17 00:00:00 2001 From: Sebastian Reichel Date: Wed, 13 May 2020 20:56:08 +0200 Subject: [PATCH 0788/1971] power: supply: sbs-battery: add MANUFACTURE_DATE support Expose the battery's manufacture date to userspace. 
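
The ManufactureDate word (register 0x1B) packs day, month and year into bit fields; a standalone sketch of the decoding used here, with a made-up sample value, is:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint16_t raw = 0x50AD;                 /* assumed raw reading from register 0x1B */

    unsigned day   = raw & 0x1f;           /* bits 4:0  */
    unsigned month = (raw >> 5) & 0x0f;    /* bits 8:5  */
    unsigned year  = (raw >> 9) + 1980;    /* bits 15:9, offset from 1980 */

    printf("%04u-%02u-%02u\n", year, month, day);   /* 0x50AD -> 2020-05-13 */
    return 0;
}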
Reviewed-by: Emil Velikov Signed-off-by: Sebastian Reichel --- drivers/power/supply/sbs-battery.c | 43 ++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/drivers/power/supply/sbs-battery.c b/drivers/power/supply/sbs-battery.c index e6f61baa9bede..4fa553d61db23 100644 --- a/drivers/power/supply/sbs-battery.c +++ b/drivers/power/supply/sbs-battery.c @@ -58,6 +58,8 @@ enum { #define SBS_VERSION_1_1 2 #define SBS_VERSION_1_1_WITH_PEC 3 +#define REG_ADDR_MANUFACTURE_DATE 0x1B + /* Battery Mode defines */ #define BATTERY_MODE_OFFSET 0x03 #define BATTERY_MODE_CAPACITY_MASK BIT(15) @@ -171,6 +173,9 @@ static enum power_supply_property sbs_properties[] = { POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX, + POWER_SUPPLY_PROP_MANUFACTURE_YEAR, + POWER_SUPPLY_PROP_MANUFACTURE_MONTH, + POWER_SUPPLY_PROP_MANUFACTURE_DAY, /* Properties of type `const char *' */ POWER_SUPPLY_PROP_MANUFACTURER, POWER_SUPPLY_PROP_MODEL_NAME @@ -682,6 +687,38 @@ static int sbs_get_chemistry(struct i2c_client *client, return 0; } +static int sbs_get_battery_manufacture_date(struct i2c_client *client, + enum power_supply_property psp, + union power_supply_propval *val) +{ + int ret; + u16 day, month, year; + + ret = sbs_read_word_data(client, REG_ADDR_MANUFACTURE_DATE); + if (ret < 0) + return ret; + + day = ret & GENMASK(4, 0); + month = (ret & GENMASK(8, 5)) >> 5; + year = ((ret & GENMASK(15, 9)) >> 9) + 1980; + + switch (psp) { + case POWER_SUPPLY_PROP_MANUFACTURE_YEAR: + val->intval = year; + break; + case POWER_SUPPLY_PROP_MANUFACTURE_MONTH: + val->intval = month; + break; + case POWER_SUPPLY_PROP_MANUFACTURE_DAY: + val->intval = day; + break; + default: + return -EINVAL; + } + + return 0; +} + static int sbs_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) @@ -790,6 +827,12 @@ static int sbs_get_property(struct power_supply *psy, val->strval = manufacturer; break; + case POWER_SUPPLY_PROP_MANUFACTURE_YEAR: + case POWER_SUPPLY_PROP_MANUFACTURE_MONTH: + case POWER_SUPPLY_PROP_MANUFACTURE_DAY: + ret = sbs_get_battery_manufacture_date(client, psp, val); + break; + default: dev_err(&client->dev, "%s: INVALID property\n", __func__); -- GitLab From 6f72a07aa6e9fc0f7aa08b19f960be7748bad10f Mon Sep 17 00:00:00 2001 From: Sebastian Reichel Date: Wed, 13 May 2020 20:56:09 +0200 Subject: [PATCH 0789/1971] power: supply: sbs-battery: add POWER_SUPPLY_HEALTH_CALIBRATION_REQUIRED support Add support for reporting the SBS battery's condition flag to userspace using the new "Calibration required" health status. 
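
The flag reported here is bit 7 (the condition flag) of the BatteryMode register (0x03); a minimal standalone sketch of the check, using an assumed register value, is:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint16_t battery_mode = 0x6081;        /* assumed BatteryMode (0x03) reading */

    /* bit 7: condition flag, reported above as POWER_SUPPLY_HEALTH_CALIBRATION_REQUIRED */
    if (battery_mode & (1u << 7))
        puts("health: Calibration required");
    else
        puts("health: Unknown (no general SBS health command)");
    return 0;
}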
Reviewed-by: Emil Velikov Signed-off-by: Sebastian Reichel --- drivers/power/supply/sbs-battery.c | 27 ++++++++++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) diff --git a/drivers/power/supply/sbs-battery.c b/drivers/power/supply/sbs-battery.c index 4fa553d61db23..2a2b926ad75c8 100644 --- a/drivers/power/supply/sbs-battery.c +++ b/drivers/power/supply/sbs-battery.c @@ -23,6 +23,7 @@ enum { REG_MANUFACTURER_DATA, + REG_BATTERY_MODE, REG_TEMPERATURE, REG_VOLTAGE, REG_CURRENT_NOW, @@ -94,6 +95,8 @@ static const struct chip_data { } sbs_data[] = { [REG_MANUFACTURER_DATA] = SBS_DATA(POWER_SUPPLY_PROP_PRESENT, 0x00, 0, 65535), + [REG_BATTERY_MODE] = + SBS_DATA(-1, 0x03, 0, 65535), [REG_TEMPERATURE] = SBS_DATA(POWER_SUPPLY_PROP_TEMP, 0x08, 0, 65535), [REG_VOLTAGE] = @@ -366,6 +369,17 @@ static int sbs_status_correct(struct i2c_client *client, int *intval) return 0; } +static bool sbs_bat_needs_calibration(struct i2c_client *client) +{ + int ret; + + ret = sbs_read_word_data(client, sbs_data[REG_BATTERY_MODE].addr); + if (ret < 0) + return false; + + return !!(ret & BIT(7)); +} + static int sbs_get_battery_presence_and_health( struct i2c_client *client, enum power_supply_property psp, union power_supply_propval *val) @@ -385,9 +399,14 @@ static int sbs_get_battery_presence_and_health( if (psp == POWER_SUPPLY_PROP_PRESENT) val->intval = 1; /* battery present */ - else /* POWER_SUPPLY_PROP_HEALTH */ - /* SBS spec doesn't have a general health command. */ - val->intval = POWER_SUPPLY_HEALTH_UNKNOWN; + else { /* POWER_SUPPLY_PROP_HEALTH */ + if (sbs_bat_needs_calibration(client)) { + val->intval = POWER_SUPPLY_HEALTH_CALIBRATION_REQUIRED; + } else { + /* SBS spec doesn't have a general health command. */ + val->intval = POWER_SUPPLY_HEALTH_UNKNOWN; + } + } return 0; } @@ -441,6 +460,8 @@ static int sbs_get_ti_battery_presence_and_health( val->intval = POWER_SUPPLY_HEALTH_OVERHEAT; else if (ret == 0x0C) val->intval = POWER_SUPPLY_HEALTH_DEAD; + else if (sbs_bat_needs_calibration(client)) + val->intval = POWER_SUPPLY_HEALTH_CALIBRATION_REQUIRED; else val->intval = POWER_SUPPLY_HEALTH_GOOD; } -- GitLab From f0318bc99c8107acddde3aa99a7f696c0999f37c Mon Sep 17 00:00:00 2001 From: Sebastian Reichel Date: Wed, 13 May 2020 20:56:10 +0200 Subject: [PATCH 0790/1971] power: supply: sbs-battery: fix idle battery status A battery, that is neither charged, nor discharged is not always Full. If the charger is disabled for other reasons it might simply be idle and should be marked accordingly. Signed-off-by: Sebastian Reichel --- drivers/power/supply/sbs-battery.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/power/supply/sbs-battery.c b/drivers/power/supply/sbs-battery.c index 2a2b926ad75c8..e3c685b2c2479 100644 --- a/drivers/power/supply/sbs-battery.c +++ b/drivers/power/supply/sbs-battery.c @@ -354,9 +354,9 @@ static int sbs_status_correct(struct i2c_client *client, int *intval) ret = (s16)ret; - /* Not drawing current means full (cannot be not charging) */ - if (ret == 0) - *intval = POWER_SUPPLY_STATUS_FULL; + /* Not drawing current -> not charging (i.e. 
idle) */ + if (*intval != POWER_SUPPLY_STATUS_FULL && ret == 0) + *intval = POWER_SUPPLY_STATUS_NOT_CHARGING; if (*intval == POWER_SUPPLY_STATUS_FULL) { /* Drawing or providing current when full */ -- GitLab From 182fc88268f3e33fb81cbb373b30d656daf0cb48 Mon Sep 17 00:00:00 2001 From: Jean-Francois Dagenais Date: Wed, 13 May 2020 20:56:11 +0200 Subject: [PATCH 0791/1971] power: supply: sbs-battery: add ability to disable charger broadcasts In certain designs, it is possible to add a battery on a populated i2c bus without an sbs compliant charger. In that case, the battery will unnecessarily and sometimes undesirably master the bus trying to write info in the charger. It is observed in many occasion that these battery "broadcasts" are even corrupting other ongoing master to slave communication. I.e. the multi-master support in the battery is inadequate. Thankfully, the CHARGER_MODE bit allows designers to disable that SBS battery behaviour. This needs to be done once when the battery is first seen on the bus. Signed-off-by: Jean-Francois Dagenais [rebased code] Signed-off-by: Sebastian Reichel --- .../bindings/power/supply/sbs_sbs-battery.txt | 2 ++ drivers/power/supply/sbs-battery.c | 29 +++++++++++++++++++ 2 files changed, 31 insertions(+) diff --git a/Documentation/devicetree/bindings/power/supply/sbs_sbs-battery.txt b/Documentation/devicetree/bindings/power/supply/sbs_sbs-battery.txt index fa5a8b516dbfd..a5093ccef5c55 100644 --- a/Documentation/devicetree/bindings/power/supply/sbs_sbs-battery.txt +++ b/Documentation/devicetree/bindings/power/supply/sbs_sbs-battery.txt @@ -16,6 +16,7 @@ Optional properties : after an external change notification. - sbs,battery-detect-gpios : The gpio which signals battery detection and a flag specifying its polarity. 
+ - sbs,disable-charger-broadcasts: for systems without sbs compliant chargers Example: @@ -25,4 +26,5 @@ Example: sbs,i2c-retry-count = <2>; sbs,poll-retry-count = <10>; sbs,battery-detect-gpios = <&gpio-controller 122 1>; + sbs,disable-charger-broadcasts; } diff --git a/drivers/power/supply/sbs-battery.c b/drivers/power/supply/sbs-battery.c index e3c685b2c2479..7c6905a486dac 100644 --- a/drivers/power/supply/sbs-battery.c +++ b/drivers/power/supply/sbs-battery.c @@ -68,6 +68,7 @@ enum sbs_capacity_mode { CAPACITY_MODE_AMPS = 0, CAPACITY_MODE_WATTS = BATTERY_MODE_CAPACITY_MASK }; +#define BATTERY_MODE_CHARGER_MASK (1<<14) /* manufacturer access defines */ #define MANUFACTURER_ACCESS_STATUS 0x0006 @@ -193,6 +194,7 @@ struct sbs_info { bool is_present; struct gpio_desc *gpio_detect; bool enable_detection; + bool charger_broadcasts; int last_state; int poll_time; u32 i2c_retry_count; @@ -207,6 +209,27 @@ static char manufacturer[I2C_SMBUS_BLOCK_MAX + 1]; static char chemistry[I2C_SMBUS_BLOCK_MAX + 1]; static bool force_load; +static int sbs_read_word_data(struct i2c_client *client, u8 address); +static int sbs_write_word_data(struct i2c_client *client, u8 address, u16 value); + +static void sbs_disable_charger_broadcasts(struct sbs_info *chip) +{ + int val = sbs_read_word_data(chip->client, BATTERY_MODE_OFFSET); + if (val < 0) + goto exit; + + val |= BATTERY_MODE_CHARGER_MASK; + + val = sbs_write_word_data(chip->client, BATTERY_MODE_OFFSET, val); + +exit: + if (val < 0) + dev_err(&chip->client->dev, + "Failed to disable charger broadcasting: %d\n", val); + else + dev_dbg(&chip->client->dev, "%s\n", __func__); +} + static int sbs_update_presence(struct sbs_info *chip, bool is_present) { struct i2c_client *client = chip->client; @@ -260,6 +283,9 @@ static int sbs_update_presence(struct sbs_info *chip, bool is_present) dev_dbg(&client->dev, "PEC: %s\n", (client->flags & I2C_CLIENT_PEC) ? "enabled" : "disabled"); + if (!chip->is_present && is_present && !chip->charger_broadcasts) + sbs_disable_charger_broadcasts(chip); + chip->is_present = true; return 0; @@ -1017,6 +1043,9 @@ static int sbs_probe(struct i2c_client *client, } chip->i2c_retry_count = chip->i2c_retry_count + 1; + chip->charger_broadcasts = !of_property_read_bool(client->dev.of_node, + "sbs,disable-charger-broadcasts"); + chip->gpio_detect = devm_gpiod_get_optional(&client->dev, "sbs,battery-detect", GPIOD_IN); if (IS_ERR(chip->gpio_detect)) { -- GitLab From 03b758ba36f0656d905f595eea33ab247d5fc945 Mon Sep 17 00:00:00 2001 From: Sebastian Reichel Date: Wed, 13 May 2020 20:56:12 +0200 Subject: [PATCH 0792/1971] power: supply: sbs-battery: switch from of_property_* to device_property_* Switch from DT specific of_property_* API to generic and more modern device_property_* API. 
Signed-off-by: Sebastian Reichel --- drivers/power/supply/sbs-battery.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/power/supply/sbs-battery.c b/drivers/power/supply/sbs-battery.c index 7c6905a486dac..73dfe526c8676 100644 --- a/drivers/power/supply/sbs-battery.c +++ b/drivers/power/supply/sbs-battery.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include #include #include @@ -1016,7 +1016,7 @@ static int sbs_probe(struct i2c_client *client, if (!chip) return -ENOMEM; - chip->flags = (u32)(uintptr_t)of_device_get_match_data(&client->dev); + chip->flags = (u32)(uintptr_t)device_get_match_data(&client->dev); chip->client = client; chip->enable_detection = false; psy_cfg.of_node = client->dev.of_node; @@ -1027,13 +1027,13 @@ static int sbs_probe(struct i2c_client *client, /* use pdata if available, fall back to DT properties, * or hardcoded defaults if not */ - rc = of_property_read_u32(client->dev.of_node, "sbs,i2c-retry-count", - &chip->i2c_retry_count); + rc = device_property_read_u32(&client->dev, "sbs,i2c-retry-count", + &chip->i2c_retry_count); if (rc) chip->i2c_retry_count = 0; - rc = of_property_read_u32(client->dev.of_node, "sbs,poll-retry-count", - &chip->poll_retry_count); + rc = device_property_read_u32(&client->dev, "sbs,poll-retry-count", + &chip->poll_retry_count); if (rc) chip->poll_retry_count = 0; @@ -1043,7 +1043,7 @@ static int sbs_probe(struct i2c_client *client, } chip->i2c_retry_count = chip->i2c_retry_count + 1; - chip->charger_broadcasts = !of_property_read_bool(client->dev.of_node, + chip->charger_broadcasts = !device_property_read_bool(&client->dev, "sbs,disable-charger-broadcasts"); chip->gpio_detect = devm_gpiod_get_optional(&client->dev, -- GitLab From f9ca07a123c81b6b6e1070a515267d89956efc9e Mon Sep 17 00:00:00 2001 From: Sebastian Reichel Date: Wed, 13 May 2020 20:56:13 +0200 Subject: [PATCH 0793/1971] power: supply: sbs-battery: switch to i2c's probe_new sbs-battery does not use the ID parameter, so switch to i2c's probe_new API. 
Signed-off-by: Sebastian Reichel --- drivers/power/supply/sbs-battery.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/power/supply/sbs-battery.c b/drivers/power/supply/sbs-battery.c index 73dfe526c8676..f0392be350eb5 100644 --- a/drivers/power/supply/sbs-battery.c +++ b/drivers/power/supply/sbs-battery.c @@ -992,8 +992,7 @@ static const struct power_supply_desc sbs_default_desc = { .external_power_changed = sbs_external_power_changed, }; -static int sbs_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int sbs_probe(struct i2c_client *client) { struct sbs_info *chip; struct power_supply_desc *sbs_desc; @@ -1172,7 +1171,7 @@ static const struct of_device_id sbs_dt_ids[] = { MODULE_DEVICE_TABLE(of, sbs_dt_ids); static struct i2c_driver sbs_battery_driver = { - .probe = sbs_probe, + .probe_new = sbs_probe, .remove = sbs_remove, .alert = sbs_alert, .id_table = sbs_id, -- GitLab From 68956dbe6fb446ae1da60b368116e7e04f385dcb Mon Sep 17 00:00:00 2001 From: Sebastian Reichel Date: Wed, 13 May 2020 20:56:14 +0200 Subject: [PATCH 0794/1971] power: supply: sbs-battery: constify power-supply property array Signed-off-by: Sebastian Reichel --- drivers/power/supply/sbs-battery.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/power/supply/sbs-battery.c b/drivers/power/supply/sbs-battery.c index f0392be350eb5..f4f73e669460a 100644 --- a/drivers/power/supply/sbs-battery.c +++ b/drivers/power/supply/sbs-battery.c @@ -151,7 +151,7 @@ static const struct chip_data { SBS_DATA(POWER_SUPPLY_PROP_TECHNOLOGY, 0x22, 0, 65535) }; -static enum power_supply_property sbs_properties[] = { +static const enum power_supply_property sbs_properties[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_CAPACITY_LEVEL, POWER_SUPPLY_PROP_HEALTH, -- GitLab From 805f64e8b5cf5838b1a0b4b9abce9084db2cf8f4 Mon Sep 17 00:00:00 2001 From: Sebastian Reichel Date: Wed, 13 May 2020 20:56:15 +0200 Subject: [PATCH 0795/1971] dt-bindings: power: sbs-battery: Convert to yaml Convert sbs-battery bindings to YAML. Reviewed-by: Rob Herring Signed-off-by: Sebastian Reichel --- .../power/supply/sbs,sbs-battery.yaml | 83 +++++++++++++++++++ .../bindings/power/supply/sbs_sbs-battery.txt | 30 ------- 2 files changed, 83 insertions(+), 30 deletions(-) create mode 100644 Documentation/devicetree/bindings/power/supply/sbs,sbs-battery.yaml delete mode 100644 Documentation/devicetree/bindings/power/supply/sbs_sbs-battery.txt diff --git a/Documentation/devicetree/bindings/power/supply/sbs,sbs-battery.yaml b/Documentation/devicetree/bindings/power/supply/sbs,sbs-battery.yaml new file mode 100644 index 0000000000000..205bc826bd208 --- /dev/null +++ b/Documentation/devicetree/bindings/power/supply/sbs,sbs-battery.yaml @@ -0,0 +1,83 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/power/supply/sbs,sbs-battery.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: SBS compliant battery + +maintainers: + - Sebastian Reichel + +description: | + Battery compatible with the smart battery system specifications + +properties: + + compatible: + oneOf: + - items: + - enum: + - ti,bq20z65 + - ti,bq20z75 + - enum: + - sbs,sbs-battery + - items: + - const: sbs,sbs-battery + + reg: + maxItems: 1 + + sbs,i2c-retry-count: + description: + The number of times to retry I2C transactions on I2C IO failure. 
+ default: 0 + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32 + + sbs,poll-retry-count: + description: + The number of times to try looking for new status after an external + change notification. + default: 0 + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32 + + sbs,battery-detect-gpios: + description: + GPIO which signals battery detection. If this is not supplied, the bus + needs to be polled to detect the battery. + maxItems: 1 + + sbs,disable-charger-broadcasts: + description: + SBS batteries by default send broadcast messages to SBS compliant chargers to + configure max. charge current/voltage. If your hardware does not have an SBS + compliant charger it should be disabled via this property to avoid blocking + the bus. Also some SBS battery fuel gauges are known to have a buggy multi- + master implementation. + type: boolean + +required: + - compatible + - reg + +additionalProperties: false + +examples: + - | + #include + + i2c { + #address-cells = <1>; + #size-cells = <0>; + + battery@b { + compatible = "ti,bq20z75", "sbs,sbs-battery"; + reg = <0xb>; + sbs,i2c-retry-count = <2>; + sbs,poll-retry-count = <10>; + sbs,battery-detect-gpios = <&gpio 122 GPIO_ACTIVE_HIGH>; + sbs,disable-charger-broadcasts; + }; + }; diff --git a/Documentation/devicetree/bindings/power/supply/sbs_sbs-battery.txt b/Documentation/devicetree/bindings/power/supply/sbs_sbs-battery.txt deleted file mode 100644 index a5093ccef5c55..0000000000000 --- a/Documentation/devicetree/bindings/power/supply/sbs_sbs-battery.txt +++ /dev/null @@ -1,30 +0,0 @@ -SBS sbs-battery -~~~~~~~~~~ - -Required properties : - - compatible: ",", "sbs,sbs-battery" as fallback. The - part number compatible string might be used in order to take care of - vendor specific registers. - Known ,: - ti,bq20z65 - ti,bq20z75 - -Optional properties : - - sbs,i2c-retry-count : The number of times to retry i2c transactions on i2c - IO failure. - - sbs,poll-retry-count : The number of times to try looking for new status - after an external change notification. - - sbs,battery-detect-gpios : The gpio which signals battery detection and - a flag specifying its polarity. - - sbs,disable-charger-broadcasts: for systems without sbs compliant chargers - -Example: - - battery@b { - compatible = "ti,bq20z75", "sbs,sbs-battery"; - reg = <0xb>; - sbs,i2c-retry-count = <2>; - sbs,poll-retry-count = <10>; - sbs,battery-detect-gpios = <&gpio-controller 122 1>; - sbs,disable-charger-broadcasts; - } -- GitLab From d036466330d13039a09e407b23a0dabec1a9533a Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 28 May 2020 23:12:19 +0100 Subject: [PATCH 0796/1971] clk: intel: remove redundant initialization of variable rate64 The variable rate64 is being initialized with a value that is never read and it is being updated later with a new value. The initialization is redundant and can be removed. 
Addresses-Coverity: ("Unused value") Signed-off-by: Colin Ian King Link: https://lkml.kernel.org/r/20200528221219.535804-1-colin.king@canonical.com Signed-off-by: Stephen Boyd --- drivers/clk/x86/clk-cgu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/clk/x86/clk-cgu.c b/drivers/clk/x86/clk-cgu.c index 802a7fa88535f..56af0e04ec1e3 100644 --- a/drivers/clk/x86/clk-cgu.c +++ b/drivers/clk/x86/clk-cgu.c @@ -538,7 +538,7 @@ lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate, struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw); u32 div, ddiv1, ddiv2; unsigned long flags; - u64 rate64 = rate; + u64 rate64; div = DIV_ROUND_CLOSEST_ULL((u64)*prate, rate); -- GitLab From 15e3ae36f71eb6564b5caa5cd02e0d0353db5b85 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=91=A8=E7=90=B0=E6=9D=B0=20=28Zhou=20Yanjie=29?= Date: Thu, 28 May 2020 11:15:43 +0800 Subject: [PATCH 0797/1971] clk: Ingenic: Remove unnecessary spinlock when reading registers. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It is not necessary to use spinlock when reading registers, so remove it from cgu.c. Suggested-by: Paul Cercueil Suggested-by: Paul Burton Signed-off-by: 周琰杰 (Zhou Yanjie) Reviewed-by: Paul Cercueil Link: https://lkml.kernel.org/r/20200528031549.13846-2-zhouyanjie@wanyeetech.com Signed-off-by: Stephen Boyd --- drivers/clk/ingenic/cgu.c | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c index 6e963031cd873..ab1302ad1450b 100644 --- a/drivers/clk/ingenic/cgu.c +++ b/drivers/clk/ingenic/cgu.c @@ -76,16 +76,13 @@ ingenic_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) const struct ingenic_cgu_pll_info *pll_info; unsigned m, n, od_enc, od; bool bypass; - unsigned long flags; u32 ctl; clk_info = &cgu->clock_info[ingenic_clk->idx]; BUG_ON(clk_info->type != CGU_CLK_PLL); pll_info = &clk_info->pll; - spin_lock_irqsave(&cgu->lock, flags); ctl = readl(cgu->base + pll_info->reg); - spin_unlock_irqrestore(&cgu->lock, flags); m = (ctl >> pll_info->m_shift) & GENMASK(pll_info->m_bits - 1, 0); m += pll_info->m_offset; @@ -259,12 +256,9 @@ static int ingenic_pll_is_enabled(struct clk_hw *hw) struct ingenic_cgu *cgu = ingenic_clk->cgu; const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk); const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll; - unsigned long flags; u32 ctl; - spin_lock_irqsave(&cgu->lock, flags); ctl = readl(cgu->base + pll_info->reg); - spin_unlock_irqrestore(&cgu->lock, flags); return !!(ctl & BIT(pll_info->enable_bit)); } @@ -562,16 +556,12 @@ static int ingenic_clk_is_enabled(struct clk_hw *hw) struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw); struct ingenic_cgu *cgu = ingenic_clk->cgu; const struct ingenic_cgu_clk_info *clk_info; - unsigned long flags; int enabled = 1; clk_info = &cgu->clock_info[ingenic_clk->idx]; - if (clk_info->type & CGU_CLK_GATE) { - spin_lock_irqsave(&cgu->lock, flags); + if (clk_info->type & CGU_CLK_GATE) enabled = !ingenic_cgu_gate_get(cgu, &clk_info->gate); - spin_unlock_irqrestore(&cgu->lock, flags); - } return enabled; } -- GitLab From 9d9cc58aff468c1589df09ac12e4e79b1eaba6db Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=91=A8=E7=90=B0=E6=9D=B0=20=28Zhou=20Yanjie=29?= Date: Thu, 28 May 2020 11:15:44 +0800 Subject: [PATCH 0798/1971] clk: Ingenic: Adjust cgu code to make it compatible with X1830. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The PLL of X1830 Soc from Ingenic has been greatly changed, the bypass control is placed in another register, so now two registers may needed to control the PLL. To this end, a new "bypass_reg" was introduced. In addition, when calculating rate, the PLL of X1830 introduced an extra 2x multiplier, so a new "rate_multiplier" was introduced. And adjust the code in jz47xx-cgu.c and x1000-cgu.c, make it to be compatible with the new cgu code. Signed-off-by: 周琰杰 (Zhou Yanjie) Reviewed-by: Paul Cercueil Link: https://lkml.kernel.org/r/20200528031549.13846-3-zhouyanjie@wanyeetech.com Signed-off-by: Stephen Boyd --- drivers/clk/ingenic/cgu.c | 16 +++++++++++++--- drivers/clk/ingenic/cgu.h | 4 ++++ drivers/clk/ingenic/jz4725b-cgu.c | 4 ++++ drivers/clk/ingenic/jz4740-cgu.c | 4 ++++ drivers/clk/ingenic/jz4770-cgu.c | 8 +++++++- drivers/clk/ingenic/jz4780-cgu.c | 3 +++ drivers/clk/ingenic/x1000-cgu.c | 6 ++++++ 7 files changed, 41 insertions(+), 4 deletions(-) diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c index ab1302ad1450b..d7981b670221f 100644 --- a/drivers/clk/ingenic/cgu.c +++ b/drivers/clk/ingenic/cgu.c @@ -90,6 +90,9 @@ ingenic_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) n += pll_info->n_offset; od_enc = ctl >> pll_info->od_shift; od_enc &= GENMASK(pll_info->od_bits - 1, 0); + + ctl = readl(cgu->base + pll_info->bypass_reg); + bypass = !pll_info->no_bypass_bit && !!(ctl & BIT(pll_info->bypass_bit)); @@ -103,7 +106,8 @@ ingenic_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) BUG_ON(od == pll_info->od_max); od++; - return div_u64((u64)parent_rate * m, n * od); + return div_u64((u64)parent_rate * m * pll_info->rate_multiplier, + n * od); } static unsigned long @@ -136,7 +140,8 @@ ingenic_pll_calc(const struct ingenic_cgu_clk_info *clk_info, if (pod) *pod = od; - return div_u64((u64)parent_rate * m, n * od); + return div_u64((u64)parent_rate * m * pll_info->rate_multiplier, + n * od); } static inline const struct ingenic_cgu_clk_info *to_clk_info( @@ -209,9 +214,14 @@ static int ingenic_pll_enable(struct clk_hw *hw) u32 ctl; spin_lock_irqsave(&cgu->lock, flags); - ctl = readl(cgu->base + pll_info->reg); + ctl = readl(cgu->base + pll_info->bypass_reg); ctl &= ~BIT(pll_info->bypass_bit); + + writel(ctl, cgu->base + pll_info->bypass_reg); + + ctl = readl(cgu->base + pll_info->reg); + ctl |= BIT(pll_info->enable_bit); writel(ctl, cgu->base + pll_info->reg); diff --git a/drivers/clk/ingenic/cgu.h b/drivers/clk/ingenic/cgu.h index 0dc8004079ee2..2c75ef4a36f5c 100644 --- a/drivers/clk/ingenic/cgu.h +++ b/drivers/clk/ingenic/cgu.h @@ -17,6 +17,7 @@ /** * struct ingenic_cgu_pll_info - information about a PLL * @reg: the offset of the PLL's control register within the CGU + * @rate_multiplier: the multiplier needed by pll rate calculation * @m_shift: the number of bits to shift the multiplier value by (ie. 
the * index of the lowest bit of the multiplier value in the PLL's * control register) @@ -37,6 +38,7 @@ * @od_encoding: a pointer to an array mapping post-VCO divider values to * their encoded values in the PLL control register, or -1 for * unsupported values + * @bypass_reg: the offset of the bypass control register within the CGU * @bypass_bit: the index of the bypass bit in the PLL control register * @enable_bit: the index of the enable bit in the PLL control register * @stable_bit: the index of the stable bit in the PLL control register @@ -44,10 +46,12 @@ */ struct ingenic_cgu_pll_info { unsigned reg; + unsigned rate_multiplier; const s8 *od_encoding; u8 m_shift, m_bits, m_offset; u8 n_shift, n_bits, n_offset; u8 od_shift, od_bits, od_max; + unsigned bypass_reg; u8 bypass_bit; u8 enable_bit; u8 stable_bit; diff --git a/drivers/clk/ingenic/jz4725b-cgu.c b/drivers/clk/ingenic/jz4725b-cgu.c index a3b4635f62784..8c38e72d14a79 100644 --- a/drivers/clk/ingenic/jz4725b-cgu.c +++ b/drivers/clk/ingenic/jz4725b-cgu.c @@ -9,7 +9,9 @@ #include #include #include + #include + #include "cgu.h" #include "pm.h" @@ -54,6 +56,7 @@ static const struct ingenic_cgu_clk_info jz4725b_cgu_clocks[] = { .parents = { JZ4725B_CLK_EXT, -1, -1, -1 }, .pll = { .reg = CGU_REG_CPPCR, + .rate_multiplier = 1, .m_shift = 23, .m_bits = 9, .m_offset = 2, @@ -65,6 +68,7 @@ static const struct ingenic_cgu_clk_info jz4725b_cgu_clocks[] = { .od_max = 4, .od_encoding = pll_od_encoding, .stable_bit = 10, + .bypass_reg = CGU_REG_CPPCR, .bypass_bit = 9, .enable_bit = 8, }, diff --git a/drivers/clk/ingenic/jz4740-cgu.c b/drivers/clk/ingenic/jz4740-cgu.c index 4f0e92c877d65..c0ac9196a5819 100644 --- a/drivers/clk/ingenic/jz4740-cgu.c +++ b/drivers/clk/ingenic/jz4740-cgu.c @@ -10,7 +10,9 @@ #include #include #include + #include + #include "cgu.h" #include "pm.h" @@ -69,6 +71,7 @@ static const struct ingenic_cgu_clk_info jz4740_cgu_clocks[] = { .parents = { JZ4740_CLK_EXT, -1, -1, -1 }, .pll = { .reg = CGU_REG_CPPCR, + .rate_multiplier = 1, .m_shift = 23, .m_bits = 9, .m_offset = 2, @@ -80,6 +83,7 @@ static const struct ingenic_cgu_clk_info jz4740_cgu_clocks[] = { .od_max = 4, .od_encoding = pll_od_encoding, .stable_bit = 10, + .bypass_reg = CGU_REG_CPPCR, .bypass_bit = 9, .enable_bit = 8, }, diff --git a/drivers/clk/ingenic/jz4770-cgu.c b/drivers/clk/ingenic/jz4770-cgu.c index c051ecba5cf8e..9ea4490ecb7f1 100644 --- a/drivers/clk/ingenic/jz4770-cgu.c +++ b/drivers/clk/ingenic/jz4770-cgu.c @@ -9,7 +9,9 @@ #include #include #include + #include + #include "cgu.h" #include "pm.h" @@ -102,6 +104,7 @@ static const struct ingenic_cgu_clk_info jz4770_cgu_clocks[] = { .parents = { JZ4770_CLK_EXT }, .pll = { .reg = CGU_REG_CPPCR0, + .rate_multiplier = 1, .m_shift = 24, .m_bits = 7, .m_offset = 1, @@ -112,6 +115,7 @@ static const struct ingenic_cgu_clk_info jz4770_cgu_clocks[] = { .od_bits = 2, .od_max = 8, .od_encoding = pll_od_encoding, + .bypass_reg = CGU_REG_CPPCR0, .bypass_bit = 9, .enable_bit = 8, .stable_bit = 10, @@ -124,6 +128,7 @@ static const struct ingenic_cgu_clk_info jz4770_cgu_clocks[] = { .parents = { JZ4770_CLK_EXT }, .pll = { .reg = CGU_REG_CPPCR1, + .rate_multiplier = 1, .m_shift = 24, .m_bits = 7, .m_offset = 1, @@ -134,9 +139,10 @@ static const struct ingenic_cgu_clk_info jz4770_cgu_clocks[] = { .od_bits = 2, .od_max = 8, .od_encoding = pll_od_encoding, + .bypass_reg = CGU_REG_CPPCR1, + .no_bypass_bit = true, .enable_bit = 7, .stable_bit = 6, - .no_bypass_bit = true, }, }, diff --git a/drivers/clk/ingenic/jz4780-cgu.c 
b/drivers/clk/ingenic/jz4780-cgu.c index c758f16430677..6c5b8029cc8ab 100644 --- a/drivers/clk/ingenic/jz4780-cgu.c +++ b/drivers/clk/ingenic/jz4780-cgu.c @@ -13,6 +13,7 @@ #include #include + #include "cgu.h" #include "pm.h" @@ -266,6 +267,7 @@ static const struct ingenic_cgu_clk_info jz4780_cgu_clocks[] = { #define DEF_PLL(name) { \ .reg = CGU_REG_ ## name, \ + .rate_multiplier = 1, \ .m_shift = 19, \ .m_bits = 13, \ .m_offset = 1, \ @@ -277,6 +279,7 @@ static const struct ingenic_cgu_clk_info jz4780_cgu_clocks[] = { .od_max = 16, \ .od_encoding = pll_od_encoding, \ .stable_bit = 6, \ + .bypass_reg = CGU_REG_ ## name, \ .bypass_bit = 1, \ .enable_bit = 0, \ } diff --git a/drivers/clk/ingenic/x1000-cgu.c b/drivers/clk/ingenic/x1000-cgu.c index b22d87b3f555b..c33934d8ac14d 100644 --- a/drivers/clk/ingenic/x1000-cgu.c +++ b/drivers/clk/ingenic/x1000-cgu.c @@ -7,7 +7,9 @@ #include #include #include + #include + #include "cgu.h" #include "pm.h" @@ -58,6 +60,7 @@ static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = { .parents = { X1000_CLK_EXCLK, -1, -1, -1 }, .pll = { .reg = CGU_REG_APLL, + .rate_multiplier = 1, .m_shift = 24, .m_bits = 7, .m_offset = 1, @@ -68,6 +71,7 @@ static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = { .od_bits = 2, .od_max = 8, .od_encoding = pll_od_encoding, + .bypass_reg = CGU_REG_APLL, .bypass_bit = 9, .enable_bit = 8, .stable_bit = 10, @@ -79,6 +83,7 @@ static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = { .parents = { X1000_CLK_EXCLK, -1, -1, -1 }, .pll = { .reg = CGU_REG_MPLL, + .rate_multiplier = 1, .m_shift = 24, .m_bits = 7, .m_offset = 1, @@ -89,6 +94,7 @@ static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = { .od_bits = 2, .od_max = 8, .od_encoding = pll_od_encoding, + .bypass_reg = CGU_REG_MPLL, .bypass_bit = 6, .enable_bit = 7, .stable_bit = 0, -- GitLab From 9a618e6f8cdda286e49793b47f2d327878fdd827 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=91=A8=E7=90=B0=E6=9D=B0=20=28Zhou=20Yanjie=29?= Date: Thu, 28 May 2020 11:15:46 +0800 Subject: [PATCH 0799/1971] dt-bindings: clock: Add X1830 clock bindings. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add the clock bindings for the X1830 Soc from Ingenic. Signed-off-by: 周琰杰 (Zhou Yanjie) Reviewed-by: Rob Herring Acked-by: Rob Herring Link: https://lkml.kernel.org/r/20200528031549.13846-5-zhouyanjie@wanyeetech.com Signed-off-by: Stephen Boyd --- include/dt-bindings/clock/x1830-cgu.h | 55 +++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 include/dt-bindings/clock/x1830-cgu.h diff --git a/include/dt-bindings/clock/x1830-cgu.h b/include/dt-bindings/clock/x1830-cgu.h new file mode 100644 index 0000000000000..801e1d09c881b --- /dev/null +++ b/include/dt-bindings/clock/x1830-cgu.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides clock numbers for the ingenic,x1830-cgu DT binding. 
+ * + * They are roughly ordered as: + * - external clocks + * - PLLs + * - muxes/dividers in the order they appear in the x1830 programmers manual + * - gates in order of their bit in the CLKGR* registers + */ + +#ifndef __DT_BINDINGS_CLOCK_X1830_CGU_H__ +#define __DT_BINDINGS_CLOCK_X1830_CGU_H__ + +#define X1830_CLK_EXCLK 0 +#define X1830_CLK_RTCLK 1 +#define X1830_CLK_APLL 2 +#define X1830_CLK_MPLL 3 +#define X1830_CLK_EPLL 4 +#define X1830_CLK_VPLL 5 +#define X1830_CLK_OTGPHY 6 +#define X1830_CLK_SCLKA 7 +#define X1830_CLK_CPUMUX 8 +#define X1830_CLK_CPU 9 +#define X1830_CLK_L2CACHE 10 +#define X1830_CLK_AHB0 11 +#define X1830_CLK_AHB2PMUX 12 +#define X1830_CLK_AHB2 13 +#define X1830_CLK_PCLK 14 +#define X1830_CLK_DDR 15 +#define X1830_CLK_MAC 16 +#define X1830_CLK_LCD 17 +#define X1830_CLK_MSCMUX 18 +#define X1830_CLK_MSC0 19 +#define X1830_CLK_MSC1 20 +#define X1830_CLK_SSIPLL 21 +#define X1830_CLK_SSIPLL_DIV2 22 +#define X1830_CLK_SSIMUX 23 +#define X1830_CLK_EMC 24 +#define X1830_CLK_EFUSE 25 +#define X1830_CLK_OTG 26 +#define X1830_CLK_SSI0 27 +#define X1830_CLK_SMB0 28 +#define X1830_CLK_SMB1 29 +#define X1830_CLK_SMB2 30 +#define X1830_CLK_UART0 31 +#define X1830_CLK_UART1 32 +#define X1830_CLK_SSI1 33 +#define X1830_CLK_SFC 34 +#define X1830_CLK_PDMA 35 +#define X1830_CLK_TCU 36 +#define X1830_CLK_DTRNG 37 +#define X1830_CLK_OST 38 + +#endif /* __DT_BINDINGS_CLOCK_X1830_CGU_H__ */ -- GitLab From ce1d86dc92496af8edf58b0870fd24980d63748e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=91=A8=E7=90=B0=E6=9D=B0=20=28Zhou=20Yanjie=29?= Date: Thu, 28 May 2020 11:15:47 +0800 Subject: [PATCH 0800/1971] clk: Ingenic: Add CGU driver for X1830. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add support for the clocks provided by the CGU in the Ingenic X1830 SoC, making use of the cgu code to do the heavy lifting. Signed-off-by: 周琰杰 (Zhou Yanjie) Reviewed-by: Paul Cercueil Link: https://lkml.kernel.org/r/20200528031549.13846-6-zhouyanjie@wanyeetech.com Signed-off-by: Stephen Boyd --- drivers/clk/ingenic/Kconfig | 10 + drivers/clk/ingenic/Makefile | 1 + drivers/clk/ingenic/x1830-cgu.c | 448 ++++++++++++++++++++++++++++++++ 3 files changed, 459 insertions(+) create mode 100644 drivers/clk/ingenic/x1830-cgu.c diff --git a/drivers/clk/ingenic/Kconfig b/drivers/clk/ingenic/Kconfig index b4555b465ea6a..580b0cf69ed51 100644 --- a/drivers/clk/ingenic/Kconfig +++ b/drivers/clk/ingenic/Kconfig @@ -55,6 +55,16 @@ config INGENIC_CGU_X1000 If building for a X1000 SoC, you want to say Y here. +config INGENIC_CGU_X1830 + bool "Ingenic X1830 CGU driver" + default MACH_X1830 + select INGENIC_CGU_COMMON + help + Support the clocks provided by the CGU hardware on Ingenic X1830 + and compatible SoCs. + + If building for a X1830 SoC, you want to say Y here. 
+ config INGENIC_TCU_CLK bool "Ingenic JZ47xx TCU clocks driver" default MACH_INGENIC diff --git a/drivers/clk/ingenic/Makefile b/drivers/clk/ingenic/Makefile index 8b1dad9b74a72..aaa4bffe03c67 100644 --- a/drivers/clk/ingenic/Makefile +++ b/drivers/clk/ingenic/Makefile @@ -5,4 +5,5 @@ obj-$(CONFIG_INGENIC_CGU_JZ4725B) += jz4725b-cgu.o obj-$(CONFIG_INGENIC_CGU_JZ4770) += jz4770-cgu.o obj-$(CONFIG_INGENIC_CGU_JZ4780) += jz4780-cgu.o obj-$(CONFIG_INGENIC_CGU_X1000) += x1000-cgu.o +obj-$(CONFIG_INGENIC_CGU_X1830) += x1830-cgu.o obj-$(CONFIG_INGENIC_TCU_CLK) += tcu.o diff --git a/drivers/clk/ingenic/x1830-cgu.c b/drivers/clk/ingenic/x1830-cgu.c new file mode 100644 index 0000000000000..a1b2ff0ee4878 --- /dev/null +++ b/drivers/clk/ingenic/x1830-cgu.c @@ -0,0 +1,448 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * X1830 SoC CGU driver + * Copyright (c) 2019 周琰杰 (Zhou Yanjie) + */ + +#include +#include +#include +#include + +#include + +#include "cgu.h" +#include "pm.h" + +/* CGU register offsets */ +#define CGU_REG_CPCCR 0x00 +#define CGU_REG_CPPCR 0x0c +#define CGU_REG_APLL 0x10 +#define CGU_REG_MPLL 0x14 +#define CGU_REG_CLKGR0 0x20 +#define CGU_REG_OPCR 0x24 +#define CGU_REG_CLKGR1 0x28 +#define CGU_REG_DDRCDR 0x2c +#define CGU_REG_USBPCR 0x3c +#define CGU_REG_USBRDT 0x40 +#define CGU_REG_USBVBFIL 0x44 +#define CGU_REG_USBPCR1 0x48 +#define CGU_REG_MACCDR 0x54 +#define CGU_REG_EPLL 0x58 +#define CGU_REG_I2SCDR 0x60 +#define CGU_REG_LPCDR 0x64 +#define CGU_REG_MSC0CDR 0x68 +#define CGU_REG_I2SCDR1 0x70 +#define CGU_REG_SSICDR 0x74 +#define CGU_REG_CIMCDR 0x7c +#define CGU_REG_MSC1CDR 0xa4 +#define CGU_REG_CMP_INTR 0xb0 +#define CGU_REG_CMP_INTRE 0xb4 +#define CGU_REG_DRCG 0xd0 +#define CGU_REG_CPCSR 0xd4 +#define CGU_REG_VPLL 0xe0 +#define CGU_REG_MACPHYC 0xe8 + +/* bits within the OPCR register */ +#define OPCR_GATE_USBPHYCLK BIT(23) +#define OPCR_SPENDN0 BIT(7) +#define OPCR_SPENDN1 BIT(6) + +/* bits within the USBPCR register */ +#define USBPCR_SIDDQ BIT(21) +#define USBPCR_OTG_DISABLE BIT(20) + +static struct ingenic_cgu *cgu; + +static int x1830_usb_phy_enable(struct clk_hw *hw) +{ + void __iomem *reg_opcr = cgu->base + CGU_REG_OPCR; + void __iomem *reg_usbpcr = cgu->base + CGU_REG_USBPCR; + + writel((readl(reg_opcr) | OPCR_SPENDN0) & ~OPCR_GATE_USBPHYCLK, reg_opcr); + writel(readl(reg_usbpcr) & ~USBPCR_OTG_DISABLE & ~USBPCR_SIDDQ, reg_usbpcr); + return 0; +} + +static void x1830_usb_phy_disable(struct clk_hw *hw) +{ + void __iomem *reg_opcr = cgu->base + CGU_REG_OPCR; + void __iomem *reg_usbpcr = cgu->base + CGU_REG_USBPCR; + + writel((readl(reg_opcr) & ~OPCR_SPENDN0) | OPCR_GATE_USBPHYCLK, reg_opcr); + writel(readl(reg_usbpcr) | USBPCR_OTG_DISABLE | USBPCR_SIDDQ, reg_usbpcr); +} + +static int x1830_usb_phy_is_enabled(struct clk_hw *hw) +{ + void __iomem *reg_opcr = cgu->base + CGU_REG_OPCR; + void __iomem *reg_usbpcr = cgu->base + CGU_REG_USBPCR; + + return (readl(reg_opcr) & OPCR_SPENDN0) && + !(readl(reg_usbpcr) & USBPCR_SIDDQ) && + !(readl(reg_usbpcr) & USBPCR_OTG_DISABLE); +} + +static const struct clk_ops x1830_otg_phy_ops = { + .enable = x1830_usb_phy_enable, + .disable = x1830_usb_phy_disable, + .is_enabled = x1830_usb_phy_is_enabled, +}; + +static const s8 pll_od_encoding[64] = { + 0x0, 0x1, -1, 0x2, -1, -1, -1, 0x3, + -1, -1, -1, -1, -1, -1, -1, 0x4, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, 0x5, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, 0x6, +}; + +static const 
struct ingenic_cgu_clk_info x1830_cgu_clocks[] = { + + /* External clocks */ + + [X1830_CLK_EXCLK] = { "ext", CGU_CLK_EXT }, + [X1830_CLK_RTCLK] = { "rtc", CGU_CLK_EXT }, + + /* PLLs */ + + [X1830_CLK_APLL] = { + "apll", CGU_CLK_PLL, + .parents = { X1830_CLK_EXCLK, -1, -1, -1 }, + .pll = { + .reg = CGU_REG_APLL, + .rate_multiplier = 2, + .m_shift = 20, + .m_bits = 9, + .m_offset = 1, + .n_shift = 14, + .n_bits = 6, + .n_offset = 1, + .od_shift = 11, + .od_bits = 3, + .od_max = 64, + .od_encoding = pll_od_encoding, + .bypass_reg = CGU_REG_CPPCR, + .bypass_bit = 30, + .enable_bit = 0, + .stable_bit = 3, + }, + }, + + [X1830_CLK_MPLL] = { + "mpll", CGU_CLK_PLL, + .parents = { X1830_CLK_EXCLK, -1, -1, -1 }, + .pll = { + .reg = CGU_REG_MPLL, + .rate_multiplier = 2, + .m_shift = 20, + .m_bits = 9, + .m_offset = 1, + .n_shift = 14, + .n_bits = 6, + .n_offset = 1, + .od_shift = 11, + .od_bits = 3, + .od_max = 64, + .od_encoding = pll_od_encoding, + .bypass_reg = CGU_REG_CPPCR, + .bypass_bit = 28, + .enable_bit = 0, + .stable_bit = 3, + }, + }, + + [X1830_CLK_EPLL] = { + "epll", CGU_CLK_PLL, + .parents = { X1830_CLK_EXCLK, -1, -1, -1 }, + .pll = { + .reg = CGU_REG_EPLL, + .rate_multiplier = 2, + .m_shift = 20, + .m_bits = 9, + .m_offset = 1, + .n_shift = 14, + .n_bits = 6, + .n_offset = 1, + .od_shift = 11, + .od_bits = 3, + .od_max = 64, + .od_encoding = pll_od_encoding, + .bypass_reg = CGU_REG_CPPCR, + .bypass_bit = 24, + .enable_bit = 0, + .stable_bit = 3, + }, + }, + + [X1830_CLK_VPLL] = { + "vpll", CGU_CLK_PLL, + .parents = { X1830_CLK_EXCLK, -1, -1, -1 }, + .pll = { + .reg = CGU_REG_VPLL, + .rate_multiplier = 2, + .m_shift = 20, + .m_bits = 9, + .m_offset = 1, + .n_shift = 14, + .n_bits = 6, + .n_offset = 1, + .od_shift = 11, + .od_bits = 3, + .od_max = 64, + .od_encoding = pll_od_encoding, + .bypass_reg = CGU_REG_CPPCR, + .bypass_bit = 26, + .enable_bit = 0, + .stable_bit = 3, + }, + }, + + /* Custom (SoC-specific) OTG PHY */ + + [X1830_CLK_OTGPHY] = { + "otg_phy", CGU_CLK_CUSTOM, + .parents = { X1830_CLK_EXCLK, -1, -1, -1 }, + .custom = { &x1830_otg_phy_ops }, + }, + + /* Muxes & dividers */ + + [X1830_CLK_SCLKA] = { + "sclk_a", CGU_CLK_MUX, + .parents = { -1, X1830_CLK_EXCLK, X1830_CLK_APLL, -1 }, + .mux = { CGU_REG_CPCCR, 30, 2 }, + }, + + [X1830_CLK_CPUMUX] = { + "cpu_mux", CGU_CLK_MUX, + .parents = { -1, X1830_CLK_SCLKA, X1830_CLK_MPLL, -1 }, + .mux = { CGU_REG_CPCCR, 28, 2 }, + }, + + [X1830_CLK_CPU] = { + "cpu", CGU_CLK_DIV | CGU_CLK_GATE, + .parents = { X1830_CLK_CPUMUX, -1, -1, -1 }, + .div = { CGU_REG_CPCCR, 0, 1, 4, 22, -1, -1 }, + .gate = { CGU_REG_CLKGR1, 15 }, + }, + + [X1830_CLK_L2CACHE] = { + "l2cache", CGU_CLK_DIV, + .parents = { X1830_CLK_CPUMUX, -1, -1, -1 }, + .div = { CGU_REG_CPCCR, 4, 1, 4, 22, -1, -1 }, + }, + + [X1830_CLK_AHB0] = { + "ahb0", CGU_CLK_MUX | CGU_CLK_DIV, + .parents = { -1, X1830_CLK_SCLKA, X1830_CLK_MPLL, -1 }, + .mux = { CGU_REG_CPCCR, 26, 2 }, + .div = { CGU_REG_CPCCR, 8, 1, 4, 21, -1, -1 }, + }, + + [X1830_CLK_AHB2PMUX] = { + "ahb2_apb_mux", CGU_CLK_MUX, + .parents = { -1, X1830_CLK_SCLKA, X1830_CLK_MPLL, -1 }, + .mux = { CGU_REG_CPCCR, 24, 2 }, + }, + + [X1830_CLK_AHB2] = { + "ahb2", CGU_CLK_DIV, + .parents = { X1830_CLK_AHB2PMUX, -1, -1, -1 }, + .div = { CGU_REG_CPCCR, 12, 1, 4, 20, -1, -1 }, + }, + + [X1830_CLK_PCLK] = { + "pclk", CGU_CLK_DIV | CGU_CLK_GATE, + .parents = { X1830_CLK_AHB2PMUX, -1, -1, -1 }, + .div = { CGU_REG_CPCCR, 16, 1, 4, 20, -1, -1 }, + .gate = { CGU_REG_CLKGR1, 14 }, + }, + + [X1830_CLK_DDR] = { + "ddr", CGU_CLK_MUX | 
CGU_CLK_DIV | CGU_CLK_GATE, + .parents = { -1, X1830_CLK_SCLKA, X1830_CLK_MPLL, -1 }, + .mux = { CGU_REG_DDRCDR, 30, 2 }, + .div = { CGU_REG_DDRCDR, 0, 1, 4, 29, 28, 27 }, + .gate = { CGU_REG_CLKGR0, 31 }, + }, + + [X1830_CLK_MAC] = { + "mac", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE, + .parents = { X1830_CLK_SCLKA, X1830_CLK_MPLL, + X1830_CLK_VPLL, X1830_CLK_EPLL }, + .mux = { CGU_REG_MACCDR, 30, 2 }, + .div = { CGU_REG_MACCDR, 0, 1, 8, 29, 28, 27 }, + .gate = { CGU_REG_CLKGR1, 4 }, + }, + + [X1830_CLK_LCD] = { + "lcd", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE, + .parents = { X1830_CLK_SCLKA, X1830_CLK_MPLL, + X1830_CLK_VPLL, X1830_CLK_EPLL }, + .mux = { CGU_REG_LPCDR, 30, 2 }, + .div = { CGU_REG_LPCDR, 0, 1, 8, 28, 27, 26 }, + .gate = { CGU_REG_CLKGR1, 9 }, + }, + + [X1830_CLK_MSCMUX] = { + "msc_mux", CGU_CLK_MUX, + .parents = { X1830_CLK_SCLKA, X1830_CLK_MPLL, + X1830_CLK_VPLL, X1830_CLK_EPLL }, + .mux = { CGU_REG_MSC0CDR, 30, 2 }, + }, + + [X1830_CLK_MSC0] = { + "msc0", CGU_CLK_DIV | CGU_CLK_GATE, + .parents = { X1830_CLK_MSCMUX, -1, -1, -1 }, + .div = { CGU_REG_MSC0CDR, 0, 2, 8, 29, 28, 27 }, + .gate = { CGU_REG_CLKGR0, 4 }, + }, + + [X1830_CLK_MSC1] = { + "msc1", CGU_CLK_DIV | CGU_CLK_GATE, + .parents = { X1830_CLK_MSCMUX, -1, -1, -1 }, + .div = { CGU_REG_MSC1CDR, 0, 2, 8, 29, 28, 27 }, + .gate = { CGU_REG_CLKGR0, 5 }, + }, + + [X1830_CLK_SSIPLL] = { + "ssi_pll", CGU_CLK_MUX | CGU_CLK_DIV, + .parents = { X1830_CLK_SCLKA, X1830_CLK_MPLL, + X1830_CLK_VPLL, X1830_CLK_EPLL }, + .mux = { CGU_REG_SSICDR, 30, 2 }, + .div = { CGU_REG_SSICDR, 0, 1, 8, 28, 27, 26 }, + }, + + [X1830_CLK_SSIPLL_DIV2] = { + "ssi_pll_div2", CGU_CLK_FIXDIV, + .parents = { X1830_CLK_SSIPLL }, + .fixdiv = { 2 }, + }, + + [X1830_CLK_SSIMUX] = { + "ssi_mux", CGU_CLK_MUX, + .parents = { X1830_CLK_EXCLK, X1830_CLK_SSIPLL_DIV2, -1, -1 }, + .mux = { CGU_REG_SSICDR, 29, 1 }, + }, + + /* Gate-only clocks */ + + [X1830_CLK_EMC] = { + "emc", CGU_CLK_GATE, + .parents = { X1830_CLK_AHB2, -1, -1, -1 }, + .gate = { CGU_REG_CLKGR0, 0 }, + }, + + [X1830_CLK_EFUSE] = { + "efuse", CGU_CLK_GATE, + .parents = { X1830_CLK_AHB2, -1, -1, -1 }, + .gate = { CGU_REG_CLKGR0, 1 }, + }, + + [X1830_CLK_OTG] = { + "otg", CGU_CLK_GATE, + .parents = { X1830_CLK_EXCLK, -1, -1, -1 }, + .gate = { CGU_REG_CLKGR0, 3 }, + }, + + [X1830_CLK_SSI0] = { + "ssi0", CGU_CLK_GATE, + .parents = { X1830_CLK_SSIMUX, -1, -1, -1 }, + .gate = { CGU_REG_CLKGR0, 6 }, + }, + + [X1830_CLK_SMB0] = { + "smb0", CGU_CLK_GATE, + .parents = { X1830_CLK_PCLK, -1, -1, -1 }, + .gate = { CGU_REG_CLKGR0, 7 }, + }, + + [X1830_CLK_SMB1] = { + "smb1", CGU_CLK_GATE, + .parents = { X1830_CLK_PCLK, -1, -1, -1 }, + .gate = { CGU_REG_CLKGR0, 8 }, + }, + + [X1830_CLK_SMB2] = { + "smb2", CGU_CLK_GATE, + .parents = { X1830_CLK_PCLK, -1, -1, -1 }, + .gate = { CGU_REG_CLKGR0, 9 }, + }, + + [X1830_CLK_UART0] = { + "uart0", CGU_CLK_GATE, + .parents = { X1830_CLK_EXCLK, -1, -1, -1 }, + .gate = { CGU_REG_CLKGR0, 14 }, + }, + + [X1830_CLK_UART1] = { + "uart1", CGU_CLK_GATE, + .parents = { X1830_CLK_EXCLK, -1, -1, -1 }, + .gate = { CGU_REG_CLKGR0, 15 }, + }, + + [X1830_CLK_SSI1] = { + "ssi1", CGU_CLK_GATE, + .parents = { X1830_CLK_SSIMUX, -1, -1, -1 }, + .gate = { CGU_REG_CLKGR0, 19 }, + }, + + [X1830_CLK_SFC] = { + "sfc", CGU_CLK_GATE, + .parents = { X1830_CLK_SSIPLL, -1, -1, -1 }, + .gate = { CGU_REG_CLKGR0, 20 }, + }, + + [X1830_CLK_PDMA] = { + "pdma", CGU_CLK_GATE, + .parents = { X1830_CLK_EXCLK, -1, -1, -1 }, + .gate = { CGU_REG_CLKGR0, 21 }, + }, + + [X1830_CLK_TCU] = { + "tcu", CGU_CLK_GATE, + 
.parents = { X1830_CLK_EXCLK, -1, -1, -1 }, + .gate = { CGU_REG_CLKGR0, 30 }, + }, + + [X1830_CLK_DTRNG] = { + "dtrng", CGU_CLK_GATE, + .parents = { X1830_CLK_PCLK, -1, -1, -1 }, + .gate = { CGU_REG_CLKGR1, 1 }, + }, + + [X1830_CLK_OST] = { + "ost", CGU_CLK_GATE, + .parents = { X1830_CLK_EXCLK, -1, -1, -1 }, + .gate = { CGU_REG_CLKGR1, 11 }, + }, +}; + +static void __init x1830_cgu_init(struct device_node *np) +{ + int retval; + + cgu = ingenic_cgu_new(x1830_cgu_clocks, + ARRAY_SIZE(x1830_cgu_clocks), np); + if (!cgu) { + pr_err("%s: failed to initialise CGU\n", __func__); + return; + } + + retval = ingenic_cgu_register_clocks(cgu); + if (retval) { + pr_err("%s: failed to register CGU Clocks\n", __func__); + return; + } + + ingenic_cgu_register_syscore_ops(cgu); +} +/* + * CGU has some children devices, this is useful for probing children devices + * in the case where the device node is compatible with "simple-mfd". + */ +CLK_OF_DECLARE_DRIVER(x1830_cgu, "ingenic,x1830-cgu", x1830_cgu_init); -- GitLab From 424c85e1ffea7a37808a0b6e03066fa7a248eef0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=91=A8=E7=90=B0=E6=9D=B0=20=28Zhou=20Yanjie=29?= Date: Thu, 28 May 2020 11:15:48 +0800 Subject: [PATCH 0801/1971] dt-bindings: clock: Add and reorder ABI for X1000. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 1. Unlike the JZ4770 and JZ4780, the SSI clock of the X1000 is not derived directly from the output of SSIPLL, but from that output divided by 2. "X1000_CLK_SSIPLL_DIV2" is added for this purpose; it must sit between "X1000_CLK_SSIPLL" and "X1000_CLK_SSIMUX", otherwise an error will occur when initializing the clock (see the excerpt below). These ABIs are only used for X1000, and I'm sure that no other devicetree out there is using these ABIs, so we should be able to reorder them. 2. Clocks for LCD, OTG, EMC, EFUSE, OST and TCU are also added.
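For quick reference (condensed from the hunk that follows), the reordered header ends up with the new fixed divider sitting directly between its parent and its consumer:

	#define X1000_CLK_SSIPLL	20
	#define X1000_CLK_SSIPLL_DIV2	21
	#define X1000_CLK_SSIMUX	22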
Signed-off-by: 周琰杰 (Zhou Yanjie) Acked-by: Rob Herring Link: https://lkml.kernel.org/r/20200528031549.13846-7-zhouyanjie@wanyeetech.com Signed-off-by: Stephen Boyd --- include/dt-bindings/clock/x1000-cgu.h | 64 +++++++++++++++------------ 1 file changed, 36 insertions(+), 28 deletions(-) diff --git a/include/dt-bindings/clock/x1000-cgu.h b/include/dt-bindings/clock/x1000-cgu.h index bbaebaf7adb90..0367c8c02e16e 100644 --- a/include/dt-bindings/clock/x1000-cgu.h +++ b/include/dt-bindings/clock/x1000-cgu.h @@ -12,33 +12,41 @@ #ifndef __DT_BINDINGS_CLOCK_X1000_CGU_H__ #define __DT_BINDINGS_CLOCK_X1000_CGU_H__ -#define X1000_CLK_EXCLK 0 -#define X1000_CLK_RTCLK 1 -#define X1000_CLK_APLL 2 -#define X1000_CLK_MPLL 3 -#define X1000_CLK_SCLKA 4 -#define X1000_CLK_CPUMUX 5 -#define X1000_CLK_CPU 6 -#define X1000_CLK_L2CACHE 7 -#define X1000_CLK_AHB0 8 -#define X1000_CLK_AHB2PMUX 9 -#define X1000_CLK_AHB2 10 -#define X1000_CLK_PCLK 11 -#define X1000_CLK_DDR 12 -#define X1000_CLK_MAC 13 -#define X1000_CLK_MSCMUX 14 -#define X1000_CLK_MSC0 15 -#define X1000_CLK_MSC1 16 -#define X1000_CLK_SSIPLL 17 -#define X1000_CLK_SSIMUX 18 -#define X1000_CLK_SFC 19 -#define X1000_CLK_I2C0 20 -#define X1000_CLK_I2C1 21 -#define X1000_CLK_I2C2 22 -#define X1000_CLK_UART0 23 -#define X1000_CLK_UART1 24 -#define X1000_CLK_UART2 25 -#define X1000_CLK_SSI 26 -#define X1000_CLK_PDMA 27 +#define X1000_CLK_EXCLK 0 +#define X1000_CLK_RTCLK 1 +#define X1000_CLK_APLL 2 +#define X1000_CLK_MPLL 3 +#define X1000_CLK_OTGPHY 4 +#define X1000_CLK_SCLKA 5 +#define X1000_CLK_CPUMUX 6 +#define X1000_CLK_CPU 7 +#define X1000_CLK_L2CACHE 8 +#define X1000_CLK_AHB0 9 +#define X1000_CLK_AHB2PMUX 10 +#define X1000_CLK_AHB2 11 +#define X1000_CLK_PCLK 12 +#define X1000_CLK_DDR 13 +#define X1000_CLK_MAC 14 +#define X1000_CLK_LCD 15 +#define X1000_CLK_MSCMUX 16 +#define X1000_CLK_MSC0 17 +#define X1000_CLK_MSC1 18 +#define X1000_CLK_OTG 19 +#define X1000_CLK_SSIPLL 20 +#define X1000_CLK_SSIPLL_DIV2 21 +#define X1000_CLK_SSIMUX 22 +#define X1000_CLK_EMC 23 +#define X1000_CLK_EFUSE 24 +#define X1000_CLK_SFC 25 +#define X1000_CLK_I2C0 26 +#define X1000_CLK_I2C1 27 +#define X1000_CLK_I2C2 28 +#define X1000_CLK_UART0 29 +#define X1000_CLK_UART1 30 +#define X1000_CLK_UART2 31 +#define X1000_CLK_TCU 32 +#define X1000_CLK_SSI 33 +#define X1000_CLK_OST 34 +#define X1000_CLK_PDMA 35 #endif /* __DT_BINDINGS_CLOCK_X1000_CGU_H__ */ -- GitLab From 440d7a6f73909f4d8fa9b442a3967973fc9d8fac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=91=A8=E7=90=B0=E6=9D=B0=20=28Zhou=20Yanjie=29?= Date: Thu, 28 May 2020 11:15:49 +0800 Subject: [PATCH 0802/1971] clk: X1000: Add FIXDIV for SSI clock of X1000. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 1. Unlike the JZ4770 and JZ4780, the SSI clock of the X1000 is not derived directly from the output of SSIPLL, but from that output divided by 2. "X1000_CLK_SSIPLL_DIV2" is added for this purpose, and it is ensured that it is initialized before "X1000_CLK_SSIMUX" when initializing the clocks (the resulting SSI clock path is sketched below). 2. Clocks for LCD, OTG, EMC, EFUSE, OST and TCU, and gates for CPU and PCLK, are also added. 3. Use "CLK_OF_DECLARE_DRIVER" like the other CGU drivers.
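For clarity, a rough sketch of the resulting X1000 SSI clock path, derived from the clock table changes below (names are the driver's clock names):

	sclk_a/mpll -> ssi_pll (mux + div) -> ssi_pll_div2 (fixed /2) -> ssi_mux -> ssi (gate)
	exclk ---------------------------------------------------------> ssi_mux

i.e. ssi_mux now selects between exclk and the halved SSIPLL output, while sfc continues to take ssi_pll directly.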
Signed-off-by: 周琰杰 (Zhou Yanjie) Reviewed-by: Paul Cercueil Link: https://lkml.kernel.org/r/20200528031549.13846-8-zhouyanjie@wanyeetech.com Signed-off-by: Stephen Boyd --- drivers/clk/ingenic/x1000-cgu.c | 117 ++++++++++++++++++++++++++++++-- 1 file changed, 111 insertions(+), 6 deletions(-) diff --git a/drivers/clk/ingenic/x1000-cgu.c b/drivers/clk/ingenic/x1000-cgu.c index c33934d8ac14d..453f3323cb99d 100644 --- a/drivers/clk/ingenic/x1000-cgu.c +++ b/drivers/clk/ingenic/x1000-cgu.c @@ -1,11 +1,12 @@ // SPDX-License-Identifier: GPL-2.0 /* * X1000 SoC CGU driver - * Copyright (c) 2019 Zhou Yanjie + * Copyright (c) 2019 周琰杰 (Zhou Yanjie) */ #include #include +#include #include #include @@ -20,6 +21,9 @@ #define CGU_REG_CLKGR 0x20 #define CGU_REG_OPCR 0x24 #define CGU_REG_DDRCDR 0x2c +#define CGU_REG_USBPCR 0x3c +#define CGU_REG_USBPCR1 0x48 +#define CGU_REG_USBCDR 0x50 #define CGU_REG_MACCDR 0x54 #define CGU_REG_I2SCDR 0x60 #define CGU_REG_LPCDR 0x64 @@ -40,8 +44,47 @@ #define OPCR_SPENDN0 BIT(7) #define OPCR_SPENDN1 BIT(6) +/* bits within the USBPCR register */ +#define USBPCR_SIDDQ BIT(21) +#define USBPCR_OTG_DISABLE BIT(20) + static struct ingenic_cgu *cgu; +static int x1000_usb_phy_enable(struct clk_hw *hw) +{ + void __iomem *reg_opcr = cgu->base + CGU_REG_OPCR; + void __iomem *reg_usbpcr = cgu->base + CGU_REG_USBPCR; + + writel(readl(reg_opcr) | OPCR_SPENDN0, reg_opcr); + writel(readl(reg_usbpcr) & ~USBPCR_OTG_DISABLE & ~USBPCR_SIDDQ, reg_usbpcr); + return 0; +} + +static void x1000_usb_phy_disable(struct clk_hw *hw) +{ + void __iomem *reg_opcr = cgu->base + CGU_REG_OPCR; + void __iomem *reg_usbpcr = cgu->base + CGU_REG_USBPCR; + + writel(readl(reg_opcr) & ~OPCR_SPENDN0, reg_opcr); + writel(readl(reg_usbpcr) | USBPCR_OTG_DISABLE | USBPCR_SIDDQ, reg_usbpcr); +} + +static int x1000_usb_phy_is_enabled(struct clk_hw *hw) +{ + void __iomem *reg_opcr = cgu->base + CGU_REG_OPCR; + void __iomem *reg_usbpcr = cgu->base + CGU_REG_USBPCR; + + return (readl(reg_opcr) & OPCR_SPENDN0) && + !(readl(reg_usbpcr) & USBPCR_SIDDQ) && + !(readl(reg_usbpcr) & USBPCR_OTG_DISABLE); +} + +static const struct clk_ops x1000_otg_phy_ops = { + .enable = x1000_usb_phy_enable, + .disable = x1000_usb_phy_disable, + .is_enabled = x1000_usb_phy_is_enabled, +}; + static const s8 pll_od_encoding[8] = { 0x0, 0x1, -1, 0x2, -1, -1, -1, 0x3, }; @@ -101,6 +144,15 @@ static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = { }, }, + + /* Custom (SoC-specific) OTG PHY */ + + [X1000_CLK_OTGPHY] = { + "otg_phy", CGU_CLK_CUSTOM, + .parents = { -1, -1, X1000_CLK_EXCLK, -1 }, + .custom = { &x1000_otg_phy_ops }, + }, + /* Muxes & dividers */ [X1000_CLK_SCLKA] = { @@ -116,9 +168,10 @@ static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = { }, [X1000_CLK_CPU] = { - "cpu", CGU_CLK_DIV, + "cpu", CGU_CLK_DIV | CGU_CLK_GATE, .parents = { X1000_CLK_CPUMUX, -1, -1, -1 }, .div = { CGU_REG_CPCCR, 0, 1, 4, 22, -1, -1 }, + .gate = { CGU_REG_CLKGR, 30 }, }, [X1000_CLK_L2CACHE] = { @@ -147,9 +200,10 @@ static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = { }, [X1000_CLK_PCLK] = { - "pclk", CGU_CLK_DIV, + "pclk", CGU_CLK_DIV | CGU_CLK_GATE, .parents = { X1000_CLK_AHB2PMUX, -1, -1, -1 }, .div = { CGU_REG_CPCCR, 16, 1, 4, 20, -1, -1 }, + .gate = { CGU_REG_CLKGR, 28 }, }, [X1000_CLK_DDR] = { @@ -162,12 +216,20 @@ static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = { [X1000_CLK_MAC] = { "mac", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE, - .parents = { X1000_CLK_SCLKA, X1000_CLK_MPLL}, + .parents = { X1000_CLK_SCLKA, 
X1000_CLK_MPLL }, .mux = { CGU_REG_MACCDR, 31, 1 }, .div = { CGU_REG_MACCDR, 0, 1, 8, 29, 28, 27 }, .gate = { CGU_REG_CLKGR, 25 }, }, + [X1000_CLK_LCD] = { + "lcd", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE, + .parents = { X1000_CLK_SCLKA, X1000_CLK_MPLL }, + .mux = { CGU_REG_LPCDR, 31, 1 }, + .div = { CGU_REG_LPCDR, 0, 1, 8, 28, 27, 26 }, + .gate = { CGU_REG_CLKGR, 23 }, + }, + [X1000_CLK_MSCMUX] = { "msc_mux", CGU_CLK_MUX, .parents = { X1000_CLK_SCLKA, X1000_CLK_MPLL}, @@ -188,6 +250,15 @@ static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = { .gate = { CGU_REG_CLKGR, 5 }, }, + [X1000_CLK_OTG] = { + "otg", CGU_CLK_DIV | CGU_CLK_GATE | CGU_CLK_MUX, + .parents = { X1000_CLK_EXCLK, -1, + X1000_CLK_APLL, X1000_CLK_MPLL }, + .mux = { CGU_REG_USBCDR, 30, 2 }, + .div = { CGU_REG_USBCDR, 0, 1, 8, 29, 28, 27 }, + .gate = { CGU_REG_CLKGR, 3 }, + }, + [X1000_CLK_SSIPLL] = { "ssi_pll", CGU_CLK_MUX | CGU_CLK_DIV, .parents = { X1000_CLK_SCLKA, X1000_CLK_MPLL, -1, -1 }, @@ -195,14 +266,32 @@ static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = { .div = { CGU_REG_SSICDR, 0, 1, 8, 29, 28, 27 }, }, + [X1000_CLK_SSIPLL_DIV2] = { + "ssi_pll_div2", CGU_CLK_FIXDIV, + .parents = { X1000_CLK_SSIPLL }, + .fixdiv = { 2 }, + }, + [X1000_CLK_SSIMUX] = { "ssi_mux", CGU_CLK_MUX, - .parents = { X1000_CLK_EXCLK, X1000_CLK_SSIPLL, -1, -1 }, + .parents = { X1000_CLK_EXCLK, X1000_CLK_SSIPLL_DIV2, -1, -1 }, .mux = { CGU_REG_SSICDR, 30, 1 }, }, /* Gate-only clocks */ + [X1000_CLK_EMC] = { + "emc", CGU_CLK_GATE, + .parents = { X1000_CLK_AHB2, -1, -1, -1 }, + .gate = { CGU_REG_CLKGR, 0 }, + }, + + [X1000_CLK_EFUSE] = { + "efuse", CGU_CLK_GATE, + .parents = { X1000_CLK_AHB2, -1, -1, -1 }, + .gate = { CGU_REG_CLKGR, 1 }, + }, + [X1000_CLK_SFC] = { "sfc", CGU_CLK_GATE, .parents = { X1000_CLK_SSIPLL, -1, -1, -1 }, @@ -245,12 +334,24 @@ static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = { .gate = { CGU_REG_CLKGR, 16 }, }, + [X1000_CLK_TCU] = { + "tcu", CGU_CLK_GATE, + .parents = { X1000_CLK_EXCLK, -1, -1, -1 }, + .gate = { CGU_REG_CLKGR, 18 }, + }, + [X1000_CLK_SSI] = { "ssi", CGU_CLK_GATE, .parents = { X1000_CLK_SSIMUX, -1, -1, -1 }, .gate = { CGU_REG_CLKGR, 19 }, }, + [X1000_CLK_OST] = { + "ost", CGU_CLK_GATE, + .parents = { X1000_CLK_EXCLK, -1, -1, -1 }, + .gate = { CGU_REG_CLKGR, 20 }, + }, + [X1000_CLK_PDMA] = { "pdma", CGU_CLK_GATE, .parents = { X1000_CLK_EXCLK, -1, -1, -1 }, @@ -277,4 +378,8 @@ static void __init x1000_cgu_init(struct device_node *np) ingenic_cgu_register_syscore_ops(cgu); } -CLK_OF_DECLARE(x1000_cgu, "ingenic,x1000-cgu", x1000_cgu_init); +/* + * CGU has some children devices, this is useful for probing children devices + * in the case where the device node is compatible with "simple-mfd". + */ +CLK_OF_DECLARE_DRIVER(x1000_cgu, "ingenic,x1000-cgu", x1000_cgu_init); -- GitLab From e480fe10adfe03c23b7eae811c46f6da6afba2ff Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Thu, 28 May 2020 16:38:37 -0700 Subject: [PATCH 0803/1971] clk: ingenic: Mark ingenic_tcu_of_match as __maybe_unused This device id table is passed to of_match_node() later on in probe, but on CONFIG_OF=n builds of_match_node() doesn't do anything with the arguments. Lets just mark the table unused so that the compiler doesn't complain about this. drivers/clk/ingenic/tcu.c:326:34: warning: unused variable 'ingenic_tcu_of_match' [-Wunused-const-variable] static const struct of_device_id ingenic_tcu_of_match[] __initconst = { ^ 1 warning generated. 
Reported-by: kbuild test robot Signed-off-by: Stephen Boyd Link: https://lkml.kernel.org/r/20200528233837.70269-1-sboyd@kernel.org --- drivers/clk/ingenic/tcu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/clk/ingenic/tcu.c b/drivers/clk/ingenic/tcu.c index 153a954b0d2fd..9382dc3aa27e6 100644 --- a/drivers/clk/ingenic/tcu.c +++ b/drivers/clk/ingenic/tcu.c @@ -323,7 +323,7 @@ static const struct ingenic_soc_info x1000_soc_info = { .has_tcu_clk = false, }; -static const struct of_device_id ingenic_tcu_of_match[] __initconst = { +static const struct of_device_id __maybe_unused ingenic_tcu_of_match[] __initconst = { { .compatible = "ingenic,jz4740-tcu", .data = &jz4740_soc_info, }, { .compatible = "ingenic,jz4725b-tcu", .data = &jz4725b_soc_info, }, { .compatible = "ingenic,jz4770-tcu", .data = &jz4770_soc_info, }, -- GitLab From b1e8d713e6b2d59ee3a7b57c0dab88a19ec0cf33 Mon Sep 17 00:00:00 2001 From: Jeffrey Hugo Date: Thu, 28 May 2020 07:22:05 -0700 Subject: [PATCH 0804/1971] clk: qcom: Add missing msm8998 ufs_unipro_core_clk_src ufs_unipro_core_clk_src is required to allow UFS to clock scale for power savings. Fixes: b5f5f525c547 ("clk: qcom: Add MSM8998 Global Clock Control (GCC) driver") Signed-off-by: Jeffrey Hugo Link: https://lkml.kernel.org/r/20200528142205.44003-1-jeffrey.l.hugo@gmail.com Signed-off-by: Stephen Boyd --- drivers/clk/qcom/gcc-msm8998.c | 27 ++++++++++++++++++++ include/dt-bindings/clock/qcom,gcc-msm8998.h | 1 + 2 files changed, 28 insertions(+) diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c index df1d7056436cd..9d7016bcd6800 100644 --- a/drivers/clk/qcom/gcc-msm8998.c +++ b/drivers/clk/qcom/gcc-msm8998.c @@ -1110,6 +1110,27 @@ static struct clk_rcg2 ufs_axi_clk_src = { }, }; +static const struct freq_tbl ftbl_ufs_unipro_core_clk_src[] = { + F(37500000, P_GPLL0_OUT_MAIN, 16, 0, 0), + F(75000000, P_GPLL0_OUT_MAIN, 8, 0, 0), + F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0), + { } +}; + +static struct clk_rcg2 ufs_unipro_core_clk_src = { + .cmd_rcgr = 0x76028, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_ufs_unipro_core_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "ufs_unipro_core_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .ops = &clk_rcg2_ops, + }, +}; + static const struct freq_tbl ftbl_usb30_master_clk_src[] = { F(19200000, P_XO, 1, 0, 0), F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0), @@ -2549,6 +2570,11 @@ static struct clk_branch gcc_ufs_unipro_core_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_ufs_unipro_core_clk", + .parent_names = (const char *[]){ + "ufs_unipro_core_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -2904,6 +2930,7 @@ static struct clk_regmap *gcc_msm8998_clocks[] = { [SDCC4_APPS_CLK_SRC] = &sdcc4_apps_clk_src.clkr, [TSIF_REF_CLK_SRC] = &tsif_ref_clk_src.clkr, [UFS_AXI_CLK_SRC] = &ufs_axi_clk_src.clkr, + [UFS_UNIPRO_CORE_CLK_SRC] = &ufs_unipro_core_clk_src.clkr, [USB30_MASTER_CLK_SRC] = &usb30_master_clk_src.clkr, [USB30_MOCK_UTMI_CLK_SRC] = &usb30_mock_utmi_clk_src.clkr, [USB3_PHY_AUX_CLK_SRC] = &usb3_phy_aux_clk_src.clkr, diff --git a/include/dt-bindings/clock/qcom,gcc-msm8998.h b/include/dt-bindings/clock/qcom,gcc-msm8998.h index 63e02dc32a0bb..6a73a174f0497 100644 --- a/include/dt-bindings/clock/qcom,gcc-msm8998.h +++ b/include/dt-bindings/clock/qcom,gcc-msm8998.h @@ -183,6 +183,7 @@ #define GCC_MSS_SNOC_AXI_CLK 174 #define 
GCC_MSS_MNOC_BIMC_AXI_CLK 175 #define GCC_BIMC_GFX_CLK 176 +#define UFS_UNIPRO_CORE_CLK_SRC 177 #define PCIE_0_GDSC 0 #define UFS_GDSC 1 -- GitLab From ff694ab60c29cfeba81b3d5068d3c908f22110ed Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Thu, 28 May 2020 07:59:55 -0700 Subject: [PATCH 0805/1971] fs/ext4: Narrow scope of DAX check in setflags When preventing DAX and journaling on an inode, use the effective DAX check rather than the mount option. This will be required to support per-inode DAX flags. Reviewed-by: Jan Kara Signed-off-by: Ira Weiny Link: https://lore.kernel.org/r/20200528150003.828793-2-ira.weiny@intel.com Signed-off-by: Theodore Ts'o --- fs/ext4/ioctl.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index bfc1281fc4cbc..5813e5e73eab6 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -393,9 +393,9 @@ flags_err: if ((jflag ^ oldflags) & (EXT4_JOURNAL_DATA_FL)) { /* * Changes to the journaling mode can cause unsafe changes to - * S_DAX if we are using the DAX mount option. + * S_DAX if the inode is DAX */ - if (test_opt(inode->i_sb, DAX)) { + if (IS_DAX(inode)) { err = -EBUSY; goto flags_out; } -- GitLab From 6c0d077ff8200a7b8e7131ca8e25315dec243a60 Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Thu, 28 May 2020 07:59:56 -0700 Subject: [PATCH 0806/1971] fs/ext4: Disallow verity if inode is DAX Verity and DAX are incompatible. Changing the DAX mode due to a verity flag change is wrong without a corresponding address_space_operations update. Make the 2 options mutually exclusive by returning an error if DAX was set first. (Setting DAX is already disabled if Verity is set first.) Reviewed-by: Jan Kara Signed-off-by: Ira Weiny Link: https://lore.kernel.org/r/20200528150003.828793-3-ira.weiny@intel.com Signed-off-by: Theodore Ts'o --- Documentation/filesystems/ext4/verity.rst | 3 +++ fs/ext4/verity.c | 3 +++ 2 files changed, 6 insertions(+) diff --git a/Documentation/filesystems/ext4/verity.rst b/Documentation/filesystems/ext4/verity.rst index 3e4c0ee0e0683..e99ff3fd09f7e 100644 --- a/Documentation/filesystems/ext4/verity.rst +++ b/Documentation/filesystems/ext4/verity.rst @@ -39,3 +39,6 @@ is encrypted as well as the data itself. Verity files cannot have blocks allocated past the end of the verity metadata. + +Verity and DAX are not compatible and attempts to set both of these flags +on a file will fail. diff --git a/fs/ext4/verity.c b/fs/ext4/verity.c index dc5ec724d8891..f05a09fb2ae41 100644 --- a/fs/ext4/verity.c +++ b/fs/ext4/verity.c @@ -113,6 +113,9 @@ static int ext4_begin_enable_verity(struct file *filp) handle_t *handle; int err; + if (IS_DAX(inode)) + return -EINVAL; + if (ext4_verity_in_progress(inode)) return -EBUSY; -- GitLab From fc626fe322f13ecdfac58cbfa6cfea797ed22623 Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Thu, 28 May 2020 07:59:57 -0700 Subject: [PATCH 0807/1971] fs/ext4: Change EXT4_MOUNT_DAX to EXT4_MOUNT_DAX_ALWAYS In preparation for the new tri-state mount option, which will introduce EXT4_MOUNT2_DAX_NEVER.
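For quick reference, the mechanical effect of the rename on the option checks (condensed from the hunks below) is simply:

	-	if (test_opt(sb, DAX)) {
	+	if (test_opt(sb, DAX_ALWAYS)) {

The mount-option bit keeps its value; only the name changes to make room for the "never" and "inode" states added later in this series.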
Reviewed-by: Jan Kara Signed-off-by: Ira Weiny Link: https://lore.kernel.org/r/20200528150003.828793-4-ira.weiny@intel.com Signed-off-by: Theodore Ts'o --- fs/ext4/ext4.h | 4 ++-- fs/ext4/inode.c | 2 +- fs/ext4/super.c | 12 ++++++------ 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 91eb4381cae5b..1a3daf2d18ef7 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -1123,9 +1123,9 @@ struct ext4_inode_info { #define EXT4_MOUNT_MINIX_DF 0x00080 /* Mimics the Minix statfs */ #define EXT4_MOUNT_NOLOAD 0x00100 /* Don't use existing journal*/ #ifdef CONFIG_FS_DAX -#define EXT4_MOUNT_DAX 0x00200 /* Direct Access */ +#define EXT4_MOUNT_DAX_ALWAYS 0x00200 /* Direct Access */ #else -#define EXT4_MOUNT_DAX 0 +#define EXT4_MOUNT_DAX_ALWAYS 0 #endif #define EXT4_MOUNT_DATA_FLAGS 0x00C00 /* Mode for data writes: */ #define EXT4_MOUNT_JOURNAL_DATA 0x00400 /* Write data to journal */ diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 2a4aae6acdcb9..a10ff12194dbc 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -4400,7 +4400,7 @@ int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc) static bool ext4_should_use_dax(struct inode *inode) { - if (!test_opt(inode->i_sb, DAX)) + if (!test_opt(inode->i_sb, DAX_ALWAYS)) return false; if (!S_ISREG(inode->i_mode)) return false; diff --git a/fs/ext4/super.c b/fs/ext4/super.c index bf5fcb477f667..7b99c44d0a918 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1775,7 +1775,7 @@ static const struct mount_opts { {Opt_min_batch_time, 0, MOPT_GTE0}, {Opt_inode_readahead_blks, 0, MOPT_GTE0}, {Opt_init_itable, 0, MOPT_GTE0}, - {Opt_dax, EXT4_MOUNT_DAX, MOPT_SET}, + {Opt_dax, EXT4_MOUNT_DAX_ALWAYS, MOPT_SET}, {Opt_stripe, 0, MOPT_GTE0}, {Opt_resuid, 0, MOPT_GTE0}, {Opt_resgid, 0, MOPT_GTE0}, @@ -3982,7 +3982,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) "both data=journal and dioread_nolock"); goto failed_mount; } - if (test_opt(sb, DAX)) { + if (test_opt(sb, DAX_ALWAYS)) { ext4_msg(sb, KERN_ERR, "can't mount with " "both data=journal and dax"); goto failed_mount; @@ -4092,7 +4092,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) goto failed_mount; } - if (sbi->s_mount_opt & EXT4_MOUNT_DAX) { + if (sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) { if (ext4_has_feature_inline_data(sb)) { ext4_msg(sb, KERN_ERR, "Cannot use DAX on a filesystem" " that may contain inline data"); @@ -5412,7 +5412,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) err = -EINVAL; goto restore_opts; } - if (test_opt(sb, DAX)) { + if (test_opt(sb, DAX_ALWAYS)) { ext4_msg(sb, KERN_ERR, "can't mount with " "both data=journal and dax"); err = -EINVAL; @@ -5433,10 +5433,10 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) goto restore_opts; } - if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_DAX) { + if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_DAX_ALWAYS) { ext4_msg(sb, KERN_WARNING, "warning: refusing change of " "dax flag with busy inodes while remounting"); - sbi->s_mount_opt ^= EXT4_MOUNT_DAX; + sbi->s_mount_opt ^= EXT4_MOUNT_DAX_ALWAYS; } if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) -- GitLab From a8ab6d3885ef5e2300d683b79a9e1999403eefd9 Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Thu, 28 May 2020 07:59:58 -0700 Subject: [PATCH 0808/1971] fs/ext4: Update ext4_should_use_dax() S_DAX should only be enabled when the underlying block device supports dax. 
Cache the underlying support for DAX in the super block and modify ext4_should_use_dax() to check for device support prior to the over riding mount option. While we are at it change the function to ext4_should_enable_dax() as this better reflects the ask as well as matches xfs. Reviewed-by: Jan Kara Signed-off-by: Ira Weiny Link: https://lore.kernel.org/r/20200528150003.828793-5-ira.weiny@intel.com Signed-off-by: Theodore Ts'o --- fs/ext4/ext4.h | 1 + fs/ext4/inode.c | 15 ++++++++++----- fs/ext4/super.c | 5 ++++- 3 files changed, 15 insertions(+), 6 deletions(-) diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 1a3daf2d18ef7..0b4db9ce77566 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -1979,6 +1979,7 @@ static inline bool ext4_has_incompat_features(struct super_block *sb) */ #define EXT4_FLAGS_RESIZING 0 #define EXT4_FLAGS_SHUTDOWN 1 +#define EXT4_FLAGS_BDEV_IS_DAX 2 static inline int ext4_forced_shutdown(struct ext4_sb_info *sbi) { diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index a10ff12194dbc..6532870f6a0bc 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -4398,10 +4398,10 @@ int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc) !ext4_test_inode_state(inode, EXT4_STATE_XATTR)); } -static bool ext4_should_use_dax(struct inode *inode) +static bool ext4_should_enable_dax(struct inode *inode) { - if (!test_opt(inode->i_sb, DAX_ALWAYS)) - return false; + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); + if (!S_ISREG(inode->i_mode)) return false; if (ext4_should_journal_data(inode)) @@ -4412,7 +4412,12 @@ static bool ext4_should_use_dax(struct inode *inode) return false; if (ext4_test_inode_flag(inode, EXT4_INODE_VERITY)) return false; - return true; + if (!test_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags)) + return false; + if (test_opt(inode->i_sb, DAX_ALWAYS)) + return true; + + return false; } void ext4_set_inode_flags(struct inode *inode) @@ -4430,7 +4435,7 @@ void ext4_set_inode_flags(struct inode *inode) new_fl |= S_NOATIME; if (flags & EXT4_DIRSYNC_FL) new_fl |= S_DIRSYNC; - if (ext4_should_use_dax(inode)) + if (ext4_should_enable_dax(inode)) new_fl |= S_DAX; if (flags & EXT4_ENCRYPT_FL) new_fl |= S_ENCRYPTED; diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 7b99c44d0a918..f7d76dcaedfee 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -4092,13 +4092,16 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) goto failed_mount; } + if (bdev_dax_supported(sb->s_bdev, blocksize)) + set_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags); + if (sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) { if (ext4_has_feature_inline_data(sb)) { ext4_msg(sb, KERN_ERR, "Cannot use DAX on a filesystem" " that may contain inline data"); goto failed_mount; } - if (!bdev_dax_supported(sb->s_bdev, blocksize)) { + if (!test_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags)) { ext4_msg(sb, KERN_ERR, "DAX unsupported by block device."); goto failed_mount; -- GitLab From 043546e46dc70c25ff7e2cf6d09cbb0424fc9978 Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Thu, 28 May 2020 07:59:59 -0700 Subject: [PATCH 0809/1971] fs/ext4: Only change S_DAX on inode load To prevent complications with in memory inodes we only set S_DAX on inode load. FS_XFLAG_DAX can be changed at any time and S_DAX will change after inode eviction and reload. Add init bool to ext4_set_inode_flags() to indicate if the inode is being newly initialized. Assert that S_DAX is not set on an inode which is just being loaded. 
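Condensed from the hunks below, the call-site convention this patch establishes is:

	ext4_set_inode_flags(inode, true);	/* ext4_new_inode() / __ext4_iget(): S_DAX may be set here */
	ext4_set_inode_flags(inode, false);	/* later flag/context updates: an existing S_DAX is preserved */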
Reviewed-by: Jan Kara Signed-off-by: Ira Weiny Link: https://lore.kernel.org/r/20200528150003.828793-6-ira.weiny@intel.com Signed-off-by: Theodore Ts'o --- fs/ext4/ext4.h | 2 +- fs/ext4/ialloc.c | 2 +- fs/ext4/inode.c | 13 ++++++++++--- fs/ext4/ioctl.c | 3 ++- fs/ext4/super.c | 4 ++-- fs/ext4/verity.c | 2 +- 6 files changed, 17 insertions(+), 9 deletions(-) diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 0b4db9ce77566..f5291693ce6e3 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -2693,7 +2693,7 @@ extern int ext4_can_truncate(struct inode *inode); extern int ext4_truncate(struct inode *); extern int ext4_break_layouts(struct inode *); extern int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length); -extern void ext4_set_inode_flags(struct inode *); +extern void ext4_set_inode_flags(struct inode *, bool init); extern int ext4_alloc_da_blocks(struct inode *inode); extern void ext4_set_aops(struct inode *inode); extern int ext4_writepage_trans_blocks(struct inode *); diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index 4b8c9a9bdf0c8..7941c140723fa 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -1116,7 +1116,7 @@ got: ei->i_block_group = group; ei->i_last_alloc_group = ~0; - ext4_set_inode_flags(inode); + ext4_set_inode_flags(inode, true); if (IS_DIRSYNC(inode)) ext4_handle_sync(handle); if (insert_inode_locked(inode) < 0) { diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 6532870f6a0bc..01636cf5f322f 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -4420,11 +4420,13 @@ static bool ext4_should_enable_dax(struct inode *inode) return false; } -void ext4_set_inode_flags(struct inode *inode) +void ext4_set_inode_flags(struct inode *inode, bool init) { unsigned int flags = EXT4_I(inode)->i_flags; unsigned int new_fl = 0; + WARN_ON_ONCE(IS_DAX(inode) && init); + if (flags & EXT4_SYNC_FL) new_fl |= S_SYNC; if (flags & EXT4_APPEND_FL) @@ -4435,8 +4437,13 @@ void ext4_set_inode_flags(struct inode *inode) new_fl |= S_NOATIME; if (flags & EXT4_DIRSYNC_FL) new_fl |= S_DIRSYNC; - if (ext4_should_enable_dax(inode)) + + /* Because of the way inode_set_flags() works we must preserve S_DAX + * here if already set. */ + new_fl |= (inode->i_flags & S_DAX); + if (init && ext4_should_enable_dax(inode)) new_fl |= S_DAX; + if (flags & EXT4_ENCRYPT_FL) new_fl |= S_ENCRYPTED; if (flags & EXT4_CASEFOLD_FL) @@ -4650,7 +4657,7 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, * not initialized on a new filesystem. 
*/ } ei->i_flags = le32_to_cpu(raw_inode->i_flags); - ext4_set_inode_flags(inode); + ext4_set_inode_flags(inode, true); inode->i_blocks = ext4_inode_blocks(raw_inode, ei); ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo); if (ext4_has_feature_64bit(sb)) diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index 5813e5e73eab6..145083e8cd1e4 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -381,7 +381,8 @@ static int ext4_ioctl_setflags(struct inode *inode, ext4_clear_inode_flag(inode, i); } - ext4_set_inode_flags(inode); + ext4_set_inode_flags(inode, false); + inode->i_ctime = current_time(inode); err = ext4_mark_iloc_dirty(handle, inode, &iloc); diff --git a/fs/ext4/super.c b/fs/ext4/super.c index f7d76dcaedfee..80eb814c47ebc 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1348,7 +1348,7 @@ static int ext4_set_context(struct inode *inode, const void *ctx, size_t len, * Update inode->i_flags - S_ENCRYPTED will be enabled, * S_DAX may be disabled */ - ext4_set_inode_flags(inode); + ext4_set_inode_flags(inode, false); } return res; } @@ -1375,7 +1375,7 @@ retry: * Update inode->i_flags - S_ENCRYPTED will be enabled, * S_DAX may be disabled */ - ext4_set_inode_flags(inode); + ext4_set_inode_flags(inode, false); res = ext4_mark_inode_dirty(handle, inode); if (res) EXT4_ERROR_INODE(inode, "Failed to mark inode dirty"); diff --git a/fs/ext4/verity.c b/fs/ext4/verity.c index f05a09fb2ae41..89a155ece3236 100644 --- a/fs/ext4/verity.c +++ b/fs/ext4/verity.c @@ -244,7 +244,7 @@ static int ext4_end_enable_verity(struct file *filp, const void *desc, if (err) goto out_stop; ext4_set_inode_flag(inode, EXT4_INODE_VERITY); - ext4_set_inode_flags(inode); + ext4_set_inode_flags(inode, false); err = ext4_mark_iloc_dirty(handle, inode, &iloc); } out_stop: -- GitLab From 9cb20f94afcd2964944f9468e38da736ee855b19 Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Thu, 28 May 2020 08:00:00 -0700 Subject: [PATCH 0810/1971] fs/ext4: Make DAX mount option a tri-state We add 'always', 'never', and 'inode' (default). '-o dax' continues to operate the same which is equivalent to 'always'. This new functionality is limited to ext4 only. Specifically we introduce a 2nd DAX mount flag EXT4_MOUNT2_DAX_NEVER and set it and EXT4_MOUNT_DAX_ALWAYS appropriately for the mode. We also force EXT4_MOUNT2_DAX_NEVER if !CONFIG_FS_DAX. Finally, EXT4_MOUNT2_DAX_INODE is used solely to detect if the user specified that option for printing. 
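For illustration only (not part of this patch): a minimal userspace sketch of selecting the new mode via mount(2), assuming a hypothetical pmem device at /dev/pmem0 and an existing /mnt/ext4 mount point; "dax=always", "dax=never" and "dax=inode" are passed the same way, and a bare "-o dax" remains equivalent to "dax=always".

	#include <stdio.h>
	#include <sys/mount.h>

	int main(void)
	{
		/* equivalent to: mount -o dax=inode /dev/pmem0 /mnt/ext4 */
		if (mount("/dev/pmem0", "/mnt/ext4", "ext4", 0, "dax=inode")) {
			perror("mount");
			return 1;
		}
		return 0;
	}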
Reviewed-by: Jan Kara Signed-off-by: Ira Weiny Link: https://lore.kernel.org/r/20200528150003.828793-7-ira.weiny@intel.com Signed-off-by: Theodore Ts'o --- fs/ext4/ext4.h | 2 ++ fs/ext4/inode.c | 2 ++ fs/ext4/super.c | 67 +++++++++++++++++++++++++++++++++++++++++-------- 3 files changed, 61 insertions(+), 10 deletions(-) diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index f5291693ce6e3..65ffb831b2b9b 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -1168,6 +1168,8 @@ struct ext4_inode_info { blocks */ #define EXT4_MOUNT2_HURD_COMPAT 0x00000004 /* Support HURD-castrated file systems */ +#define EXT4_MOUNT2_DAX_NEVER 0x00000008 /* Do not allow Direct Access */ +#define EXT4_MOUNT2_DAX_INODE 0x00000010 /* For printing options only */ #define EXT4_MOUNT2_EXPLICIT_JOURNAL_CHECKSUM 0x00000008 /* User explicitly specified journal checksum */ diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 01636cf5f322f..68fac9289109d 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -4402,6 +4402,8 @@ static bool ext4_should_enable_dax(struct inode *inode) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); + if (test_opt2(inode->i_sb, DAX_NEVER)) + return false; if (!S_ISREG(inode->i_mode)) return false; if (ext4_should_journal_data(inode)) diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 80eb814c47ebc..5e056aa20ce97 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1512,7 +1512,8 @@ enum { Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota, Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota, Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err, - Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_i_version, Opt_dax, + Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_i_version, + Opt_dax, Opt_dax_always, Opt_dax_inode, Opt_dax_never, Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_warn_on_error, Opt_nowarn_on_error, Opt_mblk_io_submit, Opt_lazytime, Opt_nolazytime, Opt_debug_want_extra_isize, @@ -1579,6 +1580,9 @@ static const match_table_t tokens = { {Opt_nobarrier, "nobarrier"}, {Opt_i_version, "i_version"}, {Opt_dax, "dax"}, + {Opt_dax_always, "dax=always"}, + {Opt_dax_inode, "dax=inode"}, + {Opt_dax_never, "dax=never"}, {Opt_stripe, "stripe=%u"}, {Opt_delalloc, "delalloc"}, {Opt_warn_on_error, "warn_on_error"}, @@ -1726,6 +1730,7 @@ static int clear_qf_name(struct super_block *sb, int qtype) #define MOPT_NO_EXT3 0x0200 #define MOPT_EXT4_ONLY (MOPT_NO_EXT2 | MOPT_NO_EXT3) #define MOPT_STRING 0x0400 +#define MOPT_SKIP 0x0800 static const struct mount_opts { int token; @@ -1775,7 +1780,13 @@ static const struct mount_opts { {Opt_min_batch_time, 0, MOPT_GTE0}, {Opt_inode_readahead_blks, 0, MOPT_GTE0}, {Opt_init_itable, 0, MOPT_GTE0}, - {Opt_dax, EXT4_MOUNT_DAX_ALWAYS, MOPT_SET}, + {Opt_dax, EXT4_MOUNT_DAX_ALWAYS, MOPT_SET | MOPT_SKIP}, + {Opt_dax_always, EXT4_MOUNT_DAX_ALWAYS, + MOPT_EXT4_ONLY | MOPT_SET | MOPT_SKIP}, + {Opt_dax_inode, EXT4_MOUNT2_DAX_INODE, + MOPT_EXT4_ONLY | MOPT_SET | MOPT_SKIP}, + {Opt_dax_never, EXT4_MOUNT2_DAX_NEVER, + MOPT_EXT4_ONLY | MOPT_SET | MOPT_SKIP}, {Opt_stripe, 0, MOPT_GTE0}, {Opt_resuid, 0, MOPT_GTE0}, {Opt_resgid, 0, MOPT_GTE0}, @@ -2084,13 +2095,32 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token, } sbi->s_jquota_fmt = m->mount_opt; #endif - } else if (token == Opt_dax) { + } else if (token == Opt_dax || token == Opt_dax_always || + token == Opt_dax_inode || token == Opt_dax_never) { #ifdef CONFIG_FS_DAX - ext4_msg(sb, KERN_WARNING, - "DAX enabled. 
Warning: EXPERIMENTAL, use at your own risk"); - sbi->s_mount_opt |= m->mount_opt; + switch (token) { + case Opt_dax: + case Opt_dax_always: + ext4_msg(sb, KERN_WARNING, + "DAX enabled. Warning: EXPERIMENTAL, use at your own risk"); + sbi->s_mount_opt |= EXT4_MOUNT_DAX_ALWAYS; + sbi->s_mount_opt2 &= ~EXT4_MOUNT2_DAX_NEVER; + break; + case Opt_dax_never: + sbi->s_mount_opt2 |= EXT4_MOUNT2_DAX_NEVER; + sbi->s_mount_opt &= ~EXT4_MOUNT_DAX_ALWAYS; + break; + case Opt_dax_inode: + sbi->s_mount_opt &= ~EXT4_MOUNT_DAX_ALWAYS; + sbi->s_mount_opt2 &= ~EXT4_MOUNT2_DAX_NEVER; + /* Strictly for printing options */ + sbi->s_mount_opt2 |= EXT4_MOUNT2_DAX_INODE; + break; + } #else ext4_msg(sb, KERN_INFO, "dax option not supported"); + sbi->s_mount_opt2 |= EXT4_MOUNT2_DAX_NEVER; + sbi->s_mount_opt &= ~EXT4_MOUNT_DAX_ALWAYS; return -1; #endif } else if (token == Opt_data_err_abort) { @@ -2254,7 +2284,7 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb, for (m = ext4_mount_opts; m->token != Opt_err; m++) { int want_set = m->flags & MOPT_SET; if (((m->flags & (MOPT_SET|MOPT_CLEAR)) == 0) || - (m->flags & MOPT_CLEAR_ERR)) + (m->flags & MOPT_CLEAR_ERR) || m->flags & MOPT_SKIP) continue; if (!nodefs && !(m->mount_opt & (sbi->s_mount_opt ^ def_mount_opt))) continue; /* skip if same as the default */ @@ -2314,6 +2344,17 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb, if (DUMMY_ENCRYPTION_ENABLED(sbi)) SEQ_OPTS_PUTS("test_dummy_encryption"); + if (test_opt(sb, DAX_ALWAYS)) { + if (IS_EXT2_SB(sb)) + SEQ_OPTS_PUTS("dax"); + else + SEQ_OPTS_PUTS("dax=always"); + } else if (test_opt2(sb, DAX_NEVER)) { + SEQ_OPTS_PUTS("dax=never"); + } else if (test_opt2(sb, DAX_INODE)) { + SEQ_OPTS_PUTS("dax=inode"); + } + ext4_show_quota_options(seq, sb); return 0; } @@ -5436,10 +5477,16 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) goto restore_opts; } - if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_DAX_ALWAYS) { + if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_DAX_ALWAYS || + (sbi->s_mount_opt2 ^ old_opts.s_mount_opt2) & EXT4_MOUNT2_DAX_NEVER || + (sbi->s_mount_opt2 ^ old_opts.s_mount_opt2) & EXT4_MOUNT2_DAX_INODE) { ext4_msg(sb, KERN_WARNING, "warning: refusing change of " - "dax flag with busy inodes while remounting"); - sbi->s_mount_opt ^= EXT4_MOUNT_DAX_ALWAYS; + "dax mount option with busy inodes while remounting"); + sbi->s_mount_opt &= ~EXT4_MOUNT_DAX_ALWAYS; + sbi->s_mount_opt |= old_opts.s_mount_opt & EXT4_MOUNT_DAX_ALWAYS; + sbi->s_mount_opt2 &= ~(EXT4_MOUNT2_DAX_NEVER | EXT4_MOUNT2_DAX_INODE); + sbi->s_mount_opt2 |= old_opts.s_mount_opt2 & + (EXT4_MOUNT2_DAX_NEVER | EXT4_MOUNT2_DAX_INODE); } if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) -- GitLab From fcebc7949cd2ff97407e5b77ed99a7211674c6de Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Thu, 28 May 2020 08:00:01 -0700 Subject: [PATCH 0811/1971] fs/ext4: Remove jflag variable The jflag variable serves almost no purpose. Remove it. 
Signed-off-by: Ira Weiny Link: https://lore.kernel.org/r/20200528150003.828793-8-ira.weiny@intel.com Signed-off-by: Theodore Ts'o --- fs/ext4/ioctl.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index 145083e8cd1e4..779631e8e8493 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -300,7 +300,6 @@ static int ext4_ioctl_setflags(struct inode *inode, int err = -EPERM, migrate = 0; struct ext4_iloc iloc; unsigned int oldflags, mask, i; - unsigned int jflag; struct super_block *sb = inode->i_sb; /* Is it quota file? Do not allow user to mess with it */ @@ -309,9 +308,6 @@ static int ext4_ioctl_setflags(struct inode *inode, oldflags = ei->i_flags; - /* The JOURNAL_DATA flag is modifiable only by root */ - jflag = flags & EXT4_JOURNAL_DATA_FL; - err = vfs_ioc_setflags_prepare(inode, oldflags, flags); if (err) goto flags_out; @@ -320,7 +316,7 @@ static int ext4_ioctl_setflags(struct inode *inode, * The JOURNAL_DATA flag can only be changed by * the relevant capability. */ - if ((jflag ^ oldflags) & (EXT4_JOURNAL_DATA_FL)) { + if ((flags ^ oldflags) & (EXT4_JOURNAL_DATA_FL)) { if (!capable(CAP_SYS_RESOURCE)) goto flags_out; } @@ -391,7 +387,7 @@ flags_err: if (err) goto flags_out; - if ((jflag ^ oldflags) & (EXT4_JOURNAL_DATA_FL)) { + if ((flags ^ oldflags) & (EXT4_JOURNAL_DATA_FL)) { /* * Changes to the journaling mode can cause unsafe changes to * S_DAX if the inode is DAX @@ -401,7 +397,8 @@ flags_err: goto flags_out; } - err = ext4_change_inode_journal_flag(inode, jflag); + err = ext4_change_inode_journal_flag(inode, + flags & EXT4_JOURNAL_DATA_FL); if (err) goto flags_out; } -- GitLab From b383a73f2b832491a2f9e6e8ada26aad53b5763d Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Thu, 28 May 2020 08:00:02 -0700 Subject: [PATCH 0812/1971] fs/ext4: Introduce DAX inode flag Add a flag ([EXT4|FS]_DAX_FL) to preserve FS_XFLAG_DAX in the ext4 inode. Set the flag to be user visible and changeable. Set the flag to be inherited. Allow applications to change the flag at any time except if it conflicts with the set of mutually exclusive flags (Currently VERITY, ENCRYPT, JOURNAL_DATA). Furthermore, restrict setting any of the exclusive flags if DAX is set. While conceptually possible, we do not allow setting EXT4_DAX_FL while at the same time clearing exclusion flags (or vice versa) for 2 reasons: 1) The DAX flag does not take effect immediately which introduces quite a bit of complexity 2) There is no clear use case for being this flexible Finally, on regular files, flag the inode to not be cached to facilitate changing S_DAX on the next creation of the inode. Signed-off-by: Ira Weiny Link: https://lore.kernel.org/r/20200528150003.828793-9-ira.weiny@intel.com Signed-off-by: Theodore Ts'o --- fs/ext4/ext4.h | 18 ++++++++++++---- fs/ext4/inode.c | 2 +- fs/ext4/ioctl.c | 47 ++++++++++++++++++++++++++++++++++++++++- fs/ext4/super.c | 3 +++ fs/ext4/verity.c | 2 +- include/uapi/linux/fs.h | 1 + 6 files changed, 66 insertions(+), 7 deletions(-) diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 65ffb831b2b9b..598e00a9453fc 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -415,13 +415,16 @@ struct flex_groups { #define EXT4_VERITY_FL 0x00100000 /* Verity protected inode */ #define EXT4_EA_INODE_FL 0x00200000 /* Inode used for large EA */ /* 0x00400000 was formerly EXT4_EOFBLOCKS_FL */ + +#define EXT4_DAX_FL 0x02000000 /* Inode is DAX */ + #define EXT4_INLINE_DATA_FL 0x10000000 /* Inode has inline data. 
*/ #define EXT4_PROJINHERIT_FL 0x20000000 /* Create with parents projid */ #define EXT4_CASEFOLD_FL 0x40000000 /* Casefolded file */ #define EXT4_RESERVED_FL 0x80000000 /* reserved for ext4 lib */ -#define EXT4_FL_USER_VISIBLE 0x705BDFFF /* User visible flags */ -#define EXT4_FL_USER_MODIFIABLE 0x604BC0FF /* User modifiable flags */ +#define EXT4_FL_USER_VISIBLE 0x725BDFFF /* User visible flags */ +#define EXT4_FL_USER_MODIFIABLE 0x624BC0FF /* User modifiable flags */ /* Flags we can manipulate with through EXT4_IOC_FSSETXATTR */ #define EXT4_FL_XFLAG_VISIBLE (EXT4_SYNC_FL | \ @@ -429,14 +432,16 @@ struct flex_groups { EXT4_APPEND_FL | \ EXT4_NODUMP_FL | \ EXT4_NOATIME_FL | \ - EXT4_PROJINHERIT_FL) + EXT4_PROJINHERIT_FL | \ + EXT4_DAX_FL) /* Flags that should be inherited by new inodes from their parent. */ #define EXT4_FL_INHERITED (EXT4_SECRM_FL | EXT4_UNRM_FL | EXT4_COMPR_FL |\ EXT4_SYNC_FL | EXT4_NODUMP_FL | EXT4_NOATIME_FL |\ EXT4_NOCOMPR_FL | EXT4_JOURNAL_DATA_FL |\ EXT4_NOTAIL_FL | EXT4_DIRSYNC_FL |\ - EXT4_PROJINHERIT_FL | EXT4_CASEFOLD_FL) + EXT4_PROJINHERIT_FL | EXT4_CASEFOLD_FL |\ + EXT4_DAX_FL) /* Flags that are appropriate for regular files (all but dir-specific ones). */ #define EXT4_REG_FLMASK (~(EXT4_DIRSYNC_FL | EXT4_TOPDIR_FL | EXT4_CASEFOLD_FL |\ @@ -448,6 +453,10 @@ struct flex_groups { /* The only flags that should be swapped */ #define EXT4_FL_SHOULD_SWAP (EXT4_HUGE_FILE_FL | EXT4_EXTENTS_FL) +/* Flags which are mutually exclusive to DAX */ +#define EXT4_DAX_MUT_EXCL (EXT4_VERITY_FL | EXT4_ENCRYPT_FL |\ + EXT4_JOURNAL_DATA_FL) + /* Mask out flags that are inappropriate for the given type of inode. */ static inline __u32 ext4_mask_flags(umode_t mode, __u32 flags) { @@ -488,6 +497,7 @@ enum { EXT4_INODE_VERITY = 20, /* Verity protected inode */ EXT4_INODE_EA_INODE = 21, /* Inode used for large EA */ /* 22 was formerly EXT4_INODE_EOFBLOCKS */ + EXT4_INODE_DAX = 25, /* Inode is DAX */ EXT4_INODE_INLINE_DATA = 28, /* Data in inode. 
*/ EXT4_INODE_PROJINHERIT = 29, /* Create with parents projid */ EXT4_INODE_RESERVED = 31, /* reserved for ext4 lib */ diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 68fac9289109d..778b0dbe3da64 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -4419,7 +4419,7 @@ static bool ext4_should_enable_dax(struct inode *inode) if (test_opt(inode->i_sb, DAX_ALWAYS)) return true; - return false; + return ext4_test_inode_flag(inode, EXT4_INODE_DAX); } void ext4_set_inode_flags(struct inode *inode, bool init) diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index 779631e8e8493..1b520d07d3714 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -292,6 +292,38 @@ static int ext4_ioctl_check_immutable(struct inode *inode, __u32 new_projid, return 0; } +static void ext4_dax_dontcache(struct inode *inode, unsigned int flags) +{ + struct ext4_inode_info *ei = EXT4_I(inode); + + if (S_ISDIR(inode->i_mode)) + return; + + if (test_opt2(inode->i_sb, DAX_NEVER) || + test_opt(inode->i_sb, DAX_ALWAYS)) + return; + + if ((ei->i_flags ^ flags) & EXT4_DAX_FL) + d_mark_dontcache(inode); +} + +static bool dax_compatible(struct inode *inode, unsigned int oldflags, + unsigned int flags) +{ + if (flags & EXT4_DAX_FL) { + if ((oldflags & EXT4_DAX_MUT_EXCL) || + ext4_test_inode_state(inode, + EXT4_STATE_VERITY_IN_PROGRESS)) { + return false; + } + } + + if ((flags & EXT4_DAX_MUT_EXCL) && (oldflags & EXT4_DAX_FL)) + return false; + + return true; +} + static int ext4_ioctl_setflags(struct inode *inode, unsigned int flags) { @@ -320,6 +352,12 @@ static int ext4_ioctl_setflags(struct inode *inode, if (!capable(CAP_SYS_RESOURCE)) goto flags_out; } + + if (!dax_compatible(inode, oldflags, flags)) { + err = -EOPNOTSUPP; + goto flags_out; + } + if ((flags ^ oldflags) & EXT4_EXTENTS_FL) migrate = 1; @@ -365,6 +403,8 @@ static int ext4_ioctl_setflags(struct inode *inode, if (err) goto flags_err; + ext4_dax_dontcache(inode, flags); + for (i = 0, mask = 1; i < 32; i++, mask <<= 1) { if (!(mask & EXT4_FL_USER_MODIFIABLE)) continue; @@ -525,12 +565,15 @@ static inline __u32 ext4_iflags_to_xflags(unsigned long iflags) xflags |= FS_XFLAG_NOATIME; if (iflags & EXT4_PROJINHERIT_FL) xflags |= FS_XFLAG_PROJINHERIT; + if (iflags & EXT4_DAX_FL) + xflags |= FS_XFLAG_DAX; return xflags; } #define EXT4_SUPPORTED_FS_XFLAGS (FS_XFLAG_SYNC | FS_XFLAG_IMMUTABLE | \ FS_XFLAG_APPEND | FS_XFLAG_NODUMP | \ - FS_XFLAG_NOATIME | FS_XFLAG_PROJINHERIT) + FS_XFLAG_NOATIME | FS_XFLAG_PROJINHERIT | \ + FS_XFLAG_DAX) /* Transfer xflags flags to internal */ static inline unsigned long ext4_xflags_to_iflags(__u32 xflags) @@ -549,6 +592,8 @@ static inline unsigned long ext4_xflags_to_iflags(__u32 xflags) iflags |= EXT4_NOATIME_FL; if (xflags & FS_XFLAG_PROJINHERIT) iflags |= EXT4_PROJINHERIT_FL; + if (xflags & FS_XFLAG_DAX) + iflags |= EXT4_DAX_FL; return iflags; } diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 5e056aa20ce97..3658e30169999 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1323,6 +1323,9 @@ static int ext4_set_context(struct inode *inode, const void *ctx, size_t len, if (WARN_ON_ONCE(IS_DAX(inode) && i_size_read(inode))) return -EINVAL; + if (ext4_test_inode_flag(inode, EXT4_INODE_DAX)) + return -EOPNOTSUPP; + res = ext4_convert_inline_data(inode); if (res) return res; diff --git a/fs/ext4/verity.c b/fs/ext4/verity.c index 89a155ece3236..4fecb3e4e3388 100644 --- a/fs/ext4/verity.c +++ b/fs/ext4/verity.c @@ -113,7 +113,7 @@ static int ext4_begin_enable_verity(struct file *filp) handle_t *handle; int err; - if (IS_DAX(inode)) + if 
(IS_DAX(inode) || ext4_test_inode_flag(inode, EXT4_INODE_DAX)) return -EINVAL; if (ext4_verity_in_progress(inode)) diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h index 379a612f8f1d9..f44eb0a04afdd 100644 --- a/include/uapi/linux/fs.h +++ b/include/uapi/linux/fs.h @@ -262,6 +262,7 @@ struct fsxattr { #define FS_EA_INODE_FL 0x00200000 /* Inode used for large EA */ #define FS_EOFBLOCKS_FL 0x00400000 /* Reserved for ext4 */ #define FS_NOCOW_FL 0x00800000 /* Do not cow file */ +#define FS_DAX_FL 0x02000000 /* Inode is DAX */ #define FS_INLINE_DATA_FL 0x10000000 /* Reserved for ext4 */ #define FS_PROJINHERIT_FL 0x20000000 /* Create with parents projid */ #define FS_CASEFOLD_FL 0x40000000 /* Folder is case insensitive */ -- GitLab From 15ee65676f1e2bbb237e9ecd317b45d8eba29237 Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Thu, 28 May 2020 08:00:03 -0700 Subject: [PATCH 0813/1971] Documentation/dax: Update DAX enablement for ext4 Update the document to reflect ext4 and xfs now behave the same. Reviewed-by: Jan Kara Signed-off-by: Ira Weiny Link: https://lore.kernel.org/r/20200528150003.828793-10-ira.weiny@intel.com Signed-off-by: Theodore Ts'o --- Documentation/filesystems/dax.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Documentation/filesystems/dax.txt b/Documentation/filesystems/dax.txt index 735fb4b541174..265c4f808dbfe 100644 --- a/Documentation/filesystems/dax.txt +++ b/Documentation/filesystems/dax.txt @@ -25,7 +25,7 @@ size when creating the filesystem. Currently 3 filesystems support DAX: ext2, ext4 and xfs. Enabling DAX on them is different. -Enabling DAX on ext4 and ext2 +Enabling DAX on ext2 ----------------------------- When mounting the filesystem, use the "-o dax" option on the command line or @@ -33,8 +33,8 @@ add 'dax' to the options in /etc/fstab. This works to enable DAX on all files within the filesystem. It is equivalent to the '-o dax=always' behavior below. -Enabling DAX on xfs -------------------- +Enabling DAX on xfs and ext4 +---------------------------- Summary ------- -- GitLab From f9eec2ea785204f139f0620f36ce1ab0851de499 Mon Sep 17 00:00:00 2001 From: Mike Looijmans Date: Thu, 7 May 2020 08:15:44 +0200 Subject: [PATCH 0814/1971] clk: clk-si5341: Add support for the Si5345 series Add support for the Si5342, Si5344 and Si5345 chips. These are equivalent to the Si5341 family, but with more clock input options (which are not supported yet by this driver). Signed-off-by: Mike Looijmans Link: https://lkml.kernel.org/r/20200507061544.11388-1-mike.looijmans@topic.nl Acked-by: Rob Herring Signed-off-by: Stephen Boyd --- .../bindings/clock/silabs,si5341.txt | 11 ++- drivers/clk/clk-si5341.c | 69 +++++++++++++++++-- 2 files changed, 74 insertions(+), 6 deletions(-) diff --git a/Documentation/devicetree/bindings/clock/silabs,si5341.txt b/Documentation/devicetree/bindings/clock/silabs,si5341.txt index a70c333e4cd40..504cce3abe464 100644 --- a/Documentation/devicetree/bindings/clock/silabs,si5341.txt +++ b/Documentation/devicetree/bindings/clock/silabs,si5341.txt @@ -1,15 +1,21 @@ -Binding for Silicon Labs Si5341 and Si5340 programmable i2c clock generator. +Binding for Silicon Labs Si5340, Si5341 Si5342, Si5344 and Si5345 programmable +i2c clock generator. 
Reference [1] Si5341 Data Sheet https://www.silabs.com/documents/public/data-sheets/Si5341-40-D-DataSheet.pdf [2] Si5341 Reference Manual https://www.silabs.com/documents/public/reference-manuals/Si5341-40-D-RM.pdf +[3] Si5345 Reference Manual + https://www.silabs.com/documents/public/reference-manuals/Si5345-44-42-D-RM.pdf The Si5341 and Si5340 are programmable i2c clock generators with up to 10 output clocks. The chip contains a PLL that sources 5 (or 4) multisynth clocks, which in turn can be directed to any of the 10 (or 4) outputs through a divider. The internal structure of the clock generators can be found in [2]. +The Si5345 is similar to the Si5341 with the addition of fractional input +dividers and automatic input selection, as described in [3]. +The Si5342 and Si5344 are smaller versions of the Si5345, with 2 or 4 outputs. The driver can be used in "as is" mode, reading the current settings from the chip at boot, in case you have a (pre-)programmed device. If the PLL is not @@ -28,6 +34,9 @@ Required properties: - compatible: shall be one of the following: "silabs,si5340" - Si5340 A/B/C/D "silabs,si5341" - Si5341 A/B/C/D + "silabs,si5342" - Si5342 A/B/C/D + "silabs,si5344" - Si5344 A/B/C/D + "silabs,si5345" - Si5345 A/B/C/D - reg: i2c device address, usually 0x74 - #clock-cells: from common clock binding; shall be set to 2. The first value is "0" for outputs, "1" for synthesizers. diff --git a/drivers/clk/clk-si5341.c b/drivers/clk/clk-si5341.c index 3c228b0181161..3d7acab9d280d 100644 --- a/drivers/clk/clk-si5341.c +++ b/drivers/clk/clk-si5341.c @@ -1,8 +1,14 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Driver for Silicon Labs Si5341/Si5340 Clock generator + * Driver for Silicon Labs Si5340, Si5341, Si5342, Si5344 and Si5345 * Copyright (C) 2019 Topic Embedded Products * Author: Mike Looijmans + * + * The Si5341 has 10 outputs and 5 synthesizers. + * The Si5340 is a smaller version of the Si5341 with only 4 outputs. + * The Si5345 is similar to the Si5341, with the addition of fractional input + * dividers and automatic input selection. + * The Si5342 and Si5344 are smaller versions of the Si5345. 
*/ #include @@ -18,11 +24,17 @@ #define SI5341_NUM_INPUTS 4 -#define SI5341_MAX_NUM_OUTPUTS 10 #define SI5340_MAX_NUM_OUTPUTS 4 +#define SI5341_MAX_NUM_OUTPUTS 10 +#define SI5342_MAX_NUM_OUTPUTS 2 +#define SI5344_MAX_NUM_OUTPUTS 4 +#define SI5345_MAX_NUM_OUTPUTS 10 -#define SI5341_NUM_SYNTH 5 #define SI5340_NUM_SYNTH 4 +#define SI5341_NUM_SYNTH 5 +#define SI5342_NUM_SYNTH 2 +#define SI5344_NUM_SYNTH 4 +#define SI5345_NUM_SYNTH 5 /* Range of the synthesizer fractional divider */ #define SI5341_SYNTH_N_MIN 10 @@ -65,6 +77,7 @@ struct clk_si5341 { u64 freq_vco; /* 13500–14256 MHz */ u8 num_outputs; u8 num_synth; + u16 chip_id; }; #define to_clk_si5341(_hw) container_of(_hw, struct clk_si5341, hw) @@ -142,6 +155,7 @@ static const char * const si5341_input_clock_names[] = { }; /* Output configuration registers 0..9 are not quite logically organized */ +/* Also for si5345 */ static const u16 si5341_reg_output_offset[] = { 0x0108, 0x010D, @@ -155,6 +169,7 @@ static const u16 si5341_reg_output_offset[] = { 0x013A, }; +/* for si5340, si5342 and si5344 */ static const u16 si5340_reg_output_offset[] = { 0x0112, 0x0117, @@ -974,12 +989,32 @@ static int si5341_probe_chip_id(struct clk_si5341 *data) data->reg_output_offset = si5341_reg_output_offset; data->reg_rdiv_offset = si5341_reg_rdiv_offset; break; + case 0x5342: + data->num_outputs = SI5342_MAX_NUM_OUTPUTS; + data->num_synth = SI5342_NUM_SYNTH; + data->reg_output_offset = si5340_reg_output_offset; + data->reg_rdiv_offset = si5340_reg_rdiv_offset; + break; + case 0x5344: + data->num_outputs = SI5344_MAX_NUM_OUTPUTS; + data->num_synth = SI5344_NUM_SYNTH; + data->reg_output_offset = si5340_reg_output_offset; + data->reg_rdiv_offset = si5340_reg_rdiv_offset; + break; + case 0x5345: + data->num_outputs = SI5345_MAX_NUM_OUTPUTS; + data->num_synth = SI5345_NUM_SYNTH; + data->reg_output_offset = si5341_reg_output_offset; + data->reg_rdiv_offset = si5341_reg_rdiv_offset; + break; default: dev_err(&data->i2c_client->dev, "Model '%x' not supported\n", model); return -EINVAL; } + data->chip_id = model; + return 0; } @@ -1054,6 +1089,11 @@ static const struct si5341_reg_default si5341_preamble[] = { { 0x0B4E, 0x1A }, }; +static const struct si5341_reg_default si5345_preamble[] = { + { 0x0B25, 0x00 }, + { 0x0540, 0x01 }, +}; + static int si5341_send_preamble(struct clk_si5341 *data) { int res; @@ -1068,8 +1108,14 @@ static int si5341_send_preamble(struct clk_si5341 *data) res = regmap_write(data->regmap, 0xB24, revision < 2 ? 0xD8 : 0xC0); if (res < 0) return res; - res = si5341_write_multiple(data, - si5341_preamble, ARRAY_SIZE(si5341_preamble)); + + /* The si5342..si5345 require a different preamble */ + if (data->chip_id > 0x5341) + res = si5341_write_multiple(data, + si5345_preamble, ARRAY_SIZE(si5345_preamble)); + else + res = si5341_write_multiple(data, + si5341_preamble, ARRAY_SIZE(si5341_preamble)); if (res < 0) return res; @@ -1095,6 +1141,13 @@ static int si5341_finalize_defaults(struct clk_si5341 *data) if (res < 0) return res; + /* The si5342..si5345 have an additional post-amble */ + if (data->chip_id > 0x5341) { + res = regmap_write(data->regmap, 0x540, 0x0); + if (res < 0) + return res; + } + /* Datasheet does not explain these nameless registers */ res = regmap_write(data->regmap, 0xB24, revision < 2 ? 
0xDB : 0xC3); if (res < 0) @@ -1499,6 +1552,9 @@ static int si5341_probe(struct i2c_client *client, static const struct i2c_device_id si5341_id[] = { { "si5340", 0 }, { "si5341", 1 }, + { "si5342", 2 }, + { "si5344", 4 }, + { "si5345", 5 }, { } }; MODULE_DEVICE_TABLE(i2c, si5341_id); @@ -1506,6 +1562,9 @@ MODULE_DEVICE_TABLE(i2c, si5341_id); static const struct of_device_id clk_si5341_of_match[] = { { .compatible = "silabs,si5340" }, { .compatible = "silabs,si5341" }, + { .compatible = "silabs,si5342" }, + { .compatible = "silabs,si5344" }, + { .compatible = "silabs,si5345" }, { } }; MODULE_DEVICE_TABLE(of, clk_si5341_of_match); -- GitLab From 907f9291f937463c27e5ca9cb5f1d8eedf9a2738 Mon Sep 17 00:00:00 2001 From: Eugeniy Paltsev Date: Wed, 11 Mar 2020 16:41:13 +0300 Subject: [PATCH 0815/1971] CLK: HSDK: CGU: check if PLL is bypassed first If PLL is bypassed the EN (enable) bit has no effect on output clock. Signed-off-by: Eugeniy Paltsev Link: https://lkml.kernel.org/r/20200311134115.13257-2-Eugeniy.Paltsev@synopsys.com Signed-off-by: Stephen Boyd --- drivers/clk/clk-hsdk-pll.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/clk/clk-hsdk-pll.c b/drivers/clk/clk-hsdk-pll.c index 97d1e8c35b713..b47a559f35284 100644 --- a/drivers/clk/clk-hsdk-pll.c +++ b/drivers/clk/clk-hsdk-pll.c @@ -172,14 +172,14 @@ static unsigned long hsdk_pll_recalc_rate(struct clk_hw *hw, dev_dbg(clk->dev, "current configuration: %#x\n", val); - /* Check if PLL is disabled */ - if (val & CGU_PLL_CTRL_PD) - return 0; - /* Check if PLL is bypassed */ if (val & CGU_PLL_CTRL_BYPASS) return parent_rate; + /* Check if PLL is disabled */ + if (val & CGU_PLL_CTRL_PD) + return 0; + /* input divider = reg.idiv + 1 */ idiv = 1 + ((val & CGU_PLL_CTRL_IDIV_MASK) >> CGU_PLL_CTRL_IDIV_SHIFT); /* fb divider = 2*(reg.fbdiv + 1) */ -- GitLab From 423f042a65a2af82337af4e3c7f2cd828185e4f3 Mon Sep 17 00:00:00 2001 From: Eugeniy Paltsev Date: Wed, 11 Mar 2020 16:41:14 +0300 Subject: [PATCH 0816/1971] CLK: HSDK: CGU: support PLL bypassing Support setting PLL to bypass mode to support output frequency equal to input one. 
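When the BYPASS bit is set the dividers are ignored and the PLL output simply follows the reference input, which is what the new all-zero cfg entry with bypass = 1 encodes. A minimal consumer-side sketch, assuming the HDMI PLL is fed by a 27 MHz reference and is looked up under the hypothetical name "hdmi_pll":

    #include <linux/clk.h>
    #include <linux/err.h>

    struct clk *pll;
    int ret;

    pll = devm_clk_get(dev, "hdmi_pll");    /* hypothetical lookup name */
    if (IS_ERR(pll))
            return PTR_ERR(pll);

    /* Matches the { 27000000, ..., bypass = 1 } entry: the driver sets
     * CGU_PLL_CTRL_BYPASS, and clk_get_rate() then reports the input rate. */
    ret = clk_set_rate(pll, 27000000);
    if (ret)
            return ret;
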
Signed-off-by: Eugeniy Paltsev Link: https://lkml.kernel.org/r/20200311134115.13257-3-Eugeniy.Paltsev@synopsys.com Signed-off-by: Stephen Boyd --- drivers/clk/clk-hsdk-pll.c | 61 +++++++++++++++++++++----------------- 1 file changed, 34 insertions(+), 27 deletions(-) diff --git a/drivers/clk/clk-hsdk-pll.c b/drivers/clk/clk-hsdk-pll.c index b47a559f35284..0ea7af57a5b1f 100644 --- a/drivers/clk/clk-hsdk-pll.c +++ b/drivers/clk/clk-hsdk-pll.c @@ -53,35 +53,37 @@ struct hsdk_pll_cfg { u32 fbdiv; u32 odiv; u32 band; + u32 bypass; }; static const struct hsdk_pll_cfg asdt_pll_cfg[] = { - { 100000000, 0, 11, 3, 0 }, - { 133000000, 0, 15, 3, 0 }, - { 200000000, 1, 47, 3, 0 }, - { 233000000, 1, 27, 2, 0 }, - { 300000000, 1, 35, 2, 0 }, - { 333000000, 1, 39, 2, 0 }, - { 400000000, 1, 47, 2, 0 }, - { 500000000, 0, 14, 1, 0 }, - { 600000000, 0, 17, 1, 0 }, - { 700000000, 0, 20, 1, 0 }, - { 800000000, 0, 23, 1, 0 }, - { 900000000, 1, 26, 0, 0 }, - { 1000000000, 1, 29, 0, 0 }, - { 1100000000, 1, 32, 0, 0 }, - { 1200000000, 1, 35, 0, 0 }, - { 1300000000, 1, 38, 0, 0 }, - { 1400000000, 1, 41, 0, 0 }, - { 1500000000, 1, 44, 0, 0 }, - { 1600000000, 1, 47, 0, 0 }, + { 100000000, 0, 11, 3, 0, 0 }, + { 133000000, 0, 15, 3, 0, 0 }, + { 200000000, 1, 47, 3, 0, 0 }, + { 233000000, 1, 27, 2, 0, 0 }, + { 300000000, 1, 35, 2, 0, 0 }, + { 333000000, 1, 39, 2, 0, 0 }, + { 400000000, 1, 47, 2, 0, 0 }, + { 500000000, 0, 14, 1, 0, 0 }, + { 600000000, 0, 17, 1, 0, 0 }, + { 700000000, 0, 20, 1, 0, 0 }, + { 800000000, 0, 23, 1, 0, 0 }, + { 900000000, 1, 26, 0, 0, 0 }, + { 1000000000, 1, 29, 0, 0, 0 }, + { 1100000000, 1, 32, 0, 0, 0 }, + { 1200000000, 1, 35, 0, 0, 0 }, + { 1300000000, 1, 38, 0, 0, 0 }, + { 1400000000, 1, 41, 0, 0, 0 }, + { 1500000000, 1, 44, 0, 0, 0 }, + { 1600000000, 1, 47, 0, 0, 0 }, {} }; static const struct hsdk_pll_cfg hdmi_pll_cfg[] = { - { 297000000, 0, 21, 2, 0 }, - { 540000000, 0, 19, 1, 0 }, - { 594000000, 0, 21, 1, 0 }, + { 27000000, 0, 0, 0, 0, 1 }, + { 297000000, 0, 21, 2, 0, 0 }, + { 540000000, 0, 19, 1, 0, 0 }, + { 594000000, 0, 21, 1, 0, 0 }, {} }; @@ -134,11 +136,16 @@ static inline void hsdk_pll_set_cfg(struct hsdk_pll_clk *clk, { u32 val = 0; - /* Powerdown and Bypass bits should be cleared */ - val |= cfg->idiv << CGU_PLL_CTRL_IDIV_SHIFT; - val |= cfg->fbdiv << CGU_PLL_CTRL_FBDIV_SHIFT; - val |= cfg->odiv << CGU_PLL_CTRL_ODIV_SHIFT; - val |= cfg->band << CGU_PLL_CTRL_BAND_SHIFT; + if (cfg->bypass) { + val = hsdk_pll_read(clk, CGU_PLL_CTRL); + val |= CGU_PLL_CTRL_BYPASS; + } else { + /* Powerdown and Bypass bits should be cleared */ + val |= cfg->idiv << CGU_PLL_CTRL_IDIV_SHIFT; + val |= cfg->fbdiv << CGU_PLL_CTRL_FBDIV_SHIFT; + val |= cfg->odiv << CGU_PLL_CTRL_ODIV_SHIFT; + val |= cfg->band << CGU_PLL_CTRL_BAND_SHIFT; + } dev_dbg(clk->dev, "write configuration: %#x\n", val); -- GitLab From 56fbeefe366e5920802f60f26b6b59b365c0569b Mon Sep 17 00:00:00 2001 From: Eugeniy Paltsev Date: Wed, 11 Mar 2020 16:41:15 +0300 Subject: [PATCH 0817/1971] CLK: HSDK: CGU: add support for 148.5MHz clock Add support for 148.5MHz clock for HDMI PLL Signed-off-by: Eugeniy Paltsev Link: https://lkml.kernel.org/r/20200311134115.13257-4-Eugeniy.Paltsev@synopsys.com Signed-off-by: Stephen Boyd --- drivers/clk/clk-hsdk-pll.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/clk/clk-hsdk-pll.c b/drivers/clk/clk-hsdk-pll.c index 0ea7af57a5b1f..b4f8852201cba 100644 --- a/drivers/clk/clk-hsdk-pll.c +++ b/drivers/clk/clk-hsdk-pll.c @@ -81,6 +81,7 @@ static const struct hsdk_pll_cfg asdt_pll_cfg[] = { static 
const struct hsdk_pll_cfg hdmi_pll_cfg[] = { { 27000000, 0, 0, 0, 0, 1 }, + { 148500000, 0, 21, 3, 0, 0 }, { 297000000, 0, 21, 2, 0, 0 }, { 540000000, 0, 19, 1, 0, 0 }, { 594000000, 0, 21, 1, 0, 0 }, -- GitLab From 7b9e111a5216640c86c206e6b419a0f4908c202b Mon Sep 17 00:00:00 2001 From: Macpaul Lin Date: Fri, 21 Feb 2020 17:52:18 +0800 Subject: [PATCH 0818/1971] dt-bindings: clock: mediatek: document clk bindings for Mediatek MT6765 SoC This patch adds the binding documentation for apmixedsys, audsys, camsys, imgsys, infracfg, mmsys, pericfg, topckgen Signed-off-by: Mars Cheng Signed-off-by: Owen Chen Signed-off-by: Macpaul Lin Acked-by: Rob Herring Link: https://lore.kernel.org/r/1582278742-1626-2-git-send-email-macpaul.lin@mediatek.com Signed-off-by: Stephen Boyd --- .../devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt | 1 + .../devicetree/bindings/arm/mediatek/mediatek,audsys.txt | 1 + .../devicetree/bindings/arm/mediatek/mediatek,camsys.txt | 1 + .../devicetree/bindings/arm/mediatek/mediatek,imgsys.txt | 1 + .../devicetree/bindings/arm/mediatek/mediatek,infracfg.txt | 1 + .../devicetree/bindings/arm/mediatek/mediatek,mmsys.txt | 1 + .../devicetree/bindings/arm/mediatek/mediatek,pericfg.txt | 1 + .../devicetree/bindings/arm/mediatek/mediatek,topckgen.txt | 1 + 8 files changed, 8 insertions(+) diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt index ff000ccade78c..bd7a0fa5801bb 100644 --- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt +++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt @@ -8,6 +8,7 @@ Required Properties: - compatible: Should be one of: - "mediatek,mt2701-apmixedsys" - "mediatek,mt2712-apmixedsys", "syscon" + - "mediatek,mt6765-apmixedsys", "syscon" - "mediatek,mt6779-apmixedsys", "syscon" - "mediatek,mt6797-apmixedsys" - "mediatek,mt7622-apmixedsys" diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,audsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,audsys.txt index e4ca7b7031236..38309db115f53 100644 --- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,audsys.txt +++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,audsys.txt @@ -7,6 +7,7 @@ Required Properties: - compatible: Should be one of: - "mediatek,mt2701-audsys", "syscon" + - "mediatek,mt6765-audsys", "syscon" - "mediatek,mt6779-audio", "syscon" - "mediatek,mt7622-audsys", "syscon" - "mediatek,mt7623-audsys", "mediatek,mt2701-audsys", "syscon" diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,camsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,camsys.txt index 1f4aaa15a37ec..a0ce82085ad0c 100644 --- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,camsys.txt +++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,camsys.txt @@ -6,6 +6,7 @@ The MediaTek camsys controller provides various clocks to the system. 
Required Properties: - compatible: Should be one of: + - "mediatek,mt6765-camsys", "syscon" - "mediatek,mt6779-camsys", "syscon" - "mediatek,mt8183-camsys", "syscon" - #clock-cells: Must be 1 diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,imgsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,imgsys.txt index 2b693e343c56d..1e1f00718a7d6 100644 --- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,imgsys.txt +++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,imgsys.txt @@ -8,6 +8,7 @@ Required Properties: - compatible: Should be one of: - "mediatek,mt2701-imgsys", "syscon" - "mediatek,mt2712-imgsys", "syscon" + - "mediatek,mt6765-imgsys", "syscon" - "mediatek,mt6779-imgsys", "syscon" - "mediatek,mt6797-imgsys", "syscon" - "mediatek,mt7623-imgsys", "mediatek,mt2701-imgsys", "syscon" diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt index db2f4fd754e7d..49a968be1a808 100644 --- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt +++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt @@ -9,6 +9,7 @@ Required Properties: - compatible: Should be one of: - "mediatek,mt2701-infracfg", "syscon" - "mediatek,mt2712-infracfg", "syscon" + - "mediatek,mt6765-infracfg", "syscon" - "mediatek,mt6779-infracfg_ao", "syscon" - "mediatek,mt6797-infracfg", "syscon" - "mediatek,mt7622-infracfg", "syscon" diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt index 301eefbe16189..4a712509bec2e 100644 --- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt +++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt @@ -8,6 +8,7 @@ Required Properties: - compatible: Should be one of: - "mediatek,mt2701-mmsys", "syscon" - "mediatek,mt2712-mmsys", "syscon" + - "mediatek,mt6765-mmsys", "syscon" - "mediatek,mt6779-mmsys", "syscon" - "mediatek,mt6797-mmsys", "syscon" - "mediatek,mt7623-mmsys", "mediatek,mt2701-mmsys", "syscon" diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,pericfg.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,pericfg.txt index ecf027a9003a8..dcbd414c7fd7f 100644 --- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,pericfg.txt +++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,pericfg.txt @@ -9,6 +9,7 @@ Required Properties: - compatible: Should be one of: - "mediatek,mt2701-pericfg", "syscon" - "mediatek,mt2712-pericfg", "syscon" + - "mediatek,mt6765-pericfg", "syscon" - "mediatek,mt7622-pericfg", "syscon" - "mediatek,mt7623-pericfg", "mediatek,mt2701-pericfg", "syscon" - "mediatek,mt7629-pericfg", "syscon" diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt index 0293d693ce0ca..9b0394cbbdc91 100644 --- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt +++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt @@ -8,6 +8,7 @@ Required Properties: - compatible: Should be one of: - "mediatek,mt2701-topckgen" - "mediatek,mt2712-topckgen", "syscon" + - "mediatek,mt6765-topckgen", "syscon" - "mediatek,mt6779-topckgen", "syscon" - "mediatek,mt6797-topckgen" - "mediatek,mt7622-topckgen" -- GitLab From 626b134b58eb03c152cdba0a94c74ca3da0805e7 Mon Sep 17 00:00:00 2001 From: Macpaul Lin Date: Fri, 21 Feb 2020 
17:52:19 +0800 Subject: [PATCH 0819/1971] dt-bindings: clock: mediatek: document clk bindings mipi0a for Mediatek MT6765 SoC This patch adds the binding documentation for mipi0a. Signed-off-by: Mars Cheng Signed-off-by: Owen Chen Signed-off-by: Macpaul Lin Link: https://lore.kernel.org/r/1582278742-1626-3-git-send-email-macpaul.lin@mediatek.com Signed-off-by: Stephen Boyd --- .../bindings/arm/mediatek/mediatek,mipi0a.txt | 28 +++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 Documentation/devicetree/bindings/arm/mediatek/mediatek,mipi0a.txt diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mipi0a.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mipi0a.txt new file mode 100644 index 0000000000000..8be5978f388da --- /dev/null +++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mipi0a.txt @@ -0,0 +1,28 @@ +Mediatek mipi0a (mipi_rx_ana_csi0a) controller +============================ + +The Mediatek mipi0a controller provides various clocks +to the system. + +Required Properties: + +- compatible: Should be one of: + - "mediatek,mt6765-mipi0a", "syscon" +- #clock-cells: Must be 1 + +The mipi0a controller uses the common clk binding from +Documentation/devicetree/bindings/clock/clock-bindings.txt +The available clocks are defined in dt-bindings/clock/mt*-clk.h. + +The mipi0a controller also uses the common power domain from +Documentation/devicetree/bindings/soc/mediatek/scpsys.txt +The available power doamins are defined in dt-bindings/power/mt*-power.h. + +Example: + +mipi0a: clock-controller@11c10000 { + compatible = "mediatek,mt6765-mipi0a", "syscon"; + reg = <0 0x11c10000 0 0x1000>; + power-domains = <&scpsys MT6765_POWER_DOMAIN_CAM>; + #clock-cells = <1>; +}; -- GitLab From 0502f26c6441d82cf7299678198b644f3838e871 Mon Sep 17 00:00:00 2001 From: Macpaul Lin Date: Fri, 21 Feb 2020 17:52:20 +0800 Subject: [PATCH 0820/1971] dt-bindings: clock: mediatek: document clk bindings vcodecsys for Mediatek MT6765 SoC This patch adds the binding documentation for vcodecsys. Signed-off-by: Mars Cheng Signed-off-by: Owen Chen Signed-off-by: Macpaul Lin Link: https://lore.kernel.org/r/1582278742-1626-4-git-send-email-macpaul.lin@mediatek.com Signed-off-by: Stephen Boyd --- .../arm/mediatek/mediatek,vcodecsys.txt | 27 +++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 Documentation/devicetree/bindings/arm/mediatek/mediatek,vcodecsys.txt diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,vcodecsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,vcodecsys.txt new file mode 100644 index 0000000000000..c877bcc1a5c5d --- /dev/null +++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,vcodecsys.txt @@ -0,0 +1,27 @@ +Mediatek vcodecsys controller +============================ + +The Mediatek vcodecsys controller provides various clocks to the system. + +Required Properties: + +- compatible: Should be one of: + - "mediatek,mt6765-vcodecsys", "syscon" +- #clock-cells: Must be 1 + +The vcodecsys controller uses the common clk binding from +Documentation/devicetree/bindings/clock/clock-bindings.txt +The available clocks are defined in dt-bindings/clock/mt*-clk.h. + +The vcodecsys controller also uses the common power domain from +Documentation/devicetree/bindings/soc/mediatek/scpsys.txt +The available power doamins are defined in dt-bindings/power/mt*-power.h. 
+ +Example: + +venc_gcon: clock-controller@17000000 { + compatible = "mediatek,mt6765-vcodecsys", "syscon"; + reg = <0 0x17000000 0 0x10000>; + power-domains = <&scpsys MT6765_POWER_DOMAIN_VCODEC>; + #clock-cells = <1>; +}; -- GitLab From eb7beb65ac30afb8385cfc4032ea67835c5e7d66 Mon Sep 17 00:00:00 2001 From: Mars Cheng Date: Fri, 21 Feb 2020 17:52:21 +0800 Subject: [PATCH 0821/1971] clk: mediatek: add mt6765 clock IDs Add MT6765 clock dt-bindings, include topckgen, apmixedsys, infracfg, mcucfg and subsystem clocks. Signed-off-by: Mars Cheng Signed-off-by: Owen Chen Signed-off-by: Macpaul Lin Reviewed-by: Rob Herring Link: https://lore.kernel.org/r/1582278742-1626-5-git-send-email-macpaul.lin@mediatek.com Signed-off-by: Stephen Boyd --- include/dt-bindings/clock/mt6765-clk.h | 313 +++++++++++++++++++++++++ 1 file changed, 313 insertions(+) create mode 100644 include/dt-bindings/clock/mt6765-clk.h diff --git a/include/dt-bindings/clock/mt6765-clk.h b/include/dt-bindings/clock/mt6765-clk.h new file mode 100644 index 0000000000000..eb97e568518ee --- /dev/null +++ b/include/dt-bindings/clock/mt6765-clk.h @@ -0,0 +1,313 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _DT_BINDINGS_CLK_MT6765_H +#define _DT_BINDINGS_CLK_MT6765_H + +/* FIX Clks */ +#define CLK_TOP_CLK26M 0 + +/* APMIXEDSYS */ +#define CLK_APMIXED_ARMPLL_L 0 +#define CLK_APMIXED_ARMPLL 1 +#define CLK_APMIXED_CCIPLL 2 +#define CLK_APMIXED_MAINPLL 3 +#define CLK_APMIXED_MFGPLL 4 +#define CLK_APMIXED_MMPLL 5 +#define CLK_APMIXED_UNIV2PLL 6 +#define CLK_APMIXED_MSDCPLL 7 +#define CLK_APMIXED_APLL1 8 +#define CLK_APMIXED_MPLL 9 +#define CLK_APMIXED_ULPOSC1 10 +#define CLK_APMIXED_ULPOSC2 11 +#define CLK_APMIXED_SSUSB26M 12 +#define CLK_APMIXED_APPLL26M 13 +#define CLK_APMIXED_MIPIC0_26M 14 +#define CLK_APMIXED_MDPLLGP26M 15 +#define CLK_APMIXED_MMSYS_F26M 16 +#define CLK_APMIXED_UFS26M 17 +#define CLK_APMIXED_MIPIC1_26M 18 +#define CLK_APMIXED_MEMPLL26M 19 +#define CLK_APMIXED_CLKSQ_LVPLL_26M 20 +#define CLK_APMIXED_MIPID0_26M 21 +#define CLK_APMIXED_NR_CLK 22 + +/* TOPCKGEN */ +#define CLK_TOP_SYSPLL 0 +#define CLK_TOP_SYSPLL_D2 1 +#define CLK_TOP_SYSPLL1_D2 2 +#define CLK_TOP_SYSPLL1_D4 3 +#define CLK_TOP_SYSPLL1_D8 4 +#define CLK_TOP_SYSPLL1_D16 5 +#define CLK_TOP_SYSPLL_D3 6 +#define CLK_TOP_SYSPLL2_D2 7 +#define CLK_TOP_SYSPLL2_D4 8 +#define CLK_TOP_SYSPLL2_D8 9 +#define CLK_TOP_SYSPLL_D5 10 +#define CLK_TOP_SYSPLL3_D2 11 +#define CLK_TOP_SYSPLL3_D4 12 +#define CLK_TOP_SYSPLL_D7 13 +#define CLK_TOP_SYSPLL4_D2 14 +#define CLK_TOP_SYSPLL4_D4 15 +#define CLK_TOP_USB20_192M 16 +#define CLK_TOP_USB20_192M_D4 17 +#define CLK_TOP_USB20_192M_D8 18 +#define CLK_TOP_USB20_192M_D16 19 +#define CLK_TOP_USB20_192M_D32 20 +#define CLK_TOP_UNIVPLL 21 +#define CLK_TOP_UNIVPLL_D2 22 +#define CLK_TOP_UNIVPLL1_D2 23 +#define CLK_TOP_UNIVPLL1_D4 24 +#define CLK_TOP_UNIVPLL_D3 25 +#define CLK_TOP_UNIVPLL2_D2 26 +#define CLK_TOP_UNIVPLL2_D4 27 +#define CLK_TOP_UNIVPLL2_D8 28 +#define CLK_TOP_UNIVPLL2_D32 29 +#define CLK_TOP_UNIVPLL_D5 30 +#define CLK_TOP_UNIVPLL3_D2 31 +#define CLK_TOP_UNIVPLL3_D4 32 +#define CLK_TOP_MMPLL 33 +#define CLK_TOP_MMPLL_D2 34 +#define CLK_TOP_MPLL 35 +#define CLK_TOP_DA_MPLL_104M_DIV 36 +#define CLK_TOP_DA_MPLL_52M_DIV 37 +#define CLK_TOP_MFGPLL 38 +#define CLK_TOP_MSDCPLL 39 +#define CLK_TOP_MSDCPLL_D2 40 +#define CLK_TOP_APLL1 41 +#define CLK_TOP_APLL1_D2 42 +#define CLK_TOP_APLL1_D4 43 +#define CLK_TOP_APLL1_D8 44 +#define CLK_TOP_ULPOSC1 45 +#define CLK_TOP_ULPOSC1_D2 46 +#define CLK_TOP_ULPOSC1_D4 47 +#define 
CLK_TOP_ULPOSC1_D8 48 +#define CLK_TOP_ULPOSC1_D16 49 +#define CLK_TOP_ULPOSC1_D32 50 +#define CLK_TOP_DMPLL 51 +#define CLK_TOP_F_FRTC 52 +#define CLK_TOP_F_F26M 53 +#define CLK_TOP_AXI 54 +#define CLK_TOP_MM 55 +#define CLK_TOP_SCP 56 +#define CLK_TOP_MFG 57 +#define CLK_TOP_F_FUART 58 +#define CLK_TOP_SPI 59 +#define CLK_TOP_MSDC50_0 60 +#define CLK_TOP_MSDC30_1 61 +#define CLK_TOP_AUDIO 62 +#define CLK_TOP_AUD_1 63 +#define CLK_TOP_AUD_ENGEN1 64 +#define CLK_TOP_F_FDISP_PWM 65 +#define CLK_TOP_SSPM 66 +#define CLK_TOP_DXCC 67 +#define CLK_TOP_I2C 68 +#define CLK_TOP_F_FPWM 69 +#define CLK_TOP_F_FSENINF 70 +#define CLK_TOP_AES_FDE 71 +#define CLK_TOP_F_BIST2FPC 72 +#define CLK_TOP_ARMPLL_DIVIDER_PLL0 73 +#define CLK_TOP_ARMPLL_DIVIDER_PLL1 74 +#define CLK_TOP_ARMPLL_DIVIDER_PLL2 75 +#define CLK_TOP_DA_USB20_48M_DIV 76 +#define CLK_TOP_DA_UNIV_48M_DIV 77 +#define CLK_TOP_APLL12_DIV0 78 +#define CLK_TOP_APLL12_DIV1 79 +#define CLK_TOP_APLL12_DIV2 80 +#define CLK_TOP_APLL12_DIV3 81 +#define CLK_TOP_ARMPLL_DIVIDER_PLL0_EN 82 +#define CLK_TOP_ARMPLL_DIVIDER_PLL1_EN 83 +#define CLK_TOP_ARMPLL_DIVIDER_PLL2_EN 84 +#define CLK_TOP_FMEM_OCC_DRC_EN 85 +#define CLK_TOP_USB20_48M_EN 86 +#define CLK_TOP_UNIVPLL_48M_EN 87 +#define CLK_TOP_MPLL_104M_EN 88 +#define CLK_TOP_MPLL_52M_EN 89 +#define CLK_TOP_F_UFS_MP_SAP_CFG_EN 90 +#define CLK_TOP_F_BIST2FPC_EN 91 +#define CLK_TOP_MD_32K 92 +#define CLK_TOP_MD_26M 93 +#define CLK_TOP_MD2_32K 94 +#define CLK_TOP_MD2_26M 95 +#define CLK_TOP_AXI_SEL 96 +#define CLK_TOP_MEM_SEL 97 +#define CLK_TOP_MM_SEL 98 +#define CLK_TOP_SCP_SEL 99 +#define CLK_TOP_MFG_SEL 100 +#define CLK_TOP_ATB_SEL 101 +#define CLK_TOP_CAMTG_SEL 102 +#define CLK_TOP_CAMTG1_SEL 103 +#define CLK_TOP_CAMTG2_SEL 104 +#define CLK_TOP_CAMTG3_SEL 105 +#define CLK_TOP_UART_SEL 106 +#define CLK_TOP_SPI_SEL 107 +#define CLK_TOP_MSDC50_0_HCLK_SEL 108 +#define CLK_TOP_MSDC50_0_SEL 109 +#define CLK_TOP_MSDC30_1_SEL 110 +#define CLK_TOP_AUDIO_SEL 111 +#define CLK_TOP_AUD_INTBUS_SEL 112 +#define CLK_TOP_AUD_1_SEL 113 +#define CLK_TOP_AUD_ENGEN1_SEL 114 +#define CLK_TOP_DISP_PWM_SEL 115 +#define CLK_TOP_SSPM_SEL 116 +#define CLK_TOP_DXCC_SEL 117 +#define CLK_TOP_USB_TOP_SEL 118 +#define CLK_TOP_SPM_SEL 119 +#define CLK_TOP_I2C_SEL 120 +#define CLK_TOP_PWM_SEL 121 +#define CLK_TOP_SENINF_SEL 122 +#define CLK_TOP_AES_FDE_SEL 123 +#define CLK_TOP_PWRAP_ULPOSC_SEL 124 +#define CLK_TOP_CAMTM_SEL 125 +#define CLK_TOP_NR_CLK 126 + +/* INFRACFG */ +#define CLK_IFR_ICUSB 0 +#define CLK_IFR_GCE 1 +#define CLK_IFR_THERM 2 +#define CLK_IFR_I2C_AP 3 +#define CLK_IFR_I2C_CCU 4 +#define CLK_IFR_I2C_SSPM 5 +#define CLK_IFR_I2C_RSV 6 +#define CLK_IFR_PWM_HCLK 7 +#define CLK_IFR_PWM1 8 +#define CLK_IFR_PWM2 9 +#define CLK_IFR_PWM3 10 +#define CLK_IFR_PWM4 11 +#define CLK_IFR_PWM5 12 +#define CLK_IFR_PWM 13 +#define CLK_IFR_UART0 14 +#define CLK_IFR_UART1 15 +#define CLK_IFR_GCE_26M 16 +#define CLK_IFR_CQ_DMA_FPC 17 +#define CLK_IFR_BTIF 18 +#define CLK_IFR_SPI0 19 +#define CLK_IFR_MSDC0 20 +#define CLK_IFR_MSDC1 21 +#define CLK_IFR_TRNG 22 +#define CLK_IFR_AUXADC 23 +#define CLK_IFR_CCIF1_AP 24 +#define CLK_IFR_CCIF1_MD 25 +#define CLK_IFR_AUXADC_MD 26 +#define CLK_IFR_AP_DMA 27 +#define CLK_IFR_DEVICE_APC 28 +#define CLK_IFR_CCIF_AP 29 +#define CLK_IFR_AUDIO 30 +#define CLK_IFR_CCIF_MD 31 +#define CLK_IFR_RG_PWM_FBCLK6 32 +#define CLK_IFR_DISP_PWM 33 +#define CLK_IFR_CLDMA_BCLK 34 +#define CLK_IFR_AUDIO_26M_BCLK 35 +#define CLK_IFR_SPI1 36 +#define CLK_IFR_I2C4 37 +#define CLK_IFR_SPI2 38 +#define CLK_IFR_SPI3 39 
+#define CLK_IFR_I2C5 40 +#define CLK_IFR_I2C5_ARBITER 41 +#define CLK_IFR_I2C5_IMM 42 +#define CLK_IFR_I2C1_ARBITER 43 +#define CLK_IFR_I2C1_IMM 44 +#define CLK_IFR_I2C2_ARBITER 45 +#define CLK_IFR_I2C2_IMM 46 +#define CLK_IFR_SPI4 47 +#define CLK_IFR_SPI5 48 +#define CLK_IFR_CQ_DMA 49 +#define CLK_IFR_FAES_FDE 50 +#define CLK_IFR_MSDC0_SELF 51 +#define CLK_IFR_MSDC1_SELF 52 +#define CLK_IFR_I2C6 53 +#define CLK_IFR_AP_MSDC0 54 +#define CLK_IFR_MD_MSDC0 55 +#define CLK_IFR_MSDC0_SRC 56 +#define CLK_IFR_MSDC1_SRC 57 +#define CLK_IFR_AES_TOP0_BCLK 58 +#define CLK_IFR_MCU_PM_BCLK 59 +#define CLK_IFR_CCIF2_AP 60 +#define CLK_IFR_CCIF2_MD 61 +#define CLK_IFR_CCIF3_AP 62 +#define CLK_IFR_CCIF3_MD 63 +#define CLK_IFR_NR_CLK 64 + +/* AUDIO */ +#define CLK_AUDIO_AFE 0 +#define CLK_AUDIO_22M 1 +#define CLK_AUDIO_APLL_TUNER 2 +#define CLK_AUDIO_ADC 3 +#define CLK_AUDIO_DAC 4 +#define CLK_AUDIO_DAC_PREDIS 5 +#define CLK_AUDIO_TML 6 +#define CLK_AUDIO_I2S1_BCLK 7 +#define CLK_AUDIO_I2S2_BCLK 8 +#define CLK_AUDIO_I2S3_BCLK 9 +#define CLK_AUDIO_I2S4_BCLK 10 +#define CLK_AUDIO_NR_CLK 11 + +/* MIPI_RX_ANA_CSI0A */ + +#define CLK_MIPI0A_CSR_CSI_EN_0A 0 +#define CLK_MIPI0A_NR_CLK 1 + +/* MMSYS_CONFIG */ + +#define CLK_MM_MDP_RDMA0 0 +#define CLK_MM_MDP_CCORR0 1 +#define CLK_MM_MDP_RSZ0 2 +#define CLK_MM_MDP_RSZ1 3 +#define CLK_MM_MDP_TDSHP0 4 +#define CLK_MM_MDP_WROT0 5 +#define CLK_MM_MDP_WDMA0 6 +#define CLK_MM_DISP_OVL0 7 +#define CLK_MM_DISP_OVL0_2L 8 +#define CLK_MM_DISP_RSZ0 9 +#define CLK_MM_DISP_RDMA0 10 +#define CLK_MM_DISP_WDMA0 11 +#define CLK_MM_DISP_COLOR0 12 +#define CLK_MM_DISP_CCORR0 13 +#define CLK_MM_DISP_AAL0 14 +#define CLK_MM_DISP_GAMMA0 15 +#define CLK_MM_DISP_DITHER0 16 +#define CLK_MM_DSI0 17 +#define CLK_MM_FAKE_ENG 18 +#define CLK_MM_SMI_COMMON 19 +#define CLK_MM_SMI_LARB0 20 +#define CLK_MM_SMI_COMM0 21 +#define CLK_MM_SMI_COMM1 22 +#define CLK_MM_CAM_MDP 23 +#define CLK_MM_SMI_IMG 24 +#define CLK_MM_SMI_CAM 25 +#define CLK_MM_IMG_DL_RELAY 26 +#define CLK_MM_IMG_DL_ASYNC_TOP 27 +#define CLK_MM_DIG_DSI 28 +#define CLK_MM_F26M_HRTWT 29 +#define CLK_MM_NR_CLK 30 + +/* IMGSYS */ + +#define CLK_IMG_LARB2 0 +#define CLK_IMG_DIP 1 +#define CLK_IMG_FDVT 2 +#define CLK_IMG_DPE 3 +#define CLK_IMG_RSC 4 +#define CLK_IMG_NR_CLK 5 + +/* VENCSYS */ + +#define CLK_VENC_SET0_LARB 0 +#define CLK_VENC_SET1_VENC 1 +#define CLK_VENC_SET2_JPGENC 2 +#define CLK_VENC_SET3_VDEC 3 +#define CLK_VENC_NR_CLK 4 + +/* CAMSYS */ + +#define CLK_CAM_LARB3 0 +#define CLK_CAM_DFP_VAD 1 +#define CLK_CAM 2 +#define CLK_CAMTG 3 +#define CLK_CAM_SENINF 4 +#define CLK_CAMSV0 5 +#define CLK_CAMSV1 6 +#define CLK_CAMSV2 7 +#define CLK_CAM_CCU 8 +#define CLK_CAM_NR_CLK 9 + +#endif /* _DT_BINDINGS_CLK_MT6765_H */ -- GitLab From 1aca9939bf72893887cb7e3455e44c864bada2f9 Mon Sep 17 00:00:00 2001 From: Owen Chen Date: Fri, 21 Feb 2020 17:52:22 +0800 Subject: [PATCH 0822/1971] clk: mediatek: Add MT6765 clock support Add MT6765 clock support, include topckgen, apmixedsys, infracfg, mcucfg and subsystem clocks. 
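The subsystem clocks added here are ordinary common-clock-framework providers, so peripheral drivers consume them through the usual clk API once the device tree references a provider node with the IDs from mt6765-clk.h. A minimal consumer sketch, assuming a node with clocks = <&topckgen CLK_TOP_I2C_SEL> and clock-names = "main" (both hypothetical):

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
            struct clk *clk;
            int ret;

            clk = devm_clk_get(&pdev->dev, "main"); /* hypothetical clock-names entry */
            if (IS_ERR(clk))
                    return PTR_ERR(clk);

            ret = clk_prepare_enable(clk);          /* ungates the selected clock */
            if (ret)
                    return ret;

            /* ... use the peripheral ... */

            clk_disable_unprepare(clk);
            return 0;
    }
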
Signed-off-by: Owen Chen Signed-off-by: Mars Cheng Signed-off-by: Macpaul Lin Link: https://lore.kernel.org/r/1582278742-1626-6-git-send-email-macpaul.lin@mediatek.com Signed-off-by: Stephen Boyd --- drivers/clk/mediatek/Kconfig | 86 ++ drivers/clk/mediatek/Makefile | 7 + drivers/clk/mediatek/clk-mt6765-audio.c | 100 +++ drivers/clk/mediatek/clk-mt6765-cam.c | 74 ++ drivers/clk/mediatek/clk-mt6765-img.c | 70 ++ drivers/clk/mediatek/clk-mt6765-mipi0a.c | 68 ++ drivers/clk/mediatek/clk-mt6765-mm.c | 96 +++ drivers/clk/mediatek/clk-mt6765-vcodec.c | 70 ++ drivers/clk/mediatek/clk-mt6765.c | 952 +++++++++++++++++++++++ 9 files changed, 1523 insertions(+) create mode 100644 drivers/clk/mediatek/clk-mt6765-audio.c create mode 100644 drivers/clk/mediatek/clk-mt6765-cam.c create mode 100644 drivers/clk/mediatek/clk-mt6765-img.c create mode 100644 drivers/clk/mediatek/clk-mt6765-mipi0a.c create mode 100644 drivers/clk/mediatek/clk-mt6765-mm.c create mode 100644 drivers/clk/mediatek/clk-mt6765-vcodec.c create mode 100644 drivers/clk/mediatek/clk-mt6765.c diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig index ea3c70d1307ec..f86f842cee03a 100644 --- a/drivers/clk/mediatek/Kconfig +++ b/drivers/clk/mediatek/Kconfig @@ -117,6 +117,92 @@ config COMMON_CLK_MT2712_VENCSYS ---help--- This driver supports MediaTek MT2712 vencsys clocks. +config COMMON_CLK_MT6765 + bool "Clock driver for MediaTek MT6765" + depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST + select COMMON_CLK_MEDIATEK + default ARCH_MEDIATEK && ARM64 + help + This driver supports MediaTek MT6765 basic clocks. + +config COMMON_CLK_MT6765_AUDIOSYS + bool "Clock driver for MediaTek MT6765 audiosys" + depends on COMMON_CLK_MT6765 + help + This driver supports MediaTek MT6765 audiosys clocks. + +config COMMON_CLK_MT6765_CAMSYS + bool "Clock driver for MediaTek MT6765 camsys" + depends on COMMON_CLK_MT6765 + help + This driver supports MediaTek MT6765 camsys clocks. + +config COMMON_CLK_MT6765_GCESYS + bool "Clock driver for MediaTek MT6765 gcesys" + depends on COMMON_CLK_MT6765 + help + This driver supports MediaTek MT6765 gcesys clocks. + +config COMMON_CLK_MT6765_MMSYS + bool "Clock driver for MediaTek MT6765 mmsys" + depends on COMMON_CLK_MT6765 + help + This driver supports MediaTek MT6765 mmsys clocks. + +config COMMON_CLK_MT6765_IMGSYS + bool "Clock driver for MediaTek MT6765 imgsys" + depends on COMMON_CLK_MT6765 + help + This driver supports MediaTek MT6765 imgsys clocks. + +config COMMON_CLK_MT6765_VCODECSYS + bool "Clock driver for MediaTek MT6765 vcodecsys" + depends on COMMON_CLK_MT6765 + help + This driver supports MediaTek MT6765 vcodecsys clocks. + +config COMMON_CLK_MT6765_MFGSYS + bool "Clock driver for MediaTek MT6765 mfgsys" + depends on COMMON_CLK_MT6765 + help + This driver supports MediaTek MT6765 mfgsys clocks. + +config COMMON_CLK_MT6765_MIPI0ASYS + bool "Clock driver for MediaTek MT6765 mipi0asys" + depends on COMMON_CLK_MT6765 + help + This driver supports MediaTek MT6765 mipi0asys clocks. + +config COMMON_CLK_MT6765_MIPI0BSYS + bool "Clock driver for MediaTek MT6765 mipi0bsys" + depends on COMMON_CLK_MT6765 + help + This driver supports MediaTek MT6765 mipi0bsys clocks. + +config COMMON_CLK_MT6765_MIPI1ASYS + bool "Clock driver for MediaTek MT6765 mipi1asys" + depends on COMMON_CLK_MT6765 + help + This driver supports MediaTek MT6765 mipi1asys clocks. 
+ +config COMMON_CLK_MT6765_MIPI1BSYS + bool "Clock driver for MediaTek MT6765 mipi1bsys" + depends on COMMON_CLK_MT6765 + help + This driver supports MediaTek MT6765 mipi1bsys clocks. + +config COMMON_CLK_MT6765_MIPI2ASYS + bool "Clock driver for MediaTek MT6765 mipi2asys" + depends on COMMON_CLK_MT6765 + help + This driver supports MediaTek MT6765 mipi2asys clocks. + +config COMMON_CLK_MT6765_MIPI2BSYS + bool "Clock driver for MediaTek MT6765 mipi2bsys" + depends on COMMON_CLK_MT6765 + help + This driver supports MediaTek MT6765 mipi2bsys clocks. + config COMMON_CLK_MT6779 bool "Clock driver for MediaTek MT6779" depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile index 8cdb76a5cd713..452ac3bc1837b 100644 --- a/drivers/clk/mediatek/Makefile +++ b/drivers/clk/mediatek/Makefile @@ -1,6 +1,13 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_COMMON_CLK_MEDIATEK) += clk-mtk.o clk-pll.o clk-gate.o clk-apmixed.o clk-cpumux.o reset.o clk-mux.o +obj-$(CONFIG_COMMON_CLK_MT6765) += clk-mt6765.o +obj-$(CONFIG_COMMON_CLK_MT6765_AUDIOSYS) += clk-mt6765-audio.o +obj-$(CONFIG_COMMON_CLK_MT6765_CAMSYS) += clk-mt6765-cam.o +obj-$(CONFIG_COMMON_CLK_MT6765_IMGSYS) += clk-mt6765-img.o +obj-$(CONFIG_COMMON_CLK_MT6765_MIPI0ASYS) += clk-mt6765-mipi0a.o +obj-$(CONFIG_COMMON_CLK_MT6765_MMSYS) += clk-mt6765-mm.o +obj-$(CONFIG_COMMON_CLK_MT6765_VCODECSYS) += clk-mt6765-vcodec.o obj-$(CONFIG_COMMON_CLK_MT6779) += clk-mt6779.o obj-$(CONFIG_COMMON_CLK_MT6779_MMSYS) += clk-mt6779-mm.o obj-$(CONFIG_COMMON_CLK_MT6779_IMGSYS) += clk-mt6779-img.o diff --git a/drivers/clk/mediatek/clk-mt6765-audio.c b/drivers/clk/mediatek/clk-mt6765-audio.c new file mode 100644 index 0000000000000..4c989165d7950 --- /dev/null +++ b/drivers/clk/mediatek/clk-mt6765-audio.c @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018 MediaTek Inc. 
+ * Author: Owen Chen + */ + +#include +#include + +#include "clk-mtk.h" +#include "clk-gate.h" + +#include + +static const struct mtk_gate_regs audio0_cg_regs = { + .set_ofs = 0x0, + .clr_ofs = 0x0, + .sta_ofs = 0x0, +}; + +static const struct mtk_gate_regs audio1_cg_regs = { + .set_ofs = 0x4, + .clr_ofs = 0x4, + .sta_ofs = 0x4, +}; + +#define GATE_AUDIO0(_id, _name, _parent, _shift) { \ + .id = _id, \ + .name = _name, \ + .parent_name = _parent, \ + .regs = &audio0_cg_regs, \ + .shift = _shift, \ + .ops = &mtk_clk_gate_ops_no_setclr, \ + } + +#define GATE_AUDIO1(_id, _name, _parent, _shift) { \ + .id = _id, \ + .name = _name, \ + .parent_name = _parent, \ + .regs = &audio1_cg_regs, \ + .shift = _shift, \ + .ops = &mtk_clk_gate_ops_no_setclr, \ + } + +static const struct mtk_gate audio_clks[] = { + /* AUDIO0 */ + GATE_AUDIO0(CLK_AUDIO_AFE, "aud_afe", "audio_ck", 2), + GATE_AUDIO0(CLK_AUDIO_22M, "aud_22m", "aud_engen1_ck", 8), + GATE_AUDIO0(CLK_AUDIO_APLL_TUNER, "aud_apll_tuner", + "aud_engen1_ck", 19), + GATE_AUDIO0(CLK_AUDIO_ADC, "aud_adc", "audio_ck", 24), + GATE_AUDIO0(CLK_AUDIO_DAC, "aud_dac", "audio_ck", 25), + GATE_AUDIO0(CLK_AUDIO_DAC_PREDIS, "aud_dac_predis", + "audio_ck", 26), + GATE_AUDIO0(CLK_AUDIO_TML, "aud_tml", "audio_ck", 27), + /* AUDIO1 */ + GATE_AUDIO1(CLK_AUDIO_I2S1_BCLK, "aud_i2s1_bclk", + "audio_ck", 4), + GATE_AUDIO1(CLK_AUDIO_I2S2_BCLK, "aud_i2s2_bclk", + "audio_ck", 5), + GATE_AUDIO1(CLK_AUDIO_I2S3_BCLK, "aud_i2s3_bclk", + "audio_ck", 6), + GATE_AUDIO1(CLK_AUDIO_I2S4_BCLK, "aud_i2s4_bclk", + "audio_ck", 7), +}; + +static int clk_mt6765_audio_probe(struct platform_device *pdev) +{ + struct clk_onecell_data *clk_data; + int r; + struct device_node *node = pdev->dev.of_node; + + clk_data = mtk_alloc_clk_data(CLK_AUDIO_NR_CLK); + + mtk_clk_register_gates(node, audio_clks, + ARRAY_SIZE(audio_clks), clk_data); + + r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + + if (r) + pr_err("%s(): could not register clock provider: %d\n", + __func__, r); + + return r; +} + +static const struct of_device_id of_match_clk_mt6765_audio[] = { + { .compatible = "mediatek,mt6765-audsys", }, + {} +}; + +static struct platform_driver clk_mt6765_audio_drv = { + .probe = clk_mt6765_audio_probe, + .driver = { + .name = "clk-mt6765-audio", + .of_match_table = of_match_clk_mt6765_audio, + }, +}; + +builtin_platform_driver(clk_mt6765_audio_drv); diff --git a/drivers/clk/mediatek/clk-mt6765-cam.c b/drivers/clk/mediatek/clk-mt6765-cam.c new file mode 100644 index 0000000000000..c96394893bcfd --- /dev/null +++ b/drivers/clk/mediatek/clk-mt6765-cam.c @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018 MediaTek Inc. 
+ * Author: Owen Chen + */ + +#include +#include + +#include "clk-mtk.h" +#include "clk-gate.h" + +#include + +static const struct mtk_gate_regs cam_cg_regs = { + .set_ofs = 0x4, + .clr_ofs = 0x8, + .sta_ofs = 0x0, +}; + +#define GATE_CAM(_id, _name, _parent, _shift) { \ + .id = _id, \ + .name = _name, \ + .parent_name = _parent, \ + .regs = &cam_cg_regs, \ + .shift = _shift, \ + .ops = &mtk_clk_gate_ops_setclr, \ + } + +static const struct mtk_gate cam_clks[] = { + GATE_CAM(CLK_CAM_LARB3, "cam_larb3", "mm_ck", 0), + GATE_CAM(CLK_CAM_DFP_VAD, "cam_dfp_vad", "mm_ck", 1), + GATE_CAM(CLK_CAM, "cam", "mm_ck", 6), + GATE_CAM(CLK_CAMTG, "camtg", "mm_ck", 7), + GATE_CAM(CLK_CAM_SENINF, "cam_seninf", "mm_ck", 8), + GATE_CAM(CLK_CAMSV0, "camsv0", "mm_ck", 9), + GATE_CAM(CLK_CAMSV1, "camsv1", "mm_ck", 10), + GATE_CAM(CLK_CAMSV2, "camsv2", "mm_ck", 11), + GATE_CAM(CLK_CAM_CCU, "cam_ccu", "mm_ck", 12), +}; + +static int clk_mt6765_cam_probe(struct platform_device *pdev) +{ + struct clk_onecell_data *clk_data; + int r; + struct device_node *node = pdev->dev.of_node; + + clk_data = mtk_alloc_clk_data(CLK_CAM_NR_CLK); + + mtk_clk_register_gates(node, cam_clks, ARRAY_SIZE(cam_clks), clk_data); + + r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + + if (r) + pr_err("%s(): could not register clock provider: %d\n", + __func__, r); + + return r; +} + +static const struct of_device_id of_match_clk_mt6765_cam[] = { + { .compatible = "mediatek,mt6765-camsys", }, + {} +}; + +static struct platform_driver clk_mt6765_cam_drv = { + .probe = clk_mt6765_cam_probe, + .driver = { + .name = "clk-mt6765-cam", + .of_match_table = of_match_clk_mt6765_cam, + }, +}; + +builtin_platform_driver(clk_mt6765_cam_drv); diff --git a/drivers/clk/mediatek/clk-mt6765-img.c b/drivers/clk/mediatek/clk-mt6765-img.c new file mode 100644 index 0000000000000..6fd8bf8030fc7 --- /dev/null +++ b/drivers/clk/mediatek/clk-mt6765-img.c @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018 MediaTek Inc. 
+ * Author: Owen Chen + */ + +#include +#include + +#include "clk-mtk.h" +#include "clk-gate.h" + +#include + +static const struct mtk_gate_regs img_cg_regs = { + .set_ofs = 0x4, + .clr_ofs = 0x8, + .sta_ofs = 0x0, +}; + +#define GATE_IMG(_id, _name, _parent, _shift) { \ + .id = _id, \ + .name = _name, \ + .parent_name = _parent, \ + .regs = &img_cg_regs, \ + .shift = _shift, \ + .ops = &mtk_clk_gate_ops_setclr, \ + } + +static const struct mtk_gate img_clks[] = { + GATE_IMG(CLK_IMG_LARB2, "img_larb2", "mm_ck", 0), + GATE_IMG(CLK_IMG_DIP, "img_dip", "mm_ck", 2), + GATE_IMG(CLK_IMG_FDVT, "img_fdvt", "mm_ck", 3), + GATE_IMG(CLK_IMG_DPE, "img_dpe", "mm_ck", 4), + GATE_IMG(CLK_IMG_RSC, "img_rsc", "mm_ck", 5), +}; + +static int clk_mt6765_img_probe(struct platform_device *pdev) +{ + struct clk_onecell_data *clk_data; + int r; + struct device_node *node = pdev->dev.of_node; + + clk_data = mtk_alloc_clk_data(CLK_IMG_NR_CLK); + + mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks), clk_data); + + r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + + if (r) + pr_err("%s(): could not register clock provider: %d\n", + __func__, r); + + return r; +} + +static const struct of_device_id of_match_clk_mt6765_img[] = { + { .compatible = "mediatek,mt6765-imgsys", }, + {} +}; + +static struct platform_driver clk_mt6765_img_drv = { + .probe = clk_mt6765_img_probe, + .driver = { + .name = "clk-mt6765-img", + .of_match_table = of_match_clk_mt6765_img, + }, +}; + +builtin_platform_driver(clk_mt6765_img_drv); diff --git a/drivers/clk/mediatek/clk-mt6765-mipi0a.c b/drivers/clk/mediatek/clk-mt6765-mipi0a.c new file mode 100644 index 0000000000000..81744d0f95a0f --- /dev/null +++ b/drivers/clk/mediatek/clk-mt6765-mipi0a.c @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018 MediaTek Inc. 
+ * Author: Owen Chen + */ + +#include +#include + +#include "clk-mtk.h" +#include "clk-gate.h" + +#include + +static const struct mtk_gate_regs mipi0a_cg_regs = { + .set_ofs = 0x80, + .clr_ofs = 0x80, + .sta_ofs = 0x80, +}; + +#define GATE_MIPI0A(_id, _name, _parent, _shift) { \ + .id = _id, \ + .name = _name, \ + .parent_name = _parent, \ + .regs = &mipi0a_cg_regs, \ + .shift = _shift, \ + .ops = &mtk_clk_gate_ops_no_setclr_inv, \ + } + +static const struct mtk_gate mipi0a_clks[] = { + GATE_MIPI0A(CLK_MIPI0A_CSR_CSI_EN_0A, + "mipi0a_csr_0a", "f_fseninf_ck", 1), +}; + +static int clk_mt6765_mipi0a_probe(struct platform_device *pdev) +{ + struct clk_onecell_data *clk_data; + int r; + struct device_node *node = pdev->dev.of_node; + + clk_data = mtk_alloc_clk_data(CLK_MIPI0A_NR_CLK); + + mtk_clk_register_gates(node, mipi0a_clks, + ARRAY_SIZE(mipi0a_clks), clk_data); + + r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + + if (r) + pr_err("%s(): could not register clock provider: %d\n", + __func__, r); + + return r; +} + +static const struct of_device_id of_match_clk_mt6765_mipi0a[] = { + { .compatible = "mediatek,mt6765-mipi0a", }, + {} +}; + +static struct platform_driver clk_mt6765_mipi0a_drv = { + .probe = clk_mt6765_mipi0a_probe, + .driver = { + .name = "clk-mt6765-mipi0a", + .of_match_table = of_match_clk_mt6765_mipi0a, + }, +}; + +builtin_platform_driver(clk_mt6765_mipi0a_drv); diff --git a/drivers/clk/mediatek/clk-mt6765-mm.c b/drivers/clk/mediatek/clk-mt6765-mm.c new file mode 100644 index 0000000000000..6d8214c51684e --- /dev/null +++ b/drivers/clk/mediatek/clk-mt6765-mm.c @@ -0,0 +1,96 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018 MediaTek Inc. + * Author: Owen Chen + */ + +#include +#include + +#include "clk-mtk.h" +#include "clk-gate.h" + +#include + +static const struct mtk_gate_regs mm_cg_regs = { + .set_ofs = 0x104, + .clr_ofs = 0x108, + .sta_ofs = 0x100, +}; + +#define GATE_MM(_id, _name, _parent, _shift) { \ + .id = _id, \ + .name = _name, \ + .parent_name = _parent, \ + .regs = &mm_cg_regs, \ + .shift = _shift, \ + .ops = &mtk_clk_gate_ops_setclr, \ + } + +static const struct mtk_gate mm_clks[] = { + /* MM */ + GATE_MM(CLK_MM_MDP_RDMA0, "mm_mdp_rdma0", "mm_ck", 0), + GATE_MM(CLK_MM_MDP_CCORR0, "mm_mdp_ccorr0", "mm_ck", 1), + GATE_MM(CLK_MM_MDP_RSZ0, "mm_mdp_rsz0", "mm_ck", 2), + GATE_MM(CLK_MM_MDP_RSZ1, "mm_mdp_rsz1", "mm_ck", 3), + GATE_MM(CLK_MM_MDP_TDSHP0, "mm_mdp_tdshp0", "mm_ck", 4), + GATE_MM(CLK_MM_MDP_WROT0, "mm_mdp_wrot0", "mm_ck", 5), + GATE_MM(CLK_MM_MDP_WDMA0, "mm_mdp_wdma0", "mm_ck", 6), + GATE_MM(CLK_MM_DISP_OVL0, "mm_disp_ovl0", "mm_ck", 7), + GATE_MM(CLK_MM_DISP_OVL0_2L, "mm_disp_ovl0_2l", "mm_ck", 8), + GATE_MM(CLK_MM_DISP_RSZ0, "mm_disp_rsz0", "mm_ck", 9), + GATE_MM(CLK_MM_DISP_RDMA0, "mm_disp_rdma0", "mm_ck", 10), + GATE_MM(CLK_MM_DISP_WDMA0, "mm_disp_wdma0", "mm_ck", 11), + GATE_MM(CLK_MM_DISP_COLOR0, "mm_disp_color0", "mm_ck", 12), + GATE_MM(CLK_MM_DISP_CCORR0, "mm_disp_ccorr0", "mm_ck", 13), + GATE_MM(CLK_MM_DISP_AAL0, "mm_disp_aal0", "mm_ck", 14), + GATE_MM(CLK_MM_DISP_GAMMA0, "mm_disp_gamma0", "mm_ck", 15), + GATE_MM(CLK_MM_DISP_DITHER0, "mm_disp_dither0", "mm_ck", 16), + GATE_MM(CLK_MM_DSI0, "mm_dsi0", "mm_ck", 17), + GATE_MM(CLK_MM_FAKE_ENG, "mm_fake_eng", "mm_ck", 18), + GATE_MM(CLK_MM_SMI_COMMON, "mm_smi_common", "mm_ck", 19), + GATE_MM(CLK_MM_SMI_LARB0, "mm_smi_larb0", "mm_ck", 20), + GATE_MM(CLK_MM_SMI_COMM0, "mm_smi_comm0", "mm_ck", 21), + GATE_MM(CLK_MM_SMI_COMM1, "mm_smi_comm1", "mm_ck", 22), + 
GATE_MM(CLK_MM_CAM_MDP, "mm_cam_mdp_ck", "mm_ck", 23), + GATE_MM(CLK_MM_SMI_IMG, "mm_smi_img_ck", "mm_ck", 24), + GATE_MM(CLK_MM_SMI_CAM, "mm_smi_cam_ck", "mm_ck", 25), + GATE_MM(CLK_MM_IMG_DL_RELAY, "mm_img_dl_relay", "mm_ck", 26), + GATE_MM(CLK_MM_IMG_DL_ASYNC_TOP, "mm_imgdl_async", "mm_ck", 27), + GATE_MM(CLK_MM_DIG_DSI, "mm_dig_dsi_ck", "mm_ck", 28), + GATE_MM(CLK_MM_F26M_HRTWT, "mm_hrtwt", "f_f26m_ck", 29), +}; + +static int clk_mt6765_mm_probe(struct platform_device *pdev) +{ + struct clk_onecell_data *clk_data; + int r; + struct device_node *node = pdev->dev.of_node; + + clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK); + + mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks), clk_data); + + r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + + if (r) + pr_err("%s(): could not register clock provider: %d\n", + __func__, r); + + return r; +} + +static const struct of_device_id of_match_clk_mt6765_mm[] = { + { .compatible = "mediatek,mt6765-mmsys", }, + {} +}; + +static struct platform_driver clk_mt6765_mm_drv = { + .probe = clk_mt6765_mm_probe, + .driver = { + .name = "clk-mt6765-mm", + .of_match_table = of_match_clk_mt6765_mm, + }, +}; + +builtin_platform_driver(clk_mt6765_mm_drv); diff --git a/drivers/clk/mediatek/clk-mt6765-vcodec.c b/drivers/clk/mediatek/clk-mt6765-vcodec.c new file mode 100644 index 0000000000000..baae665fab31c --- /dev/null +++ b/drivers/clk/mediatek/clk-mt6765-vcodec.c @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018 MediaTek Inc. + * Author: Owen Chen + */ + +#include +#include + +#include "clk-mtk.h" +#include "clk-gate.h" + +#include + +static const struct mtk_gate_regs venc_cg_regs = { + .set_ofs = 0x4, + .clr_ofs = 0x8, + .sta_ofs = 0x0, +}; + +#define GATE_VENC(_id, _name, _parent, _shift) { \ + .id = _id, \ + .name = _name, \ + .parent_name = _parent, \ + .regs = &venc_cg_regs, \ + .shift = _shift, \ + .ops = &mtk_clk_gate_ops_setclr_inv, \ + } + +static const struct mtk_gate venc_clks[] = { + GATE_VENC(CLK_VENC_SET0_LARB, "venc_set0_larb", "mm_ck", 0), + GATE_VENC(CLK_VENC_SET1_VENC, "venc_set1_venc", "mm_ck", 4), + GATE_VENC(CLK_VENC_SET2_JPGENC, "jpgenc", "mm_ck", 8), + GATE_VENC(CLK_VENC_SET3_VDEC, "venc_set3_vdec", "mm_ck", 12), +}; + +static int clk_mt6765_vcodec_probe(struct platform_device *pdev) +{ + struct clk_onecell_data *clk_data; + int r; + struct device_node *node = pdev->dev.of_node; + + clk_data = mtk_alloc_clk_data(CLK_VENC_NR_CLK); + + mtk_clk_register_gates(node, venc_clks, + ARRAY_SIZE(venc_clks), clk_data); + + r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + + if (r) + pr_err("%s(): could not register clock provider: %d\n", + __func__, r); + + return r; +} + +static const struct of_device_id of_match_clk_mt6765_vcodec[] = { + { .compatible = "mediatek,mt6765-vcodecsys", }, + {} +}; + +static struct platform_driver clk_mt6765_vcodec_drv = { + .probe = clk_mt6765_vcodec_probe, + .driver = { + .name = "clk-mt6765-vcodec", + .of_match_table = of_match_clk_mt6765_vcodec, + }, +}; + +builtin_platform_driver(clk_mt6765_vcodec_drv); diff --git a/drivers/clk/mediatek/clk-mt6765.c b/drivers/clk/mediatek/clk-mt6765.c new file mode 100644 index 0000000000000..3ec53cb62eceb --- /dev/null +++ b/drivers/clk/mediatek/clk-mt6765.c @@ -0,0 +1,952 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018 MediaTek Inc. 
+ * Author: Owen Chen + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "clk-mtk.h" +#include "clk-gate.h" +#include "clk-mux.h" + +#include + +/*fmeter div select 4*/ +#define _DIV4_ 1 + +static DEFINE_SPINLOCK(mt6765_clk_lock); + +/* Total 12 subsys */ +static void __iomem *cksys_base; +static void __iomem *apmixed_base; + +/* CKSYS */ +#define CLK_SCP_CFG_0 (cksys_base + 0x200) +#define CLK_SCP_CFG_1 (cksys_base + 0x204) + +/* CG */ +#define AP_PLL_CON3 (apmixed_base + 0x0C) +#define PLLON_CON0 (apmixed_base + 0x44) +#define PLLON_CON1 (apmixed_base + 0x48) + +/* clk cfg update */ +#define CLK_CFG_0 0x40 +#define CLK_CFG_0_SET 0x44 +#define CLK_CFG_0_CLR 0x48 +#define CLK_CFG_1 0x50 +#define CLK_CFG_1_SET 0x54 +#define CLK_CFG_1_CLR 0x58 +#define CLK_CFG_2 0x60 +#define CLK_CFG_2_SET 0x64 +#define CLK_CFG_2_CLR 0x68 +#define CLK_CFG_3 0x70 +#define CLK_CFG_3_SET 0x74 +#define CLK_CFG_3_CLR 0x78 +#define CLK_CFG_4 0x80 +#define CLK_CFG_4_SET 0x84 +#define CLK_CFG_4_CLR 0x88 +#define CLK_CFG_5 0x90 +#define CLK_CFG_5_SET 0x94 +#define CLK_CFG_5_CLR 0x98 +#define CLK_CFG_6 0xa0 +#define CLK_CFG_6_SET 0xa4 +#define CLK_CFG_6_CLR 0xa8 +#define CLK_CFG_7 0xb0 +#define CLK_CFG_7_SET 0xb4 +#define CLK_CFG_7_CLR 0xb8 +#define CLK_CFG_8 0xc0 +#define CLK_CFG_8_SET 0xc4 +#define CLK_CFG_8_CLR 0xc8 +#define CLK_CFG_9 0xd0 +#define CLK_CFG_9_SET 0xd4 +#define CLK_CFG_9_CLR 0xd8 +#define CLK_CFG_10 0xe0 +#define CLK_CFG_10_SET 0xe4 +#define CLK_CFG_10_CLR 0xe8 +#define CLK_CFG_UPDATE 0x004 + +static const struct mtk_fixed_clk fixed_clks[] = { + FIXED_CLK(CLK_TOP_F_FRTC, "f_frtc_ck", "clk32k", 32768), + FIXED_CLK(CLK_TOP_CLK26M, "clk_26m_ck", "clk26m", 26000000), + FIXED_CLK(CLK_TOP_DMPLL, "dmpll_ck", NULL, 466000000), +}; + +static const struct mtk_fixed_factor top_divs[] = { + FACTOR(CLK_TOP_SYSPLL, "syspll_ck", "mainpll", 1, 1), + FACTOR(CLK_TOP_SYSPLL_D2, "syspll_d2", "mainpll", 1, 2), + FACTOR(CLK_TOP_SYSPLL1_D2, "syspll1_d2", "syspll_d2", 1, 2), + FACTOR(CLK_TOP_SYSPLL1_D4, "syspll1_d4", "syspll_d2", 1, 4), + FACTOR(CLK_TOP_SYSPLL1_D8, "syspll1_d8", "syspll_d2", 1, 8), + FACTOR(CLK_TOP_SYSPLL1_D16, "syspll1_d16", "syspll_d2", 1, 16), + FACTOR(CLK_TOP_SYSPLL_D3, "syspll_d3", "mainpll", 1, 3), + FACTOR(CLK_TOP_SYSPLL2_D2, "syspll2_d2", "syspll_d3", 1, 2), + FACTOR(CLK_TOP_SYSPLL2_D4, "syspll2_d4", "syspll_d3", 1, 4), + FACTOR(CLK_TOP_SYSPLL2_D8, "syspll2_d8", "syspll_d3", 1, 8), + FACTOR(CLK_TOP_SYSPLL_D5, "syspll_d5", "mainpll", 1, 5), + FACTOR(CLK_TOP_SYSPLL3_D2, "syspll3_d2", "syspll_d5", 1, 2), + FACTOR(CLK_TOP_SYSPLL3_D4, "syspll3_d4", "syspll_d5", 1, 4), + FACTOR(CLK_TOP_SYSPLL_D7, "syspll_d7", "mainpll", 1, 7), + FACTOR(CLK_TOP_SYSPLL4_D2, "syspll4_d2", "syspll_d7", 1, 2), + FACTOR(CLK_TOP_SYSPLL4_D4, "syspll4_d4", "syspll_d7", 1, 4), + FACTOR(CLK_TOP_UNIVPLL, "univpll", "univ2pll", 1, 2), + FACTOR(CLK_TOP_USB20_192M, "usb20_192m_ck", "univpll", 2, 13), + FACTOR(CLK_TOP_USB20_192M_D4, "usb20_192m_d4", "usb20_192m_ck", 1, 4), + FACTOR(CLK_TOP_USB20_192M_D8, "usb20_192m_d8", "usb20_192m_ck", 1, 8), + FACTOR(CLK_TOP_USB20_192M_D16, + "usb20_192m_d16", "usb20_192m_ck", 1, 16), + FACTOR(CLK_TOP_USB20_192M_D32, + "usb20_192m_d32", "usb20_192m_ck", 1, 32), + FACTOR(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univpll", 1, 2), + FACTOR(CLK_TOP_UNIVPLL1_D2, "univpll1_d2", "univpll_d2", 1, 2), + FACTOR(CLK_TOP_UNIVPLL1_D4, "univpll1_d4", "univpll_d2", 1, 4), + FACTOR(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univpll", 1, 3), + FACTOR(CLK_TOP_UNIVPLL2_D2, "univpll2_d2", 
"univpll_d3", 1, 2), + FACTOR(CLK_TOP_UNIVPLL2_D4, "univpll2_d4", "univpll_d3", 1, 4), + FACTOR(CLK_TOP_UNIVPLL2_D8, "univpll2_d8", "univpll_d3", 1, 8), + FACTOR(CLK_TOP_UNIVPLL2_D32, "univpll2_d32", "univpll_d3", 1, 32), + FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll", 1, 5), + FACTOR(CLK_TOP_UNIVPLL3_D2, "univpll3_d2", "univpll_d5", 1, 2), + FACTOR(CLK_TOP_UNIVPLL3_D4, "univpll3_d4", "univpll_d5", 1, 4), + FACTOR(CLK_TOP_MMPLL, "mmpll_ck", "mmpll", 1, 1), + FACTOR(CLK_TOP_MMPLL_D2, "mmpll_d2", "mmpll_ck", 1, 2), + FACTOR(CLK_TOP_MPLL, "mpll_ck", "mpll", 1, 1), + FACTOR(CLK_TOP_DA_MPLL_104M_DIV, "mpll_104m_div", "mpll_ck", 1, 2), + FACTOR(CLK_TOP_DA_MPLL_52M_DIV, "mpll_52m_div", "mpll_ck", 1, 4), + FACTOR(CLK_TOP_MFGPLL, "mfgpll_ck", "mfgpll", 1, 1), + FACTOR(CLK_TOP_MSDCPLL, "msdcpll_ck", "msdcpll", 1, 1), + FACTOR(CLK_TOP_MSDCPLL_D2, "msdcpll_d2", "msdcpll_ck", 1, 2), + FACTOR(CLK_TOP_APLL1, "apll1_ck", "apll1", 1, 1), + FACTOR(CLK_TOP_APLL1_D2, "apll1_d2", "apll1_ck", 1, 2), + FACTOR(CLK_TOP_APLL1_D4, "apll1_d4", "apll1_ck", 1, 4), + FACTOR(CLK_TOP_APLL1_D8, "apll1_d8", "apll1_ck", 1, 8), + FACTOR(CLK_TOP_ULPOSC1, "ulposc1_ck", "ulposc1", 1, 1), + FACTOR(CLK_TOP_ULPOSC1_D2, "ulposc1_d2", "ulposc1_ck", 1, 2), + FACTOR(CLK_TOP_ULPOSC1_D4, "ulposc1_d4", "ulposc1_ck", 1, 4), + FACTOR(CLK_TOP_ULPOSC1_D8, "ulposc1_d8", "ulposc1_ck", 1, 8), + FACTOR(CLK_TOP_ULPOSC1_D16, "ulposc1_d16", "ulposc1_ck", 1, 16), + FACTOR(CLK_TOP_ULPOSC1_D32, "ulposc1_d32", "ulposc1_ck", 1, 32), + FACTOR(CLK_TOP_F_F26M, "f_f26m_ck", "clk_26m_ck", 1, 1), + FACTOR(CLK_TOP_AXI, "axi_ck", "axi_sel", 1, 1), + FACTOR(CLK_TOP_MM, "mm_ck", "mm_sel", 1, 1), + FACTOR(CLK_TOP_SCP, "scp_ck", "scp_sel", 1, 1), + FACTOR(CLK_TOP_MFG, "mfg_ck", "mfg_sel", 1, 1), + FACTOR(CLK_TOP_F_FUART, "f_fuart_ck", "uart_sel", 1, 1), + FACTOR(CLK_TOP_SPI, "spi_ck", "spi_sel", 1, 1), + FACTOR(CLK_TOP_MSDC50_0, "msdc50_0_ck", "msdc50_0_sel", 1, 1), + FACTOR(CLK_TOP_MSDC30_1, "msdc30_1_ck", "msdc30_1_sel", 1, 1), + FACTOR(CLK_TOP_AUDIO, "audio_ck", "audio_sel", 1, 1), + FACTOR(CLK_TOP_AUD_1, "aud_1_ck", "aud_1_sel", 1, 1), + FACTOR(CLK_TOP_AUD_ENGEN1, "aud_engen1_ck", "aud_engen1_sel", 1, 1), + FACTOR(CLK_TOP_F_FDISP_PWM, "f_fdisp_pwm_ck", "disp_pwm_sel", 1, 1), + FACTOR(CLK_TOP_SSPM, "sspm_ck", "sspm_sel", 1, 1), + FACTOR(CLK_TOP_DXCC, "dxcc_ck", "dxcc_sel", 1, 1), + FACTOR(CLK_TOP_I2C, "i2c_ck", "i2c_sel", 1, 1), + FACTOR(CLK_TOP_F_FPWM, "f_fpwm_ck", "pwm_sel", 1, 1), + FACTOR(CLK_TOP_F_FSENINF, "f_fseninf_ck", "seninf_sel", 1, 1), + FACTOR(CLK_TOP_AES_FDE, "aes_fde_ck", "aes_fde_sel", 1, 1), + FACTOR(CLK_TOP_F_BIST2FPC, "f_bist2fpc_ck", "univpll2_d2", 1, 1), + FACTOR(CLK_TOP_ARMPLL_DIVIDER_PLL0, "arm_div_pll0", "syspll_d2", 1, 1), + FACTOR(CLK_TOP_ARMPLL_DIVIDER_PLL1, "arm_div_pll1", "syspll_ck", 1, 1), + FACTOR(CLK_TOP_ARMPLL_DIVIDER_PLL2, "arm_div_pll2", "univpll_d2", 1, 1), + FACTOR(CLK_TOP_DA_USB20_48M_DIV, + "usb20_48m_div", "usb20_192m_d4", 1, 1), + FACTOR(CLK_TOP_DA_UNIV_48M_DIV, "univ_48m_div", "usb20_192m_d4", 1, 1), +}; + +static const char * const axi_parents[] = { + "clk26m", + "syspll_d7", + "syspll1_d4", + "syspll3_d2" +}; + +static const char * const mem_parents[] = { + "clk26m", + "dmpll_ck", + "apll1_ck" +}; + +static const char * const mm_parents[] = { + "clk26m", + "mmpll_ck", + "syspll1_d2", + "syspll_d5", + "syspll1_d4", + "univpll_d5", + "univpll1_d2", + "mmpll_d2" +}; + +static const char * const scp_parents[] = { + "clk26m", + "syspll4_d2", + "univpll2_d2", + "syspll1_d2", + "univpll1_d2", + "syspll_d3", + 
"univpll_d3" +}; + +static const char * const mfg_parents[] = { + "clk26m", + "mfgpll_ck", + "syspll_d3", + "univpll_d3" +}; + +static const char * const atb_parents[] = { + "clk26m", + "syspll1_d4", + "syspll1_d2" +}; + +static const char * const camtg_parents[] = { + "clk26m", + "usb20_192m_d8", + "univpll2_d8", + "usb20_192m_d4", + "univpll2_d32", + "usb20_192m_d16", + "usb20_192m_d32" +}; + +static const char * const uart_parents[] = { + "clk26m", + "univpll2_d8" +}; + +static const char * const spi_parents[] = { + "clk26m", + "syspll3_d2", + "syspll4_d2", + "syspll2_d4" +}; + +static const char * const msdc5hclk_parents[] = { + "clk26m", + "syspll1_d2", + "univpll1_d4", + "syspll2_d2" +}; + +static const char * const msdc50_0_parents[] = { + "clk26m", + "msdcpll_ck", + "syspll2_d2", + "syspll4_d2", + "univpll1_d2", + "syspll1_d2", + "univpll_d5", + "univpll1_d4" +}; + +static const char * const msdc30_1_parents[] = { + "clk26m", + "msdcpll_d2", + "univpll2_d2", + "syspll2_d2", + "syspll1_d4", + "univpll1_d4", + "usb20_192m_d4", + "syspll2_d4" +}; + +static const char * const audio_parents[] = { + "clk26m", + "syspll3_d4", + "syspll4_d4", + "syspll1_d16" +}; + +static const char * const aud_intbus_parents[] = { + "clk26m", + "syspll1_d4", + "syspll4_d2" +}; + +static const char * const aud_1_parents[] = { + "clk26m", + "apll1_ck" +}; + +static const char * const aud_engen1_parents[] = { + "clk26m", + "apll1_d2", + "apll1_d4", + "apll1_d8" +}; + +static const char * const disp_pwm_parents[] = { + "clk26m", + "univpll2_d4", + "ulposc1_d2", + "ulposc1_d8" +}; + +static const char * const sspm_parents[] = { + "clk26m", + "syspll1_d2", + "syspll_d3" +}; + +static const char * const dxcc_parents[] = { + "clk26m", + "syspll1_d2", + "syspll1_d4", + "syspll1_d8" +}; + +static const char * const usb_top_parents[] = { + "clk26m", + "univpll3_d4" +}; + +static const char * const spm_parents[] = { + "clk26m", + "syspll1_d8" +}; + +static const char * const i2c_parents[] = { + "clk26m", + "univpll3_d4", + "univpll3_d2", + "syspll1_d8", + "syspll2_d8" +}; + +static const char * const pwm_parents[] = { + "clk26m", + "univpll3_d4", + "syspll1_d8" +}; + +static const char * const seninf_parents[] = { + "clk26m", + "univpll1_d4", + "univpll1_d2", + "univpll2_d2" +}; + +static const char * const aes_fde_parents[] = { + "clk26m", + "msdcpll_ck", + "univpll_d3", + "univpll2_d2", + "univpll1_d2", + "syspll1_d2" +}; + +static const char * const ulposc_parents[] = { + "clk26m", + "ulposc1_d4", + "ulposc1_d8", + "ulposc1_d16", + "ulposc1_d32" +}; + +static const char * const camtm_parents[] = { + "clk26m", + "univpll1_d4", + "univpll1_d2", + "univpll2_d2" +}; + +#define INVALID_UPDATE_REG 0xFFFFFFFF +#define INVALID_UPDATE_SHIFT -1 +#define INVALID_MUX_GATE -1 + +static const struct mtk_mux top_muxes[] = { + /* CLK_CFG_0 */ + MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_AXI_SEL, "axi_sel", axi_parents, + CLK_CFG_0, CLK_CFG_0_SET, CLK_CFG_0_CLR, + 0, 2, 7, CLK_CFG_UPDATE, 0, CLK_IS_CRITICAL), + MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_MEM_SEL, "mem_sel", mem_parents, + CLK_CFG_0, CLK_CFG_0_SET, CLK_CFG_0_CLR, + 8, 2, 15, CLK_CFG_UPDATE, 1, CLK_IS_CRITICAL), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MM_SEL, "mm_sel", mm_parents, CLK_CFG_0, + CLK_CFG_0_SET, CLK_CFG_0_CLR, 16, 3, 23, + CLK_CFG_UPDATE, 2), + MUX_GATE_CLR_SET_UPD(CLK_TOP_SCP_SEL, "scp_sel", scp_parents, CLK_CFG_0, + CLK_CFG_0_SET, CLK_CFG_0_CLR, 24, 3, 31, + CLK_CFG_UPDATE, 3), + /* CLK_CFG_1 */ + MUX_GATE_CLR_SET_UPD(CLK_TOP_MFG_SEL, "mfg_sel", mfg_parents, CLK_CFG_1, + 
CLK_CFG_1_SET, CLK_CFG_1_CLR, 0, 2, 7, + CLK_CFG_UPDATE, 4), + MUX_GATE_CLR_SET_UPD(CLK_TOP_ATB_SEL, "atb_sel", atb_parents, CLK_CFG_1, + CLK_CFG_1_SET, CLK_CFG_1_CLR, 8, 2, 15, + CLK_CFG_UPDATE, 5), + MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG_SEL, "camtg_sel", + camtg_parents, CLK_CFG_1, CLK_CFG_1_SET, + CLK_CFG_1_CLR, 16, 3, 23, CLK_CFG_UPDATE, 6), + MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG1_SEL, "camtg1_sel", camtg_parents, + CLK_CFG_1, CLK_CFG_1_SET, CLK_CFG_1_CLR, + 24, 3, 31, CLK_CFG_UPDATE, 7), + /* CLK_CFG_2 */ + MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG2_SEL, "camtg2_sel", + camtg_parents, CLK_CFG_2, CLK_CFG_2_SET, + CLK_CFG_2_CLR, 0, 3, 7, CLK_CFG_UPDATE, 8), + MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG3_SEL, "camtg3_sel", camtg_parents, + CLK_CFG_2, CLK_CFG_2_SET, CLK_CFG_2_CLR, + 8, 3, 15, CLK_CFG_UPDATE, 9), + MUX_GATE_CLR_SET_UPD(CLK_TOP_UART_SEL, "uart_sel", uart_parents, + CLK_CFG_2, CLK_CFG_2_SET, CLK_CFG_2_CLR, 16, 1, 23, + CLK_CFG_UPDATE, 10), + MUX_GATE_CLR_SET_UPD(CLK_TOP_SPI_SEL, "spi_sel", spi_parents, CLK_CFG_2, + CLK_CFG_2_SET, CLK_CFG_2_CLR, 24, 2, 31, + CLK_CFG_UPDATE, 11), + /* CLK_CFG_3 */ + MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC50_0_HCLK_SEL, "msdc5hclk", + msdc5hclk_parents, CLK_CFG_3, CLK_CFG_3_SET, + CLK_CFG_3_CLR, 0, 2, 7, CLK_CFG_UPDATE, 12), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC50_0_SEL, "msdc50_0_sel", + msdc50_0_parents, CLK_CFG_3, CLK_CFG_3_SET, + CLK_CFG_3_CLR, 8, 3, 15, CLK_CFG_UPDATE, 13), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC30_1_SEL, "msdc30_1_sel", + msdc30_1_parents, CLK_CFG_3, CLK_CFG_3_SET, + CLK_CFG_3_CLR, 16, 3, 23, CLK_CFG_UPDATE, 14), + MUX_GATE_CLR_SET_UPD(CLK_TOP_AUDIO_SEL, "audio_sel", audio_parents, + CLK_CFG_3, CLK_CFG_3_SET, CLK_CFG_3_CLR, + 24, 2, 31, CLK_CFG_UPDATE, 15), + /* CLK_CFG_4 */ + MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_INTBUS_SEL, "aud_intbus_sel", + aud_intbus_parents, CLK_CFG_4, CLK_CFG_4_SET, + CLK_CFG_4_CLR, 0, 2, 7, CLK_CFG_UPDATE, 16), + MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_1_SEL, "aud_1_sel", aud_1_parents, + CLK_CFG_4, CLK_CFG_4_SET, CLK_CFG_4_CLR, + 8, 1, 15, CLK_CFG_UPDATE, 17), + MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_ENGEN1_SEL, "aud_engen1_sel", + aud_engen1_parents, CLK_CFG_4, CLK_CFG_4_SET, + CLK_CFG_4_CLR, 16, 2, 23, CLK_CFG_UPDATE, 18), + MUX_GATE_CLR_SET_UPD(CLK_TOP_DISP_PWM_SEL, "disp_pwm_sel", + disp_pwm_parents, CLK_CFG_4, CLK_CFG_4_SET, + CLK_CFG_4_CLR, 24, 2, 31, CLK_CFG_UPDATE, 19), + /* CLK_CFG_5 */ + MUX_GATE_CLR_SET_UPD(CLK_TOP_SSPM_SEL, "sspm_sel", sspm_parents, + CLK_CFG_5, CLK_CFG_5_SET, CLK_CFG_5_CLR, 0, 2, 7, + CLK_CFG_UPDATE, 20), + MUX_GATE_CLR_SET_UPD(CLK_TOP_DXCC_SEL, "dxcc_sel", dxcc_parents, + CLK_CFG_5, CLK_CFG_5_SET, CLK_CFG_5_CLR, 8, 2, 15, + CLK_CFG_UPDATE, 21), + MUX_GATE_CLR_SET_UPD(CLK_TOP_USB_TOP_SEL, "usb_top_sel", + usb_top_parents, CLK_CFG_5, CLK_CFG_5_SET, + CLK_CFG_5_CLR, 16, 1, 23, CLK_CFG_UPDATE, 22), + MUX_GATE_CLR_SET_UPD(CLK_TOP_SPM_SEL, "spm_sel", spm_parents, CLK_CFG_5, + CLK_CFG_5_SET, CLK_CFG_5_CLR, 24, 1, 31, + CLK_CFG_UPDATE, 23), + /* CLK_CFG_6 */ + MUX_GATE_CLR_SET_UPD(CLK_TOP_I2C_SEL, "i2c_sel", i2c_parents, CLK_CFG_6, + CLK_CFG_6_SET, CLK_CFG_6_CLR, 0, 3, 7, CLK_CFG_UPDATE, + 24), + MUX_GATE_CLR_SET_UPD(CLK_TOP_PWM_SEL, "pwm_sel", pwm_parents, CLK_CFG_6, + CLK_CFG_6_SET, CLK_CFG_6_CLR, 8, 2, 15, CLK_CFG_UPDATE, + 25), + MUX_GATE_CLR_SET_UPD(CLK_TOP_SENINF_SEL, "seninf_sel", seninf_parents, + CLK_CFG_6, CLK_CFG_6_SET, CLK_CFG_6_CLR, 16, 2, 23, + CLK_CFG_UPDATE, 26), + MUX_GATE_CLR_SET_UPD(CLK_TOP_AES_FDE_SEL, "aes_fde_sel", + aes_fde_parents, CLK_CFG_6, CLK_CFG_6_SET, + CLK_CFG_6_CLR, 24, 3, 31, 
CLK_CFG_UPDATE, 27), + /* CLK_CFG_7 */ + MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_PWRAP_ULPOSC_SEL, "ulposc_sel", + ulposc_parents, CLK_CFG_7, CLK_CFG_7_SET, + CLK_CFG_7_CLR, 0, 3, 7, CLK_CFG_UPDATE, 28, + CLK_IS_CRITICAL), + MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTM_SEL, "camtm_sel", camtm_parents, + CLK_CFG_7, CLK_CFG_7_SET, CLK_CFG_7_CLR, 8, 2, 15, + CLK_CFG_UPDATE, 29), +}; + +static const struct mtk_gate_regs top0_cg_regs = { + .set_ofs = 0x0, + .clr_ofs = 0x0, + .sta_ofs = 0x0, +}; + +static const struct mtk_gate_regs top1_cg_regs = { + .set_ofs = 0x104, + .clr_ofs = 0x104, + .sta_ofs = 0x104, +}; + +static const struct mtk_gate_regs top2_cg_regs = { + .set_ofs = 0x320, + .clr_ofs = 0x320, + .sta_ofs = 0x320, +}; + +#define GATE_TOP0(_id, _name, _parent, _shift) { \ + .id = _id, \ + .name = _name, \ + .parent_name = _parent, \ + .regs = &top0_cg_regs, \ + .shift = _shift, \ + .ops = &mtk_clk_gate_ops_no_setclr, \ + } + +#define GATE_TOP1(_id, _name, _parent, _shift) { \ + .id = _id, \ + .name = _name, \ + .parent_name = _parent, \ + .regs = &top1_cg_regs, \ + .shift = _shift, \ + .ops = &mtk_clk_gate_ops_no_setclr_inv, \ + } + +#define GATE_TOP2(_id, _name, _parent, _shift) { \ + .id = _id, \ + .name = _name, \ + .parent_name = _parent, \ + .regs = &top2_cg_regs, \ + .shift = _shift, \ + .ops = &mtk_clk_gate_ops_no_setclr, \ + } + +static const struct mtk_gate top_clks[] = { + /* TOP0 */ + GATE_TOP0(CLK_TOP_MD_32K, "md_32k", "f_frtc_ck", 8), + GATE_TOP0(CLK_TOP_MD_26M, "md_26m", "f_f26m_ck", 9), + GATE_TOP0(CLK_TOP_MD2_32K, "md2_32k", "f_frtc_ck", 10), + GATE_TOP0(CLK_TOP_MD2_26M, "md2_26m", "f_f26m_ck", 11), + /* TOP1 */ + GATE_TOP1(CLK_TOP_ARMPLL_DIVIDER_PLL0_EN, + "arm_div_pll0_en", "arm_div_pll0", 3), + GATE_TOP1(CLK_TOP_ARMPLL_DIVIDER_PLL1_EN, + "arm_div_pll1_en", "arm_div_pll1", 4), + GATE_TOP1(CLK_TOP_ARMPLL_DIVIDER_PLL2_EN, + "arm_div_pll2_en", "arm_div_pll2", 5), + GATE_TOP1(CLK_TOP_FMEM_OCC_DRC_EN, "drc_en", "univpll2_d2", 6), + GATE_TOP1(CLK_TOP_USB20_48M_EN, "usb20_48m_en", "usb20_48m_div", 8), + GATE_TOP1(CLK_TOP_UNIVPLL_48M_EN, "univpll_48m_en", "univ_48m_div", 9), + GATE_TOP1(CLK_TOP_F_UFS_MP_SAP_CFG_EN, "ufs_sap", "f_f26m_ck", 12), + GATE_TOP1(CLK_TOP_F_BIST2FPC_EN, "bist2fpc", "f_bist2fpc_ck", 16), + /* TOP2 */ + GATE_TOP2(CLK_TOP_APLL12_DIV0, "apll12_div0", "aud_1_ck", 2), + GATE_TOP2(CLK_TOP_APLL12_DIV1, "apll12_div1", "aud_1_ck", 3), + GATE_TOP2(CLK_TOP_APLL12_DIV2, "apll12_div2", "aud_1_ck", 4), + GATE_TOP2(CLK_TOP_APLL12_DIV3, "apll12_div3", "aud_1_ck", 5), +}; + +static const struct mtk_gate_regs ifr0_cg_regs = { + .set_ofs = 0x200, + .clr_ofs = 0x200, + .sta_ofs = 0x200, +}; + +static const struct mtk_gate_regs ifr1_cg_regs = { + .set_ofs = 0x74, + .clr_ofs = 0x74, + .sta_ofs = 0x74, +}; + +static const struct mtk_gate_regs ifr2_cg_regs = { + .set_ofs = 0x80, + .clr_ofs = 0x84, + .sta_ofs = 0x90, +}; + +static const struct mtk_gate_regs ifr3_cg_regs = { + .set_ofs = 0x88, + .clr_ofs = 0x8c, + .sta_ofs = 0x94, +}; + +static const struct mtk_gate_regs ifr4_cg_regs = { + .set_ofs = 0xa4, + .clr_ofs = 0xa8, + .sta_ofs = 0xac, +}; + +static const struct mtk_gate_regs ifr5_cg_regs = { + .set_ofs = 0xc0, + .clr_ofs = 0xc4, + .sta_ofs = 0xc8, +}; + +#define GATE_IFR0(_id, _name, _parent, _shift) { \ + .id = _id, \ + .name = _name, \ + .parent_name = _parent, \ + .regs = &ifr0_cg_regs, \ + .shift = _shift, \ + .ops = &mtk_clk_gate_ops_no_setclr_inv, \ + } + +#define GATE_IFR1(_id, _name, _parent, _shift) { \ + .id = _id, \ + .name = _name, \ + .parent_name = _parent, \ + .regs = 
&ifr1_cg_regs, \ + .shift = _shift, \ + .ops = &mtk_clk_gate_ops_no_setclr, \ + } + +#define GATE_IFR2(_id, _name, _parent, _shift) { \ + .id = _id, \ + .name = _name, \ + .parent_name = _parent, \ + .regs = &ifr2_cg_regs, \ + .shift = _shift, \ + .ops = &mtk_clk_gate_ops_setclr, \ + } + +#define GATE_IFR3(_id, _name, _parent, _shift) { \ + .id = _id, \ + .name = _name, \ + .parent_name = _parent, \ + .regs = &ifr3_cg_regs, \ + .shift = _shift, \ + .ops = &mtk_clk_gate_ops_setclr, \ + } + +#define GATE_IFR4(_id, _name, _parent, _shift) { \ + .id = _id, \ + .name = _name, \ + .parent_name = _parent, \ + .regs = &ifr4_cg_regs, \ + .shift = _shift, \ + .ops = &mtk_clk_gate_ops_setclr, \ + } + +#define GATE_IFR5(_id, _name, _parent, _shift) { \ + .id = _id, \ + .name = _name, \ + .parent_name = _parent, \ + .regs = &ifr5_cg_regs, \ + .shift = _shift, \ + .ops = &mtk_clk_gate_ops_setclr, \ + } + +static const struct mtk_gate ifr_clks[] = { + /* INFRA_TOPAXI */ + /* INFRA PERI */ + /* INFRA mode 0 */ + GATE_IFR2(CLK_IFR_ICUSB, "ifr_icusb", "axi_ck", 8), + GATE_IFR2(CLK_IFR_GCE, "ifr_gce", "axi_ck", 9), + GATE_IFR2(CLK_IFR_THERM, "ifr_therm", "axi_ck", 10), + GATE_IFR2(CLK_IFR_I2C_AP, "ifr_i2c_ap", "i2c_ck", 11), + GATE_IFR2(CLK_IFR_I2C_CCU, "ifr_i2c_ccu", "i2c_ck", 12), + GATE_IFR2(CLK_IFR_I2C_SSPM, "ifr_i2c_sspm", "i2c_ck", 13), + GATE_IFR2(CLK_IFR_I2C_RSV, "ifr_i2c_rsv", "i2c_ck", 14), + GATE_IFR2(CLK_IFR_PWM_HCLK, "ifr_pwm_hclk", "axi_ck", 15), + GATE_IFR2(CLK_IFR_PWM1, "ifr_pwm1", "f_fpwm_ck", 16), + GATE_IFR2(CLK_IFR_PWM2, "ifr_pwm2", "f_fpwm_ck", 17), + GATE_IFR2(CLK_IFR_PWM3, "ifr_pwm3", "f_fpwm_ck", 18), + GATE_IFR2(CLK_IFR_PWM4, "ifr_pwm4", "f_fpwm_ck", 19), + GATE_IFR2(CLK_IFR_PWM5, "ifr_pwm5", "f_fpwm_ck", 20), + GATE_IFR2(CLK_IFR_PWM, "ifr_pwm", "f_fpwm_ck", 21), + GATE_IFR2(CLK_IFR_UART0, "ifr_uart0", "f_fuart_ck", 22), + GATE_IFR2(CLK_IFR_UART1, "ifr_uart1", "f_fuart_ck", 23), + GATE_IFR2(CLK_IFR_GCE_26M, "ifr_gce_26m", "f_f26m_ck", 27), + GATE_IFR2(CLK_IFR_CQ_DMA_FPC, "ifr_dma", "axi_ck", 28), + GATE_IFR2(CLK_IFR_BTIF, "ifr_btif", "axi_ck", 31), + /* INFRA mode 1 */ + GATE_IFR3(CLK_IFR_SPI0, "ifr_spi0", "spi_ck", 1), + GATE_IFR3(CLK_IFR_MSDC0, "ifr_msdc0", "msdc5hclk", 2), + GATE_IFR3(CLK_IFR_MSDC1, "ifr_msdc1", "axi_ck", 4), + GATE_IFR3(CLK_IFR_TRNG, "ifr_trng", "axi_ck", 9), + GATE_IFR3(CLK_IFR_AUXADC, "ifr_auxadc", "f_f26m_ck", 10), + GATE_IFR3(CLK_IFR_CCIF1_AP, "ifr_ccif1_ap", "axi_ck", 12), + GATE_IFR3(CLK_IFR_CCIF1_MD, "ifr_ccif1_md", "axi_ck", 13), + GATE_IFR3(CLK_IFR_AUXADC_MD, "ifr_auxadc_md", "f_f26m_ck", 14), + GATE_IFR3(CLK_IFR_AP_DMA, "ifr_ap_dma", "axi_ck", 18), + GATE_IFR3(CLK_IFR_DEVICE_APC, "ifr_dapc", "axi_ck", 20), + GATE_IFR3(CLK_IFR_CCIF_AP, "ifr_ccif_ap", "axi_ck", 23), + GATE_IFR3(CLK_IFR_AUDIO, "ifr_audio", "axi_ck", 25), + GATE_IFR3(CLK_IFR_CCIF_MD, "ifr_ccif_md", "axi_ck", 26), + /* INFRA mode 2 */ + GATE_IFR4(CLK_IFR_RG_PWM_FBCLK6, "ifr_pwmfb", "f_f26m_ck", 0), + GATE_IFR4(CLK_IFR_DISP_PWM, "ifr_disp_pwm", "f_fdisp_pwm_ck", 2), + GATE_IFR4(CLK_IFR_CLDMA_BCLK, "ifr_cldmabclk", "axi_ck", 3), + GATE_IFR4(CLK_IFR_AUDIO_26M_BCLK, "ifr_audio26m", "f_f26m_ck", 4), + GATE_IFR4(CLK_IFR_SPI1, "ifr_spi1", "spi_ck", 6), + GATE_IFR4(CLK_IFR_I2C4, "ifr_i2c4", "i2c_ck", 7), + GATE_IFR4(CLK_IFR_SPI2, "ifr_spi2", "spi_ck", 9), + GATE_IFR4(CLK_IFR_SPI3, "ifr_spi3", "spi_ck", 10), + GATE_IFR4(CLK_IFR_I2C5, "ifr_i2c5", "i2c_ck", 18), + GATE_IFR4(CLK_IFR_I2C5_ARBITER, "ifr_i2c5a", "i2c_ck", 19), + GATE_IFR4(CLK_IFR_I2C5_IMM, "ifr_i2c5_imm", "i2c_ck", 20), + 
GATE_IFR4(CLK_IFR_I2C1_ARBITER, "ifr_i2c1a", "i2c_ck", 21), + GATE_IFR4(CLK_IFR_I2C1_IMM, "ifr_i2c1_imm", "i2c_ck", 22), + GATE_IFR4(CLK_IFR_I2C2_ARBITER, "ifr_i2c2a", "i2c_ck", 23), + GATE_IFR4(CLK_IFR_I2C2_IMM, "ifr_i2c2_imm", "i2c_ck", 24), + GATE_IFR4(CLK_IFR_SPI4, "ifr_spi4", "spi_ck", 25), + GATE_IFR4(CLK_IFR_SPI5, "ifr_spi5", "spi_ck", 26), + GATE_IFR4(CLK_IFR_CQ_DMA, "ifr_cq_dma", "axi_ck", 27), + GATE_IFR4(CLK_IFR_FAES_FDE, "ifr_faes_fde_ck", "aes_fde_ck", 29), + /* INFRA mode 3 */ + GATE_IFR5(CLK_IFR_MSDC0_SELF, "ifr_msdc0sf", "msdc50_0_ck", 0), + GATE_IFR5(CLK_IFR_MSDC1_SELF, "ifr_msdc1sf", "msdc50_0_ck", 1), + GATE_IFR5(CLK_IFR_I2C6, "ifr_i2c6", "i2c_ck", 6), + GATE_IFR5(CLK_IFR_AP_MSDC0, "ifr_ap_msdc0", "msdc50_0_ck", 7), + GATE_IFR5(CLK_IFR_MD_MSDC0, "ifr_md_msdc0", "msdc50_0_ck", 8), + GATE_IFR5(CLK_IFR_MSDC0_SRC, "ifr_msdc0_clk", "msdc50_0_ck", 9), + GATE_IFR5(CLK_IFR_MSDC1_SRC, "ifr_msdc1_clk", "msdc30_1_ck", 10), + GATE_IFR5(CLK_IFR_MCU_PM_BCLK, "ifr_mcu_pm_bclk", "axi_ck", 17), + GATE_IFR5(CLK_IFR_CCIF2_AP, "ifr_ccif2_ap", "axi_ck", 18), + GATE_IFR5(CLK_IFR_CCIF2_MD, "ifr_ccif2_md", "axi_ck", 19), + GATE_IFR5(CLK_IFR_CCIF3_AP, "ifr_ccif3_ap", "axi_ck", 20), + GATE_IFR5(CLK_IFR_CCIF3_MD, "ifr_ccif3_md", "axi_ck", 21), +}; + +/* additional CCF control for mipi26M race condition(disp/camera) */ +static const struct mtk_gate_regs apmixed_cg_regs = { + .set_ofs = 0x14, + .clr_ofs = 0x14, + .sta_ofs = 0x14, +}; + +#define GATE_APMIXED(_id, _name, _parent, _shift) { \ + .id = _id, \ + .name = _name, \ + .parent_name = _parent, \ + .regs = &apmixed_cg_regs, \ + .shift = _shift, \ + .ops = &mtk_clk_gate_ops_no_setclr_inv, \ + } + +static const struct mtk_gate apmixed_clks[] = { + /* AUDIO0 */ + GATE_APMIXED(CLK_APMIXED_SSUSB26M, "apmixed_ssusb26m", "f_f26m_ck", + 4), + GATE_APMIXED(CLK_APMIXED_APPLL26M, "apmixed_appll26m", "f_f26m_ck", + 5), + GATE_APMIXED(CLK_APMIXED_MIPIC0_26M, "apmixed_mipic026m", "f_f26m_ck", + 6), + GATE_APMIXED(CLK_APMIXED_MDPLLGP26M, "apmixed_mdpll26m", "f_f26m_ck", + 7), + GATE_APMIXED(CLK_APMIXED_MMSYS_F26M, "apmixed_mmsys26m", "f_f26m_ck", + 8), + GATE_APMIXED(CLK_APMIXED_UFS26M, "apmixed_ufs26m", "f_f26m_ck", + 9), + GATE_APMIXED(CLK_APMIXED_MIPIC1_26M, "apmixed_mipic126m", "f_f26m_ck", + 11), + GATE_APMIXED(CLK_APMIXED_MEMPLL26M, "apmixed_mempll26m", "f_f26m_ck", + 13), + GATE_APMIXED(CLK_APMIXED_CLKSQ_LVPLL_26M, "apmixed_lvpll26m", + "f_f26m_ck", 14), + GATE_APMIXED(CLK_APMIXED_MIPID0_26M, "apmixed_mipid026m", "f_f26m_ck", + 16), +}; + +#define MT6765_PLL_FMAX (3800UL * MHZ) +#define MT6765_PLL_FMIN (1500UL * MHZ) + +#define CON0_MT6765_RST_BAR BIT(23) + +#define PLL_INFO_NULL (0xFF) + +#define PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \ + _pcwibits, _pd_reg, _pd_shift, _tuner_reg, _tuner_en_reg,\ + _tuner_en_bit, _pcw_reg, _pcw_shift, _div_table) {\ + .id = _id, \ + .name = _name, \ + .reg = _reg, \ + .pwr_reg = _pwr_reg, \ + .en_mask = _en_mask, \ + .flags = _flags, \ + .rst_bar_mask = CON0_MT6765_RST_BAR, \ + .fmax = MT6765_PLL_FMAX, \ + .fmin = MT6765_PLL_FMIN, \ + .pcwbits = _pcwbits, \ + .pcwibits = _pcwibits, \ + .pd_reg = _pd_reg, \ + .pd_shift = _pd_shift, \ + .tuner_reg = _tuner_reg, \ + .tuner_en_reg = _tuner_en_reg, \ + .tuner_en_bit = _tuner_en_bit, \ + .pcw_reg = _pcw_reg, \ + .pcw_shift = _pcw_shift, \ + .div_table = _div_table, \ + } + +#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \ + _pcwibits, _pd_reg, _pd_shift, _tuner_reg, \ + _tuner_en_reg, _tuner_en_bit, _pcw_reg, \ + _pcw_shift) \ + 
PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, \ + _pcwbits, _pcwibits, _pd_reg, _pd_shift, \ + _tuner_reg, _tuner_en_reg, _tuner_en_bit, \ + _pcw_reg, _pcw_shift, NULL) \ + +static const struct mtk_pll_data plls[] = { + PLL(CLK_APMIXED_ARMPLL_L, "armpll_l", 0x021C, 0x0228, BIT(0), + PLL_AO, 22, 8, 0x0220, 24, 0, 0, 0, 0x0220, 0), + PLL(CLK_APMIXED_ARMPLL, "armpll", 0x020C, 0x0218, BIT(0), + PLL_AO, 22, 8, 0x0210, 24, 0, 0, 0, 0x0210, 0), + PLL(CLK_APMIXED_CCIPLL, "ccipll", 0x022C, 0x0238, BIT(0), + PLL_AO, 22, 8, 0x0230, 24, 0, 0, 0, 0x0230, 0), + PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x023C, 0x0248, BIT(0), + (HAVE_RST_BAR | PLL_AO), 22, 8, 0x0240, 24, 0, 0, 0, 0x0240, + 0), + PLL(CLK_APMIXED_MFGPLL, "mfgpll", 0x024C, 0x0258, BIT(0), + 0, 22, 8, 0x0250, 24, 0, 0, 0, 0x0250, 0), + PLL(CLK_APMIXED_MMPLL, "mmpll", 0x025C, 0x0268, BIT(0), + 0, 22, 8, 0x0260, 24, 0, 0, 0, 0x0260, 0), + PLL(CLK_APMIXED_UNIV2PLL, "univ2pll", 0x026C, 0x0278, BIT(0), + HAVE_RST_BAR, 22, 8, 0x0270, 24, 0, 0, 0, 0x0270, 0), + PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x027C, 0x0288, BIT(0), + 0, 22, 8, 0x0280, 24, 0, 0, 0, 0x0280, 0), + PLL(CLK_APMIXED_APLL1, "apll1", 0x028C, 0x029C, BIT(0), + 0, 32, 8, 0x0290, 24, 0x0040, 0x000C, 0, 0x0294, 0), + PLL(CLK_APMIXED_MPLL, "mpll", 0x02A0, 0x02AC, BIT(0), + PLL_AO, 22, 8, 0x02A4, 24, 0, 0, 0, 0x02A4, 0), +}; + +static int clk_mt6765_apmixed_probe(struct platform_device *pdev) +{ + struct clk_onecell_data *clk_data; + int r; + struct device_node *node = pdev->dev.of_node; + void __iomem *base; + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base)) { + pr_err("%s(): ioremap failed\n", __func__); + return PTR_ERR(base); + } + + clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK); + + mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data); + + mtk_clk_register_gates(node, apmixed_clks, + ARRAY_SIZE(apmixed_clks), clk_data); + r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + + if (r) + pr_err("%s(): could not register clock provider: %d\n", + __func__, r); + + apmixed_base = base; + /* MPLL, CCIPLL, MAINPLL set HW mode, TDCLKSQ, CLKSQ1 */ + writel(readl(AP_PLL_CON3) & 0xFFFFFFE1, AP_PLL_CON3); + writel(readl(PLLON_CON0) & 0x01041041, PLLON_CON0); + writel(readl(PLLON_CON1) & 0x01041041, PLLON_CON1); + + return r; +} + +static int clk_mt6765_top_probe(struct platform_device *pdev) +{ + int r; + struct device_node *node = pdev->dev.of_node; + void __iomem *base; + struct clk_onecell_data *clk_data; + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base)) { + pr_err("%s(): ioremap failed\n", __func__); + return PTR_ERR(base); + } + + clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK); + + mtk_clk_register_fixed_clks(fixed_clks, ARRAY_SIZE(fixed_clks), + clk_data); + mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), + clk_data); + mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes), node, + &mt6765_clk_lock, clk_data); + mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks), + clk_data); + + r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + + if (r) + pr_err("%s(): could not register clock provider: %d\n", + __func__, r); + + cksys_base = base; + /* [4]:no need */ + writel(readl(CLK_SCP_CFG_0) | 0x3EF, CLK_SCP_CFG_0); + /*[1,2,3,8]: no need*/ + writel(readl(CLK_SCP_CFG_1) | 0x1, CLK_SCP_CFG_1); + + return r; +} + +static int clk_mt6765_ifr_probe(struct 
platform_device *pdev) +{ + struct clk_onecell_data *clk_data; + int r; + struct device_node *node = pdev->dev.of_node; + void __iomem *base; + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base)) { + pr_err("%s(): ioremap failed\n", __func__); + return PTR_ERR(base); + } + + clk_data = mtk_alloc_clk_data(CLK_IFR_NR_CLK); + + mtk_clk_register_gates(node, ifr_clks, ARRAY_SIZE(ifr_clks), + clk_data); + r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + + if (r) + pr_err("%s(): could not register clock provider: %d\n", + __func__, r); + + return r; +} + +static const struct of_device_id of_match_clk_mt6765[] = { + { + .compatible = "mediatek,mt6765-apmixedsys", + .data = clk_mt6765_apmixed_probe, + }, { + .compatible = "mediatek,mt6765-topckgen", + .data = clk_mt6765_top_probe, + }, { + .compatible = "mediatek,mt6765-infracfg", + .data = clk_mt6765_ifr_probe, + }, { + /* sentinel */ + } +}; + +static int clk_mt6765_probe(struct platform_device *pdev) +{ + int (*clk_probe)(struct platform_device *d); + int r; + + clk_probe = of_device_get_match_data(&pdev->dev); + if (!clk_probe) + return -EINVAL; + + r = clk_probe(pdev); + if (r) + dev_err(&pdev->dev, + "could not register clock provider: %s: %d\n", + pdev->name, r); + + return r; +} + +static struct platform_driver clk_mt6765_drv = { + .probe = clk_mt6765_probe, + .driver = { + .name = "clk-mt6765", + .owner = THIS_MODULE, + .of_match_table = of_match_clk_mt6765, + }, +}; + +static int __init clk_mt6765_init(void) +{ + return platform_driver_register(&clk_mt6765_drv); +} + +arch_initcall(clk_mt6765_init); -- GitLab From 571cfadcc628dd5591444f7289e27445ea732f4c Mon Sep 17 00:00:00 2001 From: Weiyi Lu Date: Wed, 27 May 2020 14:25:49 +0800 Subject: [PATCH 0823/1971] clk: mediatek: assign the initial value to clk_init_data of mtk_mux When some new clock supports are introduced, e.g. [1] it might lead to an error although it should be NULL because clk_init_data is on the stack and it might have random values if using without initialization. Add the missing initial value to clk_init_data. [1] https://android-review.googlesource.com/c/kernel/common/+/1278046 Fixes: a3ae549917f1 ("clk: mediatek: Add new clkmux register API") Signed-off-by: Weiyi Lu Reviewed-by: Matthias Brugger Cc: Link: https://lore.kernel.org/r/1590560749-29136-1-git-send-email-weiyi.lu@mediatek.com Signed-off-by: Stephen Boyd --- drivers/clk/mediatek/clk-mux.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/clk/mediatek/clk-mux.c b/drivers/clk/mediatek/clk-mux.c index 76f9cd0391951..14e127e9a7404 100644 --- a/drivers/clk/mediatek/clk-mux.c +++ b/drivers/clk/mediatek/clk-mux.c @@ -160,7 +160,7 @@ struct clk *mtk_clk_register_mux(const struct mtk_mux *mux, spinlock_t *lock) { struct mtk_clk_mux *clk_mux; - struct clk_init_data init; + struct clk_init_data init = {}; struct clk *clk; clk_mux = kzalloc(sizeof(*clk_mux), GFP_KERNEL); -- GitLab From 6d3f922c46f2e91f63c92f8dd28381f097082912 Mon Sep 17 00:00:00 2001 From: Georgi Djakov Date: Tue, 12 May 2020 15:53:21 +0300 Subject: [PATCH 0824/1971] opp: Add support for parsing interconnect bandwidth The OPP bindings now support bandwidth values, so add support to parse it from device tree and store it into the new dev_pm_opp_icc_bw struct, which is part of the dev_pm_opp. 
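A later patch in this series reuses the exported helper from cpufreq-dt simply to probe for path availability; a minimal sketch of that call pattern (the device pointer and the surrounding probe logic are illustrative, not taken verbatim from any driver):

	/*
	 * Sketch: verify that every "interconnects" path of the device can be
	 * obtained before continuing.  With a NULL opp_table the helper gets
	 * and then releases the paths again, returning 0 on success or an
	 * error such as -EPROBE_DEFER while the interconnect provider is not
	 * ready yet.
	 */
	ret = dev_pm_opp_of_find_icc_paths(cpu_dev, NULL);
	if (ret)
		return ret;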
Signed-off-by: Georgi Djakov Reviewed-by: Matthias Kaehlcke [ Viresh: Create _read_bw() and use it, renamed _of_find_icc_paths() to dev_pm_opp_of_find_icc_paths(), exported it and made opp_table argument optional. Also drop the depends on from Kconfig. ] Signed-off-by: Viresh Kumar --- drivers/opp/core.c | 28 ++++++++-- drivers/opp/of.c | 114 ++++++++++++++++++++++++++++++++++++++++- drivers/opp/opp.h | 7 +++ include/linux/pm_opp.h | 18 +++++++ 4 files changed, 162 insertions(+), 5 deletions(-) diff --git a/drivers/opp/core.c b/drivers/opp/core.c index ce7e4103ec092..d19cc7970643e 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -999,6 +999,12 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index) ret); } + /* Find interconnect path(s) for the device */ + ret = dev_pm_opp_of_find_icc_paths(dev, opp_table); + if (ret) + dev_warn(dev, "%s: Error finding interconnect paths: %d\n", + __func__, ret); + BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head); INIT_LIST_HEAD(&opp_table->opp_list); kref_init(&opp_table->kref); @@ -1057,6 +1063,7 @@ static void _opp_table_kref_release(struct kref *kref) { struct opp_table *opp_table = container_of(kref, struct opp_table, kref); struct opp_device *opp_dev, *temp; + int i; _of_clear_opp_table(opp_table); @@ -1064,6 +1071,12 @@ static void _opp_table_kref_release(struct kref *kref) if (!IS_ERR(opp_table->clk)) clk_put(opp_table->clk); + if (opp_table->paths) { + for (i = 0; i < opp_table->path_count; i++) + icc_put(opp_table->paths[i]); + kfree(opp_table->paths); + } + WARN_ON(!list_empty(&opp_table->opp_list)); list_for_each_entry_safe(opp_dev, temp, &opp_table->dev_list, node) { @@ -1243,19 +1256,23 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_remove_all_dynamic); struct dev_pm_opp *_opp_allocate(struct opp_table *table) { struct dev_pm_opp *opp; - int count, supply_size; + int supply_count, supply_size, icc_size; /* Allocate space for at least one supply */ - count = table->regulator_count > 0 ? table->regulator_count : 1; - supply_size = sizeof(*opp->supplies) * count; + supply_count = table->regulator_count > 0 ? table->regulator_count : 1; + supply_size = sizeof(*opp->supplies) * supply_count; + icc_size = sizeof(*opp->bandwidth) * table->path_count; /* allocate new OPP node and supplies structures */ - opp = kzalloc(sizeof(*opp) + supply_size, GFP_KERNEL); + opp = kzalloc(sizeof(*opp) + supply_size + icc_size, GFP_KERNEL); + if (!opp) return NULL; /* Put the supplies at the end of the OPP structure as an empty array */ opp->supplies = (struct dev_pm_opp_supply *)(opp + 1); + if (icc_size) + opp->bandwidth = (struct dev_pm_opp_icc_bw *)(opp->supplies + supply_count); INIT_LIST_HEAD(&opp->node); return opp; @@ -1290,6 +1307,9 @@ int _opp_compare_key(struct dev_pm_opp *opp1, struct dev_pm_opp *opp2) { if (opp1->rate != opp2->rate) return opp1->rate < opp2->rate ? -1 : 1; + if (opp1->bandwidth && opp2->bandwidth && + opp1->bandwidth[0].peak != opp2->bandwidth[0].peak) + return opp1->bandwidth[0].peak < opp2->bandwidth[0].peak ? -1 : 1; if (opp1->level != opp2->level) return opp1->level < opp2->level ? 
-1 : 1; return 0; diff --git a/drivers/opp/of.c b/drivers/opp/of.c index 303d2207e0ff5..0c55862f5624a 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -332,6 +332,62 @@ free_required_opps: return ret; } +int dev_pm_opp_of_find_icc_paths(struct device *dev, + struct opp_table *opp_table) +{ + struct device_node *np; + int ret = 0, i, count, num_paths; + struct icc_path **paths; + + np = of_node_get(dev->of_node); + if (!np) + return 0; + + count = of_count_phandle_with_args(np, "interconnects", + "#interconnect-cells"); + of_node_put(np); + if (count < 0) + return 0; + + /* two phandles when #interconnect-cells = <1> */ + if (count % 2) { + dev_err(dev, "%s: Invalid interconnects values\n", __func__); + return -EINVAL; + } + + num_paths = count / 2; + paths = kcalloc(num_paths, sizeof(*paths), GFP_KERNEL); + if (!paths) + return -ENOMEM; + + for (i = 0; i < num_paths; i++) { + paths[i] = of_icc_get_by_index(dev, i); + if (IS_ERR(paths[i])) { + ret = PTR_ERR(paths[i]); + if (ret != -EPROBE_DEFER) { + dev_err(dev, "%s: Unable to get path%d: %d\n", + __func__, i, ret); + } + goto err; + } + } + + if (opp_table) { + opp_table->paths = paths; + opp_table->path_count = num_paths; + return 0; + } + +err: + while (i--) + icc_put(paths[i]); + + kfree(paths); + + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_of_find_icc_paths); + static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table, struct device_node *np) { @@ -521,9 +577,45 @@ void dev_pm_opp_of_remove_table(struct device *dev) } EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table); +static int _read_bw(struct dev_pm_opp *new_opp, struct device_node *np, + bool peak) +{ + const char *name = peak ? "opp-peak-kBps" : "opp-avg-kBps"; + struct property *prop; + int i, count, ret; + u32 *bw; + + prop = of_find_property(np, name, NULL); + if (!prop) + return -ENODEV; + + count = prop->length / sizeof(u32); + bw = kmalloc_array(count, sizeof(*bw), GFP_KERNEL); + if (!bw) + return -ENOMEM; + + ret = of_property_read_u32_array(np, name, bw, count); + if (ret) { + pr_err("%s: Error parsing %s: %d\n", __func__, name, ret); + goto out; + } + + for (i = 0; i < count; i++) { + if (peak) + new_opp->bandwidth[i].peak = kBps_to_icc(bw[i]); + else + new_opp->bandwidth[i].avg = kBps_to_icc(bw[i]); + } + +out: + kfree(bw); + return ret; +} + static int _read_opp_key(struct dev_pm_opp *new_opp, struct device_node *np, bool *rate_not_available) { + bool found = false; u64 rate; int ret; @@ -535,10 +627,30 @@ static int _read_opp_key(struct dev_pm_opp *new_opp, struct device_node *np, * bit guaranteed in clk API. 
*/ new_opp->rate = (unsigned long)rate; + found = true; } *rate_not_available = !!ret; - of_property_read_u32(np, "opp-level", &new_opp->level); + /* + * Bandwidth consists of peak and average (optional) values: + * opp-peak-kBps = ; + * opp-avg-kBps = ; + */ + ret = _read_bw(new_opp, np, true); + if (!ret) { + found = true; + ret = _read_bw(new_opp, np, false); + } + + /* The properties were found but we failed to parse them */ + if (ret && ret != -ENODEV) + return ret; + + if (!of_property_read_u32(np, "opp-level", &new_opp->level)) + found = true; + + if (found) + return 0; return ret; } diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h index bcadb1e328a4c..2b81ffef1ba4e 100644 --- a/drivers/opp/opp.h +++ b/drivers/opp/opp.h @@ -12,6 +12,7 @@ #define __DRIVER_OPP_H__ #include +#include #include #include #include @@ -59,6 +60,7 @@ extern struct list_head opp_tables; * @rate: Frequency in hertz * @level: Performance level * @supplies: Power supplies voltage/current values + * @bandwidth: Interconnect bandwidth values * @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's * frequency from any other OPP's frequency. * @required_opps: List of OPPs that are required by this OPP. @@ -81,6 +83,7 @@ struct dev_pm_opp { unsigned int level; struct dev_pm_opp_supply *supplies; + struct dev_pm_opp_icc_bw *bandwidth; unsigned long clock_latency_ns; @@ -146,6 +149,8 @@ enum opp_table_access { * @regulator_count: Number of power supply regulators. Its value can be -1 * (uninitialized), 0 (no opp-microvolt property) or > 0 (has opp-microvolt * property). + * @paths: Interconnect path handles + * @path_count: Number of interconnect paths * @genpd_performance_state: Device's power domain support performance state. * @is_genpd: Marks if the OPP table belongs to a genpd. * @set_opp: Platform specific set_opp callback @@ -189,6 +194,8 @@ struct opp_table { struct clk *clk; struct regulator **regulators; int regulator_count; + struct icc_path **paths; + unsigned int path_count; bool genpd_performance_state; bool is_genpd; diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 747861816f4f5..d5c4a329321dd 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -41,6 +41,18 @@ struct dev_pm_opp_supply { unsigned long u_amp; }; +/** + * struct dev_pm_opp_icc_bw - Interconnect bandwidth values + * @avg: Average bandwidth corresponding to this OPP (in icc units) + * @peak: Peak bandwidth corresponding to this OPP (in icc units) + * + * This structure stores the bandwidth values for a single interconnect path. 
+ */ +struct dev_pm_opp_icc_bw { + u32 avg; + u32 peak; +}; + /** * struct dev_pm_opp_info - OPP freq/voltage/current values * @rate: Target clk rate in hz @@ -360,6 +372,7 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpuma struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev); struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp); int of_get_required_opp_performance_state(struct device_node *np, int index); +int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table); void dev_pm_opp_of_register_em(struct cpumask *cpus); #else static inline int dev_pm_opp_of_add_table(struct device *dev) @@ -408,6 +421,11 @@ static inline int of_get_required_opp_performance_state(struct device_node *np, { return -ENOTSUPP; } + +static inline int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table) +{ + return -ENOTSUPP; +} #endif #endif /* __LINUX_OPP_H__ */ -- GitLab From 120e117bdc8483773fdae19dd326ef460b1ab4c2 Mon Sep 17 00:00:00 2001 From: Georgi Djakov Date: Tue, 12 May 2020 15:53:22 +0300 Subject: [PATCH 0825/1971] opp: Add sanity checks in _read_opp_key() When we read the OPP keys, it would be nice to do some sanity checks of the values we get from DT and see if they match with the information that is populated in the OPP table. Let's pass a pointer of the table, so that we can do some validation. Signed-off-by: Georgi Djakov Reviewed-by: Matthias Kaehlcke Reviewed-by: Sibi Sankar [ Viresh: Fix rebase conflicts ] Signed-off-by: Viresh Kumar --- drivers/opp/of.c | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/drivers/opp/of.c b/drivers/opp/of.c index 0c55862f5624a..61fce1284f012 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -577,8 +577,8 @@ void dev_pm_opp_of_remove_table(struct device *dev) } EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table); -static int _read_bw(struct dev_pm_opp *new_opp, struct device_node *np, - bool peak) +static int _read_bw(struct dev_pm_opp *new_opp, struct opp_table *table, + struct device_node *np, bool peak) { const char *name = peak ? 
"opp-peak-kBps" : "opp-avg-kBps"; struct property *prop; @@ -590,6 +590,12 @@ static int _read_bw(struct dev_pm_opp *new_opp, struct device_node *np, return -ENODEV; count = prop->length / sizeof(u32); + if (table->path_count != count) { + pr_err("%s: Mismatch between %s and paths (%d %d)\n", + __func__, name, count, table->path_count); + return -EINVAL; + } + bw = kmalloc_array(count, sizeof(*bw), GFP_KERNEL); if (!bw) return -ENOMEM; @@ -612,8 +618,8 @@ out: return ret; } -static int _read_opp_key(struct dev_pm_opp *new_opp, struct device_node *np, - bool *rate_not_available) +static int _read_opp_key(struct dev_pm_opp *new_opp, struct opp_table *table, + struct device_node *np, bool *rate_not_available) { bool found = false; u64 rate; @@ -636,10 +642,10 @@ static int _read_opp_key(struct dev_pm_opp *new_opp, struct device_node *np, * opp-peak-kBps = ; * opp-avg-kBps = ; */ - ret = _read_bw(new_opp, np, true); + ret = _read_bw(new_opp, table, np, true); if (!ret) { found = true; - ret = _read_bw(new_opp, np, false); + ret = _read_bw(new_opp, table, np, false); } /* The properties were found but we failed to parse them */ @@ -692,7 +698,7 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table, if (!new_opp) return ERR_PTR(-ENOMEM); - ret = _read_opp_key(new_opp, np, &rate_not_available); + ret = _read_opp_key(new_opp, opp_table, np, &rate_not_available); if (ret < 0 && !opp_table->is_genpd) { dev_err(dev, "%s: opp key field not found\n", __func__); goto free_opp; -- GitLab From fe2af40250bfc3cca9120cd5c315e2ea7a1082ed Mon Sep 17 00:00:00 2001 From: Georgi Djakov Date: Tue, 12 May 2020 15:53:23 +0300 Subject: [PATCH 0826/1971] opp: Update the bandwidth on OPP frequency changes If the OPP bandwidth values are populated, we want to switch also the interconnect bandwidth in addition to frequency and voltage. Signed-off-by: Georgi Djakov Reviewed-by: Matthias Kaehlcke Reviewed-by: Sibi Sankar Signed-off-by: Viresh Kumar --- drivers/opp/core.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/drivers/opp/core.c b/drivers/opp/core.c index d19cc7970643e..c3ce39c74de84 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -808,7 +808,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) unsigned long freq, old_freq, temp_freq; struct dev_pm_opp *old_opp, *opp; struct clk *clk; - int ret; + int ret, i; opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) { @@ -909,6 +909,17 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) dev_err(dev, "Failed to set required opps: %d\n", ret); } + if (!ret && opp_table->paths) { + for (i = 0; i < opp_table->path_count; i++) { + ret = icc_set_bw(opp_table->paths[i], + opp->bandwidth[i].avg, + opp->bandwidth[i].peak); + if (ret) + dev_err(dev, "Failed to set bandwidth[%d]: %d\n", + i, ret); + } + } + put_opp: dev_pm_opp_put(opp); put_old_opp: -- GitLab From 8b17f17a097bdbc9546f5fdde3098b5f21cbfbff Mon Sep 17 00:00:00 2001 From: Georgi Djakov Date: Tue, 12 May 2020 15:53:24 +0300 Subject: [PATCH 0827/1971] cpufreq: dt: Add support for interconnect bandwidth scaling In addition to clocks and regulators, some devices can scale the bandwidth of their on-chip interconnect - for example between CPU and DDR memory. Add support for that, so that platforms which support it can make use of it. Signed-off-by: Georgi Djakov Reviewed-by: Matthias Kaehlcke [ Viresh: Reused dev_pm_opp_of_find_icc_paths(). Also drop the depends on from Kconfig. 
] Signed-off-by: Viresh Kumar fixup! cpufreq: dt: Add support for interconnect bandwidth scaling --- drivers/cpufreq/cpufreq-dt.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index 26fe8dfb9ce62..79742bbd221f0 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -121,6 +121,10 @@ static int resources_available(void) clk_put(cpu_clk); + ret = dev_pm_opp_of_find_icc_paths(cpu_dev, NULL); + if (ret) + return ret; + name = find_supply_name(cpu_dev); /* Platform doesn't require regulator */ if (!name) -- GitLab From 0430b1d5704b0f0f1d237236dde9c143f8669e49 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Mon, 18 May 2020 14:25:32 +0300 Subject: [PATCH 0828/1971] opp: Expose bandwidth information via debugfs Expose the bandwidth information as well via debugfs. Signed-off-by: Viresh Kumar Signed-off-by: Georgi Djakov --- drivers/interconnect/core.c | 18 ++++++++++++++++ drivers/opp/debugfs.c | 42 ++++++++++++++++++++++++++++++++++++ include/linux/interconnect.h | 6 ++++++ 3 files changed, 66 insertions(+) diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c index 2d2e497805119..a56349c149851 100644 --- a/drivers/interconnect/core.c +++ b/drivers/interconnect/core.c @@ -514,6 +514,24 @@ void icc_set_tag(struct icc_path *path, u32 tag) } EXPORT_SYMBOL_GPL(icc_set_tag); +/** + * icc_get_name() - Get name of the icc path + * @path: reference to the path returned by icc_get() + * + * This function is used by an interconnect consumer to get the name of the icc + * path. + * + * Returns a valid pointer on success, or NULL otherwise. + */ +const char *icc_get_name(struct icc_path *path) +{ + if (!path) + return NULL; + + return path->name; +} +EXPORT_SYMBOL_GPL(icc_get_name); + /** * icc_set_bw() - set bandwidth constraints on an interconnect path * @path: reference to the path returned by icc_get() diff --git a/drivers/opp/debugfs.c b/drivers/opp/debugfs.c index 609665e339b6b..596c185b5dda4 100644 --- a/drivers/opp/debugfs.c +++ b/drivers/opp/debugfs.c @@ -32,6 +32,47 @@ void opp_debug_remove_one(struct dev_pm_opp *opp) debugfs_remove_recursive(opp->dentry); } +static ssize_t bw_name_read(struct file *fp, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct icc_path *path = fp->private_data; + char buf[64]; + int i; + + i = scnprintf(buf, sizeof(buf), "%.62s\n", icc_get_name(path)); + + return simple_read_from_buffer(userbuf, count, ppos, buf, i); +} + +static const struct file_operations bw_name_fops = { + .open = simple_open, + .read = bw_name_read, + .llseek = default_llseek, +}; + +static void opp_debug_create_bw(struct dev_pm_opp *opp, + struct opp_table *opp_table, + struct dentry *pdentry) +{ + struct dentry *d; + char name[11]; + int i; + + for (i = 0; i < opp_table->path_count; i++) { + snprintf(name, sizeof(name), "icc-path-%.1d", i); + + /* Create per-path directory */ + d = debugfs_create_dir(name, pdentry); + + debugfs_create_file("name", S_IRUGO, d, opp_table->paths[i], + &bw_name_fops); + debugfs_create_u32("peak_bw", S_IRUGO, d, + &opp->bandwidth[i].peak); + debugfs_create_u32("avg_bw", S_IRUGO, d, + &opp->bandwidth[i].avg); + } +} + static void opp_debug_create_supplies(struct dev_pm_opp *opp, struct opp_table *opp_table, struct dentry *pdentry) @@ -94,6 +135,7 @@ void opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table) &opp->clock_latency_ns); opp_debug_create_supplies(opp, opp_table, d); + opp_debug_create_bw(opp, opp_table, d); 
opp->dentry = d; } diff --git a/include/linux/interconnect.h b/include/linux/interconnect.h index 34e97231a6abb..1ad09efd296e1 100644 --- a/include/linux/interconnect.h +++ b/include/linux/interconnect.h @@ -32,6 +32,7 @@ struct icc_path *of_icc_get_by_index(struct device *dev, int idx); void icc_put(struct icc_path *path); int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw); void icc_set_tag(struct icc_path *path, u32 tag); +const char *icc_get_name(struct icc_path *path); #else @@ -65,6 +66,11 @@ static inline void icc_set_tag(struct icc_path *path, u32 tag) { } +static inline const char *icc_get_name(struct icc_path *path) +{ + return NULL; +} + #endif /* CONFIG_INTERCONNECT */ #endif /* __LINUX_INTERCONNECT_H */ -- GitLab From b23dfa3543f31fbb8c0098925bf90fc23193d17a Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 15 May 2020 12:37:24 +0530 Subject: [PATCH 0829/1971] opp: Reorder the code for !target_freq case MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reorder the code a bit to make it more readable. Add additional comment as well. Tested-by: Marek Szyprowski Acked-by: Clément Péron Tested-by: Clément Péron Signed-off-by: Viresh Kumar --- drivers/opp/core.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/drivers/opp/core.c b/drivers/opp/core.c index c3ce39c74de84..5e1035a041aee 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -817,15 +817,21 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) } if (unlikely(!target_freq)) { - if (opp_table->required_opp_tables) { - ret = _set_required_opps(dev, opp_table, NULL); - } else if (!_get_opp_count(opp_table)) { + /* + * Some drivers need to support cases where some platforms may + * have OPP table for the device, while others don't and + * opp_set_rate() just needs to behave like clk_set_rate(). + */ + if (!_get_opp_count(opp_table)) return 0; - } else { + + if (!opp_table->required_opp_tables) { dev_err(dev, "target frequency can't be 0\n"); ret = -EINVAL; + goto put_opp_table; } + ret = _set_required_opps(dev, opp_table, NULL); goto put_opp_table; } -- GitLab From 8d45719caaf56c859be0172447f8559c0df40f93 Mon Sep 17 00:00:00 2001 From: Kamil Konieczny Date: Fri, 19 Jul 2019 17:05:32 +0200 Subject: [PATCH 0830/1971] opp: core: add regulators enable and disable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add enable regulators to dev_pm_opp_set_regulators() and disable regulators to dev_pm_opp_put_regulators(). Even if bootloader leaves regulators enabled, they should be enabled in kernel in order to increase the reference count. Tested-by: Marek Szyprowski Acked-by: Clément Péron Tested-by: Clément Péron Signed-off-by: Kamil Konieczny [ Viresh: Enable the regulator only after it is programmed and add a flag to track its status. 
] Signed-off-by: Viresh Kumar --- drivers/opp/core.c | 28 ++++++++++++++++++++++++++-- drivers/opp/opp.h | 2 ++ 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/drivers/opp/core.c b/drivers/opp/core.c index 5e1035a041aee..8efe12a90ce89 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -664,7 +664,7 @@ static inline int _generic_set_opp_clk_only(struct device *dev, struct clk *clk, return ret; } -static int _generic_set_opp_regulator(const struct opp_table *opp_table, +static int _generic_set_opp_regulator(struct opp_table *opp_table, struct device *dev, unsigned long old_freq, unsigned long freq, @@ -699,6 +699,18 @@ static int _generic_set_opp_regulator(const struct opp_table *opp_table, goto restore_freq; } + /* + * Enable the regulator after setting its voltages, otherwise it breaks + * some boot-enabled regulators. + */ + if (unlikely(!opp_table->regulator_enabled)) { + ret = regulator_enable(reg); + if (ret < 0) + dev_warn(dev, "Failed to enable regulator: %d", ret); + else + opp_table->regulator_enabled = true; + } + return 0; restore_freq: @@ -825,12 +837,17 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) if (!_get_opp_count(opp_table)) return 0; - if (!opp_table->required_opp_tables) { + if (!opp_table->required_opp_tables && !opp_table->regulators) { dev_err(dev, "target frequency can't be 0\n"); ret = -EINVAL; goto put_opp_table; } + if (opp_table->regulator_enabled) { + regulator_disable(opp_table->regulators[0]); + opp_table->regulator_enabled = false; + } + ret = _set_required_opps(dev, opp_table, NULL); goto put_opp_table; } @@ -1718,6 +1735,13 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table) /* Make sure there are no concurrent readers while updating opp_table */ WARN_ON(!list_empty(&opp_table->opp_list)); + if (opp_table->regulator_enabled) { + for (i = opp_table->regulator_count - 1; i >= 0; i--) + regulator_disable(opp_table->regulators[i]); + + opp_table->regulator_enabled = false; + } + for (i = opp_table->regulator_count - 1; i >= 0; i--) regulator_put(opp_table->regulators[i]); diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h index 2b81ffef1ba4e..e51646ff279eb 100644 --- a/drivers/opp/opp.h +++ b/drivers/opp/opp.h @@ -147,6 +147,7 @@ enum opp_table_access { * @clk: Device's clock handle * @regulators: Supply regulators * @regulator_count: Number of power supply regulators. Its value can be -1 + * @regulator_enabled: Set to true if regulators were previously enabled. * (uninitialized), 0 (no opp-microvolt property) or > 0 (has opp-microvolt * property). * @paths: Interconnect path handles @@ -194,6 +195,7 @@ struct opp_table { struct clk *clk; struct regulator **regulators; int regulator_count; + bool regulator_enabled; struct icc_path **paths; unsigned int path_count; bool genpd_performance_state; -- GitLab From b00e667a6d8b2b0f53098ec12ff1ffe7f7c9be20 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Wed, 27 May 2020 09:33:44 +0530 Subject: [PATCH 0831/1971] opp: Remove bandwidth votes when target_freq is zero We already drop several votes when target_freq is set to zero, drop bandwidth votes as well. 
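For context, consumers drop their OPP votes by passing a target frequency of 0 to dev_pm_opp_set_rate(), typically from a runtime-suspend path; a reduced sketch (illustrative only, not taken from a specific driver):

	static int example_runtime_suspend(struct device *dev)
	{
		/*
		 * With target_freq == 0 the OPP core disables the regulators,
		 * clears the required-opp votes and, after this patch, also
		 * sets the bandwidth of every interconnect path back to 0.
		 */
		return dev_pm_opp_set_rate(dev, 0);
	}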
Reported-by: Sibi Sankar Reviewed-by: Georgi Djakov Tested-by: Georgi Djakov Reviewed-by: Sibi Sankar Tested-by: Sibi Sankar Signed-off-by: Viresh Kumar --- drivers/opp/core.c | 49 ++++++++++++++++++++++++++++++++++------------ 1 file changed, 37 insertions(+), 12 deletions(-) diff --git a/drivers/opp/core.c b/drivers/opp/core.c index 8efe12a90ce89..dfbd3d10410ca 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -725,6 +725,34 @@ restore_voltage: return ret; } +static int _set_opp_bw(const struct opp_table *opp_table, + struct dev_pm_opp *opp, struct device *dev, bool remove) +{ + u32 avg, peak; + int i, ret; + + if (!opp_table->paths) + return 0; + + for (i = 0; i < opp_table->path_count; i++) { + if (remove) { + avg = 0; + peak = 0; + } else { + avg = opp->bandwidth[i].avg; + peak = opp->bandwidth[i].peak; + } + ret = icc_set_bw(opp_table->paths[i], avg, peak); + if (ret) { + dev_err(dev, "Failed to %s bandwidth[%d]: %d\n", + remove ? "remove" : "set", i, ret); + return ret; + } + } + + return 0; +} + static int _set_opp_custom(const struct opp_table *opp_table, struct device *dev, unsigned long old_freq, unsigned long freq, @@ -820,7 +848,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) unsigned long freq, old_freq, temp_freq; struct dev_pm_opp *old_opp, *opp; struct clk *clk; - int ret, i; + int ret; opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) { @@ -837,12 +865,17 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) if (!_get_opp_count(opp_table)) return 0; - if (!opp_table->required_opp_tables && !opp_table->regulators) { + if (!opp_table->required_opp_tables && !opp_table->regulators && + !opp_table->paths) { dev_err(dev, "target frequency can't be 0\n"); ret = -EINVAL; goto put_opp_table; } + ret = _set_opp_bw(opp_table, NULL, dev, true); + if (ret) + return ret; + if (opp_table->regulator_enabled) { regulator_disable(opp_table->regulators[0]); opp_table->regulator_enabled = false; @@ -932,16 +965,8 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) dev_err(dev, "Failed to set required opps: %d\n", ret); } - if (!ret && opp_table->paths) { - for (i = 0; i < opp_table->path_count; i++) { - ret = icc_set_bw(opp_table->paths[i], - opp->bandwidth[i].avg, - opp->bandwidth[i].peak); - if (ret) - dev_err(dev, "Failed to set bandwidth[%d]: %d\n", - i, ret); - } - } + if (!ret) + ret = _set_opp_bw(opp_table, opp, dev, false); put_opp: dev_pm_opp_put(opp); -- GitLab From 5ace60859e84113b7a185c117fbf2c429d381b59 Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Mon, 16 Mar 2020 11:22:24 +0100 Subject: [PATCH 0832/1971] i2c: smbus: Add a way to instantiate SPD EEPROMs automatically In simple cases we can instantiate SPD EEPROMs on the SMBus automatically. Start with just DDR2, DDR3 and DDR4 on x86 for now, and only for systems with no more than 4 memory slots. These limitations may be lifted later. 
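A following patch in this series adds the call site for the i801 driver; from the adapter side the whole interface is a single call once the SMBus adapter exists, roughly as below (sketch, hypothetical bus driver):

	/*
	 * Sketch: instantiate SPD EEPROMs found via DMI on this adapter.
	 * i2c_register_spd() compiles away to an empty stub when
	 * CONFIG_I2C_SMBUS or CONFIG_DMI is disabled.
	 */
	static void example_probe_optional_slaves(struct i2c_adapter *adap)
	{
		i2c_register_spd(adap);
	}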
Signed-off-by: Jean Delvare [wsa: minor change for new API] Signed-off-by: Wolfram Sang --- drivers/i2c/i2c-smbus.c | 104 +++++++++++++++++++++++++++++++++++++- include/linux/i2c-smbus.h | 8 ++- 2 files changed, 110 insertions(+), 2 deletions(-) diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c index 809bcf8387d0e..dc0108287ccf8 100644 --- a/drivers/i2c/i2c-smbus.c +++ b/drivers/i2c/i2c-smbus.c @@ -3,10 +3,11 @@ * i2c-smbus.c - SMBus extensions to the I2C protocol * * Copyright (C) 2008 David Brownell - * Copyright (C) 2010 Jean Delvare + * Copyright (C) 2010-2019 Jean Delvare */ #include +#include #include #include #include @@ -196,6 +197,107 @@ EXPORT_SYMBOL_GPL(i2c_handle_smbus_alert); module_i2c_driver(smbalert_driver); +/* + * SPD is not part of SMBus but we include it here for convenience as the + * target systems are the same. + * Restrictions to automatic SPD instantiation: + * - Only works if all filled slots have the same memory type + * - Only works for DDR2, DDR3 and DDR4 for now + * - Only works on systems with 1 to 4 memory slots + */ +#if IS_ENABLED(CONFIG_DMI) +void i2c_register_spd(struct i2c_adapter *adap) +{ + int n, slot_count = 0, dimm_count = 0; + u16 handle; + u8 common_mem_type = 0x0, mem_type; + u64 mem_size; + const char *name; + + while ((handle = dmi_memdev_handle(slot_count)) != 0xffff) { + slot_count++; + + /* Skip empty slots */ + mem_size = dmi_memdev_size(handle); + if (!mem_size) + continue; + + /* Skip undefined memory type */ + mem_type = dmi_memdev_type(handle); + if (mem_type <= 0x02) /* Invalid, Other, Unknown */ + continue; + + if (!common_mem_type) { + /* First filled slot */ + common_mem_type = mem_type; + } else { + /* Check that all filled slots have the same type */ + if (mem_type != common_mem_type) { + dev_warn(&adap->dev, + "Different memory types mixed, not instantiating SPD\n"); + return; + } + } + dimm_count++; + } + + /* No useful DMI data, bail out */ + if (!dimm_count) + return; + + dev_info(&adap->dev, "%d/%d memory slots populated (from DMI)\n", + dimm_count, slot_count); + + if (slot_count > 4) { + dev_warn(&adap->dev, + "Systems with more than 4 memory slots not supported yet, not instantiating SPD\n"); + return; + } + + switch (common_mem_type) { + case 0x13: /* DDR2 */ + case 0x18: /* DDR3 */ + case 0x1C: /* LPDDR2 */ + case 0x1D: /* LPDDR3 */ + name = "spd"; + break; + case 0x1A: /* DDR4 */ + case 0x1E: /* LPDDR4 */ + name = "ee1004"; + break; + default: + dev_info(&adap->dev, + "Memory type 0x%02x not supported yet, not instantiating SPD\n", + common_mem_type); + return; + } + + /* + * We don't know in which slots the memory modules are. We could + * try to guess from the slot names, but that would be rather complex + * and unreliable, so better probe all possible addresses until we + * have found all memory modules. 
+ */ + for (n = 0; n < slot_count && dimm_count; n++) { + struct i2c_board_info info; + unsigned short addr_list[2]; + + memset(&info, 0, sizeof(struct i2c_board_info)); + strlcpy(info.type, name, I2C_NAME_SIZE); + addr_list[0] = 0x50 + n; + addr_list[1] = I2C_CLIENT_END; + + if (!IS_ERR(i2c_new_scanned_device(adap, &info, addr_list, NULL))) { + dev_info(&adap->dev, + "Successfully instantiated SPD at 0x%hx\n", + addr_list[0]); + dimm_count--; + } + } +} +EXPORT_SYMBOL_GPL(i2c_register_spd); +#endif + MODULE_AUTHOR("Jean Delvare "); MODULE_DESCRIPTION("SMBus protocol extensions support"); MODULE_LICENSE("GPL"); diff --git a/include/linux/i2c-smbus.h b/include/linux/i2c-smbus.h index 8c5459034f923..1e4e0de4ef8bf 100644 --- a/include/linux/i2c-smbus.h +++ b/include/linux/i2c-smbus.h @@ -2,7 +2,7 @@ /* * i2c-smbus.h - SMBus extensions to the I2C protocol * - * Copyright (C) 2010 Jean Delvare + * Copyright (C) 2010-2019 Jean Delvare */ #ifndef _LINUX_I2C_SMBUS_H @@ -39,4 +39,10 @@ static inline int of_i2c_setup_smbus_alert(struct i2c_adapter *adap) } #endif +#if IS_ENABLED(CONFIG_I2C_SMBUS) && IS_ENABLED(CONFIG_DMI) +void i2c_register_spd(struct i2c_adapter *adap); +#else +static inline void i2c_register_spd(struct i2c_adapter *adap) { } +#endif + #endif /* _LINUX_I2C_SMBUS_H */ -- GitLab From 01590f361e94a01e9b9868fa81d4079d255c681f Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Mon, 16 Mar 2020 11:24:48 +0100 Subject: [PATCH 0833/1971] i2c: i801: Instantiate SPD EEPROMs automatically Call the function to instantiate SPD EEPROMs automatically on the main SMBus controller. Multiplexed SMBus systems are excluded for now as they are more complex to handle. Signed-off-by: Jean Delvare Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-i801.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 4f333889489c8..fea644921a768 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -1318,6 +1318,12 @@ static void i801_probe_optional_slaves(struct i801_priv *priv) if (is_dell_system_with_lis3lv02d()) register_dell_lis3lv02d_i2c_device(priv); + + /* Instantiate SPD EEPROMs unless the SMBus is multiplexed */ +#if IS_ENABLED(CONFIG_I2C_MUX_GPIO) + if (!priv->mux_drvdata) +#endif + i2c_register_spd(&priv->adapter); } #else static void __init input_apanel_init(void) {} -- GitLab From 34765c19cce3cba0c94d471d2e84f71d4978996e Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 9 May 2019 10:13:46 +0530 Subject: [PATCH 0834/1971] i2c: sh_mobile: simplify code and remove false compilation warning This currently generates a warning: drivers/i2c/busses/i2c-sh_mobile.c: In function 'sh_mobile_i2c_isr': drivers/i2c/busses/i2c-sh_mobile.c:399:26: warning: 'data' may be used uninitialized in this function [-Wmaybe-uninitialized] Though the code looks okay and shouldn't ever use the variable uninitialized. Fix the warning by moving the code around and getting rid of 'data'. 
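The warning pattern reduces to something like the following sketch (illustrative only, not the driver code itself):

	int real_pos = compute_pos();	/* may be negative */
	unsigned char data;

	if (real_pos >= 0)
		data = read_byte();

	/* ... other interrupt handling ... */

	if (real_pos >= 0)
		buf[real_pos] = data;	/* GCC: 'data' may be used uninitialized */

GCC does not correlate the two identical guards, so it assumes the final read of 'data' can happen without the earlier assignment; writing the received byte into the message buffer at each assignment site removes both the intermediate variable and the warning.
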
Signed-off-by: Viresh Kumar Tested-by: Wolfram Sang [wsa: minor updates to commit message] Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-sh_mobile.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c index d83ca4028fa0c..2cca1b21e26e8 100644 --- a/drivers/i2c/busses/i2c-sh_mobile.c +++ b/drivers/i2c/busses/i2c-sh_mobile.c @@ -366,7 +366,6 @@ static int sh_mobile_i2c_isr_tx(struct sh_mobile_i2c_data *pd) static int sh_mobile_i2c_isr_rx(struct sh_mobile_i2c_data *pd) { - unsigned char data; int real_pos; /* switch from TX (address) to RX (data) adds two interrupts */ @@ -387,13 +386,11 @@ static int sh_mobile_i2c_isr_rx(struct sh_mobile_i2c_data *pd) if (real_pos < 0) i2c_op(pd, OP_RX_STOP); else - data = i2c_op(pd, OP_RX_STOP_DATA); + pd->msg->buf[real_pos] = i2c_op(pd, OP_RX_STOP_DATA); } else if (real_pos >= 0) { - data = i2c_op(pd, OP_RX); + pd->msg->buf[real_pos] = i2c_op(pd, OP_RX); } - if (real_pos >= 0) - pd->msg->buf[real_pos] = data; done: pd->pos++; return pd->pos == (pd->msg->len + 2); -- GitLab From 5ad05cc8e0463f106be7ef5d1074dd877132d60a Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Wed, 4 Mar 2020 18:12:45 +0200 Subject: [PATCH 0835/1971] vfs: Remove duplicated d_mountpoint check in __is_local_mountpoint This function acts as an out-of-line helper for is_local_mountpoint is only called after the latter verifies the dentry is not a mountpoint. There's no semantic changes and the resulting object code is smaller: add/remove: 0/0 grow/shrink: 0/1 up/down: 0/-26 (-26) Function old new delta __is_local_mountpoint 147 121 -26 Total: Before=34161, After=34135, chg -0.08% Signed-off-by: Nikolay Borisov Signed-off-by: Al Viro --- fs/namespace.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/fs/namespace.c b/fs/namespace.c index a28e4db075ede..e6aed405611da 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -669,9 +669,6 @@ bool __is_local_mountpoint(struct dentry *dentry) struct mount *mnt; bool is_covered = false; - if (!d_mountpoint(dentry)) - goto out; - down_read(&namespace_sem); list_for_each_entry(mnt, &ns->list, mnt_list) { is_covered = (mnt->mnt_mountpoint == dentry); @@ -679,7 +676,7 @@ bool __is_local_mountpoint(struct dentry *dentry) break; } up_read(&namespace_sem); -out: + return is_covered; } -- GitLab From cc23402c1c2de8b1815212f3924cdbc3cda02b94 Mon Sep 17 00:00:00 2001 From: Yufen Yu Date: Thu, 5 Mar 2020 16:06:39 +0800 Subject: [PATCH 0836/1971] fs: fix indentation in deactivate_super() Fix the breaked indent in deactive_super(). Signed-off-by: Yufen Yu Signed-off-by: Al Viro --- fs/super.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/super.c b/fs/super.c index cd352530eca90..7e3491b96e1d6 100644 --- a/fs/super.c +++ b/fs/super.c @@ -361,7 +361,7 @@ EXPORT_SYMBOL(deactivate_locked_super); */ void deactivate_super(struct super_block *s) { - if (!atomic_add_unless(&s->s_active, -1, 1)) { + if (!atomic_add_unless(&s->s_active, -1, 1)) { down_write(&s->s_umount); deactivate_locked_super(s); } -- GitLab From 4969a3a2cd3c2dbb1bcdc6a3a516de9ff972372e Mon Sep 17 00:00:00 2001 From: Boris Ostrovsky Date: Thu, 28 May 2020 22:24:52 -0400 Subject: [PATCH 0837/1971] xen/pci: Get rid of verbose_request and use dev_dbg() instead Information printed under verbose_request is clearly used for debugging only. Remove it and use dev_dbg() instead. 
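With CONFIG_DYNAMIC_DEBUG the same diagnostics stay available at run time and can be enabled per source file, for example:

	echo 'file xen-pcifront.c +p' > /sys/kernel/debug/dynamic_debug/control
	echo 'file pciback_ops.c +p' > /sys/kernel/debug/dynamic_debug/control
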
Signed-off-by: Boris Ostrovsky Link: https://lore.kernel.org/r/1590719092-8578-1-git-send-email-boris.ostrovsky@oracle.com Reviewed-by: Juergen Gross Signed-off-by: Boris Ostrovsky --- drivers/pci/xen-pcifront.c | 27 ++++++---------- drivers/xen/xen-pciback/conf_space.c | 14 +++------ drivers/xen/xen-pciback/conf_space_header.c | 20 ++++-------- drivers/xen/xen-pciback/pciback.h | 2 -- drivers/xen/xen-pciback/pciback_ops.c | 35 +++++++-------------- 5 files changed, 31 insertions(+), 67 deletions(-) diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c index d1b16cf3403f4..fab267e359e74 100644 --- a/drivers/pci/xen-pcifront.c +++ b/drivers/pci/xen-pcifront.c @@ -77,9 +77,6 @@ static inline void pcifront_init_sd(struct pcifront_sd *sd, static DEFINE_SPINLOCK(pcifront_dev_lock); static struct pcifront_device *pcifront_dev; -static int verbose_request; -module_param(verbose_request, int, 0644); - static int errno_to_pcibios_err(int errno) { switch (errno) { @@ -190,18 +187,16 @@ static int pcifront_bus_read(struct pci_bus *bus, unsigned int devfn, struct pcifront_sd *sd = bus->sysdata; struct pcifront_device *pdev = pcifront_get_pdev(sd); - if (verbose_request) - dev_info(&pdev->xdev->dev, - "read dev=%04x:%02x:%02x.%d - offset %x size %d\n", - pci_domain_nr(bus), bus->number, PCI_SLOT(devfn), - PCI_FUNC(devfn), where, size); + dev_dbg(&pdev->xdev->dev, + "read dev=%04x:%02x:%02x.%d - offset %x size %d\n", + pci_domain_nr(bus), bus->number, PCI_SLOT(devfn), + PCI_FUNC(devfn), where, size); err = do_pci_op(pdev, &op); if (likely(!err)) { - if (verbose_request) - dev_info(&pdev->xdev->dev, "read got back value %x\n", - op.value); + dev_dbg(&pdev->xdev->dev, "read got back value %x\n", + op.value); *val = op.value; } else if (err == -ENODEV) { @@ -229,12 +224,10 @@ static int pcifront_bus_write(struct pci_bus *bus, unsigned int devfn, struct pcifront_sd *sd = bus->sysdata; struct pcifront_device *pdev = pcifront_get_pdev(sd); - if (verbose_request) - dev_info(&pdev->xdev->dev, - "write dev=%04x:%02x:%02x.%d - " - "offset %x size %d val %x\n", - pci_domain_nr(bus), bus->number, - PCI_SLOT(devfn), PCI_FUNC(devfn), where, size, val); + dev_dbg(&pdev->xdev->dev, + "write dev=%04x:%02x:%02x.%d - offset %x size %d val %x\n", + pci_domain_nr(bus), bus->number, + PCI_SLOT(devfn), PCI_FUNC(devfn), where, size, val); return errno_to_pcibios_err(do_pci_op(pdev, &op)); } diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c index f2df4e55fc1be..059de92aea7d0 100644 --- a/drivers/xen/xen-pciback/conf_space.c +++ b/drivers/xen/xen-pciback/conf_space.c @@ -156,9 +156,7 @@ int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size, * (as if device didn't respond) */ u32 value = 0, tmp_val; - if (unlikely(verbose_request)) - dev_printk(KERN_DEBUG, &dev->dev, "read %d bytes at 0x%x\n", - size, offset); + dev_dbg(&dev->dev, "read %d bytes at 0x%x\n", size, offset); if (!valid_request(offset, size)) { err = XEN_PCI_ERR_invalid_offset; @@ -197,9 +195,7 @@ int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size, } out: - if (unlikely(verbose_request)) - dev_printk(KERN_DEBUG, &dev->dev, - "read %d bytes at 0x%x = %x\n", size, offset, value); + dev_dbg(&dev->dev, "read %d bytes at 0x%x = %x\n", size, offset, value); *ret_val = value; return xen_pcibios_err_to_errno(err); @@ -214,10 +210,8 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value) u32 tmp_val; int field_start, field_end; - if (unlikely(verbose_request)) - 
dev_printk(KERN_DEBUG, &dev->dev, - "write request %d bytes at 0x%x = %x\n", size, - offset, value); + dev_dbg(&dev->dev, "write request %d bytes at 0x%x = %x\n", + size, offset, value); if (!valid_request(offset, size)) return XEN_PCI_ERR_invalid_offset; diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c index b277b689f2576..ac45cdc38e859 100644 --- a/drivers/xen/xen-pciback/conf_space_header.c +++ b/drivers/xen/xen-pciback/conf_space_header.c @@ -68,36 +68,30 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data) dev_data = pci_get_drvdata(dev); if (!pci_is_enabled(dev) && is_enable_cmd(value)) { - if (unlikely(verbose_request)) - dev_printk(KERN_DEBUG, &dev->dev, "enable\n"); + dev_dbg(&dev->dev, "enable\n"); err = pci_enable_device(dev); if (err) return err; if (dev_data) dev_data->enable_intx = 1; } else if (pci_is_enabled(dev) && !is_enable_cmd(value)) { - if (unlikely(verbose_request)) - dev_printk(KERN_DEBUG, &dev->dev, "disable\n"); + dev_dbg(&dev->dev, "disable\n"); pci_disable_device(dev); if (dev_data) dev_data->enable_intx = 0; } if (!dev->is_busmaster && is_master_cmd(value)) { - if (unlikely(verbose_request)) - dev_printk(KERN_DEBUG, &dev->dev, "set bus master\n"); + dev_dbg(&dev->dev, "set bus master\n"); pci_set_master(dev); } else if (dev->is_busmaster && !is_master_cmd(value)) { - if (unlikely(verbose_request)) - dev_printk(KERN_DEBUG, &dev->dev, "clear bus master\n"); + dev_dbg(&dev->dev, "clear bus master\n"); pci_clear_master(dev); } if (!(cmd->val & PCI_COMMAND_INVALIDATE) && (value & PCI_COMMAND_INVALIDATE)) { - if (unlikely(verbose_request)) - dev_printk(KERN_DEBUG, &dev->dev, - "enable memory-write-invalidate\n"); + dev_dbg(&dev->dev, "enable memory-write-invalidate\n"); err = pci_set_mwi(dev); if (err) { dev_warn(&dev->dev, "cannot enable memory-write-invalidate (%d)\n", @@ -106,9 +100,7 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data) } } else if ((cmd->val & PCI_COMMAND_INVALIDATE) && !(value & PCI_COMMAND_INVALIDATE)) { - if (unlikely(verbose_request)) - dev_printk(KERN_DEBUG, &dev->dev, - "disable memory-write-invalidate\n"); + dev_dbg(&dev->dev, "disable memory-write-invalidate\n"); pci_clear_mwi(dev); } diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h index 7c95516a860fa..f1ed2dbf685cb 100644 --- a/drivers/xen/xen-pciback/pciback.h +++ b/drivers/xen/xen-pciback/pciback.h @@ -186,8 +186,6 @@ void xen_pcibk_do_op(struct work_struct *data); int xen_pcibk_xenbus_register(void); void xen_pcibk_xenbus_unregister(void); -extern int verbose_request; - void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev); #endif diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c index 8545ca78c4f1d..e11a7438e1a25 100644 --- a/drivers/xen/xen-pciback/pciback_ops.c +++ b/drivers/xen/xen-pciback/pciback_ops.c @@ -15,9 +15,6 @@ #include #include "pciback.h" -int verbose_request; -module_param(verbose_request, int, 0644); - static irqreturn_t xen_pcibk_guest_interrupt(int irq, void *dev_id); /* Ensure a device is has the fake IRQ handler "turned on/off" and is @@ -148,9 +145,6 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev, struct xen_pcibk_dev_data *dev_data; int status; - if (unlikely(verbose_request)) - dev_printk(KERN_DEBUG, &dev->dev, "enable MSI\n"); - if (dev->msi_enabled) status = -EALREADY; else if (dev->msix_enabled) @@ -169,8 +163,8 @@ int 
xen_pcibk_enable_msi(struct xen_pcibk_device *pdev, * the local domain's IRQ number. */ op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0; - if (unlikely(verbose_request)) - dev_printk(KERN_DEBUG, &dev->dev, "MSI: %d\n", op->value); + + dev_dbg(&dev->dev, "MSI: %d\n", op->value); dev_data = pci_get_drvdata(dev); if (dev_data) @@ -183,9 +177,6 @@ static int xen_pcibk_disable_msi(struct xen_pcibk_device *pdev, struct pci_dev *dev, struct xen_pci_op *op) { - if (unlikely(verbose_request)) - dev_printk(KERN_DEBUG, &dev->dev, "disable MSI\n"); - if (dev->msi_enabled) { struct xen_pcibk_dev_data *dev_data; @@ -196,8 +187,9 @@ int xen_pcibk_disable_msi(struct xen_pcibk_device *pdev, dev_data->ack_intr = 1; } op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0; - if (unlikely(verbose_request)) - dev_printk(KERN_DEBUG, &dev->dev, "MSI: %d\n", op->value); + + dev_dbg(&dev->dev, "MSI: %d\n", op->value); + return 0; } @@ -210,8 +202,7 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev, struct msix_entry *entries; u16 cmd; - if (unlikely(verbose_request)) - dev_printk(KERN_DEBUG, &dev->dev, "enable MSI-X\n"); + dev_dbg(&dev->dev, "enable MSI-X\n"); if (op->value > SH_INFO_MAX_VEC) return -EINVAL; @@ -244,10 +235,8 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev, if (entries[i].vector) { op->msix_entries[i].vector = xen_pirq_from_irq(entries[i].vector); - if (unlikely(verbose_request)) - dev_printk(KERN_DEBUG, &dev->dev, - "MSI-X[%d]: %d\n", i, - op->msix_entries[i].vector); + dev_dbg(&dev->dev, "MSI-X[%d]: %d\n", i, + op->msix_entries[i].vector); } } } else @@ -267,9 +256,6 @@ static int xen_pcibk_disable_msix(struct xen_pcibk_device *pdev, struct pci_dev *dev, struct xen_pci_op *op) { - if (unlikely(verbose_request)) - dev_printk(KERN_DEBUG, &dev->dev, "disable MSI-X\n"); - if (dev->msix_enabled) { struct xen_pcibk_dev_data *dev_data; @@ -284,8 +270,9 @@ int xen_pcibk_disable_msix(struct xen_pcibk_device *pdev, * an undefined IRQ value of zero. */ op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0; - if (unlikely(verbose_request)) - dev_printk(KERN_DEBUG, &dev->dev, "MSI-X: %d\n", op->value); + + dev_dbg(&dev->dev, "MSI-X: %d\n", op->value); + return 0; } #endif -- GitLab From 47fa116e5faec894f6238f87322c01dd41d21e23 Mon Sep 17 00:00:00 2001 From: Yuantian Tang Date: Tue, 26 May 2020 14:02:12 +0800 Subject: [PATCH 0838/1971] thermal: qoriq: Update the settings for TMUv2 For TMU v2, TMSAR registers need to be set properly to get the accurate temperature values. Also the temperature read needs to be converted to degree Celsius since it is in degrees Kelvin. 
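As a worked example of the two conversions (raw values are illustrative): on TMU v1 a TRITSR reading of 28 is simply scaled to 28 * 1000 = 28000 millidegrees Celsius, while on v2 a reading whose low nine bits are 302 represents 302 K, and kelvin_to_millicelsius(302) returns 302 * 1000 - 273150 = 28850, i.e. about 28.9 degrees Celsius.
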
Signed-off-by: Yuantian Tang Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/20200526060212.4118-1-andy.tang@nxp.com --- drivers/thermal/qoriq_thermal.c | 26 ++++++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c index 028a6bbf75dc8..73049f9bea252 100644 --- a/drivers/thermal/qoriq_thermal.c +++ b/drivers/thermal/qoriq_thermal.c @@ -11,6 +11,7 @@ #include #include #include +#include #include "thermal_core.h" #include "thermal_hwmon.h" @@ -23,6 +24,7 @@ #define TMTMIR_DEFAULT 0x0000000f #define TIER_DISABLE 0x0 #define TEUMR0_V2 0x51009c00 +#define TMSARA_V2 0xe #define TMU_VER1 0x1 #define TMU_VER2 0x2 @@ -50,6 +52,9 @@ * Site Register */ #define TRITSR_V BIT(31) +#define REGS_V2_TMSAR(n) (0x304 + 16 * (n)) /* TMU monitoring + * site adjustment register + */ #define REGS_TTRnCR(n) (0xf10 + 4 * (n)) /* Temperature Range n * Control Register */ @@ -85,12 +90,21 @@ static int tmu_get_temp(void *p, int *temp) /* * REGS_TRITSR(id) has the following layout: * + * For TMU Rev1: * 31 ... 7 6 5 4 3 2 1 0 * V TEMP * * Where V bit signifies if the measurement is ready and is * within sensor range. TEMP is an 8 bit value representing - * temperature in C. + * temperature in Celsius. + + * For TMU Rev2: + * 31 ... 8 7 6 5 4 3 2 1 0 + * V TEMP + * + * Where V bit signifies if the measurement is ready and is + * within sensor range. TEMP is an 9 bit value representing + * temperature in KelVin. */ if (regmap_read_poll_timeout(qdata->regmap, REGS_TRITSR(qsensor->id), @@ -100,7 +114,10 @@ static int tmu_get_temp(void *p, int *temp) 10 * USEC_PER_MSEC)) return -ENODATA; - *temp = (val & 0xff) * 1000; + if (qdata->ver == TMU_VER1) + *temp = (val & GENMASK(7, 0)) * MILLIDEGREE_PER_DEGREE; + else + *temp = kelvin_to_millicelsius(val & GENMASK(8, 0)); return 0; } @@ -192,6 +209,8 @@ static int qoriq_tmu_calibration(struct device *dev, static void qoriq_tmu_init_device(struct qoriq_tmu_data *data) { + int i; + /* Disable interrupt, using polling instead */ regmap_write(data->regmap, REGS_TIER, TIER_DISABLE); @@ -202,6 +221,8 @@ static void qoriq_tmu_init_device(struct qoriq_tmu_data *data) } else { regmap_write(data->regmap, REGS_V2_TMTMIR, TMTMIR_DEFAULT); regmap_write(data->regmap, REGS_V2_TEUMR(0), TEUMR0_V2); + for (i = 0; i < SITES_MAX; i++) + regmap_write(data->regmap, REGS_V2_TMSAR(i), TMSARA_V2); } /* Disable monitoring */ @@ -212,6 +233,7 @@ static const struct regmap_range qoriq_yes_ranges[] = { regmap_reg_range(REGS_TMR, REGS_TSCFGR), regmap_reg_range(REGS_TTRnCR(0), REGS_TTRnCR(3)), regmap_reg_range(REGS_V2_TEUMR(0), REGS_V2_TEUMR(2)), + regmap_reg_range(REGS_V2_TMSAR(0), REGS_V2_TMSAR(15)), regmap_reg_range(REGS_IPBRR(0), REGS_IPBRR(1)), /* Read only registers below */ regmap_reg_range(REGS_TRITSR(0), REGS_TRITSR(15)), -- GitLab From 0ba13c763aacb27ab32bde5d559bf40e88465921 Mon Sep 17 00:00:00 2001 From: Matthew Garrett Date: Mon, 13 Apr 2020 19:09:51 -0700 Subject: [PATCH 0839/1971] thermal/int340x_thermal: Export GDDV Implementing DPTF properly requires making use of firmware-provided information associated with the INT3400 device. Calling GDDV provides a buffer of information which userland can then interpret to determine appropriate DPTF policy. 
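Userspace can then consume the blob like any other binary sysfs attribute, for example (the device path is illustrative of a typical INT3400 platform device):

	hexdump -C /sys/bus/platform/devices/INT3400:00/data_vault | head
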
Conflicts: drivers/thermal/intel/int340x_thermal/int3400_thermal.c Signed-off-by: Matthew Garrett Tested-by: Pandruvada, Srinivas Signed-off-by: Zhang Rui Link: https://lore.kernel.org/r/20200414020953.255364-1-matthewgarrett@google.com --- .../intel/int340x_thermal/int3400_thermal.c | 60 +++++++++++++++++++ 1 file changed, 60 insertions(+) diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c index e802922a13cfc..0ad0469e91bae 100644 --- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c @@ -52,6 +52,25 @@ struct int3400_thermal_priv { u8 uuid_bitmap; int rel_misc_dev_res; int current_uuid_index; + char *data_vault; +}; + +static ssize_t data_vault_read(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, char *buf, loff_t off, size_t count) +{ + memcpy(buf, attr->private + off, count); + return count; +} + +static BIN_ATTR_RO(data_vault, 0); + +static struct bin_attribute *data_attributes[] = { + &bin_attr_data_vault, + NULL, +}; + +static const struct attribute_group data_attribute_group = { + .bin_attrs = data_attributes, }; static ssize_t available_uuids_show(struct device *dev, @@ -280,6 +299,32 @@ static struct thermal_zone_params int3400_thermal_params = { .no_hwmon = true, }; +static void int3400_setup_gddv(struct int3400_thermal_priv *priv) +{ + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *obj; + acpi_status status; + + status = acpi_evaluate_object(priv->adev->handle, "GDDV", NULL, + &buffer); + if (ACPI_FAILURE(status) || !buffer.length) + return; + + obj = buffer.pointer; + if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count != 1 + || obj->package.elements[0].type != ACPI_TYPE_BUFFER) { + kfree(buffer.pointer); + return; + } + + priv->data_vault = kmemdup(obj->package.elements[0].buffer.pointer, + obj->package.elements[0].buffer.length, + GFP_KERNEL); + bin_attr_data_vault.private = priv->data_vault; + bin_attr_data_vault.size = obj->package.elements[0].buffer.length; + kfree(buffer.pointer); +} + static int int3400_thermal_probe(struct platform_device *pdev) { struct acpi_device *adev = ACPI_COMPANION(&pdev->dev); @@ -311,6 +356,8 @@ static int int3400_thermal_probe(struct platform_device *pdev) platform_set_drvdata(pdev, priv); + int3400_setup_gddv(priv); + priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0, priv, &int3400_thermal_ops, &int3400_thermal_params, 0, 0); @@ -326,6 +373,13 @@ static int int3400_thermal_probe(struct platform_device *pdev) if (result) goto free_rel_misc; + if (priv->data_vault) { + result = sysfs_create_group(&pdev->dev.kobj, + &data_attribute_group); + if (result) + goto free_uuid; + } + result = acpi_install_notify_handler( priv->adev->handle, ACPI_DEVICE_NOTIFY, int3400_notify, (void *)priv); @@ -335,6 +389,9 @@ static int int3400_thermal_probe(struct platform_device *pdev) return 0; free_sysfs: + if (priv->data_vault) + sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group); +free_uuid: sysfs_remove_group(&pdev->dev.kobj, &uuid_attribute_group); free_rel_misc: if (!priv->rel_misc_dev_res) @@ -359,8 +416,11 @@ static int int3400_thermal_remove(struct platform_device *pdev) if (!priv->rel_misc_dev_res) acpi_thermal_rel_misc_device_remove(priv->adev->handle); + if (priv->data_vault) + sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group); sysfs_remove_group(&pdev->dev.kobj, &uuid_attribute_group); 
thermal_zone_device_unregister(priv->thermal); + kfree(priv->data_vault); kfree(priv->trts); kfree(priv->arts); kfree(priv); -- GitLab From 006f006f1e5c48e5ddd6515eab2c9a7b27ce1144 Mon Sep 17 00:00:00 2001 From: Matthew Garrett Date: Mon, 13 Apr 2020 19:09:52 -0700 Subject: [PATCH 0840/1971] thermal/int340x_thermal: Export OEM vendor variables The platform vendor may expose an array of OEM-specific values to be used in determining DPTF policy. These are obtained via the ODVP method, and then simply exposed in sysfs. In addition, they are updated when a notification is received or when the DPTF policy is changed by userland. Conflicts: drivers/thermal/intel/int340x_thermal/int3400_thermal.c Signed-off-by: Matthew Garrett Tested-by: Pandruvada, Srinivas Signed-off-by: Zhang Rui Link: https://lore.kernel.org/r/20200414020953.255364-2-matthewgarrett@google.com --- .../intel/int340x_thermal/int3400_thermal.c | 132 +++++++++++++++++- 1 file changed, 131 insertions(+), 1 deletion(-) diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c index 0ad0469e91bae..87d9b075363cf 100644 --- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c @@ -13,6 +13,7 @@ #include "acpi_thermal_rel.h" #define INT3400_THERMAL_TABLE_CHANGED 0x83 +#define INT3400_ODVP_CHANGED 0x88 enum int3400_thermal_uuid { INT3400_THERMAL_PASSIVE_1, @@ -41,8 +42,11 @@ static char *int3400_thermal_uuids[INT3400_THERMAL_MAXIMUM_UUID] = { "BE84BABF-C4D4-403D-B495-3128FD44dAC1", }; +struct odvp_attr; + struct int3400_thermal_priv { struct acpi_device *adev; + struct platform_device *pdev; struct thermal_zone_device *thermal; int mode; int art_count; @@ -53,6 +57,17 @@ struct int3400_thermal_priv { int rel_misc_dev_res; int current_uuid_index; char *data_vault; + int odvp_count; + int *odvp; + struct odvp_attr *odvp_attrs; +}; + +static int evaluate_odvp(struct int3400_thermal_priv *priv); + +struct odvp_attr { + int odvp; + struct int3400_thermal_priv *priv; + struct kobj_attribute attr; }; static ssize_t data_vault_read(struct file *file, struct kobject *kobj, @@ -210,9 +225,110 @@ static int int3400_thermal_run_osc(acpi_handle handle, result = -EPERM; kfree(context.ret.pointer); + return result; } +static ssize_t odvp_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf) +{ + struct odvp_attr *odvp_attr; + + odvp_attr = container_of(attr, struct odvp_attr, attr); + + return sprintf(buf, "%d\n", odvp_attr->priv->odvp[odvp_attr->odvp]); +} + +static void cleanup_odvp(struct int3400_thermal_priv *priv) +{ + int i; + + if (priv->odvp_attrs) { + for (i = 0; i < priv->odvp_count; i++) { + sysfs_remove_file(&priv->pdev->dev.kobj, + &priv->odvp_attrs[i].attr.attr); + kfree(priv->odvp_attrs[i].attr.attr.name); + } + kfree(priv->odvp_attrs); + } + kfree(priv->odvp); + priv->odvp_count = 0; +} + +static int evaluate_odvp(struct int3400_thermal_priv *priv) +{ + struct acpi_buffer odvp = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *obj = NULL; + acpi_status status; + int i, ret; + + status = acpi_evaluate_object(priv->adev->handle, "ODVP", NULL, &odvp); + if (ACPI_FAILURE(status)) { + ret = -EINVAL; + goto out_err; + } + + obj = odvp.pointer; + if (obj->type != ACPI_TYPE_PACKAGE) { + ret = -EINVAL; + goto out_err; + } + + if (priv->odvp == NULL) { + priv->odvp_count = obj->package.count; + priv->odvp = kmalloc_array(priv->odvp_count, sizeof(int), + GFP_KERNEL); + if (!priv->odvp) { + ret = 
-ENOMEM; + goto out_err; + } + } + + if (priv->odvp_attrs == NULL) { + priv->odvp_attrs = kcalloc(priv->odvp_count, + sizeof(struct odvp_attr), + GFP_KERNEL); + if (!priv->odvp_attrs) { + ret = -ENOMEM; + goto out_err; + } + for (i = 0; i < priv->odvp_count; i++) { + struct odvp_attr *odvp = &priv->odvp_attrs[i]; + + sysfs_attr_init(&odvp->attr.attr); + odvp->priv = priv; + odvp->odvp = i; + odvp->attr.attr.name = kasprintf(GFP_KERNEL, + "odvp%d", i); + + if (!odvp->attr.attr.name) { + ret = -ENOMEM; + goto out_err; + } + odvp->attr.attr.mode = 0444; + odvp->attr.show = odvp_show; + odvp->attr.store = NULL; + ret = sysfs_create_file(&priv->pdev->dev.kobj, + &odvp->attr.attr); + if (ret) + goto out_err; + } + } + + for (i = 0; i < obj->package.count; i++) { + if (obj->package.elements[i].type == ACPI_TYPE_INTEGER) + priv->odvp[i] = obj->package.elements[i].integer.value; + } + + kfree(obj); + return 0; + +out_err: + cleanup_odvp(priv); + kfree(obj); + return ret; +} + static void int3400_notify(acpi_handle handle, u32 event, void *data) @@ -236,6 +352,9 @@ static void int3400_notify(acpi_handle handle, kobject_uevent_env(&priv->thermal->device.kobj, KOBJ_CHANGE, thermal_prop); break; + case INT3400_ODVP_CHANGED: + evaluate_odvp(priv); + break; default: /* Ignore unknown notification codes sent to INT3400 device */ break; @@ -285,6 +404,9 @@ static int int3400_thermal_set_mode(struct thermal_zone_device *thermal, priv->current_uuid_index, enable); } + + evaluate_odvp(priv); + return result; } @@ -338,6 +460,7 @@ static int int3400_thermal_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; + priv->pdev = pdev; priv->adev = adev; result = int3400_thermal_get_uuids(priv); @@ -358,6 +481,8 @@ static int int3400_thermal_probe(struct platform_device *pdev) int3400_setup_gddv(priv); + evaluate_odvp(priv); + priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0, priv, &int3400_thermal_ops, &int3400_thermal_params, 0, 0); @@ -389,8 +514,11 @@ static int int3400_thermal_probe(struct platform_device *pdev) return 0; free_sysfs: - if (priv->data_vault) + cleanup_odvp(priv); + if (priv->data_vault) { sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group); + kfree(priv->data_vault); + } free_uuid: sysfs_remove_group(&pdev->dev.kobj, &uuid_attribute_group); free_rel_misc: @@ -413,6 +541,8 @@ static int int3400_thermal_remove(struct platform_device *pdev) priv->adev->handle, ACPI_DEVICE_NOTIFY, int3400_notify); + cleanup_odvp(priv); + if (!priv->rel_misc_dev_res) acpi_thermal_rel_misc_device_remove(priv->adev->handle); -- GitLab From 8d485da0ddee79d0e6713405694253d401e41b93 Mon Sep 17 00:00:00 2001 From: Matthew Garrett Date: Mon, 13 Apr 2020 19:09:53 -0700 Subject: [PATCH 0841/1971] thermal/int340x_thermal: Don't require IDSP to exist The IDSP method doesn't appear to exist on the most recent Intel platforms: instead, the IDSP data is included in the GDDV blob. Since we probably don't want to decompress and parse that in-kernel, just allow any UUID to be written if IDSP is missing. 
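On such systems userspace selects a policy by writing one of the known UUID strings directly; the path and UUID below are illustrative (the UUID is one of those listed in int3400_thermal_uuids):

	echo BE84BABF-C4D4-403D-B495-3128FD44dAC1 > /sys/bus/platform/devices/INT3400:00/uuid/current_uuid
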
Signed-off-by: Matthew Garrett Tested-by: Pandruvada, Srinivas [ rzhang: fix checkpatch warning in changelog ] Signed-off-by: Zhang Rui Link: https://lore.kernel.org/r/20200414020953.255364-3-matthewgarrett@google.com --- .../intel/int340x_thermal/int3400_thermal.c | 30 ++++++++++++++----- 1 file changed, 22 insertions(+), 8 deletions(-) diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c index 87d9b075363cf..0b3a626558435 100644 --- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c @@ -96,6 +96,9 @@ static ssize_t available_uuids_show(struct device *dev, int i; int length = 0; + if (!priv->uuid_bitmap) + return sprintf(buf, "UNKNOWN\n"); + for (i = 0; i < INT3400_THERMAL_MAXIMUM_UUID; i++) { if (priv->uuid_bitmap & (1 << i)) if (PAGE_SIZE - length > 0) @@ -113,11 +116,11 @@ static ssize_t current_uuid_show(struct device *dev, { struct int3400_thermal_priv *priv = dev_get_drvdata(dev); - if (priv->uuid_bitmap & (1 << priv->current_uuid_index)) - return sprintf(buf, "%s\n", - int3400_thermal_uuids[priv->current_uuid_index]); - else + if (priv->current_uuid_index == -1) return sprintf(buf, "INVALID\n"); + + return sprintf(buf, "%s\n", + int3400_thermal_uuids[priv->current_uuid_index]); } static ssize_t current_uuid_store(struct device *dev, @@ -128,9 +131,16 @@ static ssize_t current_uuid_store(struct device *dev, int i; for (i = 0; i < INT3400_THERMAL_MAXIMUM_UUID; ++i) { - if ((priv->uuid_bitmap & (1 << i)) && - !(strncmp(buf, int3400_thermal_uuids[i], - sizeof(int3400_thermal_uuids[i]) - 1))) { + if (!strncmp(buf, int3400_thermal_uuids[i], + sizeof(int3400_thermal_uuids[i]) - 1)) { + /* + * If we have a list of supported UUIDs, make sure + * this one is supported. + */ + if (priv->uuid_bitmap && + !(priv->uuid_bitmap & (1 << i))) + return -EINVAL; + priv->current_uuid_index = i; return count; } @@ -464,9 +474,13 @@ static int int3400_thermal_probe(struct platform_device *pdev) priv->adev = adev; result = int3400_thermal_get_uuids(priv); - if (result) + + /* Missing IDSP isn't fatal */ + if (result && result != -ENODEV) goto free_priv; + priv->current_uuid_index = -1; + result = acpi_parse_art(priv->adev->handle, &priv->art_count, &priv->arts, true); if (result) -- GitLab From 7e71609f64ec81b8367b7fa59ab06bb571d17e3b Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 19 Feb 2020 09:54:24 -0500 Subject: [PATCH 0842/1971] pselect6() and friends: take handling the combined 6th/7th args into helper ... and use unsafe_get_user(), while we are at it. Signed-off-by: Al Viro --- fs/select.c | 112 ++++++++++++++++++++++++++++++---------------------- 1 file changed, 64 insertions(+), 48 deletions(-) diff --git a/fs/select.c b/fs/select.c index 11d0285d46b7e..7aef49552d4c2 100644 --- a/fs/select.c +++ b/fs/select.c @@ -766,22 +766,38 @@ static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp, * which has a pointer to the sigset_t itself followed by a size_t containing * the sigset size. 
*/ +struct sigset_argpack { + sigset_t __user *p; + size_t size; +}; + +static inline int get_sigset_argpack(struct sigset_argpack *to, + struct sigset_argpack __user *from) +{ + // the path is hot enough for overhead of copy_from_user() to matter + if (from) { + if (!user_read_access_begin(from, sizeof(*from))) + return -EFAULT; + unsafe_get_user(to->p, &from->p, Efault); + unsafe_get_user(to->size, &from->size, Efault); + user_read_access_end(); + } + return 0; +Efault: + user_access_end(); + return -EFAULT; +} + SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp, fd_set __user *, exp, struct __kernel_timespec __user *, tsp, void __user *, sig) { - size_t sigsetsize = 0; - sigset_t __user *up = NULL; - - if (sig) { - if (!access_ok(sig, sizeof(void *)+sizeof(size_t)) - || __get_user(up, (sigset_t __user * __user *)sig) - || __get_user(sigsetsize, - (size_t __user *)(sig+sizeof(void *)))) - return -EFAULT; - } + struct sigset_argpack x = {NULL, 0}; + + if (get_sigset_argpack(&x, sig)) + return -EFAULT; - return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize, PT_TIMESPEC); + return do_pselect(n, inp, outp, exp, tsp, x.p, x.size, PT_TIMESPEC); } #if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT) @@ -790,18 +806,12 @@ SYSCALL_DEFINE6(pselect6_time32, int, n, fd_set __user *, inp, fd_set __user *, fd_set __user *, exp, struct old_timespec32 __user *, tsp, void __user *, sig) { - size_t sigsetsize = 0; - sigset_t __user *up = NULL; - - if (sig) { - if (!access_ok(sig, sizeof(void *)+sizeof(size_t)) - || __get_user(up, (sigset_t __user * __user *)sig) - || __get_user(sigsetsize, - (size_t __user *)(sig+sizeof(void *)))) - return -EFAULT; - } + struct sigset_argpack x = {NULL, 0}; + + if (get_sigset_argpack(&x, sig)) + return -EFAULT; - return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize, PT_OLD_TIMESPEC); + return do_pselect(n, inp, outp, exp, tsp, x.p, x.size, PT_OLD_TIMESPEC); } #endif @@ -1325,24 +1335,37 @@ static long do_compat_pselect(int n, compat_ulong_t __user *inp, return poll_select_finish(&end_time, tsp, type, ret); } +struct compat_sigset_argpack { + compat_uptr_t p; + compat_size_t size; +}; +static inline int get_compat_sigset_argpack(struct compat_sigset_argpack *to, + struct compat_sigset_argpack __user *from) +{ + if (from) { + if (!user_read_access_begin(from, sizeof(*from))) + return -EFAULT; + unsafe_get_user(to->p, &from->p, Efault); + unsafe_get_user(to->size, &from->size, Efault); + user_read_access_end(); + } + return 0; +Efault: + user_access_end(); + return -EFAULT; +} + COMPAT_SYSCALL_DEFINE6(pselect6_time64, int, n, compat_ulong_t __user *, inp, compat_ulong_t __user *, outp, compat_ulong_t __user *, exp, struct __kernel_timespec __user *, tsp, void __user *, sig) { - compat_size_t sigsetsize = 0; - compat_uptr_t up = 0; - - if (sig) { - if (!access_ok(sig, - sizeof(compat_uptr_t)+sizeof(compat_size_t)) || - __get_user(up, (compat_uptr_t __user *)sig) || - __get_user(sigsetsize, - (compat_size_t __user *)(sig+sizeof(up)))) - return -EFAULT; - } + struct compat_sigset_argpack x = {0, 0}; + + if (get_compat_sigset_argpack(&x, sig)) + return -EFAULT; - return do_compat_pselect(n, inp, outp, exp, tsp, compat_ptr(up), - sigsetsize, PT_TIMESPEC); + return do_compat_pselect(n, inp, outp, exp, tsp, compat_ptr(x.p), + x.size, PT_TIMESPEC); } #if defined(CONFIG_COMPAT_32BIT_TIME) @@ -1351,20 +1374,13 @@ COMPAT_SYSCALL_DEFINE6(pselect6_time32, int, n, compat_ulong_t __user *, inp, compat_ulong_t __user *, outp, compat_ulong_t 
__user *, exp, struct old_timespec32 __user *, tsp, void __user *, sig) { - compat_size_t sigsetsize = 0; - compat_uptr_t up = 0; - - if (sig) { - if (!access_ok(sig, - sizeof(compat_uptr_t)+sizeof(compat_size_t)) || - __get_user(up, (compat_uptr_t __user *)sig) || - __get_user(sigsetsize, - (compat_size_t __user *)(sig+sizeof(up)))) - return -EFAULT; - } + struct compat_sigset_argpack x = {0, 0}; + + if (get_compat_sigset_argpack(&x, sig)) + return -EFAULT; - return do_compat_pselect(n, inp, outp, exp, tsp, compat_ptr(up), - sigsetsize, PT_OLD_TIMESPEC); + return do_compat_pselect(n, inp, outp, exp, tsp, compat_ptr(x.p), + x.size, PT_OLD_TIMESPEC); } #endif -- GitLab From e00091071615378e66291c61ea02d02d15a2cc7e Mon Sep 17 00:00:00 2001 From: Bin Meng Date: Fri, 1 May 2020 21:30:21 -0700 Subject: [PATCH 0843/1971] m68k: Drop CONFIG_MTD_M25P80 in stmark2_defconfig Drop CONFIG_MTD_M25P80 that was removed in commit b35b9a10362d ("mtd: spi-nor: Move m25p80 code in spi-nor.c") Signed-off-by: Bin Meng Signed-off-by: Greg Ungerer --- arch/m68k/configs/stmark2_defconfig | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/m68k/configs/stmark2_defconfig b/arch/m68k/configs/stmark2_defconfig index 27fa9465d19d2..2b746f55f419f 100644 --- a/arch/m68k/configs/stmark2_defconfig +++ b/arch/m68k/configs/stmark2_defconfig @@ -48,7 +48,6 @@ CONFIG_MTD_CFI_STAA=y CONFIG_MTD_ROM=y CONFIG_MTD_COMPLEX_MAPPINGS=y CONFIG_MTD_PLATRAM=y -CONFIG_MTD_M25P80=y CONFIG_MTD_SPI_NOR=y # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set -- GitLab From ce3e83759c674ae7c04938d3577dbf0f3144c8f0 Mon Sep 17 00:00:00 2001 From: Luc Van Oostenryck Date: Fri, 29 May 2020 21:02:17 +0200 Subject: [PATCH 0844/1971] m68k,nommu: add missing __user in uaccess' __ptr() macro The assembly for __get_user() & __put_user() uses a macro, __ptr(), to cast the pointer to 'unsigned long *' but the pointer is always a __user one and so this cast creates a lot of warnings when using Sparse. So, change to the cast to 'unsigned long __user *'. Reported-by: kbuild test robot Signed-off-by: Luc Van Oostenryck Signed-off-by: Greg Ungerer --- arch/m68k/include/asm/uaccess_no.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/m68k/include/asm/uaccess_no.h b/arch/m68k/include/asm/uaccess_no.h index a24cfe4a0d328..9651766a62af5 100644 --- a/arch/m68k/include/asm/uaccess_no.h +++ b/arch/m68k/include/asm/uaccess_no.h @@ -60,7 +60,7 @@ extern int __put_user_bad(void); * aliasing issues. */ -#define __ptr(x) ((unsigned long *)(x)) +#define __ptr(x) ((unsigned long __user *)(x)) #define __put_user_asm(err,x,ptr,bwl) \ __asm__ ("move" #bwl " %0,%1" \ -- GitLab From 9e2b6ed41f8f99c97b13c9d15cbef235dbd97fb6 Mon Sep 17 00:00:00 2001 From: Luc Van Oostenryck Date: Fri, 29 May 2020 21:02:18 +0200 Subject: [PATCH 0845/1971] m68k,nommu: fix implicit cast from __user in __{get,put}_user_asm() The assembly for __get_user_asm() & __put_user_asm() uses memcpy() when the size is 8. However, the pointer is always a __user one while memcpy() expects a plain one and so this cast creates a lot of warnings when using Sparse. So, fix this by adding a cast to 'void __force *' at memcpy()'s argument. 
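As a reminder of what Sparse checks here, a minimal sketch rather than the kernel code itself:

	void *dst;
	const void __user *src;		/* lives in the user address space */
	size_t n;

	memcpy(dst, src, n);				/* warning: incorrect type in argument 2 (different address spaces) */
	memcpy(dst, (const void __force *)src, n);	/* __force documents the intentional crossing */

The cast does not change the generated code; it only tells Sparse that dropping the __user annotation at this call site is deliberate.
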
Reported-by: kbuild test robot Signed-off-by: Luc Van Oostenryck Signed-off-by: Greg Ungerer --- arch/m68k/include/asm/uaccess_no.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/m68k/include/asm/uaccess_no.h b/arch/m68k/include/asm/uaccess_no.h index 9651766a62af5..dcfb69361408a 100644 --- a/arch/m68k/include/asm/uaccess_no.h +++ b/arch/m68k/include/asm/uaccess_no.h @@ -42,7 +42,7 @@ static inline int _access_ok(unsigned long addr, unsigned long size) __put_user_asm(__pu_err, __pu_val, ptr, l); \ break; \ case 8: \ - memcpy(ptr, &__pu_val, sizeof (*(ptr))); \ + memcpy((void __force *)ptr, &__pu_val, sizeof(*(ptr))); \ break; \ default: \ __pu_err = __put_user_bad(); \ @@ -85,7 +85,7 @@ extern int __put_user_bad(void); u64 l; \ __typeof__(*(ptr)) t; \ } __gu_val; \ - memcpy(&__gu_val.l, ptr, sizeof(__gu_val.l)); \ + memcpy(&__gu_val.l, (const void __force *)ptr, sizeof(__gu_val.l)); \ (x) = __gu_val.t; \ break; \ } \ -- GitLab From c5ec874e55ba87764357398d60e22bace0d2511d Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Thu, 30 Apr 2020 07:41:33 -0700 Subject: [PATCH 0846/1971] fs/xfs: Remove unnecessary initialization of i_rwsem An earlier call of xfs_reinit_inode() from xfs_iget_cache_hit() already handles initialization of i_rwsem. Doing so again is unneeded. Reviewed-by: Darrick J. Wong Reviewed-by: Dave Chinner Signed-off-by: Ira Weiny Signed-off-by: Darrick J. Wong --- fs/xfs/xfs_icache.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c index d7deab608e2c2..888646d74d7d5 100644 --- a/fs/xfs/xfs_icache.c +++ b/fs/xfs/xfs_icache.c @@ -423,6 +423,7 @@ xfs_iget_cache_hit( spin_unlock(&ip->i_flags_lock); rcu_read_unlock(); + ASSERT(!rwsem_is_locked(&inode->i_rwsem)); error = xfs_reinit_inode(mp, inode); if (error) { bool wake; @@ -456,9 +457,6 @@ xfs_iget_cache_hit( ip->i_sick = 0; ip->i_checked = 0; - ASSERT(!rwsem_is_locked(&inode->i_rwsem)); - init_rwsem(&inode->i_rwsem); - spin_unlock(&ip->i_flags_lock); spin_unlock(&pag->pag_ici_lock); } else { -- GitLab From fd58c62b65626934c5ac9f9a15e0a8d4bcf9ef47 Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Thu, 30 Apr 2020 07:41:35 -0700 Subject: [PATCH 0847/1971] fs/xfs: Change XFS_MOUNT_DAX to XFS_MOUNT_DAX_ALWAYS In prep for the new tri-state mount option which then introduces XFS_MOUNT_DAX_NEVER. Reviewed-by: Dave Chinner Reviewed-by: Darrick J. Wong Signed-off-by: Ira Weiny Signed-off-by: Darrick J. Wong --- fs/xfs/xfs_iops.c | 2 +- fs/xfs/xfs_mount.h | 3 +-- fs/xfs/xfs_super.c | 8 ++++---- 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index f7a99b3bbcf7a..462f89af479a7 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c @@ -1248,7 +1248,7 @@ xfs_inode_supports_dax( return false; /* DAX mount option or DAX iflag must be set. */ - if (!(mp->m_flags & XFS_MOUNT_DAX) && + if (!(mp->m_flags & XFS_MOUNT_DAX_ALWAYS) && !(ip->i_d.di_flags2 & XFS_DIFLAG2_DAX)) return false; diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index b2e4598fdf7d3..f6123fb0113c6 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -237,8 +237,7 @@ typedef struct xfs_mount { #define XFS_MOUNT_FILESTREAMS (1ULL << 24) /* enable the filestreams allocator */ #define XFS_MOUNT_NOATTR2 (1ULL << 25) /* disable use of attr2 format */ - -#define XFS_MOUNT_DAX (1ULL << 62) /* TEST ONLY! 
*/ +#define XFS_MOUNT_DAX_ALWAYS (1ULL << 26) /* * Max and min values for mount-option defined I/O diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index a7f8e387ebfef..51fdef91edda5 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -129,7 +129,7 @@ xfs_fs_show_options( { XFS_MOUNT_GRPID, ",grpid" }, { XFS_MOUNT_DISCARD, ",discard" }, { XFS_MOUNT_LARGEIO, ",largeio" }, - { XFS_MOUNT_DAX, ",dax" }, + { XFS_MOUNT_DAX_ALWAYS, ",dax" }, { 0, NULL } }; struct xfs_mount *mp = XFS_M(root->d_sb); @@ -1261,7 +1261,7 @@ xfs_fc_parse_param( return 0; #ifdef CONFIG_FS_DAX case Opt_dax: - mp->m_flags |= XFS_MOUNT_DAX; + mp->m_flags |= XFS_MOUNT_DAX_ALWAYS; return 0; #endif default: @@ -1454,7 +1454,7 @@ xfs_fc_fill_super( if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5) sb->s_flags |= SB_I_VERSION; - if (mp->m_flags & XFS_MOUNT_DAX) { + if (mp->m_flags & XFS_MOUNT_DAX_ALWAYS) { bool rtdev_is_dax = false, datadev_is_dax; xfs_warn(mp, @@ -1468,7 +1468,7 @@ xfs_fc_fill_super( if (!rtdev_is_dax && !datadev_is_dax) { xfs_alert(mp, "DAX unsupported by block device. Turning off DAX."); - mp->m_flags &= ~XFS_MOUNT_DAX; + mp->m_flags &= ~XFS_MOUNT_DAX_ALWAYS; } if (xfs_sb_version_hasreflink(&mp->m_sb)) { xfs_alert(mp, -- GitLab From 02beb2686ff964884756c581d513e103542dcc6a Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Thu, 30 Apr 2020 07:41:35 -0700 Subject: [PATCH 0848/1971] fs/xfs: Make DAX mount option a tri-state As agreed upon[1]. We make the dax mount option a tri-state. '-o dax' continues to operate the same. We add 'always', 'never', and 'inode' (default). [1] https://lore.kernel.org/lkml/20200405061945.GA94792@iweiny-DESK2.sc.intel.com/ Signed-off-by: Ira Weiny Reviewed-by: Darrick J. Wong Signed-off-by: Darrick J. Wong --- fs/xfs/xfs_mount.h | 1 + fs/xfs/xfs_super.c | 46 ++++++++++++++++++++++++++++++++++++++++++---- 2 files changed, 43 insertions(+), 4 deletions(-) diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index f6123fb0113c6..37bfb50db8090 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -238,6 +238,7 @@ typedef struct xfs_mount { allocator */ #define XFS_MOUNT_NOATTR2 (1ULL << 25) /* disable use of attr2 format */ #define XFS_MOUNT_DAX_ALWAYS (1ULL << 26) +#define XFS_MOUNT_DAX_NEVER (1ULL << 27) /* * Max and min values for mount-option defined I/O diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index 51fdef91edda5..6f91c13fb6ea8 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -47,6 +47,39 @@ static struct kset *xfs_kset; /* top-level xfs sysfs dir */ static struct xfs_kobj xfs_dbg_kobj; /* global debug sysfs attrs */ #endif +enum xfs_dax_mode { + XFS_DAX_INODE = 0, + XFS_DAX_ALWAYS = 1, + XFS_DAX_NEVER = 2, +}; + +static void +xfs_mount_set_dax_mode( + struct xfs_mount *mp, + enum xfs_dax_mode mode) +{ + switch (mode) { + case XFS_DAX_INODE: + mp->m_flags &= ~(XFS_MOUNT_DAX_ALWAYS | XFS_MOUNT_DAX_NEVER); + break; + case XFS_DAX_ALWAYS: + mp->m_flags |= XFS_MOUNT_DAX_ALWAYS; + mp->m_flags &= ~XFS_MOUNT_DAX_NEVER; + break; + case XFS_DAX_NEVER: + mp->m_flags |= XFS_MOUNT_DAX_NEVER; + mp->m_flags &= ~XFS_MOUNT_DAX_ALWAYS; + break; + } +} + +static const struct constant_table dax_param_enums[] = { + {"inode", XFS_DAX_INODE }, + {"always", XFS_DAX_ALWAYS }, + {"never", XFS_DAX_NEVER }, + {} +}; + /* * Table driven mount option parser. 
*/ @@ -59,7 +92,7 @@ enum { Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota, Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce, - Opt_discard, Opt_nodiscard, Opt_dax, + Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum, }; static const struct fs_parameter_spec xfs_fs_parameters[] = { @@ -103,6 +136,7 @@ static const struct fs_parameter_spec xfs_fs_parameters[] = { fsparam_flag("discard", Opt_discard), fsparam_flag("nodiscard", Opt_nodiscard), fsparam_flag("dax", Opt_dax), + fsparam_enum("dax", Opt_dax_enum, dax_param_enums), {} }; @@ -129,7 +163,8 @@ xfs_fs_show_options( { XFS_MOUNT_GRPID, ",grpid" }, { XFS_MOUNT_DISCARD, ",discard" }, { XFS_MOUNT_LARGEIO, ",largeio" }, - { XFS_MOUNT_DAX_ALWAYS, ",dax" }, + { XFS_MOUNT_DAX_ALWAYS, ",dax=always" }, + { XFS_MOUNT_DAX_NEVER, ",dax=never" }, { 0, NULL } }; struct xfs_mount *mp = XFS_M(root->d_sb); @@ -1261,7 +1296,10 @@ xfs_fc_parse_param( return 0; #ifdef CONFIG_FS_DAX case Opt_dax: - mp->m_flags |= XFS_MOUNT_DAX_ALWAYS; + xfs_mount_set_dax_mode(mp, XFS_DAX_ALWAYS); + return 0; + case Opt_dax_enum: + xfs_mount_set_dax_mode(mp, result.uint_32); return 0; #endif default: @@ -1468,7 +1506,7 @@ xfs_fc_fill_super( if (!rtdev_is_dax && !datadev_is_dax) { xfs_alert(mp, "DAX unsupported by block device. Turning off DAX."); - mp->m_flags &= ~XFS_MOUNT_DAX_ALWAYS; + xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER); } if (xfs_sb_version_hasreflink(&mp->m_sb)) { xfs_alert(mp, -- GitLab From f7bf743714ab3f0a93c2c58e915541db83bebdee Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Thu, 30 Apr 2020 07:41:36 -0700 Subject: [PATCH 0849/1971] fs/xfs: Create function xfs_inode_should_enable_dax() xfs_inode_supports_dax() should reflect if the inode can support DAX not that it is enabled for DAX. Change the use of xfs_inode_supports_dax() to reflect only if the inode and underlying storage support dax. Add a new function xfs_inode_should_enable_dax() which reflects if the inode should be enabled for DAX. Reviewed-by: Dave Chinner Reviewed-by: Darrick J. Wong Signed-off-by: Ira Weiny Signed-off-by: Darrick J. Wong --- fs/xfs/xfs_iops.c | 28 ++++++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index 462f89af479a7..1814f10e43d37 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c @@ -1243,13 +1243,12 @@ xfs_inode_supports_dax( { struct xfs_mount *mp = ip->i_mount; - /* Only supported on non-reflinked files. */ - if (!S_ISREG(VFS_I(ip)->i_mode) || xfs_is_reflink_inode(ip)) + /* Only supported on regular files. */ + if (!S_ISREG(VFS_I(ip)->i_mode)) return false; - /* DAX mount option or DAX iflag must be set. */ - if (!(mp->m_flags & XFS_MOUNT_DAX_ALWAYS) && - !(ip->i_d.di_flags2 & XFS_DIFLAG2_DAX)) + /* Only supported on non-reflinked files. 
*/ + if (xfs_is_reflink_inode(ip)) return false; /* Block size must match page size */ @@ -1260,6 +1259,23 @@ xfs_inode_supports_dax( return xfs_inode_buftarg(ip)->bt_daxdev != NULL; } +static bool +xfs_inode_should_enable_dax( + struct xfs_inode *ip) +{ + if (!IS_ENABLED(CONFIG_FS_DAX)) + return false; + if (ip->i_mount->m_flags & XFS_MOUNT_DAX_NEVER) + return false; + if (!xfs_inode_supports_dax(ip)) + return false; + if (ip->i_mount->m_flags & XFS_MOUNT_DAX_ALWAYS) + return true; + if (ip->i_d.di_flags2 & XFS_DIFLAG2_DAX) + return true; + return false; +} + STATIC void xfs_diflags_to_iflags( struct inode *inode, @@ -1278,7 +1294,7 @@ xfs_diflags_to_iflags( inode->i_flags |= S_SYNC; if (flags & XFS_DIFLAG_NOATIME) inode->i_flags |= S_NOATIME; - if (xfs_inode_supports_dax(ip)) + if (xfs_inode_should_enable_dax(ip)) inode->i_flags |= S_DAX; } -- GitLab From b5df62843273f914c2ac7be8e33931a414df15d7 Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Thu, 30 Apr 2020 07:41:36 -0700 Subject: [PATCH 0850/1971] fs/xfs: Combine xfs_diflags_to_linux() and xfs_diflags_to_iflags() The functionality in xfs_diflags_to_linux() and xfs_diflags_to_iflags() are nearly identical. The only difference is that *_to_linux() is called after inode setup and disallows changing the DAX flag. Combining them can be done with a flag which indicates if this is the initial setup to allow the DAX flag to be properly set only at init time. So remove xfs_diflags_to_linux() and call the modified xfs_diflags_to_iflags() directly. While we are here simplify xfs_diflags_to_iflags() to take struct xfs_inode and use xfs_ip2xflags() to ensure future diflags are included correctly. Reviewed-by: Dave Chinner Reviewed-by: Darrick J. Wong Signed-off-by: Ira Weiny Signed-off-by: Darrick J. Wong --- fs/xfs/xfs_inode.h | 1 + fs/xfs/xfs_ioctl.c | 33 +-------------------------------- fs/xfs/xfs_iops.c | 46 +++++++++++++++++++++++++++------------------- 3 files changed, 29 insertions(+), 51 deletions(-) diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 95209e3256a23..d8ce3eaa246eb 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -466,6 +466,7 @@ int xfs_break_layouts(struct inode *inode, uint *iolock, /* from xfs_iops.c */ extern void xfs_setup_inode(struct xfs_inode *ip); extern void xfs_setup_iops(struct xfs_inode *ip); +extern void xfs_diflags_to_iflags(struct xfs_inode *ip, bool init); /* * When setting up a newly allocated inode, we need to call diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c index 309958186d338..104495ac187cf 100644 --- a/fs/xfs/xfs_ioctl.c +++ b/fs/xfs/xfs_ioctl.c @@ -1201,37 +1201,6 @@ xfs_flags2diflags2( return di_flags2; } -STATIC void -xfs_diflags_to_linux( - struct xfs_inode *ip) -{ - struct inode *inode = VFS_I(ip); - unsigned int xflags = xfs_ip2xflags(ip); - - if (xflags & FS_XFLAG_IMMUTABLE) - inode->i_flags |= S_IMMUTABLE; - else - inode->i_flags &= ~S_IMMUTABLE; - if (xflags & FS_XFLAG_APPEND) - inode->i_flags |= S_APPEND; - else - inode->i_flags &= ~S_APPEND; - if (xflags & FS_XFLAG_SYNC) - inode->i_flags |= S_SYNC; - else - inode->i_flags &= ~S_SYNC; - if (xflags & FS_XFLAG_NOATIME) - inode->i_flags |= S_NOATIME; - else - inode->i_flags &= ~S_NOATIME; -#if 0 /* disabled until the flag switching races are sorted out */ - if (xflags & FS_XFLAG_DAX) - inode->i_flags |= S_DAX; - else - inode->i_flags &= ~S_DAX; -#endif -} - static int xfs_ioctl_setattr_xflags( struct xfs_trans *tp, @@ -1269,7 +1238,7 @@ xfs_ioctl_setattr_xflags( ip->i_d.di_flags = xfs_flags2diflags(ip, fa->fsx_xflags); 
ip->i_d.di_flags2 = di_flags2; - xfs_diflags_to_linux(ip); + xfs_diflags_to_iflags(ip, false); xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG); xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); XFS_STATS_INC(mp, xs_ig_attrchg); diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index 1814f10e43d37..b70b735fe4a45 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c @@ -1276,26 +1276,34 @@ xfs_inode_should_enable_dax( return false; } -STATIC void +void xfs_diflags_to_iflags( - struct inode *inode, - struct xfs_inode *ip) + struct xfs_inode *ip, + bool init) { - uint16_t flags = ip->i_d.di_flags; - - inode->i_flags &= ~(S_IMMUTABLE | S_APPEND | S_SYNC | - S_NOATIME | S_DAX); - - if (flags & XFS_DIFLAG_IMMUTABLE) - inode->i_flags |= S_IMMUTABLE; - if (flags & XFS_DIFLAG_APPEND) - inode->i_flags |= S_APPEND; - if (flags & XFS_DIFLAG_SYNC) - inode->i_flags |= S_SYNC; - if (flags & XFS_DIFLAG_NOATIME) - inode->i_flags |= S_NOATIME; - if (xfs_inode_should_enable_dax(ip)) - inode->i_flags |= S_DAX; + struct inode *inode = VFS_I(ip); + unsigned int xflags = xfs_ip2xflags(ip); + unsigned int flags = 0; + + ASSERT(!(IS_DAX(inode) && init)); + + if (xflags & FS_XFLAG_IMMUTABLE) + flags |= S_IMMUTABLE; + if (xflags & FS_XFLAG_APPEND) + flags |= S_APPEND; + if (xflags & FS_XFLAG_SYNC) + flags |= S_SYNC; + if (xflags & FS_XFLAG_NOATIME) + flags |= S_NOATIME; + if (init && xfs_inode_should_enable_dax(ip)) + flags |= S_DAX; + + /* + * S_DAX can only be set during inode initialization and is never set by + * the VFS, so we cannot mask off S_DAX in i_flags. + */ + inode->i_flags &= ~(S_IMMUTABLE | S_APPEND | S_SYNC | S_NOATIME); + inode->i_flags |= flags; } /* @@ -1321,7 +1329,7 @@ xfs_setup_inode( inode_fake_hash(inode); i_size_write(inode, ip->i_d.di_size); - xfs_diflags_to_iflags(inode, ip); + xfs_diflags_to_iflags(ip, true); if (S_ISDIR(inode->i_mode)) { /* -- GitLab From e4f9ba20d3b8c2b86ec71f326882e1a3c4e47953 Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Thu, 30 Apr 2020 07:41:38 -0700 Subject: [PATCH 0851/1971] fs/xfs: Update xfs_ioctl_setattr_dax_invalidate() Because of the separation of FS_XFLAG_DAX from S_DAX and the delayed setting of S_DAX, data invalidation no longer needs to happen when FS_XFLAG_DAX is changed. Change xfs_ioctl_setattr_dax_invalidate() to be xfs_ioctl_dax_check_set_cache() and alter the code to reflect the new functionality. Furthermore, we no longer need the locking so we remove the join_flags logic. Signed-off-by: Ira Weiny Reviewed-by: Darrick J. Wong Signed-off-by: Darrick J. Wong --- fs/xfs/xfs_ioctl.c | 108 +++++++++------------------------------------ 1 file changed, 20 insertions(+), 88 deletions(-) diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c index 104495ac187cf..ff474f2c9acf6 100644 --- a/fs/xfs/xfs_ioctl.c +++ b/fs/xfs/xfs_ioctl.c @@ -1245,64 +1245,26 @@ xfs_ioctl_setattr_xflags( return 0; } -/* - * If we are changing DAX flags, we have to ensure the file is clean and any - * cached objects in the address space are invalidated and removed. This - * requires us to lock out other IO and page faults similar to a truncate - * operation. The locks need to be held until the transaction has been committed - * so that the cache invalidation is atomic with respect to the DAX flag - * manipulation. 
- */ -static int -xfs_ioctl_setattr_dax_invalidate( +static void +xfs_ioctl_setattr_prepare_dax( struct xfs_inode *ip, - struct fsxattr *fa, - int *join_flags) + struct fsxattr *fa) { - struct inode *inode = VFS_I(ip); - struct super_block *sb = inode->i_sb; - int error; - - *join_flags = 0; - - /* - * It is only valid to set the DAX flag on regular files and - * directories on filesystems where the block size is equal to the page - * size. On directories it serves as an inherited hint so we don't - * have to check the device for dax support or flush pagecache. - */ - if (fa->fsx_xflags & FS_XFLAG_DAX) { - struct xfs_buftarg *target = xfs_inode_buftarg(ip); - - if (!bdev_dax_supported(target->bt_bdev, sb->s_blocksize)) - return -EINVAL; - } - - /* If the DAX state is not changing, we have nothing to do here. */ - if ((fa->fsx_xflags & FS_XFLAG_DAX) && IS_DAX(inode)) - return 0; - if (!(fa->fsx_xflags & FS_XFLAG_DAX) && !IS_DAX(inode)) - return 0; + struct xfs_mount *mp = ip->i_mount; + struct inode *inode = VFS_I(ip); if (S_ISDIR(inode->i_mode)) - return 0; - - /* lock, flush and invalidate mapping in preparation for flag change */ - xfs_ilock(ip, XFS_MMAPLOCK_EXCL | XFS_IOLOCK_EXCL); - error = filemap_write_and_wait(inode->i_mapping); - if (error) - goto out_unlock; - error = invalidate_inode_pages2(inode->i_mapping); - if (error) - goto out_unlock; - - *join_flags = XFS_MMAPLOCK_EXCL | XFS_IOLOCK_EXCL; - return 0; + return; -out_unlock: - xfs_iunlock(ip, XFS_MMAPLOCK_EXCL | XFS_IOLOCK_EXCL); - return error; + if ((mp->m_flags & XFS_MOUNT_DAX_ALWAYS) || + (mp->m_flags & XFS_MOUNT_DAX_NEVER)) + return; + if (((fa->fsx_xflags & FS_XFLAG_DAX) && + !(ip->i_d.di_flags2 & XFS_DIFLAG2_DAX)) || + (!(fa->fsx_xflags & FS_XFLAG_DAX) && + (ip->i_d.di_flags2 & XFS_DIFLAG2_DAX))) + d_mark_dontcache(inode); } /* @@ -1310,17 +1272,10 @@ out_unlock: * have permission to do so. On success, return a clean transaction and the * inode locked exclusively ready for further operation specific checks. On * failure, return an error without modifying or locking the inode. - * - * The inode might already be IO locked on call. If this is the case, it is - * indicated in @join_flags and we take full responsibility for ensuring they - * are unlocked from now on. Hence if we have an error here, we still have to - * unlock them. Otherwise, once they are joined to the transaction, they will - * be unlocked on commit/cancel. */ static struct xfs_trans * xfs_ioctl_setattr_get_trans( - struct xfs_inode *ip, - int join_flags) + struct xfs_inode *ip) { struct xfs_mount *mp = ip->i_mount; struct xfs_trans *tp; @@ -1337,8 +1292,7 @@ xfs_ioctl_setattr_get_trans( goto out_unlock; xfs_ilock(ip, XFS_ILOCK_EXCL); - xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | join_flags); - join_flags = 0; + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); /* * CAP_FOWNER overrides the following restrictions: @@ -1359,8 +1313,6 @@ xfs_ioctl_setattr_get_trans( out_cancel: xfs_trans_cancel(tp); out_unlock: - if (join_flags) - xfs_iunlock(ip, join_flags); return ERR_PTR(error); } @@ -1486,7 +1438,6 @@ xfs_ioctl_setattr( struct xfs_dquot *pdqp = NULL; struct xfs_dquot *olddquot = NULL; int code; - int join_flags = 0; trace_xfs_ioctl_setattr(ip); @@ -1510,18 +1461,9 @@ xfs_ioctl_setattr( return code; } - /* - * Changing DAX config may require inode locking for mapping - * invalidation. 
These need to be held all the way to transaction commit - * or cancel time, so need to be passed through to - * xfs_ioctl_setattr_get_trans() so it can apply them to the join call - * appropriately. - */ - code = xfs_ioctl_setattr_dax_invalidate(ip, fa, &join_flags); - if (code) - goto error_free_dquots; + xfs_ioctl_setattr_prepare_dax(ip, fa); - tp = xfs_ioctl_setattr_get_trans(ip, join_flags); + tp = xfs_ioctl_setattr_get_trans(ip); if (IS_ERR(tp)) { code = PTR_ERR(tp); goto error_free_dquots; @@ -1651,7 +1593,6 @@ xfs_ioc_setxflags( struct fsxattr fa; struct fsxattr old_fa; unsigned int flags; - int join_flags = 0; int error; if (copy_from_user(&flags, arg, sizeof(flags))) @@ -1668,18 +1609,9 @@ xfs_ioc_setxflags( if (error) return error; - /* - * Changing DAX config may require inode locking for mapping - * invalidation. These need to be held all the way to transaction commit - * or cancel time, so need to be passed through to - * xfs_ioctl_setattr_get_trans() so it can apply them to the join call - * appropriately. - */ - error = xfs_ioctl_setattr_dax_invalidate(ip, &fa, &join_flags); - if (error) - goto out_drop_write; + xfs_ioctl_setattr_prepare_dax(ip, &fa); - tp = xfs_ioctl_setattr_get_trans(ip, join_flags); + tp = xfs_ioctl_setattr_get_trans(ip); if (IS_ERR(tp)) { error = PTR_ERR(tp); goto out_drop_write; -- GitLab From 3264d9e5cf453fc9d392dfbaca3ba0afd1c54462 Mon Sep 17 00:00:00 2001 From: Serge Semin Date: Thu, 28 May 2020 12:33:11 +0300 Subject: [PATCH 0852/1971] dt-bindings: i2c: Convert DW I2C binding to DT schema Modern device tree bindings are supposed to be created as YAML-files in accordance with dt-schema. This commit replaces Synopsys DW I2C legacy bare text bindings with YAML file. As before the bindings file states that the corresponding dts node is supposed to be compatible either with generic DW I2C controller or with Microsemi Ocelot SoC I2C one, to have registers, interrupts and clocks properties. In addition the node may have clock-frequency, i2c-sda-hold-time-ns, i2c-scl-falling-time-ns and i2c-sda-falling-time-ns optional properties. Signed-off-by: Serge Semin Reviewed-by: Rob Herring Signed-off-by: Wolfram Sang --- .../bindings/i2c/i2c-designware.txt | 73 --------- .../bindings/i2c/snps,designware-i2c.yaml | 154 ++++++++++++++++++ 2 files changed, 154 insertions(+), 73 deletions(-) delete mode 100644 Documentation/devicetree/bindings/i2c/i2c-designware.txt create mode 100644 Documentation/devicetree/bindings/i2c/snps,designware-i2c.yaml diff --git a/Documentation/devicetree/bindings/i2c/i2c-designware.txt b/Documentation/devicetree/bindings/i2c/i2c-designware.txt deleted file mode 100644 index 08be4d3846e57..0000000000000 --- a/Documentation/devicetree/bindings/i2c/i2c-designware.txt +++ /dev/null @@ -1,73 +0,0 @@ -* Synopsys DesignWare I2C - -Required properties : - - - compatible : should be "snps,designware-i2c" - or "mscc,ocelot-i2c" with "snps,designware-i2c" for fallback - - reg : Offset and length of the register set for the device - - interrupts : where IRQ is the interrupt number. - - clocks : phandles for the clocks, see the description of clock-names below. - The phandle for the "ic_clk" clock is required. The phandle for the "pclk" - clock is optional. If a single clock is specified but no clock-name, it is - the "ic_clk" clock. If both clocks are listed, the "ic_clk" must be first. - -Recommended properties : - - - clock-frequency : desired I2C bus clock frequency in Hz. 
- -Optional properties : - - - clock-names : Contains the names of the clocks: - "ic_clk", for the core clock used to generate the external I2C clock. - "pclk", the interface clock, required for register access. - - - reg : for "mscc,ocelot-i2c", a second register set to configure the SDA hold - time, named ICPU_CFG:TWI_DELAY in the datasheet. - - - i2c-sda-hold-time-ns : should contain the SDA hold time in nanoseconds. - This option is only supported in hardware blocks version 1.11a or newer and - on Microsemi SoCs ("mscc,ocelot-i2c" compatible). - - - i2c-scl-falling-time-ns : should contain the SCL falling time in nanoseconds. - This value which is by default 300ns is used to compute the tLOW period. - - - i2c-sda-falling-time-ns : should contain the SDA falling time in nanoseconds. - This value which is by default 300ns is used to compute the tHIGH period. - -Examples : - - i2c@f0000 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "snps,designware-i2c"; - reg = <0xf0000 0x1000>; - interrupts = <11>; - clock-frequency = <400000>; - }; - - i2c@1120000 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "snps,designware-i2c"; - reg = <0x1120000 0x1000>; - interrupt-parent = <&ictl>; - interrupts = <12 1>; - clock-frequency = <400000>; - i2c-sda-hold-time-ns = <300>; - i2c-sda-falling-time-ns = <300>; - i2c-scl-falling-time-ns = <300>; - }; - - i2c@1120000 { - #address-cells = <1>; - #size-cells = <0>; - reg = <0x2000 0x100>; - clock-frequency = <400000>; - clocks = <&i2cclk>; - interrupts = <0>; - - eeprom@64 { - compatible = "linux,slave-24c02"; - reg = <0x40000064>; - }; - }; diff --git a/Documentation/devicetree/bindings/i2c/snps,designware-i2c.yaml b/Documentation/devicetree/bindings/i2c/snps,designware-i2c.yaml new file mode 100644 index 0000000000000..4bd430b2b41d1 --- /dev/null +++ b/Documentation/devicetree/bindings/i2c/snps,designware-i2c.yaml @@ -0,0 +1,154 @@ +# SPDX-License-Identifier: GPL-2.0-only +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/i2c/snps,designware-i2c.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Synopsys DesignWare APB I2C Controller + +maintainers: + - Jarkko Nikula + +allOf: + - $ref: /schemas/i2c/i2c-controller.yaml# + - if: + properties: + compatible: + not: + contains: + const: mscc,ocelot-i2c + then: + properties: + reg: + maxItems: 1 + +properties: + compatible: + oneOf: + - description: Generic Synopsys DesignWare I2C controller + const: snps,designware-i2c + - description: Microsemi Ocelot SoCs I2C controller + items: + - const: mscc,ocelot-i2c + - const: snps,designware-i2c + + reg: + minItems: 1 + items: + - description: DW APB I2C controller memory mapped registers + - description: | + ICPU_CFG:TWI_DELAY registers to setup the SDA hold time. + This registers are specific to the Ocelot I2C-controller. + + interrupts: + maxItems: 1 + + clocks: + minItems: 1 + items: + - description: I2C controller reference clock source + - description: APB interface clock source + + clock-names: + minItems: 1 + items: + - const: ref + - const: pclk + + resets: + maxItems: 1 + + clock-frequency: + description: Desired I2C bus clock frequency in Hz + enum: [100000, 400000, 1000000, 3400000] + default: 400000 + + i2c-sda-hold-time-ns: + maxItems: 1 + description: | + The property should contain the SDA hold time in nanoseconds. This option + is only supported in hardware blocks version 1.11a or newer or on + Microsemi SoCs. 
+ + i2c-scl-falling-time-ns: + maxItems: 1 + description: | + The property should contain the SCL falling time in nanoseconds. + This value is used to compute the tLOW period. + default: 300 + + i2c-sda-falling-time-ns: + maxItems: 1 + description: | + The property should contain the SDA falling time in nanoseconds. + This value is used to compute the tHIGH period. + default: 300 + + dmas: + items: + - description: TX DMA Channel + - description: RX DMA Channel + + dma-names: + items: + - const: tx + - const: rx + +unevaluatedProperties: false + +required: + - compatible + - reg + - "#address-cells" + - "#size-cells" + - interrupts + +examples: + - | + i2c@f0000 { + compatible = "snps,designware-i2c"; + reg = <0xf0000 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = <11>; + clock-frequency = <400000>; + }; + - | + i2c@1120000 { + compatible = "snps,designware-i2c"; + reg = <0x1120000 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = <12 1>; + clock-frequency = <400000>; + i2c-sda-hold-time-ns = <300>; + i2c-sda-falling-time-ns = <300>; + i2c-scl-falling-time-ns = <300>; + }; + - | + i2c@2000 { + compatible = "snps,designware-i2c"; + reg = <0x2000 0x100>; + #address-cells = <1>; + #size-cells = <0>; + clock-frequency = <400000>; + clocks = <&i2cclk>; + interrupts = <0>; + + eeprom@64 { + compatible = "linux,slave-24c02"; + reg = <0x40000064>; + }; + }; + - | + i2c@100400 { + compatible = "mscc,ocelot-i2c", "snps,designware-i2c"; + reg = <0x100400 0x100>, <0x198 0x8>; + pinctrl-0 = <&i2c_pins>; + pinctrl-names = "default"; + #address-cells = <1>; + #size-cells = <0>; + interrupts = <8>; + clocks = <&ahb_clk>; + }; +... -- GitLab From 25d11e9ebe0a5560dc057d1261215d9157c0ae8a Mon Sep 17 00:00:00 2001 From: Serge Semin Date: Thu, 28 May 2020 12:33:12 +0300 Subject: [PATCH 0853/1971] dt-bindings: i2c: Convert DW I2C slave to the DW I2C master example dtc currently doesn't support I2C_OWN_SLAVE_ADDRESS flag set in the i2c "reg" property. If dtc finds an i2c-slave sub-node having an address higher than ten-bits wide it'll print an ugly warning: Warning (i2c_bus_reg): /example-2/i2c@1120000/eeprom@64: I2C bus unit address format error, expected "40000064" Warning (i2c_bus_reg): /example-2/i2c@1120000/eeprom@64:reg: I2C address must be less than 10-bits, got "0x40000064" In order to silence dtc up let's replace the corresponding DT binding example with a normal DW I2C master mode-based one. It's done by clearing the I2C_OWN_SLAVE_ADDRESS bit in the reg property and converting the sub-node to be compatible with normal EEPROM like "atmel,24c02". Just revert this commit when dtc is fixed. 
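For context, the offending unit address is nothing more than the 7-bit slave address with the own-slave marker set on top of it. A minimal sketch of the decoding, assuming the I2C_OWN_SLAVE_ADDRESS definition from include/dt-bindings/i2c/i2c.h (bit 30); the helper name below is made up purely for illustration:

    #define I2C_OWN_SLAVE_ADDRESS	(1U << 30)	/* 0x40000000 */

    /* Illustrative helper: strip the flag to recover the bus address. */
    static unsigned int i2c_reg_to_addr(unsigned int reg)
    {
            return reg & ~I2C_OWN_SLAVE_ADDRESS;	/* 0x40000064 -> 0x64 */
    }

Once the flag is dropped, the example simply describes an ordinary master-mode EEPROM at address 0x64, which is exactly what the "atmel,24c02" node below expresses.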
Signed-off-by: Serge Semin Reviewed-by: Rob Herring Signed-off-by: Wolfram Sang --- .../devicetree/bindings/i2c/snps,designware-i2c.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Documentation/devicetree/bindings/i2c/snps,designware-i2c.yaml b/Documentation/devicetree/bindings/i2c/snps,designware-i2c.yaml index 4bd430b2b41d1..dff3f267bdee8 100644 --- a/Documentation/devicetree/bindings/i2c/snps,designware-i2c.yaml +++ b/Documentation/devicetree/bindings/i2c/snps,designware-i2c.yaml @@ -136,8 +136,8 @@ examples: interrupts = <0>; eeprom@64 { - compatible = "linux,slave-24c02"; - reg = <0x40000064>; + compatible = "atmel,24c02"; + reg = <0x64>; }; }; - | -- GitLab From 0029d097956e700b6b498ce39d6d745adc35b39c Mon Sep 17 00:00:00 2001 From: Serge Semin Date: Thu, 28 May 2020 12:33:13 +0300 Subject: [PATCH 0854/1971] dt-bindings: i2c: dw: Add Baikal-T1 SoC I2C controller Add the "baikal,bt1-sys-i2c" compatible string to the DW I2C binding. Even though the corresponding node is supposed to be a child of the Baikal-T1 System Controller, its reg property is left required for compatibility. Signed-off-by: Serge Semin Reviewed-by: Rob Herring Signed-off-by: Wolfram Sang --- Documentation/devicetree/bindings/i2c/snps,designware-i2c.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Documentation/devicetree/bindings/i2c/snps,designware-i2c.yaml b/Documentation/devicetree/bindings/i2c/snps,designware-i2c.yaml index dff3f267bdee8..4f746bef23742 100644 --- a/Documentation/devicetree/bindings/i2c/snps,designware-i2c.yaml +++ b/Documentation/devicetree/bindings/i2c/snps,designware-i2c.yaml @@ -31,6 +31,8 @@ properties: items: - const: mscc,ocelot-i2c - const: snps,designware-i2c + - description: Baikal-T1 SoC System I2C controller + const: baikal,bt1-sys-i2c reg: minItems: 1 -- GitLab From bbc5d36c5f66f19778d8e49a1e7d2c52e6873753 Mon Sep 17 00:00:00 2001 From: Serge Semin Date: Thu, 28 May 2020 12:33:14 +0300 Subject: [PATCH 0855/1971] i2c: designware: Use `-y` to build multi-object modules Since commit 4f8272802739 ("Documentation: update kbuild loadable modules goals & examples") `-objs` is fitted for building host programs, lets change DW I2C core, platform and PCI driver kbuild directives to using `-y`, which more straightforward for device drivers. 
By doing so we can discard the ifeq construction in favor to the more natural and less bulky `-$(CONFIG_X) += x.o` Signed-off-by: Serge Semin Acked-by: Jarkko Nikula Signed-off-by: Wolfram Sang --- drivers/i2c/busses/Makefile | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index d5f56e6179a4b..e18497fb85c5b 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -48,17 +48,15 @@ obj-$(CONFIG_I2C_CADENCE) += i2c-cadence.o obj-$(CONFIG_I2C_CBUS_GPIO) += i2c-cbus-gpio.o obj-$(CONFIG_I2C_CPM) += i2c-cpm.o obj-$(CONFIG_I2C_DAVINCI) += i2c-davinci.o -obj-$(CONFIG_I2C_DESIGNWARE_CORE) += i2c-designware-core.o -i2c-designware-core-objs := i2c-designware-common.o -i2c-designware-core-objs += i2c-designware-master.o -ifeq ($(CONFIG_I2C_DESIGNWARE_SLAVE),y) -i2c-designware-core-objs += i2c-designware-slave.o -endif -obj-$(CONFIG_I2C_DESIGNWARE_PLATFORM) += i2c-designware-platform.o -i2c-designware-platform-objs := i2c-designware-platdrv.o +obj-$(CONFIG_I2C_DESIGNWARE_CORE) += i2c-designware-core.o +i2c-designware-core-y := i2c-designware-common.o +i2c-designware-core-y += i2c-designware-master.o +i2c-designware-core-$(CONFIG_I2C_DESIGNWARE_SLAVE) += i2c-designware-slave.o +obj-$(CONFIG_I2C_DESIGNWARE_PLATFORM) += i2c-designware-platform.o +i2c-designware-platform-y := i2c-designware-platdrv.o i2c-designware-platform-$(CONFIG_I2C_DESIGNWARE_BAYTRAIL) += i2c-designware-baytrail.o -obj-$(CONFIG_I2C_DESIGNWARE_PCI) += i2c-designware-pci.o -i2c-designware-pci-objs := i2c-designware-pcidrv.o +obj-$(CONFIG_I2C_DESIGNWARE_PCI) += i2c-designware-pci.o +i2c-designware-pci-y := i2c-designware-pcidrv.o obj-$(CONFIG_I2C_DIGICOLOR) += i2c-digicolor.o obj-$(CONFIG_I2C_EFM32) += i2c-efm32.o obj-$(CONFIG_I2C_EG20T) += i2c-eg20t.o -- GitLab From 68fe6cedf02e381da96dcf3cd0a53e4b938a0660 Mon Sep 17 00:00:00 2001 From: Serge Semin Date: Thu, 28 May 2020 12:33:15 +0300 Subject: [PATCH 0856/1971] i2c: designware: slave: Set DW I2C core module dependency DW APB I2C slave code in fact depends on the DW I2C driver core, but not on the platform code as it used to be before commit 90bc1ee6de9f ("i2c: designware: Allow slave mode for PCI enumerated devices"). Yes, the I2C slave interface is currently supported by both the platform and PCI versions of the IP core, but it still depends on the DW I2C core functionality and must be available only if the last one is enabled. So make sure the DW APB I2C slave config is only available if the I2C_DESIGNWARE_CORE config is enabled. Signed-off-by: Serge Semin Acked-by: Jarkko Nikula Signed-off-by: Wolfram Sang --- drivers/i2c/busses/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 6bf68d52a65a9..3d92b99a41541 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -529,6 +529,7 @@ config I2C_DESIGNWARE_CORE config I2C_DESIGNWARE_SLAVE bool "Synopsys DesignWare Slave" + depends on I2C_DESIGNWARE_CORE select I2C_SLAVE help If you say yes to this option, support will be included for the -- GitLab From c2549011db2cd520b1d4ffb85a790a4ba22a80d1 Mon Sep 17 00:00:00 2001 From: Serge Semin Date: Thu, 28 May 2020 12:33:16 +0300 Subject: [PATCH 0857/1971] i2c: designware: Add Baytrail sem config DW I2C platform dependency Currently Intel Baytrail I2C semaphore is a feature of the DW APB I2C platform driver. 
It's a bit confusing to see its config in the menu in a separate place with no reference to the platform code. Let's move the config definition to be below the I2C_DESIGNWARE_PLATFORM config and mark it with a "depends on I2C_DESIGNWARE_PLATFORM" statement. By doing so the config menu will display the feature right below the DW I2C platform driver item and will indent it to the right, signifying where it belongs. Signed-off-by: Serge Semin Reviewed-by: Andy Shevchenko Signed-off-by: Wolfram Sang --- drivers/i2c/busses/Kconfig | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 3d92b99a41541..6a6fadfc3f930 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -549,20 +549,10 @@ config I2C_DESIGNWARE_PLATFORM This driver can also be built as a module. If so, the module will be called i2c-designware-platform. -config I2C_DESIGNWARE_PCI - tristate "Synopsys DesignWare PCI" - depends on PCI - select I2C_DESIGNWARE_CORE - help - If you say yes to this option, support will be included for the - Synopsys DesignWare I2C adapter. Only master mode is supported. - - This driver can also be built as a module. If so, the module - will be called i2c-designware-pci. - config I2C_DESIGNWARE_BAYTRAIL bool "Intel Baytrail I2C semaphore support" depends on ACPI + depends on I2C_DESIGNWARE_PLATFORM depends on (I2C_DESIGNWARE_PLATFORM=m && IOSF_MBI) || \ (I2C_DESIGNWARE_PLATFORM=y && IOSF_MBI=y) help @@ -572,6 +562,17 @@ config I2C_DESIGNWARE_BAYTRAIL the platform firmware controlling it. You should say Y if running on a BayTrail system using the AXP288. +config I2C_DESIGNWARE_PCI + tristate "Synopsys DesignWare PCI" + depends on PCI + select I2C_DESIGNWARE_CORE + help + If you say yes to this option, support will be included for the + Synopsys DesignWare I2C adapter. Only master mode is supported. + + This driver can also be built as a module. If so, the module + will be called i2c-designware-pci. + config I2C_DIGICOLOR tristate "Conexant Digicolor I2C driver" depends on ARCH_DIGICOLOR || COMPILE_TEST -- GitLab From c615f5c65f622fb62df17d58485f46a544d6e07a Mon Sep 17 00:00:00 2001 From: Serge Semin Date: Thu, 28 May 2020 12:33:17 +0300 Subject: [PATCH 0858/1971] i2c: designware: Discard Cherry Trail model flag A PM workaround activated by the flag MODEL_CHERRYTRAIL has been removed since commit 9cbeeca05049 ("i2c: designware: Remove Cherry Trail PMIC I2C bus pm_disabled workaround"), but the flag has most likely been left in the DW I2C drivers by mistake. Let's remove it. Since MODEL_MSCC_OCELOT is the only model-flag left, redefine it to 0x100, thus setting the very first bit in the MODEL_MASK bits range.
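For reference, the model flags are meant to be tested under MODEL_MASK (the platform driver compares dev->flags this way in its setup paths), so the remaining flag can be renumbered freely as long as it stays within the mask. A trivial sketch of the intended pattern; the helper below is hypothetical and only mirrors how dev->flags is evaluated:

    #define MODEL_MSCC_OCELOT	0x00000100
    #define MODEL_MASK		0x00000f00

    /* Illustration only: model flags are always compared under MODEL_MASK. */
    static int model_is_ocelot(unsigned int flags)
    {
            return (flags & MODEL_MASK) == MODEL_MSCC_OCELOT;
    }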
Signed-off-by: Serge Semin Acked-by: Jarkko Nikula Reviewed-by: Andy Shevchenko Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-designware-core.h | 3 +-- drivers/i2c/busses/i2c-designware-pcidrv.c | 1 - drivers/i2c/busses/i2c-designware-platdrv.c | 2 +- 3 files changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h index 150de5e5c31bd..b9ef9b0deef0d 100644 --- a/drivers/i2c/busses/i2c-designware-core.h +++ b/drivers/i2c/busses/i2c-designware-core.h @@ -289,8 +289,7 @@ struct dw_i2c_dev { #define ACCESS_INTR_MASK 0x00000004 #define ACCESS_NO_IRQ_SUSPEND 0x00000008 -#define MODEL_CHERRYTRAIL 0x00000100 -#define MODEL_MSCC_OCELOT 0x00000200 +#define MODEL_MSCC_OCELOT 0x00000100 #define MODEL_MASK 0x00000f00 u32 dw_readl(struct dw_i2c_dev *dev, int offset); diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c index 11a5e4751eab7..947c096f86e37 100644 --- a/drivers/i2c/busses/i2c-designware-pcidrv.c +++ b/drivers/i2c/busses/i2c-designware-pcidrv.c @@ -149,7 +149,6 @@ static struct dw_pci_controller dw_pci_controllers[] = { }, [cherrytrail] = { .bus_num = -1, - .flags = MODEL_CHERRYTRAIL, .scl_sda_cfg = &byt_config, }, [elkhartlake] = { diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index f6d2c96e35cef..ca057aa9eac4c 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -44,7 +44,7 @@ static const struct acpi_device_id dw_i2c_acpi_match[] = { { "INT3432", 0 }, { "INT3433", 0 }, { "80860F41", ACCESS_NO_IRQ_SUSPEND }, - { "808622C1", ACCESS_NO_IRQ_SUSPEND | MODEL_CHERRYTRAIL }, + { "808622C1", ACCESS_NO_IRQ_SUSPEND }, { "AMD0010", ACCESS_INTR_MASK }, { "AMDI0010", ACCESS_INTR_MASK }, { "AMDI0510", 0 }, -- GitLab From ca7f76e680745d3b8a386638045f85dac1c4b2f4 Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Fri, 29 May 2020 17:29:47 +0800 Subject: [PATCH 0859/1971] f2fs: fix wrong discard space Under heavy fsstress, we may triggle panic while issuing discard, because __check_sit_bitmap() detects that discard command may earse valid data blocks, the root cause is as below race stack described, since we removed lock when flushing quota data, quota data writeback may race with write_checkpoint(), so that it causes inconsistency in between cached discard entry and segment bitmap. - f2fs_write_checkpoint - block_operations - set_sbi_flag(sbi, SBI_QUOTA_SKIP_FLUSH) - f2fs_flush_sit_entries - add_discard_addrs - __set_bit_le(i, (void *)de->discard_map); - f2fs_write_data_pages - f2fs_write_single_data_page : inode is quota one, cp_rwsem won't be locked - f2fs_do_write_data_page - f2fs_allocate_data_block - f2fs_wait_discard_bio : discard entry has not been added yet. - update_sit_entry - f2fs_clear_prefree_segments - f2fs_issue_discard : add discard entry In order to fix this, this patch uses node_write to serialize f2fs_allocate_data_block and checkpoint. 
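The reason node_write is sufficient here is that the checkpoint path already holds it exclusively while the SIT entries and the cached discard entries are being flushed, so taking it in the allocator keeps the two from interleaving even though quota writeback skips cp_rwsem. A heavily simplified sketch of the two sides (not actual f2fs code; the checkpoint side is assumed to behave like block_operations() in fs/f2fs/checkpoint.c):

    /* Sketch only: both paths take the rw_semaphore for writing. */
    static void checkpoint_side(struct f2fs_sb_info *sbi)
    {
            down_write(&sbi->node_write);
            /* ... flush SIT entries, build cached discard entries ... */
            up_write(&sbi->node_write);
    }

    static void allocation_side(struct f2fs_sb_info *sbi)
    {
            down_write(&sbi->node_write);	/* added by this patch for data segments */
            /* ... allocate the data block, update the SIT entry ... */
            up_write(&sbi->node_write);
    }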
Fixes: 435cbab95e39 ("f2fs: fix quota_sync failure due to f2fs_lock_op") Signed-off-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/segment.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index ebbadde6cbced..196f315035118 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -3107,6 +3107,14 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, type = CURSEG_COLD_DATA; } + /* + * We need to wait for node_write to avoid block allocation during + * checkpoint. This can only happen to quota writes which can cause + * the below discard race condition. + */ + if (IS_DATASEG(type)) + down_write(&sbi->node_write); + down_read(&SM_I(sbi)->curseg_lock); mutex_lock(&curseg->curseg_mutex); @@ -3172,6 +3180,9 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, up_read(&SM_I(sbi)->curseg_lock); + if (IS_DATASEG(type)) + up_write(&sbi->node_write); + if (put_pin_sem) up_read(&sbi->pin_sem); } -- GitLab From f9acd7fa80be6ee14aecdc54429f2a48e56224e8 Mon Sep 17 00:00:00 2001 From: Pratyush Yadav Date: Mon, 25 May 2020 14:45:31 +0530 Subject: [PATCH 0860/1971] mtd: spi-nor: sfdp: default to addr_width of 3 for configurable widths JESD216D.01 says that when the address width can be 3 or 4, it defaults to 3 and enters 4-byte mode when given the appropriate command. So, when we see a configurable width, default to 3 and let flash that default to 4 change it in a post-bfpt fixup. This fixes SMPT parsing for flashes with configurable address width. If the SMPT descriptor advertises variable address width, we use nor->addr_width as the address width. But since it was not set to any value from the SFDP table, the read command uses an address width of 0, resulting in an incorrect read being issued. Signed-off-by: Pratyush Yadav Signed-off-by: Tudor Ambarus --- drivers/mtd/spi-nor/sfdp.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/mtd/spi-nor/sfdp.c b/drivers/mtd/spi-nor/sfdp.c index f6038d3a36842..688aa36e863a7 100644 --- a/drivers/mtd/spi-nor/sfdp.c +++ b/drivers/mtd/spi-nor/sfdp.c @@ -460,6 +460,7 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor, /* Number of address bytes. */ switch (bfpt.dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) { case BFPT_DWORD1_ADDRESS_BYTES_3_ONLY: + case BFPT_DWORD1_ADDRESS_BYTES_3_OR_4: nor->addr_width = 3; break; -- GitLab From aec6adc560a0fa090659f23f7501e77d18306b84 Mon Sep 17 00:00:00 2001 From: Serge Semin Date: Wed, 27 May 2020 01:20:53 +0300 Subject: [PATCH 0861/1971] dt-bindings: clk: Add Baikal-T1 CCU PLLs binding Baikal-T1 Clocks Control Unit is responsible for transformation of a signal coming from an external oscillator into clocks of various frequencies to propagate them then to the corresponding clocks consumers (either individual IP-blocks or clock domains). In order to create a set of high-frequency clocks the external signal is firstly handled by the embedded into CCU PLLs. So the corresponding dts-node is just a normal clock-provider node with standard set of properties. Note as being part of the Baikal-T1 System Controller its DT node is supposed to be a child the system controller node. 
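As a quick sanity check of the divider scheme described in the binding below, the output rate is simply Fref / NR * NF / OD. A small self-contained example; the numbers are picked for illustration and are not the SoC defaults:

    /* Fout = Fref / NR * NF / OD */
    static unsigned long ccu_pll_rate(unsigned long fref, unsigned long nr,
                                      unsigned long nf, unsigned long od)
    {
            return fref / nr * nf / od;
    }

    /* ccu_pll_rate(25000000, 1, 96, 2) == 1200000000 */

So a 25 MHz reference with NR = 1, NF = 96 and OD = 2 yields a 1.2 GHz output while keeping the VCO (2.4 GHz) inside the 700 MHz - 3.5 GHz window the hardware requires.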
Signed-off-by: Serge Semin Cc: Alexey Malahov Cc: Arnd Bergmann Cc: linux-mips@vger.kernel.org Link: https://lore.kernel.org/r/20200526222056.18072-2-Sergey.Semin@baikalelectronics.ru Reviewed-by: Rob Herring Signed-off-by: Stephen Boyd --- .../bindings/clock/baikal,bt1-ccu-pll.yaml | 131 ++++++++++++++++++ include/dt-bindings/clock/bt1-ccu.h | 16 +++ 2 files changed, 147 insertions(+) create mode 100644 Documentation/devicetree/bindings/clock/baikal,bt1-ccu-pll.yaml create mode 100644 include/dt-bindings/clock/bt1-ccu.h diff --git a/Documentation/devicetree/bindings/clock/baikal,bt1-ccu-pll.yaml b/Documentation/devicetree/bindings/clock/baikal,bt1-ccu-pll.yaml new file mode 100644 index 0000000000000..97131bfa6f87e --- /dev/null +++ b/Documentation/devicetree/bindings/clock/baikal,bt1-ccu-pll.yaml @@ -0,0 +1,131 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +# Copyright (C) 2020 BAIKAL ELECTRONICS, JSC +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/clock/baikal,bt1-ccu-pll.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Baikal-T1 Clock Control Unit PLL + +maintainers: + - Serge Semin + +description: | + Clocks Control Unit is the core of Baikal-T1 SoC System Controller + responsible for the chip subsystems clocking and resetting. The CCU is + connected with an external fixed rate oscillator, which signal is transformed + into clocks of various frequencies and then propagated to either individual + IP-blocks or to groups of blocks (clock domains). The transformation is done + by means of PLLs and gateable/non-gateable dividers embedded into the CCU. + It's logically divided into the next components: + 1) External oscillator (normally XTAL's 25 MHz crystal oscillator, but + in general can provide any frequency supported by the CCU PLLs). + 2) PLLs clocks generators (PLLs) - described in this binding file. + 3) AXI-bus clock dividers (AXI). + 4) System devices reference clock dividers (SYS). + which are connected with each other as shown on the next figure: + + +---------------+ + | Baikal-T1 CCU | + | +----+------|- MIPS P5600 cores + | +-|PLLs|------|- DDR controller + | | +----+ | + +----+ | | | | | + |XTAL|--|-+ | | +---+-| + +----+ | | | +-|AXI|-|- AXI-bus + | | | +---+-| + | | | | + | | +----+---+-|- APB-bus + | +-------|SYS|-|- Low-speed Devices + | +---+-|- High-speed Devices + +---------------+ + + Each CCU sub-block is represented as a separate dts-node and has an + individual driver to be bound with. + + In order to create signals of wide range frequencies the external oscillator + output is primarily connected to a set of CCU PLLs. There are five PLLs + to create a clock for the MIPS P5600 cores, the embedded DDR controller, + SATA, Ethernet and PCIe domains. The last three domains though named by the + biggest system interfaces in fact include nearly all of the rest SoC + peripherals. Each of the PLLs is based on True Circuits TSMC CLN28HPM core + with an interface wrapper (so called safe PLL' clocks switcher) to simplify + the PLL configuration procedure. 
The PLLs work as depicted on the next + diagram: + + +--------------------------+ + | | + +-->+---+ +---+ +---+ | +---+ 0|\ + CLKF--->|/NF|--->|PFD|...|VCO|-+->|/OD|--->| | + +---+ +->+---+ +---+ /->+---+ | |--->CLKOUT + CLKOD---------C----------------+ 1| | + +--------C--------------------------->|/ + | | ^ + Rclk-+->+---+ | | + CLKR--->|/NR|-+ | + +---+ | + BYPASS--------------------------------------+ + BWADJ---> + + where Rclk is the reference clock coming from XTAL, NR - reference clock + divider, NF - PLL clock multiplier, OD - VCO output clock divider, CLKOUT - + output clock, BWADJ is the PLL bandwidth adjustment parameter. At this moment + the binding supports the PLL dividers configuration in accordance with a + requested rate, while bypassing and bandwidth adjustment settings can be + added in future if it gets to be necessary. + + The PLLs CLKOUT is then either directly connected with the corresponding + clocks consumer (like P5600 cores or DDR controller) or passed over a CCU + divider to create a signal required for the clock domain. + + The CCU PLL dts-node uses the common clock bindings with no custom + parameters. The list of exported clocks can be found in + 'include/dt-bindings/clock/bt1-ccu.h'. Since CCU PLL is a part of the + Baikal-T1 SoC System Controller its DT node is supposed to be a child of + later one. + +properties: + compatible: + const: baikal,bt1-ccu-pll + + reg: + maxItems: 1 + + "#clock-cells": + const: 1 + + clocks: + description: External reference clock + maxItems: 1 + + clock-names: + const: ref_clk + +unevaluatedProperties: false + +required: + - compatible + - "#clock-cells" + - clocks + - clock-names + +examples: + # Clock Control Unit PLL node: + - | + clock-controller@1f04d000 { + compatible = "baikal,bt1-ccu-pll"; + reg = <0x1f04d000 0x028>; + #clock-cells = <1>; + + clocks = <&clk25m>; + clock-names = "ref_clk"; + }; + # Required external oscillator: + - | + clk25m: clock-oscillator-25m { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <25000000>; + clock-output-names = "clk25m"; + }; +... diff --git a/include/dt-bindings/clock/bt1-ccu.h b/include/dt-bindings/clock/bt1-ccu.h new file mode 100644 index 0000000000000..931a4bea67c02 --- /dev/null +++ b/include/dt-bindings/clock/bt1-ccu.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC + * + * Baikal-T1 CCU clock indices + */ +#ifndef __DT_BINDINGS_CLOCK_BT1_CCU_H +#define __DT_BINDINGS_CLOCK_BT1_CCU_H + +#define CCU_CPU_PLL 0 +#define CCU_SATA_PLL 1 +#define CCU_DDR_PLL 2 +#define CCU_PCIE_PLL 3 +#define CCU_ETH_PLL 4 + +#endif /* __DT_BINDINGS_CLOCK_BT1_CCU_H */ -- GitLab From 11ea09b9e2ed0d9680a890f8fffa204dcb1a2654 Mon Sep 17 00:00:00 2001 From: Serge Semin Date: Wed, 27 May 2020 01:20:54 +0300 Subject: [PATCH 0862/1971] dt-bindings: clk: Add Baikal-T1 CCU Dividers binding After being gained by the CCU PLLs the signals must be transformed to be suitable for the clock-consumers. This is done by a set of dividers embedded into the CCU. A first block of dividers is used to create reference clocks for AXI-bus of high-speed peripheral IP-cores of the chip. The second block dividers alter the PLLs output signals to be then consumed by SoC peripheral devices. Both block DT nodes are ordinary clock-providers with standard set of properties supported. But in addition to that each clock provider can be used to reset the corresponding clock domain. 
This makes the AXI-bus and System Devices CCU DT nodes to be also reset-providers. Signed-off-by: Serge Semin Cc: Alexey Malahov Cc: Arnd Bergmann Cc: linux-mips@vger.kernel.org Link: https://lore.kernel.org/r/20200526222056.18072-3-Sergey.Semin@baikalelectronics.ru Reviewed-by: Rob Herring Signed-off-by: Stephen Boyd --- .../bindings/clock/baikal,bt1-ccu-div.yaml | 188 ++++++++++++++++++ include/dt-bindings/clock/bt1-ccu.h | 32 +++ include/dt-bindings/reset/bt1-ccu.h | 25 +++ 3 files changed, 245 insertions(+) create mode 100644 Documentation/devicetree/bindings/clock/baikal,bt1-ccu-div.yaml create mode 100644 include/dt-bindings/reset/bt1-ccu.h diff --git a/Documentation/devicetree/bindings/clock/baikal,bt1-ccu-div.yaml b/Documentation/devicetree/bindings/clock/baikal,bt1-ccu-div.yaml new file mode 100644 index 0000000000000..2821425ee4459 --- /dev/null +++ b/Documentation/devicetree/bindings/clock/baikal,bt1-ccu-div.yaml @@ -0,0 +1,188 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +# Copyright (C) 2020 BAIKAL ELECTRONICS, JSC +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/clock/baikal,bt1-ccu-div.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Baikal-T1 Clock Control Unit Dividers + +maintainers: + - Serge Semin + +description: | + Clocks Control Unit is the core of Baikal-T1 SoC System Controller + responsible for the chip subsystems clocking and resetting. The CCU is + connected with an external fixed rate oscillator, which signal is transformed + into clocks of various frequencies and then propagated to either individual + IP-blocks or to groups of blocks (clock domains). The transformation is done + by means of an embedded into CCU PLLs and gateable/non-gateable dividers. The + later ones are described in this binding. Each clock domain can be also + individually reset by using the domain clocks divider configuration + registers. Baikal-T1 CCU is logically divided into the next components: + 1) External oscillator (normally XTAL's 25 MHz crystal oscillator, but + in general can provide any frequency supported by the CCU PLLs). + 2) PLLs clocks generators (PLLs). + 3) AXI-bus clock dividers (AXI) - described in this binding file. + 4) System devices reference clock dividers (SYS) - described in this binding + file. + which are connected with each other as shown on the next figure: + + +---------------+ + | Baikal-T1 CCU | + | +----+------|- MIPS P5600 cores + | +-|PLLs|------|- DDR controller + | | +----+ | + +----+ | | | | | + |XTAL|--|-+ | | +---+-| + +----+ | | | +-|AXI|-|- AXI-bus + | | | +---+-| + | | | | + | | +----+---+-|- APB-bus + | +-------|SYS|-|- Low-speed Devices + | +---+-|- High-speed Devices + +---------------+ + + Each sub-block is represented as a separate DT node and has an individual + driver to be bound with. + + In order to create signals of wide range frequencies the external oscillator + output is primarily connected to a set of CCU PLLs. Some of PLLs CLKOUT are + then passed over CCU dividers to create signals required for the target clock + domain (like AXI-bus or System Device consumers). 
The dividers have the + following structure: + + +--------------+ + CLKIN --|->+----+ 1|\ | + SETCLK--|--|/DIV|->| | | + CLKDIV--|--| | | |-|->CLKLOUT + LOCK----|--+----+ | | | + | |/ | + | | | + EN------|-----------+ | + RST-----|--------------|->RSTOUT + +--------------+ + + where CLKIN is the reference clock coming either from CCU PLLs or from an + external clock oscillator, SETCLK - a command to update the output clock in + accordance with a set divider, CLKDIV - clocks divider, LOCK - a signal of + the output clock stabilization, EN - enable/disable the divider block, + RST/RSTOUT - reset clocks domain signal. Depending on the consumer IP-core + peculiarities the dividers may lack of some functionality depicted on the + figure above (like EN, CLKDIV/LOCK/SETCLK). In this case the corresponding + clock provider just doesn't expose either switching functions, or the rate + configuration, or both of them. + + The clock dividers, which output clock is then consumed by the SoC individual + devices, are united into a single clocks provider called System Devices CCU. + Similarly the dividers with output clocks utilized as AXI-bus reference clocks + are called AXI-bus CCU. Both of them use the common clock bindings with no + custom properties. The list of exported clocks and reset signals can be found + in the files: 'include/dt-bindings/clock/bt1-ccu.h' and + 'include/dt-bindings/reset/bt1-ccu.h'. Since System Devices and AXI-bus CCU + are a part of the Baikal-T1 SoC System Controller their DT nodes are supposed + to be a children of later one. + +if: + properties: + compatible: + contains: + const: baikal,bt1-ccu-axi + +then: + properties: + clocks: + items: + - description: CCU SATA PLL output clock + - description: CCU PCIe PLL output clock + - description: CCU Ethernet PLL output clock + + clock-names: + items: + - const: sata_clk + - const: pcie_clk + - const: eth_clk + +else: + properties: + clocks: + items: + - description: External reference clock + - description: CCU SATA PLL output clock + - description: CCU PCIe PLL output clock + - description: CCU Ethernet PLL output clock + + clock-names: + items: + - const: ref_clk + - const: sata_clk + - const: pcie_clk + - const: eth_clk + +properties: + compatible: + enum: + - baikal,bt1-ccu-axi + - baikal,bt1-ccu-sys + + reg: + maxItems: 1 + + "#clock-cells": + const: 1 + + "#reset-cells": + const: 1 + +unevaluatedProperties: false + +required: + - compatible + - "#clock-cells" + - clocks + - clock-names + +examples: + # AXI-bus Clock Control Unit node: + - | + #include + + clock-controller@1f04d030 { + compatible = "baikal,bt1-ccu-axi"; + reg = <0x1f04d030 0x030>; + #clock-cells = <1>; + #reset-cells = <1>; + + clocks = <&ccu_pll CCU_SATA_PLL>, + <&ccu_pll CCU_PCIE_PLL>, + <&ccu_pll CCU_ETH_PLL>; + clock-names = "sata_clk", "pcie_clk", "eth_clk"; + }; + # System Devices Clock Control Unit node: + - | + #include + + clock-controller@1f04d060 { + compatible = "baikal,bt1-ccu-sys"; + reg = <0x1f04d060 0x0a0>; + #clock-cells = <1>; + #reset-cells = <1>; + + clocks = <&clk25m>, + <&ccu_pll CCU_SATA_PLL>, + <&ccu_pll CCU_PCIE_PLL>, + <&ccu_pll CCU_ETH_PLL>; + clock-names = "ref_clk", "sata_clk", "pcie_clk", + "eth_clk"; + }; + # Required Clock Control Unit PLL node: + - | + ccu_pll: clock-controller@1f04d000 { + compatible = "baikal,bt1-ccu-pll"; + reg = <0x1f04d000 0x028>; + #clock-cells = <1>; + + clocks = <&clk25m>; + clock-names = "ref_clk"; + }; +... 
diff --git a/include/dt-bindings/clock/bt1-ccu.h b/include/dt-bindings/clock/bt1-ccu.h index 931a4bea67c02..5f166d27a00ab 100644 --- a/include/dt-bindings/clock/bt1-ccu.h +++ b/include/dt-bindings/clock/bt1-ccu.h @@ -13,4 +13,36 @@ #define CCU_PCIE_PLL 3 #define CCU_ETH_PLL 4 +#define CCU_AXI_MAIN_CLK 0 +#define CCU_AXI_DDR_CLK 1 +#define CCU_AXI_SATA_CLK 2 +#define CCU_AXI_GMAC0_CLK 3 +#define CCU_AXI_GMAC1_CLK 4 +#define CCU_AXI_XGMAC_CLK 5 +#define CCU_AXI_PCIE_M_CLK 6 +#define CCU_AXI_PCIE_S_CLK 7 +#define CCU_AXI_USB_CLK 8 +#define CCU_AXI_HWA_CLK 9 +#define CCU_AXI_SRAM_CLK 10 + +#define CCU_SYS_SATA_REF_CLK 0 +#define CCU_SYS_APB_CLK 1 +#define CCU_SYS_GMAC0_TX_CLK 2 +#define CCU_SYS_GMAC0_PTP_CLK 3 +#define CCU_SYS_GMAC1_TX_CLK 4 +#define CCU_SYS_GMAC1_PTP_CLK 5 +#define CCU_SYS_XGMAC_REF_CLK 6 +#define CCU_SYS_XGMAC_PTP_CLK 7 +#define CCU_SYS_USB_CLK 8 +#define CCU_SYS_PVT_CLK 9 +#define CCU_SYS_HWA_CLK 10 +#define CCU_SYS_UART_CLK 11 +#define CCU_SYS_I2C1_CLK 12 +#define CCU_SYS_I2C2_CLK 13 +#define CCU_SYS_GPIO_CLK 14 +#define CCU_SYS_TIMER0_CLK 15 +#define CCU_SYS_TIMER1_CLK 16 +#define CCU_SYS_TIMER2_CLK 17 +#define CCU_SYS_WDT_CLK 18 + #endif /* __DT_BINDINGS_CLOCK_BT1_CCU_H */ diff --git a/include/dt-bindings/reset/bt1-ccu.h b/include/dt-bindings/reset/bt1-ccu.h new file mode 100644 index 0000000000000..3578e83026bc8 --- /dev/null +++ b/include/dt-bindings/reset/bt1-ccu.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC + * + * Baikal-T1 CCU reset indices + */ +#ifndef __DT_BINDINGS_RESET_BT1_CCU_H +#define __DT_BINDINGS_RESET_BT1_CCU_H + +#define CCU_AXI_MAIN_RST 0 +#define CCU_AXI_DDR_RST 1 +#define CCU_AXI_SATA_RST 2 +#define CCU_AXI_GMAC0_RST 3 +#define CCU_AXI_GMAC1_RST 4 +#define CCU_AXI_XGMAC_RST 5 +#define CCU_AXI_PCIE_M_RST 6 +#define CCU_AXI_PCIE_S_RST 7 +#define CCU_AXI_USB_RST 8 +#define CCU_AXI_HWA_RST 9 +#define CCU_AXI_SRAM_RST 10 + +#define CCU_SYS_SATA_REF_RST 0 +#define CCU_SYS_APB_RST 1 + +#endif /* __DT_BINDINGS_RESET_BT1_CCU_H */ -- GitLab From b7d950b9281f1dc5a5e37eaaf04cf33067e575f6 Mon Sep 17 00:00:00 2001 From: Serge Semin Date: Wed, 27 May 2020 01:20:55 +0300 Subject: [PATCH 0863/1971] clk: Add Baikal-T1 CCU PLLs driver Baikal-T1 is supposed to be supplied with a high-frequency external oscillator. But in order to create signals suitable for each IP-block embedded into the SoC the oscillator output is primarily connected to a set of CCU PLLs. There are five of them to create clocks for the MIPS P5600 cores, an embedded DDR controller, SATA, Ethernet and PCIe domains. The last three domains though named by the biggest system interfaces in fact include nearly all of the rest SoC peripherals. Each of the PLLs is based on True Circuits TSMC CLN28HPM IP-core with an interface wrapper (so called safe PLL' clocks switcher) to simplify the PLL configuration procedure. This driver creates the of-based hardware clocks to use them then in the corresponding subsystems. In order to simplify the driver code we split the functionality up into the PLLs clocks operations and hardware clocks declaration/registration procedures. Even though the PLLs are based on the same IP-core, they may have some differences. In particular, some CCU PLLs support the output clock change without gating them (like CPU or PCIe PLLs), while the others don't, some CCU PLLs are critical and aren't supposed to be gated. In order to cover all of these cases the hardware clocks driver is designed with an info-descriptor pattern. 
So there are special static descriptors declared for each PLL, which is then used to create a hardware clock with proper operations. Additionally debugfs-files are provided for each PLL' field to make sure the implemented rate-PLLs-dividers calculation algorithm is correct. Signed-off-by: Serge Semin Cc: Alexey Malahov Cc: Arnd Bergmann Cc: Rob Herring Cc: linux-mips@vger.kernel.org Cc: devicetree@vger.kernel.org Link: https://lore.kernel.org/r/20200526222056.18072-4-Sergey.Semin@baikalelectronics.ru [sboyd@kernel.org: Silence sparse warning about initializing structs with NULL vs. integer] Signed-off-by: Stephen Boyd --- drivers/clk/Kconfig | 1 + drivers/clk/Makefile | 1 + drivers/clk/baikal-t1/Kconfig | 30 ++ drivers/clk/baikal-t1/Makefile | 2 + drivers/clk/baikal-t1/ccu-pll.c | 558 ++++++++++++++++++++++++++++ drivers/clk/baikal-t1/ccu-pll.h | 64 ++++ drivers/clk/baikal-t1/clk-ccu-pll.c | 204 ++++++++++ 7 files changed, 860 insertions(+) create mode 100644 drivers/clk/baikal-t1/Kconfig create mode 100644 drivers/clk/baikal-t1/Makefile create mode 100644 drivers/clk/baikal-t1/ccu-pll.c create mode 100644 drivers/clk/baikal-t1/ccu-pll.h create mode 100644 drivers/clk/baikal-t1/clk-ccu-pll.c diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index bcb257baed06d..b32da34ebcf9d 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -341,6 +341,7 @@ config COMMON_CLK_FIXED_MMIO source "drivers/clk/actions/Kconfig" source "drivers/clk/analogbits/Kconfig" +source "drivers/clk/baikal-t1/Kconfig" source "drivers/clk/bcm/Kconfig" source "drivers/clk/hisilicon/Kconfig" source "drivers/clk/imgtec/Kconfig" diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile index f4169cc2fd318..1496045d4e01b 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile @@ -75,6 +75,7 @@ obj-y += analogbits/ obj-$(CONFIG_COMMON_CLK_AT91) += at91/ obj-$(CONFIG_ARCH_ARTPEC) += axis/ obj-$(CONFIG_ARC_PLAT_AXS10X) += axs10x/ +obj-$(CONFIG_CLK_BAIKAL_T1) += baikal-t1/ obj-y += bcm/ obj-$(CONFIG_ARCH_BERLIN) += berlin/ obj-$(CONFIG_ARCH_DAVINCI) += davinci/ diff --git a/drivers/clk/baikal-t1/Kconfig b/drivers/clk/baikal-t1/Kconfig new file mode 100644 index 0000000000000..00398ee916dc3 --- /dev/null +++ b/drivers/clk/baikal-t1/Kconfig @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: GPL-2.0-only +config CLK_BAIKAL_T1 + bool "Baikal-T1 Clocks Control Unit interface" + depends on (MIPS_BAIKAL_T1 && OF) || COMPILE_TEST + default MIPS_BAIKAL_T1 + help + Clocks Control Unit is the core of Baikal-T1 SoC System Controller + responsible for the chip subsystems clocking and resetting. It + consists of multiple global clock domains, which can be reset by + means of the CCU control registers. These domains and devices placed + in them are fed with clocks generated by a hierarchy of PLLs, + configurable and fixed clock dividers. Enable this option to be able + to select Baikal-T1 CCU PLLs and Dividers drivers. + +if CLK_BAIKAL_T1 + +config CLK_BT1_CCU_PLL + bool "Baikal-T1 CCU PLLs support" + select MFD_SYSCON + default MIPS_BAIKAL_T1 + help + Enable this to support the PLLs embedded into the Baikal-T1 SoC + System Controller. These are five PLLs placed at the root of the + clocks hierarchy, right after an external reference oscillator + (normally of 25MHz). They are used to generate high frequency + signals, which are either directly wired to the consumers (like + CPUs, DDR, etc.) or passed over the clock dividers to be only + then used as an individual reference clock of a target device. 
+ +endif diff --git a/drivers/clk/baikal-t1/Makefile b/drivers/clk/baikal-t1/Makefile new file mode 100644 index 0000000000000..4a612bbacc374 --- /dev/null +++ b/drivers/clk/baikal-t1/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_CLK_BT1_CCU_PLL) += ccu-pll.o clk-ccu-pll.o diff --git a/drivers/clk/baikal-t1/ccu-pll.c b/drivers/clk/baikal-t1/ccu-pll.c new file mode 100644 index 0000000000000..13ef28001439e --- /dev/null +++ b/drivers/clk/baikal-t1/ccu-pll.c @@ -0,0 +1,558 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC + * + * Authors: + * Serge Semin + * Dmitry Dunaev + * + * Baikal-T1 CCU PLL interface driver + */ + +#define pr_fmt(fmt) "bt1-ccu-pll: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ccu-pll.h" + +#define CCU_PLL_CTL 0x000 +#define CCU_PLL_CTL_EN BIT(0) +#define CCU_PLL_CTL_RST BIT(1) +#define CCU_PLL_CTL_CLKR_FLD 2 +#define CCU_PLL_CTL_CLKR_MASK GENMASK(7, CCU_PLL_CTL_CLKR_FLD) +#define CCU_PLL_CTL_CLKF_FLD 8 +#define CCU_PLL_CTL_CLKF_MASK GENMASK(20, CCU_PLL_CTL_CLKF_FLD) +#define CCU_PLL_CTL_CLKOD_FLD 21 +#define CCU_PLL_CTL_CLKOD_MASK GENMASK(24, CCU_PLL_CTL_CLKOD_FLD) +#define CCU_PLL_CTL_BYPASS BIT(30) +#define CCU_PLL_CTL_LOCK BIT(31) +#define CCU_PLL_CTL1 0x004 +#define CCU_PLL_CTL1_BWADJ_FLD 3 +#define CCU_PLL_CTL1_BWADJ_MASK GENMASK(14, CCU_PLL_CTL1_BWADJ_FLD) + +#define CCU_PLL_LOCK_CHECK_RETRIES 50 + +#define CCU_PLL_NR_MAX \ + ((CCU_PLL_CTL_CLKR_MASK >> CCU_PLL_CTL_CLKR_FLD) + 1) +#define CCU_PLL_NF_MAX \ + ((CCU_PLL_CTL_CLKF_MASK >> (CCU_PLL_CTL_CLKF_FLD + 1)) + 1) +#define CCU_PLL_OD_MAX \ + ((CCU_PLL_CTL_CLKOD_MASK >> CCU_PLL_CTL_CLKOD_FLD) + 1) +#define CCU_PLL_NB_MAX \ + ((CCU_PLL_CTL1_BWADJ_MASK >> CCU_PLL_CTL1_BWADJ_FLD) + 1) +#define CCU_PLL_FDIV_MIN 427000UL +#define CCU_PLL_FDIV_MAX 3500000000UL +#define CCU_PLL_FOUT_MIN 200000000UL +#define CCU_PLL_FOUT_MAX 2500000000UL +#define CCU_PLL_FVCO_MIN 700000000UL +#define CCU_PLL_FVCO_MAX 3500000000UL +#define CCU_PLL_CLKOD_FACTOR 2 + +static inline unsigned long ccu_pll_lock_delay_us(unsigned long ref_clk, + unsigned long nr) +{ + u64 us = 500ULL * nr * USEC_PER_SEC; + + do_div(us, ref_clk); + + return us; +} + +static inline unsigned long ccu_pll_calc_freq(unsigned long ref_clk, + unsigned long nr, + unsigned long nf, + unsigned long od) +{ + u64 tmp = ref_clk; + + do_div(tmp, nr); + tmp *= nf; + do_div(tmp, od); + + return tmp; +} + +static int ccu_pll_reset(struct ccu_pll *pll, unsigned long ref_clk, + unsigned long nr) +{ + unsigned long ud, ut; + u32 val; + + ud = ccu_pll_lock_delay_us(ref_clk, nr); + ut = ud * CCU_PLL_LOCK_CHECK_RETRIES; + + regmap_update_bits(pll->sys_regs, pll->reg_ctl, + CCU_PLL_CTL_RST, CCU_PLL_CTL_RST); + + return regmap_read_poll_timeout_atomic(pll->sys_regs, pll->reg_ctl, val, + val & CCU_PLL_CTL_LOCK, ud, ut); +} + +static int ccu_pll_enable(struct clk_hw *hw) +{ + struct clk_hw *parent_hw = clk_hw_get_parent(hw); + struct ccu_pll *pll = to_ccu_pll(hw); + unsigned long flags; + u32 val = 0; + int ret; + + if (!parent_hw) { + pr_err("Can't enable '%s' with no parent", clk_hw_get_name(hw)); + return -EINVAL; + } + + regmap_read(pll->sys_regs, pll->reg_ctl, &val); + if (val & CCU_PLL_CTL_EN) + return 0; + + spin_lock_irqsave(&pll->lock, flags); + regmap_write(pll->sys_regs, pll->reg_ctl, val | CCU_PLL_CTL_EN); + ret = ccu_pll_reset(pll, clk_hw_get_rate(parent_hw), + 
FIELD_GET(CCU_PLL_CTL_CLKR_MASK, val) + 1); + spin_unlock_irqrestore(&pll->lock, flags); + if (ret) + pr_err("PLL '%s' reset timed out\n", clk_hw_get_name(hw)); + + return ret; +} + +static void ccu_pll_disable(struct clk_hw *hw) +{ + struct ccu_pll *pll = to_ccu_pll(hw); + unsigned long flags; + + spin_lock_irqsave(&pll->lock, flags); + regmap_update_bits(pll->sys_regs, pll->reg_ctl, CCU_PLL_CTL_EN, 0); + spin_unlock_irqrestore(&pll->lock, flags); +} + +static int ccu_pll_is_enabled(struct clk_hw *hw) +{ + struct ccu_pll *pll = to_ccu_pll(hw); + u32 val = 0; + + regmap_read(pll->sys_regs, pll->reg_ctl, &val); + + return !!(val & CCU_PLL_CTL_EN); +} + +static unsigned long ccu_pll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct ccu_pll *pll = to_ccu_pll(hw); + unsigned long nr, nf, od; + u32 val = 0; + + regmap_read(pll->sys_regs, pll->reg_ctl, &val); + nr = FIELD_GET(CCU_PLL_CTL_CLKR_MASK, val) + 1; + nf = FIELD_GET(CCU_PLL_CTL_CLKF_MASK, val) + 1; + od = FIELD_GET(CCU_PLL_CTL_CLKOD_MASK, val) + 1; + + return ccu_pll_calc_freq(parent_rate, nr, nf, od); +} + +static void ccu_pll_calc_factors(unsigned long rate, unsigned long parent_rate, + unsigned long *nr, unsigned long *nf, + unsigned long *od) +{ + unsigned long err, freq, min_err = ULONG_MAX; + unsigned long num, denom, n1, d1, nri; + unsigned long nr_max, nf_max, od_max; + + /* + * Make sure PLL is working with valid input signal (Fdiv). If + * you want to speed the function up just reduce CCU_PLL_NR_MAX. + * This will cause a worse approximation though. + */ + nri = (parent_rate / CCU_PLL_FDIV_MAX) + 1; + nr_max = min(parent_rate / CCU_PLL_FDIV_MIN, CCU_PLL_NR_MAX); + + /* + * Find a closest [nr;nf;od] vector taking into account the + * limitations like: 1) 700MHz <= Fvco <= 3.5GHz, 2) PLL Od is + * either 1 or even number within the acceptable range (alas 1s + * is also excluded by the next loop). + */ + for (; nri <= nr_max; ++nri) { + /* Use Od factor to fulfill the limitation 2). */ + num = CCU_PLL_CLKOD_FACTOR * rate; + denom = parent_rate / nri; + + /* + * Make sure Fvco is within the acceptable range to fulfill + * the condition 1). Note due to the CCU_PLL_CLKOD_FACTOR value + * the actual upper limit is also divided by that factor. + * It's not big problem for us since practically there is no + * need in clocks with that high frequency. + */ + nf_max = min(CCU_PLL_FVCO_MAX / denom, CCU_PLL_NF_MAX); + od_max = CCU_PLL_OD_MAX / CCU_PLL_CLKOD_FACTOR; + + /* + * Bypass the out-of-bound values, which can't be properly + * handled by the rational fraction approximation algorithm. + */ + if (num / denom >= nf_max) { + n1 = nf_max; + d1 = 1; + } else if (denom / num >= od_max) { + n1 = 1; + d1 = od_max; + } else { + rational_best_approximation(num, denom, nf_max, od_max, + &n1, &d1); + } + + /* Select the best approximation of the target rate. */ + freq = ccu_pll_calc_freq(parent_rate, nri, n1, d1); + err = abs((int64_t)freq - num); + if (err < min_err) { + min_err = err; + *nr = nri; + *nf = n1; + *od = CCU_PLL_CLKOD_FACTOR * d1; + } + } +} + +static long ccu_pll_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + unsigned long nr = 1, nf = 1, od = 1; + + ccu_pll_calc_factors(rate, *parent_rate, &nr, &nf, &od); + + return ccu_pll_calc_freq(*parent_rate, nr, nf, od); +} + +/* + * This method is used for PLLs, which support the on-the-fly dividers + * adjustment. So there is no need in gating such clocks. 
+ */ +static int ccu_pll_set_rate_reset(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct ccu_pll *pll = to_ccu_pll(hw); + unsigned long nr, nf, od; + unsigned long flags; + u32 mask, val; + int ret; + + ccu_pll_calc_factors(rate, parent_rate, &nr, &nf, &od); + + mask = CCU_PLL_CTL_CLKR_MASK | CCU_PLL_CTL_CLKF_MASK | + CCU_PLL_CTL_CLKOD_MASK; + val = FIELD_PREP(CCU_PLL_CTL_CLKR_MASK, nr - 1) | + FIELD_PREP(CCU_PLL_CTL_CLKF_MASK, nf - 1) | + FIELD_PREP(CCU_PLL_CTL_CLKOD_MASK, od - 1); + + spin_lock_irqsave(&pll->lock, flags); + regmap_update_bits(pll->sys_regs, pll->reg_ctl, mask, val); + ret = ccu_pll_reset(pll, parent_rate, nr); + spin_unlock_irqrestore(&pll->lock, flags); + if (ret) + pr_err("PLL '%s' reset timed out\n", clk_hw_get_name(hw)); + + return ret; +} + +/* + * This method is used for PLLs, which don't support the on-the-fly dividers + * adjustment. So the corresponding clocks are supposed to be gated first. + */ +static int ccu_pll_set_rate_norst(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct ccu_pll *pll = to_ccu_pll(hw); + unsigned long nr, nf, od; + unsigned long flags; + u32 mask, val; + + ccu_pll_calc_factors(rate, parent_rate, &nr, &nf, &od); + + /* + * Disable PLL if it was enabled by default or left enabled by the + * system bootloader. + */ + mask = CCU_PLL_CTL_CLKR_MASK | CCU_PLL_CTL_CLKF_MASK | + CCU_PLL_CTL_CLKOD_MASK | CCU_PLL_CTL_EN; + val = FIELD_PREP(CCU_PLL_CTL_CLKR_MASK, nr - 1) | + FIELD_PREP(CCU_PLL_CTL_CLKF_MASK, nf - 1) | + FIELD_PREP(CCU_PLL_CTL_CLKOD_MASK, od - 1); + + spin_lock_irqsave(&pll->lock, flags); + regmap_update_bits(pll->sys_regs, pll->reg_ctl, mask, val); + spin_unlock_irqrestore(&pll->lock, flags); + + return 0; +} + +#ifdef CONFIG_DEBUG_FS + +struct ccu_pll_dbgfs_bit { + struct ccu_pll *pll; + const char *name; + unsigned int reg; + u32 mask; +}; + +struct ccu_pll_dbgfs_fld { + struct ccu_pll *pll; + const char *name; + unsigned int reg; + unsigned int lsb; + u32 mask; + u32 min; + u32 max; +}; + +#define CCU_PLL_DBGFS_BIT_ATTR(_name, _reg, _mask) \ + { \ + .name = _name, \ + .reg = _reg, \ + .mask = _mask \ + } + +#define CCU_PLL_DBGFS_FLD_ATTR(_name, _reg, _lsb, _mask, _min, _max) \ + { \ + .name = _name, \ + .reg = _reg, \ + .lsb = _lsb, \ + .mask = _mask, \ + .min = _min, \ + .max = _max \ + } + +static const struct ccu_pll_dbgfs_bit ccu_pll_bits[] = { + CCU_PLL_DBGFS_BIT_ATTR("pll_en", CCU_PLL_CTL, CCU_PLL_CTL_EN), + CCU_PLL_DBGFS_BIT_ATTR("pll_rst", CCU_PLL_CTL, CCU_PLL_CTL_RST), + CCU_PLL_DBGFS_BIT_ATTR("pll_bypass", CCU_PLL_CTL, CCU_PLL_CTL_BYPASS), + CCU_PLL_DBGFS_BIT_ATTR("pll_lock", CCU_PLL_CTL, CCU_PLL_CTL_LOCK) +}; + +#define CCU_PLL_DBGFS_BIT_NUM ARRAY_SIZE(ccu_pll_bits) + +static const struct ccu_pll_dbgfs_fld ccu_pll_flds[] = { + CCU_PLL_DBGFS_FLD_ATTR("pll_nr", CCU_PLL_CTL, CCU_PLL_CTL_CLKR_FLD, + CCU_PLL_CTL_CLKR_MASK, 1, CCU_PLL_NR_MAX), + CCU_PLL_DBGFS_FLD_ATTR("pll_nf", CCU_PLL_CTL, CCU_PLL_CTL_CLKF_FLD, + CCU_PLL_CTL_CLKF_MASK, 1, CCU_PLL_NF_MAX), + CCU_PLL_DBGFS_FLD_ATTR("pll_od", CCU_PLL_CTL, CCU_PLL_CTL_CLKOD_FLD, + CCU_PLL_CTL_CLKOD_MASK, 1, CCU_PLL_OD_MAX), + CCU_PLL_DBGFS_FLD_ATTR("pll_nb", CCU_PLL_CTL1, CCU_PLL_CTL1_BWADJ_FLD, + CCU_PLL_CTL1_BWADJ_MASK, 1, CCU_PLL_NB_MAX) +}; + +#define CCU_PLL_DBGFS_FLD_NUM ARRAY_SIZE(ccu_pll_flds) + +/* + * It can be dangerous to change the PLL settings behind clock framework back, + * therefore we don't provide any kernel config based compile time option for + * this feature to enable. 
+ */ +#undef CCU_PLL_ALLOW_WRITE_DEBUGFS +#ifdef CCU_PLL_ALLOW_WRITE_DEBUGFS + +static int ccu_pll_dbgfs_bit_set(void *priv, u64 val) +{ + const struct ccu_pll_dbgfs_bit *bit = priv; + struct ccu_pll *pll = bit->pll; + unsigned long flags; + + spin_lock_irqsave(&pll->lock, flags); + regmap_update_bits(pll->sys_regs, pll->reg_ctl + bit->reg, + bit->mask, val ? bit->mask : 0); + spin_unlock_irqrestore(&pll->lock, flags); + + return 0; +} + +static int ccu_pll_dbgfs_fld_set(void *priv, u64 val) +{ + struct ccu_pll_dbgfs_fld *fld = priv; + struct ccu_pll *pll = fld->pll; + unsigned long flags; + u32 data; + + val = clamp_t(u64, val, fld->min, fld->max); + data = ((val - 1) << fld->lsb) & fld->mask; + + spin_lock_irqsave(&pll->lock, flags); + regmap_update_bits(pll->sys_regs, pll->reg_ctl + fld->reg, fld->mask, + data); + spin_unlock_irqrestore(&pll->lock, flags); + + return 0; +} + +#define ccu_pll_dbgfs_mode 0644 + +#else /* !CCU_PLL_ALLOW_WRITE_DEBUGFS */ + +#define ccu_pll_dbgfs_bit_set NULL +#define ccu_pll_dbgfs_fld_set NULL +#define ccu_pll_dbgfs_mode 0444 + +#endif /* !CCU_PLL_ALLOW_WRITE_DEBUGFS */ + +static int ccu_pll_dbgfs_bit_get(void *priv, u64 *val) +{ + struct ccu_pll_dbgfs_bit *bit = priv; + struct ccu_pll *pll = bit->pll; + u32 data = 0; + + regmap_read(pll->sys_regs, pll->reg_ctl + bit->reg, &data); + *val = !!(data & bit->mask); + + return 0; +} +DEFINE_DEBUGFS_ATTRIBUTE(ccu_pll_dbgfs_bit_fops, + ccu_pll_dbgfs_bit_get, ccu_pll_dbgfs_bit_set, "%llu\n"); + +static int ccu_pll_dbgfs_fld_get(void *priv, u64 *val) +{ + struct ccu_pll_dbgfs_fld *fld = priv; + struct ccu_pll *pll = fld->pll; + u32 data = 0; + + regmap_read(pll->sys_regs, pll->reg_ctl + fld->reg, &data); + *val = ((data & fld->mask) >> fld->lsb) + 1; + + return 0; +} +DEFINE_DEBUGFS_ATTRIBUTE(ccu_pll_dbgfs_fld_fops, + ccu_pll_dbgfs_fld_get, ccu_pll_dbgfs_fld_set, "%llu\n"); + +static void ccu_pll_debug_init(struct clk_hw *hw, struct dentry *dentry) +{ + struct ccu_pll *pll = to_ccu_pll(hw); + struct ccu_pll_dbgfs_bit *bits; + struct ccu_pll_dbgfs_fld *flds; + int idx; + + bits = kcalloc(CCU_PLL_DBGFS_BIT_NUM, sizeof(*bits), GFP_KERNEL); + if (!bits) + return; + + for (idx = 0; idx < CCU_PLL_DBGFS_BIT_NUM; ++idx) { + bits[idx] = ccu_pll_bits[idx]; + bits[idx].pll = pll; + + debugfs_create_file_unsafe(bits[idx].name, ccu_pll_dbgfs_mode, + dentry, &bits[idx], + &ccu_pll_dbgfs_bit_fops); + } + + flds = kcalloc(CCU_PLL_DBGFS_FLD_NUM, sizeof(*flds), GFP_KERNEL); + if (!flds) + return; + + for (idx = 0; idx < CCU_PLL_DBGFS_FLD_NUM; ++idx) { + flds[idx] = ccu_pll_flds[idx]; + flds[idx].pll = pll; + + debugfs_create_file_unsafe(flds[idx].name, ccu_pll_dbgfs_mode, + dentry, &flds[idx], + &ccu_pll_dbgfs_fld_fops); + } +} + +#else /* !CONFIG_DEBUG_FS */ + +#define ccu_pll_debug_init NULL + +#endif /* !CONFIG_DEBUG_FS */ + +static const struct clk_ops ccu_pll_gate_to_set_ops = { + .enable = ccu_pll_enable, + .disable = ccu_pll_disable, + .is_enabled = ccu_pll_is_enabled, + .recalc_rate = ccu_pll_recalc_rate, + .round_rate = ccu_pll_round_rate, + .set_rate = ccu_pll_set_rate_norst, + .debug_init = ccu_pll_debug_init +}; + +static const struct clk_ops ccu_pll_straight_set_ops = { + .enable = ccu_pll_enable, + .disable = ccu_pll_disable, + .is_enabled = ccu_pll_is_enabled, + .recalc_rate = ccu_pll_recalc_rate, + .round_rate = ccu_pll_round_rate, + .set_rate = ccu_pll_set_rate_reset, + .debug_init = ccu_pll_debug_init +}; + +struct ccu_pll *ccu_pll_hw_register(const struct ccu_pll_init_data *pll_init) +{ + struct clk_parent_data 
parent_data = { }; + struct clk_init_data hw_init = { }; + struct ccu_pll *pll; + int ret; + + if (!pll_init) + return ERR_PTR(-EINVAL); + + pll = kzalloc(sizeof(*pll), GFP_KERNEL); + if (!pll) + return ERR_PTR(-ENOMEM); + + /* + * Note since Baikal-T1 System Controller registers are MMIO-backed + * we won't check the regmap IO operations return status, because it + * must be zero anyway. + */ + pll->hw.init = &hw_init; + pll->reg_ctl = pll_init->base + CCU_PLL_CTL; + pll->reg_ctl1 = pll_init->base + CCU_PLL_CTL1; + pll->sys_regs = pll_init->sys_regs; + pll->id = pll_init->id; + spin_lock_init(&pll->lock); + + hw_init.name = pll_init->name; + hw_init.flags = pll_init->flags; + + if (hw_init.flags & CLK_SET_RATE_GATE) + hw_init.ops = &ccu_pll_gate_to_set_ops; + else + hw_init.ops = &ccu_pll_straight_set_ops; + + if (!pll_init->parent_name) { + ret = -EINVAL; + goto err_free_pll; + } + parent_data.fw_name = pll_init->parent_name; + hw_init.parent_data = &parent_data; + hw_init.num_parents = 1; + + ret = of_clk_hw_register(pll_init->np, &pll->hw); + if (ret) + goto err_free_pll; + + return pll; + +err_free_pll: + kfree(pll); + + return ERR_PTR(ret); +} + +void ccu_pll_hw_unregister(struct ccu_pll *pll) +{ + clk_hw_unregister(&pll->hw); + + kfree(pll); +} diff --git a/drivers/clk/baikal-t1/ccu-pll.h b/drivers/clk/baikal-t1/ccu-pll.h new file mode 100644 index 0000000000000..76cd9132a2196 --- /dev/null +++ b/drivers/clk/baikal-t1/ccu-pll.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC + * + * Baikal-T1 CCU PLL interface driver + */ +#ifndef __CLK_BT1_CCU_PLL_H__ +#define __CLK_BT1_CCU_PLL_H__ + +#include +#include +#include +#include +#include + +/* + * struct ccu_pll_init_data - CCU PLL initialization data + * @id: Clock private identifier. + * @name: Clocks name. + * @parent_name: Clocks parent name in a fw node. + * @base: PLL registers base address with respect to the sys_regs base. + * @sys_regs: Baikal-T1 System Controller registers map. + * @np: Pointer to the node describing the CCU PLLs. + * @flags: PLL clock flags. + */ +struct ccu_pll_init_data { + unsigned int id; + const char *name; + const char *parent_name; + unsigned int base; + struct regmap *sys_regs; + struct device_node *np; + unsigned long flags; +}; + +/* + * struct ccu_pll - CCU PLL descriptor + * @hw: clk_hw of the PLL. + * @id: Clock private identifier. + * @reg_ctl: PLL control register base. + * @reg_ctl1: PLL control1 register base. + * @sys_regs: Baikal-T1 System Controller registers map. + * @lock: PLL state change spin-lock. + */ +struct ccu_pll { + struct clk_hw hw; + unsigned int id; + unsigned int reg_ctl; + unsigned int reg_ctl1; + struct regmap *sys_regs; + spinlock_t lock; +}; +#define to_ccu_pll(_hw) container_of(_hw, struct ccu_pll, hw) + +static inline struct clk_hw *ccu_pll_get_clk_hw(struct ccu_pll *pll) +{ + return pll ? 
&pll->hw : NULL; +} + +struct ccu_pll *ccu_pll_hw_register(const struct ccu_pll_init_data *init); + +void ccu_pll_hw_unregister(struct ccu_pll *pll); + +#endif /* __CLK_BT1_CCU_PLL_H__ */ diff --git a/drivers/clk/baikal-t1/clk-ccu-pll.c b/drivers/clk/baikal-t1/clk-ccu-pll.c new file mode 100644 index 0000000000000..1eec8c0b8f50f --- /dev/null +++ b/drivers/clk/baikal-t1/clk-ccu-pll.c @@ -0,0 +1,204 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC + * + * Authors: + * Serge Semin + * Dmitry Dunaev + * + * Baikal-T1 CCU PLL clocks driver + */ + +#define pr_fmt(fmt) "bt1-ccu-pll: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "ccu-pll.h" + +#define CCU_CPU_PLL_BASE 0x000 +#define CCU_SATA_PLL_BASE 0x008 +#define CCU_DDR_PLL_BASE 0x010 +#define CCU_PCIE_PLL_BASE 0x018 +#define CCU_ETH_PLL_BASE 0x020 + +#define CCU_PLL_INFO(_id, _name, _pname, _base, _flags) \ + { \ + .id = _id, \ + .name = _name, \ + .parent_name = _pname, \ + .base = _base, \ + .flags = _flags \ + } + +#define CCU_PLL_NUM ARRAY_SIZE(pll_info) + +struct ccu_pll_info { + unsigned int id; + const char *name; + const char *parent_name; + unsigned int base; + unsigned long flags; +}; + +/* + * Mark as critical all PLLs except Ethernet one. CPU and DDR PLLs are sources + * of CPU cores and DDR controller reference clocks, due to which they + * obviously shouldn't be ever gated. SATA and PCIe PLLs are the parents of + * APB-bus and DDR controller AXI-bus clocks. If they are gated the system will + * be unusable. + */ +static const struct ccu_pll_info pll_info[] = { + CCU_PLL_INFO(CCU_CPU_PLL, "cpu_pll", "ref_clk", CCU_CPU_PLL_BASE, + CLK_IS_CRITICAL), + CCU_PLL_INFO(CCU_SATA_PLL, "sata_pll", "ref_clk", CCU_SATA_PLL_BASE, + CLK_IS_CRITICAL | CLK_SET_RATE_GATE), + CCU_PLL_INFO(CCU_DDR_PLL, "ddr_pll", "ref_clk", CCU_DDR_PLL_BASE, + CLK_IS_CRITICAL | CLK_SET_RATE_GATE), + CCU_PLL_INFO(CCU_PCIE_PLL, "pcie_pll", "ref_clk", CCU_PCIE_PLL_BASE, + CLK_IS_CRITICAL), + CCU_PLL_INFO(CCU_ETH_PLL, "eth_pll", "ref_clk", CCU_ETH_PLL_BASE, + CLK_SET_RATE_GATE) +}; + +struct ccu_pll_data { + struct device_node *np; + struct regmap *sys_regs; + struct ccu_pll *plls[CCU_PLL_NUM]; +}; + +static struct ccu_pll *ccu_pll_find_desc(struct ccu_pll_data *data, + unsigned int clk_id) +{ + struct ccu_pll *pll; + int idx; + + for (idx = 0; idx < CCU_PLL_NUM; ++idx) { + pll = data->plls[idx]; + if (pll && pll->id == clk_id) + return pll; + } + + return ERR_PTR(-EINVAL); +} + +static struct ccu_pll_data *ccu_pll_create_data(struct device_node *np) +{ + struct ccu_pll_data *data; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return ERR_PTR(-ENOMEM); + + data->np = np; + + return data; +} + +static void ccu_pll_free_data(struct ccu_pll_data *data) +{ + kfree(data); +} + +static int ccu_pll_find_sys_regs(struct ccu_pll_data *data) +{ + data->sys_regs = syscon_node_to_regmap(data->np->parent); + if (IS_ERR(data->sys_regs)) { + pr_err("Failed to find syscon regs for '%s'\n", + of_node_full_name(data->np)); + return PTR_ERR(data->sys_regs); + } + + return 0; +} + +static struct clk_hw *ccu_pll_of_clk_hw_get(struct of_phandle_args *clkspec, + void *priv) +{ + struct ccu_pll_data *data = priv; + struct ccu_pll *pll; + unsigned int clk_id; + + clk_id = clkspec->args[0]; + pll = ccu_pll_find_desc(data, clk_id); + if (IS_ERR(pll)) { + pr_info("Invalid PLL clock ID %d specified\n", clk_id); + return ERR_CAST(pll); + } + + return 
ccu_pll_get_clk_hw(pll); +} + +static int ccu_pll_clk_register(struct ccu_pll_data *data) +{ + int idx, ret; + + for (idx = 0; idx < CCU_PLL_NUM; ++idx) { + const struct ccu_pll_info *info = &pll_info[idx]; + struct ccu_pll_init_data init = {0}; + + init.id = info->id; + init.name = info->name; + init.parent_name = info->parent_name; + init.base = info->base; + init.sys_regs = data->sys_regs; + init.np = data->np; + init.flags = info->flags; + + data->plls[idx] = ccu_pll_hw_register(&init); + if (IS_ERR(data->plls[idx])) { + ret = PTR_ERR(data->plls[idx]); + pr_err("Couldn't register PLL hw '%s'\n", + init.name); + goto err_hw_unregister; + } + } + + ret = of_clk_add_hw_provider(data->np, ccu_pll_of_clk_hw_get, data); + if (ret) { + pr_err("Couldn't register PLL provider of '%s'\n", + of_node_full_name(data->np)); + goto err_hw_unregister; + } + + return 0; + +err_hw_unregister: + for (--idx; idx >= 0; --idx) + ccu_pll_hw_unregister(data->plls[idx]); + + return ret; +} + +static __init void ccu_pll_init(struct device_node *np) +{ + struct ccu_pll_data *data; + int ret; + + data = ccu_pll_create_data(np); + if (IS_ERR(data)) + return; + + ret = ccu_pll_find_sys_regs(data); + if (ret) + goto err_free_data; + + ret = ccu_pll_clk_register(data); + if (ret) + goto err_free_data; + + return; + +err_free_data: + ccu_pll_free_data(data); +} +CLK_OF_DECLARE(ccu_pll, "baikal,bt1-ccu-pll", ccu_pll_init); -- GitLab From 353afa3a8d2ef4a4b25db823ffd05d440b3530cb Mon Sep 17 00:00:00 2001 From: Serge Semin Date: Wed, 27 May 2020 01:20:56 +0300 Subject: [PATCH 0864/1971] clk: Add Baikal-T1 CCU Dividers driver Nearly each Baikal-T1 IP-core is supposed to have a clock source of particular frequency. But since there are greater than five IP-blocks embedded into the SoC, the CCU PLLs can't fulfill all the needs. Baikal-T1 CCU provides a set of fixed and configurable clock dividers in order to generate a necessary signal for each chip sub-block. This driver creates the of-based hardware clocks for each divider available in Baikal-T1 CCU. The same way as for PLLs we split the functionality up into the clocks operations (gate, ungate, set rate, etc) and hardware clocks declaration/registration procedures. In accordance with the CCU documentation all its dividers are distributed into two CCU sub-blocks: AXI-bus and system devices reference clocks. The former sub-block is used to supply the clocks for AXI-bus interfaces (AXI clock domains) and the later one provides the SoC IP-cores reference clocks. Each sub-block is represented by a dedicated DT node, so they have different compatible strings to distinguish one from another. For some reason CCU provides the dividers of different types. Some dividers can be gateable some can't, some are fixed while the others are variable, some have special divider' limitations, some've got a non-standard register layout and so on. In order to cover all of these cases the hardware clocks driver is designed with an info-descriptor pattern. So there are special static descriptors declared for the dividers of each type with additional flags describing the block peculiarity. These descriptors are then used to create hardware clocks with proper operations. Some CCU dividers provide a way to reset a domain they generate a clock for. So the CCU AXI-bus and CCU system devices clock drivers also perform the reset controller registration. 
Signed-off-by: Serge Semin Cc: Alexey Malahov Cc: Arnd Bergmann Cc: Rob Herring Cc: linux-mips@vger.kernel.org Cc: devicetree@vger.kernel.org Link: https://lore.kernel.org/r/20200526222056.18072-5-Sergey.Semin@baikalelectronics.ru [sboyd@kernel.org: Drop return from void function, silence sparse warnings about initializing structs with NULL vs. integer] Signed-off-by: Stephen Boyd --- drivers/clk/baikal-t1/Kconfig | 12 + drivers/clk/baikal-t1/Makefile | 1 + drivers/clk/baikal-t1/ccu-div.c | 602 ++++++++++++++++++++++++++++ drivers/clk/baikal-t1/ccu-div.h | 110 +++++ drivers/clk/baikal-t1/clk-ccu-div.c | 485 ++++++++++++++++++++++ 5 files changed, 1210 insertions(+) create mode 100644 drivers/clk/baikal-t1/ccu-div.c create mode 100644 drivers/clk/baikal-t1/ccu-div.h create mode 100644 drivers/clk/baikal-t1/clk-ccu-div.c diff --git a/drivers/clk/baikal-t1/Kconfig b/drivers/clk/baikal-t1/Kconfig index 00398ee916dc3..03102f1094bca 100644 --- a/drivers/clk/baikal-t1/Kconfig +++ b/drivers/clk/baikal-t1/Kconfig @@ -27,4 +27,16 @@ config CLK_BT1_CCU_PLL CPUs, DDR, etc.) or passed over the clock dividers to be only then used as an individual reference clock of a target device. +config CLK_BT1_CCU_DIV + bool "Baikal-T1 CCU Dividers support" + select RESET_CONTROLLER + select MFD_SYSCON + default MIPS_BAIKAL_T1 + help + Enable this to support the CCU dividers used to distribute clocks + between AXI-bus and system devices coming from CCU PLLs of Baikal-T1 + SoC. CCU dividers can be either configurable or with fixed divider, + either gateable or ungateable. Some of the CCU dividers can be as well + used to reset the domains they're supplying clock to. + endif diff --git a/drivers/clk/baikal-t1/Makefile b/drivers/clk/baikal-t1/Makefile index 4a612bbacc374..b3b9590b95ed9 100644 --- a/drivers/clk/baikal-t1/Makefile +++ b/drivers/clk/baikal-t1/Makefile @@ -1,2 +1,3 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_CLK_BT1_CCU_PLL) += ccu-pll.o clk-ccu-pll.o +obj-$(CONFIG_CLK_BT1_CCU_DIV) += ccu-div.o clk-ccu-div.o diff --git a/drivers/clk/baikal-t1/ccu-div.c b/drivers/clk/baikal-t1/ccu-div.c new file mode 100644 index 0000000000000..bd40f5936f082 --- /dev/null +++ b/drivers/clk/baikal-t1/ccu-div.c @@ -0,0 +1,602 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC + * + * Authors: + * Serge Semin + * Dmitry Dunaev + * + * Baikal-T1 CCU Dividers interface driver + */ + +#define pr_fmt(fmt) "bt1-ccu-div: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ccu-div.h" + +#define CCU_DIV_CTL 0x00 +#define CCU_DIV_CTL_EN BIT(0) +#define CCU_DIV_CTL_RST BIT(1) +#define CCU_DIV_CTL_SET_CLKDIV BIT(2) +#define CCU_DIV_CTL_CLKDIV_FLD 4 +#define CCU_DIV_CTL_CLKDIV_MASK(_width) \ + GENMASK((_width) + CCU_DIV_CTL_CLKDIV_FLD - 1, CCU_DIV_CTL_CLKDIV_FLD) +#define CCU_DIV_CTL_LOCK_SHIFTED BIT(27) +#define CCU_DIV_CTL_LOCK_NORMAL BIT(31) + +#define CCU_DIV_RST_DELAY_US 1 +#define CCU_DIV_LOCK_CHECK_RETRIES 50 + +#define CCU_DIV_CLKDIV_MIN 0 +#define CCU_DIV_CLKDIV_MAX(_mask) \ + ((_mask) >> CCU_DIV_CTL_CLKDIV_FLD) + +/* + * Use the next two methods until there are generic field setter and + * getter available with non-constant mask support. 
+ */ +static inline u32 ccu_div_get(u32 mask, u32 val) +{ + return (val & mask) >> CCU_DIV_CTL_CLKDIV_FLD; +} + +static inline u32 ccu_div_prep(u32 mask, u32 val) +{ + return (val << CCU_DIV_CTL_CLKDIV_FLD) & mask; +} + +static inline unsigned long ccu_div_lock_delay_ns(unsigned long ref_clk, + unsigned long div) +{ + u64 ns = 4ULL * (div ?: 1) * NSEC_PER_SEC; + + do_div(ns, ref_clk); + + return ns; +} + +static inline unsigned long ccu_div_calc_freq(unsigned long ref_clk, + unsigned long div) +{ + return ref_clk / (div ?: 1); +} + +static int ccu_div_var_update_clkdiv(struct ccu_div *div, + unsigned long parent_rate, + unsigned long divider) +{ + unsigned long nd; + u32 val = 0; + u32 lock; + int count; + + nd = ccu_div_lock_delay_ns(parent_rate, divider); + + if (div->features & CCU_DIV_LOCK_SHIFTED) + lock = CCU_DIV_CTL_LOCK_SHIFTED; + else + lock = CCU_DIV_CTL_LOCK_NORMAL; + + regmap_update_bits(div->sys_regs, div->reg_ctl, + CCU_DIV_CTL_SET_CLKDIV, CCU_DIV_CTL_SET_CLKDIV); + + /* + * Until there is nsec-version of readl_poll_timeout() is available + * we have to implement the next polling loop. + */ + count = CCU_DIV_LOCK_CHECK_RETRIES; + do { + ndelay(nd); + regmap_read(div->sys_regs, div->reg_ctl, &val); + if (val & lock) + return 0; + } while (--count); + + return -ETIMEDOUT; +} + +static int ccu_div_var_enable(struct clk_hw *hw) +{ + struct clk_hw *parent_hw = clk_hw_get_parent(hw); + struct ccu_div *div = to_ccu_div(hw); + unsigned long flags; + u32 val = 0; + int ret; + + if (!parent_hw) { + pr_err("Can't enable '%s' with no parent", clk_hw_get_name(hw)); + return -EINVAL; + } + + regmap_read(div->sys_regs, div->reg_ctl, &val); + if (val & CCU_DIV_CTL_EN) + return 0; + + spin_lock_irqsave(&div->lock, flags); + ret = ccu_div_var_update_clkdiv(div, clk_hw_get_rate(parent_hw), + ccu_div_get(div->mask, val)); + if (!ret) + regmap_update_bits(div->sys_regs, div->reg_ctl, + CCU_DIV_CTL_EN, CCU_DIV_CTL_EN); + spin_unlock_irqrestore(&div->lock, flags); + if (ret) + pr_err("Divider '%s' lock timed out\n", clk_hw_get_name(hw)); + + return ret; +} + +static int ccu_div_gate_enable(struct clk_hw *hw) +{ + struct ccu_div *div = to_ccu_div(hw); + unsigned long flags; + + spin_lock_irqsave(&div->lock, flags); + regmap_update_bits(div->sys_regs, div->reg_ctl, + CCU_DIV_CTL_EN, CCU_DIV_CTL_EN); + spin_unlock_irqrestore(&div->lock, flags); + + return 0; +} + +static void ccu_div_gate_disable(struct clk_hw *hw) +{ + struct ccu_div *div = to_ccu_div(hw); + unsigned long flags; + + spin_lock_irqsave(&div->lock, flags); + regmap_update_bits(div->sys_regs, div->reg_ctl, CCU_DIV_CTL_EN, 0); + spin_unlock_irqrestore(&div->lock, flags); +} + +static int ccu_div_gate_is_enabled(struct clk_hw *hw) +{ + struct ccu_div *div = to_ccu_div(hw); + u32 val = 0; + + regmap_read(div->sys_regs, div->reg_ctl, &val); + + return !!(val & CCU_DIV_CTL_EN); +} + +static unsigned long ccu_div_var_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct ccu_div *div = to_ccu_div(hw); + unsigned long divider; + u32 val = 0; + + regmap_read(div->sys_regs, div->reg_ctl, &val); + divider = ccu_div_get(div->mask, val); + + return ccu_div_calc_freq(parent_rate, divider); +} + +static inline unsigned long ccu_div_var_calc_divider(unsigned long rate, + unsigned long parent_rate, + unsigned int mask) +{ + unsigned long divider; + + divider = parent_rate / rate; + return clamp_t(unsigned long, divider, CCU_DIV_CLKDIV_MIN, + CCU_DIV_CLKDIV_MAX(mask)); +} + +static long ccu_div_var_round_rate(struct clk_hw *hw, unsigned 
long rate, + unsigned long *parent_rate) +{ + struct ccu_div *div = to_ccu_div(hw); + unsigned long divider; + + divider = ccu_div_var_calc_divider(rate, *parent_rate, div->mask); + + return ccu_div_calc_freq(*parent_rate, divider); +} + +/* + * This method is used for the clock divider blocks, which support the + * on-the-fly rate change. So due to lacking the EN bit functionality + * they can't be gated before the rate adjustment. + */ +static int ccu_div_var_set_rate_slow(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct ccu_div *div = to_ccu_div(hw); + unsigned long flags, divider; + u32 val; + int ret; + + divider = ccu_div_var_calc_divider(rate, parent_rate, div->mask); + if (divider == 1 && div->features & CCU_DIV_SKIP_ONE) { + divider = 0; + } else if (div->features & CCU_DIV_SKIP_ONE_TO_THREE) { + if (divider == 1 || divider == 2) + divider = 0; + else if (divider == 3) + divider = 4; + } + + val = ccu_div_prep(div->mask, divider); + + spin_lock_irqsave(&div->lock, flags); + regmap_update_bits(div->sys_regs, div->reg_ctl, div->mask, val); + ret = ccu_div_var_update_clkdiv(div, parent_rate, divider); + spin_unlock_irqrestore(&div->lock, flags); + if (ret) + pr_err("Divider '%s' lock timed out\n", clk_hw_get_name(hw)); + + return ret; +} + +/* + * This method is used for the clock divider blocks, which don't support + * the on-the-fly rate change. + */ +static int ccu_div_var_set_rate_fast(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct ccu_div *div = to_ccu_div(hw); + unsigned long flags, divider = 1; + u32 val; + + divider = ccu_div_var_calc_divider(rate, parent_rate, div->mask); + val = ccu_div_prep(div->mask, divider); + + /* + * Also disable the clock divider block if it was enabled by default + * or by the bootloader. + */ + spin_lock_irqsave(&div->lock, flags); + regmap_update_bits(div->sys_regs, div->reg_ctl, + div->mask | CCU_DIV_CTL_EN, val); + spin_unlock_irqrestore(&div->lock, flags); + + return 0; +} + +static unsigned long ccu_div_fixed_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct ccu_div *div = to_ccu_div(hw); + + return ccu_div_calc_freq(parent_rate, div->divider); +} + +static long ccu_div_fixed_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + struct ccu_div *div = to_ccu_div(hw); + + return ccu_div_calc_freq(*parent_rate, div->divider); +} + +static int ccu_div_fixed_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + return 0; +} + +int ccu_div_reset_domain(struct ccu_div *div) +{ + unsigned long flags; + + if (!div || !(div->features & CCU_DIV_RESET_DOMAIN)) + return -EINVAL; + + spin_lock_irqsave(&div->lock, flags); + regmap_update_bits(div->sys_regs, div->reg_ctl, + CCU_DIV_CTL_RST, CCU_DIV_CTL_RST); + spin_unlock_irqrestore(&div->lock, flags); + + /* The next delay must be enough to cover all the resets. 
*/ + udelay(CCU_DIV_RST_DELAY_US); + + return 0; +} + +#ifdef CONFIG_DEBUG_FS + +struct ccu_div_dbgfs_bit { + struct ccu_div *div; + const char *name; + u32 mask; +}; + +#define CCU_DIV_DBGFS_BIT_ATTR(_name, _mask) { \ + .name = _name, \ + .mask = _mask \ + } + +static const struct ccu_div_dbgfs_bit ccu_div_bits[] = { + CCU_DIV_DBGFS_BIT_ATTR("div_en", CCU_DIV_CTL_EN), + CCU_DIV_DBGFS_BIT_ATTR("div_rst", CCU_DIV_CTL_RST), + CCU_DIV_DBGFS_BIT_ATTR("div_bypass", CCU_DIV_CTL_SET_CLKDIV), + CCU_DIV_DBGFS_BIT_ATTR("div_lock", CCU_DIV_CTL_LOCK_NORMAL) +}; + +#define CCU_DIV_DBGFS_BIT_NUM ARRAY_SIZE(ccu_div_bits) + +/* + * It can be dangerous to change the Divider settings behind clock framework + * back, therefore we don't provide any kernel config based compile time option + * for this feature to enable. + */ +#undef CCU_DIV_ALLOW_WRITE_DEBUGFS +#ifdef CCU_DIV_ALLOW_WRITE_DEBUGFS + +static int ccu_div_dbgfs_bit_set(void *priv, u64 val) +{ + const struct ccu_div_dbgfs_bit *bit = priv; + struct ccu_div *div = bit->div; + unsigned long flags; + + spin_lock_irqsave(&div->lock, flags); + regmap_update_bits(div->sys_regs, div->reg_ctl, + bit->mask, val ? bit->mask : 0); + spin_unlock_irqrestore(&div->lock, flags); + + return 0; +} + +static int ccu_div_dbgfs_var_clkdiv_set(void *priv, u64 val) +{ + struct ccu_div *div = priv; + unsigned long flags; + u32 data; + + val = clamp_t(u64, val, CCU_DIV_CLKDIV_MIN, + CCU_DIV_CLKDIV_MAX(div->mask)); + data = ccu_div_prep(div->mask, val); + + spin_lock_irqsave(&div->lock, flags); + regmap_update_bits(div->sys_regs, div->reg_ctl, div->mask, data); + spin_unlock_irqrestore(&div->lock, flags); + + return 0; +} + +#define ccu_div_dbgfs_mode 0644 + +#else /* !CCU_DIV_ALLOW_WRITE_DEBUGFS */ + +#define ccu_div_dbgfs_bit_set NULL +#define ccu_div_dbgfs_var_clkdiv_set NULL +#define ccu_div_dbgfs_mode 0444 + +#endif /* !CCU_DIV_ALLOW_WRITE_DEBUGFS */ + +static int ccu_div_dbgfs_bit_get(void *priv, u64 *val) +{ + const struct ccu_div_dbgfs_bit *bit = priv; + struct ccu_div *div = bit->div; + u32 data = 0; + + regmap_read(div->sys_regs, div->reg_ctl, &data); + *val = !!(data & bit->mask); + + return 0; +} +DEFINE_DEBUGFS_ATTRIBUTE(ccu_div_dbgfs_bit_fops, + ccu_div_dbgfs_bit_get, ccu_div_dbgfs_bit_set, "%llu\n"); + +static int ccu_div_dbgfs_var_clkdiv_get(void *priv, u64 *val) +{ + struct ccu_div *div = priv; + u32 data = 0; + + regmap_read(div->sys_regs, div->reg_ctl, &data); + *val = ccu_div_get(div->mask, data); + + return 0; +} +DEFINE_DEBUGFS_ATTRIBUTE(ccu_div_dbgfs_var_clkdiv_fops, + ccu_div_dbgfs_var_clkdiv_get, ccu_div_dbgfs_var_clkdiv_set, "%llu\n"); + +static int ccu_div_dbgfs_fixed_clkdiv_get(void *priv, u64 *val) +{ + struct ccu_div *div = priv; + + *val = div->divider; + + return 0; +} +DEFINE_DEBUGFS_ATTRIBUTE(ccu_div_dbgfs_fixed_clkdiv_fops, + ccu_div_dbgfs_fixed_clkdiv_get, NULL, "%llu\n"); + +static void ccu_div_var_debug_init(struct clk_hw *hw, struct dentry *dentry) +{ + struct ccu_div *div = to_ccu_div(hw); + struct ccu_div_dbgfs_bit *bits; + int didx, bidx, num = 2; + const char *name; + + num += !!(div->flags & CLK_SET_RATE_GATE) + + !!(div->features & CCU_DIV_RESET_DOMAIN); + + bits = kcalloc(num, sizeof(*bits), GFP_KERNEL); + if (!bits) + return; + + for (didx = 0, bidx = 0; bidx < CCU_DIV_DBGFS_BIT_NUM; ++bidx) { + name = ccu_div_bits[bidx].name; + if (!(div->flags & CLK_SET_RATE_GATE) && + !strcmp("div_en", name)) { + continue; + } + + if (!(div->features & CCU_DIV_RESET_DOMAIN) && + !strcmp("div_rst", name)) { + continue; + } + + bits[didx] = 
ccu_div_bits[bidx]; + bits[didx].div = div; + + if (div->features & CCU_DIV_LOCK_SHIFTED && + !strcmp("div_lock", name)) { + bits[didx].mask = CCU_DIV_CTL_LOCK_SHIFTED; + } + + debugfs_create_file_unsafe(bits[didx].name, ccu_div_dbgfs_mode, + dentry, &bits[didx], + &ccu_div_dbgfs_bit_fops); + ++didx; + } + + debugfs_create_file_unsafe("div_clkdiv", ccu_div_dbgfs_mode, dentry, + div, &ccu_div_dbgfs_var_clkdiv_fops); +} + +static void ccu_div_gate_debug_init(struct clk_hw *hw, struct dentry *dentry) +{ + struct ccu_div *div = to_ccu_div(hw); + struct ccu_div_dbgfs_bit *bit; + + bit = kmalloc(sizeof(*bit), GFP_KERNEL); + if (!bit) + return; + + *bit = ccu_div_bits[0]; + bit->div = div; + debugfs_create_file_unsafe(bit->name, ccu_div_dbgfs_mode, dentry, bit, + &ccu_div_dbgfs_bit_fops); + + debugfs_create_file_unsafe("div_clkdiv", 0400, dentry, div, + &ccu_div_dbgfs_fixed_clkdiv_fops); +} + +static void ccu_div_fixed_debug_init(struct clk_hw *hw, struct dentry *dentry) +{ + struct ccu_div *div = to_ccu_div(hw); + + debugfs_create_file_unsafe("div_clkdiv", 0400, dentry, div, + &ccu_div_dbgfs_fixed_clkdiv_fops); +} + +#else /* !CONFIG_DEBUG_FS */ + +#define ccu_div_var_debug_init NULL +#define ccu_div_gate_debug_init NULL +#define ccu_div_fixed_debug_init NULL + +#endif /* !CONFIG_DEBUG_FS */ + +static const struct clk_ops ccu_div_var_gate_to_set_ops = { + .enable = ccu_div_var_enable, + .disable = ccu_div_gate_disable, + .is_enabled = ccu_div_gate_is_enabled, + .recalc_rate = ccu_div_var_recalc_rate, + .round_rate = ccu_div_var_round_rate, + .set_rate = ccu_div_var_set_rate_fast, + .debug_init = ccu_div_var_debug_init +}; + +static const struct clk_ops ccu_div_var_nogate_ops = { + .recalc_rate = ccu_div_var_recalc_rate, + .round_rate = ccu_div_var_round_rate, + .set_rate = ccu_div_var_set_rate_slow, + .debug_init = ccu_div_var_debug_init +}; + +static const struct clk_ops ccu_div_gate_ops = { + .enable = ccu_div_gate_enable, + .disable = ccu_div_gate_disable, + .is_enabled = ccu_div_gate_is_enabled, + .recalc_rate = ccu_div_fixed_recalc_rate, + .round_rate = ccu_div_fixed_round_rate, + .set_rate = ccu_div_fixed_set_rate, + .debug_init = ccu_div_gate_debug_init +}; + +static const struct clk_ops ccu_div_fixed_ops = { + .recalc_rate = ccu_div_fixed_recalc_rate, + .round_rate = ccu_div_fixed_round_rate, + .set_rate = ccu_div_fixed_set_rate, + .debug_init = ccu_div_fixed_debug_init +}; + +struct ccu_div *ccu_div_hw_register(const struct ccu_div_init_data *div_init) +{ + struct clk_parent_data parent_data = { }; + struct clk_init_data hw_init = { }; + struct ccu_div *div; + int ret; + + if (!div_init) + return ERR_PTR(-EINVAL); + + div = kzalloc(sizeof(*div), GFP_KERNEL); + if (!div) + return ERR_PTR(-ENOMEM); + + /* + * Note since Baikal-T1 System Controller registers are MMIO-backed + * we won't check the regmap IO operations return status, because it + * must be zero anyway. 
+ */ + div->hw.init = &hw_init; + div->id = div_init->id; + div->reg_ctl = div_init->base + CCU_DIV_CTL; + div->sys_regs = div_init->sys_regs; + div->flags = div_init->flags; + div->features = div_init->features; + spin_lock_init(&div->lock); + + hw_init.name = div_init->name; + hw_init.flags = div_init->flags; + + if (div_init->type == CCU_DIV_VAR) { + if (hw_init.flags & CLK_SET_RATE_GATE) + hw_init.ops = &ccu_div_var_gate_to_set_ops; + else + hw_init.ops = &ccu_div_var_nogate_ops; + div->mask = CCU_DIV_CTL_CLKDIV_MASK(div_init->width); + } else if (div_init->type == CCU_DIV_GATE) { + hw_init.ops = &ccu_div_gate_ops; + div->divider = div_init->divider; + } else if (div_init->type == CCU_DIV_FIXED) { + hw_init.ops = &ccu_div_fixed_ops; + div->divider = div_init->divider; + } else { + ret = -EINVAL; + goto err_free_div; + } + + if (!div_init->parent_name) { + ret = -EINVAL; + goto err_free_div; + } + parent_data.fw_name = div_init->parent_name; + hw_init.parent_data = &parent_data; + hw_init.num_parents = 1; + + ret = of_clk_hw_register(div_init->np, &div->hw); + if (ret) + goto err_free_div; + + return div; + +err_free_div: + kfree(div); + + return ERR_PTR(ret); +} + +void ccu_div_hw_unregister(struct ccu_div *div) +{ + clk_hw_unregister(&div->hw); + + kfree(div); +} diff --git a/drivers/clk/baikal-t1/ccu-div.h b/drivers/clk/baikal-t1/ccu-div.h new file mode 100644 index 0000000000000..795665caefbdc --- /dev/null +++ b/drivers/clk/baikal-t1/ccu-div.h @@ -0,0 +1,110 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC + * + * Baikal-T1 CCU Dividers interface driver + */ +#ifndef __CLK_BT1_CCU_DIV_H__ +#define __CLK_BT1_CCU_DIV_H__ + +#include +#include +#include +#include +#include + +/* + * CCU Divider private flags + * @CCU_DIV_SKIP_ONE: Due to some reason divider can't be set to 1. + * It can be 0 though, which is functionally the same. + * @CCU_DIV_SKIP_ONE_TO_THREE: For some reason divider can't be within [1,3]. + * It can be either 0 or greater than 3. + * @CCU_DIV_LOCK_SHIFTED: Find lock-bit at non-standard position. + * @CCU_DIV_RESET_DOMAIN: Provide reset clock domain method. + */ +#define CCU_DIV_SKIP_ONE BIT(1) +#define CCU_DIV_SKIP_ONE_TO_THREE BIT(2) +#define CCU_DIV_LOCK_SHIFTED BIT(3) +#define CCU_DIV_RESET_DOMAIN BIT(4) + +/* + * enum ccu_div_type - CCU Divider types + * @CCU_DIV_VAR: Clocks gate with variable divider. + * @CCU_DIV_GATE: Clocks gate with fixed divider. + * @CCU_DIV_FIXED: Ungateable clock with fixed divider. + */ +enum ccu_div_type { + CCU_DIV_VAR, + CCU_DIV_GATE, + CCU_DIV_FIXED +}; + +/* + * struct ccu_div_init_data - CCU Divider initialization data + * @id: Clocks private identifier. + * @name: Clocks name. + * @parent_name: Parent clocks name in a fw node. + * @base: Divider register base address with respect to the sys_regs base. + * @sys_regs: Baikal-T1 System Controller registers map. + * @np: Pointer to the node describing the CCU Dividers. + * @type: CCU divider type (variable, fixed with and without gate). + * @width: Divider width if it's variable. + * @divider: Divider fixed value. + * @flags: CCU Divider clock flags. + * @features: CCU Divider private features. 
+ */ +struct ccu_div_init_data { + unsigned int id; + const char *name; + const char *parent_name; + unsigned int base; + struct regmap *sys_regs; + struct device_node *np; + enum ccu_div_type type; + union { + unsigned int width; + unsigned int divider; + }; + unsigned long flags; + unsigned long features; +}; + +/* + * struct ccu_div - CCU Divider descriptor + * @hw: clk_hw of the divider. + * @id: Clock private identifier. + * @reg_ctl: Divider control register base address. + * @sys_regs: Baikal-T1 System Controller registers map. + * @lock: Divider state change spin-lock. + * @mask: Divider field mask. + * @divider: Divider fixed value. + * @flags: Divider clock flags. + * @features: CCU Divider private features. + */ +struct ccu_div { + struct clk_hw hw; + unsigned int id; + unsigned int reg_ctl; + struct regmap *sys_regs; + spinlock_t lock; + union { + u32 mask; + unsigned int divider; + }; + unsigned long flags; + unsigned long features; +}; +#define to_ccu_div(_hw) container_of(_hw, struct ccu_div, hw) + +static inline struct clk_hw *ccu_div_get_clk_hw(struct ccu_div *div) +{ + return div ? &div->hw : NULL; +} + +struct ccu_div *ccu_div_hw_register(const struct ccu_div_init_data *init); + +void ccu_div_hw_unregister(struct ccu_div *div); + +int ccu_div_reset_domain(struct ccu_div *div); + +#endif /* __CLK_BT1_CCU_DIV_H__ */ diff --git a/drivers/clk/baikal-t1/clk-ccu-div.c b/drivers/clk/baikal-t1/clk-ccu-div.c new file mode 100644 index 0000000000000..b479156e5e9bc --- /dev/null +++ b/drivers/clk/baikal-t1/clk-ccu-div.c @@ -0,0 +1,485 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC + * + * Authors: + * Serge Semin + * Dmitry Dunaev + * + * Baikal-T1 CCU Dividers clock driver + */ + +#define pr_fmt(fmt) "bt1-ccu-div: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "ccu-div.h" + +#define CCU_AXI_MAIN_BASE 0x030 +#define CCU_AXI_DDR_BASE 0x034 +#define CCU_AXI_SATA_BASE 0x038 +#define CCU_AXI_GMAC0_BASE 0x03C +#define CCU_AXI_GMAC1_BASE 0x040 +#define CCU_AXI_XGMAC_BASE 0x044 +#define CCU_AXI_PCIE_M_BASE 0x048 +#define CCU_AXI_PCIE_S_BASE 0x04C +#define CCU_AXI_USB_BASE 0x050 +#define CCU_AXI_HWA_BASE 0x054 +#define CCU_AXI_SRAM_BASE 0x058 + +#define CCU_SYS_SATA_REF_BASE 0x060 +#define CCU_SYS_APB_BASE 0x064 +#define CCU_SYS_GMAC0_BASE 0x068 +#define CCU_SYS_GMAC1_BASE 0x06C +#define CCU_SYS_XGMAC_BASE 0x070 +#define CCU_SYS_USB_BASE 0x074 +#define CCU_SYS_PVT_BASE 0x078 +#define CCU_SYS_HWA_BASE 0x07C +#define CCU_SYS_UART_BASE 0x084 +#define CCU_SYS_TIMER0_BASE 0x088 +#define CCU_SYS_TIMER1_BASE 0x08C +#define CCU_SYS_TIMER2_BASE 0x090 +#define CCU_SYS_WDT_BASE 0x150 + +#define CCU_DIV_VAR_INFO(_id, _name, _pname, _base, _width, _flags, _features) \ + { \ + .id = _id, \ + .name = _name, \ + .parent_name = _pname, \ + .base = _base, \ + .type = CCU_DIV_VAR, \ + .width = _width, \ + .flags = _flags, \ + .features = _features \ + } + +#define CCU_DIV_GATE_INFO(_id, _name, _pname, _base, _divider) \ + { \ + .id = _id, \ + .name = _name, \ + .parent_name = _pname, \ + .base = _base, \ + .type = CCU_DIV_GATE, \ + .divider = _divider \ + } + +#define CCU_DIV_FIXED_INFO(_id, _name, _pname, _divider) \ + { \ + .id = _id, \ + .name = _name, \ + .parent_name = _pname, \ + .type = CCU_DIV_FIXED, \ + .divider = _divider \ + } + +#define CCU_DIV_RST_MAP(_rst_id, _clk_id) \ + { \ + .rst_id = _rst_id, \ + .clk_id = _clk_id \ + } + +struct 
ccu_div_info { + unsigned int id; + const char *name; + const char *parent_name; + unsigned int base; + enum ccu_div_type type; + union { + unsigned int width; + unsigned int divider; + }; + unsigned long flags; + unsigned long features; +}; + +struct ccu_div_rst_map { + unsigned int rst_id; + unsigned int clk_id; +}; + +struct ccu_div_data { + struct device_node *np; + struct regmap *sys_regs; + + unsigned int divs_num; + const struct ccu_div_info *divs_info; + struct ccu_div **divs; + + unsigned int rst_num; + const struct ccu_div_rst_map *rst_map; + struct reset_controller_dev rcdev; +}; +#define to_ccu_div_data(_rcdev) container_of(_rcdev, struct ccu_div_data, rcdev) + +/* + * AXI Main Interconnect (axi_main_clk) and DDR AXI-bus (axi_ddr_clk) clocks + * must be left enabled in any case, since former one is responsible for + * clocking a bus between CPU cores and the rest of the SoC components, while + * the later is clocking the AXI-bus between DDR controller and the Main + * Interconnect. So should any of these clocks get to be disabled, the system + * will literally stop working. That's why we marked them as critical. + */ +static const struct ccu_div_info axi_info[] = { + CCU_DIV_VAR_INFO(CCU_AXI_MAIN_CLK, "axi_main_clk", "pcie_clk", + CCU_AXI_MAIN_BASE, 4, + CLK_IS_CRITICAL, CCU_DIV_RESET_DOMAIN), + CCU_DIV_VAR_INFO(CCU_AXI_DDR_CLK, "axi_ddr_clk", "sata_clk", + CCU_AXI_DDR_BASE, 4, + CLK_IS_CRITICAL | CLK_SET_RATE_GATE, + CCU_DIV_RESET_DOMAIN), + CCU_DIV_VAR_INFO(CCU_AXI_SATA_CLK, "axi_sata_clk", "sata_clk", + CCU_AXI_SATA_BASE, 4, + CLK_SET_RATE_GATE, CCU_DIV_RESET_DOMAIN), + CCU_DIV_VAR_INFO(CCU_AXI_GMAC0_CLK, "axi_gmac0_clk", "eth_clk", + CCU_AXI_GMAC0_BASE, 4, + CLK_SET_RATE_GATE, CCU_DIV_RESET_DOMAIN), + CCU_DIV_VAR_INFO(CCU_AXI_GMAC1_CLK, "axi_gmac1_clk", "eth_clk", + CCU_AXI_GMAC1_BASE, 4, + CLK_SET_RATE_GATE, CCU_DIV_RESET_DOMAIN), + CCU_DIV_VAR_INFO(CCU_AXI_XGMAC_CLK, "axi_xgmac_clk", "eth_clk", + CCU_AXI_XGMAC_BASE, 4, + CLK_SET_RATE_GATE, CCU_DIV_RESET_DOMAIN), + CCU_DIV_VAR_INFO(CCU_AXI_PCIE_M_CLK, "axi_pcie_m_clk", "pcie_clk", + CCU_AXI_PCIE_M_BASE, 4, + CLK_SET_RATE_GATE, CCU_DIV_RESET_DOMAIN), + CCU_DIV_VAR_INFO(CCU_AXI_PCIE_S_CLK, "axi_pcie_s_clk", "pcie_clk", + CCU_AXI_PCIE_S_BASE, 4, + CLK_SET_RATE_GATE, CCU_DIV_RESET_DOMAIN), + CCU_DIV_VAR_INFO(CCU_AXI_USB_CLK, "axi_usb_clk", "sata_clk", + CCU_AXI_USB_BASE, 4, + CLK_SET_RATE_GATE, CCU_DIV_RESET_DOMAIN), + CCU_DIV_VAR_INFO(CCU_AXI_HWA_CLK, "axi_hwa_clk", "sata_clk", + CCU_AXI_HWA_BASE, 4, + CLK_SET_RATE_GATE, CCU_DIV_RESET_DOMAIN), + CCU_DIV_VAR_INFO(CCU_AXI_SRAM_CLK, "axi_sram_clk", "eth_clk", + CCU_AXI_SRAM_BASE, 4, + CLK_SET_RATE_GATE, CCU_DIV_RESET_DOMAIN) +}; + +static const struct ccu_div_rst_map axi_rst_map[] = { + CCU_DIV_RST_MAP(CCU_AXI_MAIN_RST, CCU_AXI_MAIN_CLK), + CCU_DIV_RST_MAP(CCU_AXI_DDR_RST, CCU_AXI_DDR_CLK), + CCU_DIV_RST_MAP(CCU_AXI_SATA_RST, CCU_AXI_SATA_CLK), + CCU_DIV_RST_MAP(CCU_AXI_GMAC0_RST, CCU_AXI_GMAC0_CLK), + CCU_DIV_RST_MAP(CCU_AXI_GMAC1_RST, CCU_AXI_GMAC1_CLK), + CCU_DIV_RST_MAP(CCU_AXI_XGMAC_RST, CCU_AXI_XGMAC_CLK), + CCU_DIV_RST_MAP(CCU_AXI_PCIE_M_RST, CCU_AXI_PCIE_M_CLK), + CCU_DIV_RST_MAP(CCU_AXI_PCIE_S_RST, CCU_AXI_PCIE_S_CLK), + CCU_DIV_RST_MAP(CCU_AXI_USB_RST, CCU_AXI_USB_CLK), + CCU_DIV_RST_MAP(CCU_AXI_HWA_RST, CCU_AXI_HWA_CLK), + CCU_DIV_RST_MAP(CCU_AXI_SRAM_RST, CCU_AXI_SRAM_CLK) +}; + +/* + * APB-bus clock is marked as critical since it's a main communication bus + * for the SoC devices registers IO-operations. 
+ */ +static const struct ccu_div_info sys_info[] = { + CCU_DIV_VAR_INFO(CCU_SYS_SATA_REF_CLK, "sys_sata_ref_clk", + "sata_clk", CCU_SYS_SATA_REF_BASE, 4, + CLK_SET_RATE_GATE, + CCU_DIV_SKIP_ONE | CCU_DIV_LOCK_SHIFTED | + CCU_DIV_RESET_DOMAIN), + CCU_DIV_VAR_INFO(CCU_SYS_APB_CLK, "sys_apb_clk", + "pcie_clk", CCU_SYS_APB_BASE, 5, + CLK_IS_CRITICAL, CCU_DIV_RESET_DOMAIN), + CCU_DIV_GATE_INFO(CCU_SYS_GMAC0_TX_CLK, "sys_gmac0_tx_clk", + "eth_clk", CCU_SYS_GMAC0_BASE, 5), + CCU_DIV_FIXED_INFO(CCU_SYS_GMAC0_PTP_CLK, "sys_gmac0_ptp_clk", + "eth_clk", 10), + CCU_DIV_GATE_INFO(CCU_SYS_GMAC1_TX_CLK, "sys_gmac1_tx_clk", + "eth_clk", CCU_SYS_GMAC1_BASE, 5), + CCU_DIV_FIXED_INFO(CCU_SYS_GMAC1_PTP_CLK, "sys_gmac1_ptp_clk", + "eth_clk", 10), + CCU_DIV_GATE_INFO(CCU_SYS_XGMAC_REF_CLK, "sys_xgmac_ref_clk", + "eth_clk", CCU_SYS_XGMAC_BASE, 8), + CCU_DIV_FIXED_INFO(CCU_SYS_XGMAC_PTP_CLK, "sys_xgmac_ptp_clk", + "eth_clk", 10), + CCU_DIV_GATE_INFO(CCU_SYS_USB_CLK, "sys_usb_clk", + "eth_clk", CCU_SYS_USB_BASE, 10), + CCU_DIV_VAR_INFO(CCU_SYS_PVT_CLK, "sys_pvt_clk", + "ref_clk", CCU_SYS_PVT_BASE, 5, + CLK_SET_RATE_GATE, 0), + CCU_DIV_VAR_INFO(CCU_SYS_HWA_CLK, "sys_hwa_clk", + "sata_clk", CCU_SYS_HWA_BASE, 4, + CLK_SET_RATE_GATE, 0), + CCU_DIV_VAR_INFO(CCU_SYS_UART_CLK, "sys_uart_clk", + "eth_clk", CCU_SYS_UART_BASE, 17, + CLK_SET_RATE_GATE, 0), + CCU_DIV_FIXED_INFO(CCU_SYS_I2C1_CLK, "sys_i2c1_clk", + "eth_clk", 10), + CCU_DIV_FIXED_INFO(CCU_SYS_I2C2_CLK, "sys_i2c2_clk", + "eth_clk", 10), + CCU_DIV_FIXED_INFO(CCU_SYS_GPIO_CLK, "sys_gpio_clk", + "ref_clk", 25), + CCU_DIV_VAR_INFO(CCU_SYS_TIMER0_CLK, "sys_timer0_clk", + "ref_clk", CCU_SYS_TIMER0_BASE, 17, + CLK_SET_RATE_GATE, 0), + CCU_DIV_VAR_INFO(CCU_SYS_TIMER1_CLK, "sys_timer1_clk", + "ref_clk", CCU_SYS_TIMER1_BASE, 17, + CLK_SET_RATE_GATE, 0), + CCU_DIV_VAR_INFO(CCU_SYS_TIMER2_CLK, "sys_timer2_clk", + "ref_clk", CCU_SYS_TIMER2_BASE, 17, + CLK_SET_RATE_GATE, 0), + CCU_DIV_VAR_INFO(CCU_SYS_WDT_CLK, "sys_wdt_clk", + "eth_clk", CCU_SYS_WDT_BASE, 17, + CLK_SET_RATE_GATE, CCU_DIV_SKIP_ONE_TO_THREE) +}; + +static const struct ccu_div_rst_map sys_rst_map[] = { + CCU_DIV_RST_MAP(CCU_SYS_SATA_REF_RST, CCU_SYS_SATA_REF_CLK), + CCU_DIV_RST_MAP(CCU_SYS_APB_RST, CCU_SYS_APB_CLK), +}; + +static struct ccu_div *ccu_div_find_desc(struct ccu_div_data *data, + unsigned int clk_id) +{ + struct ccu_div *div; + int idx; + + for (idx = 0; idx < data->divs_num; ++idx) { + div = data->divs[idx]; + if (div && div->id == clk_id) + return div; + } + + return ERR_PTR(-EINVAL); +} + +static int ccu_div_reset(struct reset_controller_dev *rcdev, + unsigned long rst_id) +{ + struct ccu_div_data *data = to_ccu_div_data(rcdev); + const struct ccu_div_rst_map *map; + struct ccu_div *div; + int idx, ret; + + for (idx = 0, map = data->rst_map; idx < data->rst_num; ++idx, ++map) { + if (map->rst_id == rst_id) + break; + } + if (idx == data->rst_num) { + pr_err("Invalid reset ID %lu specified\n", rst_id); + return -EINVAL; + } + + div = ccu_div_find_desc(data, map->clk_id); + if (IS_ERR(div)) { + pr_err("Invalid clock ID %d in mapping\n", map->clk_id); + return PTR_ERR(div); + } + + ret = ccu_div_reset_domain(div); + if (ret) { + pr_err("Reset isn't supported by divider %s\n", + clk_hw_get_name(ccu_div_get_clk_hw(div))); + } + + return ret; +} + +static const struct reset_control_ops ccu_div_rst_ops = { + .reset = ccu_div_reset, +}; + +static struct ccu_div_data *ccu_div_create_data(struct device_node *np) +{ + struct ccu_div_data *data; + int ret; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if 
(!data) + return ERR_PTR(-ENOMEM); + + data->np = np; + if (of_device_is_compatible(np, "baikal,bt1-ccu-axi")) { + data->divs_num = ARRAY_SIZE(axi_info); + data->divs_info = axi_info; + data->rst_num = ARRAY_SIZE(axi_rst_map); + data->rst_map = axi_rst_map; + } else if (of_device_is_compatible(np, "baikal,bt1-ccu-sys")) { + data->divs_num = ARRAY_SIZE(sys_info); + data->divs_info = sys_info; + data->rst_num = ARRAY_SIZE(sys_rst_map); + data->rst_map = sys_rst_map; + } else { + pr_err("Uncompatible DT node '%s' specified\n", + of_node_full_name(np)); + ret = -EINVAL; + goto err_kfree_data; + } + + data->divs = kcalloc(data->divs_num, sizeof(*data->divs), GFP_KERNEL); + if (!data->divs) { + ret = -ENOMEM; + goto err_kfree_data; + } + + return data; + +err_kfree_data: + kfree(data); + + return ERR_PTR(ret); +} + +static void ccu_div_free_data(struct ccu_div_data *data) +{ + kfree(data->divs); + + kfree(data); +} + +static int ccu_div_find_sys_regs(struct ccu_div_data *data) +{ + data->sys_regs = syscon_node_to_regmap(data->np->parent); + if (IS_ERR(data->sys_regs)) { + pr_err("Failed to find syscon regs for '%s'\n", + of_node_full_name(data->np)); + return PTR_ERR(data->sys_regs); + } + + return 0; +} + +static struct clk_hw *ccu_div_of_clk_hw_get(struct of_phandle_args *clkspec, + void *priv) +{ + struct ccu_div_data *data = priv; + struct ccu_div *div; + unsigned int clk_id; + + clk_id = clkspec->args[0]; + div = ccu_div_find_desc(data, clk_id); + if (IS_ERR(div)) { + pr_info("Invalid clock ID %d specified\n", clk_id); + return ERR_CAST(div); + } + + return ccu_div_get_clk_hw(div); +} + +static int ccu_div_clk_register(struct ccu_div_data *data) +{ + int idx, ret; + + for (idx = 0; idx < data->divs_num; ++idx) { + const struct ccu_div_info *info = &data->divs_info[idx]; + struct ccu_div_init_data init = {0}; + + init.id = info->id; + init.name = info->name; + init.parent_name = info->parent_name; + init.np = data->np; + init.type = info->type; + init.flags = info->flags; + init.features = info->features; + + if (init.type == CCU_DIV_VAR) { + init.base = info->base; + init.sys_regs = data->sys_regs; + init.width = info->width; + } else if (init.type == CCU_DIV_GATE) { + init.base = info->base; + init.sys_regs = data->sys_regs; + init.divider = info->divider; + } else { + init.divider = info->divider; + } + + data->divs[idx] = ccu_div_hw_register(&init); + if (IS_ERR(data->divs[idx])) { + ret = PTR_ERR(data->divs[idx]); + pr_err("Couldn't register divider '%s' hw\n", + init.name); + goto err_hw_unregister; + } + } + + ret = of_clk_add_hw_provider(data->np, ccu_div_of_clk_hw_get, data); + if (ret) { + pr_err("Couldn't register dividers '%s' clock provider\n", + of_node_full_name(data->np)); + goto err_hw_unregister; + } + + return 0; + +err_hw_unregister: + for (--idx; idx >= 0; --idx) + ccu_div_hw_unregister(data->divs[idx]); + + return ret; +} + +static void ccu_div_clk_unregister(struct ccu_div_data *data) +{ + int idx; + + of_clk_del_provider(data->np); + + for (idx = 0; idx < data->divs_num; ++idx) + ccu_div_hw_unregister(data->divs[idx]); +} + +static int ccu_div_rst_register(struct ccu_div_data *data) +{ + int ret; + + data->rcdev.ops = &ccu_div_rst_ops; + data->rcdev.of_node = data->np; + data->rcdev.nr_resets = data->rst_num; + + ret = reset_controller_register(&data->rcdev); + if (ret) + pr_err("Couldn't register divider '%s' reset controller\n", + of_node_full_name(data->np)); + + return ret; +} + +static void ccu_div_init(struct device_node *np) +{ + struct ccu_div_data *data; + 
int ret; + + data = ccu_div_create_data(np); + if (IS_ERR(data)) + return; + + ret = ccu_div_find_sys_regs(data); + if (ret) + goto err_free_data; + + ret = ccu_div_clk_register(data); + if (ret) + goto err_free_data; + + ret = ccu_div_rst_register(data); + if (ret) + goto err_clk_unregister; + + return; + +err_clk_unregister: + ccu_div_clk_unregister(data); + +err_free_data: + ccu_div_free_data(data); +} + +CLK_OF_DECLARE(ccu_axi, "baikal,bt1-ccu-axi", ccu_div_init); +CLK_OF_DECLARE(ccu_sys, "baikal,bt1-ccu-sys", ccu_div_init); -- GitLab From dec18bd8f4f2c600df581c075d59747e73bf6f3f Mon Sep 17 00:00:00 2001 From: Pratyush Yadav Date: Mon, 25 May 2020 14:45:32 +0530 Subject: [PATCH 0865/1971] mtd: spi-nor: sfdp: prepare BFPT parsing for JESD216 rev D JESD216 rev D makes BFPT 20 DWORDs. Update the BFPT size define to reflect that. The check for rev A or later compared the BFPT header length with the maximum BFPT length, BFPT_DWORD_MAX. Since BFPT_DWORD_MAX was 16, and so was the BFPT length for both rev A and B, this check worked fine. But now, since BFPT_DWORD_MAX is 20, it means this check will also stop BFPT parsing for rev A or B, since their length is 16. So, instead check for BFPT_DWORD_MAX_JESD216 to stop BFPT parsing for the first JESD216 version, and check for BFPT_DWORD_MAX_JESD216B for the next two versions. Signed-off-by: Pratyush Yadav Signed-off-by: Tudor Ambarus --- drivers/mtd/spi-nor/sfdp.c | 7 ++++++- drivers/mtd/spi-nor/sfdp.h | 5 +++-- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/drivers/mtd/spi-nor/sfdp.c b/drivers/mtd/spi-nor/sfdp.c index 688aa36e863a7..ddb4808a0e9e6 100644 --- a/drivers/mtd/spi-nor/sfdp.c +++ b/drivers/mtd/spi-nor/sfdp.c @@ -549,7 +549,7 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor, SNOR_ERASE_TYPE_MASK; /* Stop here if not JESD216 rev A or later. */ - if (bfpt_header->length < BFPT_DWORD_MAX) + if (bfpt_header->length == BFPT_DWORD_MAX_JESD216) return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt, params); @@ -605,6 +605,11 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor, return -EINVAL; } + /* Stop here if not JESD216 rev C or later. */ + if (bfpt_header->length == BFPT_DWORD_MAX_JESD216B) + return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt, + params); + return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt, params); } diff --git a/drivers/mtd/spi-nor/sfdp.h b/drivers/mtd/spi-nor/sfdp.h index e0a8ded048909..f8198af43a63f 100644 --- a/drivers/mtd/spi-nor/sfdp.h +++ b/drivers/mtd/spi-nor/sfdp.h @@ -10,11 +10,11 @@ /* Basic Flash Parameter Table */ /* - * JESD216 rev B defines a Basic Flash Parameter Table of 16 DWORDs. + * JESD216 rev D defines a Basic Flash Parameter Table of 20 DWORDs. * They are indexed from 1 but C arrays are indexed from 0. */ #define BFPT_DWORD(i) ((i) - 1) -#define BFPT_DWORD_MAX 16 +#define BFPT_DWORD_MAX 20 struct sfdp_bfpt { u32 dwords[BFPT_DWORD_MAX]; @@ -22,6 +22,7 @@ struct sfdp_bfpt { /* The first version of JESD216 defined only 9 DWORDs. */ #define BFPT_DWORD_MAX_JESD216 9 +#define BFPT_DWORD_MAX_JESD216B 16 /* 1st DWORD. */ #define BFPT_DWORD1_FAST_READ_1_1_2 BIT(16) -- GitLab From 2bda748e6ad89b786d337914f03a3ad2adea01fe Mon Sep 17 00:00:00 2001 From: Adam Ford Date: Sat, 4 Apr 2020 11:15:35 -0500 Subject: [PATCH 0866/1971] clk: vc5: Add support for IDT VersaClock 5P49V6965 Update IDT VersaClock 5 driver to support 5P49V6965. 
Signed-off-by: Adam Ford Link: https://lore.kernel.org/r/20200404161537.2312297-1-aford173@gmail.com Signed-off-by: Stephen Boyd --- drivers/clk/clk-versaclock5.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c index 24fef51fbcb54..fa96659f8023d 100644 --- a/drivers/clk/clk-versaclock5.c +++ b/drivers/clk/clk-versaclock5.c @@ -124,6 +124,7 @@ enum vc5_model { IDT_VC5_5P49V5933, IDT_VC5_5P49V5935, IDT_VC6_5P49V6901, + IDT_VC6_5P49V6965, }; /* Structure to describe features of a particular VC5 model */ @@ -683,6 +684,7 @@ static int vc5_map_index_to_output(const enum vc5_model model, case IDT_VC5_5P49V5925: case IDT_VC5_5P49V5935: case IDT_VC6_5P49V6901: + case IDT_VC6_5P49V6965: default: return n; } @@ -956,12 +958,20 @@ static const struct vc5_chip_info idt_5p49v6901_info = { .flags = VC5_HAS_PFD_FREQ_DBL, }; +static const struct vc5_chip_info idt_5p49v6965_info = { + .model = IDT_VC6_5P49V6965, + .clk_fod_cnt = 4, + .clk_out_cnt = 5, + .flags = 0, +}; + static const struct i2c_device_id vc5_id[] = { { "5p49v5923", .driver_data = IDT_VC5_5P49V5923 }, { "5p49v5925", .driver_data = IDT_VC5_5P49V5925 }, { "5p49v5933", .driver_data = IDT_VC5_5P49V5933 }, { "5p49v5935", .driver_data = IDT_VC5_5P49V5935 }, { "5p49v6901", .driver_data = IDT_VC6_5P49V6901 }, + { "5p49v6965", .driver_data = IDT_VC6_5P49V6965 }, { } }; MODULE_DEVICE_TABLE(i2c, vc5_id); @@ -972,6 +982,7 @@ static const struct of_device_id clk_vc5_of_match[] = { { .compatible = "idt,5p49v5933", .data = &idt_5p49v5933_info }, { .compatible = "idt,5p49v5935", .data = &idt_5p49v5935_info }, { .compatible = "idt,5p49v6901", .data = &idt_5p49v6901_info }, + { .compatible = "idt,5p49v6965", .data = &idt_5p49v6965_info }, { }, }; MODULE_DEVICE_TABLE(of, clk_vc5_of_match); -- GitLab From d63ed4ff41bb2d1f0738f801bcd9ff60dfc1eb7f Mon Sep 17 00:00:00 2001 From: Adam Ford Date: Sat, 4 Apr 2020 11:15:36 -0500 Subject: [PATCH 0867/1971] dt: Add bindings for IDT VersaClock 5P49V5925 IDT VersaClock 5 5P49V6965 has 5 clock outputs, 4 fractional dividers. Signed-off-by: Adam Ford Link: https://lkml.kernel.org/r/20200404161537.2312297-2-aford173@gmail.com Acked-by: Rob Herring Signed-off-by: Stephen Boyd --- Documentation/devicetree/bindings/clock/idt,versaclock5.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/devicetree/bindings/clock/idt,versaclock5.txt b/Documentation/devicetree/bindings/clock/idt,versaclock5.txt index 05a245c9df08f..bcff681a4bd0b 100644 --- a/Documentation/devicetree/bindings/clock/idt,versaclock5.txt +++ b/Documentation/devicetree/bindings/clock/idt,versaclock5.txt @@ -12,6 +12,7 @@ Required properties: "idt,5p49v5933" "idt,5p49v5935" "idt,5p49v6901" + "idt,5p49v6965" - reg: i2c device address, shall be 0x68 or 0x6a. - #clock-cells: from common clock binding; shall be set to 1. - clocks: from common clock binding; list of parent clock handles, -- GitLab From 0daede80f870025141cbb16e6a826d5e7f43f4a5 Mon Sep 17 00:00:00 2001 From: Serge Semin Date: Thu, 28 May 2020 12:33:18 +0300 Subject: [PATCH 0868/1971] i2c: designware: Convert driver to using regmap API Seeing the DW I2C driver is using flags-based accessors with two conditional clauses it would be better to replace them with the regmap API IO methods and to initialize the regmap object with read/write callbacks specific to the controller registers map implementation. 
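As a quick illustration of that usage (a minimal sketch with placeholder names, not the driver's code; the actual callbacks and regmap_config are in the diff below), a "no-bus" regmap simply wraps plain MMIO accessors so that all register traffic goes through regmap_read()/regmap_write():

/* Illustrative sketch: the context pointer is the ioremapped base. */
static int my_mmio_read(void *context, unsigned int reg, unsigned int *val)
{
	void __iomem *base = context;

	*val = readl_relaxed(base + reg);

	return 0;
}

static int my_mmio_write(void *context, unsigned int reg, unsigned int val)
{
	void __iomem *base = context;

	writel_relaxed(val, base + reg);

	return 0;
}

static const struct regmap_config my_map_cfg = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.reg_read = my_mmio_read,
	.reg_write = my_mmio_write,
};

/* map = devm_regmap_init(dev, NULL, base, &my_map_cfg); */

The per-variant access quirks (byte-swapped or split 16-bit register accesses) are then hidden entirely inside whichever callback pair is selected at probe time.
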
This will be also handy for the drivers with non-standard registers mapping (like an embedded into the Baikal-T1 System Controller DW I2C block, which glue-driver is a part of this series). As before the driver tries to detect the mapping setup at probe stage and creates a regmap object accordingly, which will be used by the rest of the code to correctly access the controller registers. In two places it was appropriate to convert the hand-written read-modify-write and read-poll-loop design patterns to the corresponding regmap API ready-to-use methods. Note the regmap IO methods return value is checked only at the probe stage. The rest of the code won't do this because basically we have MMIO-based regmap so non of the read/write methods can fail (this also won't be needed for the Baikal-T1-specific I2C controller). Suggested-by: Andy Shevchenko Signed-off-by: Serge Semin Tested-by: Jarkko Nikula Acked-by: Jarkko Nikula Reviewed-by: Andy Shevchenko [wsa: fix type of 'rx_valid' and remove outdated kdoc var description] Signed-off-by: Wolfram Sang --- drivers/i2c/busses/Kconfig | 1 + drivers/i2c/busses/i2c-designware-common.c | 177 +++++++++++++++------ drivers/i2c/busses/i2c-designware-core.h | 22 ++- drivers/i2c/busses/i2c-designware-master.c | 127 ++++++++------- drivers/i2c/busses/i2c-designware-slave.c | 77 ++++----- 5 files changed, 248 insertions(+), 156 deletions(-) diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 6a6fadfc3f930..c748beaaa8903 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -526,6 +526,7 @@ config I2C_DAVINCI config I2C_DESIGNWARE_CORE tristate + select REGMAP config I2C_DESIGNWARE_SLAVE bool "Synopsys DesignWare Slave" diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c index ed302342f8db1..e3a8640db7da1 100644 --- a/drivers/i2c/busses/i2c-designware-common.c +++ b/drivers/i2c/busses/i2c-designware-common.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -57,66 +58,122 @@ static char *abort_sources[] = { "incorrect slave-transmitter mode configuration", }; -u32 dw_readl(struct dw_i2c_dev *dev, int offset) +static int dw_reg_read(void *context, unsigned int reg, unsigned int *val) { - u32 value; + struct dw_i2c_dev *dev = context; - if (dev->flags & ACCESS_16BIT) - value = readw_relaxed(dev->base + offset) | - (readw_relaxed(dev->base + offset + 2) << 16); - else - value = readl_relaxed(dev->base + offset); + *val = readl_relaxed(dev->base + reg); - if (dev->flags & ACCESS_SWAP) - return swab32(value); - else - return value; + return 0; } -void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset) +static int dw_reg_write(void *context, unsigned int reg, unsigned int val) { - if (dev->flags & ACCESS_SWAP) - b = swab32(b); - - if (dev->flags & ACCESS_16BIT) { - writew_relaxed((u16)b, dev->base + offset); - writew_relaxed((u16)(b >> 16), dev->base + offset + 2); - } else { - writel_relaxed(b, dev->base + offset); - } + struct dw_i2c_dev *dev = context; + + writel_relaxed(val, dev->base + reg); + + return 0; +} + +static int dw_reg_read_swab(void *context, unsigned int reg, unsigned int *val) +{ + struct dw_i2c_dev *dev = context; + + *val = swab32(readl_relaxed(dev->base + reg)); + + return 0; +} + +static int dw_reg_write_swab(void *context, unsigned int reg, unsigned int val) +{ + struct dw_i2c_dev *dev = context; + + writel_relaxed(swab32(val), dev->base + reg); + + return 0; +} + +static int dw_reg_read_word(void *context, unsigned int reg, 
unsigned int *val) +{ + struct dw_i2c_dev *dev = context; + + *val = readw_relaxed(dev->base + reg) | + (readw_relaxed(dev->base + reg + 2) << 16); + + return 0; +} + +static int dw_reg_write_word(void *context, unsigned int reg, unsigned int val) +{ + struct dw_i2c_dev *dev = context; + + writew_relaxed(val, dev->base + reg); + writew_relaxed(val >> 16, dev->base + reg + 2); + + return 0; } /** - * i2c_dw_set_reg_access() - Set register access flags + * i2c_dw_init_regmap() - Initialize registers map * @dev: device private data * - * Autodetects needed register access mode and sets access flags accordingly. - * This must be called before doing any other register access. + * Autodetects needed register access mode and creates the regmap with + * corresponding read/write callbacks. This must be called before doing any + * other register access. */ -int i2c_dw_set_reg_access(struct dw_i2c_dev *dev) +int i2c_dw_init_regmap(struct dw_i2c_dev *dev) { + struct regmap_config map_cfg = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .disable_locking = true, + .reg_read = dw_reg_read, + .reg_write = dw_reg_write, + .max_register = DW_IC_COMP_TYPE, + }; u32 reg; int ret; + /* + * Skip detecting the registers map configuration if the regmap has + * already been provided by a higher code. + */ + if (dev->map) + return 0; + ret = i2c_dw_acquire_lock(dev); if (ret) return ret; - reg = dw_readl(dev, DW_IC_COMP_TYPE); + reg = readl(dev->base + DW_IC_COMP_TYPE); i2c_dw_release_lock(dev); if (reg == swab32(DW_IC_COMP_TYPE_VALUE)) { - /* Configure register endianness access */ - dev->flags |= ACCESS_SWAP; + map_cfg.reg_read = dw_reg_read_swab; + map_cfg.reg_write = dw_reg_write_swab; } else if (reg == (DW_IC_COMP_TYPE_VALUE & 0x0000ffff)) { - /* Configure register access mode 16bit */ - dev->flags |= ACCESS_16BIT; + map_cfg.reg_read = dw_reg_read_word; + map_cfg.reg_write = dw_reg_write_word; } else if (reg != DW_IC_COMP_TYPE_VALUE) { dev_err(dev->dev, "Unknown Synopsys component type: 0x%08x\n", reg); return -ENODEV; } + /* + * Note we'll check the return value of the regmap IO accessors only + * at the probe stage. The rest of the code won't do this because + * basically we have MMIO-based regmap so non of the read/write methods + * can fail. + */ + dev->map = devm_regmap_init(dev->dev, NULL, dev, &map_cfg); + if (IS_ERR(dev->map)) { + dev_err(dev->dev, "Failed to init the registers map\n"); + return PTR_ERR(dev->map); + } + return 0; } @@ -327,11 +384,17 @@ int i2c_dw_set_sda_hold(struct dw_i2c_dev *dev) return ret; /* Configure SDA Hold Time if required */ - reg = dw_readl(dev, DW_IC_COMP_VERSION); + ret = regmap_read(dev->map, DW_IC_COMP_VERSION, ®); + if (ret) + goto err_release_lock; + if (reg >= DW_IC_SDA_HOLD_MIN_VERS) { if (!dev->sda_hold_time) { /* Keep previous hold time setting if no one set it */ - dev->sda_hold_time = dw_readl(dev, DW_IC_SDA_HOLD); + ret = regmap_read(dev->map, DW_IC_SDA_HOLD, + &dev->sda_hold_time); + if (ret) + goto err_release_lock; } /* @@ -355,14 +418,16 @@ int i2c_dw_set_sda_hold(struct dw_i2c_dev *dev) dev->sda_hold_time = 0; } +err_release_lock: i2c_dw_release_lock(dev); - return 0; + return ret; } void __i2c_dw_disable(struct dw_i2c_dev *dev) { int timeout = 100; + u32 status; do { __i2c_dw_disable_nowait(dev); @@ -370,7 +435,8 @@ void __i2c_dw_disable(struct dw_i2c_dev *dev) * The enable status register may be unimplemented, but * in that case this test reads zero and exits the loop. 
*/ - if ((dw_readl(dev, DW_IC_ENABLE_STATUS) & 1) == 0) + regmap_read(dev->map, DW_IC_ENABLE_STATUS, &status); + if ((status & 1) == 0) return; /* @@ -449,22 +515,23 @@ void i2c_dw_release_lock(struct dw_i2c_dev *dev) */ int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev) { - int timeout = TIMEOUT; + u32 status; + int ret; - while (dw_readl(dev, DW_IC_STATUS) & DW_IC_STATUS_ACTIVITY) { - if (timeout <= 0) { - dev_warn(dev->dev, "timeout waiting for bus ready\n"); - i2c_recover_bus(&dev->adapter); + ret = regmap_read_poll_timeout(dev->map, DW_IC_STATUS, status, + !(status & DW_IC_STATUS_ACTIVITY), + 1100, 20000); + if (ret) { + dev_warn(dev->dev, "timeout waiting for bus ready\n"); - if (dw_readl(dev, DW_IC_STATUS) & DW_IC_STATUS_ACTIVITY) - return -ETIMEDOUT; - return 0; - } - timeout--; - usleep_range(1000, 1100); + i2c_recover_bus(&dev->adapter); + + regmap_read(dev->map, DW_IC_STATUS, &status); + if (!(status & DW_IC_STATUS_ACTIVITY)) + ret = 0; } - return 0; + return ret; } int i2c_dw_handle_tx_abort(struct dw_i2c_dev *dev) @@ -490,15 +557,19 @@ int i2c_dw_handle_tx_abort(struct dw_i2c_dev *dev) return -EIO; } -void i2c_dw_set_fifo_size(struct dw_i2c_dev *dev) +int i2c_dw_set_fifo_size(struct dw_i2c_dev *dev) { u32 param, tx_fifo_depth, rx_fifo_depth; + int ret; /* * Try to detect the FIFO depth if not set by interface driver, * the depth could be from 2 to 256 from HW spec. */ - param = dw_readl(dev, DW_IC_COMP_PARAM_1); + ret = regmap_read(dev->map, DW_IC_COMP_PARAM_1, ¶m); + if (ret) + return ret; + tx_fifo_depth = ((param >> 16) & 0xff) + 1; rx_fifo_depth = ((param >> 8) & 0xff) + 1; if (!dev->tx_fifo_depth) { @@ -510,6 +581,8 @@ void i2c_dw_set_fifo_size(struct dw_i2c_dev *dev) dev->rx_fifo_depth = min_t(u32, dev->rx_fifo_depth, rx_fifo_depth); } + + return 0; } u32 i2c_dw_func(struct i2c_adapter *adap) @@ -521,17 +594,19 @@ u32 i2c_dw_func(struct i2c_adapter *adap) void i2c_dw_disable(struct dw_i2c_dev *dev) { + u32 dummy; + /* Disable controller */ __i2c_dw_disable(dev); /* Disable all interrupts */ - dw_writel(dev, 0, DW_IC_INTR_MASK); - dw_readl(dev, DW_IC_CLR_INTR); + regmap_write(dev->map, DW_IC_INTR_MASK, 0); + regmap_read(dev->map, DW_IC_CLR_INTR, &dummy); } void i2c_dw_disable_int(struct dw_i2c_dev *dev) { - dw_writel(dev, 0, DW_IC_INTR_MASK); + regmap_write(dev->map, DW_IC_INTR_MASK, 0); } MODULE_DESCRIPTION("Synopsys DesignWare I2C bus adapter core"); diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h index b9ef9b0deef0d..f5bbe3d6bcf8b 100644 --- a/drivers/i2c/busses/i2c-designware-core.h +++ b/drivers/i2c/busses/i2c-designware-core.h @@ -15,6 +15,7 @@ #include #include #include +#include #include #define DW_IC_DEFAULT_FUNCTIONALITY (I2C_FUNC_I2C | \ @@ -126,8 +127,6 @@ #define STATUS_WRITE_IN_PROGRESS 0x1 #define STATUS_READ_IN_PROGRESS 0x2 -#define TIMEOUT 20 /* ms */ - /* * operation modes */ @@ -183,7 +182,9 @@ struct reset_control; /** * struct dw_i2c_dev - private i2c-designware data * @dev: driver model device node + * @map: IO registers map * @base: IO registers pointer + * @ext: Extended IO registers pointer * @cmd_complete: tx completion indicator * @clk: input reference clock * @pclk: clock required to access the registers @@ -233,6 +234,7 @@ struct reset_control; */ struct dw_i2c_dev { struct device *dev; + struct regmap *map; void __iomem *base; void __iomem *ext; struct completion cmd_complete; @@ -284,17 +286,13 @@ struct dw_i2c_dev { bool suspended; }; -#define ACCESS_SWAP 0x00000001 -#define ACCESS_16BIT 
0x00000002 -#define ACCESS_INTR_MASK 0x00000004 -#define ACCESS_NO_IRQ_SUSPEND 0x00000008 +#define ACCESS_INTR_MASK 0x00000001 +#define ACCESS_NO_IRQ_SUSPEND 0x00000002 #define MODEL_MSCC_OCELOT 0x00000100 #define MODEL_MASK 0x00000f00 -u32 dw_readl(struct dw_i2c_dev *dev, int offset); -void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset); -int i2c_dw_set_reg_access(struct dw_i2c_dev *dev); +int i2c_dw_init_regmap(struct dw_i2c_dev *dev); u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset); u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset); int i2c_dw_set_sda_hold(struct dw_i2c_dev *dev); @@ -304,19 +302,19 @@ int i2c_dw_acquire_lock(struct dw_i2c_dev *dev); void i2c_dw_release_lock(struct dw_i2c_dev *dev); int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev); int i2c_dw_handle_tx_abort(struct dw_i2c_dev *dev); -void i2c_dw_set_fifo_size(struct dw_i2c_dev *dev); +int i2c_dw_set_fifo_size(struct dw_i2c_dev *dev); u32 i2c_dw_func(struct i2c_adapter *adap); void i2c_dw_disable(struct dw_i2c_dev *dev); void i2c_dw_disable_int(struct dw_i2c_dev *dev); static inline void __i2c_dw_enable(struct dw_i2c_dev *dev) { - dw_writel(dev, 1, DW_IC_ENABLE); + regmap_write(dev->map, DW_IC_ENABLE, 1); } static inline void __i2c_dw_disable_nowait(struct dw_i2c_dev *dev) { - dw_writel(dev, 0, DW_IC_ENABLE); + regmap_write(dev->map, DW_IC_ENABLE, 0); } void __i2c_dw_disable(struct dw_i2c_dev *dev); diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index 95eeec53c744f..d6425ad6e6a38 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include "i2c-designware-core.h" @@ -25,11 +26,11 @@ static void i2c_dw_configure_fifo_master(struct dw_i2c_dev *dev) { /* Configure Tx/Rx FIFO threshold levels */ - dw_writel(dev, dev->tx_fifo_depth / 2, DW_IC_TX_TL); - dw_writel(dev, 0, DW_IC_RX_TL); + regmap_write(dev->map, DW_IC_TX_TL, dev->tx_fifo_depth / 2); + regmap_write(dev->map, DW_IC_RX_TL, 0); /* Configure the I2C master */ - dw_writel(dev, dev->master_cfg, DW_IC_CON); + regmap_write(dev->map, DW_IC_CON, dev->master_cfg); } static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev) @@ -44,8 +45,11 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev) ret = i2c_dw_acquire_lock(dev); if (ret) return ret; - comp_param1 = dw_readl(dev, DW_IC_COMP_PARAM_1); + + ret = regmap_read(dev->map, DW_IC_COMP_PARAM_1, &comp_param1); i2c_dw_release_lock(dev); + if (ret) + return ret; /* Set standard and fast speed dividers for high/low periods */ sda_falling_time = t->sda_fall_ns ?: 300; /* ns */ @@ -187,22 +191,22 @@ static int i2c_dw_init_master(struct dw_i2c_dev *dev) __i2c_dw_disable(dev); /* Write standard speed timing parameters */ - dw_writel(dev, dev->ss_hcnt, DW_IC_SS_SCL_HCNT); - dw_writel(dev, dev->ss_lcnt, DW_IC_SS_SCL_LCNT); + regmap_write(dev->map, DW_IC_SS_SCL_HCNT, dev->ss_hcnt); + regmap_write(dev->map, DW_IC_SS_SCL_LCNT, dev->ss_lcnt); /* Write fast mode/fast mode plus timing parameters */ - dw_writel(dev, dev->fs_hcnt, DW_IC_FS_SCL_HCNT); - dw_writel(dev, dev->fs_lcnt, DW_IC_FS_SCL_LCNT); + regmap_write(dev->map, DW_IC_FS_SCL_HCNT, dev->fs_hcnt); + regmap_write(dev->map, DW_IC_FS_SCL_LCNT, dev->fs_lcnt); /* Write high speed timing parameters if supported */ if (dev->hs_hcnt && dev->hs_lcnt) { - dw_writel(dev, dev->hs_hcnt, DW_IC_HS_SCL_HCNT); - dw_writel(dev, dev->hs_lcnt, DW_IC_HS_SCL_LCNT); + 
regmap_write(dev->map, DW_IC_HS_SCL_HCNT, dev->hs_hcnt); + regmap_write(dev->map, DW_IC_HS_SCL_LCNT, dev->hs_lcnt); } /* Write SDA hold time if supported */ if (dev->sda_hold_time) - dw_writel(dev, dev->sda_hold_time, DW_IC_SDA_HOLD); + regmap_write(dev->map, DW_IC_SDA_HOLD, dev->sda_hold_time); i2c_dw_configure_fifo_master(dev); i2c_dw_release_lock(dev); @@ -213,15 +217,15 @@ static int i2c_dw_init_master(struct dw_i2c_dev *dev) static void i2c_dw_xfer_init(struct dw_i2c_dev *dev) { struct i2c_msg *msgs = dev->msgs; - u32 ic_con, ic_tar = 0; + u32 ic_con = 0, ic_tar = 0; + u32 dummy; /* Disable the adapter */ __i2c_dw_disable(dev); /* If the slave address is ten bit address, enable 10BITADDR */ - ic_con = dw_readl(dev, DW_IC_CON); if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) { - ic_con |= DW_IC_CON_10BITADDR_MASTER; + ic_con = DW_IC_CON_10BITADDR_MASTER; /* * If I2C_DYNAMIC_TAR_UPDATE is set, the 10-bit addressing * mode has to be enabled via bit 12 of IC_TAR register. @@ -229,17 +233,17 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev) * detected from registers. */ ic_tar = DW_IC_TAR_10BITADDR_MASTER; - } else { - ic_con &= ~DW_IC_CON_10BITADDR_MASTER; } - dw_writel(dev, ic_con, DW_IC_CON); + regmap_update_bits(dev->map, DW_IC_CON, DW_IC_CON_10BITADDR_MASTER, + ic_con); /* * Set the slave (target) address and enable 10-bit addressing mode * if applicable. */ - dw_writel(dev, msgs[dev->msg_write_idx].addr | ic_tar, DW_IC_TAR); + regmap_write(dev->map, DW_IC_TAR, + msgs[dev->msg_write_idx].addr | ic_tar); /* Enforce disabled interrupts (due to HW issues) */ i2c_dw_disable_int(dev); @@ -248,11 +252,11 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev) __i2c_dw_enable(dev); /* Dummy read to avoid the register getting stuck on Bay Trail */ - dw_readl(dev, DW_IC_ENABLE_STATUS); + regmap_read(dev->map, DW_IC_ENABLE_STATUS, &dummy); /* Clear and enable interrupts */ - dw_readl(dev, DW_IC_CLR_INTR); - dw_writel(dev, DW_IC_INTR_MASTER_MASK, DW_IC_INTR_MASK); + regmap_read(dev->map, DW_IC_CLR_INTR, &dummy); + regmap_write(dev->map, DW_IC_INTR_MASK, DW_IC_INTR_MASTER_MASK); } /* @@ -271,6 +275,7 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev) u32 buf_len = dev->tx_buf_len; u8 *buf = dev->tx_buf; bool need_restart = false; + unsigned int flr; intr_mask = DW_IC_INTR_MASTER_MASK; @@ -303,8 +308,11 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev) need_restart = true; } - tx_limit = dev->tx_fifo_depth - dw_readl(dev, DW_IC_TXFLR); - rx_limit = dev->rx_fifo_depth - dw_readl(dev, DW_IC_RXFLR); + regmap_read(dev->map, DW_IC_TXFLR, &flr); + tx_limit = dev->tx_fifo_depth - flr; + + regmap_read(dev->map, DW_IC_RXFLR, &flr); + rx_limit = dev->rx_fifo_depth - flr; while (buf_len > 0 && tx_limit > 0 && rx_limit > 0) { u32 cmd = 0; @@ -337,11 +345,14 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev) if (dev->rx_outstanding >= dev->rx_fifo_depth) break; - dw_writel(dev, cmd | 0x100, DW_IC_DATA_CMD); + regmap_write(dev->map, DW_IC_DATA_CMD, + cmd | 0x100); rx_limit--; dev->rx_outstanding++; - } else - dw_writel(dev, cmd | *buf++, DW_IC_DATA_CMD); + } else { + regmap_write(dev->map, DW_IC_DATA_CMD, + cmd | *buf++); + } tx_limit--; buf_len--; } @@ -371,7 +382,7 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev) if (dev->msg_err) intr_mask = 0; - dw_writel(dev, intr_mask, DW_IC_INTR_MASK); + regmap_write(dev->map, DW_IC_INTR_MASK, intr_mask); } static u8 @@ -396,10 +407,10 @@ static void i2c_dw_read(struct dw_i2c_dev *dev) { struct i2c_msg *msgs = dev->msgs; - int rx_valid; + unsigned int rx_valid; for (; dev->msg_read_idx < 
dev->msgs_num; dev->msg_read_idx++) { - u32 len; + u32 len, tmp; u8 *buf; if (!(msgs[dev->msg_read_idx].flags & I2C_M_RD)) @@ -413,18 +424,18 @@ i2c_dw_read(struct dw_i2c_dev *dev) buf = dev->rx_buf; } - rx_valid = dw_readl(dev, DW_IC_RXFLR); + regmap_read(dev->map, DW_IC_RXFLR, &rx_valid); for (; len > 0 && rx_valid > 0; len--, rx_valid--) { u32 flags = msgs[dev->msg_read_idx].flags; - *buf = dw_readl(dev, DW_IC_DATA_CMD); + regmap_read(dev->map, DW_IC_DATA_CMD, &tmp); /* Ensure length byte is a valid value */ if (flags & I2C_M_RECV_LEN && - *buf <= I2C_SMBUS_BLOCK_MAX && *buf > 0) { - len = i2c_dw_recv_len(dev, *buf); + tmp <= I2C_SMBUS_BLOCK_MAX && tmp > 0) { + len = i2c_dw_recv_len(dev, tmp); } - buf++; + *buf++ = tmp; dev->rx_outstanding--; } @@ -542,7 +553,7 @@ static const struct i2c_adapter_quirks i2c_dw_quirks = { static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev) { - u32 stat; + u32 stat, dummy; /* * The IC_INTR_STAT register just indicates "enabled" interrupts. @@ -550,47 +561,47 @@ static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev) * in the IC_RAW_INTR_STAT register. * * That is, - * stat = dw_readl(IC_INTR_STAT); + * stat = readl(IC_INTR_STAT); * equals to, - * stat = dw_readl(IC_RAW_INTR_STAT) & dw_readl(IC_INTR_MASK); + * stat = readl(IC_RAW_INTR_STAT) & readl(IC_INTR_MASK); * * The raw version might be useful for debugging purposes. */ - stat = dw_readl(dev, DW_IC_INTR_STAT); + regmap_read(dev->map, DW_IC_INTR_STAT, &stat); /* * Do not use the IC_CLR_INTR register to clear interrupts, or * you'll miss some interrupts, triggered during the period from - * dw_readl(IC_INTR_STAT) to dw_readl(IC_CLR_INTR). + * readl(IC_INTR_STAT) to readl(IC_CLR_INTR). * * Instead, use the separately-prepared IC_CLR_* registers. */ if (stat & DW_IC_INTR_RX_UNDER) - dw_readl(dev, DW_IC_CLR_RX_UNDER); + regmap_read(dev->map, DW_IC_CLR_RX_UNDER, &dummy); if (stat & DW_IC_INTR_RX_OVER) - dw_readl(dev, DW_IC_CLR_RX_OVER); + regmap_read(dev->map, DW_IC_CLR_RX_OVER, &dummy); if (stat & DW_IC_INTR_TX_OVER) - dw_readl(dev, DW_IC_CLR_TX_OVER); + regmap_read(dev->map, DW_IC_CLR_TX_OVER, &dummy); if (stat & DW_IC_INTR_RD_REQ) - dw_readl(dev, DW_IC_CLR_RD_REQ); + regmap_read(dev->map, DW_IC_CLR_RD_REQ, &dummy); if (stat & DW_IC_INTR_TX_ABRT) { /* * The IC_TX_ABRT_SOURCE register is cleared whenever * the IC_CLR_TX_ABRT is read. Preserve it beforehand. */ - dev->abort_source = dw_readl(dev, DW_IC_TX_ABRT_SOURCE); - dw_readl(dev, DW_IC_CLR_TX_ABRT); + regmap_read(dev->map, DW_IC_TX_ABRT_SOURCE, &dev->abort_source); + regmap_read(dev->map, DW_IC_CLR_TX_ABRT, &dummy); } if (stat & DW_IC_INTR_RX_DONE) - dw_readl(dev, DW_IC_CLR_RX_DONE); + regmap_read(dev->map, DW_IC_CLR_RX_DONE, &dummy); if (stat & DW_IC_INTR_ACTIVITY) - dw_readl(dev, DW_IC_CLR_ACTIVITY); + regmap_read(dev->map, DW_IC_CLR_ACTIVITY, &dummy); if (stat & DW_IC_INTR_STOP_DET) - dw_readl(dev, DW_IC_CLR_STOP_DET); + regmap_read(dev->map, DW_IC_CLR_STOP_DET, &dummy); if (stat & DW_IC_INTR_START_DET) - dw_readl(dev, DW_IC_CLR_START_DET); + regmap_read(dev->map, DW_IC_CLR_START_DET, &dummy); if (stat & DW_IC_INTR_GEN_CALL) - dw_readl(dev, DW_IC_CLR_GEN_CALL); + regmap_read(dev->map, DW_IC_CLR_GEN_CALL, &dummy); return stat; } @@ -612,7 +623,7 @@ static int i2c_dw_irq_handler_master(struct dw_i2c_dev *dev) * Anytime TX_ABRT is set, the contents of the tx/rx * buffers are flushed. Make sure to skip them. 
*/ - dw_writel(dev, 0, DW_IC_INTR_MASK); + regmap_write(dev->map, DW_IC_INTR_MASK, 0); goto tx_aborted; } @@ -633,9 +644,9 @@ tx_aborted: complete(&dev->cmd_complete); else if (unlikely(dev->flags & ACCESS_INTR_MASK)) { /* Workaround to trigger pending interrupt */ - stat = dw_readl(dev, DW_IC_INTR_MASK); + regmap_read(dev->map, DW_IC_INTR_MASK, &stat); i2c_dw_disable_int(dev); - dw_writel(dev, stat, DW_IC_INTR_MASK); + regmap_write(dev->map, DW_IC_INTR_MASK, stat); } return 0; @@ -646,8 +657,8 @@ static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id) struct dw_i2c_dev *dev = dev_id; u32 stat, enabled; - enabled = dw_readl(dev, DW_IC_ENABLE); - stat = dw_readl(dev, DW_IC_RAW_INTR_STAT); + regmap_read(dev->map, DW_IC_ENABLE, &enabled); + regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &stat); dev_dbg(dev->dev, "enabled=%#x stat=%#x\n", enabled, stat); if (!enabled || !(stat & ~DW_IC_INTR_ACTIVITY)) return IRQ_NONE; @@ -739,7 +750,7 @@ int i2c_dw_probe_master(struct dw_i2c_dev *dev) dev->disable = i2c_dw_disable; dev->disable_int = i2c_dw_disable_int; - ret = i2c_dw_set_reg_access(dev); + ret = i2c_dw_init_regmap(dev); if (ret) return ret; @@ -747,7 +758,9 @@ int i2c_dw_probe_master(struct dw_i2c_dev *dev) if (ret) return ret; - i2c_dw_set_fifo_size(dev); + ret = i2c_dw_set_fifo_size(dev); + if (ret) + return ret; ret = dev->init(dev); if (ret) diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c index 576e7af4e94be..44974b53a6268 100644 --- a/drivers/i2c/busses/i2c-designware-slave.c +++ b/drivers/i2c/busses/i2c-designware-slave.c @@ -14,18 +14,19 @@ #include #include #include +#include #include "i2c-designware-core.h" static void i2c_dw_configure_fifo_slave(struct dw_i2c_dev *dev) { /* Configure Tx/Rx FIFO threshold levels. */ - dw_writel(dev, 0, DW_IC_TX_TL); - dw_writel(dev, 0, DW_IC_RX_TL); + regmap_write(dev->map, DW_IC_TX_TL, 0); + regmap_write(dev->map, DW_IC_RX_TL, 0); /* Configure the I2C slave. */ - dw_writel(dev, dev->slave_cfg, DW_IC_CON); - dw_writel(dev, DW_IC_INTR_SLAVE_MASK, DW_IC_INTR_MASK); + regmap_write(dev->map, DW_IC_CON, dev->slave_cfg); + regmap_write(dev->map, DW_IC_INTR_MASK, DW_IC_INTR_SLAVE_MASK); } /** @@ -49,7 +50,7 @@ static int i2c_dw_init_slave(struct dw_i2c_dev *dev) /* Write SDA hold time if supported */ if (dev->sda_hold_time) - dw_writel(dev, dev->sda_hold_time, DW_IC_SDA_HOLD); + regmap_write(dev->map, DW_IC_SDA_HOLD, dev->sda_hold_time); i2c_dw_configure_fifo_slave(dev); i2c_dw_release_lock(dev); @@ -72,7 +73,7 @@ static int i2c_dw_reg_slave(struct i2c_client *slave) * the address to which the DW_apb_i2c responds. */ __i2c_dw_disable_nowait(dev); - dw_writel(dev, slave->addr, DW_IC_SAR); + regmap_write(dev->map, DW_IC_SAR, slave->addr); dev->slave = slave; __i2c_dw_enable(dev); @@ -103,7 +104,7 @@ static int i2c_dw_unreg_slave(struct i2c_client *slave) static u32 i2c_dw_read_clear_intrbits_slave(struct dw_i2c_dev *dev) { - u32 stat; + u32 stat, dummy; /* * The IC_INTR_STAT register just indicates "enabled" interrupts. @@ -111,39 +112,39 @@ static u32 i2c_dw_read_clear_intrbits_slave(struct dw_i2c_dev *dev) * in the IC_RAW_INTR_STAT register. * * That is, - * stat = dw_readl(IC_INTR_STAT); + * stat = readl(IC_INTR_STAT); * equals to, - * stat = dw_readl(IC_RAW_INTR_STAT) & dw_readl(IC_INTR_MASK); + * stat = readl(IC_RAW_INTR_STAT) & readl(IC_INTR_MASK); * * The raw version might be useful for debugging purposes. 
*/ - stat = dw_readl(dev, DW_IC_INTR_STAT); + regmap_read(dev->map, DW_IC_INTR_STAT, &stat); /* * Do not use the IC_CLR_INTR register to clear interrupts, or * you'll miss some interrupts, triggered during the period from - * dw_readl(IC_INTR_STAT) to dw_readl(IC_CLR_INTR). + * readl(IC_INTR_STAT) to readl(IC_CLR_INTR). * * Instead, use the separately-prepared IC_CLR_* registers. */ if (stat & DW_IC_INTR_TX_ABRT) - dw_readl(dev, DW_IC_CLR_TX_ABRT); + regmap_read(dev->map, DW_IC_CLR_TX_ABRT, &dummy); if (stat & DW_IC_INTR_RX_UNDER) - dw_readl(dev, DW_IC_CLR_RX_UNDER); + regmap_read(dev->map, DW_IC_CLR_RX_UNDER, &dummy); if (stat & DW_IC_INTR_RX_OVER) - dw_readl(dev, DW_IC_CLR_RX_OVER); + regmap_read(dev->map, DW_IC_CLR_RX_OVER, &dummy); if (stat & DW_IC_INTR_TX_OVER) - dw_readl(dev, DW_IC_CLR_TX_OVER); + regmap_read(dev->map, DW_IC_CLR_TX_OVER, &dummy); if (stat & DW_IC_INTR_RX_DONE) - dw_readl(dev, DW_IC_CLR_RX_DONE); + regmap_read(dev->map, DW_IC_CLR_RX_DONE, &dummy); if (stat & DW_IC_INTR_ACTIVITY) - dw_readl(dev, DW_IC_CLR_ACTIVITY); + regmap_read(dev->map, DW_IC_CLR_ACTIVITY, &dummy); if (stat & DW_IC_INTR_STOP_DET) - dw_readl(dev, DW_IC_CLR_STOP_DET); + regmap_read(dev->map, DW_IC_CLR_STOP_DET, &dummy); if (stat & DW_IC_INTR_START_DET) - dw_readl(dev, DW_IC_CLR_START_DET); + regmap_read(dev->map, DW_IC_CLR_START_DET, &dummy); if (stat & DW_IC_INTR_GEN_CALL) - dw_readl(dev, DW_IC_CLR_GEN_CALL); + regmap_read(dev->map, DW_IC_CLR_GEN_CALL, &dummy); return stat; } @@ -155,14 +156,14 @@ static u32 i2c_dw_read_clear_intrbits_slave(struct dw_i2c_dev *dev) static int i2c_dw_irq_handler_slave(struct dw_i2c_dev *dev) { - u32 raw_stat, stat, enabled; - u8 val, slave_activity; + u32 raw_stat, stat, enabled, tmp; + u8 val = 0, slave_activity; - stat = dw_readl(dev, DW_IC_INTR_STAT); - enabled = dw_readl(dev, DW_IC_ENABLE); - raw_stat = dw_readl(dev, DW_IC_RAW_INTR_STAT); - slave_activity = ((dw_readl(dev, DW_IC_STATUS) & - DW_IC_STATUS_SLAVE_ACTIVITY) >> 6); + regmap_read(dev->map, DW_IC_INTR_STAT, &stat); + regmap_read(dev->map, DW_IC_ENABLE, &enabled); + regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &raw_stat); + regmap_read(dev->map, DW_IC_STATUS, &tmp); + slave_activity = ((tmp & DW_IC_STATUS_SLAVE_ACTIVITY) >> 6); if (!enabled || !(raw_stat & ~DW_IC_INTR_ACTIVITY) || !dev->slave) return 0; @@ -177,7 +178,8 @@ static int i2c_dw_irq_handler_slave(struct dw_i2c_dev *dev) if (stat & DW_IC_INTR_RD_REQ) { if (slave_activity) { if (stat & DW_IC_INTR_RX_FULL) { - val = dw_readl(dev, DW_IC_DATA_CMD); + regmap_read(dev->map, DW_IC_DATA_CMD, &tmp); + val = tmp; if (!i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_RECEIVED, @@ -185,24 +187,24 @@ static int i2c_dw_irq_handler_slave(struct dw_i2c_dev *dev) dev_vdbg(dev->dev, "Byte %X acked!", val); } - dw_readl(dev, DW_IC_CLR_RD_REQ); + regmap_read(dev->map, DW_IC_CLR_RD_REQ, &tmp); stat = i2c_dw_read_clear_intrbits_slave(dev); } else { - dw_readl(dev, DW_IC_CLR_RD_REQ); - dw_readl(dev, DW_IC_CLR_RX_UNDER); + regmap_read(dev->map, DW_IC_CLR_RD_REQ, &tmp); + regmap_read(dev->map, DW_IC_CLR_RX_UNDER, &tmp); stat = i2c_dw_read_clear_intrbits_slave(dev); } if (!i2c_slave_event(dev->slave, I2C_SLAVE_READ_REQUESTED, &val)) - dw_writel(dev, val, DW_IC_DATA_CMD); + regmap_write(dev->map, DW_IC_DATA_CMD, val); } } if (stat & DW_IC_INTR_RX_DONE) { if (!i2c_slave_event(dev->slave, I2C_SLAVE_READ_PROCESSED, &val)) - dw_readl(dev, DW_IC_CLR_RX_DONE); + regmap_read(dev->map, DW_IC_CLR_RX_DONE, &tmp); i2c_slave_event(dev->slave, I2C_SLAVE_STOP, &val); stat = 
i2c_dw_read_clear_intrbits_slave(dev); @@ -210,7 +212,8 @@ static int i2c_dw_irq_handler_slave(struct dw_i2c_dev *dev) } if (stat & DW_IC_INTR_RX_FULL) { - val = dw_readl(dev, DW_IC_DATA_CMD); + regmap_read(dev->map, DW_IC_DATA_CMD, &tmp); + val = tmp; if (!i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_RECEIVED, &val)) dev_vdbg(dev->dev, "Byte %X acked!", val); @@ -263,7 +266,7 @@ int i2c_dw_probe_slave(struct dw_i2c_dev *dev) dev->disable = i2c_dw_disable; dev->disable_int = i2c_dw_disable_int; - ret = i2c_dw_set_reg_access(dev); + ret = i2c_dw_init_regmap(dev); if (ret) return ret; @@ -271,7 +274,9 @@ int i2c_dw_probe_slave(struct dw_i2c_dev *dev) if (ret) return ret; - i2c_dw_set_fifo_size(dev); + ret = i2c_dw_set_fifo_size(dev); + if (ret) + return ret; ret = dev->init(dev); if (ret) -- GitLab From fac25d7aaa03c4b5e11c7b7c1e286f85d21e008a Mon Sep 17 00:00:00 2001 From: Serge Semin Date: Thu, 28 May 2020 12:33:19 +0300 Subject: [PATCH 0869/1971] i2c: designware: Retrieve quirk flags as early as possible Some platforms might need to activate the driver quirks at a very early probe stage. For instance, Baikal-T1 System I2C doesn't need to map the registers space as ones belong to the system controller. Instead it will request the syscon regmap from the parental DT node. In order to be able to do so let's retrieve the model flags right after the DW I2C private data is created. While at it replace the or-assignment with just assignment operator since or-ing is redundant at this stage. Signed-off-by: Serge Semin Reviewed-by: Andy Shevchenko Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-designware-platdrv.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index ca057aa9eac4c..38657d821c727 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -124,6 +124,8 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) if (!dev) return -ENOMEM; + dev->flags = (uintptr_t)device_get_match_data(&pdev->dev); + dev->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(dev->base)) return PTR_ERR(dev->base); @@ -146,8 +148,6 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) i2c_dw_acpi_adjust_bus_speed(&pdev->dev); - dev->flags |= (uintptr_t)device_get_match_data(&pdev->dev); - if (pdev->dev.of_node) dw_i2c_of_configure(pdev); -- GitLab From b7c3d0777808cd7b5cee6078bd0ef51e82bd3db9 Mon Sep 17 00:00:00 2001 From: Serge Semin Date: Thu, 28 May 2020 12:33:20 +0300 Subject: [PATCH 0870/1971] i2c: designware: Move reg-space remapping into a dedicated function This is a preparation patch before adding a quirk with custom registers map creation required for the Baikal-T1 System I2C support. 
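For context, a minimal sketch of the pattern this preparation enables: a model-specific register-request path can hand a ready-made regmap to the core, and i2c_dw_init_regmap() from the regmap conversion above then skips its DW_IC_COMP_TYPE autodetection because dev->map is already set. The struct dw_i2c_dev fields and i2c_dw_init_regmap() come from the diffs above; my_model_request_regs() and my_model_regmap_cfg are hypothetical placeholders, not code from this series.

#include <linux/err.h>
#include <linux/regmap.h>

/* Hypothetical per-model quirk: build dev->map instead of ioremapping. */
static int my_model_request_regs(struct dw_i2c_dev *dev)
{
	/* my_model_regmap_cfg would carry custom .reg_read/.reg_write ops. */
	dev->map = devm_regmap_init(dev->dev, NULL, dev, &my_model_regmap_cfg);
	if (IS_ERR(dev->map))
		return PTR_ERR(dev->map);

	/*
	 * i2c_dw_init_regmap() sees dev->map != NULL and returns early, so
	 * the common code keeps using regmap_read()/regmap_write() (and
	 * helpers such as regmap_update_bits() and regmap_read_poll_timeout())
	 * against the quirk-provided map.
	 */
	return 0;
}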
Signed-off-by: Serge Semin Reviewed-by: Andy Shevchenko Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-designware-platdrv.c | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 38657d821c727..9d467fa0e1634 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -108,6 +108,15 @@ static void dw_i2c_plat_pm_cleanup(struct dw_i2c_dev *dev) pm_runtime_put_noidle(dev->dev); } +static int dw_i2c_plat_request_regs(struct dw_i2c_dev *dev) +{ + struct platform_device *pdev = to_platform_device(dev->dev); + + dev->base = devm_platform_ioremap_resource(pdev, 0); + + return PTR_ERR_OR_ZERO(dev->base); +} + static int dw_i2c_plat_probe(struct platform_device *pdev) { struct dw_i2c_platform_data *pdata = dev_get_platdata(&pdev->dev); @@ -125,15 +134,14 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) return -ENOMEM; dev->flags = (uintptr_t)device_get_match_data(&pdev->dev); - - dev->base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(dev->base)) - return PTR_ERR(dev->base); - dev->dev = &pdev->dev; dev->irq = irq; platform_set_drvdata(pdev, dev); + ret = dw_i2c_plat_request_regs(dev); + if (ret) + return ret; + dev->rst = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL); if (IS_ERR(dev->rst)) return PTR_ERR(dev->rst); -- GitLab From fcb82a939df86018641f38124cb9a7811a5f8505 Mon Sep 17 00:00:00 2001 From: Serge Semin Date: Thu, 28 May 2020 12:33:21 +0300 Subject: [PATCH 0871/1971] i2c: designware: Add Baikal-T1 System I2C support Baikal-T1 System Controller is equipped with a dedicated I2C Controller which functionality is based on the DW APB I2C IP-core, the only difference in a way it' registers are accessed. There are three access register provided in the System Controller registers map, which indirectly address the normal DW APB I2C registers space. So in order to have the Baikal-T1 System I2C Controller supported by the common DW APB I2C driver we created a dedicated Dw I2C controller model quirk, which retrieves the syscon regmap from the parental dt node and creates a new regmap based on it. Signed-off-by: Serge Semin Reviewed-by: Andy Shevchenko Signed-off-by: Wolfram Sang --- drivers/i2c/busses/Kconfig | 3 +- drivers/i2c/busses/i2c-designware-core.h | 3 + drivers/i2c/busses/i2c-designware-platdrv.c | 78 ++++++++++++++++++++- 3 files changed, 81 insertions(+), 3 deletions(-) diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index c748beaaa8903..14e2e93a5698c 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -541,8 +541,9 @@ config I2C_DESIGNWARE_SLAVE config I2C_DESIGNWARE_PLATFORM tristate "Synopsys DesignWare Platform" - select I2C_DESIGNWARE_CORE depends on (ACPI && COMMON_CLK) || !ACPI + select I2C_DESIGNWARE_CORE + select MFD_SYSCON if MIPS_BAIKAL_T1 help If you say yes to this option, support will be included for the Synopsys DesignWare I2C adapter. 
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h index f5bbe3d6bcf8b..556673a1f61bd 100644 --- a/drivers/i2c/busses/i2c-designware-core.h +++ b/drivers/i2c/busses/i2c-designware-core.h @@ -183,6 +183,7 @@ struct reset_control; * struct dw_i2c_dev - private i2c-designware data * @dev: driver model device node * @map: IO registers map + * @sysmap: System controller registers map * @base: IO registers pointer * @ext: Extended IO registers pointer * @cmd_complete: tx completion indicator @@ -235,6 +236,7 @@ struct reset_control; struct dw_i2c_dev { struct device *dev; struct regmap *map; + struct regmap *sysmap; void __iomem *base; void __iomem *ext; struct completion cmd_complete; @@ -290,6 +292,7 @@ struct dw_i2c_dev { #define ACCESS_NO_IRQ_SUSPEND 0x00000002 #define MODEL_MSCC_OCELOT 0x00000100 +#define MODEL_BAIKAL_BT1 0x00000200 #define MODEL_MASK 0x00000f00 int i2c_dw_init_regmap(struct dw_i2c_dev *dev); diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 9d467fa0e1634..b55c730f28cf8 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -25,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -58,6 +60,63 @@ MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match); #endif #ifdef CONFIG_OF +#define BT1_I2C_CTL 0x100 +#define BT1_I2C_CTL_ADDR_MASK GENMASK(7, 0) +#define BT1_I2C_CTL_WR BIT(8) +#define BT1_I2C_CTL_GO BIT(31) +#define BT1_I2C_DI 0x104 +#define BT1_I2C_DO 0x108 + +static int bt1_i2c_read(void *context, unsigned int reg, unsigned int *val) +{ + struct dw_i2c_dev *dev = context; + int ret; + + /* + * Note these methods shouldn't ever fail because the system controller + * registers are memory mapped. We check the return value just in case. 
+ */ + ret = regmap_write(dev->sysmap, BT1_I2C_CTL, + BT1_I2C_CTL_GO | (reg & BT1_I2C_CTL_ADDR_MASK)); + if (ret) + return ret; + + return regmap_read(dev->sysmap, BT1_I2C_DO, val); +} + +static int bt1_i2c_write(void *context, unsigned int reg, unsigned int val) +{ + struct dw_i2c_dev *dev = context; + int ret; + + ret = regmap_write(dev->sysmap, BT1_I2C_DI, val); + if (ret) + return ret; + + return regmap_write(dev->sysmap, BT1_I2C_CTL, + BT1_I2C_CTL_GO | BT1_I2C_CTL_WR | (reg & BT1_I2C_CTL_ADDR_MASK)); +} + +static struct regmap_config bt1_i2c_cfg = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .fast_io = true, + .reg_read = bt1_i2c_read, + .reg_write = bt1_i2c_write, + .max_register = DW_IC_COMP_TYPE, +}; + +static int bt1_i2c_request_regs(struct dw_i2c_dev *dev) +{ + dev->sysmap = syscon_node_to_regmap(dev->dev->of_node->parent); + if (IS_ERR(dev->sysmap)) + return PTR_ERR(dev->sysmap); + + dev->map = devm_regmap_init(dev->dev, NULL, dev, &bt1_i2c_cfg); + return PTR_ERR_OR_ZERO(dev->map); +} + #define MSCC_ICPU_CFG_TWI_DELAY 0x0 #define MSCC_ICPU_CFG_TWI_DELAY_ENABLE BIT(0) #define MSCC_ICPU_CFG_TWI_SPIKE_FILTER 0x4 @@ -90,10 +149,16 @@ static int dw_i2c_of_configure(struct platform_device *pdev) static const struct of_device_id dw_i2c_of_match[] = { { .compatible = "snps,designware-i2c", }, { .compatible = "mscc,ocelot-i2c", .data = (void *)MODEL_MSCC_OCELOT }, + { .compatible = "baikal,bt1-sys-i2c", .data = (void *)MODEL_BAIKAL_BT1 }, {}, }; MODULE_DEVICE_TABLE(of, dw_i2c_of_match); #else +static int bt1_i2c_request_regs(struct dw_i2c_dev *dev) +{ + return -ENODEV; +} + static inline int dw_i2c_of_configure(struct platform_device *pdev) { return -ENODEV; @@ -111,10 +176,19 @@ static void dw_i2c_plat_pm_cleanup(struct dw_i2c_dev *dev) static int dw_i2c_plat_request_regs(struct dw_i2c_dev *dev) { struct platform_device *pdev = to_platform_device(dev->dev); + int ret; - dev->base = devm_platform_ioremap_resource(pdev, 0); + switch (dev->flags & MODEL_MASK) { + case MODEL_BAIKAL_BT1: + ret = bt1_i2c_request_regs(dev); + break; + default: + dev->base = devm_platform_ioremap_resource(pdev, 0); + ret = PTR_ERR_OR_ZERO(dev->base); + break; + } - return PTR_ERR_OR_ZERO(dev->base); + return ret; } static int dw_i2c_plat_probe(struct platform_device *pdev) -- GitLab From 00d9990acb23c4319a1dc961d4e29a9213923b67 Mon Sep 17 00:00:00 2001 From: Jason Yan Date: Fri, 3 Apr 2020 11:52:08 +0800 Subject: [PATCH 0872/1971] mailbox: pcc: make pcc_mbox_driver static Fix the following sparse warning: drivers/mailbox/pcc.c:571:24: warning: symbol 'pcc_mbox_driver' was not declared. Should it be static? 
Reported-by: Hulk Robot Signed-off-by: Jason Yan Signed-off-by: Jassi Brar --- drivers/mailbox/pcc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c index 34844b7a36752..8c7fac38bb1cb 100644 --- a/drivers/mailbox/pcc.c +++ b/drivers/mailbox/pcc.c @@ -568,7 +568,7 @@ static int pcc_mbox_probe(struct platform_device *pdev) return ret; } -struct platform_driver pcc_mbox_driver = { +static struct platform_driver pcc_mbox_driver = { .probe = pcc_mbox_probe, .driver = { .name = "PCCT", -- GitLab From 676f23eab75adbf72c029bdfb7b5a7b2f1129177 Mon Sep 17 00:00:00 2001 From: Anson Huang Date: Mon, 13 Apr 2020 20:25:30 +0800 Subject: [PATCH 0873/1971] mailbox: imx: Support runtime PM Some power hungry sub-systems like VPU has its own MUs which also use mailbox driver, current mailbox driver uses platform driver model and MU's power will be ON after driver probed and left ON there, it may cause the whole sub-system can NOT enter lower power mode, take VPU driver for example, it has runtime PM support, but due to its MU always ON, the VPU sub-system will be always ON and consume many power during kernel idle. To save power in kernel idle, mailbox driver needs to support runtime PM in order to power off MU when it is unused. However, the runtime suspend/resume can ONLY be implemented in mailbox's .shutdown/.startup callback, so its consumer needs to call mbox_request_channel()/mbox_free_channel() in consumer driver's runtime PM callback, then the MU's power will be ON/OFF along with consumer's runtime PM status. Signed-off-by: Anson Huang Signed-off-by: Jassi Brar --- drivers/mailbox/imx-mailbox.c | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c index 7906624a731c1..97bf0acf51d8c 100644 --- a/drivers/mailbox/imx-mailbox.c +++ b/drivers/mailbox/imx-mailbox.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #define IMX_MU_xSR_GIPn(x) BIT(28 + (3 - (x))) @@ -287,6 +288,7 @@ static int imx_mu_startup(struct mbox_chan *chan) struct imx_mu_con_priv *cp = chan->con_priv; int ret; + pm_runtime_get_sync(priv->dev); if (cp->type == IMX_MU_TYPE_TXDB) { /* Tx doorbell don't have ACK support */ tasklet_init(&cp->txdb_tasklet, imx_mu_txdb_tasklet, @@ -323,6 +325,7 @@ static void imx_mu_shutdown(struct mbox_chan *chan) if (cp->type == IMX_MU_TYPE_TXDB) { tasklet_kill(&cp->txdb_tasklet); + pm_runtime_put_sync(priv->dev); return; } @@ -341,6 +344,7 @@ static void imx_mu_shutdown(struct mbox_chan *chan) } free_irq(priv->irq, chan); + pm_runtime_put_sync(priv->dev); } static const struct mbox_chan_ops imx_mu_ops = { @@ -508,7 +512,27 @@ static int imx_mu_probe(struct platform_device *pdev) platform_set_drvdata(pdev, priv); - return devm_mbox_controller_register(dev, &priv->mbox); + ret = devm_mbox_controller_register(dev, &priv->mbox); + if (ret) + return ret; + + pm_runtime_enable(dev); + + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + pm_runtime_put_noidle(dev); + goto disable_runtime_pm; + } + + ret = pm_runtime_put_sync(dev); + if (ret < 0) + goto disable_runtime_pm; + + return 0; + +disable_runtime_pm: + pm_runtime_disable(dev); + return ret; } static int imx_mu_remove(struct platform_device *pdev) @@ -516,6 +540,7 @@ static int imx_mu_remove(struct platform_device *pdev) struct imx_mu_priv *priv = platform_get_drvdata(pdev); clk_disable_unprepare(priv->clk); + pm_runtime_disable(priv->dev); return 0; } -- GitLab From 
1b3a347b7d56aa637157da1b7df225071af1421f Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 7 Apr 2020 12:27:53 +0300 Subject: [PATCH 0874/1971] mailbox: imx: Fix return in imx_mu_scu_xlate() This called from mbox_request_channel(). The caller is expecting error pointers and not NULL so this "return NULL;" will lead to an Oops. Fixes: 0a67003b1985 ("mailbox: imx: add SCU MU support") Signed-off-by: Dan Carpenter Signed-off-by: Jassi Brar --- drivers/mailbox/imx-mailbox.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c index 97bf0acf51d8c..cacc60662f24c 100644 --- a/drivers/mailbox/imx-mailbox.c +++ b/drivers/mailbox/imx-mailbox.c @@ -378,7 +378,7 @@ static struct mbox_chan *imx_mu_scu_xlate(struct mbox_controller *mbox, break; default: dev_err(mbox->dev, "Invalid chan type: %d\n", type); - return NULL; + return ERR_PTR(-EINVAL); } if (chan >= mbox->num_chans) { -- GitLab From fad5972a1eca71c797e2e209ec403ade9bf69384 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Wed, 27 May 2020 13:30:39 +0200 Subject: [PATCH 0875/1971] i2c: add 'single-master' property to generic bindings It is useful to know if we are the only master on a given bus. Because this is a HW description of the bus, add it to the generic bindings. Signed-off-by: Wolfram Sang Reviewed-by: Rob Herring Signed-off-by: Wolfram Sang --- Documentation/devicetree/bindings/i2c/i2c.txt | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/Documentation/devicetree/bindings/i2c/i2c.txt b/Documentation/devicetree/bindings/i2c/i2c.txt index 819436b48faef..438ae123107e7 100644 --- a/Documentation/devicetree/bindings/i2c/i2c.txt +++ b/Documentation/devicetree/bindings/i2c/i2c.txt @@ -70,7 +70,12 @@ wants to support one of the below features, it should adapt these bindings. - multi-master states that there is another master active on this bus. The OS can use this information to adapt power management to keep the arbitration awake - all the time, for example. + all the time, for example. Can not be combined with 'single-master'. + +- single-master + states that there is no other master active on this bus. The OS can use + this information to detect a stalled bus more reliably, for example. + Can not be combined with 'multi-master'. Required properties (per child device) -------------------------------------- -- GitLab From 47303f9438954bc7bca593310ac8685d11297725 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Sat, 30 May 2020 17:56:28 -0500 Subject: [PATCH 0876/1971] mailbox: imx: Disable the clock on devm_mbox_controller_register() failure devm_mbox_controller_register() may fail, and in the case of failure the priv->clk clock that was previously enabled, should be disabled. 
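As a side note to the i.MX mailbox runtime PM change earlier in this series: the MU's power only follows its consumer if the consumer requests and frees the channel from its own runtime PM callbacks, as that commit message describes. A hedged sketch of such a consumer follows; all names are purely illustrative and not taken from any existing driver.

#include <linux/err.h>
#include <linux/mailbox_client.h>
#include <linux/pm_runtime.h>

struct my_vpu {
	struct device *dev;
	struct mbox_client cl;
	struct mbox_chan *tx_ch;
};

static int my_vpu_runtime_resume(struct device *dev)
{
	struct my_vpu *vpu = dev_get_drvdata(dev);

	/* Requesting the channel runs the MU's .startup -> pm_runtime_get. */
	vpu->tx_ch = mbox_request_channel_byname(&vpu->cl, "tx");

	return PTR_ERR_OR_ZERO(vpu->tx_ch);
}

static int my_vpu_runtime_suspend(struct device *dev)
{
	struct my_vpu *vpu = dev_get_drvdata(dev);

	/* Freeing the channel runs the MU's .shutdown -> pm_runtime_put. */
	mbox_free_channel(vpu->tx_ch);
	vpu->tx_ch = NULL;

	return 0;
}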
Fixes: 2bb7005696e2 ("mailbox: Add support for i.MX messaging unit") Signed-off-by: Fabio Estevam Reviewed-by: Peng Fan Acked-by: Oleksij Rempel [Jassi: fixed merge/am conflict] Signed-off-by: Jassi Brar --- drivers/mailbox/imx-mailbox.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c index cacc60662f24c..19f8d79cebac0 100644 --- a/drivers/mailbox/imx-mailbox.c +++ b/drivers/mailbox/imx-mailbox.c @@ -513,8 +513,10 @@ static int imx_mu_probe(struct platform_device *pdev) platform_set_drvdata(pdev, priv); ret = devm_mbox_controller_register(dev, &priv->mbox); - if (ret) + if (ret) { + clk_disable_unprepare(priv->clk); return ret; + } pm_runtime_enable(dev); -- GitLab From ec32481b1669ad45480ae3b439e3dcde69bfaa68 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sun, 5 Apr 2020 15:10:16 +0200 Subject: [PATCH 0877/1971] mailbox: ZynqMP IPI: Delete an error message in zynqmp_ipi_probe() The function platform_get_irq can log an error already. Thus omit a redundant message for the exception handling in the calling function. This issue was detected by using the Coccinelle software. Signed-off-by: Markus Elfring Signed-off-by: Jassi Brar --- drivers/mailbox/zynqmp-ipi-mailbox.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c index 86887c9a349a0..a4d176f160b83 100644 --- a/drivers/mailbox/zynqmp-ipi-mailbox.c +++ b/drivers/mailbox/zynqmp-ipi-mailbox.c @@ -668,10 +668,9 @@ static int zynqmp_ipi_probe(struct platform_device *pdev) /* IPI IRQ */ ret = platform_get_irq(pdev, 0); - if (ret < 0) { - dev_err(dev, "unable to find IPI IRQ.\n"); + if (ret < 0) goto free_mbox_dev; - } + pdata->irq = ret; ret = devm_request_irq(dev, pdata->irq, zynqmp_ipi_interrupt, IRQF_SHARED, dev_name(dev), pdata); -- GitLab From fa75386538e5284a2e3ec2afe0c15c0532fb54d2 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Fri, 22 May 2020 21:31:08 +0800 Subject: [PATCH 0878/1971] dt-bindings: mailbox: Add the Spreadtrum mailbox documentation Add the Spreadtrum mailbox documentation. 
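The binding described above is consumed by the driver added in the next patch. Since that driver's send path writes exactly two 32-bit words per transfer and uses the channel index as the target core id, a client hands it an 8-byte message. A minimal, hypothetical sketch (client names are illustrative and not part of this series), assuming the channel was requested with cl->tx_block = true so the on-stack buffer stays valid until the transfer completes:

#include <linux/mailbox_client.h>

/* Send one 8-byte message (two u32 words) to the remote core behind @chan. */
static int my_client_send(struct mbox_chan *chan, u32 lo, u32 hi)
{
	u32 msg[2] = { lo, hi };

	/*
	 * The controller's .send_data callback copies the two words into its
	 * inbox FIFO and triggers the remote request; a non-negative return
	 * value indicates success.
	 */
	return mbox_send_message(chan, msg);
}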
Reviewed-by: Rob Herring Signed-off-by: Baolin Wang Signed-off-by: Jassi Brar --- .../bindings/mailbox/sprd-mailbox.yaml | 60 +++++++++++++++++++ 1 file changed, 60 insertions(+) create mode 100644 Documentation/devicetree/bindings/mailbox/sprd-mailbox.yaml diff --git a/Documentation/devicetree/bindings/mailbox/sprd-mailbox.yaml b/Documentation/devicetree/bindings/mailbox/sprd-mailbox.yaml new file mode 100644 index 0000000000000..0f7451b42d7e7 --- /dev/null +++ b/Documentation/devicetree/bindings/mailbox/sprd-mailbox.yaml @@ -0,0 +1,60 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: "http://devicetree.org/schemas/mailbox/sprd-mailbox.yaml#" +$schema: "http://devicetree.org/meta-schemas/core.yaml#" + +title: Spreadtrum mailbox controller bindings + +maintainers: + - Orson Zhai + - Baolin Wang + - Chunyan Zhang + +properties: + compatible: + enum: + - sprd,sc9860-mailbox + + reg: + items: + - description: inbox registers' base address + - description: outbox registers' base address + + interrupts: + items: + - description: inbox interrupt + - description: outbox interrupt + + clocks: + maxItems: 1 + + clock-names: + items: + - const: enable + + "#mbox-cells": + const: 1 + +required: + - compatible + - reg + - interrupts + - "#mbox-cells" + - clocks + - clock-names + +additionalProperties: false + +examples: + - | + #include + mailbox: mailbox@400a0000 { + compatible = "sprd,sc9860-mailbox"; + reg = <0 0x400a0000 0 0x8000>, <0 0x400a8000 0 0x8000>; + #mbox-cells = <1>; + clock-names = "enable"; + clocks = <&aon_gate 53>; + interrupts = , ; + }; +... -- GitLab From ca27fc26cd2219d964b4fc0efac634ab237b6c8e Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Fri, 22 May 2020 21:31:09 +0800 Subject: [PATCH 0879/1971] mailbox: sprd: Add Spreadtrum mailbox driver The Spreadtrum mailbox controller supports 8 channels to communicate with MCUs, and it contains 2 different parts: inbox and outbox, which are used to send and receive messages by IRQ mode. Signed-off-by: Baolin Wang Signed-off-by: Jassi Brar --- drivers/mailbox/Kconfig | 8 + drivers/mailbox/Makefile | 2 + drivers/mailbox/sprd-mailbox.c | 361 +++++++++++++++++++++++++++++++++ 3 files changed, 371 insertions(+) create mode 100644 drivers/mailbox/sprd-mailbox.c diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig index 5a577a6734cf4..e03f3fb5caed1 100644 --- a/drivers/mailbox/Kconfig +++ b/drivers/mailbox/Kconfig @@ -236,4 +236,12 @@ config SUN6I_MSGBOX various Allwinner SoCs. This mailbox is used for communication between the application CPUs and the power management coprocessor. +config SPRD_MBOX + tristate "Spreadtrum Mailbox" + depends on ARCH_SPRD || COMPILE_TEST + help + Mailbox driver implementation for the Spreadtrum platform. It is used + to send message between application processors and MCU. Say Y here if + you want to build the Spreatrum mailbox controller driver. 
+ endif diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile index 2e4364ef5c473..9caf4ede6ce0c 100644 --- a/drivers/mailbox/Makefile +++ b/drivers/mailbox/Makefile @@ -50,3 +50,5 @@ obj-$(CONFIG_MTK_CMDQ_MBOX) += mtk-cmdq-mailbox.o obj-$(CONFIG_ZYNQMP_IPI_MBOX) += zynqmp-ipi-mailbox.o obj-$(CONFIG_SUN6I_MSGBOX) += sun6i-msgbox.o + +obj-$(CONFIG_SPRD_MBOX) += sprd-mailbox.o diff --git a/drivers/mailbox/sprd-mailbox.c b/drivers/mailbox/sprd-mailbox.c new file mode 100644 index 0000000000000..f6fab24ae8a9a --- /dev/null +++ b/drivers/mailbox/sprd-mailbox.c @@ -0,0 +1,361 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Spreadtrum mailbox driver + * + * Copyright (c) 2020 Spreadtrum Communications Inc. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define SPRD_MBOX_ID 0x0 +#define SPRD_MBOX_MSG_LOW 0x4 +#define SPRD_MBOX_MSG_HIGH 0x8 +#define SPRD_MBOX_TRIGGER 0xc +#define SPRD_MBOX_FIFO_RST 0x10 +#define SPRD_MBOX_FIFO_STS 0x14 +#define SPRD_MBOX_IRQ_STS 0x18 +#define SPRD_MBOX_IRQ_MSK 0x1c +#define SPRD_MBOX_LOCK 0x20 +#define SPRD_MBOX_FIFO_DEPTH 0x24 + +/* Bit and mask definiation for inbox's SPRD_MBOX_FIFO_STS register */ +#define SPRD_INBOX_FIFO_DELIVER_MASK GENMASK(23, 16) +#define SPRD_INBOX_FIFO_OVERLOW_MASK GENMASK(15, 8) +#define SPRD_INBOX_FIFO_DELIVER_SHIFT 16 +#define SPRD_INBOX_FIFO_BUSY_MASK GENMASK(7, 0) + +/* Bit and mask definiation for SPRD_MBOX_IRQ_STS register */ +#define SPRD_MBOX_IRQ_CLR BIT(0) + +/* Bit and mask definiation for outbox's SPRD_MBOX_FIFO_STS register */ +#define SPRD_OUTBOX_FIFO_FULL BIT(0) +#define SPRD_OUTBOX_FIFO_WR_SHIFT 16 +#define SPRD_OUTBOX_FIFO_RD_SHIFT 24 +#define SPRD_OUTBOX_FIFO_POS_MASK GENMASK(7, 0) + +/* Bit and mask definiation for inbox's SPRD_MBOX_IRQ_MSK register */ +#define SPRD_INBOX_FIFO_BLOCK_IRQ BIT(0) +#define SPRD_INBOX_FIFO_OVERFLOW_IRQ BIT(1) +#define SPRD_INBOX_FIFO_DELIVER_IRQ BIT(2) +#define SPRD_INBOX_FIFO_IRQ_MASK GENMASK(2, 0) + +/* Bit and mask definiation for outbox's SPRD_MBOX_IRQ_MSK register */ +#define SPRD_OUTBOX_FIFO_NOT_EMPTY_IRQ BIT(0) +#define SPRD_OUTBOX_FIFO_IRQ_MASK GENMASK(4, 0) + +#define SPRD_MBOX_CHAN_MAX 8 + +struct sprd_mbox_priv { + struct mbox_controller mbox; + struct device *dev; + void __iomem *inbox_base; + void __iomem *outbox_base; + struct clk *clk; + u32 outbox_fifo_depth; + + struct mbox_chan chan[SPRD_MBOX_CHAN_MAX]; +}; + +static struct sprd_mbox_priv *to_sprd_mbox_priv(struct mbox_controller *mbox) +{ + return container_of(mbox, struct sprd_mbox_priv, mbox); +} + +static u32 sprd_mbox_get_fifo_len(struct sprd_mbox_priv *priv, u32 fifo_sts) +{ + u32 wr_pos = (fifo_sts >> SPRD_OUTBOX_FIFO_WR_SHIFT) & + SPRD_OUTBOX_FIFO_POS_MASK; + u32 rd_pos = (fifo_sts >> SPRD_OUTBOX_FIFO_RD_SHIFT) & + SPRD_OUTBOX_FIFO_POS_MASK; + u32 fifo_len; + + /* + * If the read pointer is equal with write pointer, which means the fifo + * is full or empty. 
+ */ + if (wr_pos == rd_pos) { + if (fifo_sts & SPRD_OUTBOX_FIFO_FULL) + fifo_len = priv->outbox_fifo_depth; + else + fifo_len = 0; + } else if (wr_pos > rd_pos) { + fifo_len = wr_pos - rd_pos; + } else { + fifo_len = priv->outbox_fifo_depth - rd_pos + wr_pos; + } + + return fifo_len; +} + +static irqreturn_t sprd_mbox_outbox_isr(int irq, void *data) +{ + struct sprd_mbox_priv *priv = data; + struct mbox_chan *chan; + u32 fifo_sts, fifo_len, msg[2]; + int i, id; + + fifo_sts = readl(priv->outbox_base + SPRD_MBOX_FIFO_STS); + + fifo_len = sprd_mbox_get_fifo_len(priv, fifo_sts); + if (!fifo_len) { + dev_warn_ratelimited(priv->dev, "spurious outbox interrupt\n"); + return IRQ_NONE; + } + + for (i = 0; i < fifo_len; i++) { + msg[0] = readl(priv->outbox_base + SPRD_MBOX_MSG_LOW); + msg[1] = readl(priv->outbox_base + SPRD_MBOX_MSG_HIGH); + id = readl(priv->outbox_base + SPRD_MBOX_ID); + + chan = &priv->chan[id]; + mbox_chan_received_data(chan, (void *)msg); + + /* Trigger to update outbox FIFO pointer */ + writel(0x1, priv->outbox_base + SPRD_MBOX_TRIGGER); + } + + /* Clear irq status after reading all message. */ + writel(SPRD_MBOX_IRQ_CLR, priv->outbox_base + SPRD_MBOX_IRQ_STS); + + return IRQ_HANDLED; +} + +static irqreturn_t sprd_mbox_inbox_isr(int irq, void *data) +{ + struct sprd_mbox_priv *priv = data; + struct mbox_chan *chan; + u32 fifo_sts, send_sts, busy, id; + + fifo_sts = readl(priv->inbox_base + SPRD_MBOX_FIFO_STS); + + /* Get the inbox data delivery status */ + send_sts = (fifo_sts & SPRD_INBOX_FIFO_DELIVER_MASK) >> + SPRD_INBOX_FIFO_DELIVER_SHIFT; + if (!send_sts) { + dev_warn_ratelimited(priv->dev, "spurious inbox interrupt\n"); + return IRQ_NONE; + } + + while (send_sts) { + id = __ffs(send_sts); + send_sts &= (send_sts - 1); + + chan = &priv->chan[id]; + + /* + * Check if the message was fetched by remote traget, if yes, + * that means the transmission has been completed. 
+ */ + busy = fifo_sts & SPRD_INBOX_FIFO_BUSY_MASK; + if (!(busy & BIT(id))) + mbox_chan_txdone(chan, 0); + } + + /* Clear FIFO delivery and overflow status */ + writel(fifo_sts & + (SPRD_INBOX_FIFO_DELIVER_MASK | SPRD_INBOX_FIFO_OVERLOW_MASK), + priv->inbox_base + SPRD_MBOX_FIFO_RST); + + /* Clear irq status */ + writel(SPRD_MBOX_IRQ_CLR, priv->inbox_base + SPRD_MBOX_IRQ_STS); + + return IRQ_HANDLED; +} + +static int sprd_mbox_send_data(struct mbox_chan *chan, void *msg) +{ + struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox); + unsigned long id = (unsigned long)chan->con_priv; + u32 *data = msg; + + /* Write data into inbox FIFO, and only support 8 bytes every time */ + writel(data[0], priv->inbox_base + SPRD_MBOX_MSG_LOW); + writel(data[1], priv->inbox_base + SPRD_MBOX_MSG_HIGH); + + /* Set target core id */ + writel(id, priv->inbox_base + SPRD_MBOX_ID); + + /* Trigger remote request */ + writel(0x1, priv->inbox_base + SPRD_MBOX_TRIGGER); + + return 0; +} + +static int sprd_mbox_flush(struct mbox_chan *chan, unsigned long timeout) +{ + struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox); + unsigned long id = (unsigned long)chan->con_priv; + u32 busy; + + timeout = jiffies + msecs_to_jiffies(timeout); + + while (time_before(jiffies, timeout)) { + busy = readl(priv->inbox_base + SPRD_MBOX_FIFO_STS) & + SPRD_INBOX_FIFO_BUSY_MASK; + if (!(busy & BIT(id))) { + mbox_chan_txdone(chan, 0); + return 0; + } + + udelay(1); + } + + return -ETIME; +} + +static int sprd_mbox_startup(struct mbox_chan *chan) +{ + struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox); + u32 val; + + /* Select outbox FIFO mode and reset the outbox FIFO status */ + writel(0x0, priv->outbox_base + SPRD_MBOX_FIFO_RST); + + /* Enable inbox FIFO overflow and delivery interrupt */ + val = readl(priv->inbox_base + SPRD_MBOX_IRQ_MSK); + val &= ~(SPRD_INBOX_FIFO_OVERFLOW_IRQ | SPRD_INBOX_FIFO_DELIVER_IRQ); + writel(val, priv->inbox_base + SPRD_MBOX_IRQ_MSK); + + /* Enable outbox FIFO not empty interrupt */ + val = readl(priv->outbox_base + SPRD_MBOX_IRQ_MSK); + val &= ~SPRD_OUTBOX_FIFO_NOT_EMPTY_IRQ; + writel(val, priv->outbox_base + SPRD_MBOX_IRQ_MSK); + + return 0; +} + +static void sprd_mbox_shutdown(struct mbox_chan *chan) +{ + struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox); + + /* Disable inbox & outbox interrupt */ + writel(SPRD_INBOX_FIFO_IRQ_MASK, priv->inbox_base + SPRD_MBOX_IRQ_MSK); + writel(SPRD_OUTBOX_FIFO_IRQ_MASK, priv->outbox_base + SPRD_MBOX_IRQ_MSK); +} + +static const struct mbox_chan_ops sprd_mbox_ops = { + .send_data = sprd_mbox_send_data, + .flush = sprd_mbox_flush, + .startup = sprd_mbox_startup, + .shutdown = sprd_mbox_shutdown, +}; + +static void sprd_mbox_disable(void *data) +{ + struct sprd_mbox_priv *priv = data; + + clk_disable_unprepare(priv->clk); +} + +static int sprd_mbox_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct sprd_mbox_priv *priv; + int ret, inbox_irq, outbox_irq; + unsigned long id; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->dev = dev; + + /* + * The Spreadtrum mailbox uses an inbox to send messages to the target + * core, and uses an outbox to receive messages from other cores. + * + * Thus the mailbox controller supplies 2 different register addresses + * and IRQ numbers for inbox and outbox. 
+ */ + priv->inbox_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->inbox_base)) + return PTR_ERR(priv->inbox_base); + + priv->outbox_base = devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(priv->outbox_base)) + return PTR_ERR(priv->outbox_base); + + priv->clk = devm_clk_get(dev, "enable"); + if (IS_ERR(priv->clk)) { + dev_err(dev, "failed to get mailbox clock\n"); + return PTR_ERR(priv->clk); + } + + ret = clk_prepare_enable(priv->clk); + if (ret) + return ret; + + ret = devm_add_action_or_reset(dev, sprd_mbox_disable, priv); + if (ret) { + dev_err(dev, "failed to add mailbox disable action\n"); + return ret; + } + + inbox_irq = platform_get_irq(pdev, 0); + if (inbox_irq < 0) + return inbox_irq; + + ret = devm_request_irq(dev, inbox_irq, sprd_mbox_inbox_isr, + IRQF_NO_SUSPEND, dev_name(dev), priv); + if (ret) { + dev_err(dev, "failed to request inbox IRQ: %d\n", ret); + return ret; + } + + outbox_irq = platform_get_irq(pdev, 1); + if (outbox_irq < 0) + return outbox_irq; + + ret = devm_request_irq(dev, outbox_irq, sprd_mbox_outbox_isr, + IRQF_NO_SUSPEND, dev_name(dev), priv); + if (ret) { + dev_err(dev, "failed to request outbox IRQ: %d\n", ret); + return ret; + } + + /* Get the default outbox FIFO depth */ + priv->outbox_fifo_depth = + readl(priv->outbox_base + SPRD_MBOX_FIFO_DEPTH) + 1; + priv->mbox.dev = dev; + priv->mbox.chans = &priv->chan[0]; + priv->mbox.num_chans = SPRD_MBOX_CHAN_MAX; + priv->mbox.ops = &sprd_mbox_ops; + priv->mbox.txdone_irq = true; + + for (id = 0; id < SPRD_MBOX_CHAN_MAX; id++) + priv->chan[id].con_priv = (void *)id; + + ret = devm_mbox_controller_register(dev, &priv->mbox); + if (ret) { + dev_err(dev, "failed to register mailbox: %d\n", ret); + return ret; + } + + return 0; +} + +static const struct of_device_id sprd_mbox_of_match[] = { + { .compatible = "sprd,sc9860-mailbox", }, + { }, +}; +MODULE_DEVICE_TABLE(of, sprd_mbox_of_match); + +static struct platform_driver sprd_mbox_driver = { + .driver = { + .name = "sprd-mailbox", + .of_match_table = sprd_mbox_of_match, + }, + .probe = sprd_mbox_probe, +}; +module_platform_driver(sprd_mbox_driver); + +MODULE_AUTHOR("Baolin Wang "); +MODULE_DESCRIPTION("Spreadtrum mailbox driver"); +MODULE_LICENSE("GPL v2"); -- GitLab From 9d8ca628c0286a35e3f8382e44f8b79846a88603 Mon Sep 17 00:00:00 2001 From: Peng Fan Date: Tue, 14 Apr 2020 21:21:15 +0800 Subject: [PATCH 0880/1971] mailbox: imx-mailbox: fix scu msg header size check The i.MX8 SCU message header size is the number of "u32" elements, not "u8", so fix the check. Reported-by: coverity-bot Addresses-Coverity-ID: 1461658 ("Memory - corruptions") Signed-off-by: Peng Fan Reviewed-by: Leonard Crestez Acked-by: Oleksij Rempel Signed-off-by: Jassi Brar --- drivers/mailbox/imx-mailbox.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c index 19f8d79cebac0..bd69ecfb93522 100644 --- a/drivers/mailbox/imx-mailbox.c +++ b/drivers/mailbox/imx-mailbox.c @@ -155,12 +155,17 @@ static int imx_mu_scu_tx(struct imx_mu_priv *priv, switch (cp->type) { case IMX_MU_TYPE_TX: - if (msg->hdr.size > sizeof(*msg)) { + /* + * msg->hdr.size specifies the number of u32 words while + * sizeof yields bytes. 
+ */ + + if (msg->hdr.size > sizeof(*msg) / 4) { /* * The real message size can be different to * struct imx_sc_rpc_msg_max size */ - dev_err(priv->dev, "Exceed max msg size (%zu) on TX, got: %i\n", sizeof(*msg), msg->hdr.size); + dev_err(priv->dev, "Maximal message size (%zu bytes) exceeded on TX; got: %i bytes\n", sizeof(*msg), msg->hdr.size << 2); return -EINVAL; } @@ -199,9 +204,8 @@ static int imx_mu_scu_rx(struct imx_mu_priv *priv, imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_RIEn(0)); *data++ = imx_mu_read(priv, priv->dcfg->xRR[0]); - if (msg.hdr.size > sizeof(msg)) { - dev_err(priv->dev, "Exceed max msg size (%zu) on RX, got: %i\n", - sizeof(msg), msg.hdr.size); + if (msg.hdr.size > sizeof(msg) / 4) { + dev_err(priv->dev, "Maximal message size (%zu bytes) exceeded on RX; got: %i bytes\n", sizeof(msg), msg.hdr.size << 2); return -EINVAL; } -- GitLab From 445aeeb569f8d7904f8cf80b7c6826bb651ef80e Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Wed, 29 Apr 2020 09:35:03 +0000 Subject: [PATCH 0881/1971] mailbox: zynqmp-ipi: Fix NULL vs IS_ERR() check in zynqmp_ipi_mbox_probe() In case of error, the function devm_ioremap() returns NULL pointer not ERR_PTR(). So we should check whether the return value of devm_ioremap() is NULL instead of IS_ERR. Fixes: 4981b82ba2ff ("mailbox: ZynqMP IPI mailbox controller") Signed-off-by: Wei Yongjun Signed-off-by: Jassi Brar --- drivers/mailbox/zynqmp-ipi-mailbox.c | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c index a4d176f160b83..f44079d62b1a7 100644 --- a/drivers/mailbox/zynqmp-ipi-mailbox.c +++ b/drivers/mailbox/zynqmp-ipi-mailbox.c @@ -504,10 +504,9 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox, mchan->req_buf_size = resource_size(&res); mchan->req_buf = devm_ioremap(mdev, res.start, mchan->req_buf_size); - if (IS_ERR(mchan->req_buf)) { + if (!mchan->req_buf) { dev_err(mdev, "Unable to map IPI buffer I/O memory\n"); - ret = PTR_ERR(mchan->req_buf); - return ret; + return -ENOMEM; } } else if (ret != -ENODEV) { dev_err(mdev, "Unmatched resource %s, %d.\n", name, ret); @@ -520,10 +519,9 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox, mchan->resp_buf_size = resource_size(&res); mchan->resp_buf = devm_ioremap(mdev, res.start, mchan->resp_buf_size); - if (IS_ERR(mchan->resp_buf)) { + if (!mchan->resp_buf) { dev_err(mdev, "Unable to map IPI buffer I/O memory\n"); - ret = PTR_ERR(mchan->resp_buf); - return ret; + return -ENOMEM; } } else if (ret != -ENODEV) { dev_err(mdev, "Unmatched resource %s.\n", name); @@ -543,10 +541,9 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox, mchan->req_buf_size = resource_size(&res); mchan->req_buf = devm_ioremap(mdev, res.start, mchan->req_buf_size); - if (IS_ERR(mchan->req_buf)) { + if (!mchan->req_buf) { dev_err(mdev, "Unable to map IPI buffer I/O memory\n"); - ret = PTR_ERR(mchan->req_buf); - return ret; + return -ENOMEM; } } else if (ret != -ENODEV) { dev_err(mdev, "Unmatched resource %s.\n", name); @@ -559,10 +556,9 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox, mchan->resp_buf_size = resource_size(&res); mchan->resp_buf = devm_ioremap(mdev, res.start, mchan->resp_buf_size); - if (IS_ERR(mchan->resp_buf)) { + if (!mchan->resp_buf) { dev_err(mdev, "Unable to map IPI buffer I/O memory\n"); - ret = PTR_ERR(mchan->resp_buf); - return ret; + return -ENOMEM; } } else if (ret != -ENODEV) { dev_err(mdev, "Unmatched resource %s.\n", 
name); -- GitLab From a01822e94ee53e8ebc9632fe2764048b81921254 Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Wed, 20 May 2020 14:18:52 +0530 Subject: [PATCH 0882/1971] dt-bindings: mailbox: Add devicetree binding for Qcom IPCC Add devicetree YAML binding for Qualcomm Inter-Processor Communication Controller (IPCC) block. Reviewed-by: Bjorn Andersson Signed-off-by: Manivannan Sadhasivam Reviewed-by: Rob Herring Signed-off-by: Jassi Brar --- .../bindings/mailbox/qcom-ipcc.yaml | 80 +++++++++++++++++++ include/dt-bindings/mailbox/qcom-ipcc.h | 33 ++++++++ 2 files changed, 113 insertions(+) create mode 100644 Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml create mode 100644 include/dt-bindings/mailbox/qcom-ipcc.h diff --git a/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml b/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml new file mode 100644 index 0000000000000..4ac2123d91937 --- /dev/null +++ b/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml @@ -0,0 +1,80 @@ +# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/mailbox/qcom-ipcc.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Qualcomm Technologies, Inc. Inter-Processor Communication Controller + +maintainers: + - Manivannan Sadhasivam + +description: + The Inter-Processor Communication Controller (IPCC) is a centralized hardware + to route interrupts across various subsystems. It involves a three-level + addressing scheme called protocol, client and signal. For example, consider an + entity on the Application Processor Subsystem (APSS) that wants to listen to + Modem's interrupts via Shared Memory Point to Point (SMP2P) interface. In such + a case, the client would be Modem (client-id is 2) and the signal would be + SMP2P (signal-id is 2). The SMP2P itself falls under the Multiprocessor (MPROC) + protocol (protocol-id is 0). Refer include/dt-bindings/mailbox/qcom-ipcc.h + for the list of such IDs. + +properties: + compatible: + items: + - enum: + - qcom,sm8250-ipcc + - const: qcom,ipcc + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + interrupt-controller: true + + "#interrupt-cells": + const: 3 + description: + The first cell is the client-id, the second cell is the signal-id and the + third cell is the interrupt type. + + "#mbox-cells": + const: 2 + description: + The first cell is the client-id, and the second cell is the signal-id. + +required: + - compatible + - reg + - interrupts + - interrupt-controller + - "#interrupt-cells" + - "#mbox-cells" + +additionalProperties: false + +examples: + - | + #include + #include + + mailbox@408000 { + compatible = "qcom,sm8250-ipcc", "qcom,ipcc"; + reg = <0x408000 0x1000>; + interrupts = ; + interrupt-controller; + #interrupt-cells = <3>; + #mbox-cells = <2>; + }; + + smp2p-modem { + compatible = "qcom,smp2p"; + interrupts-extended = <&ipcc_mproc IPCC_CLIENT_MPSS + IPCC_MPROC_SIGNAL_SMP2P IRQ_TYPE_EDGE_RISING>; + mboxes = <&ipcc_mproc IPCC_CLIENT_MPSS IPCC_MPROC_SIGNAL_SMP2P>; + + /* Other SMP2P fields */ + }; diff --git a/include/dt-bindings/mailbox/qcom-ipcc.h b/include/dt-bindings/mailbox/qcom-ipcc.h new file mode 100644 index 0000000000000..4c23eefed5f30 --- /dev/null +++ b/include/dt-bindings/mailbox/qcom-ipcc.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */ +/* + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef __DT_BINDINGS_MAILBOX_IPCC_H +#define __DT_BINDINGS_MAILBOX_IPCC_H + +/* Signal IDs for MPROC protocol */ +#define IPCC_MPROC_SIGNAL_GLINK_QMP 0 +#define IPCC_MPROC_SIGNAL_SMP2P 2 +#define IPCC_MPROC_SIGNAL_PING 3 + +/* Client IDs */ +#define IPCC_CLIENT_AOP 0 +#define IPCC_CLIENT_TZ 1 +#define IPCC_CLIENT_MPSS 2 +#define IPCC_CLIENT_LPASS 3 +#define IPCC_CLIENT_SLPI 4 +#define IPCC_CLIENT_SDC 5 +#define IPCC_CLIENT_CDSP 6 +#define IPCC_CLIENT_NPU 7 +#define IPCC_CLIENT_APSS 8 +#define IPCC_CLIENT_GPU 9 +#define IPCC_CLIENT_CVP 10 +#define IPCC_CLIENT_CAM 11 +#define IPCC_CLIENT_VPU 12 +#define IPCC_CLIENT_PCIE0 13 +#define IPCC_CLIENT_PCIE1 14 +#define IPCC_CLIENT_PCIE2 15 +#define IPCC_CLIENT_SPSS 16 + +#endif -- GitLab From fa74a0257f45c5a92b82ae95c8455f06c598792f Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Sat, 30 May 2020 18:15:12 -0500 Subject: [PATCH 0883/1971] mailbox: Add support for Qualcomm IPCC Add support for the Inter-Processor Communication Controller (IPCC) block from Qualcomm that coordinates the interrupts (inbound & outbound) for Multiprocessor (MPROC), COMPUTE-Level0 (COMPUTE-L0) & COMPUTE-Level1 (COMPUTE-L1) protocols for the Application Processor Subsystem (APSS). This driver is modeled as an irqchip+mailbox driver. The irqchip part helps in receiving the interrupts from the IPCC clients such as modems, DSPs, PCI-E etc... and forwards them to respective entities in APSS. On the other hand, the mailbox part is used to send interrupts to the IPCC clients from the entities of APSS. Reviewed-by: Bjorn Andersson Signed-off-by: Raghavendra Rao Ananta Signed-off-by: Venkata Narendra Kumar Gutta Signed-off-by: Bjorn Andersson [mani: moved to mailbox, added static mbox channels and cleanups] Signed-off-by: Manivannan Sadhasivam Signed-off-by: Jassi Brar --- drivers/mailbox/Kconfig | 10 ++ drivers/mailbox/Makefile | 2 + drivers/mailbox/qcom-ipcc.c | 286 ++++++++++++++++++++++++++++++++++++ 3 files changed, 298 insertions(+) create mode 100644 drivers/mailbox/qcom-ipcc.c diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig index e03f3fb5caed1..05b1009e28207 100644 --- a/drivers/mailbox/Kconfig +++ b/drivers/mailbox/Kconfig @@ -244,4 +244,14 @@ config SPRD_MBOX to send message between application processors and MCU. Say Y here if you want to build the Spreatrum mailbox controller driver. +config QCOM_IPCC + bool "Qualcomm Technologies, Inc. IPCC driver" + depends on ARCH_QCOM || COMPILE_TEST + help + Qualcomm Technologies, Inc. Inter-Processor Communication Controller + (IPCC) driver for MSM devices. The driver provides mailbox support for + sending interrupts to the clients. On the other hand, the driver also + acts as an interrupt controller for receiving interrupts from clients. + Say Y here if you want to build this driver. + endif diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile index 9caf4ede6ce0c..60d224b723a1a 100644 --- a/drivers/mailbox/Makefile +++ b/drivers/mailbox/Makefile @@ -52,3 +52,5 @@ obj-$(CONFIG_ZYNQMP_IPI_MBOX) += zynqmp-ipi-mailbox.o obj-$(CONFIG_SUN6I_MSGBOX) += sun6i-msgbox.o obj-$(CONFIG_SPRD_MBOX) += sprd-mailbox.o + +obj-$(CONFIG_QCOM_IPCC) += qcom-ipcc.o diff --git a/drivers/mailbox/qcom-ipcc.c b/drivers/mailbox/qcom-ipcc.c new file mode 100644 index 0000000000000..2d13c72944c6f --- /dev/null +++ b/drivers/mailbox/qcom-ipcc.c @@ -0,0 +1,286 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#define IPCC_MBOX_MAX_CHAN 48 + +/* IPCC Register offsets */ +#define IPCC_REG_SEND_ID 0x0c +#define IPCC_REG_RECV_ID 0x10 +#define IPCC_REG_RECV_SIGNAL_ENABLE 0x14 +#define IPCC_REG_RECV_SIGNAL_DISABLE 0x18 +#define IPCC_REG_RECV_SIGNAL_CLEAR 0x1c +#define IPCC_REG_CLIENT_CLEAR 0x38 + +#define IPCC_SIGNAL_ID_MASK GENMASK(15, 0) +#define IPCC_CLIENT_ID_MASK GENMASK(31, 16) + +#define IPCC_NO_PENDING_IRQ GENMASK(31, 0) + +/** + * struct qcom_ipcc_chan_info - Per-mailbox-channel info + * @client_id: The client-id to which the interrupt has to be triggered + * @signal_id: The signal-id to which the interrupt has to be triggered + */ +struct qcom_ipcc_chan_info { + u16 client_id; + u16 signal_id; +}; + +/** + * struct qcom_ipcc - Holder for the mailbox driver + * @dev: Device associated with this instance + * @base: Base address of the IPCC frame associated to APSS + * @irq_domain: The irq_domain associated with this instance + * @chan: The mailbox channels array + * @mchan: The per-mailbox channel info array + * @mbox: The mailbox controller + * @irq: Summary irq + */ +struct qcom_ipcc { + struct device *dev; + void __iomem *base; + struct irq_domain *irq_domain; + struct mbox_chan chan[IPCC_MBOX_MAX_CHAN]; + struct qcom_ipcc_chan_info mchan[IPCC_MBOX_MAX_CHAN]; + struct mbox_controller mbox; + int irq; +}; + +static inline struct qcom_ipcc *to_qcom_ipcc(struct mbox_controller *mbox) +{ + return container_of(mbox, struct qcom_ipcc, mbox); +} + +static inline u32 qcom_ipcc_get_hwirq(u16 client_id, u16 signal_id) +{ + return FIELD_PREP(IPCC_CLIENT_ID_MASK, client_id) | + FIELD_PREP(IPCC_SIGNAL_ID_MASK, signal_id); +} + +static irqreturn_t qcom_ipcc_irq_fn(int irq, void *data) +{ + struct qcom_ipcc *ipcc = data; + u32 hwirq; + int virq; + + for (;;) { + hwirq = readl(ipcc->base + IPCC_REG_RECV_ID); + if (hwirq == IPCC_NO_PENDING_IRQ) + break; + + virq = irq_find_mapping(ipcc->irq_domain, hwirq); + writel(hwirq, ipcc->base + IPCC_REG_RECV_SIGNAL_CLEAR); + generic_handle_irq(virq); + } + + return IRQ_HANDLED; +} + +static void qcom_ipcc_mask_irq(struct irq_data *irqd) +{ + struct qcom_ipcc *ipcc = irq_data_get_irq_chip_data(irqd); + irq_hw_number_t hwirq = irqd_to_hwirq(irqd); + + writel(hwirq, ipcc->base + IPCC_REG_RECV_SIGNAL_DISABLE); +} + +static void qcom_ipcc_unmask_irq(struct irq_data *irqd) +{ + struct qcom_ipcc *ipcc = irq_data_get_irq_chip_data(irqd); + irq_hw_number_t hwirq = irqd_to_hwirq(irqd); + + writel(hwirq, ipcc->base + IPCC_REG_RECV_SIGNAL_ENABLE); +} + +static struct irq_chip qcom_ipcc_irq_chip = { + .name = "ipcc", + .irq_mask = qcom_ipcc_mask_irq, + .irq_unmask = qcom_ipcc_unmask_irq, + .flags = IRQCHIP_SKIP_SET_WAKE, +}; + +static int qcom_ipcc_domain_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hw) +{ + struct qcom_ipcc *ipcc = d->host_data; + + irq_set_chip_and_handler(irq, &qcom_ipcc_irq_chip, handle_level_irq); + irq_set_chip_data(irq, ipcc); + irq_set_noprobe(irq); + + return 0; +} + +static int qcom_ipcc_domain_xlate(struct irq_domain *d, + struct device_node *node, const u32 *intspec, + unsigned int intsize, + unsigned long *out_hwirq, + unsigned int *out_type) +{ + if (intsize != 3) + return -EINVAL; + + *out_hwirq = qcom_ipcc_get_hwirq(intspec[0], intspec[1]); + *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK; + + return 0; +} + +static const struct irq_domain_ops qcom_ipcc_irq_ops = { + .map = qcom_ipcc_domain_map, + .xlate = qcom_ipcc_domain_xlate, +}; + 
+static int qcom_ipcc_mbox_send_data(struct mbox_chan *chan, void *data) +{ + struct qcom_ipcc *ipcc = to_qcom_ipcc(chan->mbox); + struct qcom_ipcc_chan_info *mchan = chan->con_priv; + u32 hwirq; + + hwirq = qcom_ipcc_get_hwirq(mchan->client_id, mchan->signal_id); + writel(hwirq, ipcc->base + IPCC_REG_SEND_ID); + + return 0; +} + +static struct mbox_chan *qcom_ipcc_mbox_xlate(struct mbox_controller *mbox, + const struct of_phandle_args *ph) +{ + struct qcom_ipcc *ipcc = to_qcom_ipcc(mbox); + struct qcom_ipcc_chan_info *mchan; + struct mbox_chan *chan; + unsigned int i; + + if (ph->args_count != 2) + return ERR_PTR(-EINVAL); + + for (i = 0; i < IPCC_MBOX_MAX_CHAN; i++) { + chan = &ipcc->chan[i]; + if (!chan->con_priv) { + mchan = &ipcc->mchan[i]; + mchan->client_id = ph->args[0]; + mchan->signal_id = ph->args[1]; + chan->con_priv = mchan; + break; + } + + chan = NULL; + } + + return chan ?: ERR_PTR(-EBUSY); +} + +static const struct mbox_chan_ops ipcc_mbox_chan_ops = { + .send_data = qcom_ipcc_mbox_send_data, +}; + +static int qcom_ipcc_setup_mbox(struct qcom_ipcc *ipcc) +{ + struct mbox_controller *mbox; + struct device *dev = ipcc->dev; + + mbox = &ipcc->mbox; + mbox->dev = dev; + mbox->num_chans = IPCC_MBOX_MAX_CHAN; + mbox->chans = ipcc->chan; + mbox->ops = &ipcc_mbox_chan_ops; + mbox->of_xlate = qcom_ipcc_mbox_xlate; + mbox->txdone_irq = false; + mbox->txdone_poll = false; + + return devm_mbox_controller_register(dev, mbox); +} + +static int qcom_ipcc_probe(struct platform_device *pdev) +{ + struct qcom_ipcc *ipcc; + int ret; + + ipcc = devm_kzalloc(&pdev->dev, sizeof(*ipcc), GFP_KERNEL); + if (!ipcc) + return -ENOMEM; + + ipcc->dev = &pdev->dev; + + ipcc->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(ipcc->base)) + return PTR_ERR(ipcc->base); + + ipcc->irq = platform_get_irq(pdev, 0); + if (ipcc->irq < 0) + return ipcc->irq; + + ipcc->irq_domain = irq_domain_add_tree(pdev->dev.of_node, + &qcom_ipcc_irq_ops, ipcc); + if (!ipcc->irq_domain) + return -ENOMEM; + + ret = qcom_ipcc_setup_mbox(ipcc); + if (ret) + goto err_mbox; + + ret = devm_request_irq(&pdev->dev, ipcc->irq, qcom_ipcc_irq_fn, + IRQF_TRIGGER_HIGH, "ipcc", ipcc); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to register the irq: %d\n", ret); + goto err_mbox; + } + + enable_irq_wake(ipcc->irq); + platform_set_drvdata(pdev, ipcc); + + return 0; + +err_mbox: + irq_domain_remove(ipcc->irq_domain); + + return ret; +} + +static int qcom_ipcc_remove(struct platform_device *pdev) +{ + struct qcom_ipcc *ipcc = platform_get_drvdata(pdev); + + disable_irq_wake(ipcc->irq); + irq_domain_remove(ipcc->irq_domain); + + return 0; +} + +static const struct of_device_id qcom_ipcc_of_match[] = { + { .compatible = "qcom,ipcc"}, + {} +}; +MODULE_DEVICE_TABLE(of, qcom_ipcc_of_match); + +static struct platform_driver qcom_ipcc_driver = { + .probe = qcom_ipcc_probe, + .remove = qcom_ipcc_remove, + .driver = { + .name = "qcom-ipcc", + .of_match_table = qcom_ipcc_of_match, + }, +}; + +static int __init qcom_ipcc_init(void) +{ + return platform_driver_register(&qcom_ipcc_driver); +} +arch_initcall(qcom_ipcc_init); + +MODULE_AUTHOR("Venkata Narendra Kumar Gutta "); +MODULE_AUTHOR("Manivannan Sadhasivam "); +MODULE_DESCRIPTION("Qualcomm Technologies, Inc. 
IPCC driver"); +MODULE_LICENSE("GPL v2"); -- GitLab From d61b7997ca467a8249d104f529194a3c6337e767 Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Wed, 20 May 2020 14:18:54 +0530 Subject: [PATCH 0884/1971] MAINTAINERS: Add entry for Qualcomm IPCC driver Add MAINTAINERS entry for Qualcomm IPCC driver and its binding. Reviewed-by: Bjorn Andersson Signed-off-by: Manivannan Sadhasivam Signed-off-by: Jassi Brar --- MAINTAINERS | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index 50659d76976b7..ede56ab0fe3c2 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -14030,6 +14030,14 @@ L: linux-arm-msm@vger.kernel.org S: Maintained F: drivers/iommu/qcom_iommu.c +QUALCOMM IPCC MAILBOX DRIVER +M: Manivannan Sadhasivam +L: linux-arm-msm@vger.kernel.org +S: Supported +F: Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml +F: drivers/mailbox/qcom-ipcc.c +F: include/dt-bindings/mailbox/qcom-ipcc.h + QUALCOMM RMNET DRIVER M: Subash Abhinov Kasiviswanathan M: Sean Tranchetti -- GitLab From 0fa712c9db96b2c453809f85614aa008740ca8ec Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Wed, 22 Apr 2020 22:00:18 +0300 Subject: [PATCH 0885/1971] mtd: spi-nor: sfdp: add/use local variable in spi_nor_parse_bfpt() Despite of how spi_nor_parse_bfpt() abuses the structure fields during their calculation, gcc manages to make some decent code out of that. :-) Yet adding a local variable to store the BFPT DWORDs during calculations still saves 12 bytes of the object code (AArch64 gcc 4.8.5)... Signed-off-by: Sergei Shtylyov Signed-off-by: Tudor Ambarus --- drivers/mtd/spi-nor/sfdp.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/drivers/mtd/spi-nor/sfdp.c b/drivers/mtd/spi-nor/sfdp.c index ddb4808a0e9e6..d67182c12c4ae 100644 --- a/drivers/mtd/spi-nor/sfdp.c +++ b/drivers/mtd/spi-nor/sfdp.c @@ -437,7 +437,7 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor, struct sfdp_bfpt bfpt; size_t len; int i, cmd, err; - u32 addr; + u32 addr, val; u16 half; u8 erase_mask; @@ -473,21 +473,21 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor, } /* Flash Memory Density (in bits). */ - params->size = bfpt.dwords[BFPT_DWORD(2)]; - if (params->size & BIT(31)) { - params->size &= ~BIT(31); + val = bfpt.dwords[BFPT_DWORD(2)]; + if (val & BIT(31)) { + val &= ~BIT(31); /* * Prevent overflows on params->size. Anyway, a NOR of 2^64 * bits is unlikely to exist so this error probably means * the BFPT we are reading is corrupted/wrong. */ - if (params->size > 63) + if (val > 63) return -EINVAL; - params->size = 1ULL << params->size; + params->size = 1ULL << val; } else { - params->size++; + params->size = val + 1; } params->size >>= 3; /* Convert to bytes. */ @@ -554,10 +554,10 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor, params); /* Page size: this field specifies 'N' so the page size = 2^N bytes. */ - params->page_size = bfpt.dwords[BFPT_DWORD(11)]; - params->page_size &= BFPT_DWORD11_PAGE_SIZE_MASK; - params->page_size >>= BFPT_DWORD11_PAGE_SIZE_SHIFT; - params->page_size = 1U << params->page_size; + val = bfpt.dwords[BFPT_DWORD(11)]; + val &= BFPT_DWORD11_PAGE_SIZE_MASK; + val >>= BFPT_DWORD11_PAGE_SIZE_SHIFT; + params->page_size = 1U << val; /* Quad Enable Requirements. 
*/ switch (bfpt.dwords[BFPT_DWORD(15)] & BFPT_DWORD15_QER_MASK) { -- GitLab From e8aec15dd5842b5b11b0e621a2293348d3574a61 Mon Sep 17 00:00:00 2001 From: Mantas Pucka Date: Wed, 15 Apr 2020 16:48:30 +0300 Subject: [PATCH 0886/1971] mtd: spi-nor: winbond: Fix 4-byte opcode support for w25q256 There are 2 different chips (w25q256fv and w25q256jv) that share the same JEDEC ID. Only w25q256jv fully supports 4-byte opcodes. Use SFDP header version to differentiate between them. Fixes: 10050a02f7d5 ("mtd: spi-nor: Add 4B_OPCODES flag to w25q256") Signed-off-by: Mantas Pucka Signed-off-by: Tudor Ambarus --- drivers/mtd/spi-nor/sfdp.c | 4 ---- drivers/mtd/spi-nor/sfdp.h | 6 ++++++ drivers/mtd/spi-nor/winbond.c | 29 +++++++++++++++++++++++++++-- 3 files changed, 33 insertions(+), 6 deletions(-) diff --git a/drivers/mtd/spi-nor/sfdp.c b/drivers/mtd/spi-nor/sfdp.c index d67182c12c4ae..55c0c508464bc 100644 --- a/drivers/mtd/spi-nor/sfdp.c +++ b/drivers/mtd/spi-nor/sfdp.c @@ -21,10 +21,6 @@ #define SFDP_4BAIT_ID 0xff84 /* 4-byte Address Instruction Table */ #define SFDP_SIGNATURE 0x50444653U -#define SFDP_JESD216_MAJOR 1 -#define SFDP_JESD216_MINOR 0 -#define SFDP_JESD216A_MINOR 5 -#define SFDP_JESD216B_MINOR 6 struct sfdp_header { u32 signature; /* Ox50444653U <=> "SFDP" */ diff --git a/drivers/mtd/spi-nor/sfdp.h b/drivers/mtd/spi-nor/sfdp.h index f8198af43a63f..7f9846b3a1ad5 100644 --- a/drivers/mtd/spi-nor/sfdp.h +++ b/drivers/mtd/spi-nor/sfdp.h @@ -7,6 +7,12 @@ #ifndef __LINUX_MTD_SFDP_H #define __LINUX_MTD_SFDP_H +/* SFDP revisions */ +#define SFDP_JESD216_MAJOR 1 +#define SFDP_JESD216_MINOR 0 +#define SFDP_JESD216A_MINOR 5 +#define SFDP_JESD216B_MINOR 6 + /* Basic Flash Parameter Table */ /* diff --git a/drivers/mtd/spi-nor/winbond.c b/drivers/mtd/spi-nor/winbond.c index 17deabad57e11..5062af10f1389 100644 --- a/drivers/mtd/spi-nor/winbond.c +++ b/drivers/mtd/spi-nor/winbond.c @@ -8,6 +8,31 @@ #include "core.h" +static int +w25q256_post_bfpt_fixups(struct spi_nor *nor, + const struct sfdp_parameter_header *bfpt_header, + const struct sfdp_bfpt *bfpt, + struct spi_nor_flash_parameter *params) +{ + /* + * W25Q256JV supports 4B opcodes but W25Q256FV does not. + * Unfortunately, Winbond has re-used the same JEDEC ID for both + * variants which prevents us from defining a new entry in the parts + * table. + * To differentiate between W25Q256JV and W25Q256FV check SFDP header + * version: only JV has JESD216A compliant structure (version 5). 
+ */ + if (bfpt_header->major == SFDP_JESD216_MAJOR && + bfpt_header->minor == SFDP_JESD216A_MINOR) + nor->flags |= SNOR_F_4B_OPCODES; + + return 0; +} + +static struct spi_nor_fixups w25q256_fixups = { + .post_bfpt = w25q256_post_bfpt_fixups, +}; + static const struct flash_info winbond_parts[] = { /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */ { "w25x05", INFO(0xef3010, 0, 64 * 1024, 1, SECT_4K) }, @@ -53,8 +78,8 @@ static const struct flash_info winbond_parts[] = { { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) }, { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) }, { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, - SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | - SPI_NOR_4B_OPCODES) }, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) + .fixups = &w25q256_fixups }, { "w25q256jvm", INFO(0xef7019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, { "w25q256jw", INFO(0xef6019, 0, 64 * 1024, 512, -- GitLab From 4a3d21bc25c180a4f6a00e3244b54d9bf8c506be Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 14:59:35 +0200 Subject: [PATCH 0887/1971] mtd: rawnand: au1550nd: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-3-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/au1550nd.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/au1550nd.c b/drivers/mtd/nand/raw/au1550nd.c index 2ac84cb0501d3..d865200ccd08f 100644 --- a/drivers/mtd/nand/raw/au1550nd.c +++ b/drivers/mtd/nand/raw/au1550nd.c @@ -325,8 +325,12 @@ static int au1550nd_remove(struct platform_device *pdev) { struct au1550nd_ctx *ctx = platform_get_drvdata(pdev); struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + struct nand_chip *chip = &ctx->chip; + int ret; - nand_release(&ctx->chip); + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); iounmap(ctx->base); release_mem_region(r->start, 0x1000); kfree(ctx); -- GitLab From 9369043059282dbe69853c414b0f4be522d1010e Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 14:59:36 +0200 Subject: [PATCH 0888/1971] mtd: rawnand: bcm47xx: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. 
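
Put differently, nand_release() is essentially mtd_device_unregister() followed by
nand_cleanup(); open-coding the two calls keeps the teardown order explicit and lets
the caller look at the mtd_device_unregister() return value. The conversion pattern
used throughout this series looks roughly like this (my_nand_remove() and
struct my_nand_host are made-up names for illustration, not taken from any driver):

  /* Before: implicit unregister + cleanup */
  static int my_nand_remove(struct platform_device *pdev)
  {
          struct my_nand_host *host = platform_get_drvdata(pdev);

          nand_release(&host->chip);

          return 0;
  }

  /* After: the two steps are spelled out */
  static int my_nand_remove(struct platform_device *pdev)
  {
          struct my_nand_host *host = platform_get_drvdata(pdev);
          struct nand_chip *chip = &host->chip;
          int ret;

          ret = mtd_device_unregister(nand_to_mtd(chip));
          WARN_ON(ret);
          nand_cleanup(chip);

          return 0;
  }
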
Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-4-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/bcm47xxnflash/main.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/bcm47xxnflash/main.c b/drivers/mtd/nand/raw/bcm47xxnflash/main.c index 8dae97c1dbe71..dcc70d9dc6e5e 100644 --- a/drivers/mtd/nand/raw/bcm47xxnflash/main.c +++ b/drivers/mtd/nand/raw/bcm47xxnflash/main.c @@ -60,8 +60,12 @@ static int bcm47xxnflash_probe(struct platform_device *pdev) static int bcm47xxnflash_remove(struct platform_device *pdev) { struct bcm47xxnflash *nflash = platform_get_drvdata(pdev); + struct nand_chip *chip = &nflash->nand_chip; + int ret; - nand_release(&nflash->nand_chip); + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); return 0; } -- GitLab From 937d039dfdcf3140fe081ce0f28a634555859744 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 14:59:37 +0200 Subject: [PATCH 0889/1971] mtd: rawnand: brcmnand: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. Signed-off-by: Miquel Raynal Cc: Brian Norris Cc: Kamal Dasu Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-5-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/brcmnand/brcmnand.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c index 30a9a8dbcc050..132e1da2a3891 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c @@ -3048,9 +3048,15 @@ int brcmnand_remove(struct platform_device *pdev) { struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev); struct brcmnand_host *host; + struct nand_chip *chip; + int ret; - list_for_each_entry(host, &ctrl->host_list, node) - nand_release(&host->chip); + list_for_each_entry(host, &ctrl->host_list, node) { + chip = &host->chip; + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); + } clk_disable_unprepare(ctrl->clk); -- GitLab From 8b88f4e0a88b27caf1ca210533d167913745ccdd Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 14:59:38 +0200 Subject: [PATCH 0890/1971] mtd: rawnand: cadence: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. 
Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-6-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/cadence-nand-controller.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c index e7abb15c72536..c405722adfe10 100644 --- a/drivers/mtd/nand/raw/cadence-nand-controller.c +++ b/drivers/mtd/nand/raw/cadence-nand-controller.c @@ -2780,9 +2780,14 @@ static int cadence_nand_chip_init(struct cdns_nand_ctrl *cdns_ctrl, static void cadence_nand_chips_cleanup(struct cdns_nand_ctrl *cdns_ctrl) { struct cdns_nand_chip *entry, *temp; + struct nand_chip *chip; + int ret; list_for_each_entry_safe(entry, temp, &cdns_ctrl->chips, node) { - nand_release(&entry->chip); + chip = &entry->chip; + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); list_del(&entry->node); } } -- GitLab From 544bac8999a691a7a4cf033f547160c7a00eee60 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 14:59:39 +0200 Subject: [PATCH 0891/1971] mtd: rawnand: cafe: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-7-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/cafe_nand.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/cafe_nand.c b/drivers/mtd/nand/raw/cafe_nand.c index 2a0df13df5f35..92173790f20ba 100644 --- a/drivers/mtd/nand/raw/cafe_nand.c +++ b/drivers/mtd/nand/raw/cafe_nand.c @@ -807,11 +807,14 @@ static void cafe_nand_remove(struct pci_dev *pdev) struct mtd_info *mtd = pci_get_drvdata(pdev); struct nand_chip *chip = mtd_to_nand(mtd); struct cafe_priv *cafe = nand_get_controller_data(chip); + int ret; /* Disable NAND IRQ in global IRQ mask register */ cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK); free_irq(pdev->irq, mtd); - nand_release(chip); + ret = mtd_device_unregister(mtd); + WARN_ON(ret); + nand_cleanup(chip); free_rs(cafe->rs); pci_iounmap(pdev, cafe->mmio); dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr); -- GitLab From 970024f031aec4c761fde80572b8186c591af59c Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 14:59:41 +0200 Subject: [PATCH 0892/1971] mtd: rawnand: cs553x: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. 
Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-9-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/cs553x_nand.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/cs553x_nand.c b/drivers/mtd/nand/raw/cs553x_nand.c index df5e24a2bbd7b..9472bf798ed50 100644 --- a/drivers/mtd/nand/raw/cs553x_nand.c +++ b/drivers/mtd/nand/raw/cs553x_nand.c @@ -392,12 +392,15 @@ static void __exit cs553x_cleanup(void) struct cs553x_nand_controller *controller = controllers[i]; struct nand_chip *this = &controller->chip; struct mtd_info *mtd = nand_to_mtd(this); + int ret; if (!mtd) continue; /* Release resources, unregister device */ - nand_release(this); + ret = mtd_device_unregister(mtd); + WARN_ON(ret); + nand_cleanup(this); kfree(mtd->name); controllers[i] = NULL; -- GitLab From a9575c48e52042ccc06105e6b7fe4a26f49f0a67 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 14:59:42 +0200 Subject: [PATCH 0893/1971] mtd: rawnand: davinci: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-10-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/davinci_nand.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/davinci_nand.c b/drivers/mtd/nand/raw/davinci_nand.c index d8aa61a6928a5..52b87304954b1 100644 --- a/drivers/mtd/nand/raw/davinci_nand.c +++ b/drivers/mtd/nand/raw/davinci_nand.c @@ -823,13 +823,17 @@ err_cleanup_nand: static int nand_davinci_remove(struct platform_device *pdev) { struct davinci_nand_info *info = platform_get_drvdata(pdev); + struct nand_chip *chip = &info->chip; + int ret; spin_lock_irq(&davinci_nand_lock); if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME) ecc4_busy = false; spin_unlock_irq(&davinci_nand_lock); - nand_release(&info->chip); + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); return 0; } -- GitLab From 6ac64a17554ff98b5419af29ed139860a910ad0b Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 14:59:43 +0200 Subject: [PATCH 0894/1971] mtd: rawnand: denali: Delete items from the list in the _remove() path Denali driver keeps track of devices with a list. Delete items of this list as long as they are not in use anymore. 
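
It is the deletion inside the walk that requires switching from list_for_each_entry()
to list_for_each_entry_safe(): the _safe variant stashes the next node in a temporary
before the loop body runs, so list_del() on the current node cannot break the
traversal. A minimal sketch of the resulting loop (mirroring the hunk below):

  struct denali_chip *dchip, *tmp;

  list_for_each_entry_safe(dchip, tmp, &denali->chips, node) {
          nand_release(&dchip->chip);
          list_del(&dchip->node); /* safe: 'tmp' already points past it */
  }

With the plain list_for_each_entry() iterator, the next element would be read from the
node that list_del() has just unlinked (and poisoned), so the following iteration would
dereference a poisoned pointer.
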
Signed-off-by: Miquel Raynal Acked-by: Masahiro Yamada Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-11-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/denali.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c index 7a76b761dd0b2..6ac125b9e9279 100644 --- a/drivers/mtd/nand/raw/denali.c +++ b/drivers/mtd/nand/raw/denali.c @@ -1359,10 +1359,12 @@ EXPORT_SYMBOL(denali_init); void denali_remove(struct denali_controller *denali) { - struct denali_chip *dchip; + struct denali_chip *dchip, *tmp; - list_for_each_entry(dchip, &denali->chips, node) + list_for_each_entry_safe(dchip, tmp, &denali->chips, node) { nand_release(&dchip->chip); + list_del(&dchip->node); + } denali_disable_irq(denali); } -- GitLab From 009e2e1d831897794e6ff7ea4767af8a420360d3 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 14:59:44 +0200 Subject: [PATCH 0895/1971] mtd: rawnand: denali: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. Signed-off-by: Miquel Raynal Acked-by: Masahiro Yamada Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-12-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/denali.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c index 6ac125b9e9279..4e6e1578aa2d1 100644 --- a/drivers/mtd/nand/raw/denali.c +++ b/drivers/mtd/nand/raw/denali.c @@ -1360,9 +1360,14 @@ EXPORT_SYMBOL(denali_init); void denali_remove(struct denali_controller *denali) { struct denali_chip *dchip, *tmp; + struct nand_chip *chip; + int ret; list_for_each_entry_safe(dchip, tmp, &denali->chips, node) { - nand_release(&dchip->chip); + chip = &dchip->chip; + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); list_del(&dchip->node); } -- GitLab From c5be12e45940f1aa1b5dfa04db5d15ad24f7c896 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 14:59:45 +0200 Subject: [PATCH 0896/1971] mtd: rawnand: diskonchip: Fix the probe error path Not sure nand_cleanup() is the right function to call here but in any case it is not nand_release(). Indeed, even a comment says that calling nand_release() is a bit of a hack as there is no MTD device to unregister. So switch to nand_cleanup() for now and drop this comment. There is no Fixes tag applying here as the use of nand_release() in this driver predates by far the introduction of nand_cleanup() in commit d44154f969a4 ("mtd: nand: Provide nand_cleanup() function to free NAND related resources") which makes this change possible. However, pointing this commit as the culprit for backporting purposes makes sense even if it did not intruce any bug. 
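
The rule of thumb behind this is that nand_release(), i.e. mtd_device_unregister()
plus nand_cleanup(), only makes sense once the MTD device has actually been
registered. On a probe path that fails after nand_scan() but before
mtd_device_register(), only the scan-time resources exist, so nand_cleanup() alone is
the correct teardown, roughly (hypothetical probe skeleton, not this driver's exact
code):

  ret = nand_scan(nand, numchips);
  if (ret)
          return ret;

  ret = mtd_device_register(nand_to_mtd(nand), NULL, 0);
  if (ret) {
          nand_cleanup(nand);     /* not registered yet, nothing to unregister */
          return ret;
  }
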
Fixes: d44154f969a4 ("mtd: nand: Provide nand_cleanup() function to free NAND related resources") Signed-off-by: Miquel Raynal Cc: stable@vger.kernel.org Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-13-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/diskonchip.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c index 97f0b05b47c12..f8ccee7976451 100644 --- a/drivers/mtd/nand/raw/diskonchip.c +++ b/drivers/mtd/nand/raw/diskonchip.c @@ -1482,13 +1482,10 @@ static int __init doc_probe(unsigned long physadr) numchips = doc2001_init(mtd); if ((ret = nand_scan(nand, numchips)) || (ret = doc->late_init(mtd))) { - /* DBB note: i believe nand_release is necessary here, as + /* DBB note: i believe nand_cleanup is necessary here, as buffers may have been allocated in nand_base. Check with Thomas. FIX ME! */ - /* nand_release will call mtd_device_unregister, but we - haven't yet added it. This is handled without incident by - mtd_device_unregister, as far as I can tell. */ - nand_release(nand); + nand_cleanup(nand); goto fail; } -- GitLab From 63a1460768a1ae34746b3013940233afe5d88105 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 14:59:46 +0200 Subject: [PATCH 0897/1971] mtd: rawnand: diskonchip: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-14-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/diskonchip.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c index f8ccee7976451..43721863a0d8e 100644 --- a/drivers/mtd/nand/raw/diskonchip.c +++ b/drivers/mtd/nand/raw/diskonchip.c @@ -1514,13 +1514,16 @@ static void release_nanddoc(void) struct mtd_info *mtd, *nextmtd; struct nand_chip *nand; struct doc_priv *doc; + int ret; for (mtd = doclist; mtd; mtd = nextmtd) { nand = mtd_to_nand(mtd); doc = nand_get_controller_data(nand); nextmtd = doc->nextdoc; - nand_release(nand); + ret = mtd_device_unregister(mtd); + WARN_ON(ret); + nand_cleanup(nand); iounmap(doc->virtadr); release_mem_region(doc->physadr, DOC_IOREMAP_LEN); free_rs(doc->rs_decoder); -- GitLab From 128bbbf0ac4d812c4bc4be89126e319bf6c6a480 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 14:59:47 +0200 Subject: [PATCH 0898/1971] mtd: rawnand: fsl_elbc: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. 
Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-15-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/fsl_elbc_nand.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/fsl_elbc_nand.c b/drivers/mtd/nand/raw/fsl_elbc_nand.c index e1dc675b12bba..088692b2e27a7 100644 --- a/drivers/mtd/nand/raw/fsl_elbc_nand.c +++ b/drivers/mtd/nand/raw/fsl_elbc_nand.c @@ -956,8 +956,13 @@ static int fsl_elbc_nand_remove(struct platform_device *pdev) { struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = fsl_lbc_ctrl_dev->nand; struct fsl_elbc_mtd *priv = dev_get_drvdata(&pdev->dev); + struct nand_chip *chip = &priv->chip; + int ret; + + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); - nand_release(&priv->chip); fsl_elbc_chip_remove(priv); mutex_lock(&fsl_elbc_nand_mutex); -- GitLab From e9f2f5a807541b6ad1f8ab73586650b0e1275cf1 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 14:59:48 +0200 Subject: [PATCH 0899/1971] mtd: rawnand: fsl_ifc: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-16-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/fsl_ifc_nand.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/fsl_ifc_nand.c b/drivers/mtd/nand/raw/fsl_ifc_nand.c index 2af09edf405b5..00ae7a910b032 100644 --- a/drivers/mtd/nand/raw/fsl_ifc_nand.c +++ b/drivers/mtd/nand/raw/fsl_ifc_nand.c @@ -1093,8 +1093,13 @@ err: static int fsl_ifc_nand_remove(struct platform_device *dev) { struct fsl_ifc_mtd *priv = dev_get_drvdata(&dev->dev); + struct nand_chip *chip = &priv->chip; + int ret; + + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); - nand_release(&priv->chip); fsl_ifc_chip_remove(priv); mutex_lock(&fsl_ifc_nand_mutex); -- GitLab From f6c4e661491ab3de80b4568e2bcdbf52fcbbaf33 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 14:59:49 +0200 Subject: [PATCH 0900/1971] mtd: rawnand: fsl_upm: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. 
Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-17-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/fsl_upm.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/mtd/nand/raw/fsl_upm.c b/drivers/mtd/nand/raw/fsl_upm.c index f31fae3a4c689..627deb26db512 100644 --- a/drivers/mtd/nand/raw/fsl_upm.c +++ b/drivers/mtd/nand/raw/fsl_upm.c @@ -317,10 +317,13 @@ err1: static int fun_remove(struct platform_device *ofdev) { struct fsl_upm_nand *fun = dev_get_drvdata(&ofdev->dev); - struct mtd_info *mtd = nand_to_mtd(&fun->chip); - int i; + struct nand_chip *chip = &fun->chip; + struct mtd_info *mtd = nand_to_mtd(chip); + int ret, i; - nand_release(&fun->chip); + ret = mtd_device_unregister(mtd); + WARN_ON(ret); + nand_cleanup(chip); kfree(mtd->name); for (i = 0; i < fun->mchip_count; i++) { -- GitLab From 9cc02f4c0a87a9eba17e5c2cbbda375c9a4a2b06 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 14:59:50 +0200 Subject: [PATCH 0901/1971] mtd: rawnand: fsmc: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-18-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/fsmc_nand.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c index 2a9222e99bcc5..3909752b14c52 100644 --- a/drivers/mtd/nand/raw/fsmc_nand.c +++ b/drivers/mtd/nand/raw/fsmc_nand.c @@ -1136,7 +1136,12 @@ static int fsmc_nand_remove(struct platform_device *pdev) struct fsmc_nand_data *host = platform_get_drvdata(pdev); if (host) { - nand_release(&host->nand); + struct nand_chip *chip = &host->nand; + int ret; + + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); fsmc_nand_disable(host); if (host->mode == USE_DMA_ACCESS) { -- GitLab From dbe0241570edc37fbc24ff2d4d5c11fa6a0712eb Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 14:59:51 +0200 Subject: [PATCH 0902/1971] mtd: rawnand: gpio: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. 
Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-19-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/gpio.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/gpio.c b/drivers/mtd/nand/raw/gpio.c index f6b12354024f4..938077e5c6a9b 100644 --- a/drivers/mtd/nand/raw/gpio.c +++ b/drivers/mtd/nand/raw/gpio.c @@ -190,8 +190,12 @@ gpio_nand_get_io_sync(struct platform_device *pdev) static int gpio_nand_remove(struct platform_device *pdev) { struct gpiomtd *gpiomtd = platform_get_drvdata(pdev); + struct nand_chip *chip = &gpiomtd->nand_chip; + int ret; - nand_release(&gpiomtd->nand_chip); + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); /* Enable write protection and disable the chip */ if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp)) -- GitLab From 194f6c48cdd88d1da581d53527d000626a0d8203 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 14:59:52 +0200 Subject: [PATCH 0903/1971] mtd: rawnand: gpmi: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. Signed-off-by: Miquel Raynal Cc: Han Xu Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-20-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index cc4cb190968e5..9a4a1d30669de 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -2696,11 +2696,15 @@ exit_acquire_resources: static int gpmi_nand_remove(struct platform_device *pdev) { struct gpmi_nand_data *this = platform_get_drvdata(pdev); + struct nand_chip *chip = &this->nand; + int ret; pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); - nand_release(&this->nand); + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); gpmi_free_dma_buffer(this); release_resources(this); return 0; -- GitLab From 71a4917b4d4bf7cf5f6033522b3136ac7c1b4155 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 14:59:53 +0200 Subject: [PATCH 0904/1971] mtd: rawnand: hisi504: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. 
Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-21-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/hisi504_nand.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/hisi504_nand.c b/drivers/mtd/nand/raw/hisi504_nand.c index 0b48be54ba6f2..b84238e2268ae 100644 --- a/drivers/mtd/nand/raw/hisi504_nand.c +++ b/drivers/mtd/nand/raw/hisi504_nand.c @@ -806,8 +806,12 @@ static int hisi_nfc_probe(struct platform_device *pdev) static int hisi_nfc_remove(struct platform_device *pdev) { struct hinfc_host *host = platform_get_drvdata(pdev); + struct nand_chip *chip = &host->chip; + int ret; - nand_release(&host->chip); + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); return 0; } -- GitLab From de17cade0e034e9b721a6db9b488014effac1e5a Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 14:59:54 +0200 Subject: [PATCH 0905/1971] mtd: rawnand: ingenic: Fix the probe error path nand_release() is supposed be called after MTD device registration. Here, only nand_scan() happened, so use nand_cleanup() instead. There is no real Fixes tag applying here as the use of nand_release() in this driver predates the introduction of nand_cleanup() in commit d44154f969a4 ("mtd: nand: Provide nand_cleanup() function to free NAND related resources") which makes this change possible. Hence, pointing it as the commit to fix for backporting purposes, even if this commit is not introducing any bug makes sense. Fixes: d44154f969a4 ("mtd: nand: Provide nand_cleanup() function to free NAND related resources") Signed-off-by: Miquel Raynal Cc: stable@vger.kernel.org Cc: Paul Cercueil Cc: Harvey Hunt Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-22-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c index e7bd845fdbf5f..3bfb6fa8bad9d 100644 --- a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c +++ b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c @@ -376,7 +376,7 @@ static int ingenic_nand_init_chip(struct platform_device *pdev, ret = mtd_device_register(mtd, NULL, 0); if (ret) { - nand_release(chip); + nand_cleanup(chip); return ret; } -- GitLab From 28dcc4e8a8318beb77a7b42bccb9ed57707c546d Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 14:59:55 +0200 Subject: [PATCH 0906/1971] mtd: rawnand: ingenic: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. 
Signed-off-by: Miquel Raynal Cc: Paul Cercueil Cc: Harvey Hunt Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-23-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c index 3bfb6fa8bad9d..2af1b54d5d9da 100644 --- a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c +++ b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c @@ -387,13 +387,18 @@ static int ingenic_nand_init_chip(struct platform_device *pdev, static void ingenic_nand_cleanup_chips(struct ingenic_nfc *nfc) { - struct ingenic_nand *chip; + struct ingenic_nand *ingenic_chip; + struct nand_chip *chip; + int ret; while (!list_empty(&nfc->chips)) { - chip = list_first_entry(&nfc->chips, - struct ingenic_nand, chip_list); - nand_release(&chip->chip); - list_del(&chip->chip_list); + ingenic_chip = list_first_entry(&nfc->chips, + struct ingenic_nand, chip_list); + chip = &ingenic_chip->chip; + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); + list_del(&ingenic_chip->chip_list); } } -- GitLab From 5f3bce3a52758227e2a0b51531b9c0b7069d1724 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 14:59:56 +0200 Subject: [PATCH 0907/1971] mtd: rawnand: lpc32xx_mlc: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-24-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/lpc32xx_mlc.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/lpc32xx_mlc.c b/drivers/mtd/nand/raw/lpc32xx_mlc.c index 241b58b83240d..7521038af2efe 100644 --- a/drivers/mtd/nand/raw/lpc32xx_mlc.c +++ b/drivers/mtd/nand/raw/lpc32xx_mlc.c @@ -826,8 +826,13 @@ free_gpio: static int lpc32xx_nand_remove(struct platform_device *pdev) { struct lpc32xx_nand_host *host = platform_get_drvdata(pdev); + struct nand_chip *chip = &host->nand_chip; + int ret; + + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); - nand_release(&host->nand_chip); free_irq(host->irq, host); if (use_dma) dma_release_channel(host->dma_chan); -- GitLab From 21b7582777248ed3668c111811296ada4df295a9 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 14:59:57 +0200 Subject: [PATCH 0908/1971] mtd: rawnand: lpc32xx_slc: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. 
Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-25-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/lpc32xx_slc.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/lpc32xx_slc.c b/drivers/mtd/nand/raw/lpc32xx_slc.c index 163f976353f8e..b151fd0008156 100644 --- a/drivers/mtd/nand/raw/lpc32xx_slc.c +++ b/drivers/mtd/nand/raw/lpc32xx_slc.c @@ -947,8 +947,12 @@ static int lpc32xx_nand_remove(struct platform_device *pdev) { uint32_t tmp; struct lpc32xx_nand_host *host = platform_get_drvdata(pdev); + struct nand_chip *chip = &host->nand_chip; + int ret; - nand_release(&host->nand_chip); + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); dma_release_channel(host->dma_chan); /* Force CE high */ -- GitLab From 5ecbba617446dbe1ec3241fa6362cce68838671d Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 14:59:58 +0200 Subject: [PATCH 0909/1971] mtd: rawnand: marvell: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-26-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/marvell_nand.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index a79ce4bdd31cf..260a0430313e1 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c @@ -2676,9 +2676,14 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc, static void marvell_nand_chips_cleanup(struct marvell_nfc *nfc) { struct marvell_nand_chip *entry, *temp; + struct nand_chip *chip; + int ret; list_for_each_entry_safe(entry, temp, &nfc->chips, node) { - nand_release(&entry->chip); + chip = &entry->chip; + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); list_del(&entry->node); } } -- GitLab From 1a36a7f78898c45a34ce9e9d557f9559dd6618cc Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 14:59:59 +0200 Subject: [PATCH 0910/1971] mtd: rawnand: mpc5121: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. 
Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-27-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/mpc5121_nfc.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/mpc5121_nfc.c b/drivers/mtd/nand/raw/mpc5121_nfc.c index a2fcb739e5f8b..18ecb096a32de 100644 --- a/drivers/mtd/nand/raw/mpc5121_nfc.c +++ b/drivers/mtd/nand/raw/mpc5121_nfc.c @@ -805,8 +805,11 @@ static int mpc5121_nfc_remove(struct platform_device *op) { struct device *dev = &op->dev; struct mtd_info *mtd = dev_get_drvdata(dev); + int ret; - nand_release(mtd_to_nand(mtd)); + ret = mtd_device_unregister(mtd); + WARN_ON(ret); + nand_cleanup(mtd_to_nand(mtd)); mpc5121_nfc_free(dev, mtd); return 0; -- GitLab From 8a82bbcadec877f5f938c54026278dfc1f05a332 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 15:00:00 +0200 Subject: [PATCH 0911/1971] mtd: rawnand: mtk: Fix the probe error path nand_release() is supposed be called after MTD device registration. Here, only nand_scan() happened, so use nand_cleanup() instead. There is no real Fixes tag applying here as the use of nand_release() in this driver predates the introduction of nand_cleanup() in commit d44154f969a4 ("mtd: nand: Provide nand_cleanup() function to free NAND related resources") which makes this change possible. However, pointing this commit as the culprit for backporting purposes makes sense even if this commit is not introducing any bug. Fixes: d44154f969a4 ("mtd: nand: Provide nand_cleanup() function to free NAND related resources") Signed-off-by: Miquel Raynal Cc: stable@vger.kernel.org Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-28-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/mtk_nand.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c index e7ec30e784fd1..9dad08bed2bb4 100644 --- a/drivers/mtd/nand/raw/mtk_nand.c +++ b/drivers/mtd/nand/raw/mtk_nand.c @@ -1419,7 +1419,7 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc, ret = mtd_device_register(mtd, NULL, 0); if (ret) { dev_err(dev, "mtd parse partition error\n"); - nand_release(nand); + nand_cleanup(nand); return ret; } -- GitLab From 1fec333aadc2aec9293b7ec10d24e94f75a89b2e Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 15:00:01 +0200 Subject: [PATCH 0912/1971] mtd: rawnand: mtk: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. 
Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-29-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/mtk_nand.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c index 9dad08bed2bb4..c1a6e31aabb88 100644 --- a/drivers/mtd/nand/raw/mtk_nand.c +++ b/drivers/mtd/nand/raw/mtk_nand.c @@ -1578,13 +1578,18 @@ release_ecc: static int mtk_nfc_remove(struct platform_device *pdev) { struct mtk_nfc *nfc = platform_get_drvdata(pdev); - struct mtk_nfc_nand_chip *chip; + struct mtk_nfc_nand_chip *mtk_chip; + struct nand_chip *chip; + int ret; while (!list_empty(&nfc->chips)) { - chip = list_first_entry(&nfc->chips, struct mtk_nfc_nand_chip, - node); - nand_release(&chip->nand); - list_del(&chip->node); + mtk_chip = list_first_entry(&nfc->chips, + struct mtk_nfc_nand_chip, node); + chip = &mtk_chip->nand; + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); + list_del(&mtk_chip->node); } mtk_ecc_release(nfc->ecc); -- GitLab From c6dc082793d2fc9609e36a7b165624c4ee57d36f Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 15:00:02 +0200 Subject: [PATCH 0913/1971] mtd: rawnand: mxc: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-30-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/mxc_nand.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/mxc_nand.c b/drivers/mtd/nand/raw/mxc_nand.c index 59554c187e01a..09dacb83cb5af 100644 --- a/drivers/mtd/nand/raw/mxc_nand.c +++ b/drivers/mtd/nand/raw/mxc_nand.c @@ -1919,8 +1919,12 @@ escan: static int mxcnd_remove(struct platform_device *pdev) { struct mxc_nand_host *host = platform_get_drvdata(pdev); + struct nand_chip *chip = &host->nand; + int ret; - nand_release(&host->nand); + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); if (host->clk_act) clk_disable_unprepare(host->clk); -- GitLab From 8fd507bb42104c1aabd557a9a993e8d0376c6fee Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 15:00:03 +0200 Subject: [PATCH 0914/1971] mtd: rawnand: mxic: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. 
Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-31-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/mxic_nand.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/mxic_nand.c b/drivers/mtd/nand/raw/mxic_nand.c index 5a5a5b3b546db..57f36721f4c6e 100644 --- a/drivers/mtd/nand/raw/mxic_nand.c +++ b/drivers/mtd/nand/raw/mxic_nand.c @@ -556,8 +556,13 @@ fail: static int mxic_nfc_remove(struct platform_device *pdev) { struct mxic_nand_ctlr *nfc = platform_get_drvdata(pdev); + struct nand_chip *chip = &nfc->chip; + int ret; + + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); - nand_release(&nfc->chip); mxic_nfc_clk_disable(nfc); return 0; } -- GitLab From a9384f95fe7767320842f8070b08ec865300c622 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 15:00:04 +0200 Subject: [PATCH 0915/1971] mtd: rawnand: ndfc: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-32-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/ndfc.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/mtd/nand/raw/ndfc.c b/drivers/mtd/nand/raw/ndfc.c index d324396ab7ffa..ed38338c13832 100644 --- a/drivers/mtd/nand/raw/ndfc.c +++ b/drivers/mtd/nand/raw/ndfc.c @@ -244,9 +244,13 @@ static int ndfc_probe(struct platform_device *ofdev) static int ndfc_remove(struct platform_device *ofdev) { struct ndfc_controller *ndfc = dev_get_drvdata(&ofdev->dev); - struct mtd_info *mtd = nand_to_mtd(&ndfc->chip); + struct nand_chip *chip = &ndfc->chip; + struct mtd_info *mtd = nand_to_mtd(chip); + int ret; - nand_release(&ndfc->chip); + ret = mtd_device_unregister(mtd); + WARN_ON(ret); + nand_cleanup(chip); kfree(mtd->name); return 0; -- GitLab From b4533679c9587a522a871075a68fba14febd590e Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 15:00:05 +0200 Subject: [PATCH 0916/1971] mtd: rawnand: omap2: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. 
Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-33-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/omap2.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c index ad77c112a78a5..eb7fcfd9276b7 100644 --- a/drivers/mtd/nand/raw/omap2.c +++ b/drivers/mtd/nand/raw/omap2.c @@ -2283,14 +2283,18 @@ static int omap_nand_remove(struct platform_device *pdev) struct mtd_info *mtd = platform_get_drvdata(pdev); struct nand_chip *nand_chip = mtd_to_nand(mtd); struct omap_nand_info *info = mtd_to_omap(mtd); + int ret; + if (nand_chip->ecc.priv) { nand_bch_free(nand_chip->ecc.priv); nand_chip->ecc.priv = NULL; } if (info->dma) dma_release_channel(info->dma); - nand_release(nand_chip); - return 0; + ret = mtd_device_unregister(mtd); + WARN_ON(ret); + nand_cleanup(nand_chip); + return ret; } static const struct of_device_id omap_nand_ids[] = { -- GitLab From be238fbf78e4c7c586dac235ab967d3e565a4d1a Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 15:00:06 +0200 Subject: [PATCH 0917/1971] mtd: rawnand: orion: Fix the probe error path nand_release() is supposed be called after MTD device registration. Here, only nand_scan() happened, so use nand_cleanup() instead. There is no real Fixes tag applying here as the use of nand_release() in this driver predates by far the introduction of nand_cleanup() in commit d44154f969a4 ("mtd: nand: Provide nand_cleanup() function to free NAND related resources") which makes this change possible. However, pointing this commit as the culprit for backporting purposes makes sense even if this commit is not introducing any bug. Fixes: d44154f969a4 ("mtd: nand: Provide nand_cleanup() function to free NAND related resources") Signed-off-by: Miquel Raynal Cc: stable@vger.kernel.org Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-34-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/orion_nand.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/orion_nand.c b/drivers/mtd/nand/raw/orion_nand.c index d27b39a7223c0..a3dcdf25f5f27 100644 --- a/drivers/mtd/nand/raw/orion_nand.c +++ b/drivers/mtd/nand/raw/orion_nand.c @@ -180,7 +180,7 @@ static int __init orion_nand_probe(struct platform_device *pdev) mtd->name = "orion_nand"; ret = mtd_device_register(mtd, board->parts, board->nr_parts); if (ret) { - nand_release(nc); + nand_cleanup(nc); goto no_dev; } -- GitLab From f342df67b19a4545ee742301594ad1e9b6e5aa91 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 15:00:07 +0200 Subject: [PATCH 0918/1971] mtd: rawnand: orion: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. 
Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-35-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/orion_nand.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/orion_nand.c b/drivers/mtd/nand/raw/orion_nand.c index a3dcdf25f5f27..880b54ca1b414 100644 --- a/drivers/mtd/nand/raw/orion_nand.c +++ b/drivers/mtd/nand/raw/orion_nand.c @@ -195,8 +195,12 @@ static int orion_nand_remove(struct platform_device *pdev) { struct orion_nand_info *info = platform_get_drvdata(pdev); struct nand_chip *chip = &info->chip; + int ret; - nand_release(chip); + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + + nand_cleanup(chip); clk_disable_unprepare(info->clk); -- GitLab From 383fc3f613e7eac9f2e3c13b6f9fb8c1f39cb9d5 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 15:00:08 +0200 Subject: [PATCH 0919/1971] mtd: rawnand: oxnas: Keep track of registered devices All initialized and registered devices should be listed somewhere so that we can unregister/free them in the _remove() path. This patch is not a fix per-se but is needed to apply three other fixes coming right after, explaining the Fixes/Cc: stable tags. Fixes: 668592492409 ("mtd: nand: Add OX820 NAND Support") Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-36-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/oxnas_nand.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/mtd/nand/raw/oxnas_nand.c b/drivers/mtd/nand/raw/oxnas_nand.c index c43cb4d92d3d1..bead5ac701607 100644 --- a/drivers/mtd/nand/raw/oxnas_nand.c +++ b/drivers/mtd/nand/raw/oxnas_nand.c @@ -32,6 +32,7 @@ struct oxnas_nand_ctrl { void __iomem *io_base; struct clk *clk; struct nand_chip *chips[OXNAS_NAND_MAX_CHIPS]; + unsigned int nchips; }; static uint8_t oxnas_nand_read_byte(struct nand_chip *chip) @@ -79,7 +80,6 @@ static int oxnas_nand_probe(struct platform_device *pdev) struct nand_chip *chip; struct mtd_info *mtd; struct resource *res; - int nchips = 0; int count = 0; int err = 0; @@ -145,12 +145,12 @@ static int oxnas_nand_probe(struct platform_device *pdev) goto err_release_child; } - oxnas->chips[nchips] = chip; - ++nchips; + oxnas->chips[oxnas->nchips] = chip; + ++oxnas->nchips; } /* Exit if no chips found */ - if (!nchips) { + if (!oxnas->nchips) { err = -ENODEV; goto err_clk_unprepare; } -- GitLab From 154298e2a3f6c9ce1d76cdb48d89fd5b107ea1a3 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 15:00:09 +0200 Subject: [PATCH 0920/1971] mtd: rawnand: oxnas: Fix the probe error path nand_release() is supposed be called after MTD device registration. Here, only nand_scan() happened, so use nand_cleanup() instead. While at it, be consistent and move the function call in the error path thanks to a goto statement. 
Fixes: 668592492409 ("mtd: nand: Add OX820 NAND Support") Signed-off-by: Miquel Raynal Cc: stable@vger.kernel.org Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-37-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/oxnas_nand.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/mtd/nand/raw/oxnas_nand.c b/drivers/mtd/nand/raw/oxnas_nand.c index bead5ac701607..4fadfa118582e 100644 --- a/drivers/mtd/nand/raw/oxnas_nand.c +++ b/drivers/mtd/nand/raw/oxnas_nand.c @@ -140,10 +140,8 @@ static int oxnas_nand_probe(struct platform_device *pdev) goto err_release_child; err = mtd_device_register(mtd, NULL, 0); - if (err) { - nand_release(chip); - goto err_release_child; - } + if (err) + goto err_cleanup_nand; oxnas->chips[oxnas->nchips] = chip; ++oxnas->nchips; @@ -159,6 +157,8 @@ static int oxnas_nand_probe(struct platform_device *pdev) return 0; +err_cleanup_nand: + nand_cleanup(chip); err_release_child: of_node_put(nand_np); err_clk_unprepare: -- GitLab From b60391eb17b2956ff2fc4c348e5a464da21ff9cb Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 15:00:10 +0200 Subject: [PATCH 0921/1971] mtd: rawnand: oxnas: Unregister all devices on error On error, the oxnas probe path just frees the device which failed and aborts the probe, leaving unreleased resources. Fix this situation by calling mtd_device_unregister()/nand_cleanup() on these. Fixes: 668592492409 ("mtd: nand: Add OX820 NAND Support") Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-38-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/oxnas_nand.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/mtd/nand/raw/oxnas_nand.c b/drivers/mtd/nand/raw/oxnas_nand.c index 4fadfa118582e..25862d62f9948 100644 --- a/drivers/mtd/nand/raw/oxnas_nand.c +++ b/drivers/mtd/nand/raw/oxnas_nand.c @@ -82,6 +82,7 @@ static int oxnas_nand_probe(struct platform_device *pdev) struct resource *res; int count = 0; int err = 0; + int i; /* Allocate memory for the device structure (and zero it) */ oxnas = devm_kzalloc(&pdev->dev, sizeof(*oxnas), @@ -161,6 +162,13 @@ err_cleanup_nand: nand_cleanup(chip); err_release_child: of_node_put(nand_np); + + for (i = 0; i < oxnas->nchips; i++) { + chip = oxnas->chips[i]; + WARN_ON(mtd_device_unregister(nand_to_mtd(chip))); + nand_cleanup(chip); + } + err_clk_unprepare: clk_disable_unprepare(oxnas->clk); return err; -- GitLab From 0a5f45e57e35d0840bedb816974ce2e63406cd8b Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 15:00:11 +0200 Subject: [PATCH 0922/1971] mtd: rawnand: oxnas: Release all devices in the _remove() path oxnans_nand_remove() should release all MTD devices and clean all NAND devices, not only the first one registered. 
Fixes: 668592492409 ("mtd: nand: Add OX820 NAND Support") Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-39-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/oxnas_nand.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/mtd/nand/raw/oxnas_nand.c b/drivers/mtd/nand/raw/oxnas_nand.c index 25862d62f9948..23c222b6c40e4 100644 --- a/drivers/mtd/nand/raw/oxnas_nand.c +++ b/drivers/mtd/nand/raw/oxnas_nand.c @@ -177,9 +177,13 @@ err_clk_unprepare: static int oxnas_nand_remove(struct platform_device *pdev) { struct oxnas_nand_ctrl *oxnas = platform_get_drvdata(pdev); + struct nand_chip *chip; + int i; - if (oxnas->chips[0]) - nand_release(oxnas->chips[0]); + for (i = 0; i < oxnas->nchips; i++) { + chip = oxnas->chips[i]; + nand_release(chip); + } clk_disable_unprepare(oxnas->clk); -- GitLab From 2d9cf6f129f8e6730d568628c920fe3b591a34be Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 15:00:12 +0200 Subject: [PATCH 0923/1971] mtd: rawnand: oxnas: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-40-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/oxnas_nand.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/oxnas_nand.c b/drivers/mtd/nand/raw/oxnas_nand.c index 23c222b6c40e4..8d0d76ad319d3 100644 --- a/drivers/mtd/nand/raw/oxnas_nand.c +++ b/drivers/mtd/nand/raw/oxnas_nand.c @@ -182,7 +182,8 @@ static int oxnas_nand_remove(struct platform_device *pdev) for (i = 0; i < oxnas->nchips; i++) { chip = oxnas->chips[i]; - nand_release(chip); + WARN_ON(mtd_device_unregister(nand_to_mtd(chip))); + nand_cleanup(chip); } clk_disable_unprepare(oxnas->clk); -- GitLab From f51466901c07e6930435d30b02a21f0841174f61 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 15:00:13 +0200 Subject: [PATCH 0924/1971] mtd: rawnand: pasemi: Fix the probe error path nand_cleanup() is supposed to be called on error after a successful call to nand_scan() to free all NAND resources. There is no real Fixes tag applying here as the use of nand_release() in this driver predates by far the introduction of nand_cleanup() in commit d44154f969a4 ("mtd: nand: Provide nand_cleanup() function to free NAND related resources") which makes this change possible, hence pointing it as the commit to fix for backporting purposes, even if this commit is not introducing any bug. 
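The probe-time rule is the mirror image of the remove path: before mtd_device_register() has succeeded there is nothing to unregister, so only nand_cleanup() may be called. A minimal sketch of the intended error handling (structure and function names are illustrative only):

	static int example_nand_probe(struct platform_device *pdev)
	{
		struct example_nand_host *host; /* hypothetical driver state */
		struct nand_chip *chip;
		int ret;

		host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
		if (!host)
			return -ENOMEM;

		chip = &host->chip;

		ret = nand_scan(chip, 1);
		if (ret)
			return ret;

		ret = mtd_device_register(nand_to_mtd(chip), NULL, 0);
		if (ret) {
			/* Registration never happened: only undo nand_scan(). */
			nand_cleanup(chip);
			return ret;
		}

		return 0;
	}
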
Fixes: d44154f969a4 ("mtd: nand: Provide nand_cleanup() function to free NAND related resources") Signed-off-by: Miquel Raynal Cc: stable@vger.kernel.org Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-41-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/pasemi_nand.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/pasemi_nand.c b/drivers/mtd/nand/raw/pasemi_nand.c index 9cfe7395172a4..066ff6dc9a239 100644 --- a/drivers/mtd/nand/raw/pasemi_nand.c +++ b/drivers/mtd/nand/raw/pasemi_nand.c @@ -146,7 +146,7 @@ static int pasemi_nand_probe(struct platform_device *ofdev) if (mtd_device_register(pasemi_nand_mtd, NULL, 0)) { dev_err(dev, "Unable to register MTD device\n"); err = -ENODEV; - goto out_lpc; + goto out_cleanup_nand; } dev_info(dev, "PA Semi NAND flash at %pR, control at I/O %x\n", &res, @@ -154,6 +154,8 @@ static int pasemi_nand_probe(struct platform_device *ofdev) return 0; + out_cleanup_nand: + nand_cleanup(chip); out_lpc: release_region(lpcctl, 4); out_ior: -- GitLab From 23cf3461501011759614b5494da390feeae3a1b3 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 15:00:14 +0200 Subject: [PATCH 0925/1971] mtd: rawnand: pasemi: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-42-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/pasemi_nand.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/pasemi_nand.c b/drivers/mtd/nand/raw/pasemi_nand.c index 066ff6dc9a239..d8eca8c3fdcd8 100644 --- a/drivers/mtd/nand/raw/pasemi_nand.c +++ b/drivers/mtd/nand/raw/pasemi_nand.c @@ -169,6 +169,7 @@ static int pasemi_nand_probe(struct platform_device *ofdev) static int pasemi_nand_remove(struct platform_device *ofdev) { struct nand_chip *chip; + int ret; if (!pasemi_nand_mtd) return 0; @@ -176,7 +177,9 @@ static int pasemi_nand_remove(struct platform_device *ofdev) chip = mtd_to_nand(pasemi_nand_mtd); /* Release resources, unregister device */ - nand_release(chip); + ret = mtd_device_unregister(pasemi_nand_mtd); + WARN_ON(ret); + nand_cleanup(chip); release_region(lpcctl, 4); -- GitLab From 5284024b4dac5e94f7f374ca905c7580dbc455e9 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 15:00:15 +0200 Subject: [PATCH 0926/1971] mtd: rawnand: plat_nand: Fix the probe error path nand_release() is supposed be called after MTD device registration. Here, only nand_scan() happened, so use nand_cleanup() instead. There is no real Fixes tag applying here as the use of nand_release() in this driver predates by far the introduction of nand_cleanup() in commit d44154f969a4 ("mtd: nand: Provide nand_cleanup() function to free NAND related resources") which makes this change possible, hence pointing it as the commit to fix for backporting purposes, even if this commit is not introducing any bug. 
Fixes: d44154f969a4 ("mtd: nand: Provide nand_cleanup() function to free NAND related resources") Signed-off-by: Miquel Raynal Cc: stable@vger.kernel.org Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-43-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/plat_nand.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/plat_nand.c b/drivers/mtd/nand/raw/plat_nand.c index dc0f3074ddbff..3a495b233443f 100644 --- a/drivers/mtd/nand/raw/plat_nand.c +++ b/drivers/mtd/nand/raw/plat_nand.c @@ -92,7 +92,7 @@ static int plat_nand_probe(struct platform_device *pdev) if (!err) return err; - nand_release(&data->chip); + nand_cleanup(&data->chip); out: if (pdata->ctrl.remove) pdata->ctrl.remove(pdev); -- GitLab From d1aae005a00e94e9f34f5c44d8c2c0afa72cba59 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 15:00:16 +0200 Subject: [PATCH 0927/1971] mtd: rawnand: plat_nand: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-44-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/plat_nand.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/plat_nand.c b/drivers/mtd/nand/raw/plat_nand.c index 3a495b233443f..556182f260577 100644 --- a/drivers/mtd/nand/raw/plat_nand.c +++ b/drivers/mtd/nand/raw/plat_nand.c @@ -106,8 +106,12 @@ static int plat_nand_remove(struct platform_device *pdev) { struct plat_nand_data *data = platform_get_drvdata(pdev); struct platform_nand_data *pdata = dev_get_platdata(&pdev->dev); + struct nand_chip *chip = &data->chip; + int ret; - nand_release(&data->chip); + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); if (pdata->ctrl.remove) pdata->ctrl.remove(pdev); -- GitLab From 0a2bc9919cf74e3436644cee88490bfd379127a1 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 15:00:17 +0200 Subject: [PATCH 0928/1971] mtd: rawnand: qcom: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. 
Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-45-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/qcom_nandc.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c index 9ab22c5d4166c..f1daf330951be 100644 --- a/drivers/mtd/nand/raw/qcom_nandc.c +++ b/drivers/mtd/nand/raw/qcom_nandc.c @@ -3005,10 +3005,15 @@ static int qcom_nandc_remove(struct platform_device *pdev) struct qcom_nand_controller *nandc = platform_get_drvdata(pdev); struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); struct qcom_nand_host *host; + struct nand_chip *chip; + int ret; - list_for_each_entry(host, &nandc->host_list, node) - nand_release(&host->chip); - + list_for_each_entry(host, &nandc->host_list, node) { + chip = &host->chip; + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); + } qcom_nandc_unalloc(nandc); -- GitLab From 10b87750ae17920977d57333d39483ff2bed0bf9 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 15:00:18 +0200 Subject: [PATCH 0929/1971] mtd: rawnand: r852: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. Signed-off-by: Miquel Raynal Cc: Maxim Levitsky Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-46-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/r852.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/mtd/nand/raw/r852.c b/drivers/mtd/nand/raw/r852.c index 77774250fb11b..f865e3a47b01d 100644 --- a/drivers/mtd/nand/raw/r852.c +++ b/drivers/mtd/nand/raw/r852.c @@ -651,7 +651,8 @@ static int r852_register_nand_device(struct r852_device *dev) dev->card_registered = 1; return 0; error3: - nand_release(dev->chip); + WARN_ON(mtd_device_unregister(nand_to_mtd(dev->chip))); + nand_cleanup(dev->chip); error1: /* Force card redetect */ dev->card_detected = 0; @@ -670,7 +671,8 @@ static void r852_unregister_nand_device(struct r852_device *dev) return; device_remove_file(&mtd->dev, &dev_attr_media_type); - nand_release(dev->chip); + WARN_ON(mtd_device_unregister(mtd)); + nand_cleanup(dev->chip); r852_engine_disable(dev); dev->card_registered = 0; } -- GitLab From 9748110bd22c59f6bcd9447cf752fa2d4c59d1e6 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 15:00:19 +0200 Subject: [PATCH 0930/1971] mtd: rawnand: s3c2410: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. 
Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-47-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/s3c2410.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/s3c2410.c b/drivers/mtd/nand/raw/s3c2410.c index 0009c1820e217..f86dff311464f 100644 --- a/drivers/mtd/nand/raw/s3c2410.c +++ b/drivers/mtd/nand/raw/s3c2410.c @@ -779,7 +779,8 @@ static int s3c24xx_nand_remove(struct platform_device *pdev) for (mtdno = 0; mtdno < info->mtd_count; mtdno++, ptr++) { pr_debug("releasing mtd %d (%p)\n", mtdno, ptr); - nand_release(&ptr->chip); + WARN_ON(mtd_device_unregister(nand_to_mtd(&ptr->chip))); + nand_cleanup(&ptr->chip); } } -- GitLab From 50abacbb621f130c390c57d3a68f6f913d48ad07 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 15:00:20 +0200 Subject: [PATCH 0931/1971] mtd: rawnand: sh_flctl: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-48-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/sh_flctl.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/sh_flctl.c b/drivers/mtd/nand/raw/sh_flctl.c index 058e99d0cbcf0..a661b8bb2dd56 100644 --- a/drivers/mtd/nand/raw/sh_flctl.c +++ b/drivers/mtd/nand/raw/sh_flctl.c @@ -1204,9 +1204,13 @@ err_chip: static int flctl_remove(struct platform_device *pdev) { struct sh_flctl *flctl = platform_get_drvdata(pdev); + struct nand_chip *chip = &flctl->chip; + int ret; flctl_release_dma(flctl); - nand_release(&flctl->chip); + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); pm_runtime_disable(&pdev->dev); return 0; -- GitLab From 0f44b3275b3798ccb97a2f51ac85871c30d6fbbc Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 15:00:21 +0200 Subject: [PATCH 0932/1971] mtd: rawnand: sharpsl: Fix the probe error path nand_release() is supposed be called after MTD device registration. Here, only nand_scan() happened, so use nand_cleanup() instead. There is no Fixes tag applying here as the use of nand_release() in this driver predates by far the introduction of nand_cleanup() in commit d44154f969a4 ("mtd: nand: Provide nand_cleanup() function to free NAND related resources") which makes this change possible. However, pointing this commit as the culprit for backporting purposes makes sense. 
Fixes: d44154f969a4 ("mtd: nand: Provide nand_cleanup() function to free NAND related resources") Signed-off-by: Miquel Raynal Cc: stable@vger.kernel.org Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-49-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/sharpsl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/sharpsl.c b/drivers/mtd/nand/raw/sharpsl.c index b47a9eaff89b1..d8c52a0160800 100644 --- a/drivers/mtd/nand/raw/sharpsl.c +++ b/drivers/mtd/nand/raw/sharpsl.c @@ -183,7 +183,7 @@ static int sharpsl_nand_probe(struct platform_device *pdev) return 0; err_add: - nand_release(this); + nand_cleanup(this); err_scan: iounmap(sharpsl->io); -- GitLab From 35a37f9198e5ee594750f4331b29de34acf8c84c Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 15:00:22 +0200 Subject: [PATCH 0933/1971] mtd: rawnand: sharpsl: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-50-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/sharpsl.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/drivers/mtd/nand/raw/sharpsl.c b/drivers/mtd/nand/raw/sharpsl.c index d8c52a0160800..51286f7acf54f 100644 --- a/drivers/mtd/nand/raw/sharpsl.c +++ b/drivers/mtd/nand/raw/sharpsl.c @@ -199,13 +199,19 @@ err_get_res: static int sharpsl_nand_remove(struct platform_device *pdev) { struct sharpsl_nand *sharpsl = platform_get_drvdata(pdev); + struct nand_chip *chip = &sharpsl->chip; + int ret; - /* Release resources, unregister device */ - nand_release(&sharpsl->chip); + /* Unregister device */ + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + + /* Release resources */ + nand_cleanup(chip); iounmap(sharpsl->io); - /* Free the MTD device structure */ + /* Free the driver's structure */ kfree(sharpsl); return 0; -- GitLab From 9c6c2e5cc77119ce0dacb4f9feedb73ce0354421 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 15:00:23 +0200 Subject: [PATCH 0934/1971] mtd: rawnand: socrates: Fix the probe error path nand_release() is supposed be called after MTD device registration. Here, only nand_scan() happened, so use nand_cleanup() instead. There is no real Fixes tag applying here as the use of nand_release() in this driver predates by far the introduction of nand_cleanup() in commit d44154f969a4 ("mtd: nand: Provide nand_cleanup() function to free NAND related resources") which makes this change possible. However, pointing this commit as the culprit for backporting purposes makes sense even if this commit is not introducing any bug. 
Fixes: d44154f969a4 ("mtd: nand: Provide nand_cleanup() function to free NAND related resources") Signed-off-by: Miquel Raynal Cc: stable@vger.kernel.org Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-51-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/socrates_nand.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/socrates_nand.c b/drivers/mtd/nand/raw/socrates_nand.c index 20f40c0e812c8..7c94fc51a6113 100644 --- a/drivers/mtd/nand/raw/socrates_nand.c +++ b/drivers/mtd/nand/raw/socrates_nand.c @@ -169,7 +169,7 @@ static int socrates_nand_probe(struct platform_device *ofdev) if (!res) return res; - nand_release(nand_chip); + nand_cleanup(nand_chip); out: iounmap(host->io_base); -- GitLab From c121cb980c096229d400f08cb3908911900e281d Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 15:00:24 +0200 Subject: [PATCH 0935/1971] mtd: rawnand: socrates: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-52-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/socrates_nand.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/socrates_nand.c b/drivers/mtd/nand/raw/socrates_nand.c index 7c94fc51a6113..243b34cfbc1b2 100644 --- a/drivers/mtd/nand/raw/socrates_nand.c +++ b/drivers/mtd/nand/raw/socrates_nand.c @@ -182,8 +182,12 @@ out: static int socrates_nand_remove(struct platform_device *ofdev) { struct socrates_nand_host *host = dev_get_drvdata(&ofdev->dev); + struct nand_chip *chip = &host->nand_chip; + int ret; - nand_release(&host->nand_chip); + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); iounmap(host->io_base); -- GitLab From 24acc3fa8b36f998b355bb7e337361f4e76160fb Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 15:00:25 +0200 Subject: [PATCH 0936/1971] mtd: rawnand: stm32_fmc2: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. 
Signed-off-by: Miquel Raynal Cc: Christophe Kerello Reviewed-by: Christophe Kerello Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-53-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/stm32_fmc2_nand.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c index 8f02d5e9ba215..65c9d17b25a3c 100644 --- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c +++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c @@ -1983,8 +1983,12 @@ static int stm32_fmc2_nfc_remove(struct platform_device *pdev) { struct stm32_fmc2_nfc *nfc = platform_get_drvdata(pdev); struct stm32_fmc2_nand *nand = &nfc->nand; + struct nand_chip *chip = &nand->chip; + int ret; - nand_release(&nand->chip); + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); if (nfc->dma_ecc_ch) dma_release_channel(nfc->dma_ecc_ch); -- GitLab From 3d84515ffd8fb657e10fa5b1215e9f095fa7efca Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 15:00:26 +0200 Subject: [PATCH 0937/1971] mtd: rawnand: sunxi: Fix the probe error path nand_release() is supposed be called after MTD device registration. Here, only nand_scan() happened, so use nand_cleanup() instead. Fixes: 1fef62c1423b ("mtd: nand: add sunxi NAND flash controller support") Signed-off-by: Miquel Raynal Cc: stable@vger.kernel.org Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-54-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/sunxi_nand.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c index 26d862213cacc..9f51fd20a52ea 100644 --- a/drivers/mtd/nand/raw/sunxi_nand.c +++ b/drivers/mtd/nand/raw/sunxi_nand.c @@ -2004,7 +2004,7 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc, ret = mtd_device_register(mtd, NULL, 0); if (ret) { dev_err(dev, "failed to register mtd device: %d\n", ret); - nand_release(nand); + nand_cleanup(nand); return ret; } -- GitLab From 068d86ecd9d980b10532d8b141badaf5d8d6b7dc Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 15:00:27 +0200 Subject: [PATCH 0938/1971] mtd: rawnand: sunxi: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. 
Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-55-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/sunxi_nand.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c index 9f51fd20a52ea..ffbc1651fadc4 100644 --- a/drivers/mtd/nand/raw/sunxi_nand.c +++ b/drivers/mtd/nand/raw/sunxi_nand.c @@ -2039,13 +2039,18 @@ static int sunxi_nand_chips_init(struct device *dev, struct sunxi_nfc *nfc) static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc) { struct sunxi_nand_chip *sunxi_nand; + struct nand_chip *chip; + int ret; while (!list_empty(&nfc->chips)) { sunxi_nand = list_first_entry(&nfc->chips, struct sunxi_nand_chip, node); - nand_release(&sunxi_nand->nand); - sunxi_nand_ecc_cleanup(&sunxi_nand->nand.ecc); + chip = &sunxi_nand->nand; + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); + sunxi_nand_ecc_cleanup(&chip->ecc); list_del(&sunxi_nand->node); } } -- GitLab From ab135c51bb815c57e7f297b9f78f487c70d34899 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 15:00:28 +0200 Subject: [PATCH 0939/1971] mtd: rawnand: tango: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-56-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/tango_nand.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/mtd/nand/raw/tango_nand.c b/drivers/mtd/nand/raw/tango_nand.c index b92de603e6db4..246871e01027f 100644 --- a/drivers/mtd/nand/raw/tango_nand.c +++ b/drivers/mtd/nand/raw/tango_nand.c @@ -600,14 +600,19 @@ static int chip_init(struct device *dev, struct device_node *np) static int tango_nand_remove(struct platform_device *pdev) { - int cs; struct tango_nfc *nfc = platform_get_drvdata(pdev); + struct nand_chip *chip; + int cs, ret; dma_release_channel(nfc->chan); for (cs = 0; cs < MAX_CS; ++cs) { - if (nfc->chips[cs]) - nand_release(&nfc->chips[cs]->nand_chip); + if (nfc->chips[cs]) { + chip = &nfc->chips[cs]->nand_chip; + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); + } } return 0; -- GitLab From 75e9a330a9bd48f97a55a08000236084fe3dae56 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 15:00:29 +0200 Subject: [PATCH 0940/1971] mtd: rawnand: tmio: Fix the probe error path nand_release() is supposed be called after MTD device registration. Here, only nand_scan() happened, so use nand_cleanup() instead. There is no real Fixes tag applying here as the use of nand_release() in this driver predates by far the introduction of nand_cleanup() in commit d44154f969a4 ("mtd: nand: Provide nand_cleanup() function to free NAND related resources") which makes this change possible. However, pointing this commit as the culprit for backporting purposes makes sense even if this commit is not introducing any bug. 
Fixes: d44154f969a4 ("mtd: nand: Provide nand_cleanup() function to free NAND related resources") Signed-off-by: Miquel Raynal Cc: stable@vger.kernel.org Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-57-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/tmio_nand.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/tmio_nand.c b/drivers/mtd/nand/raw/tmio_nand.c index db030f1701ee2..4e9a6d94f6e8b 100644 --- a/drivers/mtd/nand/raw/tmio_nand.c +++ b/drivers/mtd/nand/raw/tmio_nand.c @@ -448,7 +448,7 @@ static int tmio_probe(struct platform_device *dev) if (!retval) return retval; - nand_release(nand_chip); + nand_cleanup(nand_chip); err_irq: tmio_hw_stop(dev, tmio); -- GitLab From f3e169f44bdb0c61aba0b9156e5b381a869e579b Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 15:00:30 +0200 Subject: [PATCH 0941/1971] mtd: rawnand: tmio: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-58-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/tmio_nand.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/tmio_nand.c b/drivers/mtd/nand/raw/tmio_nand.c index 4e9a6d94f6e8b..843a8683b7371 100644 --- a/drivers/mtd/nand/raw/tmio_nand.c +++ b/drivers/mtd/nand/raw/tmio_nand.c @@ -458,8 +458,12 @@ err_irq: static int tmio_remove(struct platform_device *dev) { struct tmio_nand *tmio = platform_get_drvdata(dev); + struct nand_chip *chip = &tmio->chip; + int ret; - nand_release(&tmio->chip); + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); tmio_hw_stop(dev, tmio); return 0; } -- GitLab From f6fc75978d882e9c8970f11082f9be7e5ba95e45 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 15:00:31 +0200 Subject: [PATCH 0942/1971] mtd: rawnand: txx9ndfmc: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. 
Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-59-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/txx9ndfmc.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/mtd/nand/raw/txx9ndfmc.c b/drivers/mtd/nand/raw/txx9ndfmc.c index 2642d5bb32418..47d966871445b 100644 --- a/drivers/mtd/nand/raw/txx9ndfmc.c +++ b/drivers/mtd/nand/raw/txx9ndfmc.c @@ -371,7 +371,7 @@ static int __init txx9ndfmc_probe(struct platform_device *dev) static int __exit txx9ndfmc_remove(struct platform_device *dev) { struct txx9ndfmc_drvdata *drvdata = platform_get_drvdata(dev); - int i; + int ret, i; if (!drvdata) return 0; @@ -385,7 +385,9 @@ static int __exit txx9ndfmc_remove(struct platform_device *dev) chip = mtd_to_nand(mtd); txx9_priv = nand_get_controller_data(chip); - nand_release(chip); + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); kfree(txx9_priv->mtdname); kfree(txx9_priv); } -- GitLab From d9f2a1af817d5c9ee5a6adf70c5a217a583d3af0 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 15:00:32 +0200 Subject: [PATCH 0943/1971] mtd: rawnand: vf610: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. Signed-off-by: Miquel Raynal Cc: Stefan Agner Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-60-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/vf610_nfc.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/vf610_nfc.c b/drivers/mtd/nand/raw/vf610_nfc.c index bd9e16de78a29..7248c59011836 100644 --- a/drivers/mtd/nand/raw/vf610_nfc.c +++ b/drivers/mtd/nand/raw/vf610_nfc.c @@ -917,8 +917,12 @@ err_disable_clk: static int vf610_nfc_remove(struct platform_device *pdev) { struct vf610_nfc *nfc = platform_get_drvdata(pdev); + struct nand_chip *chip = &nfc->chip; + int ret; - nand_release(&nfc->chip); + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); clk_disable_unprepare(nfc->clk); return 0; } -- GitLab From 34531be5e804a8e1abf314a6c3a19fe342e4a154 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 15:00:33 +0200 Subject: [PATCH 0944/1971] mtd: rawnand: xway: Fix the probe error path nand_release() is supposed be called after MTD device registration. Here, only nand_scan() happened, so use nand_cleanup() instead. There is no real Fixes tag applying here as the use of nand_release() in this driver predates the introduction of nand_cleanup() in commit d44154f969a4 ("mtd: nand: Provide nand_cleanup() function to free NAND related resources") which makes this change possible. However, pointing this commit as the culprit for backporting purposes makes sense even if this commit is not introducing any bug. 
Fixes: d44154f969a4 ("mtd: nand: Provide nand_cleanup() function to free NAND related resources") Signed-off-by: Miquel Raynal Cc: stable@vger.kernel.org Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-61-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/xway_nand.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/xway_nand.c b/drivers/mtd/nand/raw/xway_nand.c index 834f794816a97..018311dc8fe11 100644 --- a/drivers/mtd/nand/raw/xway_nand.c +++ b/drivers/mtd/nand/raw/xway_nand.c @@ -210,7 +210,7 @@ static int xway_nand_probe(struct platform_device *pdev) err = mtd_device_register(mtd, NULL, 0); if (err) - nand_release(&data->chip); + nand_cleanup(&data->chip); return err; } -- GitLab From 9fdd78f7bcda3e6a9f53c355529b3bf037c0e24f Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 15:00:34 +0200 Subject: [PATCH 0945/1971] mtd: rawnand: xway: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of nand_cleanup(). Let's stop using nand_release() by calling mtd_device_unregister() and nand_cleanup() directly. Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-62-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/xway_nand.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/xway_nand.c b/drivers/mtd/nand/raw/xway_nand.c index 018311dc8fe11..94bfba9943265 100644 --- a/drivers/mtd/nand/raw/xway_nand.c +++ b/drivers/mtd/nand/raw/xway_nand.c @@ -221,8 +221,12 @@ static int xway_nand_probe(struct platform_device *pdev) static int xway_nand_remove(struct platform_device *pdev) { struct xway_nand_data *data = platform_get_drvdata(pdev); + struct nand_chip *chip = &data->chip; + int ret; - nand_release(&data->chip); + ret = mtd_device_unregister(mtd); + WARN_ON(ret); + nand_cleanup(chip); return 0; } -- GitLab From 2e263011017cb22c0e7b65af791aae8cb5e1098a Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Wed, 20 May 2020 01:24:53 +0200 Subject: [PATCH 0946/1971] mtd: rawnand: ingenic: Fix the RB gpio active-high property on qi, lb60 The rb-gpios semantics was undocumented and qi,lb60 (along with the ingenic driver) got it wrong. The active state encodes the NAND ready state, which is high level. Since there's no signal inverter on this board, it should be active-high. Let's fix that here for older DTs so we can re-use the generic nand_gpio_waitrdy() helper, and be consistent with what other drivers do. 
Suggested-by: Paul Cercueil Signed-off-by: Boris Brezillon Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200519232454.374081-3-boris.brezillon@collabora.com --- drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c index 2af1b54d5d9da..221f480776e5f 100644 --- a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c +++ b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c @@ -184,7 +184,7 @@ static int ingenic_nand_dev_ready(struct nand_chip *chip) { struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip)); - return !gpiod_get_value_cansleep(nand->busy_gpio); + return gpiod_get_value_cansleep(nand->busy_gpio); } static void ingenic_nand_ecc_hwctl(struct nand_chip *chip, int mode) @@ -343,6 +343,18 @@ static int ingenic_nand_init_chip(struct platform_device *pdev, nand->chip.legacy.dev_ready = ingenic_nand_dev_ready; } + /* + * The rb-gpios semantics was undocumented and qi,lb60 (along with + * the ingenic driver) got it wrong. The active state encodes the + * NAND ready state, which is high level. Since there's no signal + * inverter on this board, it should be active-high. Let's fix that + * here for older DTs so we can re-use the generic nand_gpio_waitrdy() + * helper, and be consistent with what other drivers do. + */ + if (of_machine_is_compatible("qi,lb60") && + gpiod_is_active_low(nand->busy_gpio)) + gpiod_toggle_active_low(nand->busy_gpio); + nand->wp_gpio = devm_gpiod_get_optional(dev, "wp", GPIOD_OUT_LOW); if (IS_ERR(nand->wp_gpio)) { -- GitLab From 5d55714fa5ac1ac93a9224821f289fbf46532d30 Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Wed, 20 May 2020 01:24:54 +0200 Subject: [PATCH 0947/1971] mtd: rawnand: ingenic: Convert the driver to exec_op() Let's convert the driver to exec_op() to have one less driver relying on the legacy interface. Signed-off-by: Boris Brezillon Tested-by: Paul Cercueil Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200519232454.374081-4-boris.brezillon@collabora.com --- .../mtd/nand/raw/ingenic/ingenic_nand_drv.c | 139 +++++++++++------- 1 file changed, 83 insertions(+), 56 deletions(-) diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c index 221f480776e5f..69423bb29adbd 100644 --- a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c +++ b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c @@ -27,9 +27,6 @@ #define DRV_NAME "ingenic-nand" -/* Command delay when there is no R/B pin. */ -#define RB_DELAY_US 100 - struct jz_soc_info { unsigned long data_offset; unsigned long addr_offset; @@ -49,7 +46,6 @@ struct ingenic_nfc { struct nand_controller controller; unsigned int num_banks; struct list_head chips; - int selected; struct ingenic_nand_cs cs[]; }; @@ -142,51 +138,6 @@ static const struct mtd_ooblayout_ops jz4725b_ooblayout_ops = { .free = jz4725b_ooblayout_free, }; -static void ingenic_nand_select_chip(struct nand_chip *chip, int chipnr) -{ - struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip)); - struct ingenic_nfc *nfc = to_ingenic_nfc(nand->chip.controller); - struct ingenic_nand_cs *cs; - - /* Ensure the currently selected chip is deasserted. 
*/ - if (chipnr == -1 && nfc->selected >= 0) { - cs = &nfc->cs[nfc->selected]; - jz4780_nemc_assert(nfc->dev, cs->bank, false); - } - - nfc->selected = chipnr; -} - -static void ingenic_nand_cmd_ctrl(struct nand_chip *chip, int cmd, - unsigned int ctrl) -{ - struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip)); - struct ingenic_nfc *nfc = to_ingenic_nfc(nand->chip.controller); - struct ingenic_nand_cs *cs; - - if (WARN_ON(nfc->selected < 0)) - return; - - cs = &nfc->cs[nfc->selected]; - - jz4780_nemc_assert(nfc->dev, cs->bank, ctrl & NAND_NCE); - - if (cmd == NAND_CMD_NONE) - return; - - if (ctrl & NAND_ALE) - writeb(cmd, cs->base + nfc->soc_info->addr_offset); - else if (ctrl & NAND_CLE) - writeb(cmd, cs->base + nfc->soc_info->cmd_offset); -} - -static int ingenic_nand_dev_ready(struct nand_chip *chip) -{ - struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip)); - - return gpiod_get_value_cansleep(nand->busy_gpio); -} - static void ingenic_nand_ecc_hwctl(struct nand_chip *chip, int mode) { struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip)); @@ -298,8 +249,91 @@ static int ingenic_nand_attach_chip(struct nand_chip *chip) return 0; } +static int ingenic_nand_exec_instr(struct nand_chip *chip, + struct ingenic_nand_cs *cs, + const struct nand_op_instr *instr) +{ + struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip)); + struct ingenic_nfc *nfc = to_ingenic_nfc(chip->controller); + unsigned int i; + + switch (instr->type) { + case NAND_OP_CMD_INSTR: + writeb(instr->ctx.cmd.opcode, + cs->base + nfc->soc_info->cmd_offset); + return 0; + case NAND_OP_ADDR_INSTR: + for (i = 0; i < instr->ctx.addr.naddrs; i++) + writeb(instr->ctx.addr.addrs[i], + cs->base + nfc->soc_info->addr_offset); + return 0; + case NAND_OP_DATA_IN_INSTR: + if (instr->ctx.data.force_8bit || + !(chip->options & NAND_BUSWIDTH_16)) + ioread8_rep(cs->base + nfc->soc_info->data_offset, + instr->ctx.data.buf.in, + instr->ctx.data.len); + else + ioread16_rep(cs->base + nfc->soc_info->data_offset, + instr->ctx.data.buf.in, + instr->ctx.data.len); + return 0; + case NAND_OP_DATA_OUT_INSTR: + if (instr->ctx.data.force_8bit || + !(chip->options & NAND_BUSWIDTH_16)) + iowrite8_rep(cs->base + nfc->soc_info->data_offset, + instr->ctx.data.buf.out, + instr->ctx.data.len); + else + iowrite16_rep(cs->base + nfc->soc_info->data_offset, + instr->ctx.data.buf.out, + instr->ctx.data.len); + return 0; + case NAND_OP_WAITRDY_INSTR: + if (!nand->busy_gpio) + return nand_soft_waitrdy(chip, + instr->ctx.waitrdy.timeout_ms); + + return nand_gpio_waitrdy(chip, nand->busy_gpio, + instr->ctx.waitrdy.timeout_ms); + default: + break; + } + + return -EINVAL; +} + +static int ingenic_nand_exec_op(struct nand_chip *chip, + const struct nand_operation *op, + bool check_only) +{ + struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip)); + struct ingenic_nfc *nfc = to_ingenic_nfc(nand->chip.controller); + struct ingenic_nand_cs *cs; + unsigned int i; + int ret = 0; + + if (check_only) + return 0; + + cs = &nfc->cs[op->cs]; + jz4780_nemc_assert(nfc->dev, cs->bank, true); + for (i = 0; i < op->ninstrs; i++) { + ret = ingenic_nand_exec_instr(chip, cs, &op->instrs[i]); + if (ret) + break; + + if (op->instrs[i].delay_ns) + ndelay(op->instrs[i].delay_ns); + } + jz4780_nemc_assert(nfc->dev, cs->bank, false); + + return ret; +} + static const struct nand_controller_ops ingenic_nand_controller_ops = { .attach_chip = ingenic_nand_attach_chip, + .exec_op = ingenic_nand_exec_op, }; static int ingenic_nand_init_chip(struct 
platform_device *pdev, @@ -339,8 +373,6 @@ static int ingenic_nand_init_chip(struct platform_device *pdev, ret = PTR_ERR(nand->busy_gpio); dev_err(dev, "failed to request busy GPIO: %d\n", ret); return ret; - } else if (nand->busy_gpio) { - nand->chip.legacy.dev_ready = ingenic_nand_dev_ready; } /* @@ -371,12 +403,7 @@ static int ingenic_nand_init_chip(struct platform_device *pdev, return -ENOMEM; mtd->dev.parent = dev; - chip->legacy.IO_ADDR_R = cs->base + nfc->soc_info->data_offset; - chip->legacy.IO_ADDR_W = cs->base + nfc->soc_info->data_offset; - chip->legacy.chip_delay = RB_DELAY_US; chip->options = NAND_NO_SUBPAGE_WRITE; - chip->legacy.select_chip = ingenic_nand_select_chip; - chip->legacy.cmd_ctrl = ingenic_nand_cmd_ctrl; chip->ecc.mode = NAND_ECC_HW; chip->controller = &nfc->controller; nand_set_flash_node(chip, np); -- GitLab From 550e68ea36a6671a96576c0531685ce6e6c0d19d Mon Sep 17 00:00:00 2001 From: Dinghao Liu Date: Fri, 22 May 2020 17:51:39 +0800 Subject: [PATCH 0948/1971] mtd: rawnand: gpmi: Fix runtime PM imbalance on error pm_runtime_get_sync() increments the runtime PM usage counter even when it returns an error code. Thus a pairing decrement is needed on the error handling path to keep the counter balanced. Signed-off-by: Dinghao Liu Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200522095139.19653-1-dinghao.liu@zju.edu.cn --- drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index 9a4a1d30669de..630de1a90eb4c 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -540,8 +540,10 @@ static int bch_set_geometry(struct gpmi_nand_data *this) return ret; ret = pm_runtime_get_sync(this->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_autosuspend(this->dev); return ret; + } /* * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this -- GitLab From 8e935b92d269826f1e4bcf31d3f688dbcdf71219 Mon Sep 17 00:00:00 2001 From: Dinghao Liu Date: Fri, 22 May 2020 18:17:13 +0800 Subject: [PATCH 0949/1971] mtd: rawnand: gpmi: Fix runtime PM imbalance in gpmi_nand_probe There is no reason that the failure of __gpmi_enable_clk() could lead to PM usage counter decrement. Signed-off-by: Dinghao Liu Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200522101713.24350-1-dinghao.liu@zju.edu.cn --- drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index 630de1a90eb4c..c2d448812b85f 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -2663,7 +2663,7 @@ static int gpmi_nand_probe(struct platform_device *pdev) ret = __gpmi_enable_clk(this, true); if (ret) - goto exit_nfc_init; + goto exit_acquire_resources; pm_runtime_set_autosuspend_delay(&pdev->dev, 500); pm_runtime_use_autosuspend(&pdev->dev); -- GitLab From 37f7212148cf1d796135cdf8d0c7fee13067674b Mon Sep 17 00:00:00 2001 From: Dinghao Liu Date: Fri, 22 May 2020 18:40:06 +0800 Subject: [PATCH 0950/1971] mtd: rawnand: omap_elm: Fix runtime PM imbalance on error pm_runtime_get_sync() increments the runtime PM usage counter even when it returns an error code. Thus a pairing decrement is needed on the error handling path to keep the counter balanced. 
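pm_runtime_get_sync() takes the reference before it attempts to resume the device, so the reference is still held when the resume fails. A minimal sketch of the balanced pattern (which put variant to use depends on the driver; the hunks below use pm_runtime_put_autosuspend() and pm_runtime_put_sync(), this sketch uses pm_runtime_put_noidle()):

	static int example_runtime_get(struct device *dev)
	{
		int ret;

		ret = pm_runtime_get_sync(dev);
		if (ret < 0) {
			/*
			 * The usage counter was incremented even though the
			 * resume failed, so drop the reference before bailing.
			 */
			pm_runtime_put_noidle(dev);
			return ret;
		}

		return 0;
	}
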
Signed-off-by: Dinghao Liu Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200522104008.28340-1-dinghao.liu@zju.edu.cn --- drivers/mtd/nand/raw/omap_elm.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/mtd/nand/raw/omap_elm.c b/drivers/mtd/nand/raw/omap_elm.c index 3fa0e2cbbe537..078b1022ac2a1 100644 --- a/drivers/mtd/nand/raw/omap_elm.c +++ b/drivers/mtd/nand/raw/omap_elm.c @@ -411,6 +411,7 @@ static int elm_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); if (pm_runtime_get_sync(&pdev->dev) < 0) { ret = -EINVAL; + pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); dev_err(&pdev->dev, "can't enable clock\n"); return ret; -- GitLab From e5e5631cc88987a6f3cd8304660bd9190da95916 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 8 May 2020 19:18:05 +0200 Subject: [PATCH 0951/1971] mtd: rawnand: gpmi: Use nand_extract_bits() Drop the use of gpmi_copy_bits() in favor of the NAND helper nand_extract_bits(). Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200508171805.8627-1-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c | 174 ++------------------- 1 file changed, 10 insertions(+), 164 deletions(-) diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index c2d448812b85f..061a8ddda2756 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -836,158 +836,6 @@ map_fail: return false; } -/** - * gpmi_copy_bits - copy bits from one memory region to another - * @dst: destination buffer - * @dst_bit_off: bit offset we're starting to write at - * @src: source buffer - * @src_bit_off: bit offset we're starting to read from - * @nbits: number of bits to copy - * - * This functions copies bits from one memory region to another, and is used by - * the GPMI driver to copy ECC sections which are not guaranteed to be byte - * aligned. - * - * src and dst should not overlap. - * - */ -static void gpmi_copy_bits(u8 *dst, size_t dst_bit_off, const u8 *src, - size_t src_bit_off, size_t nbits) -{ - size_t i; - size_t nbytes; - u32 src_buffer = 0; - size_t bits_in_src_buffer = 0; - - if (!nbits) - return; - - /* - * Move src and dst pointers to the closest byte pointer and store bit - * offsets within a byte. - */ - src += src_bit_off / 8; - src_bit_off %= 8; - - dst += dst_bit_off / 8; - dst_bit_off %= 8; - - /* - * Initialize the src_buffer value with bits available in the first - * byte of data so that we end up with a byte aligned src pointer. - */ - if (src_bit_off) { - src_buffer = src[0] >> src_bit_off; - if (nbits >= (8 - src_bit_off)) { - bits_in_src_buffer += 8 - src_bit_off; - } else { - src_buffer &= GENMASK(nbits - 1, 0); - bits_in_src_buffer += nbits; - } - nbits -= bits_in_src_buffer; - src++; - } - - /* Calculate the number of bytes that can be copied from src to dst. */ - nbytes = nbits / 8; - - /* Try to align dst to a byte boundary. 
*/ - if (dst_bit_off) { - if (bits_in_src_buffer < (8 - dst_bit_off) && nbytes) { - src_buffer |= src[0] << bits_in_src_buffer; - bits_in_src_buffer += 8; - src++; - nbytes--; - } - - if (bits_in_src_buffer >= (8 - dst_bit_off)) { - dst[0] &= GENMASK(dst_bit_off - 1, 0); - dst[0] |= src_buffer << dst_bit_off; - src_buffer >>= (8 - dst_bit_off); - bits_in_src_buffer -= (8 - dst_bit_off); - dst_bit_off = 0; - dst++; - if (bits_in_src_buffer > 7) { - bits_in_src_buffer -= 8; - dst[0] = src_buffer; - dst++; - src_buffer >>= 8; - } - } - } - - if (!bits_in_src_buffer && !dst_bit_off) { - /* - * Both src and dst pointers are byte aligned, thus we can - * just use the optimized memcpy function. - */ - if (nbytes) - memcpy(dst, src, nbytes); - } else { - /* - * src buffer is not byte aligned, hence we have to copy each - * src byte to the src_buffer variable before extracting a byte - * to store in dst. - */ - for (i = 0; i < nbytes; i++) { - src_buffer |= src[i] << bits_in_src_buffer; - dst[i] = src_buffer; - src_buffer >>= 8; - } - } - /* Update dst and src pointers */ - dst += nbytes; - src += nbytes; - - /* - * nbits is the number of remaining bits. It should not exceed 8 as - * we've already copied as much bytes as possible. - */ - nbits %= 8; - - /* - * If there's no more bits to copy to the destination and src buffer - * was already byte aligned, then we're done. - */ - if (!nbits && !bits_in_src_buffer) - return; - - /* Copy the remaining bits to src_buffer */ - if (nbits) - src_buffer |= (*src & GENMASK(nbits - 1, 0)) << - bits_in_src_buffer; - bits_in_src_buffer += nbits; - - /* - * In case there were not enough bits to get a byte aligned dst buffer - * prepare the src_buffer variable to match the dst organization (shift - * src_buffer by dst_bit_off and retrieve the least significant bits - * from dst). - */ - if (dst_bit_off) - src_buffer = (src_buffer << dst_bit_off) | - (*dst & GENMASK(dst_bit_off - 1, 0)); - bits_in_src_buffer += dst_bit_off; - - /* - * Keep most significant bits from dst if we end up with an unaligned - * number of bits. - */ - nbytes = bits_in_src_buffer / 8; - if (bits_in_src_buffer % 8) { - src_buffer |= (dst[nbytes] & - GENMASK(7, bits_in_src_buffer % 8)) << - (nbytes * 8); - nbytes++; - } - - /* Copy the remaining bytes to dst */ - for (i = 0; i < nbytes; i++) { - dst[i] = src_buffer; - src_buffer >>= 8; - } -} - /* add our owner bbt descriptor */ static uint8_t scan_ff_pattern[] = { 0xff }; static struct nand_bbt_descr gpmi_bbt_descr = { @@ -1715,7 +1563,7 @@ static int gpmi_ecc_write_oob(struct nand_chip *chip, int page) * inline (interleaved with payload DATA), and do not align data chunk on * byte boundaries. * We thus need to take care moving the payload data and ECC bits stored in the - * page into the provided buffers, which is why we're using gpmi_copy_bits. + * page into the provided buffers, which is why we're using nand_extract_bits(). * * See set_geometry_by_ecc_info inline comments to have a full description * of the layout used by the GPMI controller. 
@@ -1764,9 +1612,8 @@ static int gpmi_ecc_read_page_raw(struct nand_chip *chip, uint8_t *buf, /* Extract interleaved payload data and ECC bits */ for (step = 0; step < nfc_geo->ecc_chunk_count; step++) { if (buf) - gpmi_copy_bits(buf, step * eccsize * 8, - tmp_buf, src_bit_off, - eccsize * 8); + nand_extract_bits(buf, step * eccsize, tmp_buf, + src_bit_off, eccsize * 8); src_bit_off += eccsize * 8; /* Align last ECC block to align a byte boundary */ @@ -1775,9 +1622,8 @@ static int gpmi_ecc_read_page_raw(struct nand_chip *chip, uint8_t *buf, eccbits += 8 - ((oob_bit_off + eccbits) % 8); if (oob_required) - gpmi_copy_bits(oob, oob_bit_off, - tmp_buf, src_bit_off, - eccbits); + nand_extract_bits(oob, oob_bit_off, tmp_buf, + src_bit_off, eccbits); src_bit_off += eccbits; oob_bit_off += eccbits; @@ -1802,7 +1648,7 @@ static int gpmi_ecc_read_page_raw(struct nand_chip *chip, uint8_t *buf, * inline (interleaved with payload DATA), and do not align data chunk on * byte boundaries. * We thus need to take care moving the OOB area at the right place in the - * final page, which is why we're using gpmi_copy_bits. + * final page, which is why we're using nand_extract_bits(). * * See set_geometry_by_ecc_info inline comments to have a full description * of the layout used by the GPMI controller. @@ -1841,8 +1687,8 @@ static int gpmi_ecc_write_page_raw(struct nand_chip *chip, const uint8_t *buf, /* Interleave payload data and ECC bits */ for (step = 0; step < nfc_geo->ecc_chunk_count; step++) { if (buf) - gpmi_copy_bits(tmp_buf, dst_bit_off, - buf, step * eccsize * 8, eccsize * 8); + nand_extract_bits(tmp_buf, dst_bit_off, buf, + step * eccsize * 8, eccsize * 8); dst_bit_off += eccsize * 8; /* Align last ECC block to align a byte boundary */ @@ -1851,8 +1697,8 @@ static int gpmi_ecc_write_page_raw(struct nand_chip *chip, const uint8_t *buf, eccbits += 8 - ((oob_bit_off + eccbits) % 8); if (oob_required) - gpmi_copy_bits(tmp_buf, dst_bit_off, - oob, oob_bit_off, eccbits); + nand_extract_bits(tmp_buf, dst_bit_off, oob, + oob_bit_off, eccbits); dst_bit_off += eccbits; oob_bit_off += eccbits; -- GitLab From 4fd639092b17d4252368b6009573339aeab5c7bd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=81lvaro=20Fern=C3=A1ndez=20Rojas?= Date: Fri, 22 May 2020 14:15:20 +0200 Subject: [PATCH 0952/1971] mtd: rawnand: brcmnand: rename v4 registers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit These registers are also used on v3.3. 
Signed-off-by: Álvaro Fernández Rojas Reviewed-by: Miquel Raynal Acked-by: Florian Fainelli Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200522121524.4161539-2-noltari@gmail.com --- drivers/mtd/nand/raw/brcmnand/brcmnand.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c index 132e1da2a3891..0abd5d5fd54df 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c @@ -337,8 +337,8 @@ enum brcmnand_reg { BRCMNAND_FC_BASE, }; -/* BRCMNAND v4.0 */ -static const u16 brcmnand_regs_v40[] = { +/* BRCMNAND v3.3-v4.0 */ +static const u16 brcmnand_regs_v33[] = { [BRCMNAND_CMD_START] = 0x04, [BRCMNAND_CMD_EXT_ADDRESS] = 0x08, [BRCMNAND_CMD_ADDRESS] = 0x0c, @@ -590,8 +590,8 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl) ctrl->reg_offsets = brcmnand_regs_v60; else if (ctrl->nand_version >= 0x0500) ctrl->reg_offsets = brcmnand_regs_v50; - else if (ctrl->nand_version >= 0x0400) - ctrl->reg_offsets = brcmnand_regs_v40; + else if (ctrl->nand_version >= 0x0303) + ctrl->reg_offsets = brcmnand_regs_v33; /* Chip-select stride */ if (ctrl->nand_version >= 0x0701) -- GitLab From 3d3fb3c5be9ce07fa85d8f67fb3922e4613b955b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=81lvaro=20Fern=C3=A1ndez=20Rojas?= Date: Fri, 22 May 2020 14:15:21 +0200 Subject: [PATCH 0953/1971] mtd: rawnand: brcmnand: fix CS0 layout MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Only v3.3-v5.0 have a different CS0 layout. Controllers before v3.3 use the same layout for every CS. Fixes: 27c5b17cd1b1 ("mtd: nand: add NAND driver "library" for Broadcom STB NAND controller") Signed-off-by: Álvaro Fernández Rojas Acked-by: Florian Fainelli Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200522121524.4161539-3-noltari@gmail.com --- drivers/mtd/nand/raw/brcmnand/brcmnand.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c index 0abd5d5fd54df..0a2a761950474 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c @@ -605,8 +605,9 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl) } else { ctrl->cs_offsets = brcmnand_cs_offsets; - /* v5.0 and earlier has a different CS0 offset layout */ - if (ctrl->nand_version <= 0x0500) + /* v3.3-5.0 have a different CS0 offset layout */ + if (ctrl->nand_version >= 0x0303 && + ctrl->nand_version <= 0x0500) ctrl->cs0_offsets = brcmnand_cs_offsets_cs0; } -- GitLab From eeeac9cbc4ca5b8c245972f3a765d1cb5b7ef038 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=81lvaro=20Fern=C3=A1ndez=20Rojas?= Date: Fri, 22 May 2020 14:15:22 +0200 Subject: [PATCH 0954/1971] mtd: rawnand: brcmnand: rename page sizes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Current pages sizes apply to controllers after v3.4 Signed-off-by: Álvaro Fernández Rojas Acked-by: Florian Fainelli Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200522121524.4161539-4-noltari@gmail.com --- drivers/mtd/nand/raw/brcmnand/brcmnand.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c index 0a2a761950474..7fa247ed3ebfe 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c +++ 
b/drivers/mtd/nand/raw/brcmnand/brcmnand.c @@ -570,7 +570,7 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl) { static const unsigned int block_sizes_v6[] = { 8, 16, 128, 256, 512, 1024, 2048, 0 }; static const unsigned int block_sizes_v4[] = { 16, 128, 8, 512, 256, 1024, 2048, 0 }; - static const unsigned int page_sizes[] = { 512, 2048, 4096, 8192, 0 }; + static const unsigned int page_sizes_v3_4[] = { 512, 2048, 4096, 8192, 0 }; ctrl->nand_version = nand_readreg(ctrl, 0) & 0xffff; @@ -617,7 +617,7 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl) ctrl->max_page_size = 16 * 1024; ctrl->max_block_size = 2 * 1024 * 1024; } else { - ctrl->page_sizes = page_sizes; + ctrl->page_sizes = page_sizes_v3_4; if (ctrl->nand_version >= 0x0600) ctrl->block_sizes = block_sizes_v6; else -- GitLab From b1713b5b2c026dfac1ea433477ea4e383809333f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=81lvaro=20Fern=C3=A1ndez=20Rojas?= Date: Fri, 22 May 2020 14:15:23 +0200 Subject: [PATCH 0955/1971] dt-bindings: mtd: brcmnand: add v2.1 and v2.2 support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add brcm,brcmnand-v2.1 and brcm,brcmnand-v2.2 as possible compatible strings to support brcmnand controllers v2.1 and v2.2. Signed-off-by: Álvaro Fernández Rojas Acked-by: Florian Fainelli Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200522121524.4161539-5-noltari@gmail.com --- Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt b/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt index 05651a654c669..44335a4f8bfb5 100644 --- a/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt +++ b/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt @@ -20,6 +20,8 @@ Required properties: "brcm,brcmnand" and an appropriate version compatibility string, like "brcm,brcmnand-v7.0" Possible values: + brcm,brcmnand-v2.1 + brcm,brcmnand-v2.2 brcm,brcmnand-v4.0 brcm,brcmnand-v5.0 brcm,brcmnand-v6.0 -- GitLab From 7e7c7df5d50fe06469be106967fc5b5d62be8868 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=81lvaro=20Fern=C3=A1ndez=20Rojas?= Date: Fri, 22 May 2020 14:15:24 +0200 Subject: [PATCH 0956/1971] mtd: rawnand: brcmnand: support v2.1-v2.2 controllers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit v2.1: tested on Netgear DGND3700v1 (BCM6368) v2.2: tested on Netgear DGND3700v2 (BCM6362) Signed-off-by: Álvaro Fernández Rojas Acked-by: Florian Fainelli Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200522121524.4161539-6-noltari@gmail.com --- drivers/mtd/nand/raw/brcmnand/brcmnand.c | 85 +++++++++++++++++++++--- 1 file changed, 76 insertions(+), 9 deletions(-) diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c index 7fa247ed3ebfe..4a0a7053fb7ab 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c @@ -263,6 +263,7 @@ struct brcmnand_controller { const unsigned int *block_sizes; unsigned int max_page_size; const unsigned int *page_sizes; + unsigned int page_size_shift; unsigned int max_oob; u32 features; @@ -337,6 +338,36 @@ enum brcmnand_reg { BRCMNAND_FC_BASE, }; +/* BRCMNAND v2.1-v2.2 */ +static const u16 brcmnand_regs_v21[] = { + [BRCMNAND_CMD_START] = 0x04, + [BRCMNAND_CMD_EXT_ADDRESS] = 0x08, + [BRCMNAND_CMD_ADDRESS] = 0x0c, + [BRCMNAND_INTFC_STATUS] = 0x5c, + 
[BRCMNAND_CS_SELECT] = 0x14, + [BRCMNAND_CS_XOR] = 0x18, + [BRCMNAND_LL_OP] = 0, + [BRCMNAND_CS0_BASE] = 0x40, + [BRCMNAND_CS1_BASE] = 0, + [BRCMNAND_CORR_THRESHOLD] = 0, + [BRCMNAND_CORR_THRESHOLD_EXT] = 0, + [BRCMNAND_UNCORR_COUNT] = 0, + [BRCMNAND_CORR_COUNT] = 0, + [BRCMNAND_CORR_EXT_ADDR] = 0x60, + [BRCMNAND_CORR_ADDR] = 0x64, + [BRCMNAND_UNCORR_EXT_ADDR] = 0x68, + [BRCMNAND_UNCORR_ADDR] = 0x6c, + [BRCMNAND_SEMAPHORE] = 0x50, + [BRCMNAND_ID] = 0x54, + [BRCMNAND_ID_EXT] = 0, + [BRCMNAND_LL_RDATA] = 0, + [BRCMNAND_OOB_READ_BASE] = 0x20, + [BRCMNAND_OOB_READ_10_BASE] = 0, + [BRCMNAND_OOB_WRITE_BASE] = 0x30, + [BRCMNAND_OOB_WRITE_10_BASE] = 0, + [BRCMNAND_FC_BASE] = 0x200, +}; + /* BRCMNAND v3.3-v4.0 */ static const u16 brcmnand_regs_v33[] = { [BRCMNAND_CMD_START] = 0x04, @@ -535,6 +566,9 @@ enum { CFG_BUS_WIDTH = BIT(CFG_BUS_WIDTH_SHIFT), CFG_DEVICE_SIZE_SHIFT = 24, + /* Only for v2.1 */ + CFG_PAGE_SIZE_SHIFT_v2_1 = 30, + /* Only for pre-v7.1 (with no CFG_EXT register) */ CFG_PAGE_SIZE_SHIFT = 20, CFG_BLK_SIZE_SHIFT = 28, @@ -570,12 +604,16 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl) { static const unsigned int block_sizes_v6[] = { 8, 16, 128, 256, 512, 1024, 2048, 0 }; static const unsigned int block_sizes_v4[] = { 16, 128, 8, 512, 256, 1024, 2048, 0 }; + static const unsigned int block_sizes_v2_2[] = { 16, 128, 8, 512, 256, 0 }; + static const unsigned int block_sizes_v2_1[] = { 16, 128, 8, 512, 0 }; static const unsigned int page_sizes_v3_4[] = { 512, 2048, 4096, 8192, 0 }; + static const unsigned int page_sizes_v2_2[] = { 512, 2048, 4096, 0 }; + static const unsigned int page_sizes_v2_1[] = { 512, 2048, 0 }; ctrl->nand_version = nand_readreg(ctrl, 0) & 0xffff; - /* Only support v4.0+? */ - if (ctrl->nand_version < 0x0400) { + /* Only support v2.1+ */ + if (ctrl->nand_version < 0x0201) { dev_err(ctrl->dev, "version %#x not supported\n", ctrl->nand_version); return -ENODEV; @@ -592,6 +630,8 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl) ctrl->reg_offsets = brcmnand_regs_v50; else if (ctrl->nand_version >= 0x0303) ctrl->reg_offsets = brcmnand_regs_v33; + else if (ctrl->nand_version >= 0x0201) + ctrl->reg_offsets = brcmnand_regs_v21; /* Chip-select stride */ if (ctrl->nand_version >= 0x0701) @@ -617,14 +657,32 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl) ctrl->max_page_size = 16 * 1024; ctrl->max_block_size = 2 * 1024 * 1024; } else { - ctrl->page_sizes = page_sizes_v3_4; + if (ctrl->nand_version >= 0x0304) + ctrl->page_sizes = page_sizes_v3_4; + else if (ctrl->nand_version >= 0x0202) + ctrl->page_sizes = page_sizes_v2_2; + else + ctrl->page_sizes = page_sizes_v2_1; + + if (ctrl->nand_version >= 0x0202) + ctrl->page_size_shift = CFG_PAGE_SIZE_SHIFT; + else + ctrl->page_size_shift = CFG_PAGE_SIZE_SHIFT_v2_1; + if (ctrl->nand_version >= 0x0600) ctrl->block_sizes = block_sizes_v6; - else + else if (ctrl->nand_version >= 0x0400) ctrl->block_sizes = block_sizes_v4; + else if (ctrl->nand_version >= 0x0202) + ctrl->block_sizes = block_sizes_v2_2; + else + ctrl->block_sizes = block_sizes_v2_1; if (ctrl->nand_version < 0x0400) { - ctrl->max_page_size = 4096; + if (ctrl->nand_version < 0x0202) + ctrl->max_page_size = 2048; + else + ctrl->max_page_size = 4096; ctrl->max_block_size = 512 * 1024; } } @@ -810,6 +868,9 @@ static void brcmnand_wr_corr_thresh(struct brcmnand_host *host, u8 val) enum brcmnand_reg reg = BRCMNAND_CORR_THRESHOLD; int cs = host->cs; + if (!ctrl->reg_offsets[reg]) + return; + if (ctrl->nand_version == 
0x0702) bits = 7; else if (ctrl->nand_version >= 0x0600) @@ -868,8 +929,10 @@ static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl) return GENMASK(7, 0); else if (ctrl->nand_version >= 0x0600) return GENMASK(6, 0); - else + else if (ctrl->nand_version >= 0x0303) return GENMASK(5, 0); + else + return GENMASK(4, 0); } #define NAND_ACC_CONTROL_ECC_SHIFT 16 @@ -2380,7 +2443,7 @@ static int brcmnand_set_cfg(struct brcmnand_host *host, (!!(cfg->device_width == 16) << CFG_BUS_WIDTH_SHIFT) | (device_size << CFG_DEVICE_SIZE_SHIFT); if (cfg_offs == cfg_ext_offs) { - tmp |= (page_size << CFG_PAGE_SIZE_SHIFT) | + tmp |= (page_size << ctrl->page_size_shift) | (block_size << CFG_BLK_SIZE_SHIFT); nand_writereg(ctrl, cfg_offs, tmp); } else { @@ -2392,9 +2455,11 @@ static int brcmnand_set_cfg(struct brcmnand_host *host, tmp = nand_readreg(ctrl, acc_control_offs); tmp &= ~brcmnand_ecc_level_mask(ctrl); - tmp |= cfg->ecc_level << NAND_ACC_CONTROL_ECC_SHIFT; tmp &= ~brcmnand_spare_area_mask(ctrl); - tmp |= cfg->spare_area_size; + if (ctrl->nand_version >= 0x0302) { + tmp |= cfg->ecc_level << NAND_ACC_CONTROL_ECC_SHIFT; + tmp |= cfg->spare_area_size; + } nand_writereg(ctrl, acc_control_offs, tmp); brcmnand_set_sector_size_1k(host, cfg->sector_size_1k); @@ -2768,6 +2833,8 @@ const struct dev_pm_ops brcmnand_pm_ops = { EXPORT_SYMBOL_GPL(brcmnand_pm_ops); static const struct of_device_id brcmnand_of_match[] = { + { .compatible = "brcm,brcmnand-v2.1" }, + { .compatible = "brcm,brcmnand-v2.2" }, { .compatible = "brcm,brcmnand-v4.0" }, { .compatible = "brcm,brcmnand-v5.0" }, { .compatible = "brcm,brcmnand-v6.0" }, -- GitLab From 6be834c667d3075a40dbbbd54ee211b8177e1530 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Mon, 25 May 2020 10:58:35 +0200 Subject: [PATCH 0957/1971] mtd: rawnand: nandsim: Consistent use of 'ns' instead of 'dev' The nandsim object is called 'ns' almost everywhere, keep it that way everywhere for consistency. Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200525085851.17682-2-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nandsim.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c index 23cda67a3f53a..0062e4fedcc08 100644 --- a/drivers/mtd/nand/raw/nandsim.c +++ b/drivers/mtd/nand/raw/nandsim.c @@ -487,12 +487,12 @@ DEFINE_SHOW_ATTRIBUTE(nandsim); /** * nandsim_debugfs_create - initialize debugfs - * @dev: nandsim device description object + * @ns: nandsim device description object * * This function creates all debugfs files for UBI device @ubi. Returns zero in * case of success and a negative error code in case of failure. */ -static int nandsim_debugfs_create(struct nandsim *dev) +static int nandsim_debugfs_create(struct nandsim *ns) { struct dentry *root = nsmtd->dbg.dfs_dir; struct dentry *dent; @@ -508,8 +508,8 @@ static int nandsim_debugfs_create(struct nandsim *dev) return 0; } - dent = debugfs_create_file("nandsim_wear_report", S_IRUSR, - root, dev, &nandsim_fops); + dent = debugfs_create_file("nandsim_wear_report", S_IRUSR, root, ns, + &nandsim_fops); if (IS_ERR_OR_NULL(dent)) { NS_ERR("cannot create \"nandsim_wear_report\" debugfs entry\n"); return -1; -- GitLab From b81fa3c45e5c50af34777e0d383c6c16798d918c Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Mon, 25 May 2020 10:58:36 +0200 Subject: [PATCH 0958/1971] mtd: rawnand: nandsim: Use octal permissions Symbolic permissions 'S_IRUSR' are not preferred. 
Checkpatch.pl advises to use octal permissions '0400'. Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200525085851.17682-3-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nandsim.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c index 0062e4fedcc08..ea46f7011a0f5 100644 --- a/drivers/mtd/nand/raw/nandsim.c +++ b/drivers/mtd/nand/raw/nandsim.c @@ -508,7 +508,7 @@ static int nandsim_debugfs_create(struct nandsim *ns) return 0; } - dent = debugfs_create_file("nandsim_wear_report", S_IRUSR, root, ns, + dent = debugfs_create_file("nandsim_wear_report", 0400, root, ns, &nandsim_fops); if (IS_ERR_OR_NULL(dent)) { NS_ERR("cannot create \"nandsim_wear_report\" debugfs entry\n"); -- GitLab From 88f9f3e89a8ad1594c4d9c599bdf9c904e3976fe Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Mon, 25 May 2020 10:58:37 +0200 Subject: [PATCH 0959/1971] mtd: rawnand: nandsim: Use a consistent ns_ prefix for all functions Some functions are prefixed "nandsim_", others "ns_" and many are simply not prefixed at all. Make this file consistent by prefixing all functions by "ns_". This is a mechanical change. Sometimes the line is a bit reworked as well to fit the kernel coding style. For instance, there are several places where displayed strings are cut. When one of this line is changed because of the naming update, the two parts of the strings gets concatenated. Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200525085851.17682-4-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nandsim.c | 248 +++++++++++++++++---------------- 1 file changed, 131 insertions(+), 117 deletions(-) diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c index ea46f7011a0f5..2c335cc8bcdf9 100644 --- a/drivers/mtd/nand/raw/nandsim.c +++ b/drivers/mtd/nand/raw/nandsim.c @@ -432,7 +432,7 @@ static unsigned long total_wear = 0; /* MTD structure for NAND controller */ static struct mtd_info *nsmtd; -static int nandsim_show(struct seq_file *m, void *private) +static int ns_show(struct seq_file *m, void *private) { unsigned long wmin = -1, wmax = 0, avg; unsigned long deciles[10], decile_max[10], tot = 0; @@ -483,16 +483,16 @@ static int nandsim_show(struct seq_file *m, void *private) return 0; } -DEFINE_SHOW_ATTRIBUTE(nandsim); +DEFINE_SHOW_ATTRIBUTE(ns); /** - * nandsim_debugfs_create - initialize debugfs + * ns_debugfs_create - initialize debugfs * @ns: nandsim device description object * * This function creates all debugfs files for UBI device @ubi. Returns zero in * case of success and a negative error code in case of failure. */ -static int nandsim_debugfs_create(struct nandsim *ns) +static int ns_debugfs_create(struct nandsim *ns) { struct dentry *root = nsmtd->dbg.dfs_dir; struct dentry *dent; @@ -509,7 +509,7 @@ static int nandsim_debugfs_create(struct nandsim *ns) } dent = debugfs_create_file("nandsim_wear_report", 0400, root, ns, - &nandsim_fops); + &ns_fops); if (IS_ERR_OR_NULL(dent)) { NS_ERR("cannot create \"nandsim_wear_report\" debugfs entry\n"); return -1; @@ -524,7 +524,7 @@ static int nandsim_debugfs_create(struct nandsim *ns) * * RETURNS: 0 if success, -ENOMEM if memory alloc fails. */ -static int __init alloc_device(struct nandsim *ns) +static int __init ns_alloc_device(struct nandsim *ns) { struct file *cfile; int i, err; @@ -588,7 +588,7 @@ err_close: /* * Free any allocated pages, and free the array of page pointers. 
*/ -static void free_device(struct nandsim *ns) +static void ns_free_device(struct nandsim *ns) { int i; @@ -610,7 +610,7 @@ static void free_device(struct nandsim *ns) } } -static char __init *get_partition_name(int i) +static char __init *ns_get_partition_name(int i) { return kasprintf(GFP_KERNEL, "NAND simulator partition %d", i); } @@ -620,7 +620,7 @@ static char __init *get_partition_name(int i) * * RETURNS: 0 if success, -ERRNO if failure. */ -static int __init init_nandsim(struct mtd_info *mtd) +static int __init ns_init(struct mtd_info *mtd) { struct nand_chip *chip = mtd_to_nand(mtd); struct nandsim *ns = nand_get_controller_data(chip); @@ -693,7 +693,7 @@ static int __init init_nandsim(struct mtd_info *mtd) NS_ERR("bad partition size.\n"); return -EINVAL; } - ns->partitions[i].name = get_partition_name(i); + ns->partitions[i].name = ns_get_partition_name(i); if (!ns->partitions[i].name) { NS_ERR("unable to allocate memory.\n"); return -ENOMEM; @@ -709,7 +709,7 @@ static int __init init_nandsim(struct mtd_info *mtd) NS_ERR("too many partitions.\n"); return -EINVAL; } - ns->partitions[i].name = get_partition_name(i); + ns->partitions[i].name = ns_get_partition_name(i); if (!ns->partitions[i].name) { NS_ERR("unable to allocate memory.\n"); return -ENOMEM; @@ -739,7 +739,7 @@ static int __init init_nandsim(struct mtd_info *mtd) printk("sector address bytes: %u\n", ns->geom.secaddrbytes); printk("options: %#x\n", ns->options); - if ((ret = alloc_device(ns)) != 0) + if ((ret = ns_alloc_device(ns)) != 0) return ret; /* Allocate / initialize the internal buffer */ @@ -757,15 +757,15 @@ static int __init init_nandsim(struct mtd_info *mtd) /* * Free the nandsim structure. */ -static void free_nandsim(struct nandsim *ns) +static void ns_free(struct nandsim *ns) { kfree(ns->buf.byte); - free_device(ns); + ns_free_device(ns); return; } -static int parse_badblocks(struct nandsim *ns, struct mtd_info *mtd) +static int ns_parse_badblocks(struct nandsim *ns, struct mtd_info *mtd) { char *w; int zero_ok; @@ -793,7 +793,7 @@ static int parse_badblocks(struct nandsim *ns, struct mtd_info *mtd) return 0; } -static int parse_weakblocks(void) +static int ns_parse_weakblocks(void) { char *w; int zero_ok; @@ -830,7 +830,7 @@ static int parse_weakblocks(void) return 0; } -static int erase_error(unsigned int erase_block_no) +static int ns_erase_error(unsigned int erase_block_no) { struct weak_block *wb; @@ -844,7 +844,7 @@ static int erase_error(unsigned int erase_block_no) return 0; } -static int parse_weakpages(void) +static int ns_parse_weakpages(void) { char *w; int zero_ok; @@ -881,7 +881,7 @@ static int parse_weakpages(void) return 0; } -static int write_error(unsigned int page_no) +static int ns_write_error(unsigned int page_no) { struct weak_page *wp; @@ -895,7 +895,7 @@ static int write_error(unsigned int page_no) return 0; } -static int parse_gravepages(void) +static int ns_parse_gravepages(void) { char *g; int zero_ok; @@ -932,7 +932,7 @@ static int parse_gravepages(void) return 0; } -static int read_error(unsigned int page_no) +static int ns_read_error(unsigned int page_no) { struct grave_page *gp; @@ -946,7 +946,7 @@ static int read_error(unsigned int page_no) return 0; } -static void free_lists(void) +static void ns_free_lists(void) { struct list_head *pos, *n; list_for_each_safe(pos, n, &weak_blocks) { @@ -964,7 +964,7 @@ static void free_lists(void) kfree(erase_block_wear); } -static int setup_wear_reporting(struct mtd_info *mtd) +static int ns_setup_wear_reporting(struct mtd_info *mtd) { 
size_t mem; @@ -982,7 +982,7 @@ static int setup_wear_reporting(struct mtd_info *mtd) return 0; } -static void update_wear(unsigned int erase_block_no) +static void ns_update_wear(unsigned int erase_block_no) { if (!erase_block_wear) return; @@ -1001,7 +1001,7 @@ static void update_wear(unsigned int erase_block_no) /* * Returns the string representation of 'state' state. */ -static char *get_state_name(uint32_t state) +static char *ns_get_state_name(uint32_t state) { switch (NS_STATE(state)) { case STATE_CMD_READ0: @@ -1061,7 +1061,7 @@ static char *get_state_name(uint32_t state) * * RETURNS: 1 if wrong command, 0 if right. */ -static int check_command(int cmd) +static int ns_check_command(int cmd) { switch (cmd) { @@ -1088,7 +1088,7 @@ static int check_command(int cmd) /* * Returns state after command is accepted by command number. */ -static uint32_t get_state_by_command(unsigned command) +static uint32_t ns_get_state_by_command(unsigned command) { switch (command) { case NAND_CMD_READ0: @@ -1126,7 +1126,7 @@ static uint32_t get_state_by_command(unsigned command) /* * Move an address byte to the correspondent internal register. */ -static inline void accept_addr_byte(struct nandsim *ns, u_char bt) +static inline void ns_accept_addr_byte(struct nandsim *ns, u_char bt) { uint byte = (uint)bt; @@ -1144,9 +1144,10 @@ static inline void accept_addr_byte(struct nandsim *ns, u_char bt) /* * Switch to STATE_READY state. */ -static inline void switch_to_ready_state(struct nandsim *ns, u_char status) +static inline void ns_switch_to_ready_state(struct nandsim *ns, u_char status) { - NS_DBG("switch_to_ready_state: switch to %s state\n", get_state_name(STATE_READY)); + NS_DBG("switch_to_ready_state: switch to %s state\n", + ns_get_state_name(STATE_READY)); ns->state = STATE_READY; ns->nxstate = STATE_UNKNOWN; @@ -1203,7 +1204,7 @@ static inline void switch_to_ready_state(struct nandsim *ns, u_char status) * -1 - several matches. * 0 - operation is found. */ -static int find_operation(struct nandsim *ns, uint32_t flag) +static int ns_find_operation(struct nandsim *ns, uint32_t flag) { int opsfound = 0; int i, j, idx = 0; @@ -1256,7 +1257,8 @@ static int find_operation(struct nandsim *ns, uint32_t flag) ns->state = ns->op[ns->stateidx]; ns->nxstate = ns->op[ns->stateidx + 1]; NS_DBG("find_operation: operation found, index: %d, state: %s, nxstate %s\n", - idx, get_state_name(ns->state), get_state_name(ns->nxstate)); + idx, ns_get_state_name(ns->state), + ns_get_state_name(ns->nxstate)); return 0; } @@ -1264,13 +1266,13 @@ static int find_operation(struct nandsim *ns, uint32_t flag) /* Nothing was found. 
Try to ignore previous commands (if any) and search again */ if (ns->npstates != 0) { NS_DBG("find_operation: no operation found, try again with state %s\n", - get_state_name(ns->state)); + ns_get_state_name(ns->state)); ns->npstates = 0; - return find_operation(ns, 0); + return ns_find_operation(ns, 0); } NS_DBG("find_operation: no operations found\n"); - switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); + ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); return -2; } @@ -1287,7 +1289,7 @@ static int find_operation(struct nandsim *ns, uint32_t flag) return -1; } -static void put_pages(struct nandsim *ns) +static void ns_put_pages(struct nandsim *ns) { int i; @@ -1296,7 +1298,8 @@ static void put_pages(struct nandsim *ns) } /* Get page cache pages in advance to provide NOFS memory allocation */ -static int get_pages(struct nandsim *ns, struct file *file, size_t count, loff_t pos) +static int ns_get_pages(struct nandsim *ns, struct file *file, size_t count, + loff_t pos) { pgoff_t index, start_index, end_index; struct page *page; @@ -1316,7 +1319,7 @@ static int get_pages(struct nandsim *ns, struct file *file, size_t count, loff_t page = find_or_create_page(mapping, index, GFP_NOFS); } if (page == NULL) { - put_pages(ns); + ns_put_pages(ns); return -ENOMEM; } unlock_page(page); @@ -1326,35 +1329,37 @@ static int get_pages(struct nandsim *ns, struct file *file, size_t count, loff_t return 0; } -static ssize_t read_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t pos) +static ssize_t ns_read_file(struct nandsim *ns, struct file *file, void *buf, + size_t count, loff_t pos) { ssize_t tx; int err; unsigned int noreclaim_flag; - err = get_pages(ns, file, count, pos); + err = ns_get_pages(ns, file, count, pos); if (err) return err; noreclaim_flag = memalloc_noreclaim_save(); tx = kernel_read(file, buf, count, &pos); memalloc_noreclaim_restore(noreclaim_flag); - put_pages(ns); + ns_put_pages(ns); return tx; } -static ssize_t write_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t pos) +static ssize_t ns_write_file(struct nandsim *ns, struct file *file, void *buf, + size_t count, loff_t pos) { ssize_t tx; int err; unsigned int noreclaim_flag; - err = get_pages(ns, file, count, pos); + err = ns_get_pages(ns, file, count, pos); if (err) return err; noreclaim_flag = memalloc_noreclaim_save(); tx = kernel_write(file, buf, count, &pos); memalloc_noreclaim_restore(noreclaim_flag); - put_pages(ns); + ns_put_pages(ns); return tx; } @@ -1374,11 +1379,11 @@ static inline u_char *NS_PAGE_BYTE_OFF(struct nandsim *ns) return NS_GET_PAGE(ns)->byte + ns->regs.column + ns->regs.off; } -static int do_read_error(struct nandsim *ns, int num) +static int ns_do_read_error(struct nandsim *ns, int num) { unsigned int page_no = ns->regs.row; - if (read_error(page_no)) { + if (ns_read_error(page_no)) { prandom_bytes(ns->buf.byte, num); NS_WARN("simulating read error in page %u\n", page_no); return 1; @@ -1386,7 +1391,7 @@ static int do_read_error(struct nandsim *ns, int num) return 0; } -static void do_bit_flips(struct nandsim *ns, int num) +static void ns_do_bit_flips(struct nandsim *ns, int num) { if (bitflips && prandom_u32() < (1 << 22)) { int flips = 1; @@ -1406,7 +1411,7 @@ static void do_bit_flips(struct nandsim *ns, int num) /* * Fill the NAND buffer with data read from the specified page. 
*/ -static void read_page(struct nandsim *ns, int num) +static void ns_read_page(struct nandsim *ns, int num) { union ns_mem *mypage; @@ -1420,15 +1425,16 @@ static void read_page(struct nandsim *ns, int num) NS_DBG("read_page: page %d written, reading from %d\n", ns->regs.row, ns->regs.column + ns->regs.off); - if (do_read_error(ns, num)) + if (ns_do_read_error(ns, num)) return; pos = (loff_t)NS_RAW_OFFSET(ns) + ns->regs.off; - tx = read_file(ns, ns->cfile, ns->buf.byte, num, pos); + tx = ns_read_file(ns, ns->cfile, ns->buf.byte, num, + pos); if (tx != num) { NS_ERR("read_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx); return; } - do_bit_flips(ns, num); + ns_do_bit_flips(ns, num); } return; } @@ -1440,17 +1446,17 @@ static void read_page(struct nandsim *ns, int num) } else { NS_DBG("read_page: page %d allocated, reading from %d\n", ns->regs.row, ns->regs.column + ns->regs.off); - if (do_read_error(ns, num)) + if (ns_do_read_error(ns, num)) return; memcpy(ns->buf.byte, NS_PAGE_BYTE_OFF(ns), num); - do_bit_flips(ns, num); + ns_do_bit_flips(ns, num); } } /* * Erase all pages in the specified sector. */ -static void erase_sector(struct nandsim *ns) +static void ns_erase_sector(struct nandsim *ns) { union ns_mem *mypage; int i; @@ -1478,7 +1484,7 @@ static void erase_sector(struct nandsim *ns) /* * Program the specified page with the contents from the NAND buffer. */ -static int prog_page(struct nandsim *ns, int num) +static int ns_prog_page(struct nandsim *ns, int num) { int i; union ns_mem *mypage; @@ -1497,7 +1503,7 @@ static int prog_page(struct nandsim *ns, int num) memset(ns->file_buf, 0xff, ns->geom.pgszoob); } else { all = 0; - tx = read_file(ns, ns->cfile, pg_off, num, off); + tx = ns_read_file(ns, ns->cfile, pg_off, num, off); if (tx != num) { NS_ERR("prog_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx); return -1; @@ -1507,14 +1513,15 @@ static int prog_page(struct nandsim *ns, int num) pg_off[i] &= ns->buf.byte[i]; if (all) { loff_t pos = (loff_t)ns->regs.row * ns->geom.pgszoob; - tx = write_file(ns, ns->cfile, ns->file_buf, ns->geom.pgszoob, pos); + tx = ns_write_file(ns, ns->cfile, ns->file_buf, + ns->geom.pgszoob, pos); if (tx != ns->geom.pgszoob) { NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx); return -1; } __set_bit(ns->regs.row, ns->pages_written); } else { - tx = write_file(ns, ns->cfile, pg_off, num, off); + tx = ns_write_file(ns, ns->cfile, pg_off, num, off); if (tx != num) { NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx); return -1; @@ -1552,7 +1559,7 @@ static int prog_page(struct nandsim *ns, int num) * * RETURNS: 0 if success, -1 if error. */ -static int do_state_action(struct nandsim *ns, uint32_t action) +static int ns_do_state_action(struct nandsim *ns, uint32_t action) { int num; int busdiv = ns->busw == 8 ? 
1 : 2; @@ -1579,7 +1586,7 @@ static int do_state_action(struct nandsim *ns, uint32_t action) break; } num = ns->geom.pgszoob - ns->regs.off - ns->regs.column; - read_page(ns, num); + ns_read_page(ns, num); NS_DBG("do_state_action: (ACTION_CPY:) copy %d bytes to int buf, raw offset %d\n", num, NS_RAW_OFFSET(ns) + ns->regs.off); @@ -1622,14 +1629,14 @@ static int do_state_action(struct nandsim *ns, uint32_t action) ns->regs.row, NS_RAW_OFFSET(ns)); NS_LOG("erase sector %u\n", erase_block_no); - erase_sector(ns); + ns_erase_sector(ns); NS_MDELAY(erase_delay); if (erase_block_wear) - update_wear(erase_block_no); + ns_update_wear(erase_block_no); - if (erase_error(erase_block_no)) { + if (ns_erase_error(erase_block_no)) { NS_WARN("simulating erase failure in erase block %u\n", erase_block_no); return -1; } @@ -1653,7 +1660,7 @@ static int do_state_action(struct nandsim *ns, uint32_t action) return -1; } - if (prog_page(ns, num) == -1) + if (ns_prog_page(ns, num) == -1) return -1; page_no = ns->regs.row; @@ -1665,7 +1672,7 @@ static int do_state_action(struct nandsim *ns, uint32_t action) NS_UDELAY(programm_delay); NS_UDELAY(output_cycle * ns->geom.pgsz / 1000 / busdiv); - if (write_error(page_no)) { + if (ns_write_error(page_no)) { NS_WARN("simulating write failure in page %u\n", page_no); return -1; } @@ -1702,7 +1709,7 @@ static int do_state_action(struct nandsim *ns, uint32_t action) /* * Switch simulator's state. */ -static void switch_state(struct nandsim *ns) +static void ns_switch_state(struct nandsim *ns) { if (ns->op) { /* @@ -1716,11 +1723,13 @@ static void switch_state(struct nandsim *ns) NS_DBG("switch_state: operation is known, switch to the next state, " "state: %s, nxstate: %s\n", - get_state_name(ns->state), get_state_name(ns->nxstate)); + ns_get_state_name(ns->state), + ns_get_state_name(ns->nxstate)); /* See, whether we need to do some action */ - if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) { - switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); + if ((ns->state & ACTION_MASK) && + ns_do_state_action(ns, ns->state) < 0) { + ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); return; } @@ -1734,15 +1743,16 @@ static void switch_state(struct nandsim *ns) * The only event causing the switch_state function to * be called with yet unknown operation is new command. 
*/ - ns->state = get_state_by_command(ns->regs.command); + ns->state = ns_get_state_by_command(ns->regs.command); NS_DBG("switch_state: operation is unknown, try to find it\n"); - if (find_operation(ns, 0) != 0) + if (ns_find_operation(ns, 0) != 0) return; - if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) { - switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); + if ((ns->state & ACTION_MASK) && + ns_do_state_action(ns, ns->state) < 0) { + ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); return; } } @@ -1770,7 +1780,7 @@ static void switch_state(struct nandsim *ns) NS_DBG("switch_state: operation complete, switch to STATE_READY state\n"); - switch_to_ready_state(ns, status); + ns_switch_to_ready_state(ns, status); return; } else if (ns->nxstate & (STATE_DATAIN_MASK | STATE_DATAOUT_MASK)) { @@ -1784,7 +1794,8 @@ static void switch_state(struct nandsim *ns) NS_DBG("switch_state: the next state is data I/O, switch, " "state: %s, nxstate: %s\n", - get_state_name(ns->state), get_state_name(ns->nxstate)); + ns_get_state_name(ns->state), + ns_get_state_name(ns->nxstate)); /* * Set the internal register to the count of bytes which @@ -1862,8 +1873,8 @@ static u_char ns_nand_read_byte(struct nand_chip *chip) return outb; } if (!(ns->state & STATE_DATAOUT_MASK)) { - NS_WARN("read_byte: unexpected data output cycle, state is %s " - "return %#x\n", get_state_name(ns->state), (uint)outb); + NS_WARN("read_byte: unexpected data output cycle, state is %s return %#x\n", + ns_get_state_name(ns->state), (uint)outb); return outb; } @@ -1902,7 +1913,7 @@ static u_char ns_nand_read_byte(struct nand_chip *chip) NS_DBG("read_byte: all bytes were read\n"); if (NS_STATE(ns->nxstate) == STATE_READY) - switch_state(ns); + ns_switch_state(ns); } return outb; @@ -1929,12 +1940,12 @@ static void ns_nand_write_byte(struct nand_chip *chip, u_char byte) if (byte == NAND_CMD_RESET) { NS_LOG("reset chip\n"); - switch_to_ready_state(ns, NS_STATUS_OK(ns)); + ns_switch_to_ready_state(ns, NS_STATUS_OK(ns)); return; } /* Check that the command byte is correct */ - if (check_command(byte)) { + if (ns_check_command(byte)) { NS_ERR("write_byte: unknown command %#x\n", (uint)byte); return; } @@ -1943,7 +1954,7 @@ static void ns_nand_write_byte(struct nand_chip *chip, u_char byte) || NS_STATE(ns->state) == STATE_DATAOUT) { int row = ns->regs.row; - switch_state(ns); + ns_switch_state(ns); if (byte == NAND_CMD_RNDOUT) ns->regs.row = row; } @@ -1958,16 +1969,17 @@ static void ns_nand_write_byte(struct nand_chip *chip, u_char byte) * was expected but command was input. In this case ignore * previous command(s)/state(s) and accept the last one. 
*/ - NS_WARN("write_byte: command (%#x) wasn't expected, expected state is %s, " - "ignore previous states\n", (uint)byte, get_state_name(ns->nxstate)); + NS_WARN("write_byte: command (%#x) wasn't expected, expected state is %s, ignore previous states\n", + (uint)byte, + ns_get_state_name(ns->nxstate)); } - switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); + ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); } NS_DBG("command byte corresponding to %s state accepted\n", - get_state_name(get_state_by_command(byte))); + ns_get_state_name(ns_get_state_by_command(byte))); ns->regs.command = byte; - switch_state(ns); + ns_switch_state(ns); } else if (ns->lines.ale == 1) { /* @@ -1978,11 +1990,13 @@ static void ns_nand_write_byte(struct nand_chip *chip, u_char byte) NS_DBG("write_byte: operation isn't known yet, identify it\n"); - if (find_operation(ns, 1) < 0) + if (ns_find_operation(ns, 1) < 0) return; - if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) { - switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); + if ((ns->state & ACTION_MASK) && + ns_do_state_action(ns, ns->state) < 0) { + ns_switch_to_ready_state(ns, + NS_STATUS_FAILED(ns)); return; } @@ -2004,20 +2018,20 @@ static void ns_nand_write_byte(struct nand_chip *chip, u_char byte) /* Check that chip is expecting address */ if (!(ns->nxstate & STATE_ADDR_MASK)) { - NS_ERR("write_byte: address (%#x) isn't expected, expected state is %s, " - "switch to STATE_READY\n", (uint)byte, get_state_name(ns->nxstate)); - switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); + NS_ERR("write_byte: address (%#x) isn't expected, expected state is %s, switch to STATE_READY\n", + (uint)byte, ns_get_state_name(ns->nxstate)); + ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); return; } /* Check if this is expected byte */ if (ns->regs.count == ns->regs.num) { NS_ERR("write_byte: no more address bytes expected\n"); - switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); + ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); return; } - accept_addr_byte(ns, byte); + ns_accept_addr_byte(ns, byte); ns->regs.count += 1; @@ -2026,7 +2040,7 @@ static void ns_nand_write_byte(struct nand_chip *chip, u_char byte) if (ns->regs.count == ns->regs.num) { NS_DBG("address (%#x, %#x) is accepted\n", ns->regs.row, ns->regs.column); - switch_state(ns); + ns_switch_state(ns); } } else { @@ -2036,10 +2050,10 @@ static void ns_nand_write_byte(struct nand_chip *chip, u_char byte) /* Check that chip is expecting data input */ if (!(ns->state & STATE_DATAIN_MASK)) { - NS_ERR("write_byte: data input (%#x) isn't expected, state is %s, " - "switch to %s\n", (uint)byte, - get_state_name(ns->state), get_state_name(STATE_READY)); - switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); + NS_ERR("write_byte: data input (%#x) isn't expected, state is %s, switch to %s\n", + (uint)byte, ns_get_state_name(ns->state), + ns_get_state_name(STATE_READY)); + ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); return; } @@ -2069,16 +2083,16 @@ static void ns_nand_write_buf(struct nand_chip *chip, const u_char *buf, /* Check that chip is expecting data input */ if (!(ns->state & STATE_DATAIN_MASK)) { - NS_ERR("write_buf: data input isn't expected, state is %s, " - "switch to STATE_READY\n", get_state_name(ns->state)); - switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); + NS_ERR("write_buf: data input isn't expected, state is %s, switch to STATE_READY\n", + ns_get_state_name(ns->state)); + ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); return; } /* Check if these are expected bytes */ if 
(ns->regs.count + len > ns->regs.num) { NS_ERR("write_buf: too many input bytes\n"); - switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); + ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); return; } @@ -2105,7 +2119,7 @@ static void ns_nand_read_buf(struct nand_chip *chip, u_char *buf, int len) } if (!(ns->state & STATE_DATAOUT_MASK)) { NS_WARN("read_buf: unexpected data output cycle, current state is %s\n", - get_state_name(ns->state)); + ns_get_state_name(ns->state)); return; } @@ -2121,7 +2135,7 @@ static void ns_nand_read_buf(struct nand_chip *chip, u_char *buf, int len) /* Check if these are expected bytes */ if (ns->regs.count + len > ns->regs.num) { NS_ERR("read_buf: too many bytes to read\n"); - switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); + ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); return; } @@ -2130,7 +2144,7 @@ static void ns_nand_read_buf(struct nand_chip *chip, u_char *buf, int len) if (ns->regs.count == ns->regs.num) { if (NS_STATE(ns->nxstate) == STATE_READY) - switch_state(ns); + ns_switch_state(ns); } return; @@ -2288,13 +2302,13 @@ static int __init ns_init_module(void) nsmtd->owner = THIS_MODULE; - if ((retval = parse_weakblocks()) != 0) + if ((retval = ns_parse_weakblocks()) != 0) goto error; - if ((retval = parse_weakpages()) != 0) + if ((retval = ns_parse_weakpages()) != 0) goto error; - if ((retval = parse_gravepages()) != 0) + if ((retval = ns_parse_gravepages()) != 0) goto error; nand_controller_init(&ns->base); @@ -2328,16 +2342,16 @@ static int __init ns_init_module(void) chip->pagemask = (targetsize >> chip->page_shift) - 1; } - if ((retval = setup_wear_reporting(nsmtd)) != 0) + if ((retval = ns_setup_wear_reporting(nsmtd)) != 0) goto err_exit; - if ((retval = init_nandsim(nsmtd)) != 0) + if ((retval = ns_init(nsmtd)) != 0) goto err_exit; if ((retval = nand_create_bbt(chip)) != 0) goto err_exit; - if ((retval = parse_badblocks(ns, nsmtd)) != 0) + if ((retval = ns_parse_badblocks(ns, nsmtd)) != 0) goto err_exit; /* Register NAND partitions */ @@ -2346,19 +2360,19 @@ static int __init ns_init_module(void) if (retval != 0) goto err_exit; - if ((retval = nandsim_debugfs_create(ns)) != 0) + if ((retval = ns_debugfs_create(ns)) != 0) goto err_exit; return 0; err_exit: - free_nandsim(ns); + ns_free(ns); nand_release(chip); for (i = 0;i < ARRAY_SIZE(ns->partitions); ++i) kfree(ns->partitions[i].name); error: kfree(ns); - free_lists(); + ns_free_lists(); return retval; } @@ -2374,12 +2388,12 @@ static void __exit ns_cleanup_module(void) struct nandsim *ns = nand_get_controller_data(chip); int i; - free_nandsim(ns); /* Free nandsim private resources */ + ns_free(ns); /* Free nandsim private resources */ nand_release(chip); /* Unregister driver */ for (i = 0;i < ARRAY_SIZE(ns->partitions); ++i) kfree(ns->partitions[i].name); kfree(ns); /* Free other structures */ - free_lists(); + ns_free_lists(); } module_exit(ns_cleanup_module); -- GitLab From 052a7a5374bc1e41dd1588fcb861ec3d810fd160 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Mon, 25 May 2020 10:58:38 +0200 Subject: [PATCH 0960/1971] mtd: rawnand: nandsim: Clean error handling Many function calls are done this way: if ((retval = func()) != 0) return retval; while we expect in the kernel function calls like: retval = func(); if (retval) return retval; Apply this change where possible and also use "ret" instead of "retval" in ns_init_module for consistency, as it is only used in this function. 
Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200525085851.17682-5-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nandsim.c | 46 ++++++++++++++++++++-------------- 1 file changed, 27 insertions(+), 19 deletions(-) diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c index 2c335cc8bcdf9..5b427a50bc27b 100644 --- a/drivers/mtd/nand/raw/nandsim.c +++ b/drivers/mtd/nand/raw/nandsim.c @@ -739,7 +739,8 @@ static int __init ns_init(struct mtd_info *mtd) printk("sector address bytes: %u\n", ns->geom.secaddrbytes); printk("options: %#x\n", ns->options); - if ((ret = ns_alloc_device(ns)) != 0) + ret = ns_alloc_device(ns); + if (ret) return ret; /* Allocate / initialize the internal buffer */ @@ -1747,7 +1748,7 @@ static void ns_switch_state(struct nandsim *ns) NS_DBG("switch_state: operation is unknown, try to find it\n"); - if (ns_find_operation(ns, 0) != 0) + if (!ns_find_operation(ns, 0)) return; if ((ns->state & ACTION_MASK) && @@ -2243,7 +2244,7 @@ static int __init ns_init_module(void) { struct nand_chip *chip; struct nandsim *ns; - int retval = -ENOMEM, i; + int ret, i; if (bus_width != 8 && bus_width != 16) { NS_ERR("wrong bus width (%d), use only 8 or 16\n", bus_width); @@ -2276,7 +2277,7 @@ static int __init ns_init_module(void) break; default: NS_ERR("bbt has to be 0..2\n"); - retval = -EINVAL; + ret = -EINVAL; goto error; } /* @@ -2302,21 +2303,24 @@ static int __init ns_init_module(void) nsmtd->owner = THIS_MODULE; - if ((retval = ns_parse_weakblocks()) != 0) + ret = ns_parse_weakblocks(); + if (ret) goto error; - if ((retval = ns_parse_weakpages()) != 0) + ret = ns_parse_weakpages(); + if (ret) goto error; - if ((retval = ns_parse_gravepages()) != 0) + ret = ns_parse_gravepages(); + if (ret) goto error; nand_controller_init(&ns->base); ns->base.ops = &ns_controller_ops; chip->controller = &ns->base; - retval = nand_scan(chip, 1); - if (retval) { + ret = nand_scan(chip, 1); + if (ret) { NS_ERR("Could not scan NAND Simulator device\n"); goto error; } @@ -2330,7 +2334,7 @@ static int __init ns_init_module(void) if (new_size >> overridesize != nsmtd->erasesize) { NS_ERR("overridesize is too big\n"); - retval = -EINVAL; + ret = -EINVAL; goto err_exit; } @@ -2342,25 +2346,29 @@ static int __init ns_init_module(void) chip->pagemask = (targetsize >> chip->page_shift) - 1; } - if ((retval = ns_setup_wear_reporting(nsmtd)) != 0) + ret = ns_setup_wear_reporting(nsmtd); + if (ret) goto err_exit; - if ((retval = ns_init(nsmtd)) != 0) + ret = ns_init(nsmtd); + if (ret) goto err_exit; - if ((retval = nand_create_bbt(chip)) != 0) + ret = nand_create_bbt(chip); + if (ret) goto err_exit; - if ((retval = ns_parse_badblocks(ns, nsmtd)) != 0) + ret = ns_parse_badblocks(ns, nsmtd); + if (ret) goto err_exit; /* Register NAND partitions */ - retval = mtd_device_register(nsmtd, &ns->partitions[0], - ns->nbparts); - if (retval != 0) + ret = mtd_device_register(nsmtd, &ns->partitions[0], ns->nbparts); + if (ret) goto err_exit; - if ((retval = ns_debugfs_create(ns)) != 0) + ret = ns_debugfs_create(ns); + if (ret) goto err_exit; return 0; @@ -2374,7 +2382,7 @@ error: kfree(ns); ns_free_lists(); - return retval; + return ret; } module_init(ns_init_module); -- GitLab From 7f2a17369f046399a71da196726f1234b53ff7dc Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Mon, 25 May 2020 10:58:39 +0200 Subject: [PATCH 0961/1971] mtd: rawnand: nandsim: Keep track of the created debugfs entries Debugfs entries should be removed in the error path, so first, keep track of 
them. Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200525085851.17682-6-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nandsim.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c index 5b427a50bc27b..c8e9c70a66410 100644 --- a/drivers/mtd/nand/raw/nandsim.c +++ b/drivers/mtd/nand/raw/nandsim.c @@ -353,6 +353,9 @@ struct nandsim { void *file_buf; struct page *held_pages[NS_MAX_HELD_PAGES]; int held_cnt; + + /* debugfs entry */ + struct dentry *dent; }; /* @@ -495,7 +498,6 @@ DEFINE_SHOW_ATTRIBUTE(ns); static int ns_debugfs_create(struct nandsim *ns) { struct dentry *root = nsmtd->dbg.dfs_dir; - struct dentry *dent; /* * Just skip debugfs initialization when the debugfs directory is @@ -508,9 +510,9 @@ static int ns_debugfs_create(struct nandsim *ns) return 0; } - dent = debugfs_create_file("nandsim_wear_report", 0400, root, ns, - &ns_fops); - if (IS_ERR_OR_NULL(dent)) { + ns->dent = debugfs_create_file("nandsim_wear_report", 0400, root, ns, + &ns_fops); + if (IS_ERR_OR_NULL(ns->dent)) { NS_ERR("cannot create \"nandsim_wear_report\" debugfs entry\n"); return -1; } -- GitLab From cde495f83959119d0d2c78e74118a61301b8af32 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Mon, 25 May 2020 10:58:40 +0200 Subject: [PATCH 0962/1971] mtd: rawnand: nandsim: Remove debugfs entries at unload time Create a ns_debugfs_remove() helper for that and call it in ns_cleanup_module(). Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200525085851.17682-7-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nandsim.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c index c8e9c70a66410..a439949945f7f 100644 --- a/drivers/mtd/nand/raw/nandsim.c +++ b/drivers/mtd/nand/raw/nandsim.c @@ -520,6 +520,11 @@ static int ns_debugfs_create(struct nandsim *ns) return 0; } +static void ns_debugfs_remove(struct nandsim *ns) +{ + debugfs_remove_recursive(ns->dent); +} + /* * Allocate array of page pointers, create slab allocation for an array * and initialize the array by NULL pointers. @@ -2398,6 +2403,7 @@ static void __exit ns_cleanup_module(void) struct nandsim *ns = nand_get_controller_data(chip); int i; + ns_debugfs_remove(ns); ns_free(ns); /* Free nandsim private resources */ nand_release(chip); /* Unregister driver */ for (i = 0;i < ARRAY_SIZE(ns->partitions); ++i) -- GitLab From 058018eb0202d144ae266e0497082366bfd6afff Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Mon, 25 May 2020 10:58:41 +0200 Subject: [PATCH 0963/1971] mtd: rawnand: nandsim: Fix the two ns_alloc_device() error paths The ns_alloc_device() helper has actually two distinct path. Handle errors in both of them. 
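A minimal sketch of the goto-unwind idiom that this and the following nandsim error-path fixes converge on, with purely hypothetical names (struct foo, SZ_A and SZ_B are placeholders, not taken from the driver): each allocation that can fail gets its own label, so only the resources acquired so far are released.

static int example_alloc(struct foo *f)
{
	int err;

	f->a = kmalloc(SZ_A, GFP_KERNEL);
	if (!f->a)
		return -ENOMEM;

	f->b = vzalloc(SZ_B);
	if (!f->b) {
		err = -ENOMEM;
		goto err_free_a;
	}

	return 0;

	/* unwind only what was already acquired, in reverse order */
err_free_a:
	kfree(f->a);
	return err;
}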
Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200525085851.17682-8-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nandsim.c | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c index a439949945f7f..da6d919ed1855 100644 --- a/drivers/mtd/nand/raw/nandsim.c +++ b/drivers/mtd/nand/raw/nandsim.c @@ -543,12 +543,12 @@ static int __init ns_alloc_device(struct nandsim *ns) if (!(cfile->f_mode & FMODE_CAN_READ)) { NS_ERR("alloc_device: cache file not readable\n"); err = -EINVAL; - goto err_close; + goto err_close_filp; } if (!(cfile->f_mode & FMODE_CAN_WRITE)) { NS_ERR("alloc_device: cache file not writeable\n"); err = -EINVAL; - goto err_close; + goto err_close_filp; } ns->pages_written = vzalloc(array_size(sizeof(unsigned long), @@ -556,16 +556,24 @@ static int __init ns_alloc_device(struct nandsim *ns) if (!ns->pages_written) { NS_ERR("alloc_device: unable to allocate pages written array\n"); err = -ENOMEM; - goto err_close; + goto err_close_filp; } ns->file_buf = kmalloc(ns->geom.pgszoob, GFP_KERNEL); if (!ns->file_buf) { NS_ERR("alloc_device: unable to allocate file buf\n"); err = -ENOMEM; - goto err_free; + goto err_free_pw; } ns->cfile = cfile; + return 0; + +err_free_pw: + vfree(ns->pages_written); +err_close_filp: + filp_close(cfile, NULL); + + return err; } ns->pages = vmalloc(array_size(sizeof(union ns_mem), ns->geom.pgnum)); @@ -580,15 +588,15 @@ static int __init ns_alloc_device(struct nandsim *ns) ns->geom.pgszoob, 0, 0, NULL); if (!ns->nand_pages_slab) { NS_ERR("cache_create: unable to create kmem_cache\n"); - return -ENOMEM; + err = -ENOMEM; + goto err_free_pg; } return 0; -err_free: - vfree(ns->pages_written); -err_close: - filp_close(cfile, NULL); +err_free_pg: + vfree(ns->pages); + return err; } -- GitLab From 52bc51c54ef8d8e9358f12666e9f107d9a44f394 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Mon, 25 May 2020 10:58:42 +0200 Subject: [PATCH 0964/1971] mtd: rawnand: nandsim: Free partition names on error in ns_init() The ns_init() function shall free the partition names allocated by ns_get_partition_name() on error. 
Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200525085851.17682-9-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nandsim.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c index da6d919ed1855..53889bce81f18 100644 --- a/drivers/mtd/nand/raw/nandsim.c +++ b/drivers/mtd/nand/raw/nandsim.c @@ -722,12 +722,14 @@ static int __init ns_init(struct mtd_info *mtd) if (remains) { if (parts_num + 1 > ARRAY_SIZE(ns->partitions)) { NS_ERR("too many partitions.\n"); - return -EINVAL; + ret = -EINVAL; + goto free_partition_names; } ns->partitions[i].name = ns_get_partition_name(i); if (!ns->partitions[i].name) { NS_ERR("unable to allocate memory.\n"); - return -ENOMEM; + ret = -ENOMEM; + goto free_partition_names; } ns->partitions[i].offset = next_offset; ns->partitions[i].size = remains; @@ -756,18 +758,25 @@ static int __init ns_init(struct mtd_info *mtd) ret = ns_alloc_device(ns); if (ret) - return ret; + goto free_partition_names; /* Allocate / initialize the internal buffer */ ns->buf.byte = kmalloc(ns->geom.pgszoob, GFP_KERNEL); if (!ns->buf.byte) { NS_ERR("init_nandsim: unable to allocate %u bytes for the internal buffer\n", ns->geom.pgszoob); - return -ENOMEM; + ret = -ENOMEM; + goto free_partition_names; } memset(ns->buf.byte, 0xFF, ns->geom.pgszoob); return 0; + +free_partition_names: + for (i = 0; i < ARRAY_SIZE(ns->partitions); ++i) + kfree(ns->partitions[i].name); + + return ret; } /* -- GitLab From 161246ec066f3eee40feeb02a07992db4c4e86f7 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Mon, 25 May 2020 10:58:43 +0200 Subject: [PATCH 0965/1971] mtd: rawnand: nandsim: Free the allocated device on error in ns_init() The nandsim device is allocated and initialized inside ns_init() by a call to ns_alloc_device(), free it on error. Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200525085851.17682-10-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nandsim.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c index 53889bce81f18..d6e16ddb5de55 100644 --- a/drivers/mtd/nand/raw/nandsim.c +++ b/drivers/mtd/nand/raw/nandsim.c @@ -766,12 +766,14 @@ static int __init ns_init(struct mtd_info *mtd) NS_ERR("init_nandsim: unable to allocate %u bytes for the internal buffer\n", ns->geom.pgszoob); ret = -ENOMEM; - goto free_partition_names; + goto free_device; } memset(ns->buf.byte, 0xFF, ns->geom.pgszoob); return 0; +free_device: + ns_free_device(ns); free_partition_names: for (i = 0; i < ARRAY_SIZE(ns->partitions); ++i) kfree(ns->partitions[i].name); -- GitLab From 72e840a15c66e89583f5bf35a8d890f6c77bb2db Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Mon, 25 May 2020 10:58:44 +0200 Subject: [PATCH 0966/1971] mtd: rawnand: nandsim: Free the partition names in ns_free() ns_free() is the helper that is called symmetrically to ns_init() and so should free the same objects, including the partition names. Now, callers of ns_free() do not need to free this partition names themselves. 
Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200525085851.17682-11-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nandsim.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c index d6e16ddb5de55..4bc5da3be5874 100644 --- a/drivers/mtd/nand/raw/nandsim.c +++ b/drivers/mtd/nand/raw/nandsim.c @@ -786,6 +786,11 @@ free_partition_names: */ static void ns_free(struct nandsim *ns) { + int i; + + for (i = 0; i < ARRAY_SIZE(ns->partitions); ++i) + kfree(ns->partitions[i].name); + kfree(ns->buf.byte); ns_free_device(ns); @@ -2270,7 +2275,7 @@ static int __init ns_init_module(void) { struct nand_chip *chip; struct nandsim *ns; - int ret, i; + int ret; if (bus_width != 8 && bus_width != 16) { NS_ERR("wrong bus width (%d), use only 8 or 16\n", bus_width); @@ -2402,8 +2407,6 @@ static int __init ns_init_module(void) err_exit: ns_free(ns); nand_release(chip); - for (i = 0;i < ARRAY_SIZE(ns->partitions); ++i) - kfree(ns->partitions[i].name); error: kfree(ns); ns_free_lists(); @@ -2420,13 +2423,10 @@ static void __exit ns_cleanup_module(void) { struct nand_chip *chip = mtd_to_nand(nsmtd); struct nandsim *ns = nand_get_controller_data(chip); - int i; ns_debugfs_remove(ns); ns_free(ns); /* Free nandsim private resources */ nand_release(chip); /* Unregister driver */ - for (i = 0;i < ARRAY_SIZE(ns->partitions); ++i) - kfree(ns->partitions[i].name); kfree(ns); /* Free other structures */ ns_free_lists(); } -- GitLab From d6e4fd522461f490f49eda81b7e0fba86141ef20 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Mon, 25 May 2020 10:58:45 +0200 Subject: [PATCH 0967/1971] mtd: rawnand: nandsim: Stop using nand_release() nand_release() basically calls mtd_device_unregister() and nand_cleanup(). Both helpers should be called after MTD device registration and NAND scan, respectively. Drop nand_release() and use the two helpers directly so that they fit the error path and the labels there. Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200525085851.17682-12-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nandsim.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c index 4bc5da3be5874..127ba2081fe99 100644 --- a/drivers/mtd/nand/raw/nandsim.c +++ b/drivers/mtd/nand/raw/nandsim.c @@ -2400,13 +2400,15 @@ static int __init ns_init_module(void) ret = ns_debugfs_create(ns); if (ret) - goto err_exit; + goto unregister_mtd; return 0; +unregister_mtd: + WARN_ON(mtd_device_unregister(nsmtd)); err_exit: ns_free(ns); - nand_release(chip); + nand_cleanup(chip); error: kfree(ns); ns_free_lists(); -- GitLab From 82503f8412dfbcaf03e8a72663d2a416131f0113 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Mon, 25 May 2020 10:58:46 +0200 Subject: [PATCH 0968/1971] mtd: rawnand: nandsim: Use an additional label when freeing the nandsim object Cosmetic change to give a meaning to all labels in this complicated error path. 
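The key fact behind this conversion (and behind dropping nand_release() entirely later in this series) is that nand_release() was nothing more than mtd_device_unregister() on the chip's MTD device followed by nand_cleanup(). Open-coding the two steps lets each error label target exactly one half. A minimal sketch of a teardown path written that way; mtd_device_unregister(), nand_cleanup() and nand_to_mtd() are real helpers, the surrounding function is hypothetical:

  #include <linux/bug.h>
  #include <linux/mtd/mtd.h>
  #include <linux/mtd/rawnand.h>

  /* Hypothetical remove path: the two steps nand_release() used to
   * bundle are now explicit. */
  static void demo_nand_remove(struct nand_chip *chip)
  {
          struct mtd_info *mtd = nand_to_mtd(chip);

          /* First half of what nand_release() did: unregister the MTD
           * device, warning if that fails (e.g. it is still in use). */
          WARN_ON(mtd_device_unregister(mtd));

          /* Second half: free everything set up by nand_scan(). */
          nand_cleanup(chip);
  }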
Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200525085851.17682-13-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nandsim.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c index 127ba2081fe99..a31a8aaab1fe2 100644 --- a/drivers/mtd/nand/raw/nandsim.c +++ b/drivers/mtd/nand/raw/nandsim.c @@ -2387,16 +2387,16 @@ static int __init ns_init_module(void) ret = nand_create_bbt(chip); if (ret) - goto err_exit; + goto free_ns_object; ret = ns_parse_badblocks(ns, nsmtd); if (ret) - goto err_exit; + goto free_ns_object; /* Register NAND partitions */ ret = mtd_device_register(nsmtd, &ns->partitions[0], ns->nbparts); if (ret) - goto err_exit; + goto free_ns_object; ret = ns_debugfs_create(ns); if (ret) @@ -2407,6 +2407,7 @@ static int __init ns_init_module(void) unregister_mtd: WARN_ON(mtd_device_unregister(nsmtd)); err_exit: +free_ns_object: ns_free(ns); nand_cleanup(chip); error: -- GitLab From 5dcb5164b20eaa178dcea9572c8b0dccabef7a25 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Mon, 25 May 2020 10:58:47 +0200 Subject: [PATCH 0969/1971] mtd: rawnand: nandsim: Free erase_block_wear on error Free erase_block_wear on error, which is allocated by ns_setup_wear_reporting(). Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200525085851.17682-14-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nandsim.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c index a31a8aaab1fe2..8511e70ca1e92 100644 --- a/drivers/mtd/nand/raw/nandsim.c +++ b/drivers/mtd/nand/raw/nandsim.c @@ -2383,7 +2383,7 @@ static int __init ns_init_module(void) ret = ns_init(nsmtd); if (ret) - goto err_exit; + goto free_ebw; ret = nand_create_bbt(chip); if (ret) @@ -2409,6 +2409,8 @@ unregister_mtd: err_exit: free_ns_object: ns_free(ns); +free_ebw: + kfree(erase_block_wear); nand_cleanup(chip); error: kfree(ns); -- GitLab From dc2733dea2be78e03df1fbb5816b59d852b2291c Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Mon, 25 May 2020 10:58:48 +0200 Subject: [PATCH 0970/1971] mtd: rawnand: nandsim: Fix the label pointing on nand_cleanup() Drop the generic err_exit. The remaining operation to do from this goto statement is to cleanup the NAND allocations, so rename it. Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200525085851.17682-15-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nandsim.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c index 8511e70ca1e92..8ffd68321175f 100644 --- a/drivers/mtd/nand/raw/nandsim.c +++ b/drivers/mtd/nand/raw/nandsim.c @@ -2366,7 +2366,7 @@ static int __init ns_init_module(void) if (new_size >> overridesize != nsmtd->erasesize) { NS_ERR("overridesize is too big\n"); ret = -EINVAL; - goto err_exit; + goto cleanup_nand; } /* N.B. 
This relies on nand_scan not doing anything with the size before we change it */ @@ -2379,7 +2379,7 @@ static int __init ns_init_module(void) ret = ns_setup_wear_reporting(nsmtd); if (ret) - goto err_exit; + goto cleanup_nand; ret = ns_init(nsmtd); if (ret) @@ -2406,11 +2406,11 @@ static int __init ns_init_module(void) unregister_mtd: WARN_ON(mtd_device_unregister(nsmtd)); -err_exit: free_ns_object: ns_free(ns); free_ebw: kfree(erase_block_wear); +cleanup_nand: nand_cleanup(chip); error: kfree(ns); -- GitLab From 73f2b68c51f4f6c3dbe621d263d6fd32526f2faf Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Mon, 25 May 2020 10:58:49 +0200 Subject: [PATCH 0971/1971] mtd: rawnand: nandsim: Manage lists on error in ns_init_module() Lists are filled with calls to ns_parse_weakblocks(), ns_parse_weakpages() and ns_parse_gravepages(). Handle them in the error path, all at the same time. Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200525085851.17682-16-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nandsim.c | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c index 8ffd68321175f..4492b9a9962e1 100644 --- a/drivers/mtd/nand/raw/nandsim.c +++ b/drivers/mtd/nand/raw/nandsim.c @@ -2273,6 +2273,7 @@ static const struct nand_controller_ops ns_controller_ops = { */ static int __init ns_init_module(void) { + struct list_head *pos, *n; struct nand_chip *chip; struct nandsim *ns; int ret; @@ -2340,11 +2341,11 @@ static int __init ns_init_module(void) ret = ns_parse_weakpages(); if (ret) - goto error; + goto free_wb_list; ret = ns_parse_gravepages(); if (ret) - goto error; + goto free_wp_list; nand_controller_init(&ns->base); ns->base.ops = &ns_controller_ops; @@ -2353,7 +2354,7 @@ static int __init ns_init_module(void) ret = nand_scan(chip, 1); if (ret) { NS_ERR("Could not scan NAND Simulator device\n"); - goto error; + goto free_gp_list; } if (overridesize) { @@ -2412,9 +2413,23 @@ free_ebw: kfree(erase_block_wear); cleanup_nand: nand_cleanup(chip); +free_gp_list: + list_for_each_safe(pos, n, &grave_pages) { + list_del(pos); + kfree(list_entry(pos, struct grave_page, list)); + } +free_wp_list: + list_for_each_safe(pos, n, &weak_pages) { + list_del(pos); + kfree(list_entry(pos, struct weak_page, list)); + } +free_wb_list: + list_for_each_safe(pos, n, &weak_blocks) { + list_del(pos); + kfree(list_entry(pos, struct weak_block, list)); + } error: kfree(ns); - ns_free_lists(); return ret; } -- GitLab From f82d82e202fce7d461c2a6bdb55feaf28448a075 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Mon, 25 May 2020 10:58:50 +0200 Subject: [PATCH 0972/1971] mtd: rawnand: nandsim: Rename a label in ns_init_module() Rename the "error" label to gave it a meaning. 
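A side note on the list_for_each_safe() loops used in this error path: because each iteration deletes and frees the very node the cursor points at, the walk has to use the _safe variant, which stashes the next pointer before the body runs. A self-contained sketch with a hypothetical element type:

  #include <linux/list.h>
  #include <linux/slab.h>

  struct demo_node {                              /* hypothetical list element */
          unsigned int id;
          struct list_head list;
  };

  static void demo_free_list(struct list_head *head)
  {
          struct list_head *pos, *n;

          /* 'n' caches pos->next before the body runs, so freeing the
           * node behind 'pos' is fine.  A plain list_for_each() would
           * read the next pointer out of just-freed memory. */
          list_for_each_safe(pos, n, head) {
                  list_del(pos);
                  kfree(list_entry(pos, struct demo_node, list));
          }
  }

The same loop is often written with list_for_each_entry_safe(), which folds the list_entry() container lookup into the iterator.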
Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200525085851.17682-17-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nandsim.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c index 4492b9a9962e1..7076acfbe0f44 100644 --- a/drivers/mtd/nand/raw/nandsim.c +++ b/drivers/mtd/nand/raw/nandsim.c @@ -2310,7 +2310,7 @@ static int __init ns_init_module(void) default: NS_ERR("bbt has to be 0..2\n"); ret = -EINVAL; - goto error; + goto free_ns_struct; } /* * Perform minimum nandsim structure initialization to handle @@ -2337,7 +2337,7 @@ static int __init ns_init_module(void) ret = ns_parse_weakblocks(); if (ret) - goto error; + goto free_ns_struct; ret = ns_parse_weakpages(); if (ret) @@ -2428,7 +2428,7 @@ free_wb_list: list_del(pos); kfree(list_entry(pos, struct weak_block, list)); } -error: +free_ns_struct: kfree(ns); return ret; -- GitLab From 5724fa7f2e25a0f355063ba9271529fa00c32206 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Mon, 25 May 2020 10:58:51 +0200 Subject: [PATCH 0973/1971] mtd: rawnand: nandsim: Reorganize ns_cleanup_module() Reorganize ns_cleanup_module() to fit the reworked exit path of ns_init_module(). There is no need for a ns_free_lists() function anymore, so drop it. Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200525085851.17682-18-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nandsim.c | 44 +++++++++++++++++----------------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c index 7076acfbe0f44..0a5cb77966cc7 100644 --- a/drivers/mtd/nand/raw/nandsim.c +++ b/drivers/mtd/nand/raw/nandsim.c @@ -978,24 +978,6 @@ static int ns_read_error(unsigned int page_no) return 0; } -static void ns_free_lists(void) -{ - struct list_head *pos, *n; - list_for_each_safe(pos, n, &weak_blocks) { - list_del(pos); - kfree(list_entry(pos, struct weak_block, list)); - } - list_for_each_safe(pos, n, &weak_pages) { - list_del(pos); - kfree(list_entry(pos, struct weak_page, list)); - } - list_for_each_safe(pos, n, &grave_pages) { - list_del(pos); - kfree(list_entry(pos, struct grave_page, list)); - } - kfree(erase_block_wear); -} - static int ns_setup_wear_reporting(struct mtd_info *mtd) { size_t mem; @@ -2443,12 +2425,30 @@ static void __exit ns_cleanup_module(void) { struct nand_chip *chip = mtd_to_nand(nsmtd); struct nandsim *ns = nand_get_controller_data(chip); + struct list_head *pos, *n; ns_debugfs_remove(ns); - ns_free(ns); /* Free nandsim private resources */ - nand_release(chip); /* Unregister driver */ - kfree(ns); /* Free other structures */ - ns_free_lists(); + WARN_ON(mtd_device_unregister(nsmtd)); + ns_free(ns); + kfree(erase_block_wear); + nand_cleanup(chip); + + list_for_each_safe(pos, n, &grave_pages) { + list_del(pos); + kfree(list_entry(pos, struct grave_page, list)); + } + + list_for_each_safe(pos, n, &weak_pages) { + list_del(pos); + kfree(list_entry(pos, struct weak_page, list)); + } + + list_for_each_safe(pos, n, &weak_blocks) { + list_del(pos); + kfree(list_entry(pos, struct weak_block, list)); + } + + kfree(ns); } module_exit(ns_cleanup_module); -- GitLab From 9630a055256de420d528ecde9f35902a9dcd8750 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 15:00:35 +0200 Subject: [PATCH 0974/1971] mtd: rawnand: Stop using nand_release() This helper is not very useful and very often people get confused: they use nand_release() instead of 
nand_cleanup(). Now that all drivers have been converted to do not use nand_release() anymore, let's remove this helper. Signed-off-by: Miquel Raynal Cc: Jonathan Corbet Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-63-miquel.raynal@bootlin.com --- Documentation/driver-api/mtdnand.rst | 6 ++++-- drivers/mtd/nand/raw/nand_base.c | 12 ------------ include/linux/mtd/bbm.h | 2 +- include/linux/mtd/rawnand.h | 2 -- 4 files changed, 5 insertions(+), 17 deletions(-) diff --git a/Documentation/driver-api/mtdnand.rst b/Documentation/driver-api/mtdnand.rst index 55447659b81fa..0bf8d6ec3f54d 100644 --- a/Documentation/driver-api/mtdnand.rst +++ b/Documentation/driver-api/mtdnand.rst @@ -276,8 +276,10 @@ unregisters the partitions in the MTD layer. #ifdef MODULE static void __exit board_cleanup (void) { - /* Release resources, unregister device */ - nand_release (mtd_to_nand(board_mtd)); + /* Unregister device */ + WARN_ON(mtd_device_unregister(board_mtd)); + /* Release resources */ + nand_cleanup(mtd_to_nand(board_mtd)); /* unmap physical address */ iounmap(baseaddr); diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 6a6a0a36b3fd5..8ce31490b9d08 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -6178,18 +6178,6 @@ void nand_cleanup(struct nand_chip *chip) EXPORT_SYMBOL_GPL(nand_cleanup); -/** - * nand_release - [NAND Interface] Unregister the MTD device and free resources - * held by the NAND device - * @chip: NAND chip object - */ -void nand_release(struct nand_chip *chip) -{ - mtd_device_unregister(nand_to_mtd(chip)); - nand_cleanup(chip); -} -EXPORT_SYMBOL_GPL(nand_release); - MODULE_LICENSE("GPL"); MODULE_AUTHOR("Steven J. Hill "); MODULE_AUTHOR("Thomas Gleixner "); diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h index 886e30441c90b..d890805f54944 100644 --- a/include/linux/mtd/bbm.h +++ b/include/linux/mtd/bbm.h @@ -98,7 +98,7 @@ struct nand_bbt_descr { /* * Flag set by nand_create_default_bbt_descr(), marking that the nand_bbt_descr - * was allocated dynamicaly and must be freed in nand_release(). Has no meaning + * was allocated dynamicaly and must be freed in nand_cleanup(). Has no meaning * in nand_chip.bbt_options. */ #define NAND_BBT_DYNAMICSTRUCT 0x80000000 diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 2804c13e56621..b0b358908a47b 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -1398,8 +1398,6 @@ void nand_wait_ready(struct nand_chip *chip); * sucessful nand_scan(). */ void nand_cleanup(struct nand_chip *chip); -/* Unregister the MTD device and calls nand_cleanup() */ -void nand_release(struct nand_chip *chip); /* * External helper for controller drivers that have to implement the WAITRDY -- GitLab From 519494a9afabe68a0a86e4b3e264e3e607193a02 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 26 May 2020 21:56:13 +0200 Subject: [PATCH 0975/1971] mtd: Fix typo in mtd_ooblayout_set_databytes() description Fix a probable copy/paste error: the function works like mtd_ooblayout_set_bytes(), not *_get_bytes(). 
Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200526195633.11543-2-miquel.raynal@bootlin.com --- drivers/mtd/mtdcore.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 50437b4ffe767..8c4dcfa49e355 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -1800,7 +1800,7 @@ EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes); * @start: first ECC byte to set * @nbytes: number of ECC bytes to set * - * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes. + * Works like mtd_ooblayout_set_bytes(), except it acts on free bytes. * * Returns zero on success, a negative error code otherwise. */ -- GitLab From f66a6fd0dc7cc49c891a167937e161114f48a62e Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 26 May 2020 21:56:14 +0200 Subject: [PATCH 0976/1971] mtd: rawnand: Avoid a typedef In new code, the use of typedef is discouraged. Turn this one in the raw NAND core into a regular enumeration. Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200526195633.11543-3-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nand_base.c | 4 ++-- include/linux/mtd/rawnand.h | 6 +++--- include/linux/platform_data/mtd-davinci.h | 2 +- include/linux/platform_data/mtd-nand-s3c2410.h | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 8ce31490b9d08..4bf614d4b7eeb 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -5099,8 +5099,8 @@ static int of_get_nand_ecc_mode(struct device_node *np) /* * For backward compatibility we support few obsoleted values that don't - * have their mappings into nand_ecc_modes_t anymore (they were merged - * with other enums). + * have their mappings into the nand_ecc_mode enum anymore (they were + * merged with other enums). */ if (!strcasecmp(pm, "soft_bch")) return NAND_ECC_SOFT; diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index b0b358908a47b..8f662df02fdd1 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -83,14 +83,14 @@ struct nand_chip; /* * Constants for ECC_MODES */ -typedef enum { +enum nand_ecc_mode { NAND_ECC_NONE, NAND_ECC_SOFT, NAND_ECC_HW, NAND_ECC_HW_SYNDROME, NAND_ECC_HW_OOB_FIRST, NAND_ECC_ON_DIE, -} nand_ecc_modes_t; +}; enum nand_ecc_algo { NAND_ECC_UNKNOWN, @@ -362,7 +362,7 @@ static const struct nand_ecc_caps __name = { \ * @write_oob: function to write chip OOB data */ struct nand_ecc_ctrl { - nand_ecc_modes_t mode; + enum nand_ecc_mode mode; enum nand_ecc_algo algo; int steps; int size; diff --git a/include/linux/platform_data/mtd-davinci.h b/include/linux/platform_data/mtd-davinci.h index 08e639e047e5e..03e92c71b3faa 100644 --- a/include/linux/platform_data/mtd-davinci.h +++ b/include/linux/platform_data/mtd-davinci.h @@ -68,7 +68,7 @@ struct davinci_nand_pdata { /* platform_data */ * Newer ones also support 4-bit ECC, but are awkward * using it with large page chips. */ - nand_ecc_modes_t ecc_mode; + enum nand_ecc_mode ecc_mode; u8 ecc_bits; /* e.g. 
NAND_BUSWIDTH_16 */ diff --git a/include/linux/platform_data/mtd-nand-s3c2410.h b/include/linux/platform_data/mtd-nand-s3c2410.h index deb849bcf0ecb..08675b16f9e16 100644 --- a/include/linux/platform_data/mtd-nand-s3c2410.h +++ b/include/linux/platform_data/mtd-nand-s3c2410.h @@ -49,7 +49,7 @@ struct s3c2410_platform_nand { unsigned int ignore_unset_ecc:1; - nand_ecc_modes_t ecc_mode; + enum nand_ecc_mode ecc_mode; int nr_sets; struct s3c2410_nand_set *sets; -- GitLab From 74e24cd2376d9cc4cfc6edad8610780b79fd5def Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 26 May 2020 21:56:15 +0200 Subject: [PATCH 0977/1971] mtd: rawnand: Drop OOB_FIRST placement scheme This scheme has been introduced for the Davinci controller and means that the OOB area must be read *before* the rest of the data. This has nothing to do with the ECC in OOB placement as it could be understood and most importantly, there is no point in having this function out of the Davinci NAND controller driver. A DT property for this scheme has been added but never used, even by the Davinci driver which only uses this scheme to change the default nand_read_page(). Move the main read_page() helper into the Davinci driver and remove the remaining boilerplate. Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200526195633.11543-4-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/davinci_nand.c | 126 +++++++++++++++++++++------- drivers/mtd/nand/raw/nand_base.c | 81 ------------------ include/linux/mtd/rawnand.h | 1 - 3 files changed, 98 insertions(+), 110 deletions(-) diff --git a/drivers/mtd/nand/raw/davinci_nand.c b/drivers/mtd/nand/raw/davinci_nand.c index 52b87304954b1..d975a62caaa5d 100644 --- a/drivers/mtd/nand/raw/davinci_nand.c +++ b/drivers/mtd/nand/raw/davinci_nand.c @@ -371,6 +371,77 @@ correct: return corrected; } +/** + * nand_read_page_hwecc_oob_first - hw ecc, read oob first + * @chip: nand chip info structure + * @buf: buffer to store read data + * @oob_required: caller requires OOB data read to chip->oob_poi + * @page: page number to read + * + * Hardware ECC for large page chips, require OOB to be read first. For this + * ECC mode, the write_page method is re-used from ECC_HW. These methods + * read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME support with + * multiple ECC steps, follows the "infix ECC" scheme and reads/writes ECC from + * the data area, by overwriting the NAND manufacturer bad block markings. 
+ */ +static int nand_davinci_read_page_hwecc_oob_first(struct nand_chip *chip, + uint8_t *buf, + int oob_required, int page) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + int i, eccsize = chip->ecc.size, ret; + int eccbytes = chip->ecc.bytes; + int eccsteps = chip->ecc.steps; + uint8_t *p = buf; + uint8_t *ecc_code = chip->ecc.code_buf; + uint8_t *ecc_calc = chip->ecc.calc_buf; + unsigned int max_bitflips = 0; + + /* Read the OOB area first */ + ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize); + if (ret) + return ret; + + ret = nand_read_page_op(chip, page, 0, NULL, 0); + if (ret) + return ret; + + ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0, + chip->ecc.total); + if (ret) + return ret; + + for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { + int stat; + + chip->ecc.hwctl(chip, NAND_ECC_READ); + + ret = nand_read_data_op(chip, p, eccsize, false, false); + if (ret) + return ret; + + chip->ecc.calculate(chip, p, &ecc_calc[i]); + + stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL); + if (stat == -EBADMSG && + (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) { + /* check for empty pages with bitflips */ + stat = nand_check_erased_ecc_chunk(p, eccsize, + &ecc_code[i], + eccbytes, NULL, 0, + chip->ecc.strength); + } + + if (stat < 0) { + mtd->ecc_stats.failed++; + } else { + mtd->ecc_stats.corrected += stat; + max_bitflips = max_t(unsigned int, max_bitflips, stat); + } + } + return max_bitflips; +} + /*----------------------------------------------------------------------*/ /* An ECC layout for using 4-bit ECC with small-page flash, storing @@ -530,6 +601,13 @@ static int davinci_nand_attach_chip(struct nand_chip *chip) break; case NAND_ECC_HW: if (pdata->ecc_bits == 4) { + int chunks = mtd->writesize / 512; + + if (!chunks || mtd->oobsize < 16) { + dev_dbg(&info->pdev->dev, "too small\n"); + return -EINVAL; + } + /* * No sanity checks: CPUs must support this, * and the chips may not use NAND_BUSWIDTH_16. @@ -552,6 +630,26 @@ static int davinci_nand_attach_chip(struct nand_chip *chip) info->chip.ecc.bytes = 10; info->chip.ecc.options = NAND_ECC_GENERIC_ERASED_CHECK; info->chip.ecc.algo = NAND_ECC_BCH; + + /* + * Update ECC layout if needed ... for 1-bit HW ECC, the + * default is OK, but it allocates 6 bytes when only 3 + * are needed (for each 512 bytes). For 4-bit HW ECC, + * the default is not usable: 10 bytes needed, not 6. + * + * For small page chips, preserve the manufacturer's + * badblock marking data ... and make sure a flash BBT + * table marker fits in the free bytes. + */ + if (chunks == 1) { + mtd_set_ooblayout(mtd, + &hwecc4_small_ooblayout_ops); + } else if (chunks == 4 || chunks == 8) { + mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops); + info->chip.ecc.read_page = nand_davinci_read_page_hwecc_oob_first; + } else { + return -EIO; + } } else { /* 1bit ecc hamming */ info->chip.ecc.calculate = nand_davinci_calculate_1bit; @@ -567,34 +665,6 @@ static int davinci_nand_attach_chip(struct nand_chip *chip) return -EINVAL; } - /* - * Update ECC layout if needed ... for 1-bit HW ECC, the default - * is OK, but it allocates 6 bytes when only 3 are needed (for - * each 512 bytes). For the 4-bit HW ECC, that default is not - * usable: 10 bytes are needed, not 6. - */ - if (pdata->ecc_bits == 4) { - int chunks = mtd->writesize / 512; - - if (!chunks || mtd->oobsize < 16) { - dev_dbg(&info->pdev->dev, "too small\n"); - return -EINVAL; - } - - /* For small page chips, preserve the manufacturer's - * badblock marking data ... 
and make sure a flash BBT - * table marker fits in the free bytes. - */ - if (chunks == 1) { - mtd_set_ooblayout(mtd, &hwecc4_small_ooblayout_ops); - } else if (chunks == 4 || chunks == 8) { - mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops); - info->chip.ecc.mode = NAND_ECC_HW_OOB_FIRST; - } else { - return -EIO; - } - } - return ret; } diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 4bf614d4b7eeb..d7c791de3e88e 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -3075,76 +3075,6 @@ static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf, return max_bitflips; } -/** - * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first - * @chip: nand chip info structure - * @buf: buffer to store read data - * @oob_required: caller requires OOB data read to chip->oob_poi - * @page: page number to read - * - * Hardware ECC for large page chips, require OOB to be read first. For this - * ECC mode, the write_page method is re-used from ECC_HW. These methods - * read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME support with - * multiple ECC steps, follows the "infix ECC" scheme and reads/writes ECC from - * the data area, by overwriting the NAND manufacturer bad block markings. - */ -static int nand_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf, - int oob_required, int page) -{ - struct mtd_info *mtd = nand_to_mtd(chip); - int i, eccsize = chip->ecc.size, ret; - int eccbytes = chip->ecc.bytes; - int eccsteps = chip->ecc.steps; - uint8_t *p = buf; - uint8_t *ecc_code = chip->ecc.code_buf; - uint8_t *ecc_calc = chip->ecc.calc_buf; - unsigned int max_bitflips = 0; - - /* Read the OOB area first */ - ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize); - if (ret) - return ret; - - ret = nand_read_page_op(chip, page, 0, NULL, 0); - if (ret) - return ret; - - ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0, - chip->ecc.total); - if (ret) - return ret; - - for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { - int stat; - - chip->ecc.hwctl(chip, NAND_ECC_READ); - - ret = nand_read_data_op(chip, p, eccsize, false, false); - if (ret) - return ret; - - chip->ecc.calculate(chip, p, &ecc_calc[i]); - - stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL); - if (stat == -EBADMSG && - (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) { - /* check for empty pages with bitflips */ - stat = nand_check_erased_ecc_chunk(p, eccsize, - &ecc_code[i], eccbytes, - NULL, 0, - chip->ecc.strength); - } - - if (stat < 0) { - mtd->ecc_stats.failed++; - } else { - mtd->ecc_stats.corrected += stat; - max_bitflips = max_t(unsigned int, max_bitflips, stat); - } - } - return max_bitflips; -} - /** * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read * @chip: nand chip info structure @@ -5080,7 +5010,6 @@ static const char * const nand_ecc_modes[] = { [NAND_ECC_SOFT] = "soft", [NAND_ECC_HW] = "hw", [NAND_ECC_HW_SYNDROME] = "hw_syndrome", - [NAND_ECC_HW_OOB_FIRST] = "hw_oob_first", [NAND_ECC_ON_DIE] = "on-die", }; @@ -5829,16 +5758,6 @@ static int nand_scan_tail(struct nand_chip *chip) */ switch (ecc->mode) { - case NAND_ECC_HW_OOB_FIRST: - /* Similar to NAND_ECC_HW, but a separate read_page handle */ - if (!ecc->calculate || !ecc->correct || !ecc->hwctl) { - WARN(1, "No ECC functions supplied; hardware ECC not possible\n"); - ret = -EINVAL; - goto err_nand_manuf_cleanup; - } - if (!ecc->read_page) - ecc->read_page = 
nand_read_page_hwecc_oob_first; - fallthrough; case NAND_ECC_HW: /* Use standard hwecc read page function? */ if (!ecc->read_page) diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 8f662df02fdd1..605d64ead3a85 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -88,7 +88,6 @@ enum nand_ecc_mode { NAND_ECC_SOFT, NAND_ECC_HW, NAND_ECC_HW_SYNDROME, - NAND_ECC_HW_OOB_FIRST, NAND_ECC_ON_DIE, }; -- GitLab From dbc2f2e6d5f940970819f42dcbaf498a8f35a5b7 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 26 May 2020 21:56:17 +0200 Subject: [PATCH 0978/1971] mtd: rawnand: Return an enum from of_get_nand_ecc_algo() There is an enumeration to list ECC algorithm, let's use it instead of returning an int. Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200526195633.11543-6-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nand_base.c | 35 +++++++++++++++++--------------- 1 file changed, 19 insertions(+), 16 deletions(-) diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index d7c791de3e88e..d46d34d0d8bef 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -5043,17 +5043,20 @@ static const char * const nand_ecc_algos[] = { [NAND_ECC_RS] = "rs", }; -static int of_get_nand_ecc_algo(struct device_node *np) +static enum nand_ecc_algo of_get_nand_ecc_algo(struct device_node *np) { + enum nand_ecc_algo ecc_algo; const char *pm; - int err, i; + int err; err = of_property_read_string(np, "nand-ecc-algo", &pm); if (!err) { - for (i = NAND_ECC_HAMMING; i < ARRAY_SIZE(nand_ecc_algos); i++) - if (!strcasecmp(pm, nand_ecc_algos[i])) - return i; - return -ENODEV; + for (ecc_algo = NAND_ECC_HAMMING; + ecc_algo < ARRAY_SIZE(nand_ecc_algos); + ecc_algo++) { + if (!strcasecmp(pm, nand_ecc_algos[ecc_algo])) + return ecc_algo; + } } /* @@ -5061,15 +5064,14 @@ static int of_get_nand_ecc_algo(struct device_node *np) * for some obsoleted values that were specifying ECC algorithm. */ err = of_property_read_string(np, "nand-ecc-mode", &pm); - if (err < 0) - return err; - - if (!strcasecmp(pm, "soft")) - return NAND_ECC_HAMMING; - else if (!strcasecmp(pm, "soft_bch")) - return NAND_ECC_BCH; + if (!err) { + if (!strcasecmp(pm, "soft")) + return NAND_ECC_HAMMING; + else if (!strcasecmp(pm, "soft_bch")) + return NAND_ECC_BCH; + } - return -ENODEV; + return NAND_ECC_UNKNOWN; } static int of_get_nand_ecc_step_size(struct device_node *np) @@ -5114,7 +5116,8 @@ static bool of_get_nand_on_flash_bbt(struct device_node *np) static int nand_dt_init(struct nand_chip *chip) { struct device_node *dn = nand_get_flash_node(chip); - int ecc_mode, ecc_algo, ecc_strength, ecc_step; + enum nand_ecc_algo ecc_algo; + int ecc_mode, ecc_strength, ecc_step; if (!dn) return 0; @@ -5136,7 +5139,7 @@ static int nand_dt_init(struct nand_chip *chip) if (ecc_mode >= 0) chip->ecc.mode = ecc_mode; - if (ecc_algo >= 0) + if (ecc_algo != NAND_ECC_UNKNOWN) chip->ecc.algo = ecc_algo; if (ecc_strength >= 0) -- GitLab From 86f2b225adf4ecd2edfdc8541a7342645556ac3b Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 26 May 2020 21:56:18 +0200 Subject: [PATCH 0979/1971] mtd: rawnand: Add an invalid ECC mode to discriminate with valid ones NAND ECC modes (or providers) have their own enumeration but, unlike their algorithms counterpart, there is no invalid or uninitialized value to discriminate between an error and having chosen a no-ECC situation. Add an "invalid" entry for this purpose. 
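Both of these changes lean on the same idiom: a name table indexed by the enum, strcasecmp() over its entries, and a dedicated sentinel enumerator so "property missing or unrecognized" can be told apart from every valid choice, including "none". A self-contained sketch of that idiom; the demo_* names are invented and only mirror the shape of the rawnand enums:

  #include <linux/kernel.h>
  #include <linux/string.h>

  /* Slot 0 is reserved as the "not specified / not recognized" sentinel,
   * so it can never collide with a real setting such as "none". */
  enum demo_ecc_mode {
          DEMO_ECC_INVALID,
          DEMO_ECC_NONE,
          DEMO_ECC_SOFT,
          DEMO_ECC_HW,
  };

  static const char * const demo_ecc_names[] = {
          [DEMO_ECC_NONE]         = "none",
          [DEMO_ECC_SOFT]         = "soft",
          [DEMO_ECC_HW]           = "hw",
  };

  static enum demo_ecc_mode demo_ecc_mode_from_string(const char *pm)
  {
          enum demo_ecc_mode mode;

          if (!pm)
                  return DEMO_ECC_INVALID;

          /* Start at the first named entry; slot 0 has no name on purpose. */
          for (mode = DEMO_ECC_NONE; mode < ARRAY_SIZE(demo_ecc_names); mode++)
                  if (demo_ecc_names[mode] && !strcasecmp(pm, demo_ecc_names[mode]))
                          return mode;

          return DEMO_ECC_INVALID;
  }

Returning the enum type itself, plus an explicit invalid entry, is what lets callers stop overloading negative ints for "no such setting".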
Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200526195633.11543-7-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nand_base.c | 2 +- include/linux/mtd/rawnand.h | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index d46d34d0d8bef..45124dbb18353 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -5022,7 +5022,7 @@ static int of_get_nand_ecc_mode(struct device_node *np) if (err < 0) return err; - for (i = 0; i < ARRAY_SIZE(nand_ecc_modes); i++) + for (i = NAND_ECC_NONE; i < ARRAY_SIZE(nand_ecc_modes); i++) if (!strcasecmp(pm, nand_ecc_modes[i])) return i; diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 605d64ead3a85..65b1c1c18b41c 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -84,6 +84,7 @@ struct nand_chip; * Constants for ECC_MODES */ enum nand_ecc_mode { + NAND_ECC_INVALID, NAND_ECC_NONE, NAND_ECC_SOFT, NAND_ECC_HW, -- GitLab From 7ae2f3db6167aa7184529fdacd1de72619baf93b Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sat, 30 May 2020 17:22:19 +0100 Subject: [PATCH 0980/1971] KVM: arm64: Flush the instruction cache if not unmapping the VM on reboot On a system with FWB, we don't need to unmap Stage-2 on reboot, as even if userspace takes this opportunity to repaint the whole of memory, FWB ensures that the data side stays consistent even if the guest uses non-cacheable mappings. However, the I-side is not necessarily coherent with the D-side if CTR_EL0.DIC is 0. In this case, invalidate the i-cache to preserve coherency. Reported-by: Alexandru Elisei Reviewed-by: Alexandru Elisei Fixes: 892713e97ca1 ("KVM: arm64: Sidestep stage2_unmap_vm() on vcpu reset when S2FWB is supported") Signed-off-by: Marc Zyngier --- arch/arm64/kvm/arm.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index b0b569f2cdd0a..d6988401c22a5 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -989,11 +989,17 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu, * Ensure a rebooted VM will fault in RAM pages and detect if the * guest MMU is turned off and flush the caches as needed. * - * S2FWB enforces all memory accesses to RAM being cacheable, we - * ensure that the cache is always coherent. + * S2FWB enforces all memory accesses to RAM being cacheable, + * ensuring that the data side is always coherent. We still + * need to invalidate the I-cache though, as FWB does *not* + * imply CTR_EL0.DIC. */ - if (vcpu->arch.has_run_once && !cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) - stage2_unmap_vm(vcpu->kvm); + if (vcpu->arch.has_run_once) { + if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB)) + stage2_unmap_vm(vcpu->kvm); + else + __flush_icache_all(); + } vcpu_reset_hcr(vcpu); -- GitLab From 6bd140e14d9aaa734ec37985b8b20a96c0ece948 Mon Sep 17 00:00:00 2001 From: Stafford Horne Date: Wed, 22 Apr 2020 20:24:11 +0900 Subject: [PATCH 0981/1971] openrisc: Fix issue with argument clobbering for clone/fork Working on the OpenRISC glibc port I found that sometimes clone was working strange. That the tls data argument sent in r7 was always wrong. Further investigation revealed that the arguments were getting clobbered in the entry code. This patch removes the code that writes to the argument registers. This was likely due to some old code hanging around. 
This patch fixes this up for clone and fork. This fork clobber is harmless but also useless so remove. Signed-off-by: Stafford Horne --- arch/openrisc/kernel/entry.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S index e4a78571f8833..c6481cfc5220f 100644 --- a/arch/openrisc/kernel/entry.S +++ b/arch/openrisc/kernel/entry.S @@ -1166,13 +1166,13 @@ ENTRY(__sys_clone) l.movhi r29,hi(sys_clone) l.ori r29,r29,lo(sys_clone) l.j _fork_save_extra_regs_and_call - l.addi r7,r1,0 + l.nop ENTRY(__sys_fork) l.movhi r29,hi(sys_fork) l.ori r29,r29,lo(sys_fork) l.j _fork_save_extra_regs_and_call - l.addi r3,r1,0 + l.nop ENTRY(sys_rt_sigreturn) l.jal _sys_rt_sigreturn -- GitLab From 45679f9b508f10c12a1e93cf2bdccbc1c594aa39 Mon Sep 17 00:00:00 2001 From: Sibi Sankar Date: Thu, 28 May 2020 00:54:18 +0530 Subject: [PATCH 0982/1971] opp: Don't parse icc paths unnecessarily The DT node of the device may contain interconnect paths while the OPP table doesn't have the bandwidth values. There is no need to parse the paths in such cases. Signed-off-by: Sibi Sankar Tested-by: Sibi Sankar Reviewed-by: Sibi Sankar [ Viresh: Support the case of !opp_table and massaged changelog ] Signed-off-by: Viresh Kumar --- drivers/opp/of.c | 45 ++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 44 insertions(+), 1 deletion(-) diff --git a/drivers/opp/of.c b/drivers/opp/of.c index 61fce1284f012..9a5873591a40c 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -332,13 +332,56 @@ free_required_opps: return ret; } +static int _bandwidth_supported(struct device *dev, struct opp_table *opp_table) +{ + struct device_node *np, *opp_np; + struct property *prop; + + if (!opp_table) { + np = of_node_get(dev->of_node); + if (!np) + return -ENODEV; + + opp_np = _opp_of_get_opp_desc_node(np, 0); + of_node_put(np); + } else { + opp_np = of_node_get(opp_table->np); + } + + /* Lets not fail in case we are parsing opp-v1 bindings */ + if (!opp_np) + return 0; + + /* Checking only first OPP is sufficient */ + np = of_get_next_available_child(opp_np, NULL); + if (!np) { + dev_err(dev, "OPP table empty\n"); + return -EINVAL; + } + of_node_put(opp_np); + + prop = of_find_property(np, "opp-peak-kBps", NULL); + of_node_put(np); + + if (!prop || !prop->length) + return 0; + + return 1; +} + int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table) { struct device_node *np; - int ret = 0, i, count, num_paths; + int ret, i, count, num_paths; struct icc_path **paths; + ret = _bandwidth_supported(dev, opp_table); + if (ret <= 0) + return ret; + + ret = 0; + np = of_node_get(dev->of_node); if (!np) return 0; -- GitLab From 1b94b3aed367ff2cdc84d325e0aa9d7cc9e3cf2a Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Fri, 3 Apr 2020 14:31:19 -0500 Subject: [PATCH 0983/1971] tracing: Check state.disabled in synth event trace functions Since trace_state.disabled is set in __synth_event_trace_start() at the same time -ENOENT is returned, don't bother returning -ENOENT - just have callers check trace_state.disabled instead, and avoid the extra return val munging. 
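The pattern here generalizes: rather than overloading an errno value (-ENOENT) to mean "soft-disabled, not really an error" and forcing every caller to translate it back to 0, record the condition in the state block and let callers test it directly. A hypothetical sketch with invented demo_* names:

  #include <linux/types.h>
  #include <linux/errno.h>

  struct demo_trace_state {                       /* hypothetical state block */
          bool disabled;                          /* event is soft-disabled */
  };

  /* Hypothetical start helper: genuine failures return a negative errno;
   * "the event is disabled" is not an error, it is recorded in
   * state->disabled and 0 is returned. */
  static int demo_trace_start(struct demo_trace_state *state, bool enabled)
  {
          state->disabled = false;

          if (!enabled) {
                  state->disabled = true;         /* nothing to do, not a failure */
                  return 0;
          }

          /* ...reserve buffer space etc., which may legitimately fail... */
          return 0;
  }

  static int demo_trace_event(bool enabled)
  {
          struct demo_trace_state state;
          int ret = demo_trace_start(&state, enabled);

          /* Callers no longer map a special errno back to "success":
           * bail out on real errors or when soft-disabled. */
          if (ret || state.disabled)
                  return ret;

          /* ...emit the event payload... */
          return 0;
  }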
Link: http://lkml.kernel.org/r/87315c3889af870e8370e82b76cf48b426d70130.1585941485.git.zanussi@kernel.org Suggested-by: Masami Hiramatsu Signed-off-by: Tom Zanussi Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_events_hist.c | 19 +++---------------- 1 file changed, 3 insertions(+), 16 deletions(-) diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index fcab11cc6833b..5cfe8f998b3ef 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -1845,7 +1845,6 @@ __synth_event_trace_start(struct trace_event_file *file, if (!(file->flags & EVENT_FILE_FL_ENABLED) || trace_trigger_soft_disabled(file)) { trace_state->disabled = true; - ret = -ENOENT; goto out; } @@ -1907,11 +1906,8 @@ int synth_event_trace(struct trace_event_file *file, unsigned int n_vals, ...) int ret; ret = __synth_event_trace_start(file, &state); - if (ret) { - if (ret == -ENOENT) - ret = 0; /* just disabled, not really an error */ + if (ret || state.disabled) return ret; - } if (n_vals != state.event->n_fields) { ret = -EINVAL; @@ -1987,11 +1983,8 @@ int synth_event_trace_array(struct trace_event_file *file, u64 *vals, int ret; ret = __synth_event_trace_start(file, &state); - if (ret) { - if (ret == -ENOENT) - ret = 0; /* just disabled, not really an error */ + if (ret || state.disabled) return ret; - } if (n_vals != state.event->n_fields) { ret = -EINVAL; @@ -2067,16 +2060,10 @@ EXPORT_SYMBOL_GPL(synth_event_trace_array); int synth_event_trace_start(struct trace_event_file *file, struct synth_event_trace_state *trace_state) { - int ret; - if (!trace_state) return -EINVAL; - ret = __synth_event_trace_start(file, trace_state); - if (ret == -ENOENT) - ret = 0; /* just disabled, not really an error */ - - return ret; + return __synth_event_trace_start(file, trace_state); } EXPORT_SYMBOL_GPL(synth_event_trace_start); -- GitLab From 16b585fe71924b3aebaef5548a291021efaf7c7f Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Fri, 3 Apr 2020 14:31:20 -0500 Subject: [PATCH 0984/1971] tracing: Add histogram-design document Add a new Documentation/trace/histogram-design.rst file describing the ftrace histogram low-level design, meant to help developers trying to understand the internals when extending or making use of the hist triggers for higher-level tools. This documentation refers to the hist_debug files implemented by 'tracing: Add hist_debug trace event files for histogram debugging' so users wishing to try out the test examples here should make sure CONFIG_HIST_TRIGGERS_DEBUG is enabled. Link: http://lkml.kernel.org/r/256b29c3274bb89a10157c4a8d1a8bce7e74849e.1585941485.git.zanussi@kernel.org Signed-off-by: Tom Zanussi Signed-off-by: Steven Rostedt (VMware) --- Documentation/trace/histogram-design.rst | 2115 ++++++++++++++++++++++ 1 file changed, 2115 insertions(+) create mode 100644 Documentation/trace/histogram-design.rst diff --git a/Documentation/trace/histogram-design.rst b/Documentation/trace/histogram-design.rst new file mode 100644 index 0000000000000..c246753f0ffc2 --- /dev/null +++ b/Documentation/trace/histogram-design.rst @@ -0,0 +1,2115 @@ +.. SPDX-License-Identifier: GPL-2.0 + +====================== +Histogram Design Notes +====================== + +:Author: Tom Zanussi + +This document attempts to provide a description of how the ftrace +histograms work and how the individual pieces map to the data +structures used to implement them in trace_events_hist.c and +tracing_map.c. 
+ +Note: All the ftrace histogram command examples assume the working + directory is the ftrace /tracing directory. For example:: + + # cd /sys/kernel/debug/tracing + +Also, the histogram output displayed for those commands will be +generally be truncated - only enough to make the point is displayed. + +'hist_debug' trace event files +============================== + +If the kernel is compiled with CONFIG_HIST_TRIGGERS_DEBUG set, an +event file named 'hist_debug' will appear in each event's +subdirectory. This file can be read at any time and will display some +of the hist trigger internals described in this document. Specific +examples and output will be described in test cases below. + +Basic histograms +================ + +First, basic histograms. Below is pretty much the simplest thing you +can do with histograms - create one with a single key on a single +event and cat the output:: + + # echo 'hist:keys=pid' >> events/sched/sched_waking/trigger + + # cat events/sched/sched_waking/hist + + { pid: 18249 } hitcount: 1 + { pid: 13399 } hitcount: 1 + { pid: 17973 } hitcount: 1 + { pid: 12572 } hitcount: 1 + ... + { pid: 10 } hitcount: 921 + { pid: 18255 } hitcount: 1444 + { pid: 25526 } hitcount: 2055 + { pid: 5257 } hitcount: 2055 + { pid: 27367 } hitcount: 2055 + { pid: 1728 } hitcount: 2161 + + Totals: + Hits: 21305 + Entries: 183 + Dropped: 0 + +What this does is create a histogram on the sched_waking event using +pid as a key and with a single value, hitcount, which even if not +explicitly specified, exists for every histogram regardless. + +The hitcount value is a per-bucket value that's automatically +incremented on every hit for the given key, which in this case is the +pid. + +So in this histogram, there's a separate bucket for each pid, and each +bucket contains a value for that bucket, counting the number of times +sched_waking was called for that pid. + +Each histogram is represented by a hist_data struct. + +To keep track of each key and value field in the histogram, hist_data +keeps an array of these fields named fields[]. The fields[] array is +an array containing struct hist_field representations of each +histogram val and key in the histogram (variables are also included +here, but are discussed later). So for the above histogram we have one +key and one value; in this case the one value is the hitcount value, +which all histograms have, regardless of whether they define that +value or not, which the above histogram does not. + +Each struct hist_field contains a pointer to the ftrace_event_field +from the event's trace_event_file along with various bits related to +that such as the size, offset, type, and a hist_field_fn_t function, +which is used to grab the field's data from the ftrace event buffer +(in most cases - some hist_fields such as hitcount don't directly map +to an event field in the trace buffer - in these cases the function +implementation gets its value from somewhere else). The flags field +indicates which type of field it is - key, value, variable, variable +reference, etc., with value being the default. + +The other important hist_data data structure in addition to the +fields[] array is the tracing_map instance created for the histogram, +which is held in the .map member. The tracing_map implements the +lock-free hash table used to implement histograms (see +kernel/trace/tracing_map.h for much more discussion about the +low-level data structures implementing the tracing_map). 
For the +purposes of this discussion, the tracing_map contains a number of +buckets, each bucket corresponding to a particular tracing_map_elt +object hashed by a given histogram key. + +Below is a diagram the first part of which describes the hist_data and +associated key and value fields for the histogram described above. As +you can see, there are two fields in the fields array, one val field +for the hitcount and one key field for the pid key. + +Below that is a diagram of a run-time snapshot of what the tracing_map +might look like for a given run. It attempts to show the +relationships between the hist_data fields and the tracing_map +elements for a couple hypothetical keys and values. + + +------------------+ + | hist_data | + +------------------+ +----------------+ + | .fields[] |---->| val = hitcount |----------------------------+ + +----------------+ +----------------+ | + | .map | | .size | | + +----------------+ +--------------+ | + | .offset | | + +--------------+ | + | .fn() | | + +--------------+ | + . | + . | + . | + +----------------+ <--- n_vals | + | key = pid |----------------------------|--+ + +----------------+ | | + | .size | | | + +--------------+ | | + | .offset | | | + +--------------+ | | + | .fn() | | | + +----------------+ <--- n_fields | | + | unused | | | + +----------------+ | | + | | | | + +--------------+ | | + | | | | + +--------------+ | | + | | | | + +--------------+ | | + n_keys = n_fields - n_vals | | + | | +The hist_data n_vals and n_fields delineate the extent of the fields[] | | +array and separate keys from values for the rest of the code. | | + | | +Below is a run-time representation of the tracing_map part of the | | +histogram, with pointers from various parts of the fields[] array | | +to corresponding parts of the tracing_map. | | + | | +The tracing_map consists of an array of tracing_map_entrys and a set | | +of preallocated tracing_map_elts (abbreviated below as map_entry and | | +map_elt). The total number of map_entrys in the hist_data.map array = | | +map->max_elts (actually map->map_size but only max_elts of those are | | +used. This is a property required by the map_insert() algorithm). | | + | | +If a map_entry is unused, meaning no key has yet hashed into it, its | | +.key value is 0 and its .val pointer is NULL. Once a map_entry has | | +been claimed, the .key value contains the key's hash value and the | | +.val member points to a map_elt containing the full key and an entry | | +for each key or value in the map_elt.fields[] array. There is an | | +entry in the map_elt.fields[] array corresponding to each hist_field | | +in the histogram, and this is where the continually aggregated sums | | +corresponding to each histogram value are kept. | | + | | +The diagram attempts to show the relationship between the | | +hist_data.fields[] and the map_elt.fields[] with the links drawn | | +between diagrams:: | | + | | + +-----------+ | | + | hist_data | | | + +-----------+ | | + | .fields | | | + +---------+ +-----------+ | | + | .map |---->| map_entry | | | + +---------+ +-----------+ | | + | .key |---> 0 | | + +---------+ | | + | .val |---> NULL | | + +-----------+ | | + | map_entry | | | + +-----------+ | | + | .key |---> pid = 999 | | + +---------+ +-----------+ | | + | .val |--->| map_elt | | | + +---------+ +-----------+ | | + . | .key |---> full key * | | + . +---------+ +---------------+ | | + . 
| .fields |--->| .sum (val) |<-+ | + +-----------+ +---------+ | 2345 | | | + | map_entry | +---------------+ | | + +-----------+ | .offset (key) |<----+ + | .key |---> 0 | 0 | | | + +---------+ +---------------+ | | + | .val |---> NULL . | | + +-----------+ . | | + | map_entry | . | | + +-----------+ +---------------+ | | + | .key | | .sum (val) or | | | + +---------+ +---------+ | .offset (key) | | | + | .val |--->| map_elt | +---------------+ | | + +-----------+ +---------+ | .sum (val) or | | | + | map_entry | | .offset (key) | | | + +-----------+ +---------------+ | | + | .key |---> pid = 4444 | | + +---------+ +-----------+ | | + | .val | | map_elt | | | + +---------+ +-----------+ | | + | .key |---> full key * | | + +---------+ +---------------+ | | + | .fields |--->| .sum (val) |<-+ | + +---------+ | 65523 | | + +---------------+ | + | .offset (key) |<----+ + | 0 | + +---------------+ + . + . + . + +---------------+ + | .sum (val) or | + | .offset (key) | + +---------------+ + | .sum (val) or | + | .offset (key) | + +---------------+ + +Abbreviations used in the diagrams:: + + hist_data = struct hist_trigger_data + hist_data.fields = struct hist_field + fn = hist_field_fn_t + map_entry = struct tracing_map_entry + map_elt = struct tracing_map_elt + map_elt.fields = struct tracing_map_field + +Whenever a new event occurs and it has a hist trigger associated with +it, event_hist_trigger() is called. event_hist_trigger() first deals +with the key: for each subkey in the key (in the above example, there +is just one subkey corresponding to pid), the hist_field that +represents that subkey is retrieved from hist_data.fields[] and the +hist_field_fn_t fn() associated with that field, along with the +field's size and offset, is used to grab that subkey's data from the +current trace record. + +Once the complete key has been retrieved, it's used to look that key +up in the tracing_map. If there's no tracing_map_elt associated with +that key, an empty one is claimed and inserted in the map for the new +key. In either case, the tracing_map_elt associated with that key is +returned. + +Once a tracing_map_elt available, hist_trigger_elt_update() is called. +As the name implies, this updates the element, which basically means +updating the element's fields. There's a tracing_map_field associated +with each key and value in the histogram, and each of these correspond +to the key and value hist_fields created when the histogram was +created. hist_trigger_elt_update() goes through each value hist_field +and, as for the keys, uses the hist_field's fn() and size and offset +to grab the field's value from the current trace record. Once it has +that value, it simply adds that value to that field's +continually-updated tracing_map_field.sum member. Some hist_field +fn()s, such as for the hitcount, don't actually grab anything from the +trace record (the hitcount fn() just increments the counter sum by 1), +but the idea is the same. + +Once all the values have been updated, hist_trigger_elt_update() is +done and returns. Note that there are also tracing_map_fields for +each subkey in the key, but hist_trigger_elt_update() doesn't look at +them or update anything - those exist only for sorting, which can +happen later. + +Basic histogram test +-------------------- + +This is a good example to try. 
It produces 3 value fields and 2 key +fields in the output:: + + # echo 'hist:keys=common_pid,call_site.sym:values=bytes_req,bytes_alloc,hitcount' >> events/kmem/kmalloc/trigger + +To see the debug data, cat the kmem/kmalloc's 'hist_debug' file. It +will show the trigger info of the histogram it corresponds to, along +with the address of the hist_data associated with the histogram, which +will become useful in later examples. It then displays the number of +total hist_fields associated with the histogram along with a count of +how many of those correspond to keys and how many correspond to values. + +It then goes on to display details for each field, including the +field's flags and the position of each field in the hist_data's +fields[] array, which is useful information for verifying that things +internally appear correct or not, and which again will become even +more useful in further examples:: + + # cat events/kmem/kmalloc/hist_debug + + # event histogram + # + # trigger info: hist:keys=common_pid,call_site.sym:vals=hitcount,bytes_req,bytes_alloc:sort=hitcount:size=2048 [active] + # + + hist_data: 000000005e48c9a5 + + n_vals: 3 + n_keys: 2 + n_fields: 5 + + val fields: + + hist_data->fields[0]: + flags: + VAL: HIST_FIELD_FL_HITCOUNT + type: u64 + size: 8 + is_signed: 0 + + hist_data->fields[1]: + flags: + VAL: normal u64 value + ftrace_event_field name: bytes_req + type: size_t + size: 8 + is_signed: 0 + + hist_data->fields[2]: + flags: + VAL: normal u64 value + ftrace_event_field name: bytes_alloc + type: size_t + size: 8 + is_signed: 0 + + key fields: + + hist_data->fields[3]: + flags: + HIST_FIELD_FL_KEY + ftrace_event_field name: common_pid + type: int + size: 8 + is_signed: 1 + + hist_data->fields[4]: + flags: + HIST_FIELD_FL_KEY + ftrace_event_field name: call_site + type: unsigned long + size: 8 + is_signed: 0 + +The commands below can be used to clean things up for the next test:: + + # echo '!hist:keys=common_pid,call_site.sym:values=bytes_req,bytes_alloc,hitcount' >> events/kmem/kmalloc/trigger + +Variables +========= + +Variables allow data from one hist trigger to be saved by one hist +trigger and retrieved by another hist trigger. For example, a trigger +on the sched_waking event can capture a timestamp for a particular +pid, and later a sched_switch event that switches to that pid event +can grab the timestamp and use it to calculate a time delta between +the two events:: + + # echo 'hist:keys=pid:ts0=common_timestamp.usecs' >> + events/sched/sched_waking/trigger + + # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0' >> + events/sched/sched_switch/trigger + +In terms of the histogram data structures, variables are implemented +as another type of hist_field and for a given hist trigger are added +to the hist_data.fields[] array just after all the val fields. To +distinguish them from the existing key and val fields, they're given a +new flag type, HIST_FIELD_FL_VAR (abbreviated FL_VAR) and they also +make use of a new .var.idx field member in struct hist_field, which +maps them to an index in a new map_elt.vars[] array added to the +map_elt specifically designed to store and retrieve variable values. +The diagram below shows those new elements and adds a new variable +entry, ts0, corresponding to the ts0 variable in the sched_waking +trigger above. 
+ +sched_waking histogram +---------------------- + + +------------------+ + | hist_data |<-------------------------------------------------------+ + +------------------+ +-------------------+ | + | .fields[] |-->| val = hitcount | | + +----------------+ +-------------------+ | + | .map | | .size | | + +----------------+ +-----------------+ | + | .offset | | + +-----------------+ | + | .fn() | | + +-----------------+ | + | .flags | | + +-----------------+ | + | .var.idx | | + +-------------------+ | + | var = ts0 | | + +-------------------+ | + | .size | | + +-----------------+ | + | .offset | | + +-----------------+ | + | .fn() | | + +-----------------+ | + | .flags & FL_VAR | | + +-----------------+ | + | .var.idx |----------------------------+-+ | + +-----------------+ | | | + . | | | + . | | | + . | | | + +-------------------+ <--- n_vals | | | + | key = pid | | | | + +-------------------+ | | | + | .size | | | | + +-----------------+ | | | + | .offset | | | | + +-----------------+ | | | + | .fn() | | | | + +-----------------+ | | | + | .flags & FL_KEY | | | | + +-----------------+ | | | + | .var.idx | | | | + +-------------------+ <--- n_fields | | | + | unused | | | | + +-------------------+ | | | + | | | | | + +-----------------+ | | | + | | | | | + +-----------------+ | | | + | | | | | + +-----------------+ | | | + | | | | | + +-----------------+ | | | + | | | | | + +-----------------+ | | | + n_keys = n_fields - n_vals | | | + | | | + | | | +This is very similar to the basic case. In the above diagram, we can | | | +see a new .flags member has been added to the struct hist_field | | | +struct, and a new entry added to hist_data.fields representing the ts0 | | | +variable. For a normal val hist_field, .flags is just 0 (modulo | | | +modifier flags), but if the value is defined as a variable, the .flags | | | +contains a set FL_VAR bit. | | | + | | | +As you can see, the ts0 entry's .var.idx member contains the index | | | +into the tracing_map_elts' .vars[] array containing variable values. | | | +This idx is used whenever the value of the variable is set or read. | | | +The map_elt.vars idx assigned to the given variable is assigned and | | | +saved in .var.idx by create_tracing_map_fields() after it calls | | | +tracing_map_add_var(). | | | + | | | +Below is a representation of the histogram at run-time, which | | | +populates the map, along with correspondence to the above hist_data and | | | +hist_field data structures. | | | + | | | +The diagram attempts to show the relationship between the | | | +hist_data.fields[] and the map_elt.fields[] and map_elt.vars[] with | | | +the links drawn between diagrams. For each of the map_elts, you can | | | +see that the .fields[] members point to the .sum or .offset of a key | | | +or val and the .vars[] members point to the value of a variable. The | | | +arrows between the two diagrams show the linkages between those | | | +tracing_map members and the field definitions in the corresponding | | | +hist_data fields[] members. | | | + | | | + +-----------+ | | | + | hist_data | | | | + +-----------+ | | | + | .fields | | | | + +---------+ +-----------+ | | | + | .map |---->| map_entry | | | | + +---------+ +-----------+ | | | + | .key |---> 0 | | | + +---------+ | | | + | .val |---> NULL | | | + +-----------+ | | | + | map_entry | | | | + +-----------+ | | | + | .key |---> pid = 999 | | | + +---------+ +-----------+ | | | + | .val |--->| map_elt | | | | + +---------+ +-----------+ | | | + . | .key |---> full key * | | | + . 
+---------+ +---------------+ | | | + . | .fields |--->| .sum (val) | | | | + . +---------+ | 2345 | | | | + . +--| .vars | +---------------+ | | | + . | +---------+ | .offset (key) | | | | + . | | 0 | | | | + . | +---------------+ | | | + . | . | | | + . | . | | | + . | . | | | + . | +---------------+ | | | + . | | .sum (val) or | | | | + . | | .offset (key) | | | | + . | +---------------+ | | | + . | | .sum (val) or | | | | + . | | .offset (key) | | | | + . | +---------------+ | | | + . | | | | + . +---------------->+---------------+ | | | + . | ts0 |<--+ | | + . | 113345679876 | | | | + . +---------------+ | | | + . | unused | | | | + . | | | | | + . +---------------+ | | | + . . | | | + . . | | | + . . | | | + . +---------------+ | | | + . | unused | | | | + . | | | | | + . +---------------+ | | | + . | unused | | | | + . | | | | | + . +---------------+ | | | + . | | | + +-----------+ | | | + | map_entry | | | | + +-----------+ | | | + | .key |---> pid = 4444 | | | + +---------+ +-----------+ | | | + | .val |--->| map_elt | | | | + +---------+ +-----------+ | | | + . | .key |---> full key * | | | + . +---------+ +---------------+ | | | + . | .fields |--->| .sum (val) | | | | + +---------+ | 2345 | | | | + +--| .vars | +---------------+ | | | + | +---------+ | .offset (key) | | | | + | | 0 | | | | + | +---------------+ | | | + | . | | | + | . | | | + | . | | | + | +---------------+ | | | + | | .sum (val) or | | | | + | | .offset (key) | | | | + | +---------------+ | | | + | | .sum (val) or | | | | + | | .offset (key) | | | | + | +---------------+ | | | + | | | | + | +---------------+ | | | + +---------------->| ts0 |<--+ | | + | 213499240729 | | | + +---------------+ | | + | unused | | | + | | | | + +---------------+ | | + . | | + . | | + . | | + +---------------+ | | + | unused | | | + | | | | + +---------------+ | | + | unused | | | + | | | | + +---------------+ | | + | | +For each used map entry, there's a map_elt pointing to an array of | | +.vars containing the current value of the variables associated with | | +that histogram entry. So in the above, the timestamp associated with | | +pid 999 is 113345679876, and the timestamp variable in the same | | +.var.idx for pid 4444 is 213499240729. | | + | | +sched_switch histogram | | +---------------------- | | + | | +The sched_switch histogram paired with the above sched_waking | | +histogram is shown below. The most important aspect of the | | +sched_switch histogram is that it references a variable on the | | +sched_waking histogram above. | | + | | +The histogram diagram is very similar to the others so far displayed, | | +but it adds variable references. You can see the normal hitcount and | | +key fields along with a new wakeup_lat variable implemented in the | | +same way as the sched_waking ts0 variable, but in addition there's an | | +entry with the new FL_VAR_REF (short for HIST_FIELD_FL_VAR_REF) flag. | | + | | +Associated with the new var ref field are a couple of new hist_field | | +members, var.hist_data and var_ref_idx. For a variable reference, the | | +var.hist_data goes with the var.idx, which together uniquely identify | | +a particular variable on a particular histogram. The var_ref_idx is | | +just the index into the var_ref_vals[] array that caches the values of | | +each variable whenever a hist trigger is updated. Those resulting | | +values are then finally accessed by other code such as trace action | | +code that uses the var_ref_idx values to assign param values. 
| | + | | +The diagram below describes the situation for the sched_switch | | +histogram referred to before: | | + | | + # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0' >> | | + events/sched/sched_switch/trigger | | + | | + +------------------+ | | + | hist_data | | | + +------------------+ +-----------------------+ | | + | .fields[] |-->| val = hitcount | | | + +----------------+ +-----------------------+ | | + | .map | | .size | | | + +----------------+ +---------------------+ | | + +--| .var_refs[] | | .offset | | | + | +----------------+ +---------------------+ | | + | | .fn() | | | + | var_ref_vals[] +---------------------+ | | + | +-------------+ | .flags | | | + | | $ts0 |<---+ +---------------------+ | | + | +-------------+ | | .var.idx | | | + | | | | +---------------------+ | | + | +-------------+ | | .var.hist_data | | | + | | | | +---------------------+ | | + | +-------------+ | | .var_ref_idx | | | + | | | | +-----------------------+ | | + | +-------------+ | | var = wakeup_lat | | | + | . | +-----------------------+ | | + | . | | .size | | | + | . | +---------------------+ | | + | +-------------+ | | .offset | | | + | | | | +---------------------+ | | + | +-------------+ | | .fn() | | | + | | | | +---------------------+ | | + | +-------------+ | | .flags & FL_VAR | | | + | | +---------------------+ | | + | | | .var.idx | | | + | | +---------------------+ | | + | | | .var.hist_data | | | + | | +---------------------+ | | + | | | .var_ref_idx | | | + | | +---------------------+ | | + | | . | | + | | . | | + | | . | | + | | +-----------------------+ <--- n_vals | | + | | | key = pid | | | + | | +-----------------------+ | | + | | | .size | | | + | | +---------------------+ | | + | | | .offset | | | + | | +---------------------+ | | + | | | .fn() | | | + | | +---------------------+ | | + | | | .flags | | | + | | +---------------------+ | | + | | | .var.idx | | | + | | +-----------------------+ <--- n_fields | | + | | | unused | | | + | | +-----------------------+ | | + | | | | | | + | | +---------------------+ | | + | | | | | | + | | +---------------------+ | | + | | | | | | + | | +---------------------+ | | + | | | | | | + | | +---------------------+ | | + | | | | | | + | | +---------------------+ | | + | | n_keys = n_fields - n_vals | | + | | | | + | | | | + | | +-----------------------+ | | + +---------------------->| var_ref = $ts0 | | | + | +-----------------------+ | | + | | .size | | | + | +---------------------+ | | + | | .offset | | | + | +---------------------+ | | + | | .fn() | | | + | +---------------------+ | | + | | .flags & FL_VAR_REF | | | + | +---------------------+ | | + | | .var.idx |--------------------------+ | + | +---------------------+ | + | | .var.hist_data |----------------------------+ + | +---------------------+ + +---| .var_ref_idx | + +---------------------+ + +Abbreviations used in the diagrams:: + + hist_data = struct hist_trigger_data + hist_data.fields = struct hist_field + fn = hist_field_fn_t + FL_KEY = HIST_FIELD_FL_KEY + FL_VAR = HIST_FIELD_FL_VAR + FL_VAR_REF = HIST_FIELD_FL_VAR_REF + +When a hist trigger makes use of a variable, a new hist_field is +created with flag HIST_FIELD_FL_VAR_REF. For a VAR_REF field, the +var.idx and var.hist_data take the same values as the referenced +variable, as well as the referenced variable's size, type, and +is_signed values. The VAR_REF field's .name is set to the name of the +variable it references. 
If a variable reference was created using the +explicit system.event.$var_ref notation, the hist_field's system and +event_name variabls are also set. + +So, in order to handle an event for the sched_switch histogram, +because we have a reference to a variable on another histogram, we +need to resolve all variable references first. This is done via the +resolve_var_refs() calls made from event_hist_trigger(). What this +does is grabs the var_refs[] array from the hist_data representing the +sched_switch histogram. For each one of those, the referenced +variable's var.hist_data along with the current key is used to look up +the corresponding tracing_map_elt in that histogram. Once found, the +referenced variable's var.idx is used to look up the variable's value +using tracing_map_read_var(elt, var.idx), which yields the value of +the variable for that element, ts0 in the case above. Note that both +the hist_fields representing both the variable and the variable +reference have the same var.idx, so this is straightforward. + +Variable and variable reference test +------------------------------------ + +This example creates a variable on the sched_waking event, ts0, and +uses it in the sched_switch trigger. The sched_switch trigger also +creates its own variable, wakeup_lat, but nothing yet uses it:: + + # echo 'hist:keys=pid:ts0=common_timestamp.usecs' >> events/sched/sched_waking/trigger + + # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0' >> events/sched/sched_switch/trigger + +Looking at the sched_waking 'hist_debug' output, in addition to the +normal key and value hist_fields, in the val fields section we see a +field with the HIST_FIELD_FL_VAR flag, which indicates that that field +represents a variable. Note that in addition to the variable name, +contained in the var.name field, it includes the var.idx, which is the +index into the tracing_map_elt.vars[] array of the actual variable +location. Note also that the output shows that variables live in the +same part of the hist_data->fields[] array as normal values:: + + # cat events/sched/sched_waking/hist_debug + + # event histogram + # + # trigger info: hist:keys=pid:vals=hitcount:ts0=common_timestamp.usecs:sort=hitcount:size=2048:clock=global [active] + # + + hist_data: 000000009536f554 + + n_vals: 2 + n_keys: 1 + n_fields: 3 + + val fields: + + hist_data->fields[0]: + flags: + VAL: HIST_FIELD_FL_HITCOUNT + type: u64 + size: 8 + is_signed: 0 + + hist_data->fields[1]: + flags: + HIST_FIELD_FL_VAR + var.name: ts0 + var.idx (into tracing_map_elt.vars[]): 0 + type: u64 + size: 8 + is_signed: 0 + + key fields: + + hist_data->fields[2]: + flags: + HIST_FIELD_FL_KEY + ftrace_event_field name: pid + type: pid_t + size: 8 + is_signed: 1 + +Moving on to the sched_switch trigger hist_debug output, in addition +to the unused wakeup_lat variable, we see a new section displaying +variable references. Variable references are displayed in a separate +section because in addition to to being logically separate from +variables and values, they actually live in a separate hist_data +array, var_refs[]. + +In this example, the sched_switch trigger has a reference to a +variable on the sched_waking trigger, $ts0. Looking at the details, +we can see that the var.hist_data value of the referenced variable +matches the previously displayed sched_waking trigger, and the var.idx +value matches the previously displayed var.idx value for that +variable. 
Also displayed is the var_ref_idx value for that variable +reference, which is where the value for that variable is cached for +use when the trigger is invoked:: + + # cat events/sched/sched_switch/hist_debug + + # event histogram + # + # trigger info: hist:keys=next_pid:vals=hitcount:wakeup_lat=common_timestamp.usecs-$ts0:sort=hitcount:size=2048:clock=global [active] + # + + hist_data: 00000000f4ee8006 + + n_vals: 2 + n_keys: 1 + n_fields: 3 + + val fields: + + hist_data->fields[0]: + flags: + VAL: HIST_FIELD_FL_HITCOUNT + type: u64 + size: 8 + is_signed: 0 + + hist_data->fields[1]: + flags: + HIST_FIELD_FL_VAR + var.name: wakeup_lat + var.idx (into tracing_map_elt.vars[]): 0 + type: u64 + size: 0 + is_signed: 0 + + key fields: + + hist_data->fields[2]: + flags: + HIST_FIELD_FL_KEY + ftrace_event_field name: next_pid + type: pid_t + size: 8 + is_signed: 1 + + variable reference fields: + + hist_data->var_refs[0]: + flags: + HIST_FIELD_FL_VAR_REF + name: ts0 + var.idx (into tracing_map_elt.vars[]): 0 + var.hist_data: 000000009536f554 + var_ref_idx (into hist_data->var_refs[]): 0 + type: u64 + size: 8 + is_signed: 0 + +The commands below can be used to clean things up for the next test:: + + # echo '!hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0' >> events/sched/sched_switch/trigger + + # echo '!hist:keys=pid:ts0=common_timestamp.usecs' >> events/sched/sched_waking/trigger + +Actions and Handlers +==================== + +Adding onto the previous example, we will now do something with that +wakeup_lat variable, namely send it and another field as a synthetic +event. + +The onmatch() action below basically says that whenever we have a +sched_switch event, if we have a matching sched_waking event, in this +case if we have a pid in the sched_waking histogram that matches the +the next_pid field on this sched_switch event, we retrieve the +variables specified in the wakeup_latency() trace action, and use +them to generate a new wakeup_latency event into the trace stream. + +Note that the way the trace handlers such as wakeup_latency() (which +could equivalently be written trace(wakeup_latency,$wakeup_lat,next_pid) +are implemented, the parameters specified to the trace handler must be +variables. In this case, $wakeup_lat is obviously a variable, but +next_pid isn't, since it's just naming a field in the sched_switch +trace event. Since this is something that almost every trace() and +save() action does, a special shortcut is implemented to allow field +names to be used directly in those cases. How it works is that under +the covers, a temporary variable is created for the named field, and +this variable is what is actually passed to the trace handler. In the +code and documentation, this type of variable is called a 'field +variable'. + +Fields on other trace event's histograms can be used as well. In that +case we have to generate a new histogram and an unfortunately named +'synthetic_field' (the use of synthetic here has nothing to do with +synthetic events) and use that special histogram field as a variable. + +The diagram below illustrates the new elements described above in the +context of the sched_switch histogram using the onmatch() handler and +the trace() action. 
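+
+Before walking through the example, the sketch below models the rule
+just described using much-simplified stand-in structures (nothing here
+is the kernel's actual code): a trace() parameter that is already a
+variable only needs a variable reference, while a bare field name such
+as next_pid gets a hidden field variable - a var/val pair kept in
+hist_data->field_vars[] - created for it, which is then referenced
+like any other variable::
+
+  #include <stdio.h>
+  #include <string.h>
+
+  /* Much-simplified stand-ins; not the kernel's definitions. */
+  struct hist_field {
+          char name[32];
+  };
+
+  /*
+   * A field variable pairs a hidden variable with a val field that reads
+   * the named event field from the record (kept in hist_data->field_vars[]).
+   */
+  struct field_var {
+          struct hist_field var;
+          struct hist_field val;
+  };
+
+  int main(void)
+  {
+          /* params of wakeup_latency($wakeup_lat,next_pid) from the text above */
+          const char *params[] = { "$wakeup_lat", "next_pid" };
+          struct field_var field_vars[2];
+          int n_field_vars = 0;
+          unsigned int i;
+
+          for (i = 0; i < 2; i++) {
+                  if (params[i][0] == '$') {
+                          /* already a variable: only a var_ref is needed */
+                          printf("%s: variable, add a var_ref\n", params[i]);
+                          continue;
+                  }
+                  /* bare field name: auto-create a hidden field variable */
+                  strcpy(field_vars[n_field_vars].var.name, params[i]);
+                  strcpy(field_vars[n_field_vars].val.name, params[i]);
+                  printf("%s: field, created field_vars[%d] and a var_ref\n",
+                         params[i], n_field_vars);
+                  n_field_vars++;
+          }
+          return 0;
+  }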
+ +First, we define the wakeup_latency synthetic event:: + + # echo 'wakeup_latency u64 lat; pid_t pid' >> synthetic_events + +Next, the sched_waking hist trigger as before:: + + # echo 'hist:keys=pid:ts0=common_timestamp.usecs' >> + events/sched/sched_waking/trigger + +Finally, we create a hist trigger on the sched_switch event that +generates a wakeup_latency() trace event. In this case we pass +next_pid into the wakeup_latency synthetic event invocation, which +means it will be automatically converted into a field variable:: + + # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0: \ + onmatch(sched.sched_waking).wakeup_latency($wakeup_lat,next_pid)' >> + /sys/kernel/debug/tracing/events/sched/sched_switch/trigger + +The diagram for the sched_switch event is similar to previous examples +but shows the additional field_vars[] array for hist_data and shows +the linkages between the field_vars and the variables and references +created to implement the field variables. The details are discussed +below:: + + +------------------+ + | hist_data | + +------------------+ +-----------------------+ + | .fields[] |-->| val = hitcount | + +----------------+ +-----------------------+ + | .map | | .size | + +----------------+ +---------------------+ + +---| .field_vars[] | | .offset | + | +----------------+ +---------------------+ + |+--| .var_refs[] | | .offset | + || +----------------+ +---------------------+ + || | .fn() | + || var_ref_vals[] +---------------------+ + || +-------------+ | .flags | + || | $ts0 |<---+ +---------------------+ + || +-------------+ | | .var.idx | + || | $next_pid |<-+ | +---------------------+ + || +-------------+ | | | .var.hist_data | + ||+>| $wakeup_lat | | | +---------------------+ + ||| +-------------+ | | | .var_ref_idx | + ||| | | | | +-----------------------+ + ||| +-------------+ | | | var = wakeup_lat | + ||| . | | +-----------------------+ + ||| . | | | .size | + ||| . | | +---------------------+ + ||| +-------------+ | | | .offset | + ||| | | | | +---------------------+ + ||| +-------------+ | | | .fn() | + ||| | | | | +---------------------+ + ||| +-------------+ | | | .flags & FL_VAR | + ||| | | +---------------------+ + ||| | | | .var.idx | + ||| | | +---------------------+ + ||| | | | .var.hist_data | + ||| | | +---------------------+ + ||| | | | .var_ref_idx | + ||| | | +---------------------+ + ||| | | . + ||| | | . + ||| | | . + ||| | | . + ||| +--------------+ | | . + +-->| field_var | | | . + || +--------------+ | | . + || | var | | | . + || +------------+ | | . + || | val | | | . + || +--------------+ | | . + || | field_var | | | . + || +--------------+ | | . + || | var | | | . + || +------------+ | | . + || | val | | | . + || +------------+ | | . + || . | | . + || . | | . + || . 
| | +-----------------------+ <--- n_vals + || +--------------+ | | | key = pid | + || | field_var | | | +-----------------------+ + || +--------------+ | | | .size | + || | var |--+| +---------------------+ + || +------------+ ||| | .offset | + || | val |-+|| +---------------------+ + || +------------+ ||| | .fn() | + || ||| +---------------------+ + || ||| | .flags | + || ||| +---------------------+ + || ||| | .var.idx | + || ||| +---------------------+ <--- n_fields + || ||| + || ||| n_keys = n_fields - n_vals + || ||| +-----------------------+ + || |+->| var = next_pid | + || | | +-----------------------+ + || | | | .size | + || | | +---------------------+ + || | | | .offset | + || | | +---------------------+ + || | | | .flags & FL_VAR | + || | | +---------------------+ + || | | | .var.idx | + || | | +---------------------+ + || | | | .var.hist_data | + || | | +-----------------------+ + || +-->| val for next_pid | + || | | +-----------------------+ + || | | | .size | + || | | +---------------------+ + || | | | .offset | + || | | +---------------------+ + || | | | .fn() | + || | | +---------------------+ + || | | | .flags | + || | | +---------------------+ + || | | | | + || | | +---------------------+ + || | | + || | | + || | | +-----------------------+ + +|------------------|-|>| var_ref = $ts0 | + | | | +-----------------------+ + | | | | .size | + | | | +---------------------+ + | | | | .offset | + | | | +---------------------+ + | | | | .fn() | + | | | +---------------------+ + | | | | .flags & FL_VAR_REF | + | | | +---------------------+ + | | +---| .var_ref_idx | + | | +-----------------------+ + | | | var_ref = $next_pid | + | | +-----------------------+ + | | | .size | + | | +---------------------+ + | | | .offset | + | | +---------------------+ + | | | .fn() | + | | +---------------------+ + | | | .flags & FL_VAR_REF | + | | +---------------------+ + | +-----| .var_ref_idx | + | +-----------------------+ + | | var_ref = $wakeup_lat | + | +-----------------------+ + | | .size | + | +---------------------+ + | | .offset | + | +---------------------+ + | | .fn() | + | +---------------------+ + | | .flags & FL_VAR_REF | + | +---------------------+ + +------------------------| .var_ref_idx | + +---------------------+ + +As you can see, for a field variable, two hist_fields are created: one +representing the variable, in this case next_pid, and one to actually +get the value of the field from the trace stream, like a normal val +field does. These are created separately from normal variable +creation and are saved in the hist_data->field_vars[] array. See +below for how these are used. In addition, a reference hist_field is +also created, which is needed to reference the field variables such as +$next_pid variable in the trace() action. + +Note that $wakeup_lat is also a variable reference, referencing the +value of the expression common_timestamp-$ts0, and so also needs to +have a hist field entry representing that reference created. + +When hist_trigger_elt_update() is called to get the normal key and +value fields, it also calls update_field_vars(), which goes through +each field_var created for the histogram, and available from +hist_data->field_vars and calls val->fn() to get the data from the +current trace record, and then uses the var's var.idx to set the +variable at the var.idx offset in the appropriate tracing_map_elt's +variable at elt->vars[var.idx]. 
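+
+A compact sketch of that update step, again using illustrative
+stand-in types rather than the kernel's definitions, might look like
+the following - each field_var's val.fn() pulls the field out of the
+current record, and the result lands in the tracing_map_elt's vars[]
+slot named by the hidden variable's var.idx::
+
+  #include <stdio.h>
+  #include <stdint.h>
+
+  /* Much-simplified stand-ins; not the kernel's definitions. */
+  struct hist_field {
+          uint64_t (*fn)(void *rec);        /* reads a field from a trace record */
+          struct { unsigned int idx; } var;
+  };
+
+  struct field_var {
+          struct hist_field var;            /* the hidden variable (has var.idx) */
+          struct hist_field val;            /* reads the field from the record */
+  };
+
+  struct map_elt {
+          uint64_t vars[8];
+  };
+
+  /* The update step described above: for each field_var, read the field from
+   * the current record and store the result at elt->vars[var.idx]. */
+  static void update_field_vars(struct field_var **field_vars, int n,
+                                struct map_elt *elt, void *rec)
+  {
+          int i;
+
+          for (i = 0; i < n; i++) {
+                  struct field_var *fv = field_vars[i];
+
+                  elt->vars[fv->var.var.idx] = fv->val.fn(rec);
+          }
+  }
+
+  /* A fake record and accessor standing in for the real event fields. */
+  struct fake_rec { int next_pid; };
+
+  static uint64_t read_next_pid(void *rec)
+  {
+          return ((struct fake_rec *)rec)->next_pid;
+  }
+
+  int main(void)
+  {
+          struct field_var next_pid_fv = {
+                  .var = { .var.idx = 1 },  /* slot assigned at creation time */
+                  .val = { .fn = read_next_pid },
+          };
+          struct field_var *field_vars[] = { &next_pid_fv };
+          struct map_elt elt = { 0 };
+          struct fake_rec rec = { .next_pid = 4444 };
+
+          update_field_vars(field_vars, 1, &elt, &rec);
+          printf("elt.vars[1] = %llu\n", (unsigned long long)elt.vars[1]);
+          return 0;
+  }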
+ +Once all the variables have been updated, resolve_var_refs() can be +called from event_hist_trigger(), and not only can our $ts0 and +$next_pid references be resolved but the $wakeup_lat reference as +well. At this point, the trace() action can simply access the values +assembled in the var_ref_vals[] array and generate the trace event. + +The same process occurs for the field variables associated with the +save() action. + +Abbreviations used in the diagram:: + + hist_data = struct hist_trigger_data + hist_data.fields = struct hist_field + field_var = struct field_var + fn = hist_field_fn_t + FL_KEY = HIST_FIELD_FL_KEY + FL_VAR = HIST_FIELD_FL_VAR + FL_VAR_REF = HIST_FIELD_FL_VAR_REF + +trace() action field variable test +---------------------------------- + +This example adds to the previous test example by finally making use +of the wakeup_lat variable, but in addition also creates a couple of +field variables that then are all passed to the wakeup_latency() trace +action via the onmatch() handler. + +First, we create the wakeup_latency synthetic event:: + + # echo 'wakeup_latency u64 lat; pid_t pid; char comm[16]' >> synthetic_events + +Next, the sched_waking trigger from previous examples:: + + # echo 'hist:keys=pid:ts0=common_timestamp.usecs' >> events/sched/sched_waking/trigger + +Finally, as in the previous test example, we calculate and assign the +wakeup latency using the $ts0 reference from the sched_waking trigger +to the wakeup_lat variable, and finally use it along with a couple +sched_switch event fields, next_pid and next_comm, to generate a +wakeup_latency trace event. The next_pid and next_comm event fields +are automatically converted into field variables for this purpose:: + + # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0:onmatch(sched.sched_waking).wakeup_latency($wakeup_lat,next_pid,next_comm)' >> /sys/kernel/debug/tracing/events/sched/sched_switch/trigger + +The sched_waking hist_debug output shows the same data as in the +previous test example:: + + # cat events/sched/sched_waking/hist_debug + + # event histogram + # + # trigger info: hist:keys=pid:vals=hitcount:ts0=common_timestamp.usecs:sort=hitcount:size=2048:clock=global [active] + # + + hist_data: 00000000d60ff61f + + n_vals: 2 + n_keys: 1 + n_fields: 3 + + val fields: + + hist_data->fields[0]: + flags: + VAL: HIST_FIELD_FL_HITCOUNT + type: u64 + size: 8 + is_signed: 0 + + hist_data->fields[1]: + flags: + HIST_FIELD_FL_VAR + var.name: ts0 + var.idx (into tracing_map_elt.vars[]): 0 + type: u64 + size: 8 + is_signed: 0 + + key fields: + + hist_data->fields[2]: + flags: + HIST_FIELD_FL_KEY + ftrace_event_field name: pid + type: pid_t + size: 8 + is_signed: 1 + +The sched_switch hist_debug output shows the same key and value fields +as in the previous test example - note that wakeup_lat is still in the +val fields section, but that the new field variables are not there - +although the field variables are variables, they're held separately in +the hist_data's field_vars[] array. Although the field variables and +the normal variables are located in separate places, you can see that +the actual variable locations for those variables in the +tracing_map_elt.vars[] do have increasing indices as expected: +wakeup_lat takes the var.idx = 0 slot, while the field variables for +next_pid and next_comm have values var.idx = 1, and var.idx = 2. Note +also that those are the same values displayed for the variable +references corresponding to those variables in the variable reference +fields section. 
Since there are two triggers and thus two hist_data +addresses, those addresses also need to be accounted for when doing +the matching - you can see that the first variable refers to the 0 +var.idx on the previous hist trigger (see the hist_data address +associated with that trigger), while the second variable refers to the +0 var.idx on the sched_switch hist trigger, as do all the remaining +variable references. + +Finally, the action tracking variables section just shows the system +and event name for the onmatch() handler:: + + # cat events/sched/sched_switch/hist_debug + + # event histogram + # + # trigger info: hist:keys=next_pid:vals=hitcount:wakeup_lat=common_timestamp.usecs-$ts0:sort=hitcount:size=2048:clock=global:onmatch(sched.sched_waking).wakeup_latency($wakeup_lat,next_pid,next_comm) [active] + # + + hist_data: 0000000008f551b7 + + n_vals: 2 + n_keys: 1 + n_fields: 3 + + val fields: + + hist_data->fields[0]: + flags: + VAL: HIST_FIELD_FL_HITCOUNT + type: u64 + size: 8 + is_signed: 0 + + hist_data->fields[1]: + flags: + HIST_FIELD_FL_VAR + var.name: wakeup_lat + var.idx (into tracing_map_elt.vars[]): 0 + type: u64 + size: 0 + is_signed: 0 + + key fields: + + hist_data->fields[2]: + flags: + HIST_FIELD_FL_KEY + ftrace_event_field name: next_pid + type: pid_t + size: 8 + is_signed: 1 + + variable reference fields: + + hist_data->var_refs[0]: + flags: + HIST_FIELD_FL_VAR_REF + name: ts0 + var.idx (into tracing_map_elt.vars[]): 0 + var.hist_data: 00000000d60ff61f + var_ref_idx (into hist_data->var_refs[]): 0 + type: u64 + size: 8 + is_signed: 0 + + hist_data->var_refs[1]: + flags: + HIST_FIELD_FL_VAR_REF + name: wakeup_lat + var.idx (into tracing_map_elt.vars[]): 0 + var.hist_data: 0000000008f551b7 + var_ref_idx (into hist_data->var_refs[]): 1 + type: u64 + size: 0 + is_signed: 0 + + hist_data->var_refs[2]: + flags: + HIST_FIELD_FL_VAR_REF + name: next_pid + var.idx (into tracing_map_elt.vars[]): 1 + var.hist_data: 0000000008f551b7 + var_ref_idx (into hist_data->var_refs[]): 2 + type: pid_t + size: 4 + is_signed: 0 + + hist_data->var_refs[3]: + flags: + HIST_FIELD_FL_VAR_REF + name: next_comm + var.idx (into tracing_map_elt.vars[]): 2 + var.hist_data: 0000000008f551b7 + var_ref_idx (into hist_data->var_refs[]): 3 + type: char[16] + size: 256 + is_signed: 0 + + field variables: + + hist_data->field_vars[0]: + + field_vars[0].var: + flags: + HIST_FIELD_FL_VAR + var.name: next_pid + var.idx (into tracing_map_elt.vars[]): 1 + + field_vars[0].val: + ftrace_event_field name: next_pid + type: pid_t + size: 4 + is_signed: 1 + + hist_data->field_vars[1]: + + field_vars[1].var: + flags: + HIST_FIELD_FL_VAR + var.name: next_comm + var.idx (into tracing_map_elt.vars[]): 2 + + field_vars[1].val: + ftrace_event_field name: next_comm + type: char[16] + size: 256 + is_signed: 0 + + action tracking variables (for onmax()/onchange()/onmatch()): + + hist_data->actions[0].match_data.event_system: sched + hist_data->actions[0].match_data.event: sched_waking + +The commands below can be used to clean things up for the next test:: + + # echo '!hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0:onmatch(sched.sched_waking).wakeup_latency($wakeup_lat,next_pid,next_comm)' >> /sys/kernel/debug/tracing/events/sched/sched_switch/trigger + + # echo '!hist:keys=pid:ts0=common_timestamp.usecs' >> events/sched/sched_waking/trigger + + # echo '!wakeup_latency u64 lat; pid_t pid; char comm[16]' >> synthetic_events + +action_data and the trace() action +---------------------------------- + +As mentioned above, 
when the trace() action generates a synthetic +event, all the parameters to the synthetic event either already are +variables or are converted into variables (via field variables), and +finally all those variable values are collected via references to them +into a var_ref_vals[] array. + +The values in the var_ref_vals[] array, however, don't necessarily +follow the same ordering as the synthetic event params. To address +that, struct action_data contains another array, var_ref_idx[] that +maps the trace action params to the var_ref_vals[] values. Below is a +diagram illustrating that for the wakeup_latency() synthetic event:: + + +------------------+ wakeup_latency() + | action_data | event params var_ref_vals[] + +------------------+ +-----------------+ +-----------------+ + | .var_ref_idx[] |--->| $wakeup_lat idx |---+ | | + +----------------+ +-----------------+ | +-----------------+ + | .synth_event | | $next_pid idx |---|-+ | $wakeup_lat val | + +----------------+ +-----------------+ | | +-----------------+ + . | +->| $next_pid val | + . | +-----------------+ + . | . + +-----------------+ | . + | | | . + +-----------------+ | +-----------------+ + +--->| $wakeup_lat val | + +-----------------+ + +Basically, how this ends up getting used in the synthetic event probe +function, trace_event_raw_event_synth(), is as follows:: + + for each field i in .synth_event + val_idx = .var_ref_idx[i] + val = var_ref_vals[val_idx] + +action_data and the onXXX() handlers +------------------------------------ + +The hist trigger onXXX() actions other than onmatch(), such as onmax() +and onchange(), also make use of and internally create hidden +variables. This information is contained in the +action_data.track_data struct, and is also visible in the hist_debug +output as will be described in the example below. + +Typically, the onmax() or onchange() handlers are used in conjunction +with the save() and snapshot() actions. For example:: + + # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0: \ + onmax($wakeup_lat).save(next_comm,prev_pid,prev_prio,prev_comm)' >> + /sys/kernel/debug/tracing/events/sched/sched_switch/trigger + +or:: + + # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0: \ + onmax($wakeup_lat).snapshot()' >> + /sys/kernel/debug/tracing/events/sched/sched_switch/trigger + +save() action field variable test +--------------------------------- + +For this example, instead of generating a synthetic event, the save() +action is used to save field values whenever an onmax() handler +detects that a new max latency has been hit. As in the previous +example, the values being saved are also field values, but in this +case, are kept in a separate hist_data array named save_vars[]. + +As in previous test examples, we set up the sched_waking trigger:: + + # echo 'hist:keys=pid:ts0=common_timestamp.usecs' >> events/sched/sched_waking/trigger + +In this case, however, we set up the sched_switch trigger to save some +sched_switch field values whenever we hit a new maximum latency. 
For +both the onmax() handler and save() action, variables will be created, +which we can use the hist_debug files to examine:: + + # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0:onmax($wakeup_lat).save(next_comm,prev_pid,prev_prio,prev_comm)' >> events/sched/sched_switch/trigger + +The sched_waking hist_debug output shows the same data as in the +previous test examples:: + + # cat events/sched/sched_waking/hist_debug + + # + # trigger info: hist:keys=pid:vals=hitcount:ts0=common_timestamp.usecs:sort=hitcount:size=2048:clock=global [active] + # + + hist_data: 00000000e6290f48 + + n_vals: 2 + n_keys: 1 + n_fields: 3 + + val fields: + + hist_data->fields[0]: + flags: + VAL: HIST_FIELD_FL_HITCOUNT + type: u64 + size: 8 + is_signed: 0 + + hist_data->fields[1]: + flags: + HIST_FIELD_FL_VAR + var.name: ts0 + var.idx (into tracing_map_elt.vars[]): 0 + type: u64 + size: 8 + is_signed: 0 + + key fields: + + hist_data->fields[2]: + flags: + HIST_FIELD_FL_KEY + ftrace_event_field name: pid + type: pid_t + size: 8 + is_signed: 1 + +The output of the sched_switch trigger shows the same val and key +values as before, but also shows a couple new sections. + +First, the action tracking variables section now shows the +actions[].track_data information describing the special tracking +variables and references used to track, in this case, the running +maximum value. The actions[].track_data.var_ref member contains the +reference to the variable being tracked, in this case the $wakeup_lat +variable. In order to perform the onmax() handler function, there +also needs to be a variable that tracks the current maximum by getting +updated whenever a new maximum is hit. In this case, we can see that +an autogenerated veriable named ' __max' has been created and is +visible in the actions[].track_data.track_var variable. + +Finally, in the new 'save action variables' section, we can see that +the 4 params to the save() function have resulted in 4 field variables +being created for the purposes of saving the values of the named +fields when the max is hit. 
These variables are kept in a separate +save_vars[] array off of hist_data, so are displayed in a separate +section:: + + # cat events/sched/sched_switch/hist_debug + + # event histogram + # + # trigger info: hist:keys=next_pid:vals=hitcount:wakeup_lat=common_timestamp.usecs-$ts0:sort=hitcount:size=2048:clock=global:onmax($wakeup_lat).save(next_comm,prev_pid,prev_prio,prev_comm) [active] + # + + hist_data: 0000000057bcd28d + + n_vals: 2 + n_keys: 1 + n_fields: 3 + + val fields: + + hist_data->fields[0]: + flags: + VAL: HIST_FIELD_FL_HITCOUNT + type: u64 + size: 8 + is_signed: 0 + + hist_data->fields[1]: + flags: + HIST_FIELD_FL_VAR + var.name: wakeup_lat + var.idx (into tracing_map_elt.vars[]): 0 + type: u64 + size: 0 + is_signed: 0 + + key fields: + + hist_data->fields[2]: + flags: + HIST_FIELD_FL_KEY + ftrace_event_field name: next_pid + type: pid_t + size: 8 + is_signed: 1 + + variable reference fields: + + hist_data->var_refs[0]: + flags: + HIST_FIELD_FL_VAR_REF + name: ts0 + var.idx (into tracing_map_elt.vars[]): 0 + var.hist_data: 00000000e6290f48 + var_ref_idx (into hist_data->var_refs[]): 0 + type: u64 + size: 8 + is_signed: 0 + + hist_data->var_refs[1]: + flags: + HIST_FIELD_FL_VAR_REF + name: wakeup_lat + var.idx (into tracing_map_elt.vars[]): 0 + var.hist_data: 0000000057bcd28d + var_ref_idx (into hist_data->var_refs[]): 1 + type: u64 + size: 0 + is_signed: 0 + + action tracking variables (for onmax()/onchange()/onmatch()): + + hist_data->actions[0].track_data.var_ref: + flags: + HIST_FIELD_FL_VAR_REF + name: wakeup_lat + var.idx (into tracing_map_elt.vars[]): 0 + var.hist_data: 0000000057bcd28d + var_ref_idx (into hist_data->var_refs[]): 1 + type: u64 + size: 0 + is_signed: 0 + + hist_data->actions[0].track_data.track_var: + flags: + HIST_FIELD_FL_VAR + var.name: __max + var.idx (into tracing_map_elt.vars[]): 1 + type: u64 + size: 8 + is_signed: 0 + + save action variables (save() params): + + hist_data->save_vars[0]: + + save_vars[0].var: + flags: + HIST_FIELD_FL_VAR + var.name: next_comm + var.idx (into tracing_map_elt.vars[]): 2 + + save_vars[0].val: + ftrace_event_field name: next_comm + type: char[16] + size: 256 + is_signed: 0 + + hist_data->save_vars[1]: + + save_vars[1].var: + flags: + HIST_FIELD_FL_VAR + var.name: prev_pid + var.idx (into tracing_map_elt.vars[]): 3 + + save_vars[1].val: + ftrace_event_field name: prev_pid + type: pid_t + size: 4 + is_signed: 1 + + hist_data->save_vars[2]: + + save_vars[2].var: + flags: + HIST_FIELD_FL_VAR + var.name: prev_prio + var.idx (into tracing_map_elt.vars[]): 4 + + save_vars[2].val: + ftrace_event_field name: prev_prio + type: int + size: 4 + is_signed: 1 + + hist_data->save_vars[3]: + + save_vars[3].var: + flags: + HIST_FIELD_FL_VAR + var.name: prev_comm + var.idx (into tracing_map_elt.vars[]): 5 + + save_vars[3].val: + ftrace_event_field name: prev_comm + type: char[16] + size: 256 + is_signed: 0 + +The commands below can be used to clean things up for the next test:: + + # echo '!hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0:onmax($wakeup_lat).save(next_comm,prev_pid,prev_prio,prev_comm)' >> events/sched/sched_switch/trigger + + # echo '!hist:keys=pid:ts0=common_timestamp.usecs' >> events/sched/sched_waking/trigger + +A couple special cases +====================== + +While the above covers the basics of the histogram internals, there +are a couple of special cases that should be discussed, since they +tend to creae even more confusion. 
Those are field variables on other +histograms, and aliases, both described below through example tests +using the hist_debug files. + +Test of field variables on other histograms +------------------------------------------- + +This example is similar to the previous examples, but in this case, +the sched_switch trigger references a hist trigger field on another +event, namely the sched_waking event. In order to accomplish this, a +field variable is created for the other event, but since an existing +histogram can't be used, as existing histograms are immutable, a new +histogram with a matching variable is created and used, and we'll see +that reflected in the hist_debug output shown below. + +First, we create the wakeup_latency synthetic event. Note the +addition of the prio field:: + + # echo 'wakeup_latency u64 lat; pid_t pid; int prio' >> synthetic_events + +As in previous test examples, we set up the sched_waking trigger:: + + # echo 'hist:keys=pid:ts0=common_timestamp.usecs' >> events/sched/sched_waking/trigger + +Here we set up a hist trigger on sched_switch to send a wakeup_latency +event using an onmatch handler naming the sched_waking event. Note +that the third param being passed to the wakeup_latency() is prio, +which is a field name that needs to have a field variable created for +it. There isn't however any prio field on the sched_switch event so +it would seem that it wouldn't be possible to create a field variable +for it. The matching sched_waking event does have a prio field, so it +should be possible to make use of it for this purpose. The problem +with that is that it's not currently possible to define a new variable +on an existing histogram, so it's not possible to add a new prio field +variable to the existing sched_waking histogram. It is however +possible to create an additional new 'matching' sched_waking histogram +for the same event, meaning that it uses the same key and filters, and +define the new prio field variable on that. + +Here's the sched_switch trigger:: + + # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0:onmatch(sched.sched_waking).wakeup_latency($wakeup_lat,next_pid,prio)' >> events/sched/sched_switch/trigger + +And here's the output of the hist_debug information for the +sched_waking hist trigger. Note that there are two histograms +displayed in the output: the first is the normal sched_waking +histogram we've seen in the previous examples, and the second is the +special histogram we created to provide the prio field variable. + +Looking at the second histogram below, we see a variable with the name +synthetic_prio. 
This is the field variable created for the prio field +on that sched_waking histogram:: + + # cat events/sched/sched_waking/hist_debug + + # event histogram + # + # trigger info: hist:keys=pid:vals=hitcount:ts0=common_timestamp.usecs:sort=hitcount:size=2048:clock=global [active] + # + + hist_data: 00000000349570e4 + + n_vals: 2 + n_keys: 1 + n_fields: 3 + + val fields: + + hist_data->fields[0]: + flags: + VAL: HIST_FIELD_FL_HITCOUNT + type: u64 + size: 8 + is_signed: 0 + + hist_data->fields[1]: + flags: + HIST_FIELD_FL_VAR + var.name: ts0 + var.idx (into tracing_map_elt.vars[]): 0 + type: u64 + size: 8 + is_signed: 0 + + key fields: + + hist_data->fields[2]: + flags: + HIST_FIELD_FL_KEY + ftrace_event_field name: pid + type: pid_t + size: 8 + is_signed: 1 + + + # event histogram + # + # trigger info: hist:keys=pid:vals=hitcount:synthetic_prio=prio:sort=hitcount:size=2048 [active] + # + + hist_data: 000000006920cf38 + + n_vals: 2 + n_keys: 1 + n_fields: 3 + + val fields: + + hist_data->fields[0]: + flags: + VAL: HIST_FIELD_FL_HITCOUNT + type: u64 + size: 8 + is_signed: 0 + + hist_data->fields[1]: + flags: + HIST_FIELD_FL_VAR + ftrace_event_field name: prio + var.name: synthetic_prio + var.idx (into tracing_map_elt.vars[]): 0 + type: int + size: 4 + is_signed: 1 + + key fields: + + hist_data->fields[2]: + flags: + HIST_FIELD_FL_KEY + ftrace_event_field name: pid + type: pid_t + size: 8 + is_signed: 1 + +Looking at the sched_switch histogram below, we can see a reference to +the synthetic_prio variable on sched_waking, and looking at the +associated hist_data address we see that it is indeed associated with +the new histogram. Note also that the other references are to a +normal variable, wakeup_lat, and to a normal field variable, next_pid, +the details of which are in the field variables section:: + + # cat events/sched/sched_switch/hist_debug + + # event histogram + # + # trigger info: hist:keys=next_pid:vals=hitcount:wakeup_lat=common_timestamp.usecs-$ts0:sort=hitcount:size=2048:clock=global:onmatch(sched.sched_waking).wakeup_latency($wakeup_lat,next_pid,prio) [active] + # + + hist_data: 00000000a73b67df + + n_vals: 2 + n_keys: 1 + n_fields: 3 + + val fields: + + hist_data->fields[0]: + flags: + VAL: HIST_FIELD_FL_HITCOUNT + type: u64 + size: 8 + is_signed: 0 + + hist_data->fields[1]: + flags: + HIST_FIELD_FL_VAR + var.name: wakeup_lat + var.idx (into tracing_map_elt.vars[]): 0 + type: u64 + size: 0 + is_signed: 0 + + key fields: + + hist_data->fields[2]: + flags: + HIST_FIELD_FL_KEY + ftrace_event_field name: next_pid + type: pid_t + size: 8 + is_signed: 1 + + variable reference fields: + + hist_data->var_refs[0]: + flags: + HIST_FIELD_FL_VAR_REF + name: ts0 + var.idx (into tracing_map_elt.vars[]): 0 + var.hist_data: 00000000349570e4 + var_ref_idx (into hist_data->var_refs[]): 0 + type: u64 + size: 8 + is_signed: 0 + + hist_data->var_refs[1]: + flags: + HIST_FIELD_FL_VAR_REF + name: wakeup_lat + var.idx (into tracing_map_elt.vars[]): 0 + var.hist_data: 00000000a73b67df + var_ref_idx (into hist_data->var_refs[]): 1 + type: u64 + size: 0 + is_signed: 0 + + hist_data->var_refs[2]: + flags: + HIST_FIELD_FL_VAR_REF + name: next_pid + var.idx (into tracing_map_elt.vars[]): 1 + var.hist_data: 00000000a73b67df + var_ref_idx (into hist_data->var_refs[]): 2 + type: pid_t + size: 4 + is_signed: 0 + + hist_data->var_refs[3]: + flags: + HIST_FIELD_FL_VAR_REF + name: synthetic_prio + var.idx (into tracing_map_elt.vars[]): 0 + var.hist_data: 000000006920cf38 + var_ref_idx (into hist_data->var_refs[]): 
3 + type: int + size: 4 + is_signed: 1 + + field variables: + + hist_data->field_vars[0]: + + field_vars[0].var: + flags: + HIST_FIELD_FL_VAR + var.name: next_pid + var.idx (into tracing_map_elt.vars[]): 1 + + field_vars[0].val: + ftrace_event_field name: next_pid + type: pid_t + size: 4 + is_signed: 1 + + action tracking variables (for onmax()/onchange()/onmatch()): + + hist_data->actions[0].match_data.event_system: sched + hist_data->actions[0].match_data.event: sched_waking + +The commands below can be used to clean things up for the next test:: + + # echo '!hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0:onmatch(sched.sched_waking).wakeup_latency($wakeup_lat,next_pid,prio)' >> events/sched/sched_switch/trigger + + # echo '!hist:keys=pid:ts0=common_timestamp.usecs' >> events/sched/sched_waking/trigger + + # echo '!wakeup_latency u64 lat; pid_t pid; int prio' >> synthetic_events + +Alias test +---------- + +This example is very similar to previous examples, but demonstrates +the alias flag. + +First, we create the wakeup_latency synthetic event:: + + # echo 'wakeup_latency u64 lat; pid_t pid; char comm[16]' >> synthetic_events + +Next, we create a sched_waking trigger similar to previous examples, +but in this case we save the pid in the waking_pid variable:: + + # echo 'hist:keys=pid:waking_pid=pid:ts0=common_timestamp.usecs' >> events/sched/sched_waking/trigger + +For the sched_switch trigger, instead of using $waking_pid directly in +the wakeup_latency synthetic event invocation, we create an alias of +$waking_pid named $woken_pid, and use that in the synthetic event +invocation instead:: + + # echo 'hist:keys=next_pid:woken_pid=$waking_pid:wakeup_lat=common_timestamp.usecs-$ts0:onmatch(sched.sched_waking).wakeup_latency($wakeup_lat,$woken_pid,next_comm)' >> events/sched/sched_switch/trigger + +Looking at the sched_waking hist_debug output, in addition to the +normal fields, we can see the waking_pid variable:: + + # cat events/sched/sched_waking/hist_debug + + # event histogram + # + # trigger info: hist:keys=pid:vals=hitcount:waking_pid=pid,ts0=common_timestamp.usecs:sort=hitcount:size=2048:clock=global [active] + # + + hist_data: 00000000a250528c + + n_vals: 3 + n_keys: 1 + n_fields: 4 + + val fields: + + hist_data->fields[0]: + flags: + VAL: HIST_FIELD_FL_HITCOUNT + type: u64 + size: 8 + is_signed: 0 + + hist_data->fields[1]: + flags: + HIST_FIELD_FL_VAR + ftrace_event_field name: pid + var.name: waking_pid + var.idx (into tracing_map_elt.vars[]): 0 + type: pid_t + size: 4 + is_signed: 1 + + hist_data->fields[2]: + flags: + HIST_FIELD_FL_VAR + var.name: ts0 + var.idx (into tracing_map_elt.vars[]): 1 + type: u64 + size: 8 + is_signed: 0 + + key fields: + + hist_data->fields[3]: + flags: + HIST_FIELD_FL_KEY + ftrace_event_field name: pid + type: pid_t + size: 8 + is_signed: 1 + +The sched_switch hist_debug output shows that a variable named +woken_pid has been created but that it also has the +HIST_FIELD_FL_ALIAS flag set. It also has the HIST_FIELD_FL_VAR flag +set, which is why it appears in the val field section. + +Despite that implementation detail, an alias variable is actually more +like a variable reference; in fact it can be thought of as a reference +to a reference. The implementation copies the var_ref->fn() from the +variable reference being referenced, in this case, the waking_pid +fn(), which is hist_field_var_ref() and makes that the fn() of the +alias. 
The hist_field_var_ref() fn() requires the var_ref_idx of the +variable reference it's using, so waking_pid's var_ref_idx is also +copied to the alias. The end result is that when the value of alias +is retrieved, in the end it just does the same thing the original +reference would have done and retrieves the same value from the +var_ref_vals[] array. You can verify this in the output by noting +that the var_ref_idx of the alias, in this case woken_pid, is the same +as the var_ref_idx of the reference, waking_pid, in the variable +reference fields section. + +Additionally, once it gets that value, since it is also a variable, it +then saves that value into its var.idx. So the var.idx of the +woken_pid alias is 0, which it fills with the value from var_ref_idx 0 +when its fn() is called to update itself. You'll also notice that +there's a woken_pid var_ref in the variable refs section. That is the +reference to the woken_pid alias variable, and you can see that it +retrieves the value from the same var.idx as the woken_pid alias, 0, +and then in turn saves that value in its own var_ref_idx slot, 3, and +the value at this position is finally what gets assigned to the +$woken_pid slot in the trace event invocation:: + + # cat events/sched/sched_switch/hist_debug + + # event histogram + # + # trigger info: hist:keys=next_pid:vals=hitcount:woken_pid=$waking_pid,wakeup_lat=common_timestamp.usecs-$ts0:sort=hitcount:size=2048:clock=global:onmatch(sched.sched_waking).wakeup_latency($wakeup_lat,$woken_pid,next_comm) [active] + # + + hist_data: 0000000055d65ed0 + + n_vals: 3 + n_keys: 1 + n_fields: 4 + + val fields: + + hist_data->fields[0]: + flags: + VAL: HIST_FIELD_FL_HITCOUNT + type: u64 + size: 8 + is_signed: 0 + + hist_data->fields[1]: + flags: + HIST_FIELD_FL_VAR + HIST_FIELD_FL_ALIAS + var.name: woken_pid + var.idx (into tracing_map_elt.vars[]): 0 + var_ref_idx (into hist_data->var_refs[]): 0 + type: pid_t + size: 4 + is_signed: 1 + + hist_data->fields[2]: + flags: + HIST_FIELD_FL_VAR + var.name: wakeup_lat + var.idx (into tracing_map_elt.vars[]): 1 + type: u64 + size: 0 + is_signed: 0 + + key fields: + + hist_data->fields[3]: + flags: + HIST_FIELD_FL_KEY + ftrace_event_field name: next_pid + type: pid_t + size: 8 + is_signed: 1 + + variable reference fields: + + hist_data->var_refs[0]: + flags: + HIST_FIELD_FL_VAR_REF + name: waking_pid + var.idx (into tracing_map_elt.vars[]): 0 + var.hist_data: 00000000a250528c + var_ref_idx (into hist_data->var_refs[]): 0 + type: pid_t + size: 4 + is_signed: 1 + + hist_data->var_refs[1]: + flags: + HIST_FIELD_FL_VAR_REF + name: ts0 + var.idx (into tracing_map_elt.vars[]): 1 + var.hist_data: 00000000a250528c + var_ref_idx (into hist_data->var_refs[]): 1 + type: u64 + size: 8 + is_signed: 0 + + hist_data->var_refs[2]: + flags: + HIST_FIELD_FL_VAR_REF + name: wakeup_lat + var.idx (into tracing_map_elt.vars[]): 1 + var.hist_data: 0000000055d65ed0 + var_ref_idx (into hist_data->var_refs[]): 2 + type: u64 + size: 0 + is_signed: 0 + + hist_data->var_refs[3]: + flags: + HIST_FIELD_FL_VAR_REF + name: woken_pid + var.idx (into tracing_map_elt.vars[]): 0 + var.hist_data: 0000000055d65ed0 + var_ref_idx (into hist_data->var_refs[]): 3 + type: pid_t + size: 4 + is_signed: 1 + + hist_data->var_refs[4]: + flags: + HIST_FIELD_FL_VAR_REF + name: next_comm + var.idx (into tracing_map_elt.vars[]): 2 + var.hist_data: 0000000055d65ed0 + var_ref_idx (into hist_data->var_refs[]): 4 + type: char[16] + size: 256 + is_signed: 0 + + field variables: + + hist_data->field_vars[0]: + + 
field_vars[0].var: + flags: + HIST_FIELD_FL_VAR + var.name: next_comm + var.idx (into tracing_map_elt.vars[]): 2 + + field_vars[0].val: + ftrace_event_field name: next_comm + type: char[16] + size: 256 + is_signed: 0 + + action tracking variables (for onmax()/onchange()/onmatch()): + + hist_data->actions[0].match_data.event_system: sched + hist_data->actions[0].match_data.event: sched_waking + +The commands below can be used to clean things up for the next test:: + + # echo '!hist:keys=next_pid:woken_pid=$waking_pid:wakeup_lat=common_timestamp.usecs-$ts0:onmatch(sched.sched_waking).wakeup_latency($wakeup_lat,$woken_pid,next_comm)' >> events/sched/sched_switch/trigger + + # echo '!hist:keys=pid:ts0=common_timestamp.usecs' >> events/sched/sched_waking/trigger + + # echo '!wakeup_latency u64 lat; pid_t pid; char comm[16]' >> synthetic_events -- GitLab From 2d19bd79ae6509858582a9cade739c2e9a4fdca8 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Fri, 3 Apr 2020 14:31:21 -0500 Subject: [PATCH 0985/1971] tracing: Add hist_debug trace event files for histogram debugging Add a new "hist_debug" file for each trace event, which when read will dump out a bunch of internal details about the hist triggers defined on that event. This is normally off but can be enabled by saying 'y' to the new CONFIG_HIST_TRIGGERS_DEBUG config option. This is in support of the new Documentation file describing histogram internals, Documentation/trace/histogram-design.rst, which was requested by developers trying to understand the internals when extending or making use of the hist triggers for higher-level tools. The histogram-design.rst documentation refers to the hist_debug files and demonstrates their use with output in the test examples. Link: http://lkml.kernel.org/r/77914c22b0ba493d9783c53bbfbc6087d6a7e1b1.1585941485.git.zanussi@kernel.org Signed-off-by: Tom Zanussi Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/Kconfig | 23 +++ kernel/trace/trace.h | 1 + kernel/trace/trace_events.c | 4 + kernel/trace/trace_events_hist.c | 273 +++++++++++++++++++++++++++++++ 4 files changed, 301 insertions(+) diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 743647005f64e..2aec9376825f3 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -847,6 +847,29 @@ config KPROBE_EVENT_GEN_TEST If unsure, say N. +config HIST_TRIGGERS_DEBUG + bool "Hist trigger debug support" + depends on HIST_TRIGGERS + help + Add "hist_debug" file for each event, which when read will + dump out a bunch of internal details about the hist triggers + defined on that event. + + The hist_debug file serves a couple of purposes: + + - Helps developers verify that nothing is broken. + + - Provides educational information to support the details + of the hist trigger internals as described by + Documentation/trace/histogram-design.rst. + + The hist_debug output only covers the data structures + related to the histogram definitions themselves and doesn't + display the internals of map buckets or variable values of + running histograms. + + If unsure, say N. 
+ endif # FTRACE endif # TRACING_SUPPORT diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 4eb1d004d5f2b..def769df5bf12 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -1661,6 +1661,7 @@ extern struct list_head ftrace_events; extern const struct file_operations event_trigger_fops; extern const struct file_operations event_hist_fops; +extern const struct file_operations event_hist_debug_fops; extern const struct file_operations event_inject_fops; #ifdef CONFIG_HIST_TRIGGERS diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 242f59e7f17d5..f6f55682d3e2d 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -2208,6 +2208,10 @@ event_create_dir(struct dentry *parent, struct trace_event_file *file) #ifdef CONFIG_HIST_TRIGGERS trace_create_file("hist", 0444, file->dir, file, &event_hist_fops); +#endif +#ifdef CONFIG_HIST_TRIGGERS_DEBUG + trace_create_file("hist_debug", 0444, file->dir, file, + &event_hist_debug_fops); #endif trace_create_file("format", 0444, file->dir, call, &ftrace_event_format_fops); diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 5cfe8f998b3ef..313227da49255 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -6478,6 +6478,279 @@ const struct file_operations event_hist_fops = { .release = single_release, }; +#ifdef CONFIG_HIST_TRIGGERS_DEBUG +static void hist_field_debug_show_flags(struct seq_file *m, + unsigned long flags) +{ + seq_puts(m, " flags:\n"); + + if (flags & HIST_FIELD_FL_KEY) + seq_puts(m, " HIST_FIELD_FL_KEY\n"); + else if (flags & HIST_FIELD_FL_HITCOUNT) + seq_puts(m, " VAL: HIST_FIELD_FL_HITCOUNT\n"); + else if (flags & HIST_FIELD_FL_VAR) + seq_puts(m, " HIST_FIELD_FL_VAR\n"); + else if (flags & HIST_FIELD_FL_VAR_REF) + seq_puts(m, " HIST_FIELD_FL_VAR_REF\n"); + else + seq_puts(m, " VAL: normal u64 value\n"); + + if (flags & HIST_FIELD_FL_ALIAS) + seq_puts(m, " HIST_FIELD_FL_ALIAS\n"); +} + +static int hist_field_debug_show(struct seq_file *m, + struct hist_field *field, unsigned long flags) +{ + if ((field->flags & flags) != flags) { + seq_printf(m, "ERROR: bad flags - %lx\n", flags); + return -EINVAL; + } + + hist_field_debug_show_flags(m, field->flags); + if (field->field) + seq_printf(m, " ftrace_event_field name: %s\n", + field->field->name); + + if (field->flags & HIST_FIELD_FL_VAR) { + seq_printf(m, " var.name: %s\n", field->var.name); + seq_printf(m, " var.idx (into tracing_map_elt.vars[]): %u\n", + field->var.idx); + } + + if (field->flags & HIST_FIELD_FL_ALIAS) + seq_printf(m, " var_ref_idx (into hist_data->var_refs[]): %u\n", + field->var_ref_idx); + + if (field->flags & HIST_FIELD_FL_VAR_REF) { + seq_printf(m, " name: %s\n", field->name); + seq_printf(m, " var.idx (into tracing_map_elt.vars[]): %u\n", + field->var.idx); + seq_printf(m, " var.hist_data: %p\n", field->var.hist_data); + seq_printf(m, " var_ref_idx (into hist_data->var_refs[]): %u\n", + field->var_ref_idx); + if (field->system) + seq_printf(m, " system: %s\n", field->system); + if (field->event_name) + seq_printf(m, " event_name: %s\n", field->event_name); + } + + seq_printf(m, " type: %s\n", field->type); + seq_printf(m, " size: %u\n", field->size); + seq_printf(m, " is_signed: %u\n", field->is_signed); + + return 0; +} + +static int field_var_debug_show(struct seq_file *m, + struct field_var *field_var, unsigned int i, + bool save_vars) +{ + const char *vars_name = save_vars ? 
"save_vars" : "field_vars"; + struct hist_field *field; + int ret = 0; + + seq_printf(m, "\n hist_data->%s[%d]:\n", vars_name, i); + + field = field_var->var; + + seq_printf(m, "\n %s[%d].var:\n", vars_name, i); + + hist_field_debug_show_flags(m, field->flags); + seq_printf(m, " var.name: %s\n", field->var.name); + seq_printf(m, " var.idx (into tracing_map_elt.vars[]): %u\n", + field->var.idx); + + field = field_var->val; + + seq_printf(m, "\n %s[%d].val:\n", vars_name, i); + if (field->field) + seq_printf(m, " ftrace_event_field name: %s\n", + field->field->name); + else { + ret = -EINVAL; + goto out; + } + + seq_printf(m, " type: %s\n", field->type); + seq_printf(m, " size: %u\n", field->size); + seq_printf(m, " is_signed: %u\n", field->is_signed); +out: + return ret; +} + +static int hist_action_debug_show(struct seq_file *m, + struct action_data *data, int i) +{ + int ret = 0; + + if (data->handler == HANDLER_ONMAX || + data->handler == HANDLER_ONCHANGE) { + seq_printf(m, "\n hist_data->actions[%d].track_data.var_ref:\n", i); + ret = hist_field_debug_show(m, data->track_data.var_ref, + HIST_FIELD_FL_VAR_REF); + if (ret) + goto out; + + seq_printf(m, "\n hist_data->actions[%d].track_data.track_var:\n", i); + ret = hist_field_debug_show(m, data->track_data.track_var, + HIST_FIELD_FL_VAR); + if (ret) + goto out; + } + + if (data->handler == HANDLER_ONMATCH) { + seq_printf(m, "\n hist_data->actions[%d].match_data.event_system: %s\n", + i, data->match_data.event_system); + seq_printf(m, " hist_data->actions[%d].match_data.event: %s\n", + i, data->match_data.event); + } +out: + return ret; +} + +static int hist_actions_debug_show(struct seq_file *m, + struct hist_trigger_data *hist_data) +{ + int i, ret = 0; + + if (hist_data->n_actions) + seq_puts(m, "\n action tracking variables (for onmax()/onchange()/onmatch()):\n"); + + for (i = 0; i < hist_data->n_actions; i++) { + struct action_data *action = hist_data->actions[i]; + + ret = hist_action_debug_show(m, action, i); + if (ret) + goto out; + } + + if (hist_data->n_save_vars) + seq_puts(m, "\n save action variables (save() params):\n"); + + for (i = 0; i < hist_data->n_save_vars; i++) { + ret = field_var_debug_show(m, hist_data->save_vars[i], i, true); + if (ret) + goto out; + } +out: + return ret; +} + +static void hist_trigger_debug_show(struct seq_file *m, + struct event_trigger_data *data, int n) +{ + struct hist_trigger_data *hist_data; + int i, ret; + + if (n > 0) + seq_puts(m, "\n\n"); + + seq_puts(m, "# event histogram\n#\n# trigger info: "); + data->ops->print(m, data->ops, data); + seq_puts(m, "#\n\n"); + + hist_data = data->private_data; + + seq_printf(m, "hist_data: %p\n\n", hist_data); + seq_printf(m, " n_vals: %u\n", hist_data->n_vals); + seq_printf(m, " n_keys: %u\n", hist_data->n_keys); + seq_printf(m, " n_fields: %u\n", hist_data->n_fields); + + seq_puts(m, "\n val fields:\n\n"); + + seq_puts(m, " hist_data->fields[0]:\n"); + ret = hist_field_debug_show(m, hist_data->fields[0], + HIST_FIELD_FL_HITCOUNT); + if (ret) + return; + + for (i = 1; i < hist_data->n_vals; i++) { + seq_printf(m, "\n hist_data->fields[%d]:\n", i); + ret = hist_field_debug_show(m, hist_data->fields[i], 0); + if (ret) + return; + } + + seq_puts(m, "\n key fields:\n"); + + for (i = hist_data->n_vals; i < hist_data->n_fields; i++) { + seq_printf(m, "\n hist_data->fields[%d]:\n", i); + ret = hist_field_debug_show(m, hist_data->fields[i], + HIST_FIELD_FL_KEY); + if (ret) + return; + } + + if (hist_data->n_var_refs) + seq_puts(m, "\n variable reference 
fields:\n"); + + for (i = 0; i < hist_data->n_var_refs; i++) { + seq_printf(m, "\n hist_data->var_refs[%d]:\n", i); + ret = hist_field_debug_show(m, hist_data->var_refs[i], + HIST_FIELD_FL_VAR_REF); + if (ret) + return; + } + + if (hist_data->n_field_vars) + seq_puts(m, "\n field variables:\n"); + + for (i = 0; i < hist_data->n_field_vars; i++) { + ret = field_var_debug_show(m, hist_data->field_vars[i], i, false); + if (ret) + return; + } + + ret = hist_actions_debug_show(m, hist_data); + if (ret) + return; +} + +static int hist_debug_show(struct seq_file *m, void *v) +{ + struct event_trigger_data *data; + struct trace_event_file *event_file; + int n = 0, ret = 0; + + mutex_lock(&event_mutex); + + event_file = event_file_data(m->private); + if (unlikely(!event_file)) { + ret = -ENODEV; + goto out_unlock; + } + + list_for_each_entry(data, &event_file->triggers, list) { + if (data->cmd_ops->trigger_type == ETT_EVENT_HIST) + hist_trigger_debug_show(m, data, n++); + } + + out_unlock: + mutex_unlock(&event_mutex); + + return ret; +} + +static int event_hist_debug_open(struct inode *inode, struct file *file) +{ + int ret; + + ret = security_locked_down(LOCKDOWN_TRACEFS); + if (ret) + return ret; + + return single_open(file, hist_debug_show, file); +} + +const struct file_operations event_hist_debug_fops = { + .open = event_hist_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; +#endif + static void hist_field_print(struct seq_file *m, struct hist_field *hist_field) { const char *field_name = hist_field_name(hist_field, 0); -- GitLab From 0906844545a2cbe0fabcff1c78ab9c6b0d0c2ca0 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Thu, 28 May 2020 19:27:08 -0400 Subject: [PATCH 0986/1971] tracing/doc: Fix typos in histogram-design.rst There's a few typos in the histogram-design.rst document that need need to be fixed. Cc: Tom Zanussi Acked-by: Randy Dunlap Signed-off-by: Steven Rostedt (VMware) --- Documentation/trace/histogram-design.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Documentation/trace/histogram-design.rst b/Documentation/trace/histogram-design.rst index c246753f0ffc2..06f5c7e5f2eed 100644 --- a/Documentation/trace/histogram-design.rst +++ b/Documentation/trace/histogram-design.rst @@ -700,7 +700,7 @@ variable, as well as the referenced variable's size, type, and is_signed values. The VAR_REF field's .name is set to the name of the variable it references. If a variable reference was created using the explicit system.event.$var_ref notation, the hist_field's system and -event_name variabls are also set. +event_name variables are also set. So, in order to handle an event for the sched_switch histogram, because we have a reference to a variable on another histogram, we @@ -1445,7 +1445,7 @@ reference to the variable being tracked, in this case the $wakeup_lat variable. In order to perform the onmax() handler function, there also needs to be a variable that tracks the current maximum by getting updated whenever a new maximum is hit. In this case, we can see that -an autogenerated veriable named ' __max' has been created and is +an auto-generated variable named ' __max' has been created and is visible in the actions[].track_data.track_var variable. 
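The tracking just described (the auto-generated ' __max' variable held in
actions[].track_data.track_var) amounts to a per-entry compare-and-update
step. As a rough, standalone illustration only (plain C with invented names,
not the kernel implementation)::

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t u64;   /* stand-in for the kernel's u64 */

    struct max_tracker {
        u64 max_val;    /* plays the role of the auto-generated '__max' variable */
        bool updated;   /* mirrors track_data.updated: a save()/snapshot() is due */
    };

    /* Returns true when var_val sets a new maximum, i.e. when the
     * associated save action should fire for this histogram entry. */
    static bool check_track_val_max(struct max_tracker *t, u64 var_val)
    {
        if (var_val <= t->max_val)
            return false;

        t->max_val = var_val;
        t->updated = true;
        return true;
    }
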
Finally, in the new 'save action variables' section, we can see that @@ -1611,7 +1611,7 @@ A couple special cases While the above covers the basics of the histogram internals, there are a couple of special cases that should be discussed, since they -tend to creae even more confusion. Those are field variables on other +tend to create even more confusion. Those are field variables on other histograms, and aliases, both described below through example tests using the hist_debug files. -- GitLab From 5bbf959de408640a99a378d05107d510b866b3ca Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Mon, 18 May 2020 13:29:24 -0500 Subject: [PATCH 0987/1971] tracing: Fix events.rst section numbering MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The in-kernel trace event API should have its own section, and the duplicate section numbers need fixing as well. Link: https://lkml.kernel.org/r/90ea854dfb728390b50ddf8a8675238973ee014a.camel@kernel.org Reported-by: Li Xinhai Reviewed-by: Li Xinhai Signed-off-by: Tom Zanussi Signed-off-by: Steven Rostedt (VMware) --- Documentation/trace/events.rst | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/Documentation/trace/events.rst b/Documentation/trace/events.rst index 4a2ebe0bd19b8..f792b1959a33a 100644 --- a/Documentation/trace/events.rst +++ b/Documentation/trace/events.rst @@ -527,8 +527,8 @@ The following commands are supported: See Documentation/trace/histogram.rst for details and examples. -6.3 In-kernel trace event API ------------------------------ +7. In-kernel trace event API +============================ In most cases, the command-line interface to trace events is more than sufficient. Sometimes, however, applications might find the need for @@ -560,8 +560,8 @@ following: - tracing synthetic events from in-kernel code - the low-level "dynevent_cmd" API -6.3.1 Dyamically creating synthetic event definitions ------------------------------------------------------ +7.1 Dyamically creating synthetic event definitions +--------------------------------------------------- There are a couple ways to create a new synthetic event from a kernel module or other kernel code. @@ -666,8 +666,8 @@ registered by calling the synth_event_gen_cmd_end() function:: At this point, the event object is ready to be used for tracing new events. -6.3.3 Tracing synthetic events from in-kernel code --------------------------------------------------- +7.2 Tracing synthetic events from in-kernel code +------------------------------------------------ To trace a synthetic event, there are several options. The first option is to trace the event in one call, using synth_event_trace() @@ -678,8 +678,8 @@ synth_event_trace_start() and synth_event_trace_end() along with synth_event_add_next_val() or synth_event_add_val() to add the values piecewise. -6.3.3.1 Tracing a synthetic event all at once ---------------------------------------------- +7.2.1 Tracing a synthetic event all at once +------------------------------------------- To trace a synthetic event all at once, the synth_event_trace() or synth_event_trace_array() functions can be used. 
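For example, assuming a synthetic event named 'schedtest' has already been
created with two fields, a pid_t 'pid' and a u64 'lat' (the field layout here
is invented for illustration), and that its trace_event_file has been looked
up with trace_get_event_file(), an all-at-once trace could look roughly
like::

    struct trace_event_file *file;
    u64 vals[2];
    int ret;

    /* Synthetic events live under the "synthetic" system. */
    file = trace_get_event_file(NULL, "synthetic", "schedtest");
    if (IS_ERR(file))
        return PTR_ERR(file);

    /* Variadic form: all values passed as u64, in field order. */
    ret = synth_event_trace(file, 2, (u64)current->pid, (u64)1000);

    /* Equivalent array form. */
    vals[0] = (u64)current->pid;
    vals[1] = 1000;
    if (!ret)
        ret = synth_event_trace_array(file, vals, 2);

    trace_put_event_file(file);
    return ret;
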
@@ -780,8 +780,8 @@ remove the event:: ret = synth_event_delete("schedtest"); -6.3.3.1 Tracing a synthetic event piecewise -------------------------------------------- +7.2.2 Tracing a synthetic event piecewise +----------------------------------------- To trace a synthetic using the piecewise method described above, the synth_event_trace_start() function is used to 'open' the synthetic @@ -864,8 +864,8 @@ Note that synth_event_trace_end() must be called at the end regardless of whether any of the add calls failed (say due to a bad field name being passed in). -6.3.4 Dyamically creating kprobe and kretprobe event definitions ----------------------------------------------------------------- +7.3 Dyamically creating kprobe and kretprobe event definitions +-------------------------------------------------------------- To create a kprobe or kretprobe trace event from kernel code, the kprobe_event_gen_cmd_start() or kretprobe_event_gen_cmd_start() @@ -941,8 +941,8 @@ used to give the kprobe event file back and delete the event:: ret = kprobe_event_delete("gen_kprobe_test"); -6.3.4 The "dynevent_cmd" low-level API --------------------------------------- +7.4 The "dynevent_cmd" low-level API +------------------------------------ Both the in-kernel synthetic event and kprobe interfaces are built on top of a lower-level "dynevent_cmd" interface. This interface is -- GitLab From 726721a51838e3983023f906580722fc83f804ee Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Thu, 28 May 2020 14:32:37 -0500 Subject: [PATCH 0988/1971] tracing: Move synthetic events to a separate file With the addition of the in-kernel synthetic event API, synthetic events are no longer specifically tied to the histogram triggers. The synthetic event code is also making trace_event_hist.c very bloated, so for those reasons, move it to a separate file, trace_events_synth.c, along with a new trace_synth.h header file. Because synthetic events are now independent from hist triggers, add a new CONFIG_SYNTH_EVENTS config option, and have CONFIG_HIST_TRIGGERS select it, and have CONFIG_SYNTH_EVENT_GEN_TEST depend on it. Link: http://lkml.kernel.org/r/4d1fa1f85ed5982706ac44844ac92451dcb04715.1590693308.git.zanussi@kernel.org Signed-off-by: Tom Zanussi Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/Kconfig | 20 +- kernel/trace/Makefile | 1 + kernel/trace/trace_events_hist.c | 2071 ++--------------------------- kernel/trace/trace_events_synth.c | 1789 +++++++++++++++++++++++++ kernel/trace/trace_synth.h | 36 + 5 files changed, 1989 insertions(+), 1928 deletions(-) create mode 100644 kernel/trace/trace_events_synth.c create mode 100644 kernel/trace/trace_synth.h diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 2aec9376825f3..75407d5dc83a4 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -623,12 +623,30 @@ config TRACING_MAP generally used outside of that context, and is normally selected by tracers that use it. +config SYNTH_EVENTS + bool "Synthetic trace events" + select TRACING + select DYNAMIC_EVENTS + default n + help + Synthetic events are user-defined trace events that can be + used to combine data from other trace events or in fact any + data source. Synthetic events can be generated indirectly + via the trace() action of histogram triggers or directly + by way of an in-kernel API. + + See Documentation/trace/events.rst or + Documentation/trace/histogram.rst for details and examples. + + If in doubt, say N. 
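To make the "in-kernel API" route mentioned in the help text concrete, here
is a hedged sketch (the event name and fields are invented for illustration)
of how a module built with SYNTH_EVENTS enabled could define and later remove
a synthetic event::

    static struct synth_field_desc sample_fields[] = {
        { .type = "pid_t",    .name = "pid"  },
        { .type = "u64",      .name = "lat"  },
        { .type = "char[16]", .name = "comm" },
    };

    static int __init sample_synth_init(void)
    {
        /* Creates events/synthetic/sample_synth; the module pointer
         * must be non-NULL when creating the event from a module. */
        return synth_event_create("sample_synth", sample_fields,
                                  ARRAY_SIZE(sample_fields), THIS_MODULE);
    }

    static void __exit sample_synth_exit(void)
    {
        synth_event_delete("sample_synth");
    }
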
+ config HIST_TRIGGERS bool "Histogram triggers" depends on ARCH_HAVE_NMI_SAFE_CMPXCHG select TRACING_MAP select TRACING select DYNAMIC_EVENTS + select SYNTH_EVENTS default n help Hist triggers allow one or more arbitrary trace event fields @@ -824,7 +842,7 @@ config PREEMPTIRQ_DELAY_TEST config SYNTH_EVENT_GEN_TEST tristate "Test module for in-kernel synthetic event generation" - depends on HIST_TRIGGERS + depends on SYNTH_EVENTS help This option creates a test module to check the base functionality of in-kernel synthetic event definition and diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index f9dcd19165fa2..1d8aaa5464123 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile @@ -72,6 +72,7 @@ endif obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o obj-$(CONFIG_EVENT_TRACING) += trace_events_trigger.o obj-$(CONFIG_TRACE_EVENT_INJECT) += trace_events_inject.o +obj-$(CONFIG_SYNTH_EVENTS) += trace_events_synth.o obj-$(CONFIG_HIST_TRIGGERS) += trace_events_hist.o obj-$(CONFIG_BPF_EVENTS) += bpf_trace.o obj-$(CONFIG_KPROBE_EVENTS) += trace_kprobe.o diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 313227da49255..0b933546142e8 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -19,13 +19,7 @@ #include #include "tracing_map.h" -#include "trace.h" -#include "trace_dynevent.h" - -#define SYNTH_SYSTEM "synthetic" -#define SYNTH_FIELDS_MAX 32 - -#define STR_VAR_LEN_MAX 32 /* must be multiple of sizeof(u64) */ +#include "trace_synth.h" #define ERRORS \ C(NONE, "No error"), \ @@ -380,69 +374,6 @@ struct hist_trigger_data { unsigned int n_save_var_str; }; -static int create_synth_event(int argc, const char **argv); -static int synth_event_show(struct seq_file *m, struct dyn_event *ev); -static int synth_event_release(struct dyn_event *ev); -static bool synth_event_is_busy(struct dyn_event *ev); -static bool synth_event_match(const char *system, const char *event, - int argc, const char **argv, struct dyn_event *ev); - -static struct dyn_event_operations synth_event_ops = { - .create = create_synth_event, - .show = synth_event_show, - .is_busy = synth_event_is_busy, - .free = synth_event_release, - .match = synth_event_match, -}; - -struct synth_field { - char *type; - char *name; - size_t size; - unsigned int offset; - bool is_signed; - bool is_string; -}; - -struct synth_event { - struct dyn_event devent; - int ref; - char *name; - struct synth_field **fields; - unsigned int n_fields; - unsigned int n_u64; - struct trace_event_class class; - struct trace_event_call call; - struct tracepoint *tp; - struct module *mod; -}; - -static bool is_synth_event(struct dyn_event *ev) -{ - return ev->ops == &synth_event_ops; -} - -static struct synth_event *to_synth_event(struct dyn_event *ev) -{ - return container_of(ev, struct synth_event, devent); -} - -static bool synth_event_is_busy(struct dyn_event *ev) -{ - struct synth_event *event = to_synth_event(ev); - - return event->ref != 0; -} - -static bool synth_event_match(const char *system, const char *event, - int argc, const char **argv, struct dyn_event *ev) -{ - struct synth_event *sev = to_synth_event(ev); - - return strcmp(sev->name, event) == 0 && - (!system || strcmp(system, SYNTH_SYSTEM) == 0); -} - struct action_data; typedef void (*action_fn_t) (struct hist_trigger_data *hist_data, @@ -490,1882 +421,202 @@ struct action_data { union { struct { char *event; - char *event_system; - } match_data; - - struct { - /* - * var_str contains the $-unstripped 
variable - * name referenced by var_ref, and used when - * printing the action. Because var_ref - * creation is deferred to create_actions(), - * we need a per-action way to save it until - * then, thus var_str. - */ - char *var_str; - - /* - * var_ref refers to the variable being - * tracked e.g onmax($var). - */ - struct hist_field *var_ref; - - /* - * track_var contains the 'invisible' tracking - * variable created to keep the current - * e.g. max value. - */ - struct hist_field *track_var; - - check_track_val_fn_t check_val; - action_fn_t save_data; - } track_data; - }; -}; - -struct track_data { - u64 track_val; - bool updated; - - unsigned int key_len; - void *key; - struct tracing_map_elt elt; - - struct action_data *action_data; - struct hist_trigger_data *hist_data; -}; - -struct hist_elt_data { - char *comm; - u64 *var_ref_vals; - char *field_var_str[SYNTH_FIELDS_MAX]; -}; - -struct snapshot_context { - struct tracing_map_elt *elt; - void *key; -}; - -static void track_data_free(struct track_data *track_data) -{ - struct hist_elt_data *elt_data; - - if (!track_data) - return; - - kfree(track_data->key); - - elt_data = track_data->elt.private_data; - if (elt_data) { - kfree(elt_data->comm); - kfree(elt_data); - } - - kfree(track_data); -} - -static struct track_data *track_data_alloc(unsigned int key_len, - struct action_data *action_data, - struct hist_trigger_data *hist_data) -{ - struct track_data *data = kzalloc(sizeof(*data), GFP_KERNEL); - struct hist_elt_data *elt_data; - - if (!data) - return ERR_PTR(-ENOMEM); - - data->key = kzalloc(key_len, GFP_KERNEL); - if (!data->key) { - track_data_free(data); - return ERR_PTR(-ENOMEM); - } - - data->key_len = key_len; - data->action_data = action_data; - data->hist_data = hist_data; - - elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL); - if (!elt_data) { - track_data_free(data); - return ERR_PTR(-ENOMEM); - } - data->elt.private_data = elt_data; - - elt_data->comm = kzalloc(TASK_COMM_LEN, GFP_KERNEL); - if (!elt_data->comm) { - track_data_free(data); - return ERR_PTR(-ENOMEM); - } - - return data; -} - -static char last_cmd[MAX_FILTER_STR_VAL]; -static char last_cmd_loc[MAX_FILTER_STR_VAL]; - -static int errpos(char *str) -{ - return err_pos(last_cmd, str); -} - -static void last_cmd_set(struct trace_event_file *file, char *str) -{ - const char *system = NULL, *name = NULL; - struct trace_event_call *call; - - if (!str) - return; - - strcpy(last_cmd, "hist:"); - strncat(last_cmd, str, MAX_FILTER_STR_VAL - 1 - sizeof("hist:")); - - if (file) { - call = file->event_call; - - system = call->class->system; - if (system) { - name = trace_event_name(call); - if (!name) - system = NULL; - } - } - - if (system) - snprintf(last_cmd_loc, MAX_FILTER_STR_VAL, "hist:%s:%s", system, name); -} - -static void hist_err(struct trace_array *tr, u8 err_type, u8 err_pos) -{ - tracing_log_err(tr, last_cmd_loc, last_cmd, err_text, - err_type, err_pos); -} - -static void hist_err_clear(void) -{ - last_cmd[0] = '\0'; - last_cmd_loc[0] = '\0'; -} - -struct synth_trace_event { - struct trace_entry ent; - u64 fields[]; -}; - -static int synth_event_define_fields(struct trace_event_call *call) -{ - struct synth_trace_event trace; - int offset = offsetof(typeof(trace), fields); - struct synth_event *event = call->data; - unsigned int i, size, n_u64; - char *name, *type; - bool is_signed; - int ret = 0; - - for (i = 0, n_u64 = 0; i < event->n_fields; i++) { - size = event->fields[i]->size; - is_signed = event->fields[i]->is_signed; - type = 
event->fields[i]->type; - name = event->fields[i]->name; - ret = trace_define_field(call, type, name, offset, size, - is_signed, FILTER_OTHER); - if (ret) - break; - - event->fields[i]->offset = n_u64; - - if (event->fields[i]->is_string) { - offset += STR_VAR_LEN_MAX; - n_u64 += STR_VAR_LEN_MAX / sizeof(u64); - } else { - offset += sizeof(u64); - n_u64++; - } - } - - event->n_u64 = n_u64; - - return ret; -} - -static bool synth_field_signed(char *type) -{ - if (str_has_prefix(type, "u")) - return false; - if (strcmp(type, "gfp_t") == 0) - return false; - - return true; -} - -static int synth_field_is_string(char *type) -{ - if (strstr(type, "char[") != NULL) - return true; - - return false; -} - -static int synth_field_string_size(char *type) -{ - char buf[4], *end, *start; - unsigned int len; - int size, err; - - start = strstr(type, "char["); - if (start == NULL) - return -EINVAL; - start += sizeof("char[") - 1; - - end = strchr(type, ']'); - if (!end || end < start) - return -EINVAL; - - len = end - start; - if (len > 3) - return -EINVAL; - - strncpy(buf, start, len); - buf[len] = '\0'; - - err = kstrtouint(buf, 0, &size); - if (err) - return err; - - if (size > STR_VAR_LEN_MAX) - return -EINVAL; - - return size; -} - -static int synth_field_size(char *type) -{ - int size = 0; - - if (strcmp(type, "s64") == 0) - size = sizeof(s64); - else if (strcmp(type, "u64") == 0) - size = sizeof(u64); - else if (strcmp(type, "s32") == 0) - size = sizeof(s32); - else if (strcmp(type, "u32") == 0) - size = sizeof(u32); - else if (strcmp(type, "s16") == 0) - size = sizeof(s16); - else if (strcmp(type, "u16") == 0) - size = sizeof(u16); - else if (strcmp(type, "s8") == 0) - size = sizeof(s8); - else if (strcmp(type, "u8") == 0) - size = sizeof(u8); - else if (strcmp(type, "char") == 0) - size = sizeof(char); - else if (strcmp(type, "unsigned char") == 0) - size = sizeof(unsigned char); - else if (strcmp(type, "int") == 0) - size = sizeof(int); - else if (strcmp(type, "unsigned int") == 0) - size = sizeof(unsigned int); - else if (strcmp(type, "long") == 0) - size = sizeof(long); - else if (strcmp(type, "unsigned long") == 0) - size = sizeof(unsigned long); - else if (strcmp(type, "pid_t") == 0) - size = sizeof(pid_t); - else if (strcmp(type, "gfp_t") == 0) - size = sizeof(gfp_t); - else if (synth_field_is_string(type)) - size = synth_field_string_size(type); - - return size; -} - -static const char *synth_field_fmt(char *type) -{ - const char *fmt = "%llu"; - - if (strcmp(type, "s64") == 0) - fmt = "%lld"; - else if (strcmp(type, "u64") == 0) - fmt = "%llu"; - else if (strcmp(type, "s32") == 0) - fmt = "%d"; - else if (strcmp(type, "u32") == 0) - fmt = "%u"; - else if (strcmp(type, "s16") == 0) - fmt = "%d"; - else if (strcmp(type, "u16") == 0) - fmt = "%u"; - else if (strcmp(type, "s8") == 0) - fmt = "%d"; - else if (strcmp(type, "u8") == 0) - fmt = "%u"; - else if (strcmp(type, "char") == 0) - fmt = "%d"; - else if (strcmp(type, "unsigned char") == 0) - fmt = "%u"; - else if (strcmp(type, "int") == 0) - fmt = "%d"; - else if (strcmp(type, "unsigned int") == 0) - fmt = "%u"; - else if (strcmp(type, "long") == 0) - fmt = "%ld"; - else if (strcmp(type, "unsigned long") == 0) - fmt = "%lu"; - else if (strcmp(type, "pid_t") == 0) - fmt = "%d"; - else if (strcmp(type, "gfp_t") == 0) - fmt = "%x"; - else if (synth_field_is_string(type)) - fmt = "%s"; - - return fmt; -} - -static void print_synth_event_num_val(struct trace_seq *s, - char *print_fmt, char *name, - int size, u64 val, char *space) -{ - switch 
(size) { - case 1: - trace_seq_printf(s, print_fmt, name, (u8)val, space); - break; - - case 2: - trace_seq_printf(s, print_fmt, name, (u16)val, space); - break; - - case 4: - trace_seq_printf(s, print_fmt, name, (u32)val, space); - break; - - default: - trace_seq_printf(s, print_fmt, name, val, space); - break; - } -} - -static enum print_line_t print_synth_event(struct trace_iterator *iter, - int flags, - struct trace_event *event) -{ - struct trace_array *tr = iter->tr; - struct trace_seq *s = &iter->seq; - struct synth_trace_event *entry; - struct synth_event *se; - unsigned int i, n_u64; - char print_fmt[32]; - const char *fmt; - - entry = (struct synth_trace_event *)iter->ent; - se = container_of(event, struct synth_event, call.event); - - trace_seq_printf(s, "%s: ", se->name); - - for (i = 0, n_u64 = 0; i < se->n_fields; i++) { - if (trace_seq_has_overflowed(s)) - goto end; - - fmt = synth_field_fmt(se->fields[i]->type); - - /* parameter types */ - if (tr && tr->trace_flags & TRACE_ITER_VERBOSE) - trace_seq_printf(s, "%s ", fmt); - - snprintf(print_fmt, sizeof(print_fmt), "%%s=%s%%s", fmt); - - /* parameter values */ - if (se->fields[i]->is_string) { - trace_seq_printf(s, print_fmt, se->fields[i]->name, - (char *)&entry->fields[n_u64], - i == se->n_fields - 1 ? "" : " "); - n_u64 += STR_VAR_LEN_MAX / sizeof(u64); - } else { - struct trace_print_flags __flags[] = { - __def_gfpflag_names, {-1, NULL} }; - char *space = (i == se->n_fields - 1 ? "" : " "); - - print_synth_event_num_val(s, print_fmt, - se->fields[i]->name, - se->fields[i]->size, - entry->fields[n_u64], - space); - - if (strcmp(se->fields[i]->type, "gfp_t") == 0) { - trace_seq_puts(s, " ("); - trace_print_flags_seq(s, "|", - entry->fields[n_u64], - __flags); - trace_seq_putc(s, ')'); - } - n_u64++; - } - } -end: - trace_seq_putc(s, '\n'); - - return trace_handle_return(s); -} - -static struct trace_event_functions synth_event_funcs = { - .trace = print_synth_event -}; - -static notrace void trace_event_raw_event_synth(void *__data, - u64 *var_ref_vals, - unsigned int *var_ref_idx) -{ - struct trace_event_file *trace_file = __data; - struct synth_trace_event *entry; - struct trace_event_buffer fbuffer; - struct trace_buffer *buffer; - struct synth_event *event; - unsigned int i, n_u64, val_idx; - int fields_size = 0; - - event = trace_file->event_call->data; - - if (trace_trigger_soft_disabled(trace_file)) - return; - - fields_size = event->n_u64 * sizeof(u64); - - /* - * Avoid ring buffer recursion detection, as this event - * is being performed within another event. 
- */ - buffer = trace_file->tr->array_buffer.buffer; - ring_buffer_nest_start(buffer); - - entry = trace_event_buffer_reserve(&fbuffer, trace_file, - sizeof(*entry) + fields_size); - if (!entry) - goto out; - - for (i = 0, n_u64 = 0; i < event->n_fields; i++) { - val_idx = var_ref_idx[i]; - if (event->fields[i]->is_string) { - char *str_val = (char *)(long)var_ref_vals[val_idx]; - char *str_field = (char *)&entry->fields[n_u64]; - - strscpy(str_field, str_val, STR_VAR_LEN_MAX); - n_u64 += STR_VAR_LEN_MAX / sizeof(u64); - } else { - struct synth_field *field = event->fields[i]; - u64 val = var_ref_vals[val_idx]; - - switch (field->size) { - case 1: - *(u8 *)&entry->fields[n_u64] = (u8)val; - break; - - case 2: - *(u16 *)&entry->fields[n_u64] = (u16)val; - break; - - case 4: - *(u32 *)&entry->fields[n_u64] = (u32)val; - break; - - default: - entry->fields[n_u64] = val; - break; - } - n_u64++; - } - } - - trace_event_buffer_commit(&fbuffer); -out: - ring_buffer_nest_end(buffer); -} - -static void free_synth_event_print_fmt(struct trace_event_call *call) -{ - if (call) { - kfree(call->print_fmt); - call->print_fmt = NULL; - } -} - -static int __set_synth_event_print_fmt(struct synth_event *event, - char *buf, int len) -{ - const char *fmt; - int pos = 0; - int i; - - /* When len=0, we just calculate the needed length */ -#define LEN_OR_ZERO (len ? len - pos : 0) - - pos += snprintf(buf + pos, LEN_OR_ZERO, "\""); - for (i = 0; i < event->n_fields; i++) { - fmt = synth_field_fmt(event->fields[i]->type); - pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s%s", - event->fields[i]->name, fmt, - i == event->n_fields - 1 ? "" : ", "); - } - pos += snprintf(buf + pos, LEN_OR_ZERO, "\""); - - for (i = 0; i < event->n_fields; i++) { - pos += snprintf(buf + pos, LEN_OR_ZERO, - ", REC->%s", event->fields[i]->name); - } - -#undef LEN_OR_ZERO - - /* return the length of print_fmt */ - return pos; -} - -static int set_synth_event_print_fmt(struct trace_event_call *call) -{ - struct synth_event *event = call->data; - char *print_fmt; - int len; - - /* First: called with 0 length to calculate the needed length */ - len = __set_synth_event_print_fmt(event, NULL, 0); - - print_fmt = kmalloc(len + 1, GFP_KERNEL); - if (!print_fmt) - return -ENOMEM; - - /* Second: actually write the @print_fmt */ - __set_synth_event_print_fmt(event, print_fmt, len + 1); - call->print_fmt = print_fmt; - - return 0; -} - -static void free_synth_field(struct synth_field *field) -{ - kfree(field->type); - kfree(field->name); - kfree(field); -} - -static struct synth_field *parse_synth_field(int argc, const char **argv, - int *consumed) -{ - struct synth_field *field; - const char *prefix = NULL, *field_type = argv[0], *field_name, *array; - int len, ret = 0; - - if (field_type[0] == ';') - field_type++; - - if (!strcmp(field_type, "unsigned")) { - if (argc < 3) - return ERR_PTR(-EINVAL); - prefix = "unsigned "; - field_type = argv[1]; - field_name = argv[2]; - *consumed = 3; - } else { - field_name = argv[1]; - *consumed = 2; - } - - field = kzalloc(sizeof(*field), GFP_KERNEL); - if (!field) - return ERR_PTR(-ENOMEM); - - len = strlen(field_name); - array = strchr(field_name, '['); - if (array) - len -= strlen(array); - else if (field_name[len - 1] == ';') - len--; - - field->name = kmemdup_nul(field_name, len, GFP_KERNEL); - if (!field->name) { - ret = -ENOMEM; - goto free; - } - - if (field_type[0] == ';') - field_type++; - len = strlen(field_type) + 1; - if (array) - len += strlen(array); - if (prefix) - len += strlen(prefix); - - 
field->type = kzalloc(len, GFP_KERNEL); - if (!field->type) { - ret = -ENOMEM; - goto free; - } - if (prefix) - strcat(field->type, prefix); - strcat(field->type, field_type); - if (array) { - strcat(field->type, array); - if (field->type[len - 1] == ';') - field->type[len - 1] = '\0'; - } - - field->size = synth_field_size(field->type); - if (!field->size) { - ret = -EINVAL; - goto free; - } - - if (synth_field_is_string(field->type)) - field->is_string = true; - - field->is_signed = synth_field_signed(field->type); - - out: - return field; - free: - free_synth_field(field); - field = ERR_PTR(ret); - goto out; -} - -static void free_synth_tracepoint(struct tracepoint *tp) -{ - if (!tp) - return; - - kfree(tp->name); - kfree(tp); -} - -static struct tracepoint *alloc_synth_tracepoint(char *name) -{ - struct tracepoint *tp; - - tp = kzalloc(sizeof(*tp), GFP_KERNEL); - if (!tp) - return ERR_PTR(-ENOMEM); - - tp->name = kstrdup(name, GFP_KERNEL); - if (!tp->name) { - kfree(tp); - return ERR_PTR(-ENOMEM); - } - - return tp; -} - -typedef void (*synth_probe_func_t) (void *__data, u64 *var_ref_vals, - unsigned int *var_ref_idx); - -static inline void trace_synth(struct synth_event *event, u64 *var_ref_vals, - unsigned int *var_ref_idx) -{ - struct tracepoint *tp = event->tp; - - if (unlikely(atomic_read(&tp->key.enabled) > 0)) { - struct tracepoint_func *probe_func_ptr; - synth_probe_func_t probe_func; - void *__data; - - if (!(cpu_online(raw_smp_processor_id()))) - return; - - probe_func_ptr = rcu_dereference_sched((tp)->funcs); - if (probe_func_ptr) { - do { - probe_func = probe_func_ptr->func; - __data = probe_func_ptr->data; - probe_func(__data, var_ref_vals, var_ref_idx); - } while ((++probe_func_ptr)->func); - } - } -} - -static struct synth_event *find_synth_event(const char *name) -{ - struct dyn_event *pos; - struct synth_event *event; - - for_each_dyn_event(pos) { - if (!is_synth_event(pos)) - continue; - event = to_synth_event(pos); - if (strcmp(event->name, name) == 0) - return event; - } - - return NULL; -} - -static struct trace_event_fields synth_event_fields_array[] = { - { .type = TRACE_FUNCTION_TYPE, - .define_fields = synth_event_define_fields }, - {} -}; - -static int register_synth_event(struct synth_event *event) -{ - struct trace_event_call *call = &event->call; - int ret = 0; - - event->call.class = &event->class; - event->class.system = kstrdup(SYNTH_SYSTEM, GFP_KERNEL); - if (!event->class.system) { - ret = -ENOMEM; - goto out; - } - - event->tp = alloc_synth_tracepoint(event->name); - if (IS_ERR(event->tp)) { - ret = PTR_ERR(event->tp); - event->tp = NULL; - goto out; - } - - INIT_LIST_HEAD(&call->class->fields); - call->event.funcs = &synth_event_funcs; - call->class->fields_array = synth_event_fields_array; - - ret = register_trace_event(&call->event); - if (!ret) { - ret = -ENODEV; - goto out; - } - call->flags = TRACE_EVENT_FL_TRACEPOINT; - call->class->reg = trace_event_reg; - call->class->probe = trace_event_raw_event_synth; - call->data = event; - call->tp = event->tp; - - ret = trace_add_event_call(call); - if (ret) { - pr_warn("Failed to register synthetic event: %s\n", - trace_event_name(call)); - goto err; - } - - ret = set_synth_event_print_fmt(call); - if (ret < 0) { - trace_remove_event_call(call); - goto err; - } - out: - return ret; - err: - unregister_trace_event(&call->event); - goto out; -} - -static int unregister_synth_event(struct synth_event *event) -{ - struct trace_event_call *call = &event->call; - int ret; - - ret = 
trace_remove_event_call(call); - - return ret; -} - -static void free_synth_event(struct synth_event *event) -{ - unsigned int i; - - if (!event) - return; - - for (i = 0; i < event->n_fields; i++) - free_synth_field(event->fields[i]); - - kfree(event->fields); - kfree(event->name); - kfree(event->class.system); - free_synth_tracepoint(event->tp); - free_synth_event_print_fmt(&event->call); - kfree(event); -} - -static struct synth_event *alloc_synth_event(const char *name, int n_fields, - struct synth_field **fields) -{ - struct synth_event *event; - unsigned int i; - - event = kzalloc(sizeof(*event), GFP_KERNEL); - if (!event) { - event = ERR_PTR(-ENOMEM); - goto out; - } - - event->name = kstrdup(name, GFP_KERNEL); - if (!event->name) { - kfree(event); - event = ERR_PTR(-ENOMEM); - goto out; - } - - event->fields = kcalloc(n_fields, sizeof(*event->fields), GFP_KERNEL); - if (!event->fields) { - free_synth_event(event); - event = ERR_PTR(-ENOMEM); - goto out; - } - - dyn_event_init(&event->devent, &synth_event_ops); - - for (i = 0; i < n_fields; i++) - event->fields[i] = fields[i]; - - event->n_fields = n_fields; - out: - return event; -} - -static void action_trace(struct hist_trigger_data *hist_data, - struct tracing_map_elt *elt, void *rec, - struct ring_buffer_event *rbe, void *key, - struct action_data *data, u64 *var_ref_vals) -{ - struct synth_event *event = data->synth_event; - - trace_synth(event, var_ref_vals, data->var_ref_idx); -} - -struct hist_var_data { - struct list_head list; - struct hist_trigger_data *hist_data; -}; - -static int synth_event_check_arg_fn(void *data) -{ - struct dynevent_arg_pair *arg_pair = data; - int size; - - size = synth_field_size((char *)arg_pair->lhs); - - return size ? 0 : -EINVAL; -} - -/** - * synth_event_add_field - Add a new field to a synthetic event cmd - * @cmd: A pointer to the dynevent_cmd struct representing the new event - * @type: The type of the new field to add - * @name: The name of the new field to add - * - * Add a new field to a synthetic event cmd object. Field ordering is in - * the same order the fields are added. - * - * See synth_field_size() for available types. If field_name contains - * [n] the field is considered to be an array. - * - * Return: 0 if successful, error otherwise. - */ -int synth_event_add_field(struct dynevent_cmd *cmd, const char *type, - const char *name) -{ - struct dynevent_arg_pair arg_pair; - int ret; - - if (cmd->type != DYNEVENT_TYPE_SYNTH) - return -EINVAL; - - if (!type || !name) - return -EINVAL; - - dynevent_arg_pair_init(&arg_pair, 0, ';'); - - arg_pair.lhs = type; - arg_pair.rhs = name; - - ret = dynevent_arg_pair_add(cmd, &arg_pair, synth_event_check_arg_fn); - if (ret) - return ret; - - if (++cmd->n_fields > SYNTH_FIELDS_MAX) - ret = -EINVAL; - - return ret; -} -EXPORT_SYMBOL_GPL(synth_event_add_field); - -/** - * synth_event_add_field_str - Add a new field to a synthetic event cmd - * @cmd: A pointer to the dynevent_cmd struct representing the new event - * @type_name: The type and name of the new field to add, as a single string - * - * Add a new field to a synthetic event cmd object, as a single - * string. The @type_name string is expected to be of the form 'type - * name', which will be appended by ';'. No sanity checking is done - - * what's passed in is assumed to already be well-formed. Field - * ordering is in the same order the fields are added. - * - * See synth_field_size() for available types. If field_name contains - * [n] the field is considered to be an array. 
- * - * Return: 0 if successful, error otherwise. - */ -int synth_event_add_field_str(struct dynevent_cmd *cmd, const char *type_name) -{ - struct dynevent_arg arg; - int ret; - - if (cmd->type != DYNEVENT_TYPE_SYNTH) - return -EINVAL; - - if (!type_name) - return -EINVAL; - - dynevent_arg_init(&arg, ';'); - - arg.str = type_name; - - ret = dynevent_arg_add(cmd, &arg, NULL); - if (ret) - return ret; - - if (++cmd->n_fields > SYNTH_FIELDS_MAX) - ret = -EINVAL; - - return ret; -} -EXPORT_SYMBOL_GPL(synth_event_add_field_str); - -/** - * synth_event_add_fields - Add multiple fields to a synthetic event cmd - * @cmd: A pointer to the dynevent_cmd struct representing the new event - * @fields: An array of type/name field descriptions - * @n_fields: The number of field descriptions contained in the fields array - * - * Add a new set of fields to a synthetic event cmd object. The event - * fields that will be defined for the event should be passed in as an - * array of struct synth_field_desc, and the number of elements in the - * array passed in as n_fields. Field ordering will retain the - * ordering given in the fields array. - * - * See synth_field_size() for available types. If field_name contains - * [n] the field is considered to be an array. - * - * Return: 0 if successful, error otherwise. - */ -int synth_event_add_fields(struct dynevent_cmd *cmd, - struct synth_field_desc *fields, - unsigned int n_fields) -{ - unsigned int i; - int ret = 0; - - for (i = 0; i < n_fields; i++) { - if (fields[i].type == NULL || fields[i].name == NULL) { - ret = -EINVAL; - break; - } - - ret = synth_event_add_field(cmd, fields[i].type, fields[i].name); - if (ret) - break; - } - - return ret; -} -EXPORT_SYMBOL_GPL(synth_event_add_fields); - -/** - * __synth_event_gen_cmd_start - Start a synthetic event command from arg list - * @cmd: A pointer to the dynevent_cmd struct representing the new event - * @name: The name of the synthetic event - * @mod: The module creating the event, NULL if not created from a module - * @args: Variable number of arg (pairs), one pair for each field - * - * NOTE: Users normally won't want to call this function directly, but - * rather use the synth_event_gen_cmd_start() wrapper, which - * automatically adds a NULL to the end of the arg list. If this - * function is used directly, make sure the last arg in the variable - * arg list is NULL. - * - * Generate a synthetic event command to be executed by - * synth_event_gen_cmd_end(). This function can be used to generate - * the complete command or only the first part of it; in the latter - * case, synth_event_add_field(), synth_event_add_field_str(), or - * synth_event_add_fields() can be used to add more fields following - * this. - * - * There should be an even number variable args, each pair consisting - * of a type followed by a field name. - * - * See synth_field_size() for available types. If field_name contains - * [n] the field is considered to be an array. - * - * Return: 0 if successful, error otherwise. - */ -int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd, const char *name, - struct module *mod, ...) 
-{ - struct dynevent_arg arg; - va_list args; - int ret; - - cmd->event_name = name; - cmd->private_data = mod; - - if (cmd->type != DYNEVENT_TYPE_SYNTH) - return -EINVAL; - - dynevent_arg_init(&arg, 0); - arg.str = name; - ret = dynevent_arg_add(cmd, &arg, NULL); - if (ret) - return ret; - - va_start(args, mod); - for (;;) { - const char *type, *name; - - type = va_arg(args, const char *); - if (!type) - break; - name = va_arg(args, const char *); - if (!name) - break; - - if (++cmd->n_fields > SYNTH_FIELDS_MAX) { - ret = -EINVAL; - break; - } - - ret = synth_event_add_field(cmd, type, name); - if (ret) - break; - } - va_end(args); - - return ret; -} -EXPORT_SYMBOL_GPL(__synth_event_gen_cmd_start); - -/** - * synth_event_gen_cmd_array_start - Start synthetic event command from an array - * @cmd: A pointer to the dynevent_cmd struct representing the new event - * @name: The name of the synthetic event - * @fields: An array of type/name field descriptions - * @n_fields: The number of field descriptions contained in the fields array - * - * Generate a synthetic event command to be executed by - * synth_event_gen_cmd_end(). This function can be used to generate - * the complete command or only the first part of it; in the latter - * case, synth_event_add_field(), synth_event_add_field_str(), or - * synth_event_add_fields() can be used to add more fields following - * this. - * - * The event fields that will be defined for the event should be - * passed in as an array of struct synth_field_desc, and the number of - * elements in the array passed in as n_fields. Field ordering will - * retain the ordering given in the fields array. - * - * See synth_field_size() for available types. If field_name contains - * [n] the field is considered to be an array. - * - * Return: 0 if successful, error otherwise. - */ -int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd, const char *name, - struct module *mod, - struct synth_field_desc *fields, - unsigned int n_fields) -{ - struct dynevent_arg arg; - unsigned int i; - int ret = 0; - - cmd->event_name = name; - cmd->private_data = mod; - - if (cmd->type != DYNEVENT_TYPE_SYNTH) - return -EINVAL; - - if (n_fields > SYNTH_FIELDS_MAX) - return -EINVAL; - - dynevent_arg_init(&arg, 0); - arg.str = name; - ret = dynevent_arg_add(cmd, &arg, NULL); - if (ret) - return ret; - - for (i = 0; i < n_fields; i++) { - if (fields[i].type == NULL || fields[i].name == NULL) - return -EINVAL; - - ret = synth_event_add_field(cmd, fields[i].type, fields[i].name); - if (ret) - break; - } - - return ret; -} -EXPORT_SYMBOL_GPL(synth_event_gen_cmd_array_start); - -static int __create_synth_event(int argc, const char *name, const char **argv) -{ - struct synth_field *field, *fields[SYNTH_FIELDS_MAX]; - struct synth_event *event = NULL; - int i, consumed = 0, n_fields = 0, ret = 0; - - /* - * Argument syntax: - * - Add synthetic event: field[;field] ... - * - Remove synthetic event: ! field[;field] ... 
- * where 'field' = type field_name - */ - - if (name[0] == '\0' || argc < 1) - return -EINVAL; - - mutex_lock(&event_mutex); - - event = find_synth_event(name); - if (event) { - ret = -EEXIST; - goto out; - } - - for (i = 0; i < argc - 1; i++) { - if (strcmp(argv[i], ";") == 0) - continue; - if (n_fields == SYNTH_FIELDS_MAX) { - ret = -EINVAL; - goto err; - } - - field = parse_synth_field(argc - i, &argv[i], &consumed); - if (IS_ERR(field)) { - ret = PTR_ERR(field); - goto err; - } - fields[n_fields++] = field; - i += consumed - 1; - } - - if (i < argc && strcmp(argv[i], ";") != 0) { - ret = -EINVAL; - goto err; - } - - event = alloc_synth_event(name, n_fields, fields); - if (IS_ERR(event)) { - ret = PTR_ERR(event); - event = NULL; - goto err; - } - ret = register_synth_event(event); - if (!ret) - dyn_event_add(&event->devent); - else - free_synth_event(event); - out: - mutex_unlock(&event_mutex); - - return ret; - err: - for (i = 0; i < n_fields; i++) - free_synth_field(fields[i]); - - goto out; -} - -/** - * synth_event_create - Create a new synthetic event - * @name: The name of the new sythetic event - * @fields: An array of type/name field descriptions - * @n_fields: The number of field descriptions contained in the fields array - * @mod: The module creating the event, NULL if not created from a module - * - * Create a new synthetic event with the given name under the - * trace/events/synthetic/ directory. The event fields that will be - * defined for the event should be passed in as an array of struct - * synth_field_desc, and the number elements in the array passed in as - * n_fields. Field ordering will retain the ordering given in the - * fields array. - * - * If the new synthetic event is being created from a module, the mod - * param must be non-NULL. This will ensure that the trace buffer - * won't contain unreadable events. - * - * The new synth event should be deleted using synth_event_delete() - * function. The new synthetic event can be generated from modules or - * other kernel code using trace_synth_event() and related functions. - * - * Return: 0 if successful, error otherwise. - */ -int synth_event_create(const char *name, struct synth_field_desc *fields, - unsigned int n_fields, struct module *mod) -{ - struct dynevent_cmd cmd; - char *buf; - int ret; - - buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN); - - ret = synth_event_gen_cmd_array_start(&cmd, name, mod, - fields, n_fields); - if (ret) - goto out; - - ret = synth_event_gen_cmd_end(&cmd); - out: - kfree(buf); - - return ret; -} -EXPORT_SYMBOL_GPL(synth_event_create); - -static int destroy_synth_event(struct synth_event *se) -{ - int ret; - - if (se->ref) - ret = -EBUSY; - else { - ret = unregister_synth_event(se); - if (!ret) { - dyn_event_remove(&se->devent); - free_synth_event(se); - } - } - - return ret; -} - -/** - * synth_event_delete - Delete a synthetic event - * @event_name: The name of the new sythetic event - * - * Delete a synthetic event that was created with synth_event_create(). - * - * Return: 0 if successful, error otherwise. 
- */ -int synth_event_delete(const char *event_name) -{ - struct synth_event *se = NULL; - struct module *mod = NULL; - int ret = -ENOENT; - - mutex_lock(&event_mutex); - se = find_synth_event(event_name); - if (se) { - mod = se->mod; - ret = destroy_synth_event(se); - } - mutex_unlock(&event_mutex); - - if (mod) { - mutex_lock(&trace_types_lock); - /* - * It is safest to reset the ring buffer if the module - * being unloaded registered any events that were - * used. The only worry is if a new module gets - * loaded, and takes on the same id as the events of - * this module. When printing out the buffer, traced - * events left over from this module may be passed to - * the new module events and unexpected results may - * occur. - */ - tracing_reset_all_online_cpus(); - mutex_unlock(&trace_types_lock); - } - - return ret; -} -EXPORT_SYMBOL_GPL(synth_event_delete); - -static int create_or_delete_synth_event(int argc, char **argv) -{ - const char *name = argv[0]; - int ret; - - /* trace_run_command() ensures argc != 0 */ - if (name[0] == '!') { - ret = synth_event_delete(name + 1); - return ret; - } - - ret = __create_synth_event(argc - 1, name, (const char **)argv + 1); - return ret == -ECANCELED ? -EINVAL : ret; -} - -static int synth_event_run_command(struct dynevent_cmd *cmd) -{ - struct synth_event *se; - int ret; - - ret = trace_run_command(cmd->seq.buffer, create_or_delete_synth_event); - if (ret) - return ret; - - se = find_synth_event(cmd->event_name); - if (WARN_ON(!se)) - return -ENOENT; - - se->mod = cmd->private_data; - - return ret; -} - -/** - * synth_event_cmd_init - Initialize a synthetic event command object - * @cmd: A pointer to the dynevent_cmd struct representing the new event - * @buf: A pointer to the buffer used to build the command - * @maxlen: The length of the buffer passed in @buf - * - * Initialize a synthetic event command object. Use this before - * calling any of the other dyenvent_cmd functions. - */ -void synth_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen) -{ - dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_SYNTH, - synth_event_run_command); -} -EXPORT_SYMBOL_GPL(synth_event_cmd_init); - -static inline int -__synth_event_trace_start(struct trace_event_file *file, - struct synth_event_trace_state *trace_state) -{ - int entry_size, fields_size = 0; - int ret = 0; - - memset(trace_state, '\0', sizeof(*trace_state)); - - /* - * Normal event tracing doesn't get called at all unless the - * ENABLED bit is set (which attaches the probe thus allowing - * this code to be called, etc). Because this is called - * directly by the user, we don't have that but we still need - * to honor not logging when disabled. For the the iterated - * trace case, we save the enabed state upon start and just - * ignore the following data calls. - */ - if (!(file->flags & EVENT_FILE_FL_ENABLED) || - trace_trigger_soft_disabled(file)) { - trace_state->disabled = true; - goto out; - } - - trace_state->event = file->event_call->data; - - fields_size = trace_state->event->n_u64 * sizeof(u64); - - /* - * Avoid ring buffer recursion detection, as this event - * is being performed within another event. 
- */ - trace_state->buffer = file->tr->array_buffer.buffer; - ring_buffer_nest_start(trace_state->buffer); - - entry_size = sizeof(*trace_state->entry) + fields_size; - trace_state->entry = trace_event_buffer_reserve(&trace_state->fbuffer, - file, - entry_size); - if (!trace_state->entry) { - ring_buffer_nest_end(trace_state->buffer); - ret = -EINVAL; - } -out: - return ret; -} - -static inline void -__synth_event_trace_end(struct synth_event_trace_state *trace_state) -{ - trace_event_buffer_commit(&trace_state->fbuffer); - - ring_buffer_nest_end(trace_state->buffer); -} - -/** - * synth_event_trace - Trace a synthetic event - * @file: The trace_event_file representing the synthetic event - * @n_vals: The number of values in vals - * @args: Variable number of args containing the event values - * - * Trace a synthetic event using the values passed in the variable - * argument list. - * - * The argument list should be a list 'n_vals' u64 values. The number - * of vals must match the number of field in the synthetic event, and - * must be in the same order as the synthetic event fields. - * - * All vals should be cast to u64, and string vals are just pointers - * to strings, cast to u64. Strings will be copied into space - * reserved in the event for the string, using these pointers. - * - * Return: 0 on success, err otherwise. - */ -int synth_event_trace(struct trace_event_file *file, unsigned int n_vals, ...) -{ - struct synth_event_trace_state state; - unsigned int i, n_u64; - va_list args; - int ret; - - ret = __synth_event_trace_start(file, &state); - if (ret || state.disabled) - return ret; - - if (n_vals != state.event->n_fields) { - ret = -EINVAL; - goto out; - } - - va_start(args, n_vals); - for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) { - u64 val; - - val = va_arg(args, u64); - - if (state.event->fields[i]->is_string) { - char *str_val = (char *)(long)val; - char *str_field = (char *)&state.entry->fields[n_u64]; - - strscpy(str_field, str_val, STR_VAR_LEN_MAX); - n_u64 += STR_VAR_LEN_MAX / sizeof(u64); - } else { - struct synth_field *field = state.event->fields[i]; - - switch (field->size) { - case 1: - *(u8 *)&state.entry->fields[n_u64] = (u8)val; - break; - - case 2: - *(u16 *)&state.entry->fields[n_u64] = (u16)val; - break; - - case 4: - *(u32 *)&state.entry->fields[n_u64] = (u32)val; - break; - - default: - state.entry->fields[n_u64] = val; - break; - } - n_u64++; - } - } - va_end(args); -out: - __synth_event_trace_end(&state); - - return ret; -} -EXPORT_SYMBOL_GPL(synth_event_trace); - -/** - * synth_event_trace_array - Trace a synthetic event from an array - * @file: The trace_event_file representing the synthetic event - * @vals: Array of values - * @n_vals: The number of values in vals - * - * Trace a synthetic event using the values passed in as 'vals'. - * - * The 'vals' array is just an array of 'n_vals' u64. The number of - * vals must match the number of field in the synthetic event, and - * must be in the same order as the synthetic event fields. - * - * All vals should be cast to u64, and string vals are just pointers - * to strings, cast to u64. Strings will be copied into space - * reserved in the event for the string, using these pointers. - * - * Return: 0 on success, err otherwise. 
- */ -int synth_event_trace_array(struct trace_event_file *file, u64 *vals, - unsigned int n_vals) -{ - struct synth_event_trace_state state; - unsigned int i, n_u64; - int ret; - - ret = __synth_event_trace_start(file, &state); - if (ret || state.disabled) - return ret; - - if (n_vals != state.event->n_fields) { - ret = -EINVAL; - goto out; - } - - for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) { - if (state.event->fields[i]->is_string) { - char *str_val = (char *)(long)vals[i]; - char *str_field = (char *)&state.entry->fields[n_u64]; - - strscpy(str_field, str_val, STR_VAR_LEN_MAX); - n_u64 += STR_VAR_LEN_MAX / sizeof(u64); - } else { - struct synth_field *field = state.event->fields[i]; - u64 val = vals[i]; - - switch (field->size) { - case 1: - *(u8 *)&state.entry->fields[n_u64] = (u8)val; - break; - - case 2: - *(u16 *)&state.entry->fields[n_u64] = (u16)val; - break; - - case 4: - *(u32 *)&state.entry->fields[n_u64] = (u32)val; - break; - - default: - state.entry->fields[n_u64] = val; - break; - } - n_u64++; - } - } -out: - __synth_event_trace_end(&state); - - return ret; -} -EXPORT_SYMBOL_GPL(synth_event_trace_array); - -/** - * synth_event_trace_start - Start piecewise synthetic event trace - * @file: The trace_event_file representing the synthetic event - * @trace_state: A pointer to object tracking the piecewise trace state - * - * Start the trace of a synthetic event field-by-field rather than all - * at once. - * - * This function 'opens' an event trace, which means space is reserved - * for the event in the trace buffer, after which the event's - * individual field values can be set through either - * synth_event_add_next_val() or synth_event_add_val(). - * - * A pointer to a trace_state object is passed in, which will keep - * track of the current event trace state until the event trace is - * closed (and the event finally traced) using - * synth_event_trace_end(). - * - * Note that synth_event_trace_end() must be called after all values - * have been added for each event trace, regardless of whether adding - * all field values succeeded or not. - * - * Note also that for a given event trace, all fields must be added - * using either synth_event_add_next_val() or synth_event_add_val() - * but not both together or interleaved. - * - * Return: 0 on success, err otherwise. - */ -int synth_event_trace_start(struct trace_event_file *file, - struct synth_event_trace_state *trace_state) -{ - if (!trace_state) - return -EINVAL; + char *event_system; + } match_data; - return __synth_event_trace_start(file, trace_state); -} -EXPORT_SYMBOL_GPL(synth_event_trace_start); + struct { + /* + * var_str contains the $-unstripped variable + * name referenced by var_ref, and used when + * printing the action. Because var_ref + * creation is deferred to create_actions(), + * we need a per-action way to save it until + * then, thus var_str. + */ + char *var_str; -static int __synth_event_add_val(const char *field_name, u64 val, - struct synth_event_trace_state *trace_state) -{ - struct synth_field *field = NULL; - struct synth_trace_event *entry; - struct synth_event *event; - int i, ret = 0; + /* + * var_ref refers to the variable being + * tracked e.g onmax($var). + */ + struct hist_field *var_ref; - if (!trace_state) { - ret = -EINVAL; - goto out; - } + /* + * track_var contains the 'invisible' tracking + * variable created to keep the current + * e.g. max value. 
+ */ + struct hist_field *track_var; - /* can't mix add_next_synth_val() with add_synth_val() */ - if (field_name) { - if (trace_state->add_next) { - ret = -EINVAL; - goto out; - } - trace_state->add_name = true; - } else { - if (trace_state->add_name) { - ret = -EINVAL; - goto out; - } - trace_state->add_next = true; - } + check_track_val_fn_t check_val; + action_fn_t save_data; + } track_data; + }; +}; - if (trace_state->disabled) - goto out; +struct track_data { + u64 track_val; + bool updated; - event = trace_state->event; - if (trace_state->add_name) { - for (i = 0; i < event->n_fields; i++) { - field = event->fields[i]; - if (strcmp(field->name, field_name) == 0) - break; - } - if (!field) { - ret = -EINVAL; - goto out; - } - } else { - if (trace_state->cur_field >= event->n_fields) { - ret = -EINVAL; - goto out; - } - field = event->fields[trace_state->cur_field++]; - } + unsigned int key_len; + void *key; + struct tracing_map_elt elt; - entry = trace_state->entry; - if (field->is_string) { - char *str_val = (char *)(long)val; - char *str_field; + struct action_data *action_data; + struct hist_trigger_data *hist_data; +}; - if (!str_val) { - ret = -EINVAL; - goto out; - } +struct hist_elt_data { + char *comm; + u64 *var_ref_vals; + char *field_var_str[SYNTH_FIELDS_MAX]; +}; - str_field = (char *)&entry->fields[field->offset]; - strscpy(str_field, str_val, STR_VAR_LEN_MAX); - } else { - switch (field->size) { - case 1: - *(u8 *)&trace_state->entry->fields[field->offset] = (u8)val; - break; +struct snapshot_context { + struct tracing_map_elt *elt; + void *key; +}; - case 2: - *(u16 *)&trace_state->entry->fields[field->offset] = (u16)val; - break; +static void track_data_free(struct track_data *track_data) +{ + struct hist_elt_data *elt_data; - case 4: - *(u32 *)&trace_state->entry->fields[field->offset] = (u32)val; - break; + if (!track_data) + return; - default: - trace_state->entry->fields[field->offset] = val; - break; - } + kfree(track_data->key); + + elt_data = track_data->elt.private_data; + if (elt_data) { + kfree(elt_data->comm); + kfree(elt_data); } - out: - return ret; -} -/** - * synth_event_add_next_val - Add the next field's value to an open synth trace - * @val: The value to set the next field to - * @trace_state: A pointer to object tracking the piecewise trace state - * - * Set the value of the next field in an event that's been opened by - * synth_event_trace_start(). - * - * The val param should be the value cast to u64. If the value points - * to a string, the val param should be a char * cast to u64. - * - * This function assumes all the fields in an event are to be set one - * after another - successive calls to this function are made, one for - * each field, in the order of the fields in the event, until all - * fields have been set. If you'd rather set each field individually - * without regard to ordering, synth_event_add_val() can be used - * instead. - * - * Note however that synth_event_add_next_val() and - * synth_event_add_val() can't be intermixed for a given event trace - - * one or the other but not both can be used at the same time. - * - * Note also that synth_event_trace_end() must be called after all - * values have been added for each event trace, regardless of whether - * adding all field values succeeded or not. - * - * Return: 0 on success, err otherwise. 
- */ -int synth_event_add_next_val(u64 val, - struct synth_event_trace_state *trace_state) -{ - return __synth_event_add_val(NULL, val, trace_state); + kfree(track_data); } -EXPORT_SYMBOL_GPL(synth_event_add_next_val); -/** - * synth_event_add_val - Add a named field's value to an open synth trace - * @field_name: The name of the synthetic event field value to set - * @val: The value to set the next field to - * @trace_state: A pointer to object tracking the piecewise trace state - * - * Set the value of the named field in an event that's been opened by - * synth_event_trace_start(). - * - * The val param should be the value cast to u64. If the value points - * to a string, the val param should be a char * cast to u64. - * - * This function looks up the field name, and if found, sets the field - * to the specified value. This lookup makes this function more - * expensive than synth_event_add_next_val(), so use that or the - * none-piecewise synth_event_trace() instead if efficiency is more - * important. - * - * Note however that synth_event_add_next_val() and - * synth_event_add_val() can't be intermixed for a given event trace - - * one or the other but not both can be used at the same time. - * - * Note also that synth_event_trace_end() must be called after all - * values have been added for each event trace, regardless of whether - * adding all field values succeeded or not. - * - * Return: 0 on success, err otherwise. - */ -int synth_event_add_val(const char *field_name, u64 val, - struct synth_event_trace_state *trace_state) +static struct track_data *track_data_alloc(unsigned int key_len, + struct action_data *action_data, + struct hist_trigger_data *hist_data) { - return __synth_event_add_val(field_name, val, trace_state); -} -EXPORT_SYMBOL_GPL(synth_event_add_val); + struct track_data *data = kzalloc(sizeof(*data), GFP_KERNEL); + struct hist_elt_data *elt_data; -/** - * synth_event_trace_end - End piecewise synthetic event trace - * @trace_state: A pointer to object tracking the piecewise trace state - * - * End the trace of a synthetic event opened by - * synth_event_trace__start(). - * - * This function 'closes' an event trace, which basically means that - * it commits the reserved event and cleans up other loose ends. - * - * A pointer to a trace_state object is passed in, which will keep - * track of the current event trace state opened with - * synth_event_trace_start(). - * - * Note that this function must be called after all values have been - * added for each event trace, regardless of whether adding all field - * values succeeded or not. - * - * Return: 0 on success, err otherwise. 
- */ -int synth_event_trace_end(struct synth_event_trace_state *trace_state) -{ - if (!trace_state) - return -EINVAL; + if (!data) + return ERR_PTR(-ENOMEM); - __synth_event_trace_end(trace_state); + data->key = kzalloc(key_len, GFP_KERNEL); + if (!data->key) { + track_data_free(data); + return ERR_PTR(-ENOMEM); + } - return 0; -} -EXPORT_SYMBOL_GPL(synth_event_trace_end); + data->key_len = key_len; + data->action_data = action_data; + data->hist_data = hist_data; -static int create_synth_event(int argc, const char **argv) -{ - const char *name = argv[0]; - int len; + elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL); + if (!elt_data) { + track_data_free(data); + return ERR_PTR(-ENOMEM); + } - if (name[0] != 's' || name[1] != ':') - return -ECANCELED; - name += 2; + data->elt.private_data = elt_data; - /* This interface accepts group name prefix */ - if (strchr(name, '/')) { - len = str_has_prefix(name, SYNTH_SYSTEM "/"); - if (len == 0) - return -EINVAL; - name += len; + elt_data->comm = kzalloc(TASK_COMM_LEN, GFP_KERNEL); + if (!elt_data->comm) { + track_data_free(data); + return ERR_PTR(-ENOMEM); } - return __create_synth_event(argc - 1, name, argv + 1); -} - -static int synth_event_release(struct dyn_event *ev) -{ - struct synth_event *event = to_synth_event(ev); - int ret; - if (event->ref) - return -EBUSY; + return data; +} - ret = unregister_synth_event(event); - if (ret) - return ret; +static char last_cmd[MAX_FILTER_STR_VAL]; +static char last_cmd_loc[MAX_FILTER_STR_VAL]; - dyn_event_remove(ev); - free_synth_event(event); - return 0; +static int errpos(char *str) +{ + return err_pos(last_cmd, str); } -static int __synth_event_show(struct seq_file *m, struct synth_event *event) +static void last_cmd_set(struct trace_event_file *file, char *str) { - struct synth_field *field; - unsigned int i; + const char *system = NULL, *name = NULL; + struct trace_event_call *call; - seq_printf(m, "%s\t", event->name); + if (!str) + return; - for (i = 0; i < event->n_fields; i++) { - field = event->fields[i]; + strcpy(last_cmd, "hist:"); + strncat(last_cmd, str, MAX_FILTER_STR_VAL - 1 - sizeof("hist:")); - /* parameter values */ - seq_printf(m, "%s %s%s", field->type, field->name, - i == event->n_fields - 1 ? 
"" : "; "); + if (file) { + call = file->event_call; + system = call->class->system; + if (system) { + name = trace_event_name(call); + if (!name) + system = NULL; + } } - seq_putc(m, '\n'); - - return 0; + if (system) + snprintf(last_cmd_loc, MAX_FILTER_STR_VAL, "hist:%s:%s", system, name); } -static int synth_event_show(struct seq_file *m, struct dyn_event *ev) +static void hist_err(struct trace_array *tr, u8 err_type, u8 err_pos) { - struct synth_event *event = to_synth_event(ev); - - seq_printf(m, "s:%s/", event->class.system); - - return __synth_event_show(m, event); + tracing_log_err(tr, last_cmd_loc, last_cmd, err_text, + err_type, err_pos); } -static int synth_events_seq_show(struct seq_file *m, void *v) +static void hist_err_clear(void) { - struct dyn_event *ev = v; - - if (!is_synth_event(ev)) - return 0; - - return __synth_event_show(m, to_synth_event(ev)); + last_cmd[0] = '\0'; + last_cmd_loc[0] = '\0'; } -static const struct seq_operations synth_events_seq_op = { - .start = dyn_event_seq_start, - .next = dyn_event_seq_next, - .stop = dyn_event_seq_stop, - .show = synth_events_seq_show, -}; +typedef void (*synth_probe_func_t) (void *__data, u64 *var_ref_vals, + unsigned int *var_ref_idx); -static int synth_events_open(struct inode *inode, struct file *file) +static inline void trace_synth(struct synth_event *event, u64 *var_ref_vals, + unsigned int *var_ref_idx) { - int ret; + struct tracepoint *tp = event->tp; - ret = security_locked_down(LOCKDOWN_TRACEFS); - if (ret) - return ret; + if (unlikely(atomic_read(&tp->key.enabled) > 0)) { + struct tracepoint_func *probe_func_ptr; + synth_probe_func_t probe_func; + void *__data; - if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { - ret = dyn_events_release_all(&synth_event_ops); - if (ret < 0) - return ret; - } + if (!(cpu_online(raw_smp_processor_id()))) + return; - return seq_open(file, &synth_events_seq_op); + probe_func_ptr = rcu_dereference_sched((tp)->funcs); + if (probe_func_ptr) { + do { + probe_func = probe_func_ptr->func; + __data = probe_func_ptr->data; + probe_func(__data, var_ref_vals, var_ref_idx); + } while ((++probe_func_ptr)->func); + } + } } -static ssize_t synth_events_write(struct file *file, - const char __user *buffer, - size_t count, loff_t *ppos) +static void action_trace(struct hist_trigger_data *hist_data, + struct tracing_map_elt *elt, void *rec, + struct ring_buffer_event *rbe, void *key, + struct action_data *data, u64 *var_ref_vals) { - return trace_parse_run_command(file, buffer, count, ppos, - create_or_delete_synth_event); + struct synth_event *event = data->synth_event; + + trace_synth(event, var_ref_vals, data->var_ref_idx); } -static const struct file_operations synth_events_fops = { - .open = synth_events_open, - .write = synth_events_write, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, +struct hist_var_data { + struct list_head list; + struct hist_trigger_data *hist_data; }; static u64 hist_field_timestamp(struct hist_field *hist_field, @@ -7653,37 +5904,3 @@ __init int register_trigger_hist_enable_disable_cmds(void) return ret; } - -static __init int trace_events_hist_init(void) -{ - struct dentry *entry = NULL; - struct dentry *d_tracer; - int err = 0; - - err = dyn_event_register(&synth_event_ops); - if (err) { - pr_warn("Could not register synth_event_ops\n"); - return err; - } - - d_tracer = tracing_init_dentry(); - if (IS_ERR(d_tracer)) { - err = PTR_ERR(d_tracer); - goto err; - } - - entry = tracefs_create_file("synthetic_events", 0644, d_tracer, - 
NULL, &synth_events_fops); - if (!entry) { - err = -ENODEV; - goto err; - } - - return err; - err: - pr_warn("Could not create tracefs 'synthetic_events' entry\n"); - - return err; -} - -fs_initcall(trace_events_hist_init); diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c new file mode 100644 index 0000000000000..c6cca0d1d5840 --- /dev/null +++ b/kernel/trace/trace_events_synth.c @@ -0,0 +1,1789 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * trace_events_synth - synthetic trace events + * + * Copyright (C) 2015, 2020 Tom Zanussi + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +/* for gfp flag names */ +#include +#include + +#include "trace_synth.h" + +static int create_synth_event(int argc, const char **argv); +static int synth_event_show(struct seq_file *m, struct dyn_event *ev); +static int synth_event_release(struct dyn_event *ev); +static bool synth_event_is_busy(struct dyn_event *ev); +static bool synth_event_match(const char *system, const char *event, + int argc, const char **argv, struct dyn_event *ev); + +static struct dyn_event_operations synth_event_ops = { + .create = create_synth_event, + .show = synth_event_show, + .is_busy = synth_event_is_busy, + .free = synth_event_release, + .match = synth_event_match, +}; + +static bool is_synth_event(struct dyn_event *ev) +{ + return ev->ops == &synth_event_ops; +} + +static struct synth_event *to_synth_event(struct dyn_event *ev) +{ + return container_of(ev, struct synth_event, devent); +} + +static bool synth_event_is_busy(struct dyn_event *ev) +{ + struct synth_event *event = to_synth_event(ev); + + return event->ref != 0; +} + +static bool synth_event_match(const char *system, const char *event, + int argc, const char **argv, struct dyn_event *ev) +{ + struct synth_event *sev = to_synth_event(ev); + + return strcmp(sev->name, event) == 0 && + (!system || strcmp(system, SYNTH_SYSTEM) == 0); +} + +struct synth_trace_event { + struct trace_entry ent; + u64 fields[]; +}; + +static int synth_event_define_fields(struct trace_event_call *call) +{ + struct synth_trace_event trace; + int offset = offsetof(typeof(trace), fields); + struct synth_event *event = call->data; + unsigned int i, size, n_u64; + char *name, *type; + bool is_signed; + int ret = 0; + + for (i = 0, n_u64 = 0; i < event->n_fields; i++) { + size = event->fields[i]->size; + is_signed = event->fields[i]->is_signed; + type = event->fields[i]->type; + name = event->fields[i]->name; + ret = trace_define_field(call, type, name, offset, size, + is_signed, FILTER_OTHER); + if (ret) + break; + + event->fields[i]->offset = n_u64; + + if (event->fields[i]->is_string) { + offset += STR_VAR_LEN_MAX; + n_u64 += STR_VAR_LEN_MAX / sizeof(u64); + } else { + offset += sizeof(u64); + n_u64++; + } + } + + event->n_u64 = n_u64; + + return ret; +} + +static bool synth_field_signed(char *type) +{ + if (str_has_prefix(type, "u")) + return false; + if (strcmp(type, "gfp_t") == 0) + return false; + + return true; +} + +static int synth_field_is_string(char *type) +{ + if (strstr(type, "char[") != NULL) + return true; + + return false; +} + +static int synth_field_string_size(char *type) +{ + char buf[4], *end, *start; + unsigned int len; + int size, err; + + start = strstr(type, "char["); + if (start == NULL) + return -EINVAL; + start += sizeof("char[") - 1; + + end = strchr(type, ']'); + if (!end || end < start) + return -EINVAL; + + len = end - start; + if (len > 3) + return -EINVAL; + + strncpy(buf, start, len); + 
buf[len] = '\0'; + + err = kstrtouint(buf, 0, &size); + if (err) + return err; + + if (size > STR_VAR_LEN_MAX) + return -EINVAL; + + return size; +} + +static int synth_field_size(char *type) +{ + int size = 0; + + if (strcmp(type, "s64") == 0) + size = sizeof(s64); + else if (strcmp(type, "u64") == 0) + size = sizeof(u64); + else if (strcmp(type, "s32") == 0) + size = sizeof(s32); + else if (strcmp(type, "u32") == 0) + size = sizeof(u32); + else if (strcmp(type, "s16") == 0) + size = sizeof(s16); + else if (strcmp(type, "u16") == 0) + size = sizeof(u16); + else if (strcmp(type, "s8") == 0) + size = sizeof(s8); + else if (strcmp(type, "u8") == 0) + size = sizeof(u8); + else if (strcmp(type, "char") == 0) + size = sizeof(char); + else if (strcmp(type, "unsigned char") == 0) + size = sizeof(unsigned char); + else if (strcmp(type, "int") == 0) + size = sizeof(int); + else if (strcmp(type, "unsigned int") == 0) + size = sizeof(unsigned int); + else if (strcmp(type, "long") == 0) + size = sizeof(long); + else if (strcmp(type, "unsigned long") == 0) + size = sizeof(unsigned long); + else if (strcmp(type, "pid_t") == 0) + size = sizeof(pid_t); + else if (strcmp(type, "gfp_t") == 0) + size = sizeof(gfp_t); + else if (synth_field_is_string(type)) + size = synth_field_string_size(type); + + return size; +} + +static const char *synth_field_fmt(char *type) +{ + const char *fmt = "%llu"; + + if (strcmp(type, "s64") == 0) + fmt = "%lld"; + else if (strcmp(type, "u64") == 0) + fmt = "%llu"; + else if (strcmp(type, "s32") == 0) + fmt = "%d"; + else if (strcmp(type, "u32") == 0) + fmt = "%u"; + else if (strcmp(type, "s16") == 0) + fmt = "%d"; + else if (strcmp(type, "u16") == 0) + fmt = "%u"; + else if (strcmp(type, "s8") == 0) + fmt = "%d"; + else if (strcmp(type, "u8") == 0) + fmt = "%u"; + else if (strcmp(type, "char") == 0) + fmt = "%d"; + else if (strcmp(type, "unsigned char") == 0) + fmt = "%u"; + else if (strcmp(type, "int") == 0) + fmt = "%d"; + else if (strcmp(type, "unsigned int") == 0) + fmt = "%u"; + else if (strcmp(type, "long") == 0) + fmt = "%ld"; + else if (strcmp(type, "unsigned long") == 0) + fmt = "%lu"; + else if (strcmp(type, "pid_t") == 0) + fmt = "%d"; + else if (strcmp(type, "gfp_t") == 0) + fmt = "%x"; + else if (synth_field_is_string(type)) + fmt = "%s"; + + return fmt; +} + +static void print_synth_event_num_val(struct trace_seq *s, + char *print_fmt, char *name, + int size, u64 val, char *space) +{ + switch (size) { + case 1: + trace_seq_printf(s, print_fmt, name, (u8)val, space); + break; + + case 2: + trace_seq_printf(s, print_fmt, name, (u16)val, space); + break; + + case 4: + trace_seq_printf(s, print_fmt, name, (u32)val, space); + break; + + default: + trace_seq_printf(s, print_fmt, name, val, space); + break; + } +} + +static enum print_line_t print_synth_event(struct trace_iterator *iter, + int flags, + struct trace_event *event) +{ + struct trace_array *tr = iter->tr; + struct trace_seq *s = &iter->seq; + struct synth_trace_event *entry; + struct synth_event *se; + unsigned int i, n_u64; + char print_fmt[32]; + const char *fmt; + + entry = (struct synth_trace_event *)iter->ent; + se = container_of(event, struct synth_event, call.event); + + trace_seq_printf(s, "%s: ", se->name); + + for (i = 0, n_u64 = 0; i < se->n_fields; i++) { + if (trace_seq_has_overflowed(s)) + goto end; + + fmt = synth_field_fmt(se->fields[i]->type); + + /* parameter types */ + if (tr && tr->trace_flags & TRACE_ITER_VERBOSE) + trace_seq_printf(s, "%s ", fmt); + + snprintf(print_fmt, 
sizeof(print_fmt), "%%s=%s%%s", fmt); + + /* parameter values */ + if (se->fields[i]->is_string) { + trace_seq_printf(s, print_fmt, se->fields[i]->name, + (char *)&entry->fields[n_u64], + i == se->n_fields - 1 ? "" : " "); + n_u64 += STR_VAR_LEN_MAX / sizeof(u64); + } else { + struct trace_print_flags __flags[] = { + __def_gfpflag_names, {-1, NULL} }; + char *space = (i == se->n_fields - 1 ? "" : " "); + + print_synth_event_num_val(s, print_fmt, + se->fields[i]->name, + se->fields[i]->size, + entry->fields[n_u64], + space); + + if (strcmp(se->fields[i]->type, "gfp_t") == 0) { + trace_seq_puts(s, " ("); + trace_print_flags_seq(s, "|", + entry->fields[n_u64], + __flags); + trace_seq_putc(s, ')'); + } + n_u64++; + } + } +end: + trace_seq_putc(s, '\n'); + + return trace_handle_return(s); +} + +static struct trace_event_functions synth_event_funcs = { + .trace = print_synth_event +}; + +static notrace void trace_event_raw_event_synth(void *__data, + u64 *var_ref_vals, + unsigned int *var_ref_idx) +{ + struct trace_event_file *trace_file = __data; + struct synth_trace_event *entry; + struct trace_event_buffer fbuffer; + struct trace_buffer *buffer; + struct synth_event *event; + unsigned int i, n_u64, val_idx; + int fields_size = 0; + + event = trace_file->event_call->data; + + if (trace_trigger_soft_disabled(trace_file)) + return; + + fields_size = event->n_u64 * sizeof(u64); + + /* + * Avoid ring buffer recursion detection, as this event + * is being performed within another event. + */ + buffer = trace_file->tr->array_buffer.buffer; + ring_buffer_nest_start(buffer); + + entry = trace_event_buffer_reserve(&fbuffer, trace_file, + sizeof(*entry) + fields_size); + if (!entry) + goto out; + + for (i = 0, n_u64 = 0; i < event->n_fields; i++) { + val_idx = var_ref_idx[i]; + if (event->fields[i]->is_string) { + char *str_val = (char *)(long)var_ref_vals[val_idx]; + char *str_field = (char *)&entry->fields[n_u64]; + + strscpy(str_field, str_val, STR_VAR_LEN_MAX); + n_u64 += STR_VAR_LEN_MAX / sizeof(u64); + } else { + struct synth_field *field = event->fields[i]; + u64 val = var_ref_vals[val_idx]; + + switch (field->size) { + case 1: + *(u8 *)&entry->fields[n_u64] = (u8)val; + break; + + case 2: + *(u16 *)&entry->fields[n_u64] = (u16)val; + break; + + case 4: + *(u32 *)&entry->fields[n_u64] = (u32)val; + break; + + default: + entry->fields[n_u64] = val; + break; + } + n_u64++; + } + } + + trace_event_buffer_commit(&fbuffer); +out: + ring_buffer_nest_end(buffer); +} + +static void free_synth_event_print_fmt(struct trace_event_call *call) +{ + if (call) { + kfree(call->print_fmt); + call->print_fmt = NULL; + } +} + +static int __set_synth_event_print_fmt(struct synth_event *event, + char *buf, int len) +{ + const char *fmt; + int pos = 0; + int i; + + /* When len=0, we just calculate the needed length */ +#define LEN_OR_ZERO (len ? len - pos : 0) + + pos += snprintf(buf + pos, LEN_OR_ZERO, "\""); + for (i = 0; i < event->n_fields; i++) { + fmt = synth_field_fmt(event->fields[i]->type); + pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s%s", + event->fields[i]->name, fmt, + i == event->n_fields - 1 ? 
"" : ", "); + } + pos += snprintf(buf + pos, LEN_OR_ZERO, "\""); + + for (i = 0; i < event->n_fields; i++) { + pos += snprintf(buf + pos, LEN_OR_ZERO, + ", REC->%s", event->fields[i]->name); + } + +#undef LEN_OR_ZERO + + /* return the length of print_fmt */ + return pos; +} + +static int set_synth_event_print_fmt(struct trace_event_call *call) +{ + struct synth_event *event = call->data; + char *print_fmt; + int len; + + /* First: called with 0 length to calculate the needed length */ + len = __set_synth_event_print_fmt(event, NULL, 0); + + print_fmt = kmalloc(len + 1, GFP_KERNEL); + if (!print_fmt) + return -ENOMEM; + + /* Second: actually write the @print_fmt */ + __set_synth_event_print_fmt(event, print_fmt, len + 1); + call->print_fmt = print_fmt; + + return 0; +} + +static void free_synth_field(struct synth_field *field) +{ + kfree(field->type); + kfree(field->name); + kfree(field); +} + +static struct synth_field *parse_synth_field(int argc, const char **argv, + int *consumed) +{ + struct synth_field *field; + const char *prefix = NULL, *field_type = argv[0], *field_name, *array; + int len, ret = 0; + + if (field_type[0] == ';') + field_type++; + + if (!strcmp(field_type, "unsigned")) { + if (argc < 3) + return ERR_PTR(-EINVAL); + prefix = "unsigned "; + field_type = argv[1]; + field_name = argv[2]; + *consumed = 3; + } else { + field_name = argv[1]; + *consumed = 2; + } + + field = kzalloc(sizeof(*field), GFP_KERNEL); + if (!field) + return ERR_PTR(-ENOMEM); + + len = strlen(field_name); + array = strchr(field_name, '['); + if (array) + len -= strlen(array); + else if (field_name[len - 1] == ';') + len--; + + field->name = kmemdup_nul(field_name, len, GFP_KERNEL); + if (!field->name) { + ret = -ENOMEM; + goto free; + } + + if (field_type[0] == ';') + field_type++; + len = strlen(field_type) + 1; + if (array) + len += strlen(array); + if (prefix) + len += strlen(prefix); + + field->type = kzalloc(len, GFP_KERNEL); + if (!field->type) { + ret = -ENOMEM; + goto free; + } + if (prefix) + strcat(field->type, prefix); + strcat(field->type, field_type); + if (array) { + strcat(field->type, array); + if (field->type[len - 1] == ';') + field->type[len - 1] = '\0'; + } + + field->size = synth_field_size(field->type); + if (!field->size) { + ret = -EINVAL; + goto free; + } + + if (synth_field_is_string(field->type)) + field->is_string = true; + + field->is_signed = synth_field_signed(field->type); + + out: + return field; + free: + free_synth_field(field); + field = ERR_PTR(ret); + goto out; +} + +static void free_synth_tracepoint(struct tracepoint *tp) +{ + if (!tp) + return; + + kfree(tp->name); + kfree(tp); +} + +static struct tracepoint *alloc_synth_tracepoint(char *name) +{ + struct tracepoint *tp; + + tp = kzalloc(sizeof(*tp), GFP_KERNEL); + if (!tp) + return ERR_PTR(-ENOMEM); + + tp->name = kstrdup(name, GFP_KERNEL); + if (!tp->name) { + kfree(tp); + return ERR_PTR(-ENOMEM); + } + + return tp; +} + +struct synth_event *find_synth_event(const char *name) +{ + struct dyn_event *pos; + struct synth_event *event; + + for_each_dyn_event(pos) { + if (!is_synth_event(pos)) + continue; + event = to_synth_event(pos); + if (strcmp(event->name, name) == 0) + return event; + } + + return NULL; +} + +static struct trace_event_fields synth_event_fields_array[] = { + { .type = TRACE_FUNCTION_TYPE, + .define_fields = synth_event_define_fields }, + {} +}; + +static int register_synth_event(struct synth_event *event) +{ + struct trace_event_call *call = &event->call; + int ret = 0; + + event->call.class 
= &event->class; + event->class.system = kstrdup(SYNTH_SYSTEM, GFP_KERNEL); + if (!event->class.system) { + ret = -ENOMEM; + goto out; + } + + event->tp = alloc_synth_tracepoint(event->name); + if (IS_ERR(event->tp)) { + ret = PTR_ERR(event->tp); + event->tp = NULL; + goto out; + } + + INIT_LIST_HEAD(&call->class->fields); + call->event.funcs = &synth_event_funcs; + call->class->fields_array = synth_event_fields_array; + + ret = register_trace_event(&call->event); + if (!ret) { + ret = -ENODEV; + goto out; + } + call->flags = TRACE_EVENT_FL_TRACEPOINT; + call->class->reg = trace_event_reg; + call->class->probe = trace_event_raw_event_synth; + call->data = event; + call->tp = event->tp; + + ret = trace_add_event_call(call); + if (ret) { + pr_warn("Failed to register synthetic event: %s\n", + trace_event_name(call)); + goto err; + } + + ret = set_synth_event_print_fmt(call); + if (ret < 0) { + trace_remove_event_call(call); + goto err; + } + out: + return ret; + err: + unregister_trace_event(&call->event); + goto out; +} + +static int unregister_synth_event(struct synth_event *event) +{ + struct trace_event_call *call = &event->call; + int ret; + + ret = trace_remove_event_call(call); + + return ret; +} + +static void free_synth_event(struct synth_event *event) +{ + unsigned int i; + + if (!event) + return; + + for (i = 0; i < event->n_fields; i++) + free_synth_field(event->fields[i]); + + kfree(event->fields); + kfree(event->name); + kfree(event->class.system); + free_synth_tracepoint(event->tp); + free_synth_event_print_fmt(&event->call); + kfree(event); +} + +static struct synth_event *alloc_synth_event(const char *name, int n_fields, + struct synth_field **fields) +{ + struct synth_event *event; + unsigned int i; + + event = kzalloc(sizeof(*event), GFP_KERNEL); + if (!event) { + event = ERR_PTR(-ENOMEM); + goto out; + } + + event->name = kstrdup(name, GFP_KERNEL); + if (!event->name) { + kfree(event); + event = ERR_PTR(-ENOMEM); + goto out; + } + + event->fields = kcalloc(n_fields, sizeof(*event->fields), GFP_KERNEL); + if (!event->fields) { + free_synth_event(event); + event = ERR_PTR(-ENOMEM); + goto out; + } + + dyn_event_init(&event->devent, &synth_event_ops); + + for (i = 0; i < n_fields; i++) + event->fields[i] = fields[i]; + + event->n_fields = n_fields; + out: + return event; +} + +static int synth_event_check_arg_fn(void *data) +{ + struct dynevent_arg_pair *arg_pair = data; + int size; + + size = synth_field_size((char *)arg_pair->lhs); + + return size ? 0 : -EINVAL; +} + +/** + * synth_event_add_field - Add a new field to a synthetic event cmd + * @cmd: A pointer to the dynevent_cmd struct representing the new event + * @type: The type of the new field to add + * @name: The name of the new field to add + * + * Add a new field to a synthetic event cmd object. Field ordering is in + * the same order the fields are added. + * + * See synth_field_size() for available types. If field_name contains + * [n] the field is considered to be an array. + * + * Return: 0 if successful, error otherwise. 
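+ *
+ * A minimal usage sketch (untested; the "test_latency" event and its
+ * fields are purely illustrative):
+ *
+ *	struct dynevent_cmd cmd;
+ *	char *buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
+ *	int ret;
+ *
+ *	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
+ *	ret = synth_event_gen_cmd_start(&cmd, "test_latency", THIS_MODULE,
+ *					"u64", "lat");
+ *	if (!ret)
+ *		ret = synth_event_add_field(&cmd, "pid_t", "pid");
+ *	if (!ret)
+ *		ret = synth_event_gen_cmd_end(&cmd);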
+ */ +int synth_event_add_field(struct dynevent_cmd *cmd, const char *type, + const char *name) +{ + struct dynevent_arg_pair arg_pair; + int ret; + + if (cmd->type != DYNEVENT_TYPE_SYNTH) + return -EINVAL; + + if (!type || !name) + return -EINVAL; + + dynevent_arg_pair_init(&arg_pair, 0, ';'); + + arg_pair.lhs = type; + arg_pair.rhs = name; + + ret = dynevent_arg_pair_add(cmd, &arg_pair, synth_event_check_arg_fn); + if (ret) + return ret; + + if (++cmd->n_fields > SYNTH_FIELDS_MAX) + ret = -EINVAL; + + return ret; +} +EXPORT_SYMBOL_GPL(synth_event_add_field); + +/** + * synth_event_add_field_str - Add a new field to a synthetic event cmd + * @cmd: A pointer to the dynevent_cmd struct representing the new event + * @type_name: The type and name of the new field to add, as a single string + * + * Add a new field to a synthetic event cmd object, as a single + * string. The @type_name string is expected to be of the form 'type + * name', which will be appended by ';'. No sanity checking is done - + * what's passed in is assumed to already be well-formed. Field + * ordering is in the same order the fields are added. + * + * See synth_field_size() for available types. If field_name contains + * [n] the field is considered to be an array. + * + * Return: 0 if successful, error otherwise. + */ +int synth_event_add_field_str(struct dynevent_cmd *cmd, const char *type_name) +{ + struct dynevent_arg arg; + int ret; + + if (cmd->type != DYNEVENT_TYPE_SYNTH) + return -EINVAL; + + if (!type_name) + return -EINVAL; + + dynevent_arg_init(&arg, ';'); + + arg.str = type_name; + + ret = dynevent_arg_add(cmd, &arg, NULL); + if (ret) + return ret; + + if (++cmd->n_fields > SYNTH_FIELDS_MAX) + ret = -EINVAL; + + return ret; +} +EXPORT_SYMBOL_GPL(synth_event_add_field_str); + +/** + * synth_event_add_fields - Add multiple fields to a synthetic event cmd + * @cmd: A pointer to the dynevent_cmd struct representing the new event + * @fields: An array of type/name field descriptions + * @n_fields: The number of field descriptions contained in the fields array + * + * Add a new set of fields to a synthetic event cmd object. The event + * fields that will be defined for the event should be passed in as an + * array of struct synth_field_desc, and the number of elements in the + * array passed in as n_fields. Field ordering will retain the + * ordering given in the fields array. + * + * See synth_field_size() for available types. If field_name contains + * [n] the field is considered to be an array. + * + * Return: 0 if successful, error otherwise. + */ +int synth_event_add_fields(struct dynevent_cmd *cmd, + struct synth_field_desc *fields, + unsigned int n_fields) +{ + unsigned int i; + int ret = 0; + + for (i = 0; i < n_fields; i++) { + if (fields[i].type == NULL || fields[i].name == NULL) { + ret = -EINVAL; + break; + } + + ret = synth_event_add_field(cmd, fields[i].type, fields[i].name); + if (ret) + break; + } + + return ret; +} +EXPORT_SYMBOL_GPL(synth_event_add_fields); + +/** + * __synth_event_gen_cmd_start - Start a synthetic event command from arg list + * @cmd: A pointer to the dynevent_cmd struct representing the new event + * @name: The name of the synthetic event + * @mod: The module creating the event, NULL if not created from a module + * @args: Variable number of arg (pairs), one pair for each field + * + * NOTE: Users normally won't want to call this function directly, but + * rather use the synth_event_gen_cmd_start() wrapper, which + * automatically adds a NULL to the end of the arg list. 
If this + * function is used directly, make sure the last arg in the variable + * arg list is NULL. + * + * Generate a synthetic event command to be executed by + * synth_event_gen_cmd_end(). This function can be used to generate + * the complete command or only the first part of it; in the latter + * case, synth_event_add_field(), synth_event_add_field_str(), or + * synth_event_add_fields() can be used to add more fields following + * this. + * + * There should be an even number variable args, each pair consisting + * of a type followed by a field name. + * + * See synth_field_size() for available types. If field_name contains + * [n] the field is considered to be an array. + * + * Return: 0 if successful, error otherwise. + */ +int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd, const char *name, + struct module *mod, ...) +{ + struct dynevent_arg arg; + va_list args; + int ret; + + cmd->event_name = name; + cmd->private_data = mod; + + if (cmd->type != DYNEVENT_TYPE_SYNTH) + return -EINVAL; + + dynevent_arg_init(&arg, 0); + arg.str = name; + ret = dynevent_arg_add(cmd, &arg, NULL); + if (ret) + return ret; + + va_start(args, mod); + for (;;) { + const char *type, *name; + + type = va_arg(args, const char *); + if (!type) + break; + name = va_arg(args, const char *); + if (!name) + break; + + if (++cmd->n_fields > SYNTH_FIELDS_MAX) { + ret = -EINVAL; + break; + } + + ret = synth_event_add_field(cmd, type, name); + if (ret) + break; + } + va_end(args); + + return ret; +} +EXPORT_SYMBOL_GPL(__synth_event_gen_cmd_start); + +/** + * synth_event_gen_cmd_array_start - Start synthetic event command from an array + * @cmd: A pointer to the dynevent_cmd struct representing the new event + * @name: The name of the synthetic event + * @fields: An array of type/name field descriptions + * @n_fields: The number of field descriptions contained in the fields array + * + * Generate a synthetic event command to be executed by + * synth_event_gen_cmd_end(). This function can be used to generate + * the complete command or only the first part of it; in the latter + * case, synth_event_add_field(), synth_event_add_field_str(), or + * synth_event_add_fields() can be used to add more fields following + * this. + * + * The event fields that will be defined for the event should be + * passed in as an array of struct synth_field_desc, and the number of + * elements in the array passed in as n_fields. Field ordering will + * retain the ordering given in the fields array. + * + * See synth_field_size() for available types. If field_name contains + * [n] the field is considered to be an array. + * + * Return: 0 if successful, error otherwise. 
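+ *
+ * A usage sketch (untested; the field descriptions and event name are
+ * illustrative only):
+ *
+ *	struct synth_field_desc fields[] = {
+ *		{ .type = "u64",   .name = "lat" },
+ *		{ .type = "pid_t", .name = "pid" },
+ *	};
+ *	struct dynevent_cmd cmd;
+ *	char *buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
+ *
+ *	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
+ *	synth_event_gen_cmd_array_start(&cmd, "test_latency", THIS_MODULE,
+ *					fields, ARRAY_SIZE(fields));
+ *	synth_event_gen_cmd_end(&cmd);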
+ */ +int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd, const char *name, + struct module *mod, + struct synth_field_desc *fields, + unsigned int n_fields) +{ + struct dynevent_arg arg; + unsigned int i; + int ret = 0; + + cmd->event_name = name; + cmd->private_data = mod; + + if (cmd->type != DYNEVENT_TYPE_SYNTH) + return -EINVAL; + + if (n_fields > SYNTH_FIELDS_MAX) + return -EINVAL; + + dynevent_arg_init(&arg, 0); + arg.str = name; + ret = dynevent_arg_add(cmd, &arg, NULL); + if (ret) + return ret; + + for (i = 0; i < n_fields; i++) { + if (fields[i].type == NULL || fields[i].name == NULL) + return -EINVAL; + + ret = synth_event_add_field(cmd, fields[i].type, fields[i].name); + if (ret) + break; + } + + return ret; +} +EXPORT_SYMBOL_GPL(synth_event_gen_cmd_array_start); + +static int __create_synth_event(int argc, const char *name, const char **argv) +{ + struct synth_field *field, *fields[SYNTH_FIELDS_MAX]; + struct synth_event *event = NULL; + int i, consumed = 0, n_fields = 0, ret = 0; + + /* + * Argument syntax: + * - Add synthetic event: field[;field] ... + * - Remove synthetic event: ! field[;field] ... + * where 'field' = type field_name + */ + + if (name[0] == '\0' || argc < 1) + return -EINVAL; + + mutex_lock(&event_mutex); + + event = find_synth_event(name); + if (event) { + ret = -EEXIST; + goto out; + } + + for (i = 0; i < argc - 1; i++) { + if (strcmp(argv[i], ";") == 0) + continue; + if (n_fields == SYNTH_FIELDS_MAX) { + ret = -EINVAL; + goto err; + } + + field = parse_synth_field(argc - i, &argv[i], &consumed); + if (IS_ERR(field)) { + ret = PTR_ERR(field); + goto err; + } + fields[n_fields++] = field; + i += consumed - 1; + } + + if (i < argc && strcmp(argv[i], ";") != 0) { + ret = -EINVAL; + goto err; + } + + event = alloc_synth_event(name, n_fields, fields); + if (IS_ERR(event)) { + ret = PTR_ERR(event); + event = NULL; + goto err; + } + ret = register_synth_event(event); + if (!ret) + dyn_event_add(&event->devent); + else + free_synth_event(event); + out: + mutex_unlock(&event_mutex); + + return ret; + err: + for (i = 0; i < n_fields; i++) + free_synth_field(fields[i]); + + goto out; +} + +/** + * synth_event_create - Create a new synthetic event + * @name: The name of the new sythetic event + * @fields: An array of type/name field descriptions + * @n_fields: The number of field descriptions contained in the fields array + * @mod: The module creating the event, NULL if not created from a module + * + * Create a new synthetic event with the given name under the + * trace/events/synthetic/ directory. The event fields that will be + * defined for the event should be passed in as an array of struct + * synth_field_desc, and the number elements in the array passed in as + * n_fields. Field ordering will retain the ordering given in the + * fields array. + * + * If the new synthetic event is being created from a module, the mod + * param must be non-NULL. This will ensure that the trace buffer + * won't contain unreadable events. + * + * The new synth event should be deleted using synth_event_delete() + * function. The new synthetic event can be generated from modules or + * other kernel code using trace_synth_event() and related functions. + * + * Return: 0 if successful, error otherwise. 
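+ *
+ * A minimal sketch (untested; event and field names are hypothetical):
+ *
+ *	static struct synth_field_desc fields[] = {
+ *		{ .type = "u64",   .name = "lat" },
+ *		{ .type = "pid_t", .name = "pid" },
+ *	};
+ *	int ret;
+ *
+ *	ret = synth_event_create("test_latency", fields,
+ *				 ARRAY_SIZE(fields), THIS_MODULE);
+ *	...
+ *	ret = synth_event_delete("test_latency");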
+ */ +int synth_event_create(const char *name, struct synth_field_desc *fields, + unsigned int n_fields, struct module *mod) +{ + struct dynevent_cmd cmd; + char *buf; + int ret; + + buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN); + + ret = synth_event_gen_cmd_array_start(&cmd, name, mod, + fields, n_fields); + if (ret) + goto out; + + ret = synth_event_gen_cmd_end(&cmd); + out: + kfree(buf); + + return ret; +} +EXPORT_SYMBOL_GPL(synth_event_create); + +static int destroy_synth_event(struct synth_event *se) +{ + int ret; + + if (se->ref) + ret = -EBUSY; + else { + ret = unregister_synth_event(se); + if (!ret) { + dyn_event_remove(&se->devent); + free_synth_event(se); + } + } + + return ret; +} + +/** + * synth_event_delete - Delete a synthetic event + * @event_name: The name of the new sythetic event + * + * Delete a synthetic event that was created with synth_event_create(). + * + * Return: 0 if successful, error otherwise. + */ +int synth_event_delete(const char *event_name) +{ + struct synth_event *se = NULL; + struct module *mod = NULL; + int ret = -ENOENT; + + mutex_lock(&event_mutex); + se = find_synth_event(event_name); + if (se) { + mod = se->mod; + ret = destroy_synth_event(se); + } + mutex_unlock(&event_mutex); + + if (mod) { + mutex_lock(&trace_types_lock); + /* + * It is safest to reset the ring buffer if the module + * being unloaded registered any events that were + * used. The only worry is if a new module gets + * loaded, and takes on the same id as the events of + * this module. When printing out the buffer, traced + * events left over from this module may be passed to + * the new module events and unexpected results may + * occur. + */ + tracing_reset_all_online_cpus(); + mutex_unlock(&trace_types_lock); + } + + return ret; +} +EXPORT_SYMBOL_GPL(synth_event_delete); + +static int create_or_delete_synth_event(int argc, char **argv) +{ + const char *name = argv[0]; + int ret; + + /* trace_run_command() ensures argc != 0 */ + if (name[0] == '!') { + ret = synth_event_delete(name + 1); + return ret; + } + + ret = __create_synth_event(argc - 1, name, (const char **)argv + 1); + return ret == -ECANCELED ? -EINVAL : ret; +} + +static int synth_event_run_command(struct dynevent_cmd *cmd) +{ + struct synth_event *se; + int ret; + + ret = trace_run_command(cmd->seq.buffer, create_or_delete_synth_event); + if (ret) + return ret; + + se = find_synth_event(cmd->event_name); + if (WARN_ON(!se)) + return -ENOENT; + + se->mod = cmd->private_data; + + return ret; +} + +/** + * synth_event_cmd_init - Initialize a synthetic event command object + * @cmd: A pointer to the dynevent_cmd struct representing the new event + * @buf: A pointer to the buffer used to build the command + * @maxlen: The length of the buffer passed in @buf + * + * Initialize a synthetic event command object. Use this before + * calling any of the other dyenvent_cmd functions. 
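+ *
+ * For example (a sketch only; error handling of the allocation is
+ * elided):
+ *
+ *	struct dynevent_cmd cmd;
+ *	char *buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
+ *
+ *	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);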
+ */ +void synth_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen) +{ + dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_SYNTH, + synth_event_run_command); +} +EXPORT_SYMBOL_GPL(synth_event_cmd_init); + +static inline int +__synth_event_trace_start(struct trace_event_file *file, + struct synth_event_trace_state *trace_state) +{ + int entry_size, fields_size = 0; + int ret = 0; + + memset(trace_state, '\0', sizeof(*trace_state)); + + /* + * Normal event tracing doesn't get called at all unless the + * ENABLED bit is set (which attaches the probe thus allowing + * this code to be called, etc). Because this is called + * directly by the user, we don't have that but we still need + * to honor not logging when disabled. For the the iterated + * trace case, we save the enabed state upon start and just + * ignore the following data calls. + */ + if (!(file->flags & EVENT_FILE_FL_ENABLED) || + trace_trigger_soft_disabled(file)) { + trace_state->disabled = true; + ret = -ENOENT; + goto out; + } + + trace_state->event = file->event_call->data; + + fields_size = trace_state->event->n_u64 * sizeof(u64); + + /* + * Avoid ring buffer recursion detection, as this event + * is being performed within another event. + */ + trace_state->buffer = file->tr->array_buffer.buffer; + ring_buffer_nest_start(trace_state->buffer); + + entry_size = sizeof(*trace_state->entry) + fields_size; + trace_state->entry = trace_event_buffer_reserve(&trace_state->fbuffer, + file, + entry_size); + if (!trace_state->entry) { + ring_buffer_nest_end(trace_state->buffer); + ret = -EINVAL; + } +out: + return ret; +} + +static inline void +__synth_event_trace_end(struct synth_event_trace_state *trace_state) +{ + trace_event_buffer_commit(&trace_state->fbuffer); + + ring_buffer_nest_end(trace_state->buffer); +} + +/** + * synth_event_trace - Trace a synthetic event + * @file: The trace_event_file representing the synthetic event + * @n_vals: The number of values in vals + * @args: Variable number of args containing the event values + * + * Trace a synthetic event using the values passed in the variable + * argument list. + * + * The argument list should be a list 'n_vals' u64 values. The number + * of vals must match the number of field in the synthetic event, and + * must be in the same order as the synthetic event fields. + * + * All vals should be cast to u64, and string vals are just pointers + * to strings, cast to u64. Strings will be copied into space + * reserved in the event for the string, using these pointers. + * + * Return: 0 on success, err otherwise. + */ +int synth_event_trace(struct trace_event_file *file, unsigned int n_vals, ...) 
+{ + struct synth_event_trace_state state; + unsigned int i, n_u64; + va_list args; + int ret; + + ret = __synth_event_trace_start(file, &state); + if (ret) { + if (ret == -ENOENT) + ret = 0; /* just disabled, not really an error */ + return ret; + } + + if (n_vals != state.event->n_fields) { + ret = -EINVAL; + goto out; + } + + va_start(args, n_vals); + for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) { + u64 val; + + val = va_arg(args, u64); + + if (state.event->fields[i]->is_string) { + char *str_val = (char *)(long)val; + char *str_field = (char *)&state.entry->fields[n_u64]; + + strscpy(str_field, str_val, STR_VAR_LEN_MAX); + n_u64 += STR_VAR_LEN_MAX / sizeof(u64); + } else { + struct synth_field *field = state.event->fields[i]; + + switch (field->size) { + case 1: + *(u8 *)&state.entry->fields[n_u64] = (u8)val; + break; + + case 2: + *(u16 *)&state.entry->fields[n_u64] = (u16)val; + break; + + case 4: + *(u32 *)&state.entry->fields[n_u64] = (u32)val; + break; + + default: + state.entry->fields[n_u64] = val; + break; + } + n_u64++; + } + } + va_end(args); +out: + __synth_event_trace_end(&state); + + return ret; +} +EXPORT_SYMBOL_GPL(synth_event_trace); + +/** + * synth_event_trace_array - Trace a synthetic event from an array + * @file: The trace_event_file representing the synthetic event + * @vals: Array of values + * @n_vals: The number of values in vals + * + * Trace a synthetic event using the values passed in as 'vals'. + * + * The 'vals' array is just an array of 'n_vals' u64. The number of + * vals must match the number of field in the synthetic event, and + * must be in the same order as the synthetic event fields. + * + * All vals should be cast to u64, and string vals are just pointers + * to strings, cast to u64. Strings will be copied into space + * reserved in the event for the string, using these pointers. + * + * Return: 0 on success, err otherwise. + */ +int synth_event_trace_array(struct trace_event_file *file, u64 *vals, + unsigned int n_vals) +{ + struct synth_event_trace_state state; + unsigned int i, n_u64; + int ret; + + ret = __synth_event_trace_start(file, &state); + if (ret) { + if (ret == -ENOENT) + ret = 0; /* just disabled, not really an error */ + return ret; + } + + if (n_vals != state.event->n_fields) { + ret = -EINVAL; + goto out; + } + + for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) { + if (state.event->fields[i]->is_string) { + char *str_val = (char *)(long)vals[i]; + char *str_field = (char *)&state.entry->fields[n_u64]; + + strscpy(str_field, str_val, STR_VAR_LEN_MAX); + n_u64 += STR_VAR_LEN_MAX / sizeof(u64); + } else { + struct synth_field *field = state.event->fields[i]; + u64 val = vals[i]; + + switch (field->size) { + case 1: + *(u8 *)&state.entry->fields[n_u64] = (u8)val; + break; + + case 2: + *(u16 *)&state.entry->fields[n_u64] = (u16)val; + break; + + case 4: + *(u32 *)&state.entry->fields[n_u64] = (u32)val; + break; + + default: + state.entry->fields[n_u64] = val; + break; + } + n_u64++; + } + } +out: + __synth_event_trace_end(&state); + + return ret; +} +EXPORT_SYMBOL_GPL(synth_event_trace_array); + +/** + * synth_event_trace_start - Start piecewise synthetic event trace + * @file: The trace_event_file representing the synthetic event + * @trace_state: A pointer to object tracking the piecewise trace state + * + * Start the trace of a synthetic event field-by-field rather than all + * at once. 
+ * + * This function 'opens' an event trace, which means space is reserved + * for the event in the trace buffer, after which the event's + * individual field values can be set through either + * synth_event_add_next_val() or synth_event_add_val(). + * + * A pointer to a trace_state object is passed in, which will keep + * track of the current event trace state until the event trace is + * closed (and the event finally traced) using + * synth_event_trace_end(). + * + * Note that synth_event_trace_end() must be called after all values + * have been added for each event trace, regardless of whether adding + * all field values succeeded or not. + * + * Note also that for a given event trace, all fields must be added + * using either synth_event_add_next_val() or synth_event_add_val() + * but not both together or interleaved. + * + * Return: 0 on success, err otherwise. + */ +int synth_event_trace_start(struct trace_event_file *file, + struct synth_event_trace_state *trace_state) +{ + int ret; + + if (!trace_state) + return -EINVAL; + + ret = __synth_event_trace_start(file, trace_state); + if (ret == -ENOENT) + ret = 0; /* just disabled, not really an error */ + + return ret; +} +EXPORT_SYMBOL_GPL(synth_event_trace_start); + +static int __synth_event_add_val(const char *field_name, u64 val, + struct synth_event_trace_state *trace_state) +{ + struct synth_field *field = NULL; + struct synth_trace_event *entry; + struct synth_event *event; + int i, ret = 0; + + if (!trace_state) { + ret = -EINVAL; + goto out; + } + + /* can't mix add_next_synth_val() with add_synth_val() */ + if (field_name) { + if (trace_state->add_next) { + ret = -EINVAL; + goto out; + } + trace_state->add_name = true; + } else { + if (trace_state->add_name) { + ret = -EINVAL; + goto out; + } + trace_state->add_next = true; + } + + if (trace_state->disabled) + goto out; + + event = trace_state->event; + if (trace_state->add_name) { + for (i = 0; i < event->n_fields; i++) { + field = event->fields[i]; + if (strcmp(field->name, field_name) == 0) + break; + } + if (!field) { + ret = -EINVAL; + goto out; + } + } else { + if (trace_state->cur_field >= event->n_fields) { + ret = -EINVAL; + goto out; + } + field = event->fields[trace_state->cur_field++]; + } + + entry = trace_state->entry; + if (field->is_string) { + char *str_val = (char *)(long)val; + char *str_field; + + if (!str_val) { + ret = -EINVAL; + goto out; + } + + str_field = (char *)&entry->fields[field->offset]; + strscpy(str_field, str_val, STR_VAR_LEN_MAX); + } else { + switch (field->size) { + case 1: + *(u8 *)&trace_state->entry->fields[field->offset] = (u8)val; + break; + + case 2: + *(u16 *)&trace_state->entry->fields[field->offset] = (u16)val; + break; + + case 4: + *(u32 *)&trace_state->entry->fields[field->offset] = (u32)val; + break; + + default: + trace_state->entry->fields[field->offset] = val; + break; + } + } + out: + return ret; +} + +/** + * synth_event_add_next_val - Add the next field's value to an open synth trace + * @val: The value to set the next field to + * @trace_state: A pointer to object tracking the piecewise trace state + * + * Set the value of the next field in an event that's been opened by + * synth_event_trace_start(). + * + * The val param should be the value cast to u64. If the value points + * to a string, the val param should be a char * cast to u64. 
+ *
+ * This function assumes all the fields in an event are to be set one
+ * after another - successive calls to this function are made, one for
+ * each field, in the order of the fields in the event, until all
+ * fields have been set. If you'd rather set each field individually
+ * without regard to ordering, synth_event_add_val() can be used
+ * instead.
+ *
+ * Note however that synth_event_add_next_val() and
+ * synth_event_add_val() can't be intermixed for a given event trace -
+ * one or the other but not both can be used at the same time.
+ *
+ * Note also that synth_event_trace_end() must be called after all
+ * values have been added for each event trace, regardless of whether
+ * adding all field values succeeded or not.
+ *
+ * Return: 0 on success, err otherwise.
+ */
+int synth_event_add_next_val(u64 val,
+			     struct synth_event_trace_state *trace_state)
+{
+	return __synth_event_add_val(NULL, val, trace_state);
+}
+EXPORT_SYMBOL_GPL(synth_event_add_next_val);
+
+/**
+ * synth_event_add_val - Add a named field's value to an open synth trace
+ * @field_name: The name of the synthetic event field value to set
+ * @val: The value to set the next field to
+ * @trace_state: A pointer to object tracking the piecewise trace state
+ *
+ * Set the value of the named field in an event that's been opened by
+ * synth_event_trace_start().
+ *
+ * The val param should be the value cast to u64. If the value points
+ * to a string, the val param should be a char * cast to u64.
+ *
+ * This function looks up the field name, and if found, sets the field
+ * to the specified value. This lookup makes this function more
+ * expensive than synth_event_add_next_val(), so use that or the
+ * non-piecewise synth_event_trace() instead if efficiency is more
+ * important.
+ *
+ * Note however that synth_event_add_next_val() and
+ * synth_event_add_val() can't be intermixed for a given event trace -
+ * one or the other but not both can be used at the same time.
+ *
+ * Note also that synth_event_trace_end() must be called after all
+ * values have been added for each event trace, regardless of whether
+ * adding all field values succeeded or not.
+ *
+ * Return: 0 on success, err otherwise.
+ */
+int synth_event_add_val(const char *field_name, u64 val,
+			struct synth_event_trace_state *trace_state)
+{
+	return __synth_event_add_val(field_name, val, trace_state);
+}
+EXPORT_SYMBOL_GPL(synth_event_add_val);
+
+/**
+ * synth_event_trace_end - End piecewise synthetic event trace
+ * @trace_state: A pointer to object tracking the piecewise trace state
+ *
+ * End the trace of a synthetic event opened by
+ * synth_event_trace_start().
+ *
+ * This function 'closes' an event trace, which basically means that
+ * it commits the reserved event and cleans up other loose ends.
+ *
+ * A pointer to a trace_state object is passed in, which will keep
+ * track of the current event trace state opened with
+ * synth_event_trace_start().
+ *
+ * Note that this function must be called after all values have been
+ * added for each event trace, regardless of whether adding all field
+ * values succeeded or not.
+ *
+ * Return: 0 on success, err otherwise.
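+ *
+ * A sketch of the whole piecewise sequence (untested; the event file
+ * and the values being added are hypothetical):
+ *
+ *	struct synth_event_trace_state state;
+ *	int ret;
+ *
+ *	ret = synth_event_trace_start(file, &state);
+ *	if (!ret) {
+ *		synth_event_add_next_val(lat, &state);
+ *		synth_event_add_next_val(pid, &state);
+ *		synth_event_trace_end(&state);
+ *	}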
+ */ +int synth_event_trace_end(struct synth_event_trace_state *trace_state) +{ + if (!trace_state) + return -EINVAL; + + __synth_event_trace_end(trace_state); + + return 0; +} +EXPORT_SYMBOL_GPL(synth_event_trace_end); + +static int create_synth_event(int argc, const char **argv) +{ + const char *name = argv[0]; + int len; + + if (name[0] != 's' || name[1] != ':') + return -ECANCELED; + name += 2; + + /* This interface accepts group name prefix */ + if (strchr(name, '/')) { + len = str_has_prefix(name, SYNTH_SYSTEM "/"); + if (len == 0) + return -EINVAL; + name += len; + } + return __create_synth_event(argc - 1, name, argv + 1); +} + +static int synth_event_release(struct dyn_event *ev) +{ + struct synth_event *event = to_synth_event(ev); + int ret; + + if (event->ref) + return -EBUSY; + + ret = unregister_synth_event(event); + if (ret) + return ret; + + dyn_event_remove(ev); + free_synth_event(event); + return 0; +} + +static int __synth_event_show(struct seq_file *m, struct synth_event *event) +{ + struct synth_field *field; + unsigned int i; + + seq_printf(m, "%s\t", event->name); + + for (i = 0; i < event->n_fields; i++) { + field = event->fields[i]; + + /* parameter values */ + seq_printf(m, "%s %s%s", field->type, field->name, + i == event->n_fields - 1 ? "" : "; "); + } + + seq_putc(m, '\n'); + + return 0; +} + +static int synth_event_show(struct seq_file *m, struct dyn_event *ev) +{ + struct synth_event *event = to_synth_event(ev); + + seq_printf(m, "s:%s/", event->class.system); + + return __synth_event_show(m, event); +} + +static int synth_events_seq_show(struct seq_file *m, void *v) +{ + struct dyn_event *ev = v; + + if (!is_synth_event(ev)) + return 0; + + return __synth_event_show(m, to_synth_event(ev)); +} + +static const struct seq_operations synth_events_seq_op = { + .start = dyn_event_seq_start, + .next = dyn_event_seq_next, + .stop = dyn_event_seq_stop, + .show = synth_events_seq_show, +}; + +static int synth_events_open(struct inode *inode, struct file *file) +{ + int ret; + + ret = security_locked_down(LOCKDOWN_TRACEFS); + if (ret) + return ret; + + if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { + ret = dyn_events_release_all(&synth_event_ops); + if (ret < 0) + return ret; + } + + return seq_open(file, &synth_events_seq_op); +} + +static ssize_t synth_events_write(struct file *file, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + return trace_parse_run_command(file, buffer, count, ppos, + create_or_delete_synth_event); +} + +static const struct file_operations synth_events_fops = { + .open = synth_events_open, + .write = synth_events_write, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static __init int trace_events_synth_init(void) +{ + struct dentry *entry = NULL; + struct dentry *d_tracer; + int err = 0; + + err = dyn_event_register(&synth_event_ops); + if (err) { + pr_warn("Could not register synth_event_ops\n"); + return err; + } + + d_tracer = tracing_init_dentry(); + if (IS_ERR(d_tracer)) { + err = PTR_ERR(d_tracer); + goto err; + } + + entry = tracefs_create_file("synthetic_events", 0644, d_tracer, + NULL, &synth_events_fops); + if (!entry) { + err = -ENODEV; + goto err; + } + + return err; + err: + pr_warn("Could not create tracefs 'synthetic_events' entry\n"); + + return err; +} + +fs_initcall(trace_events_synth_init); diff --git a/kernel/trace/trace_synth.h b/kernel/trace/trace_synth.h new file mode 100644 index 0000000000000..ac35c45207c4f --- /dev/null +++ b/kernel/trace/trace_synth.h @@ -0,0 
+1,36 @@ +// SPDX-License-Identifier: GPL-2.0 +#ifndef __TRACE_SYNTH_H +#define __TRACE_SYNTH_H + +#include "trace_dynevent.h" + +#define SYNTH_SYSTEM "synthetic" +#define SYNTH_FIELDS_MAX 32 + +#define STR_VAR_LEN_MAX 32 /* must be multiple of sizeof(u64) */ + +struct synth_field { + char *type; + char *name; + size_t size; + unsigned int offset; + bool is_signed; + bool is_string; +}; + +struct synth_event { + struct dyn_event devent; + int ref; + char *name; + struct synth_field **fields; + unsigned int n_fields; + unsigned int n_u64; + struct trace_event_class class; + struct trace_event_call call; + struct tracepoint *tp; + struct module *mod; +}; + +extern struct synth_event *find_synth_event(const char *name); + +#endif /* __TRACE_SYNTH_H */ -- GitLab From bea24f766efceb5322f704015c56596ff94642f7 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Thu, 28 May 2020 14:32:38 -0500 Subject: [PATCH 0989/1971] selftests/ftrace: Distinguish between hist and synthetic event checks With synthetic events now a separate config item as a result of 'tracing: Move synthetic events to a separate file', tests that use both need to explicitly check for hist trigger support rather than relying on hist triggers to pull in synthetic events. Add an additional hist trigger check to all the trigger tests that now require it, otherwise they'll fail if synthetic events but not hist triggers are enabled. Link: http://lkml.kernel.org/r/af36c539006ef2768114b4ed38e6b054f7c7a3bd.1590693308.git.zanussi@kernel.org Acked-by: Masami Hiramatsu Signed-off-by: Tom Zanussi Signed-off-by: Steven Rostedt (VMware) --- .../trigger/inter-event/trigger-field-variable-support.tc | 5 +++++ .../trigger/inter-event/trigger-inter-event-combined-hist.tc | 5 +++++ .../trigger/inter-event/trigger-multi-actions-accept.tc | 5 +++++ .../trigger/inter-event/trigger-onmatch-action-hist.tc | 5 +++++ .../trigger/inter-event/trigger-onmatch-onmax-action-hist.tc | 5 +++++ .../test.d/trigger/inter-event/trigger-onmax-action-hist.tc | 5 +++++ .../trigger/inter-event/trigger-snapshot-action-hist.tc | 5 +++++ .../test.d/trigger/inter-event/trigger-trace-action-hist.tc | 5 +++++ 8 files changed, 40 insertions(+) diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-field-variable-support.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-field-variable-support.tc index 77be6e1f6e7b3..e232059a8ab20 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-field-variable-support.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-field-variable-support.tc @@ -17,6 +17,11 @@ if [ ! -f synthetic_events ]; then exit_unsupported fi +if [ ! -f events/sched/sched_process_fork/hist ]; then + echo "hist trigger is not supported" + exit_unsupported +fi + echo "Test field variable support" echo 'wakeup_latency u64 lat; pid_t pid; int prio; char comm[16]' > synthetic_events diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc index f3eb8aacec0e7..07cfcb8157b68 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc @@ -17,6 +17,11 @@ if [ ! -f synthetic_events ]; then exit_unsupported fi +if [ ! 
-f events/sched/sched_process_fork/hist ]; then + echo "hist trigger is not supported" + exit_unsupported +fi + echo "Test create synthetic event" echo 'waking_latency u64 lat pid_t pid' > synthetic_events diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc index d281f056f9809..73e413c2ca268 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc @@ -17,6 +17,11 @@ if [ ! -f synthetic_events ]; then exit_unsupported fi +if [ ! -f events/sched/sched_process_fork/hist ]; then + echo "hist trigger is not supported" + exit_unsupported +fi + echo "Test multiple actions on hist trigger" echo 'wakeup_latency u64 lat; pid_t pid' >> synthetic_events TRIGGER1=events/sched/sched_wakeup/trigger diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-action-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-action-hist.tc index a708f0e7858a0..ebe0ad827f9f2 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-action-hist.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-action-hist.tc @@ -17,6 +17,11 @@ if [ ! -f synthetic_events ]; then exit_unsupported fi +if [ ! -f events/sched/sched_process_fork/hist ]; then + echo "hist trigger is not supported" + exit_unsupported +fi + echo "Test create synthetic event" echo 'wakeup_latency u64 lat pid_t pid char comm[16]' > synthetic_events diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-onmax-action-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-onmax-action-hist.tc index dfce6932d8be4..2a2ef767249ed 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-onmax-action-hist.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-onmax-action-hist.tc @@ -17,6 +17,11 @@ if [ ! -f synthetic_events ]; then exit_unsupported fi +if [ ! -f events/sched/sched_process_fork/hist ]; then + echo "hist trigger is not supported" + exit_unsupported +fi + echo "Test create synthetic event" echo 'wakeup_latency u64 lat pid_t pid char comm[16]' > synthetic_events diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmax-action-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmax-action-hist.tc index 0035995c21949..98d73bfb0296a 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmax-action-hist.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmax-action-hist.tc @@ -17,6 +17,11 @@ if [ ! -f synthetic_events ]; then exit_unsupported fi +if [ ! 
-f events/sched/sched_process_fork/hist ]; then + echo "hist trigger is not supported" + exit_unsupported +fi + echo "Test create synthetic event" echo 'wakeup_latency u64 lat pid_t pid char comm[16]' > synthetic_events diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-snapshot-action-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-snapshot-action-hist.tc index f546c1b66a9b1..01b01b9c4e07b 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-snapshot-action-hist.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-snapshot-action-hist.tc @@ -12,6 +12,11 @@ if [ ! -f set_event ]; then exit_unsupported fi +if [ ! -f events/sched/sched_process_fork/hist ]; then + echo "hist trigger is not supported" + exit_unsupported +fi + if [ ! -f snapshot ]; then echo "snapshot is not supported" exit_unsupported diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-trace-action-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-trace-action-hist.tc index 8021d60aafec5..c3baa486aeb47 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-trace-action-hist.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-trace-action-hist.tc @@ -17,6 +17,11 @@ if [ ! -f synthetic_events ]; then exit_unsupported fi +if [ ! -f events/sched/sched_process_fork/hist ]; then + echo "hist trigger is not supported" + exit_unsupported +fi + grep -q "trace(" README || exit_unsupported # version issue echo "Test create synthetic event" -- GitLab From 58f6e384480ec97b902e44399a44862907840ba9 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 28 May 2020 16:52:40 +0200 Subject: [PATCH 0990/1971] ftrace,bug: Improve traceoff_on_warn While doing some tracing, I found a huge portion of the per-cpu buffer was taken by printk/serial output because we're disabling the trace far too late (after printing the CUT string). Improve matters for architectures that have GENERIC_BUG + _BUG_FLAGS by killing the tracer in the exception handler before printing anything much. Link: https://lkml.kernel.org/r/20200528145240.GF706495@hirez.programming.kicks-ass.net Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Steven Rostedt (VMware) --- lib/bug.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/bug.c b/lib/bug.c index 8c98af0bf5857..7103440c0ee1a 100644 --- a/lib/bug.c +++ b/lib/bug.c @@ -47,6 +47,7 @@ #include #include #include +#include extern struct bug_entry __start___bug_table[], __stop___bug_table[]; @@ -153,6 +154,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs) if (!bug) return BUG_TRAP_TYPE_NONE; + disable_trace_on_warning(); + file = NULL; line = 0; warning = 0; -- GitLab From c200784a08d4ea82f82a30678955b7f2c7550af4 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Fri, 29 May 2020 10:46:32 -0400 Subject: [PATCH 0991/1971] tracing: Add a trace print when traceoff_on_warning is triggered When "traceoff_on_warning" is enabled and a warning happens, there can still be many trace events happening on other CPUs between the time the warning occurred and the last trace event on that same CPU. This can cause confusion in examining the trace, as it may not be obvious where the warning happened. 
By adding a trace print into the trace just before disabling tracing, it makes it obvious where the warning occurred, and the developer doesn't have to look at other means to see what CPU it occurred on. Cc: Peter Zijlstra Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 29615f15a820b..760fd102dbe24 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1299,8 +1299,11 @@ EXPORT_SYMBOL_GPL(tracing_off); void disable_trace_on_warning(void) { - if (__disable_trace_on_warning) + if (__disable_trace_on_warning) { + trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_, + "Disabling tracing due to warning\n"); tracing_off(); + } } /** -- GitLab From 01397e822af42f8485e876ba9d1309b63646d886 Mon Sep 17 00:00:00 2001 From: Vitor Massaru Iha Date: Fri, 29 May 2020 16:28:45 -0300 Subject: [PATCH 0992/1971] kunit: Fix TabError, remove defconfig code and handle when there is no kunitconfig The identation before this code (`if not os.path.exists(cli_args.build_dir):``) was with spaces instead of tabs after fixed up merge conflits, this commit revert spaces to tabs: [iha@bbking linux]$ tools/testing/kunit/kunit.py run File "tools/testing/kunit/kunit.py", line 247 if not linux: ^ TabError: inconsistent use of tabs and spaces in indentation [iha@bbking linux]$ tools/testing/kunit/kunit.py run Traceback (most recent call last): File "tools/testing/kunit/kunit.py", line 338, in main(sys.argv[1:]) File "tools/testing/kunit/kunit.py", line 215, in main add_config_opts(config_parser) [iha@bbking linux]$ tools/testing/kunit/kunit.py run Traceback (most recent call last): File "tools/testing/kunit/kunit.py", line 337, in main(sys.argv[1:]) File "tools/testing/kunit/kunit.py", line 255, in main result = run_tests(linux, request) File "tools/testing/kunit/kunit.py", line 133, in run_tests request.defconfig, AttributeError: 'KunitRequest' object has no attribute 'defconfig' Handles when there is no .kunitconfig, the error due to merge conflicts between the following: commit 9bdf64b35117 ("kunit: use KUnit defconfig by default") commit 45ba7a893ad8 ("kunit: kunit_tool: Separate out config/build/exec/parse") [iha@bbking linux]$ tools/testing/kunit/kunit.py run Traceback (most recent call last): File "tools/testing/kunit/kunit.py", line 335, in main(sys.argv[1:]) File "tools/testing/kunit/kunit.py", line 246, in main linux = kunit_kernel.LinuxSourceTree() File "../tools/testing/kunit/kunit_kernel.py", line 109, in __init__ self._kconfig.read_from_file(kunitconfig_path) File "t../ools/testing/kunit/kunit_config.py", line 88, in read_from_file with open(path, 'r') as f: FileNotFoundError: [Errno 2] No such file or directory: '.kunit/.kunitconfig' Signed-off-by: Vitor Massaru Iha Signed-off-by: Shuah Khan --- tools/testing/kunit/kunit.py | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py index ec73b07d12658..787b6d4ad7162 100755 --- a/tools/testing/kunit/kunit.py +++ b/tools/testing/kunit/kunit.py @@ -23,7 +23,7 @@ import kunit_parser KunitResult = namedtuple('KunitResult', ['status','result','elapsed_time']) KunitConfigRequest = namedtuple('KunitConfigRequest', - ['build_dir', 'defconfig', 'make_options']) + ['build_dir', 'make_options']) KunitBuildRequest = namedtuple('KunitBuildRequest', ['jobs', 'build_dir', 'alltests', 'make_options']) @@ -130,7 +130,6 @@ def 
run_tests(linux: kunit_kernel.LinuxSourceTree, run_start = time.time() config_request = KunitConfigRequest(request.build_dir, - request.defconfig, request.make_options) config_result = config_tests(linux, config_request) if config_result.status != KunitStatus.SUCCESS: @@ -212,7 +211,6 @@ def main(argv, linux=None): help='Ensures that .config contains all of ' 'the options in .kunitconfig') add_common_opts(config_parser) - add_config_opts(config_parser) build_parser = subparser.add_parser('build', help='Builds a kernel with KUnit tests') add_common_opts(build_parser) @@ -238,11 +236,14 @@ def main(argv, linux=None): cli_args = parser.parse_args(argv) if cli_args.subcommand == 'run': - if not os.path.exists(cli_args.build_dir): - os.mkdir(cli_args.build_dir) - kunit_kernel.kunitconfig_path = os.path.join( - cli_args.build_dir, - kunit_kernel.kunitconfig_path) + if not os.path.exists(cli_args.build_dir): + os.mkdir(cli_args.build_dir) + kunit_kernel.kunitconfig_path = os.path.join( + cli_args.build_dir, + kunit_kernel.kunitconfig_path) + + if not os.path.exists(kunit_kernel.kunitconfig_path): + create_default_kunitconfig() if not linux: linux = kunit_kernel.LinuxSourceTree() @@ -264,11 +265,13 @@ def main(argv, linux=None): cli_args.build_dir, kunit_kernel.kunitconfig_path) + if not os.path.exists(kunit_kernel.kunitconfig_path): + create_default_kunitconfig() + if not linux: linux = kunit_kernel.LinuxSourceTree() request = KunitConfigRequest(cli_args.build_dir, - cli_args.defconfig, cli_args.make_options) result = config_tests(linux, request) kunit_parser.print_with_timestamp(( @@ -284,6 +287,9 @@ def main(argv, linux=None): cli_args.build_dir, kunit_kernel.kunitconfig_path) + if not os.path.exists(kunit_kernel.kunitconfig_path): + create_default_kunitconfig() + if not linux: linux = kunit_kernel.LinuxSourceTree() @@ -305,6 +311,9 @@ def main(argv, linux=None): cli_args.build_dir, kunit_kernel.kunitconfig_path) + if not os.path.exists(kunit_kernel.kunitconfig_path): + create_default_kunitconfig() + if not linux: linux = kunit_kernel.LinuxSourceTree() -- GitLab From 92238b31bd054cc2a046df96507575f3f10c277f Mon Sep 17 00:00:00 2001 From: Anders Roxell Date: Mon, 11 May 2020 15:14:20 +0200 Subject: [PATCH 0993/1971] kunit: Kconfig: enable a KUNIT_ALL_TESTS fragment Make it easier to enable all KUnit fragments. This is useful for kernel devs or testers, so its easy to get all KUnit tests enabled and if new gets added they will be enabled as well. Fragments that has to be builtin will be missed if CONFIG_KUNIT_ALL_TESTS is set as a module. Signed-off-by: Anders Roxell Reviewed-by: Brendan Higgins Signed-off-by: Shuah Khan --- lib/kunit/Kconfig | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/lib/kunit/Kconfig b/lib/kunit/Kconfig index 95d12e3d6d950..bdeee7639005b 100644 --- a/lib/kunit/Kconfig +++ b/lib/kunit/Kconfig @@ -41,4 +41,18 @@ config KUNIT_EXAMPLE_TEST is intended for curious hackers who would like to understand how to use KUnit for kernel development. +config KUNIT_ALL_TESTS + tristate "All KUnit tests with satisfied dependencies" + help + Enables all KUnit tests, if they can be enabled. + KUnit tests run during boot and output the results to the debug log + in TAP format (http://testanything.org/). Only useful for kernel devs + running the KUnit test harness, and not intended for inclusion into a + production build. + + For more information on KUnit and unit tests in general please refer + to the KUnit documentation in Documentation/dev-tools/kunit/. 
+ + If unsure, say N. + endif # KUNIT -- GitLab From beaed42c427dbe8fedba4518b2baf5203f5e398b Mon Sep 17 00:00:00 2001 From: Anders Roxell Date: Mon, 11 May 2020 15:14:25 +0200 Subject: [PATCH 0994/1971] kunit: default KUNIT_* fragments to KUNIT_ALL_TESTS This makes it easier to enable all KUnit fragments. Adding 'if !KUNIT_ALL_TESTS' so individual tests can not be turned off. Therefore if KUNIT_ALL_TESTS is enabled that will hide the prompt in menuconfig. Reviewed-by: David Gow Signed-off-by: Anders Roxell Signed-off-by: Shuah Khan --- lib/kunit/Kconfig | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/lib/kunit/Kconfig b/lib/kunit/Kconfig index bdeee7639005b..00909e6a24438 100644 --- a/lib/kunit/Kconfig +++ b/lib/kunit/Kconfig @@ -15,7 +15,8 @@ menuconfig KUNIT if KUNIT config KUNIT_DEBUGFS - bool "KUnit - Enable /sys/kernel/debug/kunit debugfs representation" + bool "KUnit - Enable /sys/kernel/debug/kunit debugfs representation" if !KUNIT_ALL_TESTS + default KUNIT_ALL_TESTS help Enable debugfs representation for kunit. Currently this consists of /sys/kernel/debug/kunit//results files for each @@ -23,7 +24,8 @@ config KUNIT_DEBUGFS run that occurred. config KUNIT_TEST - tristate "KUnit test for KUnit" + tristate "KUnit test for KUnit" if !KUNIT_ALL_TESTS + default KUNIT_ALL_TESTS help Enables the unit tests for the KUnit test framework. These tests test the KUnit test framework itself; the tests are both written using @@ -32,7 +34,8 @@ config KUNIT_TEST expected. config KUNIT_EXAMPLE_TEST - tristate "Example test for KUnit" + tristate "Example test for KUnit" if !KUNIT_ALL_TESTS + default KUNIT_ALL_TESTS help Enables an example unit test that illustrates some of the basic features of KUnit. This test only exists to help new users understand -- GitLab From 5f215aab4ea0cf39e9fcdb721733a7814f3a3a50 Mon Sep 17 00:00:00 2001 From: Anders Roxell Date: Mon, 11 May 2020 15:14:29 +0200 Subject: [PATCH 0995/1971] lib: Kconfig.debug: default KUNIT_* fragments to KUNIT_ALL_TESTS This makes it easier to enable all KUnit fragments. Adding 'if !KUNIT_ALL_TESTS' so individual tests can not be turned off. Therefore if KUNIT_ALL_TESTS is enabled that will hide the prompt in menuconfig. Reviewed-by: David Gow Signed-off-by: Anders Roxell Signed-off-by: Shuah Khan --- lib/Kconfig.debug | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 21d9c5f6e7ec7..1f4ab7a2bdee4 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -2064,8 +2064,9 @@ config TEST_SYSCTL If unsure, say N. config SYSCTL_KUNIT_TEST - tristate "KUnit test for sysctl" + tristate "KUnit test for sysctl" if !KUNIT_ALL_TESTS depends on KUNIT + default KUNIT_ALL_TESTS help This builds the proc sysctl unit test, which runs on boot. Tests the API contract and implementation correctness of sysctl. @@ -2075,8 +2076,9 @@ config SYSCTL_KUNIT_TEST If unsure, say N. config LIST_KUNIT_TEST - tristate "KUnit Test for Kernel Linked-list structures" + tristate "KUnit Test for Kernel Linked-list structures" if !KUNIT_ALL_TESTS depends on KUNIT + default KUNIT_ALL_TESTS help This builds the linked list KUnit test suite. It tests that the API and basic functionality of the list_head type -- GitLab From bebe94b53eb7ee28f436f7b45324e37713690c0f Mon Sep 17 00:00:00 2001 From: Anders Roxell Date: Mon, 11 May 2020 15:14:33 +0200 Subject: [PATCH 0996/1971] drivers: base: default KUNIT_* fragments to KUNIT_ALL_TESTS This makes it easier to enable all KUnit fragments. 
Adding 'if !KUNIT_ALL_TESTS' so individual tests can not be turned off. Therefore if KUNIT_ALL_TESTS is enabled that will hide the prompt in menuconfig. Reviewed-by: David Gow Signed-off-by: Anders Roxell Signed-off-by: Shuah Khan --- drivers/base/Kconfig | 3 ++- drivers/base/test/Kconfig | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index 5f0bc74d2409a..8d7001712062e 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig @@ -149,8 +149,9 @@ config DEBUG_TEST_DRIVER_REMOVE test this functionality. config PM_QOS_KUNIT_TEST - bool "KUnit Test for PM QoS features" + bool "KUnit Test for PM QoS features" if !KUNIT_ALL_TESTS depends on KUNIT=y + default KUNIT_ALL_TESTS config HMEM_REPORTING bool diff --git a/drivers/base/test/Kconfig b/drivers/base/test/Kconfig index 305c7751184a2..ba225eb1b7615 100644 --- a/drivers/base/test/Kconfig +++ b/drivers/base/test/Kconfig @@ -9,5 +9,6 @@ config TEST_ASYNC_DRIVER_PROBE If unsure say N. config KUNIT_DRIVER_PE_TEST - bool "KUnit Tests for property entry API" + bool "KUnit Tests for property entry API" if !KUNIT_ALL_TESTS depends on KUNIT=y + default KUNIT_ALL_TESTS -- GitLab From d194e12b3ed3c4799951072271f001cbd104b5e9 Mon Sep 17 00:00:00 2001 From: Anders Roxell Date: Mon, 11 May 2020 15:14:38 +0200 Subject: [PATCH 0997/1971] fs: ext4: default KUNIT_* fragments to KUNIT_ALL_TESTS This makes it easier to enable all KUnit fragments. Adding 'if !KUNIT_ALL_TESTS' so individual tests can not be turned off. Therefore if KUNIT_ALL_TESTS is enabled that will hide the prompt in menuconfig. Reviewed-by: David Gow Signed-off-by: Anders Roxell Signed-off-by: Shuah Khan --- fs/ext4/Kconfig | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig index 2a592e38cdfec..bc7815158503a 100644 --- a/fs/ext4/Kconfig +++ b/fs/ext4/Kconfig @@ -103,9 +103,10 @@ config EXT4_DEBUG echo 1 > /sys/module/ext4/parameters/mballoc_debug config EXT4_KUNIT_TESTS - tristate "KUnit tests for ext4" + tristate "KUnit tests for ext4" if !KUNIT_ALL_TESTS select EXT4_FS depends on KUNIT + default KUNIT_ALL_TESTS help This builds the ext4 KUnit tests. -- GitLab From 6d6861d45e38d42a7df9db244c871ee3856acf57 Mon Sep 17 00:00:00 2001 From: Anders Roxell Date: Mon, 11 May 2020 15:14:42 +0200 Subject: [PATCH 0998/1971] security: apparmor: default KUNIT_* fragments to KUNIT_ALL_TESTS This makes it easier to enable all KUnit fragments. Adding 'if !KUNIT_ALL_TESTS' so individual tests can not be turned off. Therefore if KUNIT_ALL_TESTS is enabled that will hide the prompt in menuconfig. Reviewed-by: David Gow Signed-off-by: Anders Roxell Acked-by: John Johansen Signed-off-by: Shuah Khan --- security/apparmor/Kconfig | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/security/apparmor/Kconfig b/security/apparmor/Kconfig index 0fe336860773b..03fae1bd48a67 100644 --- a/security/apparmor/Kconfig +++ b/security/apparmor/Kconfig @@ -70,8 +70,9 @@ config SECURITY_APPARMOR_DEBUG_MESSAGES the kernel message buffer. config SECURITY_APPARMOR_KUNIT_TEST - bool "Build KUnit tests for policy_unpack.c" + bool "Build KUnit tests for policy_unpack.c" if !KUNIT_ALL_TESTS depends on KUNIT=y && SECURITY_APPARMOR + default KUNIT_ALL_TESTS help This builds the AppArmor KUnit tests. -- GitLab From 027690c75e8fd91b60a634d31c4891a6e39d45bd Mon Sep 17 00:00:00 2001 From: "J. 
Bruce Fields" Date: Mon, 1 Jun 2020 17:44:45 -0400 Subject: [PATCH 0999/1971] nfsd4: make drc_slab global, not per-net I made every global per-network-namespace instead. But perhaps doing that to this slab was a step too far. The kmem_cache_create call in our net init method also seems to be responsible for this lockdep warning: [ 45.163710] Unable to find swap-space signature [ 45.375718] trinity-c1 (855): attempted to duplicate a private mapping with mremap. This is not supported. [ 46.055744] futex_wake_op: trinity-c1 tries to shift op by -209; fix this program [ 51.011723] [ 51.013378] ====================================================== [ 51.013875] WARNING: possible circular locking dependency detected [ 51.014378] 5.2.0-rc2 #1 Not tainted [ 51.014672] ------------------------------------------------------ [ 51.015182] trinity-c2/886 is trying to acquire lock: [ 51.015593] 000000005405f099 (slab_mutex){+.+.}, at: slab_attr_store+0xa2/0x130 [ 51.016190] [ 51.016190] but task is already holding lock: [ 51.016652] 00000000ac662005 (kn->count#43){++++}, at: kernfs_fop_write+0x286/0x500 [ 51.017266] [ 51.017266] which lock already depends on the new lock. [ 51.017266] [ 51.017909] [ 51.017909] the existing dependency chain (in reverse order) is: [ 51.018497] [ 51.018497] -> #1 (kn->count#43){++++}: [ 51.018956] __lock_acquire+0x7cf/0x1a20 [ 51.019317] lock_acquire+0x17d/0x390 [ 51.019658] __kernfs_remove+0x892/0xae0 [ 51.020020] kernfs_remove_by_name_ns+0x78/0x110 [ 51.020435] sysfs_remove_link+0x55/0xb0 [ 51.020832] sysfs_slab_add+0xc1/0x3e0 [ 51.021332] __kmem_cache_create+0x155/0x200 [ 51.021720] create_cache+0xf5/0x320 [ 51.022054] kmem_cache_create_usercopy+0x179/0x320 [ 51.022486] kmem_cache_create+0x1a/0x30 [ 51.022867] nfsd_reply_cache_init+0x278/0x560 [ 51.023266] nfsd_init_net+0x20f/0x5e0 [ 51.023623] ops_init+0xcb/0x4b0 [ 51.023928] setup_net+0x2fe/0x670 [ 51.024315] copy_net_ns+0x30a/0x3f0 [ 51.024653] create_new_namespaces+0x3c5/0x820 [ 51.025257] unshare_nsproxy_namespaces+0xd1/0x240 [ 51.025881] ksys_unshare+0x506/0x9c0 [ 51.026381] __x64_sys_unshare+0x3a/0x50 [ 51.026937] do_syscall_64+0x110/0x10b0 [ 51.027509] entry_SYSCALL_64_after_hwframe+0x49/0xbe [ 51.028175] [ 51.028175] -> #0 (slab_mutex){+.+.}: [ 51.028817] validate_chain+0x1c51/0x2cc0 [ 51.029422] __lock_acquire+0x7cf/0x1a20 [ 51.029947] lock_acquire+0x17d/0x390 [ 51.030438] __mutex_lock+0x100/0xfa0 [ 51.030995] mutex_lock_nested+0x27/0x30 [ 51.031516] slab_attr_store+0xa2/0x130 [ 51.032020] sysfs_kf_write+0x11d/0x180 [ 51.032529] kernfs_fop_write+0x32a/0x500 [ 51.033056] do_loop_readv_writev+0x21d/0x310 [ 51.033627] do_iter_write+0x2e5/0x380 [ 51.034148] vfs_writev+0x170/0x310 [ 51.034616] do_pwritev+0x13e/0x160 [ 51.035100] __x64_sys_pwritev+0xa3/0x110 [ 51.035633] do_syscall_64+0x110/0x10b0 [ 51.036200] entry_SYSCALL_64_after_hwframe+0x49/0xbe [ 51.036924] [ 51.036924] other info that might help us debug this: [ 51.036924] [ 51.037876] Possible unsafe locking scenario: [ 51.037876] [ 51.038556] CPU0 CPU1 [ 51.039130] ---- ---- [ 51.039676] lock(kn->count#43); [ 51.040084] lock(slab_mutex); [ 51.040597] lock(kn->count#43); [ 51.041062] lock(slab_mutex); [ 51.041320] [ 51.041320] *** DEADLOCK *** [ 51.041320] [ 51.041793] 3 locks held by trinity-c2/886: [ 51.042128] #0: 000000001f55e152 (sb_writers#5){.+.+}, at: vfs_writev+0x2b9/0x310 [ 51.042739] #1: 00000000c7d6c034 (&of->mutex){+.+.}, at: kernfs_fop_write+0x25b/0x500 [ 51.043400] #2: 00000000ac662005 (kn->count#43){++++}, at: kernfs_fop_write+0x286/0x500 
Reported-by: kernel test robot Fixes: 3ba75830ce17 "drc containerization" Signed-off-by: J. Bruce Fields --- fs/nfsd/cache.h | 2 ++ fs/nfsd/netns.h | 1 - fs/nfsd/nfscache.c | 29 +++++++++++++++++------------ fs/nfsd/nfsctl.c | 6 ++++++ 4 files changed, 25 insertions(+), 13 deletions(-) diff --git a/fs/nfsd/cache.h b/fs/nfsd/cache.h index 10ec5ecdf1178..65c331f75e9c7 100644 --- a/fs/nfsd/cache.h +++ b/fs/nfsd/cache.h @@ -78,6 +78,8 @@ enum { /* Checksum this amount of the request */ #define RC_CSUMLEN (256U) +int nfsd_drc_slab_create(void); +void nfsd_drc_slab_free(void); int nfsd_reply_cache_init(struct nfsd_net *); void nfsd_reply_cache_shutdown(struct nfsd_net *); int nfsd_cache_lookup(struct svc_rqst *); diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h index 09aa545825bd9..9217cb64bf0e7 100644 --- a/fs/nfsd/netns.h +++ b/fs/nfsd/netns.h @@ -139,7 +139,6 @@ struct nfsd_net { * Duplicate reply cache */ struct nfsd_drc_bucket *drc_hashtbl; - struct kmem_cache *drc_slab; /* max number of entries allowed in the cache */ unsigned int max_drc_entries; diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c index 945d2f5e760e8..f30a7def78996 100644 --- a/fs/nfsd/nfscache.c +++ b/fs/nfsd/nfscache.c @@ -35,6 +35,8 @@ struct nfsd_drc_bucket { spinlock_t cache_lock; }; +static struct kmem_cache *drc_slab; + static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec); static unsigned long nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc); @@ -94,7 +96,7 @@ nfsd_reply_cache_alloc(struct svc_rqst *rqstp, __wsum csum, { struct svc_cacherep *rp; - rp = kmem_cache_alloc(nn->drc_slab, GFP_KERNEL); + rp = kmem_cache_alloc(drc_slab, GFP_KERNEL); if (rp) { rp->c_state = RC_UNUSED; rp->c_type = RC_NOCACHE; @@ -128,7 +130,7 @@ nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp, atomic_dec(&nn->num_drc_entries); nn->drc_mem_usage -= sizeof(*rp); } - kmem_cache_free(nn->drc_slab, rp); + kmem_cache_free(drc_slab, rp); } static void @@ -140,6 +142,18 @@ nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp, spin_unlock(&b->cache_lock); } +int nfsd_drc_slab_create(void) +{ + drc_slab = kmem_cache_create("nfsd_drc", + sizeof(struct svc_cacherep), 0, 0, NULL); + return drc_slab ? 
0: -ENOMEM; +} + +void nfsd_drc_slab_free(void) +{ + kmem_cache_destroy(drc_slab); +} + int nfsd_reply_cache_init(struct nfsd_net *nn) { unsigned int hashsize; @@ -158,18 +172,13 @@ int nfsd_reply_cache_init(struct nfsd_net *nn) if (status) goto out_nomem; - nn->drc_slab = kmem_cache_create("nfsd_drc", - sizeof(struct svc_cacherep), 0, 0, NULL); - if (!nn->drc_slab) - goto out_shrinker; - nn->drc_hashtbl = kcalloc(hashsize, sizeof(*nn->drc_hashtbl), GFP_KERNEL); if (!nn->drc_hashtbl) { nn->drc_hashtbl = vzalloc(array_size(hashsize, sizeof(*nn->drc_hashtbl))); if (!nn->drc_hashtbl) - goto out_slab; + goto out_shrinker; } for (i = 0; i < hashsize; i++) { @@ -179,8 +188,6 @@ int nfsd_reply_cache_init(struct nfsd_net *nn) nn->drc_hashsize = hashsize; return 0; -out_slab: - kmem_cache_destroy(nn->drc_slab); out_shrinker: unregister_shrinker(&nn->nfsd_reply_cache_shrinker); out_nomem: @@ -208,8 +215,6 @@ void nfsd_reply_cache_shutdown(struct nfsd_net *nn) nn->drc_hashtbl = NULL; nn->drc_hashsize = 0; - kmem_cache_destroy(nn->drc_slab); - nn->drc_slab = NULL; } /* diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index b48eac3bb72b9..b68e96681522e 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -1533,6 +1533,9 @@ static int __init init_nfsd(void) goto out_free_slabs; nfsd_fault_inject_init(); /* nfsd fault injection controls */ nfsd_stat_init(); /* Statistics */ + retval = nfsd_drc_slab_create(); + if (retval) + goto out_free_stat; nfsd_lockd_init(); /* lockd->nfsd callbacks */ retval = create_proc_exports_entry(); if (retval) @@ -1546,6 +1549,8 @@ out_free_all: remove_proc_entry("fs/nfs", NULL); out_free_lockd: nfsd_lockd_shutdown(); + nfsd_drc_slab_free(); +out_free_stat: nfsd_stat_shutdown(); nfsd_fault_inject_cleanup(); nfsd4_exit_pnfs(); @@ -1560,6 +1565,7 @@ out_unregister_pernet: static void __exit exit_nfsd(void) { + nfsd_drc_slab_free(); remove_proc_entry("fs/nfs/exports", NULL); remove_proc_entry("fs/nfs", NULL); nfsd_stat_shutdown(); -- GitLab From 36f9967531da27ff8cc6f005d93760b578baffb9 Mon Sep 17 00:00:00 2001 From: Stefano Stabellini Date: Thu, 21 May 2020 12:32:42 -0700 Subject: [PATCH 1000/1971] 9p/xen: increase XEN_9PFS_RING_ORDER Increase XEN_9PFS_RING_ORDER to 9 for performance reason. Order 9 is the max allowed by the protocol. We can't assume that all backends will support order 9. The xenstore property max-ring-page-order specifies the max order supported by the backend. We'll use max-ring-page-order for the size of the ring. This means that the size of the ring is not static (XEN_FLEX_RING_SIZE(9)) anymore. Change XEN_9PFS_RING_SIZE to take an argument and base the calculation on the order chosen at setup time. Finally, modify p9_xen_trans.maxsize to be divided by 4 compared to the original value. We need to divide it by 2 because we have two rings coming off the same order allocation: the in and out rings. This was a mistake in the original code. Also divide it further by 2 because we don't want a single request/reply to fill up the entire ring. There can be multiple requests/replies outstanding at any given time and if we use the full ring with one, we risk forcing the backend to wait for the client to read back more replies before continuing, which is not performant. 
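To make the sizing concrete, here is a worked example (not part of the patch itself; it assumes XEN_PAGE_SHIFT is 12 and that XEN_FLEX_RING_SIZE(order) gives the per-direction size, i.e. half of the 2^order-page allocation):

    total allocation     = 1 << (9 + 12)          = 2 MiB   (in ring + out ring)
    per-ring size        = XEN_FLEX_RING_SIZE(9)  = 1 MiB
    p9_xen_trans.maxsize = 1 << (9 + 12 - 2)      = 512 KiB (half of one ring)

So a single request or reply can occupy at most half of its ring, leaving room for other outstanding requests, and if the backend advertises a smaller max-ring-page-order, maxsize is clamped to half of that smaller ring instead.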
Link: http://lkml.kernel.org/r/20200521193242.15953-1-sstabellini@kernel.org Signed-off-by: Stefano Stabellini Signed-off-by: Dominique Martinet --- net/9p/trans_xen.c | 61 ++++++++++++++++++++++++++-------------------- 1 file changed, 34 insertions(+), 27 deletions(-) diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c index 3963eb11c3fbd..3debad93be1a6 100644 --- a/net/9p/trans_xen.c +++ b/net/9p/trans_xen.c @@ -43,8 +43,8 @@ #include #define XEN_9PFS_NUM_RINGS 2 -#define XEN_9PFS_RING_ORDER 6 -#define XEN_9PFS_RING_SIZE XEN_FLEX_RING_SIZE(XEN_9PFS_RING_ORDER) +#define XEN_9PFS_RING_ORDER 9 +#define XEN_9PFS_RING_SIZE(ring) XEN_FLEX_RING_SIZE(ring->intf->ring_order) struct xen_9pfs_header { uint32_t size; @@ -132,8 +132,8 @@ static bool p9_xen_write_todo(struct xen_9pfs_dataring *ring, RING_IDX size) prod = ring->intf->out_prod; virt_mb(); - return XEN_9PFS_RING_SIZE - - xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE) >= size; + return XEN_9PFS_RING_SIZE(ring) - + xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE(ring)) >= size; } static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req) @@ -167,17 +167,18 @@ again: prod = ring->intf->out_prod; virt_mb(); - if (XEN_9PFS_RING_SIZE - xen_9pfs_queued(prod, cons, - XEN_9PFS_RING_SIZE) < size) { + if (XEN_9PFS_RING_SIZE(ring) - + xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE(ring)) < size) { spin_unlock_irqrestore(&ring->lock, flags); goto again; } - masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE); - masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE); + masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE(ring)); + masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE(ring)); xen_9pfs_write_packet(ring->data.out, p9_req->tc.sdata, size, - &masked_prod, masked_cons, XEN_9PFS_RING_SIZE); + &masked_prod, masked_cons, + XEN_9PFS_RING_SIZE(ring)); p9_req->status = REQ_STATUS_SENT; virt_wmb(); /* write ring before updating pointer */ @@ -207,19 +208,19 @@ static void p9_xen_response(struct work_struct *work) prod = ring->intf->in_prod; virt_rmb(); - if (xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE) < + if (xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE(ring)) < sizeof(h)) { notify_remote_via_irq(ring->irq); return; } - masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE); - masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE); + masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE(ring)); + masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE(ring)); /* First, read just the header */ xen_9pfs_read_packet(&h, ring->data.in, sizeof(h), masked_prod, &masked_cons, - XEN_9PFS_RING_SIZE); + XEN_9PFS_RING_SIZE(ring)); req = p9_tag_lookup(priv->client, h.tag); if (!req || req->status != REQ_STATUS_SENT) { @@ -233,11 +234,11 @@ static void p9_xen_response(struct work_struct *work) memcpy(&req->rc, &h, sizeof(h)); req->rc.offset = 0; - masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE); + masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE(ring)); /* Then, read the whole packet (including the header) */ xen_9pfs_read_packet(req->rc.sdata, ring->data.in, h.size, masked_prod, &masked_cons, - XEN_9PFS_RING_SIZE); + XEN_9PFS_RING_SIZE(ring)); virt_mb(); cons += h.size; @@ -267,7 +268,7 @@ static irqreturn_t xen_9pfs_front_event_handler(int irq, void *r) static struct p9_trans_module p9_xen_trans = { .name = "xen", - .maxsize = 1 << (XEN_9PFS_RING_ORDER + XEN_PAGE_SHIFT), + .maxsize = 1 << (XEN_9PFS_RING_ORDER + XEN_PAGE_SHIFT - 2), .def = 1, .create = p9_xen_create, .close = p9_xen_close, @@ -295,14 +296,16 @@ static void 
xen_9pfs_front_free(struct xen_9pfs_front_priv *priv) if (priv->rings[i].irq > 0) unbind_from_irqhandler(priv->rings[i].irq, priv->dev); if (priv->rings[i].data.in) { - for (j = 0; j < (1 << XEN_9PFS_RING_ORDER); j++) { + for (j = 0; + j < (1 << priv->rings[i].intf->ring_order); + j++) { grant_ref_t ref; ref = priv->rings[i].intf->ref[j]; gnttab_end_foreign_access(ref, 0, 0); } free_pages((unsigned long)priv->rings[i].data.in, - XEN_9PFS_RING_ORDER - + priv->rings[i].intf->ring_order - (PAGE_SHIFT - XEN_PAGE_SHIFT)); } gnttab_end_foreign_access(priv->rings[i].ref, 0, 0); @@ -323,7 +326,8 @@ static int xen_9pfs_front_remove(struct xenbus_device *dev) } static int xen_9pfs_front_alloc_dataring(struct xenbus_device *dev, - struct xen_9pfs_dataring *ring) + struct xen_9pfs_dataring *ring, + unsigned int order) { int i = 0; int ret = -ENOMEM; @@ -342,21 +346,21 @@ static int xen_9pfs_front_alloc_dataring(struct xenbus_device *dev, goto out; ring->ref = ret; bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, - XEN_9PFS_RING_ORDER - (PAGE_SHIFT - XEN_PAGE_SHIFT)); + order - (PAGE_SHIFT - XEN_PAGE_SHIFT)); if (!bytes) { ret = -ENOMEM; goto out; } - for (; i < (1 << XEN_9PFS_RING_ORDER); i++) { + for (; i < (1 << order); i++) { ret = gnttab_grant_foreign_access( dev->otherend_id, virt_to_gfn(bytes) + i, 0); if (ret < 0) goto out; ring->intf->ref[i] = ret; } - ring->intf->ring_order = XEN_9PFS_RING_ORDER; + ring->intf->ring_order = order; ring->data.in = bytes; - ring->data.out = bytes + XEN_9PFS_RING_SIZE; + ring->data.out = bytes + XEN_FLEX_RING_SIZE(order); ret = xenbus_alloc_evtchn(dev, &ring->evtchn); if (ret) @@ -374,7 +378,7 @@ out: for (i--; i >= 0; i--) gnttab_end_foreign_access(ring->intf->ref[i], 0, 0); free_pages((unsigned long)bytes, - XEN_9PFS_RING_ORDER - + ring->intf->ring_order - (PAGE_SHIFT - XEN_PAGE_SHIFT)); } gnttab_end_foreign_access(ring->ref, 0, 0); @@ -404,8 +408,10 @@ static int xen_9pfs_front_probe(struct xenbus_device *dev, return -EINVAL; max_ring_order = xenbus_read_unsigned(dev->otherend, "max-ring-page-order", 0); - if (max_ring_order < XEN_9PFS_RING_ORDER) - return -EINVAL; + if (max_ring_order > XEN_9PFS_RING_ORDER) + max_ring_order = XEN_9PFS_RING_ORDER; + if (p9_xen_trans.maxsize > XEN_FLEX_RING_SIZE(max_ring_order)) + p9_xen_trans.maxsize = XEN_FLEX_RING_SIZE(max_ring_order) / 2; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) @@ -422,7 +428,8 @@ static int xen_9pfs_front_probe(struct xenbus_device *dev, for (i = 0; i < priv->num_rings; i++) { priv->rings[i].priv = priv; - ret = xen_9pfs_front_alloc_dataring(dev, &priv->rings[i]); + ret = xen_9pfs_front_alloc_dataring(dev, &priv->rings[i], + max_ring_order); if (ret < 0) goto error; } -- GitLab From e7c8cc35a64d1d21e6fb811c519eadbda30f4f77 Mon Sep 17 00:00:00 2001 From: Matej Genci Date: Wed, 11 Sep 2019 12:49:53 +0000 Subject: [PATCH 1001/1971] virtio: add VIRTIO_RING_NO_LEGACY Add macro to disable legacy vring functions. Signed-off-by: Matej Genci Link: https://lore.kernel.org/r/20190911124942.243713-1-matej.genci@nutanix.com Signed-off-by: Michael S. 
Tsirkin --- drivers/virtio/virtio_pci_modern.c | 1 + include/uapi/linux/virtio_ring.h | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c index 7abcc50838b88..db93cedd262fc 100644 --- a/drivers/virtio/virtio_pci_modern.c +++ b/drivers/virtio/virtio_pci_modern.c @@ -16,6 +16,7 @@ #include #define VIRTIO_PCI_NO_LEGACY +#define VIRTIO_RING_NO_LEGACY #include "virtio_pci_common.h" /* diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h index 559f42e733152..9223c3a5c46ae 100644 --- a/include/uapi/linux/virtio_ring.h +++ b/include/uapi/linux/virtio_ring.h @@ -135,6 +135,8 @@ struct vring { #define VRING_USED_ALIGN_SIZE 4 #define VRING_DESC_ALIGN_SIZE 16 +#ifndef VIRTIO_RING_NO_LEGACY + /* The standard layout for the ring is a continuous chunk of memory which looks * like this. We assume num is a power of 2. * @@ -181,6 +183,8 @@ static inline unsigned vring_size(unsigned int num, unsigned long align) + sizeof(__virtio16) * 3 + sizeof(struct vring_used_elem) * num; } +#endif /* VIRTIO_RING_NO_LEGACY */ + /* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */ /* Assuming a given event_idx value from the other side, if * we have just incremented index from old to new_idx, -- GitLab From 0c35c67412f0ae9ebe1a87cb83bc9de8143438b7 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sun, 5 Apr 2020 19:14:10 +0200 Subject: [PATCH 1002/1971] virtio-mmio: Delete an error message in vm_find_vqs() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The function “platform_get_irq” can log an error already. Thus omit a redundant message for the exception handling in the calling function. This issue was detected by using the Coccinelle software. Signed-off-by: Markus Elfring Link: https://lore.kernel.org/r/9e27bc4a-cfa1-7818-dc25-8ad308816b30@web.de Signed-off-by: Michael S. Tsirkin --- drivers/virtio/virtio_mmio.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index 97d5725fd9a26..9d16aaffca9d7 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -466,10 +466,8 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs, int irq = platform_get_irq(vm_dev->pdev, 0); int i, err, queue_idx = 0; - if (irq < 0) { - dev_err(&vdev->dev, "Cannot get IRQ resource\n"); + if (irq < 0) return irq; - } err = request_irq(irq, vm_interrupt, IRQF_SHARED, dev_name(&vdev->dev), vm_dev); -- GitLab From a865e420b9561235851c3f5d483c82ef389d29bd Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Mon, 6 Apr 2020 08:42:55 -0400 Subject: [PATCH 1003/1971] virtio: force spec specified alignment on types The ring element addresses are passed between components with different alignments assumptions. Thus, if guest/userspace selects a pointer and host then gets and dereferences it, we might need to decrease the compiler-selected alignment to prevent compiler on the host from assuming pointer is aligned. This actually triggers on ARM with -mabi=apcs-gnu - which is a deprecated configuration, but it seems safer to handle this generally. Note that userspace that allocates the memory is actually OK and does not need to be fixed, but userspace that gets it from guest or another process does need to be fixed. 
The later doesn't generally talk to the kernel so while it might be buggy it's not talking to the kernel in the buggy way - it's just using the header in the buggy way - so fixing header and asking userspace to recompile is the best we can do. I verified that the produced kernel binary on x86 is exactly identical before and after the change. Signed-off-by: Michael S. Tsirkin Acked-by: Jason Wang --- drivers/vhost/vhost.c | 8 +++--- drivers/vhost/vhost.h | 6 ++--- drivers/vhost/vringh.c | 6 ++--- include/linux/vringh.h | 6 ++--- include/uapi/linux/virtio_ring.h | 46 ++++++++++++++++++++++++-------- 5 files changed, 48 insertions(+), 24 deletions(-) diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 21a59b598ed87..96d9871fa0cb5 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -1244,9 +1244,9 @@ static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access) } static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num, - struct vring_desc __user *desc, - struct vring_avail __user *avail, - struct vring_used __user *used) + vring_desc_t __user *desc, + vring_avail_t __user *avail, + vring_used_t __user *used) { return access_ok(desc, vhost_get_desc_size(vq, num)) && @@ -2301,7 +2301,7 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads, unsigned count) { - struct vring_used_elem __user *used; + vring_used_elem_t __user *used; u16 old, new; int start; diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index f8403bd46b85d..60cab4c782296 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h @@ -67,9 +67,9 @@ struct vhost_virtqueue { /* The actual ring of buffers. */ struct mutex mutex; unsigned int num; - struct vring_desc __user *desc; - struct vring_avail __user *avail; - struct vring_used __user *used; + vring_desc_t __user *desc; + vring_avail_t __user *avail; + vring_used_t __user *used; const struct vhost_iotlb_map *meta_iotlb[VHOST_NUM_ADDRS]; struct file *kick; struct eventfd_ctx *call_ctx; diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c index ba8e0d6cfd974..e059a9a47cdf1 100644 --- a/drivers/vhost/vringh.c +++ b/drivers/vhost/vringh.c @@ -620,9 +620,9 @@ static inline int xfer_to_user(const struct vringh *vrh, */ int vringh_init_user(struct vringh *vrh, u64 features, unsigned int num, bool weak_barriers, - struct vring_desc __user *desc, - struct vring_avail __user *avail, - struct vring_used __user *used) + vring_desc_t __user *desc, + vring_avail_t __user *avail, + vring_used_t __user *used) { /* Sane power of 2 please! */ if (!num || num > 0xffff || (num & (num - 1))) { diff --git a/include/linux/vringh.h b/include/linux/vringh.h index 9e2763d7c1591..59bd50f99291b 100644 --- a/include/linux/vringh.h +++ b/include/linux/vringh.h @@ -105,9 +105,9 @@ struct vringh_kiov { /* Helpers for userspace vrings. */ int vringh_init_user(struct vringh *vrh, u64 features, unsigned int num, bool weak_barriers, - struct vring_desc __user *desc, - struct vring_avail __user *avail, - struct vring_used __user *used); + vring_desc_t __user *desc, + vring_avail_t __user *avail, + vring_used_t __user *used); static inline void vringh_iov_init(struct vringh_iov *iov, struct iovec *iovec, unsigned num) diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h index 9223c3a5c46ae..476d3e5c0fe70 100644 --- a/include/uapi/linux/virtio_ring.h +++ b/include/uapi/linux/virtio_ring.h @@ -86,6 +86,13 @@ * at the end of the used ring. 
Guest should ignore the used->flags field. */ #define VIRTIO_RING_F_EVENT_IDX 29 +/* Alignment requirements for vring elements. + * When using pre-virtio 1.0 layout, these fall out naturally. + */ +#define VRING_AVAIL_ALIGN_SIZE 2 +#define VRING_USED_ALIGN_SIZE 4 +#define VRING_DESC_ALIGN_SIZE 16 + /* Virtio ring descriptors: 16 bytes. These can chain together via "next". */ struct vring_desc { /* Address (guest-physical). */ @@ -112,29 +119,46 @@ struct vring_used_elem { __virtio32 len; }; +typedef struct vring_used_elem __attribute__((aligned(VRING_USED_ALIGN_SIZE))) + vring_used_elem_t; + struct vring_used { __virtio16 flags; __virtio16 idx; - struct vring_used_elem ring[]; + vring_used_elem_t ring[]; }; +/* + * The ring element addresses are passed between components with different + * alignments assumptions. Thus, we might need to decrease the compiler-selected + * alignment, and so must use a typedef to make sure the aligned attribute + * actually takes hold: + * + * https://gcc.gnu.org/onlinedocs//gcc/Common-Type-Attributes.html#Common-Type-Attributes + * + * When used on a struct, or struct member, the aligned attribute can only + * increase the alignment; in order to decrease it, the packed attribute must + * be specified as well. When used as part of a typedef, the aligned attribute + * can both increase and decrease alignment, and specifying the packed + * attribute generates a warning. + */ +typedef struct vring_desc __attribute__((aligned(VRING_DESC_ALIGN_SIZE))) + vring_desc_t; +typedef struct vring_avail __attribute__((aligned(VRING_AVAIL_ALIGN_SIZE))) + vring_avail_t; +typedef struct vring_used __attribute__((aligned(VRING_USED_ALIGN_SIZE))) + vring_used_t; + struct vring { unsigned int num; - struct vring_desc *desc; + vring_desc_t *desc; - struct vring_avail *avail; + vring_avail_t *avail; - struct vring_used *used; + vring_used_t *used; }; -/* Alignment requirements for vring elements. - * When using pre-virtio 1.0 layout, these fall out naturally. - */ -#define VRING_AVAIL_ALIGN_SIZE 2 -#define VRING_USED_ALIGN_SIZE 4 -#define VRING_DESC_ALIGN_SIZE 16 - #ifndef VIRTIO_RING_NO_LEGACY /* The standard layout for the ring is a continuous chunk of memory which looks -- GitLab From 213e77213867e4edbdb342c37ea822feedf4608f Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Thu, 23 Apr 2020 08:36:57 -0400 Subject: [PATCH 1004/1971] vhost: revert "vhost: disable for OABI" This reverts commit d085eb8ce727 ("vhost: disable for OABI") With commit "virtio: force spec specified alignment on types" in place, we force proper alignment for all structures, so there's no longer a reason to blacklist OABI. Signed-off-by: Michael S. 
Tsirkin --- drivers/misc/mic/Kconfig | 2 +- drivers/net/caif/Kconfig | 2 +- drivers/vdpa/Kconfig | 2 +- drivers/vhost/Kconfig | 17 ++++------------- 4 files changed, 7 insertions(+), 16 deletions(-) diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig index 3bfe72c59864c..8f201d019f5a4 100644 --- a/drivers/misc/mic/Kconfig +++ b/drivers/misc/mic/Kconfig @@ -116,7 +116,7 @@ config MIC_COSM config VOP tristate "VOP Driver" - depends on VOP_BUS && VHOST_DPN + depends on VOP_BUS select VHOST_RING select VIRTIO help diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig index 661c25eb1c46f..9db0570c5bebb 100644 --- a/drivers/net/caif/Kconfig +++ b/drivers/net/caif/Kconfig @@ -50,7 +50,7 @@ config CAIF_HSI config CAIF_VIRTIO tristate "CAIF virtio transport driver" - depends on CAIF && HAS_DMA && VHOST_DPN + depends on CAIF && HAS_DMA select VHOST_RING select VIRTIO select GENERIC_ALLOCATOR diff --git a/drivers/vdpa/Kconfig b/drivers/vdpa/Kconfig index e8140065c8a53..3e1ceb8e9f2b5 100644 --- a/drivers/vdpa/Kconfig +++ b/drivers/vdpa/Kconfig @@ -10,7 +10,7 @@ if VDPA config VDPA_SIM tristate "vDPA device simulator" - depends on RUNTIME_TESTING_MENU && HAS_DMA && VHOST_DPN + depends on RUNTIME_TESTING_MENU && HAS_DMA select VHOST_RING default n help diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig index c4f2737935953..2c75d164b8272 100644 --- a/drivers/vhost/Kconfig +++ b/drivers/vhost/Kconfig @@ -13,15 +13,6 @@ config VHOST_RING This option is selected by any driver which needs to access the host side of a virtio ring. -config VHOST_DPN - bool - depends on !ARM || AEABI - default y - help - Anything selecting VHOST or VHOST_RING must depend on VHOST_DPN. - This excludes the deprecated ARM ABI since that forces a 4 byte - alignment on all structs - incompatible with virtio spec requirements. - config VHOST tristate select VHOST_IOTLB @@ -37,7 +28,7 @@ if VHOST_MENU config VHOST_NET tristate "Host kernel accelerator for virtio net" - depends on NET && EVENTFD && (TUN || !TUN) && (TAP || !TAP) && VHOST_DPN + depends on NET && EVENTFD && (TUN || !TUN) && (TAP || !TAP) select VHOST ---help--- This kernel module can be loaded in host kernel to accelerate @@ -49,7 +40,7 @@ config VHOST_NET config VHOST_SCSI tristate "VHOST_SCSI TCM fabric driver" - depends on TARGET_CORE && EVENTFD && VHOST_DPN + depends on TARGET_CORE && EVENTFD select VHOST default n ---help--- @@ -58,7 +49,7 @@ config VHOST_SCSI config VHOST_VSOCK tristate "vhost virtio-vsock driver" - depends on VSOCKETS && EVENTFD && VHOST_DPN + depends on VSOCKETS && EVENTFD select VHOST select VIRTIO_VSOCKETS_COMMON default n @@ -72,7 +63,7 @@ config VHOST_VSOCK config VHOST_VDPA tristate "Vhost driver for vDPA-based backend" - depends on EVENTFD && VHOST_DPN + depends on EVENTFD select VHOST depends on VDPA help -- GitLab From 5c1bd89b45d4c82316833a8d7ed36720ab89ee3d Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 10 Mar 2020 12:54:11 +0100 Subject: [PATCH 1005/1971] MAINTAINERS: Add myself as virtio-balloon co-maintainer As suggested by Michael, let's add me as co-maintainer of virtio-balloon. While at it, also add "include/linux/balloon_compaction.h" to the file list. Cc: Michael S. Tsirkin Cc: Jason Wang Cc: Greg Kroah-Hartman Signed-off-by: David Hildenbrand Link: https://lore.kernel.org/r/20200310115411.12760-1-david@redhat.com Signed-off-by: Michael S. 
Tsirkin --- MAINTAINERS | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index 50659d76976b7..b5106b7d6d0c2 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -17909,9 +17909,18 @@ F: drivers/virtio/ F: include/linux/vdpa.h F: include/linux/virtio*.h F: include/uapi/linux/virtio_*.h -F: mm/balloon_compaction.c F: tools/virtio/ +VIRTIO BALLOON +M: "Michael S. Tsirkin" +M: David Hildenbrand +L: virtualization@lists.linux-foundation.org +S: Maintained +F: drivers/virtio/virtio_balloon.c +F: include/uapi/linux/virtio_balloon.h +F: include/linux/balloon_compaction.h +F: mm/balloon_compaction.c + VIRTIO CRYPTO DRIVER M: Gonglei L: virtualization@lists.linux-foundation.org -- GitLab From 856c45d8c1a733e10ad82be817202eb32ea4e570 Mon Sep 17 00:00:00 2001 From: Peter Vasil Date: Tue, 28 Apr 2020 18:41:50 +0200 Subject: [PATCH 1006/1971] pwm: sun4i: Support direct clock output on Allwinner A64 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Allwinner A64 is capable of a direct clock output on PWM (see A64 User Manual chapter 3.10). Add support for this in the sun4i PWM driver. Signed-off-by: Peter Vasil Acked-by: Maxime Ripard Acked-by: Uwe Kleine-König Signed-off-by: Thierry Reding --- drivers/pwm/pwm-sun4i.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c index 5c677c5633499..18fbbe3277d0f 100644 --- a/drivers/pwm/pwm-sun4i.c +++ b/drivers/pwm/pwm-sun4i.c @@ -352,6 +352,12 @@ static const struct sun4i_pwm_data sun4i_pwm_single_bypass = { .npwm = 1, }; +static const struct sun4i_pwm_data sun50i_a64_pwm_data = { + .has_prescaler_bypass = true, + .has_direct_mod_clk_output = true, + .npwm = 1, +}; + static const struct sun4i_pwm_data sun50i_h6_pwm_data = { .has_prescaler_bypass = true, .has_direct_mod_clk_output = true, @@ -374,6 +380,9 @@ static const struct of_device_id sun4i_pwm_dt_ids[] = { }, { .compatible = "allwinner,sun8i-h3-pwm", .data = &sun4i_pwm_single_bypass, + }, { + .compatible = "allwinner,sun50i-a64-pwm", + .data = &sun50i_a64_pwm_data, }, { .compatible = "allwinner,sun50i-h6-pwm", .data = &sun50i_h6_pwm_data, -- GitLab From 01aa905d4791da7d3630f6030ff99d58105cca00 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Tue, 12 May 2020 13:00:44 +0200 Subject: [PATCH 1007/1971] pwm: lpss: Fix get_state runtime-pm reference handling Before commit cfc4c189bc70 ("pwm: Read initial hardware state at request time"), a driver's get_state callback would get called once per PWM from pwmchip_add(). pwm-lpss' runtime-pm code was relying on this, getting a runtime-pm ref for PWMs which are enabled at probe time from within its get_state callback, before enabling runtime-pm. The change to calling get_state at request time causes a number of problems: 1. PWMs enabled at probe time may get runtime suspended before they are requested, causing e.g. a LCD backlight controlled by the PWM to turn off. 2. When the request happens when the PWM has been runtime suspended, the ctrl register will read all 1 / 0xffffffff, causing get_state to store bogus values in the pwm_state. 3. get_state was using an async pm_runtime_get() call, because it assumed that runtime-pm has not been enabled yet. If shortly after the request an apply call is made, then the pwm_lpss_is_updating() check may trigger because the resume triggered by the pm_runtime_get() call is not complete yet, so the ctrl register still reads all 1 / 0xffffffff. 
This commit fixes these issues by moving the initial pm_runtime_get() call for PWMs which are enabled at probe time to the pwm_lpss_probe() function; and by making get_state take a runtime-pm ref before reading the ctrl reg. BugLink: https://bugzilla.redhat.com/show_bug.cgi?id=1828927 Fixes: cfc4c189bc70 ("pwm: Read initial hardware state at request time") Cc: stable@vger.kernel.org Signed-off-by: Hans de Goede Reviewed-by: Andy Shevchenko Signed-off-by: Thierry Reding --- drivers/pwm/pwm-lpss.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c index 75bbfe5f3bc29..9d965ffe66d1e 100644 --- a/drivers/pwm/pwm-lpss.c +++ b/drivers/pwm/pwm-lpss.c @@ -158,7 +158,6 @@ static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm, return 0; } -/* This function gets called once from pwmchip_add to get the initial state */ static void pwm_lpss_get_state(struct pwm_chip *chip, struct pwm_device *pwm, struct pwm_state *state) { @@ -167,6 +166,8 @@ static void pwm_lpss_get_state(struct pwm_chip *chip, struct pwm_device *pwm, unsigned long long base_unit, freq, on_time_div; u32 ctrl; + pm_runtime_get_sync(chip->dev); + base_unit_range = BIT(lpwm->info->base_unit_bits); ctrl = pwm_lpss_read(pwm); @@ -187,8 +188,7 @@ static void pwm_lpss_get_state(struct pwm_chip *chip, struct pwm_device *pwm, state->polarity = PWM_POLARITY_NORMAL; state->enabled = !!(ctrl & PWM_ENABLE); - if (state->enabled) - pm_runtime_get(chip->dev); + pm_runtime_put(chip->dev); } static const struct pwm_ops pwm_lpss_ops = { @@ -202,7 +202,8 @@ struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r, { struct pwm_lpss_chip *lpwm; unsigned long c; - int ret; + int i, ret; + u32 ctrl; if (WARN_ON(info->npwm > MAX_PWMS)) return ERR_PTR(-ENODEV); @@ -232,6 +233,12 @@ struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r, return ERR_PTR(ret); } + for (i = 0; i < lpwm->info->npwm; i++) { + ctrl = pwm_lpss_read(&lpwm->chip.pwms[i]); + if (ctrl & PWM_ENABLE) + pm_runtime_get(dev); + } + return lpwm; } EXPORT_SYMBOL_GPL(pwm_lpss_probe); -- GitLab From b48d49e0d53a15a82545ae68db54e8abe99a840a Mon Sep 17 00:00:00 2001 From: Paul Cercueil Date: Wed, 27 May 2020 13:52:22 +0200 Subject: [PATCH 1008/1971] pwm: jz4740: Drop dependency on MACH_INGENIC MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Depending on MACH_INGENIC prevent us from creating a generic kernel that works on more than one MIPS board. Instead, we just depend on MIPS being set. 
Acked-by: Uwe Kleine-König Signed-off-by: Paul Cercueil Signed-off-by: Thierry Reding --- drivers/pwm/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig index c13d146cdde53..cb8d739067d2f 100644 --- a/drivers/pwm/Kconfig +++ b/drivers/pwm/Kconfig @@ -244,7 +244,7 @@ config PWM_IQS620A config PWM_JZ4740 tristate "Ingenic JZ47xx PWM support" - depends on MACH_INGENIC + depends on MIPS depends on COMMON_CLK select MFD_SYSCON help -- GitLab From 9017dc4fbd59c09463019ce494cfe36d654495a8 Mon Sep 17 00:00:00 2001 From: Paul Cercueil Date: Wed, 27 May 2020 13:52:23 +0200 Subject: [PATCH 1009/1971] pwm: jz4740: Enhance precision in calculation of duty cycle MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Calculating the hardware value for the duty from the hardware value of the period resulted in a precision loss versus calculating it from the clock rate directly. (Also remove a cast that doesn't really need to be here) Fixes: f6b8a5700057 ("pwm: Add Ingenic JZ4740 support") Cc: Suggested-by: Uwe Kleine-König Reviewed-by: Uwe Kleine-König Signed-off-by: Paul Cercueil Signed-off-by: Thierry Reding --- drivers/pwm/pwm-jz4740.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/pwm/pwm-jz4740.c b/drivers/pwm/pwm-jz4740.c index 3cd5c054ad9af..4fe9d99ac9a90 100644 --- a/drivers/pwm/pwm-jz4740.c +++ b/drivers/pwm/pwm-jz4740.c @@ -158,11 +158,11 @@ static int jz4740_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, /* Calculate period value */ tmp = (unsigned long long)rate * state->period; do_div(tmp, NSEC_PER_SEC); - period = (unsigned long)tmp; + period = tmp; /* Calculate duty value */ - tmp = (unsigned long long)period * state->duty_cycle; - do_div(tmp, state->period); + tmp = (unsigned long long)rate * state->duty_cycle; + do_div(tmp, NSEC_PER_SEC); duty = period - tmp; if (duty >= period) -- GitLab From a020f22a4ff555386f136eb951e16e4cff709e01 Mon Sep 17 00:00:00 2001 From: Paul Cercueil Date: Wed, 27 May 2020 13:52:24 +0200 Subject: [PATCH 1010/1971] pwm: jz4740: Make PWM start with the active part The PWM in Ingenic SoCs starts in inactive state until the internal timer reaches the duty value, then becomes active until the timer reaches the period value. In theory, we should then use (period - duty) as the real duty value, as a high duty value would otherwise result in the PWM pin being inactive most of the time. This is the reason why the duty value was inverted in the driver until now, but it still had the problem that it would not start with the active part. To address this remaining issue, the common trick is to invert the duty, and invert the polarity when the PWM is enabled. Since the duty was already inverted, and we invert it again, we now program the hardware for the requested duty, and simply invert the polarity when the PWM is enabled. Signed-off-by: Paul Cercueil Signed-off-by: Thierry Reding --- drivers/pwm/pwm-jz4740.c | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/drivers/pwm/pwm-jz4740.c b/drivers/pwm/pwm-jz4740.c index 4fe9d99ac9a90..fe06ca8ce30fe 100644 --- a/drivers/pwm/pwm-jz4740.c +++ b/drivers/pwm/pwm-jz4740.c @@ -6,7 +6,6 @@ * Limitations: * - The .apply callback doesn't complete the currently running period before * reconfiguring the hardware. - * - Each period starts with the inactive part. 
*/ #include @@ -163,7 +162,7 @@ static int jz4740_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, /* Calculate duty value */ tmp = (unsigned long long)rate * state->duty_cycle; do_div(tmp, NSEC_PER_SEC); - duty = period - tmp; + duty = tmp; if (duty >= period) duty = period - 1; @@ -189,18 +188,26 @@ static int jz4740_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, regmap_update_bits(jz4740->map, TCU_REG_TCSRc(pwm->hwpwm), TCU_TCSR_PWM_SD, TCU_TCSR_PWM_SD); - /* Set polarity */ - switch (state->polarity) { - case PWM_POLARITY_NORMAL: + /* + * Set polarity. + * + * The PWM starts in inactive state until the internal timer reaches the + * duty value, then becomes active until the timer reaches the period + * value. In theory, we should then use (period - duty) as the real duty + * value, as a high duty value would otherwise result in the PWM pin + * being inactive most of the time. + * + * Here, we don't do that, and instead invert the polarity of the PWM + * when it is active. This trick makes the PWM start with its active + * state instead of its inactive state. + */ + if ((state->polarity == PWM_POLARITY_NORMAL) ^ state->enabled) regmap_update_bits(jz4740->map, TCU_REG_TCSRc(pwm->hwpwm), TCU_TCSR_PWM_INITL_HIGH, 0); - break; - case PWM_POLARITY_INVERSED: + else regmap_update_bits(jz4740->map, TCU_REG_TCSRc(pwm->hwpwm), TCU_TCSR_PWM_INITL_HIGH, TCU_TCSR_PWM_INITL_HIGH); - break; - } if (state->enabled) jz4740_pwm_enable(chip, pwm); -- GitLab From 74db728c0b4c7c0781d439afa6ebc43c76a1f952 Mon Sep 17 00:00:00 2001 From: Paul Cercueil Date: Wed, 27 May 2020 13:52:25 +0200 Subject: [PATCH 1011/1971] pwm: jz4740: Add support for the JZ4725B The PWM hardware in the JZ4725B works the same as in the JZ4740, but has only six channels available. 
Signed-off-by: Paul Cercueil Signed-off-by: Thierry Reding --- drivers/pwm/pwm-jz4740.c | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/drivers/pwm/pwm-jz4740.c b/drivers/pwm/pwm-jz4740.c index fe06ca8ce30fe..5830ac2bdf6ac 100644 --- a/drivers/pwm/pwm-jz4740.c +++ b/drivers/pwm/pwm-jz4740.c @@ -20,7 +20,9 @@ #include #include -#define NUM_PWM 8 +struct soc_info { + unsigned int num_pwms; +}; struct jz4740_pwm_chip { struct pwm_chip chip; @@ -36,7 +38,7 @@ static bool jz4740_pwm_can_use_chn(struct jz4740_pwm_chip *jz, unsigned int channel) { /* Enable all TCU channels for PWM use by default except channels 0/1 */ - u32 pwm_channels_mask = GENMASK(NUM_PWM - 1, 2); + u32 pwm_channels_mask = GENMASK(jz->chip.npwm - 1, 2); device_property_read_u32(jz->chip.dev->parent, "ingenic,pwm-channels-mask", @@ -226,6 +228,11 @@ static int jz4740_pwm_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct jz4740_pwm_chip *jz4740; + const struct soc_info *info; + + info = device_get_match_data(dev); + if (!info) + return -EINVAL; jz4740 = devm_kzalloc(dev, sizeof(*jz4740), GFP_KERNEL); if (!jz4740) @@ -239,7 +246,7 @@ static int jz4740_pwm_probe(struct platform_device *pdev) jz4740->chip.dev = dev; jz4740->chip.ops = &jz4740_pwm_ops; - jz4740->chip.npwm = NUM_PWM; + jz4740->chip.npwm = info->num_pwms; jz4740->chip.base = -1; jz4740->chip.of_xlate = of_pwm_xlate_with_flags; jz4740->chip.of_pwm_n_cells = 3; @@ -256,9 +263,18 @@ static int jz4740_pwm_remove(struct platform_device *pdev) return pwmchip_remove(&jz4740->chip); } +static const struct soc_info __maybe_unused jz4740_soc_info = { + .num_pwms = 8, +}; + +static const struct soc_info __maybe_unused jz4725b_soc_info = { + .num_pwms = 6, +}; + #ifdef CONFIG_OF static const struct of_device_id jz4740_pwm_dt_ids[] = { - { .compatible = "ingenic,jz4740-pwm", }, + { .compatible = "ingenic,jz4740-pwm", .data = &jz4740_soc_info }, + { .compatible = "ingenic,jz4725b-pwm", .data = &jz4725b_soc_info }, {}, }; MODULE_DEVICE_TABLE(of, jz4740_pwm_dt_ids); -- GitLab From 1d7796bdb63a63c9cc8087ffc6daeb36a7c005e1 Mon Sep 17 00:00:00 2001 From: Sandipan Patra Date: Mon, 1 Jun 2020 10:50:36 +0530 Subject: [PATCH 1012/1971] pwm: tegra: Support dynamic clock frequency configuration Added support for dynamic clock freq configuration in PWM kernel driver. Earlier the PWM driver used to cache boot time clock rate by PWM clock parent during probe. Hence dynamically changing PWM frequency was not possible for all the possible ranges. With this change, dynamic calculation is enabled and it is able to set the requested period from sysfs knob provided the value is supported by clock source. Changes mainly have 2 parts: - Tegra186 and later chips [1] - Tegra210 and prior chips [2] For [1] - Changes implemented to set pwm period dynamically and also checks added to allow only if requested period(ns) is below or equals to higher range. For [2] - Only checks if the requested period(ns) is below or equals to higher range defined by max clock limit. The limitation in Tegra210 or prior chips are due to the reason of having only one PWM controller supporting multiple channels. But later chips have multiple PWM controller instances each having single channel support. 
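The arithmetic behind this can be made concrete with a small stand-alone sketch. It only reproduces the two formulas added further down in the diff (the minimum supported period computed at probe time, and the source-clock rate requested for a given period); the 408 MHz maximum clock and the 8-bit duty width are the figures quoted in the patch, and the 45334 ns period is just an arbitrary example request.

#include <stdio.h>

#define NSEC_PER_SEC	1000000000ULL
#define PWM_DUTY_WIDTH	8

int main(void)
{
	unsigned long long max_frequency = 408000000ULL;	/* 408 MHz source clock */
	unsigned long long period_ns = 45334;			/* example request */
	unsigned long long min_period_ns, required_clk_rate;

	/* Shortest period reachable from the fastest source clock. */
	min_period_ns = NSEC_PER_SEC / (max_frequency >> PWM_DUTY_WIDTH) + 1;

	/* Clock rate at which 2^PWM_DUTY_WIDTH counter cycles equal period_ns. */
	required_clk_rate = (NSEC_PER_SEC / period_ns) << PWM_DUTY_WIDTH;

	printf("minimum period: %llu ns\n", min_period_ns);
	printf("clock rate for a %llu ns period: %llu Hz\n",
	       period_ns, required_clk_rate);
	return 0;
}

With these numbers the sketch reports a minimum period of 628 ns and a requested source clock of roughly 5.6 MHz, which the driver then reads back via clk_get_rate() as whatever rate the clock framework could actually provide.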
Signed-off-by: Sandipan Patra Reviewed-by: Jon Hunter Signed-off-by: Thierry Reding --- drivers/pwm/pwm-tegra.c | 80 ++++++++++++++++++++++++++++++++++++++--- 1 file changed, 76 insertions(+), 4 deletions(-) diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c index d26ed8f579ff0..1daf591025c00 100644 --- a/drivers/pwm/pwm-tegra.c +++ b/drivers/pwm/pwm-tegra.c @@ -4,8 +4,36 @@ * * Tegra pulse-width-modulation controller driver * - * Copyright (c) 2010, NVIDIA Corporation. + * Copyright (c) 2010-2020, NVIDIA Corporation. * Based on arch/arm/plat-mxc/pwm.c by Sascha Hauer + * + * Overview of Tegra Pulse Width Modulator Register: + * 1. 13-bit: Frequency division (SCALE) + * 2. 8-bit : Pulse division (DUTY) + * 3. 1-bit : Enable bit + * + * The PWM clock frequency is divided by 256 before subdividing it based + * on the programmable frequency division value to generate the required + * frequency for PWM output. The maximum output frequency that can be + * achieved is (max rate of source clock) / 256. + * e.g. if source clock rate is 408 MHz, maximum output frequency can be: + * 408 MHz/256 = 1.6 MHz. + * This 1.6 MHz frequency can further be divided using SCALE value in PWM. + * + * PWM pulse width: 8 bits are usable [23:16] for varying pulse width. + * To achieve 100% duty cycle, program Bit [24] of this register to + * 1’b1. In which case the other bits [23:16] are set to don't care. + * + * Limitations: + * - When PWM is disabled, the output is driven to inactive. + * - It does not allow the current PWM period to complete and + * stops abruptly. + * + * - If the register is reconfigured while PWM is running, + * it does not complete the currently running period. + * + * - If the user input duty is beyond acceptible limits, + * -EINVAL is returned. */ #include @@ -41,6 +69,7 @@ struct tegra_pwm_chip { struct reset_control*rst; unsigned long clk_rate; + unsigned long min_period_ns; void __iomem *regs; @@ -68,7 +97,7 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, { struct tegra_pwm_chip *pc = to_tegra_pwm_chip(chip); unsigned long long c = duty_ns, hz; - unsigned long rate; + unsigned long rate, required_clk_rate; u32 val = 0; int err; @@ -82,10 +111,48 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, val = (u32)c << PWM_DUTY_SHIFT; + /* + * min period = max clock limit >> PWM_DUTY_WIDTH + */ + if (period_ns < pc->min_period_ns) + return -EINVAL; + /* * Compute the prescaler value for which (1 << PWM_DUTY_WIDTH) * cycles at the PWM clock rate will take period_ns nanoseconds. + * + * num_channels: If single instance of PWM controller has multiple + * channels (e.g. Tegra210 or older) then it is not possible to + * configure separate clock rates to each of the channels, in such + * case the value stored during probe will be referred. + * + * If every PWM controller instance has one channel respectively, i.e. + * nums_channels == 1 then only the clock rate can be modified + * dynamically (e.g. Tegra186 or Tegra194). */ + if (pc->soc->num_channels == 1) { + /* + * Rate is multiplied with 2^PWM_DUTY_WIDTH so that it matches + * with the maximum possible rate that the controller can + * provide. Any further lower value can be derived by setting + * PFM bits[0:12]. + * + * required_clk_rate is a reference rate for source clock and + * it is derived based on user requested period. By setting the + * source clock rate as required_clk_rate, PWM controller will + * be able to configure the requested period. 
+ */ + required_clk_rate = + (NSEC_PER_SEC / period_ns) << PWM_DUTY_WIDTH; + + err = clk_set_rate(pc->clk, required_clk_rate); + if (err < 0) + return -EINVAL; + + /* Store the new rate for further references */ + pc->clk_rate = clk_get_rate(pc->clk); + } + rate = pc->clk_rate >> PWM_DUTY_WIDTH; /* Consider precision in PWM_SCALE_WIDTH rate calculation */ @@ -94,7 +161,7 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, /* * Since the actual PWM divider is the register's frequency divider - * field minus 1, we need to decrement to get the correct value to + * field plus 1, we need to decrement to get the correct value to * write to the register. */ if (rate > 0) @@ -205,6 +272,10 @@ static int tegra_pwm_probe(struct platform_device *pdev) */ pwm->clk_rate = clk_get_rate(pwm->clk); + /* Set minimum limit of PWM period for the IP */ + pwm->min_period_ns = + (NSEC_PER_SEC / (pwm->soc->max_frequency >> PWM_DUTY_WIDTH)) + 1; + pwm->rst = devm_reset_control_get_exclusive(&pdev->dev, "pwm"); if (IS_ERR(pwm->rst)) { ret = PTR_ERR(pwm->rst); @@ -312,5 +383,6 @@ static struct platform_driver tegra_pwm_driver = { module_platform_driver(tegra_pwm_driver); MODULE_LICENSE("GPL"); -MODULE_AUTHOR("NVIDIA Corporation"); +MODULE_AUTHOR("Sandipan Patra "); +MODULE_DESCRIPTION("Tegra PWM controller driver"); MODULE_ALIAS("platform:tegra-pwm"); -- GitLab From ca162ce98110b98e7d97b7157328d34dcfdd40a9 Mon Sep 17 00:00:00 2001 From: Navid Emamdoost Date: Mon, 1 Jun 2020 02:11:16 -0500 Subject: [PATCH 1013/1971] pwm: img: Call pm_runtime_put() in pm_runtime_get_sync() failed case Even in failed case of pm_runtime_get_sync(), the usage_count is incremented. In order to keep the usage_count with correct value call appropriate pm_runtime_put(). Signed-off-by: Navid Emamdoost Signed-off-by: Thierry Reding --- drivers/pwm/pwm-img.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c index c9e57bd109fbf..599a0f66a3845 100644 --- a/drivers/pwm/pwm-img.c +++ b/drivers/pwm/pwm-img.c @@ -129,8 +129,10 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, duty = DIV_ROUND_UP(timebase * duty_ns, period_ns); ret = pm_runtime_get_sync(chip->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_autosuspend(chip->dev); return ret; + } val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG); val &= ~(PWM_CTRL_CFG_DIV_MASK << PWM_CTRL_CFG_DIV_SHIFT(pwm->hwpwm)); @@ -331,8 +333,10 @@ static int img_pwm_remove(struct platform_device *pdev) int ret; ret = pm_runtime_get_sync(&pdev->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put(&pdev->dev); return ret; + } for (i = 0; i < pwm_chip->chip.npwm; i++) { val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG); -- GitLab From cad0f2960675261584c14e8f0026cd67c60f4864 Mon Sep 17 00:00:00 2001 From: Rasmus Villemoes Date: Thu, 19 Sep 2019 11:17:27 +0200 Subject: [PATCH 1014/1971] pwm: rockchip: Simplify rockchip_pwm_get_state() The way state->enabled is computed is rather convoluted and hard to read - both branches of the if() actually do the exact same thing. So remove the if(), and further simplify " ? true : false" to "". 
Signed-off-by: Rasmus Villemoes Signed-off-by: Thierry Reding --- drivers/pwm/pwm-rockchip.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c index 73352e6fbccbf..eb8c9cb645a6c 100644 --- a/drivers/pwm/pwm-rockchip.c +++ b/drivers/pwm/pwm-rockchip.c @@ -83,12 +83,7 @@ static void rockchip_pwm_get_state(struct pwm_chip *chip, state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, clk_rate); val = readl_relaxed(pc->base + pc->data->regs.ctrl); - if (pc->data->supports_polarity) - state->enabled = ((val & enable_conf) != enable_conf) ? - false : true; - else - state->enabled = ((val & enable_conf) == enable_conf) ? - true : false; + state->enabled = (val & enable_conf) == enable_conf; if (pc->data->supports_polarity && !(val & PWM_DUTY_POSITIVE)) state->polarity = PWM_POLARITY_INVERSED; -- GitLab From aef1a3799b5cb3ba4841f6034497b179646ccc70 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Thu, 16 Apr 2020 10:02:45 +0200 Subject: [PATCH 1015/1971] pwm: imx27: Fix rounding behavior MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To not trigger the warnings provided by CONFIG_PWM_DEBUG - use up-rounding in .get_state() - don't divide by the result of a division - don't use the rounded counter value for the period length to calculate the counter value for the duty cycle Signed-off-by: Uwe Kleine-König Signed-off-by: Thierry Reding --- drivers/pwm/pwm-imx27.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/drivers/pwm/pwm-imx27.c b/drivers/pwm/pwm-imx27.c index a6e40d4c485f3..732a6f3701e8e 100644 --- a/drivers/pwm/pwm-imx27.c +++ b/drivers/pwm/pwm-imx27.c @@ -150,13 +150,12 @@ static void pwm_imx27_get_state(struct pwm_chip *chip, prescaler = MX3_PWMCR_PRESCALER_GET(val); pwm_clk = clk_get_rate(imx->clk_per); - pwm_clk = DIV_ROUND_CLOSEST_ULL(pwm_clk, prescaler); val = readl(imx->mmio_base + MX3_PWMPR); period = val >= MX3_PWMPR_MAX ? MX3_PWMPR_MAX : val; /* PWMOUT (Hz) = PWMCLK / (PWMPR + 2) */ - tmp = NSEC_PER_SEC * (u64)(period + 2); - state->period = DIV_ROUND_CLOSEST_ULL(tmp, pwm_clk); + tmp = NSEC_PER_SEC * (u64)(period + 2) * prescaler; + state->period = DIV_ROUND_UP_ULL(tmp, pwm_clk); /* * PWMSAR can be read only if PWM is enabled. 
If the PWM is disabled, @@ -167,8 +166,8 @@ static void pwm_imx27_get_state(struct pwm_chip *chip, else val = imx->duty_cycle; - tmp = NSEC_PER_SEC * (u64)(val); - state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, pwm_clk); + tmp = NSEC_PER_SEC * (u64)(val) * prescaler; + state->duty_cycle = DIV_ROUND_UP_ULL(tmp, pwm_clk); pwm_imx27_clk_disable_unprepare(imx); } @@ -220,22 +219,23 @@ static int pwm_imx27_apply(struct pwm_chip *chip, struct pwm_device *pwm, struct pwm_imx27_chip *imx = to_pwm_imx27_chip(chip); struct pwm_state cstate; unsigned long long c; + unsigned long long clkrate; int ret; u32 cr; pwm_get_state(pwm, &cstate); - c = clk_get_rate(imx->clk_per); - c *= state->period; + clkrate = clk_get_rate(imx->clk_per); + c = clkrate * state->period; - do_div(c, 1000000000); + do_div(c, NSEC_PER_SEC); period_cycles = c; prescale = period_cycles / 0x10000 + 1; period_cycles /= prescale; - c = (unsigned long long)period_cycles * state->duty_cycle; - do_div(c, state->period); + c = clkrate * state->duty_cycle; + do_div(c, NSEC_PER_SEC * prescale); duty_cycles = c; /* -- GitLab From d741cb045cb5552d9e636e7e447f237d3b1667ad Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Tue, 2 Jun 2020 14:29:01 +0200 Subject: [PATCH 1016/1971] MAINTAINERS: Add Lee Jones as reviewer for the PWM subsystem Lee has kindly offered his help in sharing the patch review workload for the PWM subsystem. If this works out well between Lee, Uwe and myself it may be a good idea to maintain the subsystem as a group. Acked-by: Lee Jones Signed-off-by: Thierry Reding --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) diff --git a/MAINTAINERS b/MAINTAINERS index e64e5db314976..b8889bed5d1eb 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -13736,6 +13736,7 @@ F: drivers/media/rc/pwm-ir-tx.c PWM SUBSYSTEM M: Thierry Reding R: Uwe Kleine-König +M: Lee Jones L: linux-pwm@vger.kernel.org S: Maintained Q: https://patchwork.ozlabs.org/project/linux-pwm/list/ -- GitLab From cf1eb321d1cd134da0083f6430301a09f8012de1 Mon Sep 17 00:00:00 2001 From: Sebastian Reichel Date: Tue, 2 Jun 2020 16:36:24 +0200 Subject: [PATCH 1017/1971] Revert "power: supply: sbs-battery: add PEC support" This depends on the simplification of sbs_read_string_data, which breaks booting exynos5 based chromebooks. More investigation is required, so this patch and the simplification patch are reverted for this merge window. Note, that this is only a partial revert, since sbs_update_presence() has not been removed. It is also required for the charger broadcast disabling. This reverts commit 79bcd5a4a66076a8a8dacd7f4a3be1952283aef4. 
Signed-off-by: Sebastian Reichel --- drivers/power/supply/sbs-battery.c | 51 ------------------------------ 1 file changed, 51 deletions(-) diff --git a/drivers/power/supply/sbs-battery.c b/drivers/power/supply/sbs-battery.c index f4f73e669460a..00dfd9e9ac935 100644 --- a/drivers/power/supply/sbs-battery.c +++ b/drivers/power/supply/sbs-battery.c @@ -51,14 +51,6 @@ enum { REG_CHARGE_VOLTAGE, }; -#define REG_ADDR_SPEC_INFO 0x1A -#define SPEC_INFO_VERSION_MASK GENMASK(7, 4) -#define SPEC_INFO_VERSION_SHIFT 4 - -#define SBS_VERSION_1_0 1 -#define SBS_VERSION_1_1 2 -#define SBS_VERSION_1_1_WITH_PEC 3 - #define REG_ADDR_MANUFACTURE_DATE 0x1B /* Battery Mode defines */ @@ -232,57 +224,14 @@ exit: static int sbs_update_presence(struct sbs_info *chip, bool is_present) { - struct i2c_client *client = chip->client; - int retries = chip->i2c_retry_count; - s32 ret = 0; - u8 version; - if (chip->is_present == is_present) return 0; if (!is_present) { chip->is_present = false; - /* Disable PEC when no device is present */ - client->flags &= ~I2C_CLIENT_PEC; return 0; } - /* Check if device supports packet error checking and use it */ - while (retries > 0) { - ret = i2c_smbus_read_word_data(client, REG_ADDR_SPEC_INFO); - if (ret >= 0) - break; - - /* - * Some batteries trigger the detection pin before the - * I2C bus is properly connected. This works around the - * issue. - */ - msleep(100); - - retries--; - } - - if (ret < 0) { - dev_dbg(&client->dev, "failed to read spec info: %d\n", ret); - - /* fallback to old behaviour */ - client->flags &= ~I2C_CLIENT_PEC; - chip->is_present = true; - - return ret; - } - - version = (ret & SPEC_INFO_VERSION_MASK) >> SPEC_INFO_VERSION_SHIFT; - - if (version == SBS_VERSION_1_1_WITH_PEC) - client->flags |= I2C_CLIENT_PEC; - else - client->flags &= ~I2C_CLIENT_PEC; - - dev_dbg(&client->dev, "PEC: %s\n", (client->flags & I2C_CLIENT_PEC) ? - "enabled" : "disabled"); - if (!chip->is_present && is_present && !chip->charger_broadcasts) sbs_disable_charger_broadcasts(chip); -- GitLab From 972eabb97aab8e5d86c79f28a707e0d1e8a1d1a0 Mon Sep 17 00:00:00 2001 From: Sebastian Reichel Date: Tue, 2 Jun 2020 16:49:08 +0200 Subject: [PATCH 1018/1971] Revert "power: supply: sbs-battery: simplify read_read_string_data" The commit is a nice cleanup, but breaks booting on exynos5 based chromebooks. It's seems to come down to exynos5's i2c driver not implementing I2C_FUNC_SMBUS_READ_BLOCK_DATA. It's not yet clear why that breaks boot / massively slows it down when userspace starts, so revert the problematic patch. This reverts commit c4b12a2f3f3de670f6be5e96092a2cab0b877f1a. 
Signed-off-by: Sebastian Reichel --- drivers/power/supply/sbs-battery.c | 65 ++++++++++++++++++++++++------ 1 file changed, 53 insertions(+), 12 deletions(-) diff --git a/drivers/power/supply/sbs-battery.c b/drivers/power/supply/sbs-battery.c index 00dfd9e9ac935..83b9924033bd5 100644 --- a/drivers/power/supply/sbs-battery.c +++ b/drivers/power/supply/sbs-battery.c @@ -267,32 +267,66 @@ static int sbs_read_string_data(struct i2c_client *client, u8 address, char *values) { struct sbs_info *chip = i2c_get_clientdata(client); - int retries = chip->i2c_retry_count; - s32 ret = 0; + s32 ret = 0, block_length = 0; + int retries_length, retries_block; + u8 block_buffer[I2C_SMBUS_BLOCK_MAX + 1]; + retries_length = chip->i2c_retry_count; + retries_block = chip->i2c_retry_count; + + /* Adapter needs to support these two functions */ if (!i2c_check_functionality(client->adapter, - I2C_FUNC_SMBUS_READ_BLOCK_DATA)) { + I2C_FUNC_SMBUS_BYTE_DATA | + I2C_FUNC_SMBUS_I2C_BLOCK)){ return -ENODEV; } + /* Get the length of block data */ + while (retries_length > 0) { + ret = i2c_smbus_read_byte_data(client, address); + if (ret >= 0) + break; + retries_length--; + } + + if (ret < 0) { + dev_dbg(&client->dev, + "%s: i2c read at address 0x%x failed\n", + __func__, address); + return ret; + } + + /* block_length does not include NULL terminator */ + block_length = ret; + if (block_length > I2C_SMBUS_BLOCK_MAX) { + dev_err(&client->dev, + "%s: Returned block_length is longer than 0x%x\n", + __func__, I2C_SMBUS_BLOCK_MAX); + return -EINVAL; + } + /* Get the block data */ - while (retries > 0) { - ret = i2c_smbus_read_block_data(client, address, values); + while (retries_block > 0) { + ret = i2c_smbus_read_i2c_block_data( + client, address, + block_length + 1, block_buffer); if (ret >= 0) break; - retries--; + retries_block--; } if (ret < 0) { - dev_dbg(&client->dev, "%s: failed to read block 0x%x: %d\n", - __func__, address, ret); + dev_dbg(&client->dev, + "%s: i2c read at address 0x%x failed\n", + __func__, address); return ret; } - /* add string termination */ - values[ret] = '\0'; + /* block_buffer[0] == block_length */ + memcpy(values, block_buffer + 1, block_length); + values[block_length] = '\0'; - return 0; + return ret; } static int sbs_write_word_data(struct i2c_client *client, u8 address, @@ -514,7 +548,14 @@ static int sbs_get_battery_property(struct i2c_client *client, static int sbs_get_battery_string_property(struct i2c_client *client, int reg_offset, enum power_supply_property psp, char *val) { - return sbs_read_string_data(client, sbs_data[reg_offset].addr, val); + s32 ret; + + ret = sbs_read_string_data(client, sbs_data[reg_offset].addr, val); + + if (ret < 0) + return ret; + + return 0; } static void sbs_unit_adjustment(struct i2c_client *client, -- GitLab From d0676871fd52f575cc2bfec4faa2fcbc8af370e8 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 28 May 2020 23:52:06 +0900 Subject: [PATCH 1019/1971] lib: Make prime number generator independently selectable Make prime number generator independently selectable from kconfig. This allows us to enable CONFIG_PRIME_NUMBERS=m and run the tools/testing/selftests/lib/prime_numbers.sh without other DRM selftest modules. 
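For context, a consumer of this library is just a module that calls the generator's helpers. The sketch below is a toy out-of-tree module, not anything from this patch; it assumes the is_prime_number()/next_prime_number() helpers declared in <linux/prime_numbers.h>, so the header should be checked for the exact signatures before reusing it.

#include <linux/module.h>
#include <linux/prime_numbers.h>

static int __init prime_demo_init(void)
{
	unsigned long p;

	/* Walk the primes below 50 using the shared generator. */
	for (p = 2; p < 50; p = next_prime_number(p))
		pr_info("prime_demo: %lu (is_prime_number: %d)\n",
			p, is_prime_number(p));

	return 0;
}

static void __exit prime_demo_exit(void)
{
}

module_init(prime_demo_init);
module_exit(prime_demo_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Toy consumer of the prime number generator");

With CONFIG_PRIME_NUMBERS=m, such a module can be built and loaded without pulling in the DRM selftest modules mentioned above.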
Signed-off-by: Masami Hiramatsu Reviewed-by: Kees Cook Reviewed-by: Luis Chamberlain Signed-off-by: Shuah Khan --- lib/math/Kconfig | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/lib/math/Kconfig b/lib/math/Kconfig index 15bd50d92308d..f19bc9734fa7c 100644 --- a/lib/math/Kconfig +++ b/lib/math/Kconfig @@ -6,7 +6,12 @@ config CORDIC calculations are in fixed point. Module will be called cordic. config PRIME_NUMBERS - tristate + tristate "Simple prime number generator for testing" + help + This option provides a simple prime number generator for test + modules. + + If unsure, say N. config RATIONAL bool -- GitLab From 2f56f84511367ce5ffd4a1c88dcd770ef049ea96 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 28 May 2020 23:52:16 +0900 Subject: [PATCH 1020/1971] lib: Make test_sysctl initialized as module test_sysctl.c is expected to be used as a module, but since it does not use module_init(), it never be registered as a module and not appeared under /sys/module/. In the result, the selftests/sysctl/sysctl.sh always fails to find the test module and is skipped. This makes test_sysctl.c initialized as a module by module_init() and allow sysctl.sh to find the test module is loaded. Signed-off-by: Masami Hiramatsu Reviewed-by: Luis Chamberlain Signed-off-by: Shuah Khan --- lib/test_sysctl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/test_sysctl.c b/lib/test_sysctl.c index 566dad3f41960..ec4d0f03475d7 100644 --- a/lib/test_sysctl.c +++ b/lib/test_sysctl.c @@ -149,7 +149,7 @@ static int __init test_sysctl_init(void) } return 0; } -late_initcall(test_sysctl_init); +module_init(test_sysctl_init); static void __exit test_sysctl_exit(void) { -- GitLab From eee470e0739a9d8e29460f6d355cefa1c9a0384a Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 28 May 2020 23:52:26 +0900 Subject: [PATCH 1021/1971] selftests/sysctl: Fix to load test_sysctl module Fix to load test_sysctl.ko module correctly. sysctl.sh checks whether the test module is embedded (or loaded already) or not at first, and if not, it returns skip error instead of trying modprobe. Thus, there is no chance to load the test_sysctl test module. Instead, this removes that module embedded check and returns skip error only if it ensures that there is no embedded test module *and* no loadable test module. This also avoid referring config file since that is not installed. Signed-off-by: Masami Hiramatsu Reviewed-by: Kees Cook Reviewed-by: Luis Chamberlain Signed-off-by: Shuah Khan --- tools/testing/selftests/sysctl/sysctl.sh | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/tools/testing/selftests/sysctl/sysctl.sh b/tools/testing/selftests/sysctl/sysctl.sh index 6a970b127c9b3..c3459f9f2429b 100755 --- a/tools/testing/selftests/sysctl/sysctl.sh +++ b/tools/testing/selftests/sysctl/sysctl.sh @@ -40,16 +40,6 @@ ALL_TESTS="$ALL_TESTS 0004:1:1:uint_0001" ALL_TESTS="$ALL_TESTS 0005:3:1:int_0003" ALL_TESTS="$ALL_TESTS 0006:50:1:bitmap_0001" -test_modprobe() -{ - if [ ! -d $DIR ]; then - echo "$0: $DIR not present" >&2 - echo "You must have the following enabled in your kernel:" >&2 - cat $TEST_DIR/config >&2 - exit $ksft_skip - fi -} - function allow_user_defaults() { if [ -z $DIR ]; then @@ -125,10 +115,12 @@ function load_req_mod() if [ ! -d $DIR ]; then if ! modprobe -q -n $TEST_DRIVER; then echo "$0: module $TEST_DRIVER not found [SKIP]" + echo "You must set CONFIG_TEST_SYSCTL=m in your kernel" >&2 exit $ksft_skip fi modprobe $TEST_DRIVER if [ $? 
-ne 0 ]; then + echo "$0: modprobe $TEST_DRIVER failed." exit fi fi @@ -929,7 +921,6 @@ test_reqs allow_user_defaults check_production_sysctl_writes_strict load_req_mod -test_modprobe trap "test_finish" EXIT -- GitLab From 382561a0f11c4995d48ab82670412f8d6c418430 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 28 May 2020 23:52:37 +0900 Subject: [PATCH 1022/1971] selftests/sysctl: Make sysctl test driver as a module Fix config file to require CONFIG_TEST_SYSCTL=m instead of y because this driver introduces a test sysctl interfaces which are normally not used, and only used for the selftest. Signed-off-by: Masami Hiramatsu Reviewed-by: Kees Cook Reviewed-by: Luis Chamberlain Signed-off-by: Shuah Khan --- tools/testing/selftests/sysctl/config | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/sysctl/config b/tools/testing/selftests/sysctl/config index 6ca14800d7553..fc263efd1fad5 100644 --- a/tools/testing/selftests/sysctl/config +++ b/tools/testing/selftests/sysctl/config @@ -1 +1 @@ -CONFIG_TEST_SYSCTL=y +CONFIG_TEST_SYSCTL=m -- GitLab From 522f6e6cba6880a038e2bd88e10390b84cd3febd Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Sat, 23 May 2020 16:21:55 +0300 Subject: [PATCH 1023/1971] ovl: fix out of bounds access warning in ovl_check_fb_len() syzbot reported out of bounds memory access from open_by_handle_at() with a crafted file handle that looks like this: { .handle_bytes = 2, .handle_type = OVL_FILEID_V1 } handle_bytes gets rounded down to 0 and we end up calling: ovl_check_fh_len(fh, 0) => ovl_check_fb_len(fh + 3, -3) But fh buffer is only 2 bytes long, so accessing struct ovl_fb at fh + 3 is illegal. Fixes: cbe7fba8edfc ("ovl: make sure that real fid is 32bit aligned in memory") Reported-and-tested-by: syzbot+61958888b1c60361a791@syzkaller.appspotmail.com Cc: # v5.5 Signed-off-by: Amir Goldstein Signed-off-by: Miklos Szeredi --- fs/overlayfs/overlayfs.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h index 76747f5b05175..ffbb57b2d7f6b 100644 --- a/fs/overlayfs/overlayfs.h +++ b/fs/overlayfs/overlayfs.h @@ -355,6 +355,9 @@ int ovl_check_fb_len(struct ovl_fb *fb, int fb_len); static inline int ovl_check_fh_len(struct ovl_fh *fh, int fh_len) { + if (fh_len < sizeof(struct ovl_fh)) + return -EINVAL; + return ovl_check_fb_len(&fh->fb, fh_len - OVL_FH_WIRE_OFFSET); } -- GitLab From 59fb20138a9b5249a4176d5bbc5c670a97343061 Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Mon, 1 Jun 2020 11:56:50 -0400 Subject: [PATCH 1024/1971] ovl: simplify setting of origin for index lookup overlayfs can keep index of copied up files and directories and it seems to serve two primary puroposes. For regular files, it avoids breaking lower hardlinks over copy up. For directories it seems to be used for various error checks. During ovl_lookup(), we lookup for index using lower dentry in many a cases. That lower dentry is called "origin" and following is a summary of current logic. If there is no upperdentry, always lookup for index using lower dentry. For regular files it helps avoiding breaking hard links over copyup and for directories it seems to be just error checks. If there is an upperdentry, then there are 3 possible cases. - For directories, lower dentry is found using two ways. One is regular path based lookup in lower layers and second is using ORIGIN xattr on upper dentry. First verify that path based lookup lower dentry matches the one pointed by upper ORIGIN xattr. 
If yes, use this verified origin for index lookup. - For regular files (non-metacopy), there is no path based lookup in lower layers as lookup stops once we find upper dentry. So there is no origin verification. If there is ORIGIN xattr present on upper, use that to lookup index otherwise don't. - For regular metacopy files, again lower dentry is found using path based lookup as well as ORIGIN xattr on upper. Path based lookup is continued in this case to find lower data dentry for metacopy upper. So like directories we only use verified origin. If ORIGIN xattr is not present (Either because lower did not support file handles or because this is hardlink copied up with index=off), then don't use path lookup based lower dentry as origin. This is same as regular non-metacopy file case. Suggested-by: Amir Goldstein Signed-off-by: Vivek Goyal Reviewed-by: Amir Goldstein Signed-off-by: Miklos Szeredi --- fs/overlayfs/namei.c | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c index 723d177447580..e6868110d9f1f 100644 --- a/fs/overlayfs/namei.c +++ b/fs/overlayfs/namei.c @@ -994,25 +994,30 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, } stack = origin_path; ctr = 1; + origin = origin_path->dentry; origin_path = NULL; } /* - * Lookup index by lower inode and verify it matches upper inode. - * We only trust dir index if we verified that lower dir matches - * origin, otherwise dir index entries may be inconsistent and we - * ignore them. + * Always lookup index if there is no-upperdentry. * - * For non-dir upper metacopy dentry, we already set "origin" if we - * verified that lower matched upper origin. If upper origin was - * not present (because lower layer did not support fh encode/decode), - * or indexing is not enabled, do not set "origin" and skip looking up - * index. This case should be handled in same way as a non-dir upper - * without ORIGIN is handled. + * For the case of upperdentry, we have set origin by now if it + * needed to be set. There are basically three cases. + * + * For directories, lookup index by lower inode and verify it matches + * upper inode. We only trust dir index if we verified that lower dir + * matches origin, otherwise dir index entries may be inconsistent + * and we ignore them. + * + * For regular upper, we already set origin if upper had ORIGIN + * xattr. There is no verification though as there is no path + * based dentry lookup in lower in this case. + * + * For metacopy upper, we set a verified origin already if index + * is enabled and if upper had an ORIGIN xattr. * - * Always lookup index of non-dir non-metacopy and non-upper. */ - if (ctr && (!upperdentry || (!d.is_dir && !metacopy))) + if (!upperdentry && ctr) origin = stack[0].dentry; if (origin && ovl_indexdir(dentry->d_sb) && -- GitLab From 6815f479ca90ee7fd2e28b2a420f796b974155fe Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Mon, 1 Jun 2020 11:56:51 -0400 Subject: [PATCH 1025/1971] ovl: use only uppermetacopy state in ovl_lookup() Currently we use a variable "metacopy" which signifies that dentry could be either uppermetacopy or lowermetacopy. Amir suggested that we can move code around and use d.metacopy in such a way that we don't need lowermetacopy and just can do away with uppermetacopy. So this patch replaces "metacopy" with "uppermetacopy". It also moves some code little higher to keep reading little simpler. 
Suggested-by: Amir Goldstein Signed-off-by: Vivek Goyal Reviewed-by: Amir Goldstein Signed-off-by: Miklos Szeredi --- fs/overlayfs/namei.c | 57 ++++++++++++++++++++++---------------------- 1 file changed, 28 insertions(+), 29 deletions(-) diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c index e6868110d9f1f..59363c4f6ed40 100644 --- a/fs/overlayfs/namei.c +++ b/fs/overlayfs/namei.c @@ -812,7 +812,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, struct dentry *this; unsigned int i; int err; - bool metacopy = false; + bool uppermetacopy = false; struct ovl_lookup_data d = { .sb = dentry->d_sb, .name = dentry->d_name, @@ -858,7 +858,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, goto out_put_upper; if (d.metacopy) - metacopy = true; + uppermetacopy = true; } if (d.redirect) { @@ -895,6 +895,21 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, if (!this) continue; + if ((uppermetacopy || d.metacopy) && !ofs->config.metacopy) { + err = -EPERM; + pr_warn_ratelimited("refusing to follow metacopy origin for (%pd2)\n", dentry); + goto out_put; + } + + /* + * Do not store intermediate metacopy dentries in chain, + * except top most lower metacopy dentry + */ + if (d.metacopy && ctr) { + dput(this); + continue; + } + /* * If no origin fh is stored in upper of a merge dir, store fh * of lower dir and set upper parent "impure". @@ -929,17 +944,6 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, origin = this; } - if (d.metacopy) - metacopy = true; - /* - * Do not store intermediate metacopy dentries in chain, - * except top most lower metacopy dentry - */ - if (d.metacopy && ctr) { - dput(this); - continue; - } - stack[ctr].dentry = this; stack[ctr].layer = lower.layer; ctr++; @@ -971,22 +975,17 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, } } - if (metacopy) { - /* - * Found a metacopy dentry but did not find corresponding - * data dentry - */ - if (d.metacopy) { - err = -EIO; - goto out_put; - } - - err = -EPERM; - if (!ofs->config.metacopy) { - pr_warn_ratelimited("refusing to follow metacopy origin for (%pd2)\n", - dentry); - goto out_put; - } + /* + * For regular non-metacopy upper dentries, there is no lower + * path based lookup, hence ctr will be zero. If a dentry is found + * using ORIGIN xattr on upper, install it in stack. + * + * For metacopy dentry, path based lookup will find lower dentries. + * Just make sure a corresponding data dentry has been found. + */ + if (d.metacopy || (uppermetacopy && !ctr)) { + err = -EIO; + goto out_put; } else if (!d.is_dir && upperdentry && !ctr && origin_path) { if (WARN_ON(stack != NULL)) { err = -EIO; -- GitLab From 28166ab3c875b8cbe19b6ad43e29257d1605e3b9 Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Mon, 1 Jun 2020 11:56:52 -0400 Subject: [PATCH 1026/1971] ovl: initialize OVL_UPPERDATA in ovl_lookup() Currently ovl_get_inode() initializes OVL_UPPERDATA flag and for that it has to call ovl_check_metacopy_xattr() and check if metacopy xattr is present or not. yangerkun reported sometimes underlying filesystem might return -EIO and in that case error handling path does not cleanup properly leading to various warnings. 
Run generic/461 with ext4 upper/lower layer sometimes may trigger the bug as below(linux 4.19): [ 551.001349] overlayfs: failed to get metacopy (-5) [ 551.003464] overlayfs: failed to get inode (-5) [ 551.004243] overlayfs: cleanup of 'd44/fd51' failed (-5) [ 551.004941] overlayfs: failed to get origin (-5) [ 551.005199] ------------[ cut here ]------------ [ 551.006697] WARNING: CPU: 3 PID: 24674 at fs/inode.c:1528 iput+0x33b/0x400 ... [ 551.027219] Call Trace: [ 551.027623] ovl_create_object+0x13f/0x170 [ 551.028268] ovl_create+0x27/0x30 [ 551.028799] path_openat+0x1a35/0x1ea0 [ 551.029377] do_filp_open+0xad/0x160 [ 551.029944] ? vfs_writev+0xe9/0x170 [ 551.030499] ? page_counter_try_charge+0x77/0x120 [ 551.031245] ? __alloc_fd+0x160/0x2a0 [ 551.031832] ? do_sys_open+0x189/0x340 [ 551.032417] ? get_unused_fd_flags+0x34/0x40 [ 551.033081] do_sys_open+0x189/0x340 [ 551.033632] __x64_sys_creat+0x24/0x30 [ 551.034219] do_syscall_64+0xd5/0x430 [ 551.034800] entry_SYSCALL_64_after_hwframe+0x44/0xa9 One solution is to improve error handling and call iget_failed() if error is encountered. Amir thinks that this path is little intricate and there is not real need to check and initialize OVL_UPPERDATA in ovl_get_inode(). Instead caller of ovl_get_inode() can initialize this state. And this will avoid double checking of metacopy xattr lookup in ovl_lookup() and ovl_get_inode(). OVL_UPPERDATA is inode flag. So I was little concerned that initializing it outside ovl_get_inode() might have some races. But this is one way transition. That is once a file has been fully copied up, it can't go back to metacopy file again. And that seems to help avoid races. So as of now I can't see any races w.r.t OVL_UPPERDATA being set wrongly. So move settingof OVL_UPPERDATA inside the callers of ovl_get_inode(). ovl_obtain_alias() already does it. So only two callers now left are ovl_lookup() and ovl_instantiate(). Reported-by: yangerkun Suggested-by: Amir Goldstein Signed-off-by: Vivek Goyal Reviewed-by: Amir Goldstein Signed-off-by: Miklos Szeredi --- fs/overlayfs/dir.c | 2 ++ fs/overlayfs/inode.c | 11 +---------- fs/overlayfs/namei.c | 2 ++ 3 files changed, 5 insertions(+), 10 deletions(-) diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c index 09faa63cf24dd..1bba4813f9cb0 100644 --- a/fs/overlayfs/dir.c +++ b/fs/overlayfs/dir.c @@ -286,6 +286,8 @@ static int ovl_instantiate(struct dentry *dentry, struct inode *inode, inode = ovl_get_inode(dentry->d_sb, &oip); if (IS_ERR(inode)) return PTR_ERR(inode); + if (inode == oip.newinode) + ovl_set_flag(OVL_UPPERDATA, inode); } else { WARN_ON(ovl_inode_real(inode) != d_inode(newdentry)); dput(newdentry); diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index 981f11ec51bc6..f2aaf00821c00 100644 --- a/fs/overlayfs/inode.c +++ b/fs/overlayfs/inode.c @@ -957,7 +957,7 @@ struct inode *ovl_get_inode(struct super_block *sb, bool bylower = ovl_hash_bylower(sb, upperdentry, lowerdentry, oip->index); int fsid = bylower ? lowerpath->layer->fsid : 0; - bool is_dir, metacopy = false; + bool is_dir; unsigned long ino = 0; int err = oip->newinode ? 
-EEXIST : -ENOMEM; @@ -1018,15 +1018,6 @@ struct inode *ovl_get_inode(struct super_block *sb, if (oip->index) ovl_set_flag(OVL_INDEX, inode); - if (upperdentry) { - err = ovl_check_metacopy_xattr(upperdentry); - if (err < 0) - goto out_err; - metacopy = err; - if (!metacopy) - ovl_set_flag(OVL_UPPERDATA, inode); - } - OVL_I(inode)->redirect = oip->redirect; if (bylower) diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c index 59363c4f6ed40..1687a770ff395 100644 --- a/fs/overlayfs/namei.c +++ b/fs/overlayfs/namei.c @@ -1067,6 +1067,8 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, err = PTR_ERR(inode); if (IS_ERR(inode)) goto out_free_oe; + if (upperdentry && !uppermetacopy) + ovl_set_flag(OVL_UPPERDATA, inode); } ovl_dentry_update_reval(dentry, upperdentry, -- GitLab From 21d8d66abffb0c7834c1b09b8b043ea6a66d6089 Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Tue, 2 Jun 2020 11:23:38 -0400 Subject: [PATCH 1027/1971] ovl: fix redirect traversal on metacopy dentries Amir pointed me to metacopy test cases in unionmount-testsuite and I decided to run "./run --ov=10 --meta" and it failed while running test "rename-mass-5.py". Problem is w.r.t absolute redirect traversal on intermediate metacopy dentry. We do not store intermediate metacopy dentries and also skip current loop/layer and move onto lookup in next layer. But at the end of loop, we have logic to reset "poe" and layer index if currnently looked up dentry has absolute redirect. We skip all that and that means lookup in next layer will fail. Following is simple test case to reproduce this. - mkdir -p lower upper work merged lower/a lower/b - touch lower/a/foo.txt - mount -t overlay -o lowerdir=lower,upperdir=upper,workdir=work,metacopy=on none merged # Following will create absolute redirect "/a/foo.txt" on upper/b/bar.txt. - mv merged/a/foo.txt merged/b/bar.txt # unmount overlay and use upper as lower layer (lower2) for next mount. - umount merged - mv upper lower2 - rm -rf work; mkdir -p upper work - mount -t overlay -o lowerdir=lower2:lower,upperdir=upper,workdir=work,metacopy=on none merged # Force a metacopy copy-up - chown bin:bin merged/b/bar.txt # unmount overlay and use upper as lower layer (lower3) for next mount. - umount merged - mv upper lower3 - rm -rf work; mkdir -p upper work - mount -t overlay -o lowerdir=lower3:lower2:lower,upperdir=upper,workdir=work,metacopy=on none merged # ls merged/b/bar.txt ls: cannot access 'bar.txt': Input/output error Intermediate lower layer (lower2) has metacopy dentry b/bar.txt with absolute redirect "/a/foo.txt". We skipped redirect processing at the end of loop which sets poe to roe and sets the appropriate next lower layer index. And that means lookup failed in next layer. Fix this by continuing the loop for any intermediate dentries. We still do not save these at lower stack. With this fix applied unionmount-testsuite, "./run --ov-10 --meta" now passes. 
Signed-off-by: Vivek Goyal Reviewed-by: Amir Goldstein Signed-off-by: Miklos Szeredi --- fs/overlayfs/namei.c | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c index 1687a770ff395..f9640d27ddab6 100644 --- a/fs/overlayfs/namei.c +++ b/fs/overlayfs/namei.c @@ -901,15 +901,6 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, goto out_put; } - /* - * Do not store intermediate metacopy dentries in chain, - * except top most lower metacopy dentry - */ - if (d.metacopy && ctr) { - dput(this); - continue; - } - /* * If no origin fh is stored in upper of a merge dir, store fh * of lower dir and set upper parent "impure". @@ -944,9 +935,20 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, origin = this; } - stack[ctr].dentry = this; - stack[ctr].layer = lower.layer; - ctr++; + if (d.metacopy && ctr) { + /* + * Do not store intermediate metacopy dentries in + * lower chain, except top most lower metacopy dentry. + * Continue the loop so that if there is an absolute + * redirect on this dentry, poe can be reset to roe. + */ + dput(this); + this = NULL; + } else { + stack[ctr].dentry = this; + stack[ctr].layer = lower.layer; + ctr++; + } /* * Following redirects can have security consequences: it's like -- GitLab From 130fdbc3d1f9966dd4230709c30f3768bccd3065 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Tue, 2 Jun 2020 22:20:25 +0200 Subject: [PATCH 1028/1971] ovl: pass correct flags for opening real directory The three instances of ovl_path_open() in overlayfs/readdir.c do three different things: - pass f_flags from overlay file - pass O_RDONLY | O_DIRECTORY - pass just O_RDONLY The value of f_flags can be (other than O_RDONLY): O_WRONLY - not possible for a directory O_RDWR - not possible for a directory O_CREAT - masked out by dentry_open() O_EXCL - masked out by dentry_open() O_NOCTTY - masked out by dentry_open() O_TRUNC - masked out by dentry_open() O_APPEND - no effect on directory ops O_NDELAY - no effect on directory ops O_NONBLOCK - no effect on directory ops __O_SYNC - no effect on directory ops O_DSYNC - no effect on directory ops FASYNC - no effect on directory ops O_DIRECT - no effect on directory ops O_LARGEFILE - ? O_DIRECTORY - only affects lookup O_NOFOLLOW - only affects lookup O_NOATIME - overlay sets this unconditionally in ovl_path_open() O_CLOEXEC - only affects fd allocation O_PATH - no effect on directory ops __O_TMPFILE - not possible for a directory Fon non-merge directories we use the underlying filesystem's iterate; in this case honor O_LARGEFILE from the original file to make sure that open doesn't get rejected. For merge directories it's safe to pass O_LARGEFILE unconditionally since userspace will only see the artificial offsets created by overlayfs. 
Signed-off-by: Miklos Szeredi --- fs/overlayfs/readdir.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c index a10b21b562ad3..3bc97065cc707 100644 --- a/fs/overlayfs/readdir.c +++ b/fs/overlayfs/readdir.c @@ -297,7 +297,7 @@ static inline int ovl_dir_read(struct path *realpath, struct file *realfile; int err; - realfile = ovl_path_open(realpath, O_RDONLY | O_DIRECTORY); + realfile = ovl_path_open(realpath, O_RDONLY | O_LARGEFILE); if (IS_ERR(realfile)) return PTR_ERR(realfile); @@ -831,6 +831,12 @@ out_unlock: return res; } +static struct file *ovl_dir_open_realfile(struct file *file, + struct path *realpath) +{ + return ovl_path_open(realpath, O_RDONLY | (file->f_flags & O_LARGEFILE)); +} + static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync) { @@ -853,7 +859,7 @@ static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end, struct path upperpath; ovl_path_upper(dentry, &upperpath); - realfile = ovl_path_open(&upperpath, O_RDONLY); + realfile = ovl_dir_open_realfile(file, &upperpath); inode_lock(inode); if (!od->upperfile) { @@ -904,7 +910,7 @@ static int ovl_dir_open(struct inode *inode, struct file *file) return -ENOMEM; type = ovl_path_real(file->f_path.dentry, &realpath); - realfile = ovl_path_open(&realpath, file->f_flags); + realfile = ovl_dir_open_realfile(file, &realpath); if (IS_ERR(realfile)) { kfree(od); return PTR_ERR(realfile); -- GitLab From 48bd024b8a40d73ad6b086de2615738da0c7004f Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Tue, 2 Jun 2020 22:20:25 +0200 Subject: [PATCH 1029/1971] ovl: switch to mounter creds in readdir In preparation for more permission checking, override credentials for directory operations on the underlying filesystems. 
Signed-off-by: Miklos Szeredi --- fs/overlayfs/readdir.c | 27 +++++++++++++++++++++------ 1 file changed, 21 insertions(+), 6 deletions(-) diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c index 3bc97065cc707..1621460417bed 100644 --- a/fs/overlayfs/readdir.c +++ b/fs/overlayfs/readdir.c @@ -743,8 +743,10 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx) struct ovl_dir_file *od = file->private_data; struct dentry *dentry = file->f_path.dentry; struct ovl_cache_entry *p; + const struct cred *old_cred; int err; + old_cred = ovl_override_creds(dentry->d_sb); if (!ctx->pos) ovl_dir_reset(file); @@ -758,17 +760,20 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx) (ovl_same_fs(dentry->d_sb) && (ovl_is_impure_dir(file) || OVL_TYPE_MERGE(ovl_path_type(dentry->d_parent))))) { - return ovl_iterate_real(file, ctx); + err = ovl_iterate_real(file, ctx); + } else { + err = iterate_dir(od->realfile, ctx); } - return iterate_dir(od->realfile, ctx); + goto out; } if (!od->cache) { struct ovl_dir_cache *cache; cache = ovl_cache_get(dentry); + err = PTR_ERR(cache); if (IS_ERR(cache)) - return PTR_ERR(cache); + goto out; od->cache = cache; ovl_seek_cursor(od, ctx->pos); @@ -780,7 +785,7 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx) if (!p->ino) { err = ovl_cache_update_ino(&file->f_path, p); if (err) - return err; + goto out; } if (!dir_emit(ctx, p->name, p->len, p->ino, p->type)) break; @@ -788,7 +793,10 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx) od->cursor = p->l_node.next; ctx->pos++; } - return 0; + err = 0; +out: + revert_creds(old_cred); + return err; } static loff_t ovl_dir_llseek(struct file *file, loff_t offset, int origin) @@ -834,7 +842,14 @@ out_unlock: static struct file *ovl_dir_open_realfile(struct file *file, struct path *realpath) { - return ovl_path_open(realpath, O_RDONLY | (file->f_flags & O_LARGEFILE)); + struct file *res; + const struct cred *old_cred; + + old_cred = ovl_override_creds(file_inode(file)->i_sb); + res = ovl_path_open(realpath, O_RDONLY | (file->f_flags & O_LARGEFILE)); + revert_creds(old_cred); + + return res; } static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end, -- GitLab From 56230d956739b9cb1cbde439d76227d77979a04d Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Tue, 2 Jun 2020 22:20:26 +0200 Subject: [PATCH 1030/1971] ovl: verify permissions in ovl_path_open() Check permission before opening a real file. ovl_path_open() is used by readdir and copy-up routines. ovl_permission() theoretically already checked copy up permissions, but it doesn't hurt to re-do these checks during the actual copy-up. For directory reading ovl_permission() only checks access to topmost underlying layer. Readdir on a merged directory accesses layers below the topmost one as well. Permission wasn't checked for these layers. Note: modifying ovl_permission() to perform this check would be far more complex and hence more bug prone. The result is less precise permissions returned in access(2). If this turns out to be an issue, we can revisit this bug. 
Signed-off-by: Miklos Szeredi --- fs/overlayfs/util.c | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c index 01755bc186c91..33a7c60be3eda 100644 --- a/fs/overlayfs/util.c +++ b/fs/overlayfs/util.c @@ -459,7 +459,32 @@ bool ovl_is_whiteout(struct dentry *dentry) struct file *ovl_path_open(struct path *path, int flags) { - return dentry_open(path, flags | O_NOATIME, current_cred()); + struct inode *inode = d_inode(path->dentry); + int err, acc_mode; + + if (flags & ~(O_ACCMODE | O_LARGEFILE)) + BUG(); + + switch (flags & O_ACCMODE) { + case O_RDONLY: + acc_mode = MAY_READ; + break; + case O_WRONLY: + acc_mode = MAY_WRITE; + break; + default: + BUG(); + } + + err = inode_permission(inode, acc_mode | MAY_OPEN); + if (err) + return ERR_PTR(err); + + /* O_NOATIME is an optimization, don't fail if not permitted */ + if (inode_owner_or_capable(inode)) + flags |= O_NOATIME; + + return dentry_open(path, flags, current_cred()); } /* Caller should hold ovl_inode->lock */ -- GitLab From 38bccfbeb0af039e59eb75fe6d9b2a83cda3d381 Mon Sep 17 00:00:00 2001 From: Zach van Rijn Date: Wed, 1 Apr 2020 16:30:48 -0500 Subject: [PATCH 1031/1971] um: Add include: memset() and memcpy() are in These two functions are otherwise unknown to the pedantic compiler. Include the correct header to enable the build to succeed. Signed-off-by: Zach van Rijn Acked-By: Anton Ivanov Signed-off-by: Richard Weinberger --- arch/um/os-Linux/file.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/um/os-Linux/file.c b/arch/um/os-Linux/file.c index 26ecbd64c4094..044836ad73927 100644 --- a/arch/um/os-Linux/file.c +++ b/arch/um/os-Linux/file.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include -- GitLab From bc8f8e4e6e7a648e8c6357307d614be8fdcfdf2a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marc-Andr=C3=A9=20Lureau?= Date: Tue, 7 Apr 2020 22:28:53 +0200 Subject: [PATCH 1032/1971] um: Add a generic "fd" vector transport MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Learn to take a pre-opened file-descriptor for vector IO. Instead of teaching the driver to open a FD in multiple ways, it can rely on management layer to do it on its behalf. For example, this allows inheriting a preconfigured device fd or a simple socketpair() setup, without further arguments, privileges or system access by UML. 
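To picture how a management layer might use this, the userspace sketch below creates a socketpair(), keeps one end for itself and hands the other end's descriptor number to UML on the command line. It is not part of the patch: the "vec0:transport=fd,fd=N" argument form and the UML binary name "linux" are assumptions made for illustration, so the vector-networking documentation should be consulted for the exact syntax.

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fds[2];
	char vec_arg[64];

	/* One datagram socket per side: fds[0] for the host, fds[1] for UML. */
	if (socketpair(AF_UNIX, SOCK_DGRAM, 0, fds) < 0) {
		perror("socketpair");
		return 1;
	}

	/* The descriptor survives exec, so UML can simply be told its number. */
	snprintf(vec_arg, sizeof(vec_arg), "vec0:transport=fd,fd=%d", fds[1]);

	/* A real launcher would fork() first and keep fds[0] to move frames. */
	execlp("linux", "linux", "mem=256M", vec_arg, (char *)NULL);
	perror("execlp");
	return 1;
}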
Signed-off-by: Marc-André Lureau Acked-By: Anton Ivanov Signed-off-by: Richard Weinberger --- arch/um/drivers/vector_user.c | 59 +++++++++++++++++++++++++++++++++++ 1 file changed, 59 insertions(+) diff --git a/arch/um/drivers/vector_user.c b/arch/um/drivers/vector_user.c index aa28e9eecb7ba..c4a0f26b28248 100644 --- a/arch/um/drivers/vector_user.c +++ b/arch/um/drivers/vector_user.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include "vector_user.h" @@ -42,6 +43,9 @@ #define TRANS_RAW "raw" #define TRANS_RAW_LEN strlen(TRANS_RAW) +#define TRANS_FD "fd" +#define TRANS_FD_LEN strlen(TRANS_FD) + #define VNET_HDR_FAIL "could not enable vnet headers on fd %d" #define TUN_GET_F_FAIL "tapraw: TUNGETFEATURES failed: %s" #define L2TPV3_BIND_FAIL "l2tpv3_open : could not bind socket err=%i" @@ -347,6 +351,59 @@ unix_cleanup: return NULL; } +static int strtofd(const char *nptr) +{ + long fd; + char *endptr; + + if (nptr == NULL) + return -1; + + errno = 0; + fd = strtol(nptr, &endptr, 10); + if (nptr == endptr || + errno != 0 || + *endptr != '\0' || + fd < 0 || + fd > INT_MAX) { + return -1; + } + return fd; +} + +static struct vector_fds *user_init_fd_fds(struct arglist *ifspec) +{ + int fd = -1; + char *fdarg = NULL; + struct vector_fds *result = NULL; + + fdarg = uml_vector_fetch_arg(ifspec, "fd"); + fd = strtofd(fdarg); + if (fd == -1) { + printk(UM_KERN_ERR "fd open: bad or missing fd argument"); + goto fd_cleanup; + } + + result = uml_kmalloc(sizeof(struct vector_fds), UM_GFP_KERNEL); + if (result == NULL) { + printk(UM_KERN_ERR "fd open: allocation failed"); + goto fd_cleanup; + } + + result->rx_fd = fd; + result->tx_fd = fd; + result->remote_addr_size = 0; + result->remote_addr = NULL; + return result; + +fd_cleanup: + if (fd >= 0) + os_close_file(fd); + if (result != NULL) + kfree(result); + return NULL; +} + static struct vector_fds *user_init_raw_fds(struct arglist *ifspec) { int rxfd = -1, txfd = -1; @@ -578,6 +635,8 @@ struct vector_fds *uml_vector_user_open( return user_init_socket_fds(parsed, ID_L2TPV3); if (strncmp(transport, TRANS_BESS, TRANS_BESS_LEN) == 0) return user_init_unix_fds(parsed, ID_BESS); + if (strncmp(transport, TRANS_FD, TRANS_FD_LEN) == 0) + return user_init_fd_fds(parsed); return NULL; } -- GitLab From 4c5a770580054c1d107ba204a42df1db221670bd Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sat, 11 Apr 2020 09:28:08 -0700 Subject: [PATCH 1033/1971] um: Neaten vu_err macro definition Defining a macro with ... and __VA_ARGS__ (without ##) can cause compilation errors if a macro use does not have additional args. Add ## to __VA_ARGS__ in the macro definition. Signed-off-by: Joe Perches Signed-off-by: Richard Weinberger --- arch/um/drivers/virtio_uml.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c index be54d368e73dd..351aee52aca63 100644 --- a/arch/um/drivers/virtio_uml.c +++ b/arch/um/drivers/virtio_uml.c @@ -74,7 +74,7 @@ struct virtio_uml_vq_info { extern unsigned long long physmem_size, highmem; -#define vu_err(vu_dev, ...) dev_err(&(vu_dev)->pdev->dev, __VA_ARGS__) +#define vu_err(vu_dev, ...) 
dev_err(&(vu_dev)->pdev->dev, ##__VA_ARGS__) /* Vhost-user protocol */ -- GitLab From 0b86ce29cfc273501d019b18f31cee7837c8fd2a Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Sat, 18 Apr 2020 03:04:55 +0900 Subject: [PATCH 1034/1971] um: Do not evaluate compiler's library path when cleaning Since commit a83e4ca26af8 ("kbuild: remove cc-option switch from -Wframe-larger-than="), 'make ARCH=um clean' emits an error message as follows: $ make ARCH=um clean gcc: error: missing argument to '-Wframe-larger-than=' We do not care compiler flags when cleaning. Use the '=' operator for lazy expansion because we do not use LDFLAGS_pcap.o or LDFLAGS_vde.o when cleaning. While I was here, I removed the redundant -r option because it already exists in the recipe. Fixes: a83e4ca26af8 ("kbuild: remove cc-option switch from -Wframe-larger-than=") Signed-off-by: Masahiro Yamada Reviewed-by: Nathan Chancellor Tested-by: Nathan Chancellor [build] Signed-off-by: Richard Weinberger --- arch/um/drivers/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/um/drivers/Makefile b/arch/um/drivers/Makefile index a290821e355c2..2a249f6194671 100644 --- a/arch/um/drivers/Makefile +++ b/arch/um/drivers/Makefile @@ -18,9 +18,9 @@ ubd-objs := ubd_kern.o ubd_user.o port-objs := port_kern.o port_user.o harddog-objs := harddog_kern.o harddog_user.o -LDFLAGS_pcap.o := -r $(shell $(CC) $(KBUILD_CFLAGS) -print-file-name=libpcap.a) +LDFLAGS_pcap.o = $(shell $(CC) $(KBUILD_CFLAGS) -print-file-name=libpcap.a) -LDFLAGS_vde.o := -r $(shell $(CC) $(CFLAGS) -print-file-name=libvdeplug.a) +LDFLAGS_vde.o = $(shell $(CC) $(CFLAGS) -print-file-name=libvdeplug.a) targets := pcap_kern.o pcap_user.o vde_kern.o vde_user.o -- GitLab From 54ebe4060fe6c4ab76c79354417612ac9cf4f95e Mon Sep 17 00:00:00 2001 From: Anton Ivanov Date: Wed, 22 Apr 2020 17:00:01 +0100 Subject: [PATCH 1035/1971] um: Use fdatasync() when mapping the UBD FSYNC command We do not need to update the metadata (atime, mtime, etc) on the UBD file and/or the COW file until UML exits. UBD image mtime is checked in UML only when opening the files. After that they are locked and used exclusively by a single UML instance, so there is no point wasting resources on updating metadata on every sync. We can sync data only. The host will always update mtime if a file has been modified upon closing it. Signed-off-by: Anton Ivanov Signed-off-by: Richard Weinberger --- arch/um/os-Linux/file.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/um/os-Linux/file.c b/arch/um/os-Linux/file.c index 044836ad73927..e4421dbc4c36c 100644 --- a/arch/um/os-Linux/file.c +++ b/arch/um/os-Linux/file.c @@ -290,7 +290,7 @@ int os_write_file(int fd, const void *buf, int len) int os_sync_file(int fd) { - int n = fsync(fd); + int n = fdatasync(fd); if (n < 0) return -errno; -- GitLab From f6e8c474390be2e6f5bd0b8966e19958214609ff Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. 
Silva" Date: Thu, 7 May 2020 14:26:52 -0500 Subject: [PATCH 1036/1971] um: virtio: Replace zero-length array with flexible-array The current codebase makes use of the zero-length array language extension to the C90 standard, but the preferred mechanism to declare variable-length types such as these ones is a flexible array member[1][2], introduced in C99: struct foo { int stuff; struct boo array[]; }; By making use of the mechanism above, we will get a compiler warning in case the flexible array does not occur last in the structure, which will help us prevent some kind of undefined behavior bugs from being inadvertently introduced[3] to the codebase from now on. Also, notice that, dynamic memory allocations won't be affected by this change: "Flexible array members have incomplete type, and so the sizeof operator may not be applied. As a quirk of the original implementation of zero-length arrays, sizeof evaluates to zero."[1] sizeof(flexible-array-member) triggers a warning because flexible array members have incomplete type[1]. There are some instances of code in which the sizeof operator is being incorrectly/erroneously applied to zero-length arrays and the result is zero. Such instances may be hiding some bugs. So, this work (flexible-array member conversions) will also help to get completely rid of those sorts of issues. This issue was found with the help of Coccinelle. [1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html [2] https://github.com/KSPP/linux/issues/21 [3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour") Signed-off-by: Gustavo A. R. Silva Signed-off-by: Richard Weinberger --- arch/um/drivers/vector_kern.h | 2 +- arch/um/drivers/vhost_user.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/um/drivers/vector_kern.h b/arch/um/drivers/vector_kern.h index d0159082faf05..8fff93a75a927 100644 --- a/arch/um/drivers/vector_kern.h +++ b/arch/um/drivers/vector_kern.h @@ -129,7 +129,7 @@ struct vector_private { struct vector_estats estats; struct sock_fprog *bpf; - char user[0]; + char user[]; }; extern int build_transport_data(struct vector_private *vp); diff --git a/arch/um/drivers/vhost_user.h b/arch/um/drivers/vhost_user.h index 6c71b60051774..6f147cd3c9f76 100644 --- a/arch/um/drivers/vhost_user.h +++ b/arch/um/drivers/vhost_user.h @@ -78,7 +78,7 @@ struct vhost_user_config { u32 offset; u32 size; u32 flags; - u8 payload[0]; /* Variable length */ + u8 payload[]; /* Variable length */ } __packed; struct vhost_user_vring_state { -- GitLab From 4b68bf9a69d22dd512d61d5f0ba01b065b01ede6 Mon Sep 17 00:00:00 2001 From: Arne Edholm Date: Mon, 13 Jan 2020 15:56:22 +0100 Subject: [PATCH 1037/1971] ubi: Select fastmap anchor PEBs considering wear level rules There is a risk that the fastmap anchor PEB is alternating between just two PEBs, the current anchor and the previous anchor that was just deleted. As the fastmap pools gets the first take on free PEBs, the pools may leave no free PEBs to be selected as the new anchor, resulting in the two PEBs alternating behaviour. If the anchor PEBs gets a high erase count the PEBs will not be used by the pools but remain in ubi->free, even more increasing the likelihood they will be used as anchors. Getting stuck using only a couple of PEBs continuously will result in an uneven wear, eventually leading to failure. To fix this: - Choose the fastmap anchor when the most free PEBs are available. 
This is during rebuilding of the fastmap pools, after the unused pool PEBs are added to ubi->free but before the pools are populated again from the free PEBs. Also reserve an additional second best PEB as a candidate for the next time the fast map anchor is updated. If a better PEB is found the next time the fast map anchor is updated, the candidate is made available for building the pools. - Enable anchor move within the anchor area again as it is useful for distributing wear. - The anchor candidate for the next fastmap update is the most suited free PEB. Check this PEB's erase count during wear leveling. If the wear leveling limit is exceeded, the PEB is considered unsuitable for now. As all other non used anchor area PEBs should be even worse, free up the used anchor area PEB with the lowest erase count. Signed-off-by: Arne Edholm Signed-off-by: Richard Weinberger --- drivers/mtd/ubi/fastmap-wl.c | 39 ++++++++++++++++++++++-------------- drivers/mtd/ubi/fastmap.c | 11 ++++++++++ drivers/mtd/ubi/ubi.h | 4 +++- drivers/mtd/ubi/wl.c | 28 +++++++++++++++++--------- 4 files changed, 57 insertions(+), 25 deletions(-) diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c index b486250923c5a..83afc00e365a5 100644 --- a/drivers/mtd/ubi/fastmap-wl.c +++ b/drivers/mtd/ubi/fastmap-wl.c @@ -116,6 +116,21 @@ void ubi_refill_pools(struct ubi_device *ubi) wl_pool->size = 0; pool->size = 0; + if (ubi->fm_anchor) { + wl_tree_add(ubi->fm_anchor, &ubi->free); + ubi->free_count++; + } + if (ubi->fm_next_anchor) { + wl_tree_add(ubi->fm_next_anchor, &ubi->free); + ubi->free_count++; + } + + /* All available PEBs are in ubi->free, now is the time to get + * the best anchor PEBs. + */ + ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1); + ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1); + for (;;) { enough = 0; if (pool->size < pool->max_size) { @@ -271,26 +286,20 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi) int ubi_ensure_anchor_pebs(struct ubi_device *ubi) { struct ubi_work *wrk; - struct ubi_wl_entry *anchor; spin_lock(&ubi->wl_lock); - /* Do we already have an anchor? */ - if (ubi->fm_anchor) { - spin_unlock(&ubi->wl_lock); - return 0; - } - - /* See if we can find an anchor PEB on the list of free PEBs */ - anchor = ubi_wl_get_fm_peb(ubi, 1); - if (anchor) { - ubi->fm_anchor = anchor; - spin_unlock(&ubi->wl_lock); - return 0; + /* Do we have a next anchor? */ + if (!ubi->fm_next_anchor) { + ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1); + if (!ubi->fm_next_anchor) + /* Tell wear leveling to produce a new anchor PEB */ + ubi->fm_do_produce_anchor = 1; } - /* No luck, trigger wear leveling to produce a new anchor PEB */ - ubi->fm_do_produce_anchor = 1; + /* Do wear leveling to get a new anchor PEB or check the + * existing next anchor candidate. 
+ */ if (ubi->wl_scheduled) { spin_unlock(&ubi->wl_lock); return 0; diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c index 53f448e7433a9..022af59906aa9 100644 --- a/drivers/mtd/ubi/fastmap.c +++ b/drivers/mtd/ubi/fastmap.c @@ -1220,6 +1220,17 @@ static int ubi_write_fastmap(struct ubi_device *ubi, fm_pos += sizeof(*fec); ubi_assert(fm_pos <= ubi->fm_size); } + if (ubi->fm_next_anchor) { + fec = (struct ubi_fm_ec *)(fm_raw + fm_pos); + + fec->pnum = cpu_to_be32(ubi->fm_next_anchor->pnum); + set_seen(ubi, ubi->fm_next_anchor->pnum, seen_pebs); + fec->ec = cpu_to_be32(ubi->fm_next_anchor->ec); + + free_peb_count++; + fm_pos += sizeof(*fec); + ubi_assert(fm_pos <= ubi->fm_size); + } fmh->free_peb_count = cpu_to_be32(free_peb_count); ubi_for_each_used_peb(ubi, wl_e, tmp_rb) { diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h index 73c67e5c08f85..2fd84f5efaade 100644 --- a/drivers/mtd/ubi/ubi.h +++ b/drivers/mtd/ubi/ubi.h @@ -491,7 +491,8 @@ struct ubi_debug_info { * @fm_work: fastmap work queue * @fm_work_scheduled: non-zero if fastmap work was scheduled * @fast_attach: non-zero if UBI was attached by fastmap - * @fm_anchor: The next anchor PEB to use for fastmap + * @fm_anchor: The new anchor PEB used during fastmap update + * @fm_next_anchor: An anchor PEB candidate for the next time fastmap is updated * @fm_do_produce_anchor: If true produce an anchor PEB in wl * * @used: RB-tree of used physical eraseblocks @@ -602,6 +603,7 @@ struct ubi_device { int fm_work_scheduled; int fast_attach; struct ubi_wl_entry *fm_anchor; + struct ubi_wl_entry *fm_next_anchor; int fm_do_produce_anchor; /* Wear-leveling sub-system's stuff */ diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 5146cce5fe321..27636063ed1bb 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -687,20 +687,27 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, } #ifdef CONFIG_MTD_UBI_FASTMAP + e1 = find_anchor_wl_entry(&ubi->used); + if (e1 && ubi->fm_next_anchor && + (ubi->fm_next_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) { + ubi->fm_do_produce_anchor = 1; + /* fm_next_anchor is no longer considered a good anchor + * candidate. + * NULL assignment also prevents multiple wear level checks + * of this PEB. + */ + wl_tree_add(ubi->fm_next_anchor, &ubi->free); + ubi->fm_next_anchor = NULL; + ubi->free_count++; + } + if (ubi->fm_do_produce_anchor) { - e1 = find_anchor_wl_entry(&ubi->used); if (!e1) goto out_cancel; e2 = get_peb_for_wl(ubi); if (!e2) goto out_cancel; - /* - * Anchor move within the anchor area is useless. - */ - if (e2->pnum < UBI_FM_MAX_START) - goto out_cancel; - self_check_in_wl_tree(ubi, e1, &ubi->used); rb_erase(&e1->u.rb, &ubi->used); dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum); @@ -1079,8 +1086,11 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk) if (!err) { spin_lock(&ubi->wl_lock); - if (!ubi->fm_anchor && e->pnum < UBI_FM_MAX_START) { - ubi->fm_anchor = e; + if (!ubi->fm_next_anchor && e->pnum < UBI_FM_MAX_START) { + /* Abort anchor production, if needed it will be + * enabled again in the wear leveling started below. 
+ */ + ubi->fm_next_anchor = e; ubi->fm_do_produce_anchor = 0; } else { wl_tree_add(e, &ubi->free); -- GitLab From a75ca9303175d36af93c0937dd9b1a6422908b8d Mon Sep 17 00:00:00 2001 From: yu kuai Date: Mon, 1 Jun 2020 20:38:56 +0800 Subject: [PATCH 1038/1971] block/bio-integrity: don't free 'buf' if bio_integrity_add_page() failed commit e7bf90e5afe3 ("block/bio-integrity: fix a memory leak bug") added a kfree() for 'buf' if bio_integrity_add_page() returns '0'. However, the object will be freed in bio_integrity_free() since 'bio->bi_opf' and 'bio->bi_integrity' were set previousy in bio_integrity_alloc(). Fixes: commit e7bf90e5afe3 ("block/bio-integrity: fix a memory leak bug") Signed-off-by: yu kuai Reviewed-by: Ming Lei Reviewed-by: Bob Liu Acked-by: Martin K. Petersen Signed-off-by: Jens Axboe --- block/bio-integrity.c | 1 - 1 file changed, 1 deletion(-) diff --git a/block/bio-integrity.c b/block/bio-integrity.c index 3579ac0f6ec1f..23632a33ed394 100644 --- a/block/bio-integrity.c +++ b/block/bio-integrity.c @@ -281,7 +281,6 @@ bool bio_integrity_prep(struct bio *bio) if (ret == 0) { printk(KERN_ERR "could not attach integrity payload\n"); - kfree(buf); status = BLK_STS_RESOURCE; goto err_end_io; } -- GitLab From fd2206e4e97b5bae422d9f2f9ebbc79bc97e44a5 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 2 Jun 2020 16:40:47 -0600 Subject: [PATCH 1039/1971] io_uring: disallow close of ring itself A previous commit enabled this functionality, which also enabled O_PATH to work correctly with io_uring. But we can't safely close the ring itself, as the file handle isn't reference counted inside io_uring_enter(). Instead of jumping through hoops to enable ring closure, add a "soft" ->needs_file option, ->needs_file_no_error. This enables O_PATH file descriptors to work, but still catches the case of trying to close the ring itself. 
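Purely as an illustration (not part of this patch), the rejected case corresponds to a liburing-style submission like the sketch below, where closing the ring's own descriptor now completes with -EBADF instead of pulling the file out from under io_uring_enter(); names follow liburing and error handling is trimmed:

    #include <liburing.h>

    /* illustrative only: submit a close of the ring's own fd */
    void close_ring_fd_example(void)
    {
            struct io_uring ring;
            struct io_uring_sqe *sqe;
            struct io_uring_cqe *cqe;

            io_uring_queue_init(8, &ring, 0);
            sqe = io_uring_get_sqe(&ring);
            io_uring_prep_close(sqe, ring.ring_fd);  /* close the ring itself */
            io_uring_submit(&ring);
            io_uring_wait_cqe(&ring, &cqe);
            /* cqe->res is now -EBADF; regular and O_PATH fds still close fine */
            io_uring_queue_exit(&ring);
    }
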
Reported-by: Jann Horn Fixes: 904fbcb115c8 ("io_uring: remove 'fd is io_uring' from close path") Signed-off-by: Jens Axboe --- fs/io_uring.c | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 9d4bd0d3a0807..417b7105c6dc8 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -698,6 +698,8 @@ struct io_op_def { unsigned needs_mm : 1; /* needs req->file assigned */ unsigned needs_file : 1; + /* don't fail if file grab fails */ + unsigned needs_file_no_error : 1; /* hash wq insertion if file is a regular file */ unsigned hash_reg_file : 1; /* unbound wq insertion if file is a non-regular file */ @@ -804,6 +806,8 @@ static const struct io_op_def io_op_defs[] = { .needs_fs = 1, }, [IORING_OP_CLOSE] = { + .needs_file = 1, + .needs_file_no_error = 1, .file_table = 1, }, [IORING_OP_FILES_UPDATE] = { @@ -3421,6 +3425,10 @@ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) return -EBADF; req->close.fd = READ_ONCE(sqe->fd); + if ((req->file && req->file->f_op == &io_uring_fops) || + req->close.fd == req->ctx->ring_fd) + return -EBADF; + return 0; } @@ -5438,19 +5446,20 @@ static int io_file_get(struct io_submit_state *state, struct io_kiocb *req, return -EBADF; fd = array_index_nospec(fd, ctx->nr_user_files); file = io_file_from_index(ctx, fd); - if (!file) - return -EBADF; - req->fixed_file_refs = ctx->file_data->cur_refs; - percpu_ref_get(req->fixed_file_refs); + if (file) { + req->fixed_file_refs = ctx->file_data->cur_refs; + percpu_ref_get(req->fixed_file_refs); + } } else { trace_io_uring_file_get(ctx, fd); file = __io_file_get(state, fd); - if (unlikely(!file)) - return -EBADF; } - *out_file = file; - return 0; + if (file || io_op_defs[req->opcode].needs_file_no_error) { + *out_file = file; + return 0; + } + return -EBADF; } static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req, -- GitLab From 8c4e0f212398cdd1eb4310a5981d06a723cdd24f Mon Sep 17 00:00:00 2001 From: Bodo Stroesser Date: Thu, 28 May 2020 21:31:08 +0200 Subject: [PATCH 1040/1971] scsi: target: tcmu: Fix size in calls to tcmu_flush_dcache_range 1) If remaining ring space before the end of the ring is smaller then the next cmd to write, tcmu writes a padding entry which fills the remaining space at the end of the ring. Then tcmu calls tcmu_flush_dcache_range() with the size of struct tcmu_cmd_entry as data length to flush. If the space filled by the padding was smaller then tcmu_cmd_entry, tcmu_flush_dcache_range() is called for an address range reaching behind the end of the vmalloc'ed ring. tcmu_flush_dcache_range() in a loop calls flush_dcache_page(virt_to_page(start)); for every page being part of the range. On x86 the line is optimized out by the compiler, as flush_dcache_page() is empty on x86. But I assume the above can cause trouble on other architectures that really have a flush_dcache_page(). For paddings only the header part of an entry is relevant due to alignment rules the header always fits in the remaining space, if padding is needed. So tcmu_flush_dcache_range() can safely be called with sizeof(entry->hdr) as the length here. 2) After it has written a command to cmd ring, tcmu calls tcmu_flush_dcache_range() using the size of a struct tcmu_cmd_entry as data length to flush. But if a command needs many iovecs, the real size of the command may be bigger then tcmu_cmd_entry, so a part of the written command is not flushed then. 
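For reference, a simplified sketch of the per-page flush loop described above (mirroring what the commit message says tcmu_flush_dcache_range() does, not the driver's exact helper) shows why an oversized length walks past the end of the vmalloc'ed ring:

    #include <linux/mm.h>        /* PAGE_SIZE, PAGE_MASK, virt_to_page() */
    #include <asm/cacheflush.h>  /* flush_dcache_page() */

    /* illustrative sketch: every page in [start, start + size) gets flushed,
     * so passing a size larger than the remaining ring space touches pages
     * beyond the end of the mapping.
     */
    static inline void flush_dcache_range_sketch(void *start, size_t size)
    {
            unsigned long addr = (unsigned long)start & PAGE_MASK;
            unsigned long end = (unsigned long)start + size;

            while (addr < end) {
                    flush_dcache_page(virt_to_page((void *)addr));
                    addr += PAGE_SIZE;
            }
    }
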
Link: https://lore.kernel.org/r/20200528193108.9085-1-bstroesser@ts.fujitsu.com Acked-by: Mike Christie Signed-off-by: Bodo Stroesser Signed-off-by: Martin K. Petersen --- drivers/target/target_core_user.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 0e281d30d81ef..09ce2d1a8f337 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -1007,7 +1007,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) entry->hdr.cmd_id = 0; /* not used for PAD */ entry->hdr.kflags = 0; entry->hdr.uflags = 0; - tcmu_flush_dcache_range(entry, sizeof(*entry)); + tcmu_flush_dcache_range(entry, sizeof(entry->hdr)); UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size); tcmu_flush_dcache_range(mb, sizeof(*mb)); @@ -1072,7 +1072,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) cdb_off = CMDR_OFF + cmd_head + base_command_size; memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb)); entry->req.cdb_off = cdb_off; - tcmu_flush_dcache_range(entry, sizeof(*entry)); + tcmu_flush_dcache_range(entry, command_size); UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size); tcmu_flush_dcache_range(mb, sizeof(*mb)); -- GitLab From 0267ffce562c8bbf9b57ebe0e38445ad04972890 Mon Sep 17 00:00:00 2001 From: Qiushi Wu Date: Thu, 28 May 2020 15:13:53 -0500 Subject: [PATCH 1041/1971] scsi: iscsi: Fix reference count leak in iscsi_boot_create_kobj kobject_init_and_add() takes reference even when it fails. If this function returns an error, kobject_put() must be called to properly clean up the memory associated with the object. Link: https://lore.kernel.org/r/20200528201353.14849-1-wu000273@umn.edu Reviewed-by: Lee Duncan Signed-off-by: Qiushi Wu Signed-off-by: Martin K. Petersen --- drivers/scsi/iscsi_boot_sysfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/iscsi_boot_sysfs.c b/drivers/scsi/iscsi_boot_sysfs.c index e4857b7280338..a64abe38db2d4 100644 --- a/drivers/scsi/iscsi_boot_sysfs.c +++ b/drivers/scsi/iscsi_boot_sysfs.c @@ -352,7 +352,7 @@ iscsi_boot_create_kobj(struct iscsi_boot_kset *boot_kset, boot_kobj->kobj.kset = boot_kset->kset; if (kobject_init_and_add(&boot_kobj->kobj, &iscsi_boot_ktype, NULL, name, index)) { - kfree(boot_kobj); + kobject_put(&boot_kobj->kobj); return NULL; } boot_kobj->data = data; -- GitLab From 61e6ba03ea26f0205e535862009ff6ffdbf4de0c Mon Sep 17 00:00:00 2001 From: Suganath Prabu S Date: Thu, 28 May 2020 10:56:17 -0400 Subject: [PATCH 1042/1971] scsi: mpt3sas: Fix memset() in non-RDPQ mode Fix memset() accessing out of range address when reply_queue count is less than RDPQ_MAX_INDEX_IN_ONE_CHUNK (i.e. 16) in non-RDPQ mode. In non-RDPQ mode, the driver allocates a single contiguous pool of size reply_queue's count * reqly_post_free_sz. But the driver is always memsetting this pool with size 16 * reqly_post_free_sz. If reply queue count is less than 16 (i.e. when MSI-X vectors enabled < 16), the driver is accessing out of range address and this results in 'BUG: unable to handle kernel paging request at fff0x...x' bug. Make driver use dma_pool_zalloc() API to allocate and zero the pool. Link: https://lore.kernel.org/r/20200528145617.27252-1-suganath-prabu.subramani@broadcom.com Fixes: 8012209eb26b ("scsi: mpt3sas: Handle RDPQ DMA allocation in same 4G region") Signed-off-by: Suganath Prabu S Signed-off-by: Martin K. 
Petersen --- drivers/scsi/mpt3sas/mpt3sas_base.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index beaea1933f5c4..96b78fdc6b8a9 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -4984,7 +4984,7 @@ base_alloc_rdpq_dma_pool(struct MPT3SAS_ADAPTER *ioc, int sz) for (i = 0; i < count; i++) { if ((i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0) && dma_alloc_count) { ioc->reply_post[i].reply_post_free = - dma_pool_alloc(ioc->reply_post_free_dma_pool, + dma_pool_zalloc(ioc->reply_post_free_dma_pool, GFP_KERNEL, &ioc->reply_post[i].reply_post_free_dma); if (!ioc->reply_post[i].reply_post_free) @@ -5008,9 +5008,6 @@ base_alloc_rdpq_dma_pool(struct MPT3SAS_ADAPTER *ioc, int sz) ioc->reply_post[i].reply_post_free_dma)); return -EAGAIN; } - memset(ioc->reply_post[i].reply_post_free, 0, - RDPQ_MAX_INDEX_IN_ONE_CHUNK * - reply_post_free_sz); dma_alloc_count--; } else { -- GitLab From 89523cb8a67cea2642deb19042a15fdd8c4138d4 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 27 May 2020 12:52:42 +0100 Subject: [PATCH 1043/1971] scsi: qedf: Remove redundant initialization of variable rc The variable rc is being initialized with a value that is never read and it is being updated later with a new value. The initialization is redundant and can be removed. Link: https://lore.kernel.org/r/20200527115242.172344-1-colin.king@canonical.com Addresses-Coverity: ("Unused value") Signed-off-by: Colin Ian King Signed-off-by: Martin K. Petersen --- drivers/scsi/qedf/qedf_fip.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/qedf/qedf_fip.c b/drivers/scsi/qedf/qedf_fip.c index bb82f0875eca4..ad6a56ce72a86 100644 --- a/drivers/scsi/qedf/qedf_fip.c +++ b/drivers/scsi/qedf/qedf_fip.c @@ -20,7 +20,7 @@ void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf) #define MY_FIP_ALL_FCF_MACS ((__u8[6]) { 1, 0x10, 0x18, 1, 0, 2 }) static u8 my_fcoe_all_fcfs[ETH_ALEN] = MY_FIP_ALL_FCF_MACS; unsigned long flags = 0; - int rc = -1; + int rc; skb = dev_alloc_skb(sizeof(struct fip_vlan)); if (!skb) { -- GitLab From be32acff43800c87dc5c707f5d47cc607b76b653 Mon Sep 17 00:00:00 2001 From: Can Guo Date: Wed, 27 May 2020 19:24:42 -0700 Subject: [PATCH 1044/1971] scsi: ufs: Don't update urgent bkops level when toggling auto bkops Urgent bkops level is used to compare against actual bkops status read from UFS device. Urgent bkops level is set during initialization and might be updated in exception event handler during runtime. But it should not be updated to the actual bkops status every time when auto bkops is toggled. Otherwise, if urgent bkops level is updated to 0, auto bkops shall always be kept enabled. Link: https://lore.kernel.org/r/1590632686-17866-1-git-send-email-cang@codeaurora.org Fixes: 24366c2afbb0 ("scsi: ufs: Recheck bkops level if bkops is disabled") Reviewed-by: Stanley Chu Signed-off-by: Can Guo Signed-off-by: Martin K. 
Petersen --- drivers/scsi/ufs/ufshcd.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 5db18f444ea9b..2bdee9edec0d8 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -5123,7 +5123,6 @@ static int ufshcd_bkops_ctrl(struct ufs_hba *hba, err = ufshcd_enable_auto_bkops(hba); else err = ufshcd_disable_auto_bkops(hba); - hba->urgent_bkops_lvl = curr_status; out: return err; } -- GitLab From 7b6668d8b806b013cfe084aeb72c7eebb578cb50 Mon Sep 17 00:00:00 2001 From: Stanley Chu Date: Sat, 30 May 2020 22:12:00 +0800 Subject: [PATCH 1045/1971] scsi: ufs: Remove redundant urgent_bkop_lvl initialization In ufshcd_probe_hba(), all BKOP SW tracking variables can be reset together in ufshcd_force_reset_auto_bkops(), thus urgent_bkop_lvl initialization in the beginning of ufshcd_probe_hba() can be merged into ufshcd_force_reset_auto_bkops(). Link: https://lore.kernel.org/r/20200530141200.4616-1-stanley.chu@mediatek.com Reviewed-by: Avri Altman Signed-off-by: Stanley Chu Signed-off-by: Martin K. Petersen --- drivers/scsi/ufs/ufshcd.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 2bdee9edec0d8..ad4fc829cbb2f 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -5076,6 +5076,7 @@ static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba) hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS; ufshcd_disable_auto_bkops(hba); } + hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT; hba->is_urgent_bkops_lvl_checked = false; } @@ -7372,10 +7373,6 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool async) if (ret) goto out; - /* set the default level for urgent bkops */ - hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT; - hba->is_urgent_bkops_lvl_checked = false; - /* Debug counters initialization */ ufshcd_clear_dbg_ufs_stats(hba); -- GitLab From 292f902a40c11f043a5ca1305a114da0e523eaa3 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Tue, 2 Jun 2020 22:20:26 +0200 Subject: [PATCH 1046/1971] ovl: call secutiry hook in ovl_real_ioctl() Verify LSM permissions for underlying file, since vfs_ioctl() doesn't do it. 
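Condensed, the resulting pattern in ovl_real_ioctl() (taken from the diff below) is simply an LSM check ahead of the forwarded ioctl, under the overridden credentials:

    old_cred = ovl_override_creds(file_inode(file)->i_sb);
    ret = security_file_ioctl(real.file, cmd, arg);
    if (!ret)
            ret = vfs_ioctl(real.file, cmd, arg);
    revert_creds(old_cred);
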
[Stephen Rothwell] export security_file_ioctl Signed-off-by: Miklos Szeredi --- fs/overlayfs/file.c | 5 ++++- security/security.c | 1 + 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c index 87c362f65448b..1860e220c82d3 100644 --- a/fs/overlayfs/file.c +++ b/fs/overlayfs/file.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include "overlayfs.h" @@ -520,7 +521,9 @@ static long ovl_real_ioctl(struct file *file, unsigned int cmd, return ret; old_cred = ovl_override_creds(file_inode(file)->i_sb); - ret = vfs_ioctl(real.file, cmd, arg); + ret = security_file_ioctl(real.file, cmd, arg); + if (!ret) + ret = vfs_ioctl(real.file, cmd, arg); revert_creds(old_cred); fdput(real); diff --git a/security/security.c b/security/security.c index 7fed24b9d57e6..a67414105130b 100644 --- a/security/security.c +++ b/security/security.c @@ -1459,6 +1459,7 @@ int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { return call_int_hook(file_ioctl, 0, file, cmd, arg); } +EXPORT_SYMBOL_GPL(security_file_ioctl); static inline unsigned long mmap_prot(struct file *file, unsigned long prot) { -- GitLab From 05acefb4872dae89e772729efb194af754c877e8 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Tue, 2 Jun 2020 22:20:26 +0200 Subject: [PATCH 1047/1971] ovl: check permission to open real file Call inode_permission() on real inode before opening regular file on one of the underlying layers. In some cases ovl_permission() already checks access to an underlying file, but it misses the metacopy case, and possibly other ones as well. Removing the redundant permission check from ovl_permission() should be considered later. Signed-off-by: Miklos Szeredi --- fs/overlayfs/file.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c index 1860e220c82d3..0f83c8dfec461 100644 --- a/fs/overlayfs/file.c +++ b/fs/overlayfs/file.c @@ -40,10 +40,22 @@ static struct file *ovl_open_realfile(const struct file *file, struct file *realfile; const struct cred *old_cred; int flags = file->f_flags | O_NOATIME | FMODE_NONOTIFY; + int acc_mode = ACC_MODE(flags); + int err; + + if (flags & O_APPEND) + acc_mode |= MAY_APPEND; old_cred = ovl_override_creds(inode->i_sb); - realfile = open_with_fake_path(&file->f_path, flags, realinode, - current_cred()); + err = inode_permission(realinode, MAY_OPEN | acc_mode); + if (err) { + realfile = ERR_PTR(err); + } else if (!inode_owner_or_capable(realinode)) { + realfile = ERR_PTR(-EPERM); + } else { + realfile = open_with_fake_path(&file->f_path, flags, realinode, + current_cred()); + } revert_creds(old_cred); pr_debug("open(%p[%pD2/%c], 0%o) -> (%p, 0%o)\n", -- GitLab From 1434a65ea625c51317ccdf06dabf4bd27d20fa10 Mon Sep 17 00:00:00 2001 From: Chengguang Xu Date: Tue, 26 May 2020 09:35:57 +0800 Subject: [PATCH 1048/1971] ovl: drop negative dentry in upper layer Negative dentries of upper layer are useless after construction of overlayfs' own dentry and may keep in the memory long time even after unmount of overlayfs instance. This patch tries to drop unnecessary negative dentry of upper layer to effectively reclaim memory. 
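The core of the change, condensed from the new ovl_lookup_positive_unlocked() helper in the diff below, is to drop an unreferenced negative upper dentry under d_lock, rechecking the conditions once the lock is held:

    if (!IS_ERR(ret) && d_flags_negative(smp_load_acquire(&ret->d_flags))) {
            if (drop_negative && ret->d_lockref.count == 1) {
                    spin_lock(&ret->d_lock);
                    /* recheck under the lock before dropping */
                    if (d_is_negative(ret) && ret->d_lockref.count == 1)
                            __d_drop(ret);
                    spin_unlock(&ret->d_lock);
            }
            dput(ret);
            ret = ERR_PTR(-ENOENT);
    }
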
Signed-off-by: Chengguang Xu Signed-off-by: Miklos Szeredi --- fs/overlayfs/namei.c | 35 ++++++++++++++++++++++++++++------- 1 file changed, 28 insertions(+), 7 deletions(-) diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c index f9640d27ddab6..5dc19dcea5a4e 100644 --- a/fs/overlayfs/namei.c +++ b/fs/overlayfs/namei.c @@ -191,16 +191,36 @@ static bool ovl_is_opaquedir(struct dentry *dentry) return ovl_check_dir_xattr(dentry, OVL_XATTR_OPAQUE); } +static struct dentry *ovl_lookup_positive_unlocked(const char *name, + struct dentry *base, int len, + bool drop_negative) +{ + struct dentry *ret = lookup_one_len_unlocked(name, base, len); + + if (!IS_ERR(ret) && d_flags_negative(smp_load_acquire(&ret->d_flags))) { + if (drop_negative && ret->d_lockref.count == 1) { + spin_lock(&ret->d_lock); + /* Recheck condition under lock */ + if (d_is_negative(ret) && ret->d_lockref.count == 1) + __d_drop(ret); + spin_unlock(&ret->d_lock); + } + dput(ret); + ret = ERR_PTR(-ENOENT); + } + return ret; +} + static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d, const char *name, unsigned int namelen, size_t prelen, const char *post, - struct dentry **ret) + struct dentry **ret, bool drop_negative) { struct dentry *this; int err; bool last_element = !post[0]; - this = lookup_positive_unlocked(name, base, namelen); + this = ovl_lookup_positive_unlocked(name, base, namelen, drop_negative); if (IS_ERR(this)) { err = PTR_ERR(this); this = NULL; @@ -276,7 +296,7 @@ out_err: } static int ovl_lookup_layer(struct dentry *base, struct ovl_lookup_data *d, - struct dentry **ret) + struct dentry **ret, bool drop_negative) { /* Counting down from the end, since the prefix can change */ size_t rem = d->name.len - 1; @@ -285,7 +305,7 @@ static int ovl_lookup_layer(struct dentry *base, struct ovl_lookup_data *d, if (d->name.name[0] != '/') return ovl_lookup_single(base, d, d->name.name, d->name.len, - 0, "", ret); + 0, "", ret, drop_negative); while (!IS_ERR_OR_NULL(base) && d_can_lookup(base)) { const char *s = d->name.name + d->name.len - rem; @@ -298,7 +318,8 @@ static int ovl_lookup_layer(struct dentry *base, struct ovl_lookup_data *d, return -EIO; err = ovl_lookup_single(base, d, s, thislen, - d->name.len - rem, next, &base); + d->name.len - rem, next, &base, + drop_negative); dput(dentry); if (err) return err; @@ -830,7 +851,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, old_cred = ovl_override_creds(dentry->d_sb); upperdir = ovl_dentry_upper(dentry->d_parent); if (upperdir) { - err = ovl_lookup_layer(upperdir, &d, &upperdentry); + err = ovl_lookup_layer(upperdir, &d, &upperdentry, true); if (err) goto out; @@ -888,7 +909,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, else d.last = lower.layer->idx == roe->numlower; - err = ovl_lookup_layer(lower.dentry, &d, &this); + err = ovl_lookup_layer(lower.dentry, &d, &this, false); if (err) goto out_put; -- GitLab From 968d81a64a883af2d16dd3f8a6ad6b67db2fde58 Mon Sep 17 00:00:00 2001 From: Jeykumar Sankaran Date: Tue, 2 Jun 2020 20:37:31 -0700 Subject: [PATCH 1049/1971] drm/connector: notify userspace on hotplug after register complete drm connector notifies userspace on hotplug event prematurely before late_register and mode_object register completes. This leads to a race between userspace and kernel on updating the IDR list. So, move the notification to end of connector register. 
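With the move, the tail of drm_connector_register() (condensed from the diff below) finishes registration before userspace is told about the connector, so the IDR entry already exists when userspace reacts to the uevent:

    drm_mode_object_register(connector->dev, &connector->base);
    connector->registration_state = DRM_CONNECTOR_REGISTERED;

    /* only now let userspace know we have a new connector */
    drm_sysfs_hotplug_event(connector->dev);
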
Signed-off-by: Jeykumar Sankaran Signed-off-by: Steve Cohen Cc: stable@vger.kernel.org Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/1591155451-10393-1-git-send-email-jsanka@codeaurora.org --- drivers/gpu/drm/drm_connector.c | 5 +++++ drivers/gpu/drm/drm_sysfs.c | 3 --- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index b1099e1251a26..d877ddc6dc575 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -27,6 +27,7 @@ #include #include #include +#include #include @@ -523,6 +524,10 @@ int drm_connector_register(struct drm_connector *connector) drm_mode_object_register(connector->dev, &connector->base); connector->registration_state = DRM_CONNECTOR_REGISTERED; + + /* Let userspace know we have a new connector */ + drm_sysfs_hotplug_event(connector->dev); + goto unlock; err_debugfs: diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index 939f0032aab18..f0336c8046392 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -291,9 +291,6 @@ int drm_sysfs_connector_add(struct drm_connector *connector) return PTR_ERR(connector->kdev); } - /* Let userspace know we have a new connector */ - drm_sysfs_hotplug_event(dev); - if (connector->ddc) return sysfs_create_link(&connector->kdev->kobj, &connector->ddc->dev.kobj, "ddc"); -- GitLab From c25bf185e57213b54ea0d632ac04907310993433 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Wed, 3 Jun 2020 11:12:32 -0400 Subject: [PATCH 1050/1971] nfsd: safer handling of corrupted c_type This can only happen if there's a bug somewhere, so let's make it a WARN not a printk. Also, I think it's safest to ignore the corruption rather than trying to fix it by removing a cache entry. Signed-off-by: J. Bruce Fields --- fs/nfsd/nfscache.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c index f30a7def78996..0a0cf1fd77d33 100644 --- a/fs/nfsd/nfscache.c +++ b/fs/nfsd/nfscache.c @@ -477,8 +477,7 @@ found_entry: rtn = RC_REPLY; break; default: - printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type); - nfsd_reply_cache_free_locked(b, rp, nn); + WARN_ONCE(1, "nfsd: bad repcache type %d\n", rp->c_type); } out_trace: -- GitLab From be25d1b5ea6a3a3ecbb5474e2ae8e32d2ba055ea Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Wed, 3 Jun 2020 10:33:52 -0700 Subject: [PATCH 1051/1971] x86/cpu: Add Sapphire Rapids CPU model number Latest edition (039) of "Intel Architecture Instruction Set Extensions and Future Features Programming Reference" includes three new CPU model numbers. Linux already has the two Ice Lake server ones. Add the new model number for Sapphire Rapids. 
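For context (illustrative only, not part of this patch), model defines like this are typically consumed through CPU match tables; a hypothetical table matching the new model might look like:

    #include <asm/cpu_device_id.h>
    #include <asm/intel-family.h>

    /* hypothetical consumer of the new model number */
    static const struct x86_cpu_id spr_cpu_ids[] = {
            X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, NULL),
            {}
    };
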
Signed-off-by: Tony Luck Signed-off-by: Borislav Petkov Link: https://lkml.kernel.org/r/20200603173352.15506-1-tony.luck@intel.com --- arch/x86/include/asm/intel-family.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h index 8f1e94f29a165..a338a6deb950b 100644 --- a/arch/x86/include/asm/intel-family.h +++ b/arch/x86/include/asm/intel-family.h @@ -89,6 +89,8 @@ #define INTEL_FAM6_COMETLAKE 0xA5 #define INTEL_FAM6_COMETLAKE_L 0xA6 +#define INTEL_FAM6_SAPPHIRERAPIDS_X 0x8F + /* "Small Core" Processors (Atom) */ #define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */ -- GitLab From b91c8c42ffdd5c983923edb38b3c3e112bfe6263 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 28 Apr 2020 13:16:53 +0000 Subject: [PATCH 1052/1971] lib/vdso: Force inlining of __cvdso_clock_gettime_common() When adding gettime64() to a 32 bit architecture (namely powerpc/32) it has been noticed that GCC doesn't inline anymore __cvdso_clock_gettime_common() because it is called twice (Once by __cvdso_clock_gettime() and once by __cvdso_clock_gettime32). This has the effect of seriously degrading the performance: Before the implementation of gettime64(), gettime() runs in: clock-gettime-monotonic-raw: vdso: 1003 nsec/call clock-gettime-monotonic-coarse: vdso: 592 nsec/call clock-gettime-monotonic: vdso: 942 nsec/call When adding a gettime64() entry point, the standard gettime() performance is degraded by 30% to 50%: clock-gettime-monotonic-raw: vdso: 1300 nsec/call clock-gettime-monotonic-coarse: vdso: 900 nsec/call clock-gettime-monotonic: vdso: 1232 nsec/call Adding __always_inline() to __cvdso_clock_gettime_common() regains the original performance. In terms of code size, the inlining increases the code size by only 176 bytes. This is in the noise for a kernel image. Signed-off-by: Christophe Leroy Signed-off-by: Thomas Gleixner Link: https://lkml.kernel.org/r/1ab6a62c356c3bec35d1623563ef9c636205bcda.1588079622.git.christophe.leroy@c-s.fr --- lib/vdso/gettimeofday.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c index a2909af4b9240..7938d3c4901d1 100644 --- a/lib/vdso/gettimeofday.c +++ b/lib/vdso/gettimeofday.c @@ -210,7 +210,7 @@ static __always_inline int do_coarse(const struct vdso_data *vd, clockid_t clk, return 0; } -static __maybe_unused int +static __always_inline int __cvdso_clock_gettime_common(const struct vdso_data *vd, clockid_t clock, struct __kernel_timespec *ts) { -- GitLab From 9fa060df65715732655f7f46fe7fa68fc1a5b98e Mon Sep 17 00:00:00 2001 From: Lubomir Rintel Date: Tue, 2 Jun 2020 21:38:23 +0200 Subject: [PATCH 1053/1971] i2c: pxa: don't error out if there's no pinctrl The bus recovery patch regresses on OLPC XO-1.75 that has no pinctrl in its DT. 
Fixes: 7c9ec2c52518 ("i2c: pxa: implement generic i2c bus recovery")' Signed-off-by: Lubomir Rintel Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-pxa.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c index a7885b8b5031c..35ca2c02c9b9b 100644 --- a/drivers/i2c/busses/i2c-pxa.c +++ b/drivers/i2c/busses/i2c-pxa.c @@ -1348,6 +1348,8 @@ static int i2c_pxa_init_recovery(struct pxa_i2c *i2c) return 0; i2c->pinctrl = devm_pinctrl_get(dev); + if (PTR_ERR(i2c->pinctrl) == -ENODEV) + i2c->pinctrl = NULL; if (IS_ERR(i2c->pinctrl)) return PTR_ERR(i2c->pinctrl); -- GitLab From cd020be04b6ec269df1b44d5cd2cd548cae0d46a Mon Sep 17 00:00:00 2001 From: Tali Perry Date: Wed, 27 May 2020 23:08:18 +0300 Subject: [PATCH 1054/1971] dt-bindings: i2c: npcm7xx: add NPCM I2C controller Added device tree binding documentation for Nuvoton BMC NPCM I2C controller. Signed-off-by: Tali Perry Reviewed-by: Rob Herring Signed-off-by: Wolfram Sang --- .../bindings/i2c/nuvoton,npcm7xx-i2c.yaml | 62 +++++++++++++++++++ 1 file changed, 62 insertions(+) create mode 100644 Documentation/devicetree/bindings/i2c/nuvoton,npcm7xx-i2c.yaml diff --git a/Documentation/devicetree/bindings/i2c/nuvoton,npcm7xx-i2c.yaml b/Documentation/devicetree/bindings/i2c/nuvoton,npcm7xx-i2c.yaml new file mode 100644 index 0000000000000..e3ef2d36f372c --- /dev/null +++ b/Documentation/devicetree/bindings/i2c/nuvoton,npcm7xx-i2c.yaml @@ -0,0 +1,62 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/i2c/nuvoton,npcm7xx-i2c.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: nuvoton NPCM7XX I2C Controller Device Tree Bindings + +description: | + The NPCM750x includes sixteen I2C bus controllers. All Controllers support + both master and slave mode. Each controller can switch between master and slave + at run time (i.e. IPMB mode). Each controller has two 16 byte HW FIFO for TX and + RX. + +maintainers: + - Tali Perry + +properties: + compatible: + const: nuvoton,npcm7xx-i2c + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + clocks: + maxItems: 1 + description: Reference clock for the I2C bus + + clock-frequency: + description: Desired I2C bus clock frequency in Hz. If not specified, + the default 100 kHz frequency will be used. + possible values are 100000, 400000 and 1000000. + default: 100000 + enum: [100000, 400000, 1000000] + +required: + - compatible + - reg + - interrupts + - clocks + +allOf: + - $ref: /schemas/i2c/i2c-controller.yaml# + +unevaluatedProperties: false + +examples: + - | + #include + #include + i2c0: i2c@80000 { + reg = <0x80000 0x1000>; + clocks = <&clk NPCM7XX_CLK_APB2>; + clock-frequency = <100000>; + interrupts = ; + compatible = "nuvoton,npcm750-i2c"; + }; + +... -- GitLab From 56a1485b102ed1cd5a4af8e87ed794699fd1cad2 Mon Sep 17 00:00:00 2001 From: Tali Perry Date: Wed, 27 May 2020 23:08:19 +0300 Subject: [PATCH 1055/1971] i2c: npcm7xx: Add Nuvoton NPCM I2C controller driver Add Nuvoton NPCM BMC I2C controller driver. 
Signed-off-by: Tali Perry Reviewed-by: Andy Shevchenko Signed-off-by: Wolfram Sang --- drivers/i2c/busses/Kconfig | 9 + drivers/i2c/busses/Makefile | 1 + drivers/i2c/busses/i2c-npcm7xx.c | 1736 ++++++++++++++++++++++++++++++ 3 files changed, 1746 insertions(+) create mode 100644 drivers/i2c/busses/i2c-npcm7xx.c diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 14e2e93a5698c..735bf31a3fdff 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -794,6 +794,15 @@ config I2C_NOMADIK I2C interface from ST-Ericsson's Nomadik and Ux500 architectures, as well as the STA2X11 PCIe I/O HUB. +config I2C_NPCM7XX + tristate "Nuvoton I2C Controller" + depends on ARCH_NPCM7XX || COMPILE_TEST + help + If you say yes to this option, support will be included for the + Nuvoton I2C controller, which is available on the NPCM7xx BMC + controller. + Driver can also support slave mode (select I2C_SLAVE). + config I2C_OCORES tristate "OpenCores I2C Controller" help diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index e18497fb85c5b..306d5dc3f417f 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -80,6 +80,7 @@ obj-$(CONFIG_I2C_MT7621) += i2c-mt7621.o obj-$(CONFIG_I2C_MV64XXX) += i2c-mv64xxx.o obj-$(CONFIG_I2C_MXS) += i2c-mxs.o obj-$(CONFIG_I2C_NOMADIK) += i2c-nomadik.o +obj-$(CONFIG_I2C_NPCM7XX) += i2c-npcm7xx.o obj-$(CONFIG_I2C_OCORES) += i2c-ocores.o obj-$(CONFIG_I2C_OMAP) += i2c-omap.o obj-$(CONFIG_I2C_OWL) += i2c-owl.o diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c new file mode 100644 index 0000000000000..018abf8dda7e8 --- /dev/null +++ b/drivers/i2c/busses/i2c-npcm7xx.c @@ -0,0 +1,1736 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Nuvoton NPCM7xx I2C Controller driver + * + * Copyright (C) 2020 Nuvoton Technologies tali.perry@nuvoton.com + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum i2c_mode { + I2C_MASTER, + I2C_SLAVE, +}; + +/* + * External I2C Interface driver xfer indication values, which indicate status + * of the bus. + */ +enum i2c_state_ind { + I2C_NO_STATUS_IND = 0, + I2C_SLAVE_RCV_IND, + I2C_SLAVE_XMIT_IND, + I2C_SLAVE_XMIT_MISSING_DATA_IND, + I2C_SLAVE_RESTART_IND, + I2C_SLAVE_DONE_IND, + I2C_MASTER_DONE_IND, + I2C_NACK_IND, + I2C_BUS_ERR_IND, + I2C_WAKE_UP_IND, + I2C_BLOCK_BYTES_ERR_IND, + I2C_SLAVE_RCV_MISSING_DATA_IND, +}; + +/* + * Operation type values (used to define the operation currently running) + * module is interrupt driven, on each interrupt the current operation is + * checked to see if the module is currently reading or writing. + */ +enum i2c_oper { + I2C_NO_OPER = 0, + I2C_WRITE_OPER, + I2C_READ_OPER, +}; + +/* I2C Bank (module had 2 banks of registers) */ +enum i2c_bank { + I2C_BANK_0 = 0, + I2C_BANK_1, +}; + +/* Internal I2C states values (for the I2C module state machine). 
*/ +enum i2c_state { + I2C_DISABLE = 0, + I2C_IDLE, + I2C_MASTER_START, + I2C_SLAVE_MATCH, + I2C_OPER_STARTED, + I2C_STOP_PENDING, +}; + +/* init register and default value required to enable module */ +#define NPCM_I2CSEGCTL 0xE4 +#define NPCM_I2CSEGCTL_INIT_VAL 0x0333F000 + +/* Common regs */ +#define NPCM_I2CSDA 0x00 +#define NPCM_I2CST 0x02 +#define NPCM_I2CCST 0x04 +#define NPCM_I2CCTL1 0x06 +#define NPCM_I2CADDR1 0x08 +#define NPCM_I2CCTL2 0x0A +#define NPCM_I2CADDR2 0x0C +#define NPCM_I2CCTL3 0x0E +#define NPCM_I2CCST2 0x18 +#define NPCM_I2CCST3 0x19 +#define I2C_VER 0x1F + +/*BANK0 regs*/ +#define NPCM_I2CADDR3 0x10 +#define NPCM_I2CADDR7 0x11 +#define NPCM_I2CADDR4 0x12 +#define NPCM_I2CADDR8 0x13 +#define NPCM_I2CADDR5 0x14 +#define NPCM_I2CADDR9 0x15 +#define NPCM_I2CADDR6 0x16 +#define NPCM_I2CADDR10 0x17 + +#define NPCM_I2CCTL4 0x1A +#define NPCM_I2CCTL5 0x1B +#define NPCM_I2CSCLLT 0x1C /* SCL Low Time */ +#define NPCM_I2CFIF_CTL 0x1D /* FIFO Control */ +#define NPCM_I2CSCLHT 0x1E /* SCL High Time */ + +/* BANK 1 regs */ +#define NPCM_I2CFIF_CTS 0x10 /* Both FIFOs Control and Status */ +#define NPCM_I2CTXF_CTL 0x12 /* Tx-FIFO Control */ +#define NPCM_I2CT_OUT 0x14 /* Bus T.O. */ +#define NPCM_I2CPEC 0x16 /* PEC Data */ +#define NPCM_I2CTXF_STS 0x1A /* Tx-FIFO Status */ +#define NPCM_I2CRXF_STS 0x1C /* Rx-FIFO Status */ +#define NPCM_I2CRXF_CTL 0x1E /* Rx-FIFO Control */ + +/* NPCM_I2CST reg fields */ +#define NPCM_I2CST_XMIT BIT(0) +#define NPCM_I2CST_MASTER BIT(1) +#define NPCM_I2CST_NMATCH BIT(2) +#define NPCM_I2CST_STASTR BIT(3) +#define NPCM_I2CST_NEGACK BIT(4) +#define NPCM_I2CST_BER BIT(5) +#define NPCM_I2CST_SDAST BIT(6) +#define NPCM_I2CST_SLVSTP BIT(7) + +/* NPCM_I2CCST reg fields */ +#define NPCM_I2CCST_BUSY BIT(0) +#define NPCM_I2CCST_BB BIT(1) +#define NPCM_I2CCST_MATCH BIT(2) +#define NPCM_I2CCST_GCMATCH BIT(3) +#define NPCM_I2CCST_TSDA BIT(4) +#define NPCM_I2CCST_TGSCL BIT(5) +#define NPCM_I2CCST_MATCHAF BIT(6) +#define NPCM_I2CCST_ARPMATCH BIT(7) + +/* NPCM_I2CCTL1 reg fields */ +#define NPCM_I2CCTL1_START BIT(0) +#define NPCM_I2CCTL1_STOP BIT(1) +#define NPCM_I2CCTL1_INTEN BIT(2) +#define NPCM_I2CCTL1_EOBINTE BIT(3) +#define NPCM_I2CCTL1_ACK BIT(4) +#define NPCM_I2CCTL1_GCMEN BIT(5) +#define NPCM_I2CCTL1_NMINTE BIT(6) +#define NPCM_I2CCTL1_STASTRE BIT(7) + +/* RW1S fields (inside a RW reg): */ +#define NPCM_I2CCTL1_RWS \ + (NPCM_I2CCTL1_START | NPCM_I2CCTL1_STOP | NPCM_I2CCTL1_ACK) + +/* npcm_i2caddr reg fields */ +#define NPCM_I2CADDR_A GENMASK(6, 0) +#define NPCM_I2CADDR_SAEN BIT(7) + +/* NPCM_I2CCTL2 reg fields */ +#define I2CCTL2_ENABLE BIT(0) +#define I2CCTL2_SCLFRQ6_0 GENMASK(7, 1) + +/* NPCM_I2CCTL3 reg fields */ +#define I2CCTL3_SCLFRQ8_7 GENMASK(1, 0) +#define I2CCTL3_ARPMEN BIT(2) +#define I2CCTL3_IDL_START BIT(3) +#define I2CCTL3_400K_MODE BIT(4) +#define I2CCTL3_BNK_SEL BIT(5) +#define I2CCTL3_SDA_LVL BIT(6) +#define I2CCTL3_SCL_LVL BIT(7) + +/* NPCM_I2CCST2 reg fields */ +#define NPCM_I2CCST2_MATCHA1F BIT(0) +#define NPCM_I2CCST2_MATCHA2F BIT(1) +#define NPCM_I2CCST2_MATCHA3F BIT(2) +#define NPCM_I2CCST2_MATCHA4F BIT(3) +#define NPCM_I2CCST2_MATCHA5F BIT(4) +#define NPCM_I2CCST2_MATCHA6F BIT(5) +#define NPCM_I2CCST2_MATCHA7F BIT(5) +#define NPCM_I2CCST2_INTSTS BIT(7) + +/* NPCM_I2CCST3 reg fields */ +#define NPCM_I2CCST3_MATCHA8F BIT(0) +#define NPCM_I2CCST3_MATCHA9F BIT(1) +#define NPCM_I2CCST3_MATCHA10F BIT(2) +#define NPCM_I2CCST3_EO_BUSY BIT(7) + +/* NPCM_I2CCTL4 reg fields */ +#define I2CCTL4_HLDT GENMASK(5, 0) +#define I2CCTL4_LVL_WE BIT(7) + 
+/* NPCM_I2CCTL5 reg fields */ +#define I2CCTL5_DBNCT GENMASK(3, 0) + +/* NPCM_I2CFIF_CTS reg fields */ +#define NPCM_I2CFIF_CTS_RXF_TXE BIT(1) +#define NPCM_I2CFIF_CTS_RFTE_IE BIT(3) +#define NPCM_I2CFIF_CTS_CLR_FIFO BIT(6) +#define NPCM_I2CFIF_CTS_SLVRSTR BIT(7) + +/* NPCM_I2CTXF_CTL reg fields */ +#define NPCM_I2CTXF_CTL_TX_THR GENMASK(4, 0) +#define NPCM_I2CTXF_CTL_THR_TXIE BIT(6) + +/* NPCM_I2CT_OUT reg fields */ +#define NPCM_I2CT_OUT_TO_CKDIV GENMASK(5, 0) +#define NPCM_I2CT_OUT_T_OUTIE BIT(6) +#define NPCM_I2CT_OUT_T_OUTST BIT(7) + +/* NPCM_I2CTXF_STS reg fields */ +#define NPCM_I2CTXF_STS_TX_BYTES GENMASK(4, 0) +#define NPCM_I2CTXF_STS_TX_THST BIT(6) + +/* NPCM_I2CRXF_STS reg fields */ +#define NPCM_I2CRXF_STS_RX_BYTES GENMASK(4, 0) +#define NPCM_I2CRXF_STS_RX_THST BIT(6) + +/* NPCM_I2CFIF_CTL reg fields */ +#define NPCM_I2CFIF_CTL_FIFO_EN BIT(4) + +/* NPCM_I2CRXF_CTL reg fields */ +#define NPCM_I2CRXF_CTL_RX_THR GENMASK(4, 0) +#define NPCM_I2CRXF_CTL_LAST_PEC BIT(5) +#define NPCM_I2CRXF_CTL_THR_RXIE BIT(6) + +#define I2C_HW_FIFO_SIZE 16 + +/* I2C_VER reg fields */ +#define I2C_VER_VERSION GENMASK(6, 0) +#define I2C_VER_FIFO_EN BIT(7) + +/* stall/stuck timeout in us */ +#define DEFAULT_STALL_COUNT 25 + +/* SCLFRQ field position */ +#define SCLFRQ_0_TO_6 GENMASK(6, 0) +#define SCLFRQ_7_TO_8 GENMASK(8, 7) + +/* supported clk settings. values in Hz. */ +#define I2C_FREQ_MIN_HZ 10000 +#define I2C_FREQ_MAX_HZ I2C_MAX_FAST_MODE_PLUS_FREQ + +/* Status of one I2C module */ +struct npcm_i2c { + struct i2c_adapter adap; + struct device *dev; + unsigned char __iomem *reg; + spinlock_t lock; /* IRQ synchronization */ + struct completion cmd_complete; + int cmd_err; + struct i2c_msg *msgs; + int msgs_num; + int num; + u32 apb_clk; + struct i2c_bus_recovery_info rinfo; + enum i2c_state state; + enum i2c_oper operation; + enum i2c_mode master_or_slave; + enum i2c_state_ind stop_ind; + u8 dest_addr; + u8 *rd_buf; + u16 rd_size; + u16 rd_ind; + u8 *wr_buf; + u16 wr_size; + u16 wr_ind; + bool fifo_use; + u16 PEC_mask; /* PEC bit mask per slave address */ + bool PEC_use; + bool read_block_use; + unsigned long int_time_stamp; + unsigned long bus_freq; /* in Hz */ + struct dentry *debugfs; /* debugfs device directory */ + u64 ber_cnt; + u64 rec_succ_cnt; + u64 rec_fail_cnt; + u64 nack_cnt; + u64 timeout_cnt; +}; + +static inline void npcm_i2c_select_bank(struct npcm_i2c *bus, + enum i2c_bank bank) +{ + u8 i2cctl3 = ioread8(bus->reg + NPCM_I2CCTL3); + + if (bank == I2C_BANK_0) + i2cctl3 = i2cctl3 & ~I2CCTL3_BNK_SEL; + else + i2cctl3 = i2cctl3 | I2CCTL3_BNK_SEL; + iowrite8(i2cctl3, bus->reg + NPCM_I2CCTL3); +} + +static void npcm_i2c_init_params(struct npcm_i2c *bus) +{ + bus->stop_ind = I2C_NO_STATUS_IND; + bus->rd_size = 0; + bus->wr_size = 0; + bus->rd_ind = 0; + bus->wr_ind = 0; + bus->read_block_use = false; + bus->int_time_stamp = 0; + bus->PEC_use = false; + bus->PEC_mask = 0; +} + +static inline void npcm_i2c_wr_byte(struct npcm_i2c *bus, u8 data) +{ + iowrite8(data, bus->reg + NPCM_I2CSDA); +} + +static inline u8 npcm_i2c_rd_byte(struct npcm_i2c *bus) +{ + return ioread8(bus->reg + NPCM_I2CSDA); +} + +static int npcm_i2c_get_SCL(struct i2c_adapter *_adap) +{ + struct npcm_i2c *bus = container_of(_adap, struct npcm_i2c, adap); + + return !!(I2CCTL3_SCL_LVL & ioread32(bus->reg + NPCM_I2CCTL3)); +} + +static int npcm_i2c_get_SDA(struct i2c_adapter *_adap) +{ + struct npcm_i2c *bus = container_of(_adap, struct npcm_i2c, adap); + + return !!(I2CCTL3_SDA_LVL & ioread32(bus->reg + NPCM_I2CCTL3)); +} + 
+static inline u16 npcm_i2c_get_index(struct npcm_i2c *bus) +{ + if (bus->operation == I2C_READ_OPER) + return bus->rd_ind; + if (bus->operation == I2C_WRITE_OPER) + return bus->wr_ind; + return 0; +} + +/* quick protocol (just address) */ +static inline bool npcm_i2c_is_quick(struct npcm_i2c *bus) +{ + return bus->wr_size == 0 && bus->rd_size == 0; +} + +static void npcm_i2c_disable(struct npcm_i2c *bus) +{ + u8 i2cctl2; + + /* Disable module */ + i2cctl2 = ioread8(bus->reg + NPCM_I2CCTL2); + i2cctl2 = i2cctl2 & ~I2CCTL2_ENABLE; + iowrite8(i2cctl2, bus->reg + NPCM_I2CCTL2); + + bus->state = I2C_DISABLE; +} + +static void npcm_i2c_enable(struct npcm_i2c *bus) +{ + u8 i2cctl2 = ioread8(bus->reg + NPCM_I2CCTL2); + + i2cctl2 = i2cctl2 | I2CCTL2_ENABLE; + iowrite8(i2cctl2, bus->reg + NPCM_I2CCTL2); + bus->state = I2C_IDLE; +} + +/* enable\disable end of busy (EOB) interrupts */ +static inline void npcm_i2c_eob_int(struct npcm_i2c *bus, bool enable) +{ + u8 val; + + /* Clear EO_BUSY pending bit: */ + val = ioread8(bus->reg + NPCM_I2CCST3); + val = val | NPCM_I2CCST3_EO_BUSY; + iowrite8(val, bus->reg + NPCM_I2CCST3); + + val = ioread8(bus->reg + NPCM_I2CCTL1); + val &= ~NPCM_I2CCTL1_RWS; + if (enable) + val |= NPCM_I2CCTL1_EOBINTE; + else + val &= ~NPCM_I2CCTL1_EOBINTE; + iowrite8(val, bus->reg + NPCM_I2CCTL1); +} + +static inline bool npcm_i2c_tx_fifo_empty(struct npcm_i2c *bus) +{ + u8 tx_fifo_sts; + + tx_fifo_sts = ioread8(bus->reg + NPCM_I2CTXF_STS); + /* check if TX FIFO is not empty */ + if ((tx_fifo_sts & NPCM_I2CTXF_STS_TX_BYTES) == 0) + return false; + + /* check if TX FIFO status bit is set: */ + return !!FIELD_GET(NPCM_I2CTXF_STS_TX_THST, tx_fifo_sts); +} + +static inline bool npcm_i2c_rx_fifo_full(struct npcm_i2c *bus) +{ + u8 rx_fifo_sts; + + rx_fifo_sts = ioread8(bus->reg + NPCM_I2CRXF_STS); + /* check if RX FIFO is not empty: */ + if ((rx_fifo_sts & NPCM_I2CRXF_STS_RX_BYTES) == 0) + return false; + + /* check if rx fifo full status is set: */ + return !!FIELD_GET(NPCM_I2CRXF_STS_RX_THST, rx_fifo_sts); +} + +static inline void npcm_i2c_clear_fifo_int(struct npcm_i2c *bus) +{ + u8 val; + + val = ioread8(bus->reg + NPCM_I2CFIF_CTS); + val = (val & NPCM_I2CFIF_CTS_SLVRSTR) | NPCM_I2CFIF_CTS_RXF_TXE; + iowrite8(val, bus->reg + NPCM_I2CFIF_CTS); +} + +static inline void npcm_i2c_clear_tx_fifo(struct npcm_i2c *bus) +{ + u8 val; + + val = ioread8(bus->reg + NPCM_I2CTXF_STS); + val = val | NPCM_I2CTXF_STS_TX_THST; + iowrite8(val, bus->reg + NPCM_I2CTXF_STS); +} + +static inline void npcm_i2c_clear_rx_fifo(struct npcm_i2c *bus) +{ + u8 val; + + val = ioread8(bus->reg + NPCM_I2CRXF_STS); + val = val | NPCM_I2CRXF_STS_RX_THST; + iowrite8(val, bus->reg + NPCM_I2CRXF_STS); +} + +static void npcm_i2c_int_enable(struct npcm_i2c *bus, bool enable) +{ + u8 val; + + val = ioread8(bus->reg + NPCM_I2CCTL1); + val &= ~NPCM_I2CCTL1_RWS; + if (enable) + val |= NPCM_I2CCTL1_INTEN; + else + val &= ~NPCM_I2CCTL1_INTEN; + iowrite8(val, bus->reg + NPCM_I2CCTL1); +} + +static inline void npcm_i2c_master_start(struct npcm_i2c *bus) +{ + u8 val; + + val = ioread8(bus->reg + NPCM_I2CCTL1); + val &= ~(NPCM_I2CCTL1_STOP | NPCM_I2CCTL1_ACK); + val |= NPCM_I2CCTL1_START; + iowrite8(val, bus->reg + NPCM_I2CCTL1); +} + +static inline void npcm_i2c_master_stop(struct npcm_i2c *bus) +{ + u8 val; + + /* + * override HW issue: I2C may fail to supply stop condition in Master + * Write operation. 
+ * Need to delay at least 5 us from the last int, before issueing a stop + */ + udelay(10); /* function called from interrupt, can't sleep */ + val = ioread8(bus->reg + NPCM_I2CCTL1); + val &= ~(NPCM_I2CCTL1_START | NPCM_I2CCTL1_ACK); + val |= NPCM_I2CCTL1_STOP; + iowrite8(val, bus->reg + NPCM_I2CCTL1); + + if (!bus->fifo_use) + return; + + npcm_i2c_select_bank(bus, I2C_BANK_1); + + if (bus->operation == I2C_READ_OPER) + npcm_i2c_clear_rx_fifo(bus); + else + npcm_i2c_clear_tx_fifo(bus); + npcm_i2c_clear_fifo_int(bus); + iowrite8(0, bus->reg + NPCM_I2CTXF_CTL); +} + +static inline void npcm_i2c_stall_after_start(struct npcm_i2c *bus, bool stall) +{ + u8 val; + + val = ioread8(bus->reg + NPCM_I2CCTL1); + val &= ~NPCM_I2CCTL1_RWS; + if (stall) + val |= NPCM_I2CCTL1_STASTRE; + else + val &= ~NPCM_I2CCTL1_STASTRE; + iowrite8(val, bus->reg + NPCM_I2CCTL1); +} + +static inline void npcm_i2c_nack(struct npcm_i2c *bus) +{ + u8 val; + + val = ioread8(bus->reg + NPCM_I2CCTL1); + val &= ~(NPCM_I2CCTL1_STOP | NPCM_I2CCTL1_START); + val |= NPCM_I2CCTL1_ACK; + iowrite8(val, bus->reg + NPCM_I2CCTL1); +} + +static void npcm_i2c_reset(struct npcm_i2c *bus) +{ + /* + * Save I2CCTL1 relevant bits. It is being cleared when the module + * is disabled. + */ + u8 i2cctl1; + + i2cctl1 = ioread8(bus->reg + NPCM_I2CCTL1); + + npcm_i2c_disable(bus); + npcm_i2c_enable(bus); + + /* Restore NPCM_I2CCTL1 Status */ + i2cctl1 &= ~NPCM_I2CCTL1_RWS; + iowrite8(i2cctl1, bus->reg + NPCM_I2CCTL1); + + /* Clear BB (BUS BUSY) bit */ + iowrite8(NPCM_I2CCST_BB, bus->reg + NPCM_I2CCST); + iowrite8(0xFF, bus->reg + NPCM_I2CST); + + /* Clear EOB bit */ + iowrite8(NPCM_I2CCST3_EO_BUSY, bus->reg + NPCM_I2CCST3); + + /* Clear all fifo bits: */ + iowrite8(NPCM_I2CFIF_CTS_CLR_FIFO, bus->reg + NPCM_I2CFIF_CTS); + + bus->state = I2C_IDLE; +} + +static inline bool npcm_i2c_is_master(struct npcm_i2c *bus) +{ + return !!FIELD_GET(NPCM_I2CST_MASTER, ioread8(bus->reg + NPCM_I2CST)); +} + +static void npcm_i2c_callback(struct npcm_i2c *bus, + enum i2c_state_ind op_status, u16 info) +{ + struct i2c_msg *msgs; + int msgs_num; + + msgs = bus->msgs; + msgs_num = bus->msgs_num; + /* + * check that transaction was not timed-out, and msgs still + * holds a valid value. 
+ */ + if (!msgs) + return; + + if (completion_done(&bus->cmd_complete)) + return; + + switch (op_status) { + case I2C_MASTER_DONE_IND: + bus->cmd_err = bus->msgs_num; + fallthrough; + case I2C_BLOCK_BYTES_ERR_IND: + /* Master tx finished and all transmit bytes were sent */ + if (bus->msgs) { + if (msgs[0].flags & I2C_M_RD) + msgs[0].len = info; + else if (msgs_num == 2 && + msgs[1].flags & I2C_M_RD) + msgs[1].len = info; + } + if (completion_done(&bus->cmd_complete) == false) + complete(&bus->cmd_complete); + break; + + case I2C_NACK_IND: + /* MASTER transmit got a NACK before tx all bytes */ + bus->cmd_err = -ENXIO; + if (bus->master_or_slave == I2C_MASTER) + complete(&bus->cmd_complete); + + break; + case I2C_BUS_ERR_IND: + /* Bus error */ + bus->cmd_err = -EAGAIN; + if (bus->master_or_slave == I2C_MASTER) + complete(&bus->cmd_complete); + + break; + case I2C_WAKE_UP_IND: + /* I2C wake up */ + break; + default: + break; + } + + bus->operation = I2C_NO_OPER; +} + +static u8 npcm_i2c_fifo_usage(struct npcm_i2c *bus) +{ + if (bus->operation == I2C_WRITE_OPER) + return FIELD_GET(NPCM_I2CTXF_STS_TX_BYTES, + ioread8(bus->reg + NPCM_I2CTXF_STS)); + if (bus->operation == I2C_READ_OPER) + return FIELD_GET(NPCM_I2CRXF_STS_RX_BYTES, + ioread8(bus->reg + NPCM_I2CRXF_STS)); + return 0; +} + +static void npcm_i2c_write_to_fifo_master(struct npcm_i2c *bus, u16 max_bytes) +{ + u8 size_free_fifo; + + /* + * Fill the FIFO, while the FIFO is not full and there are more bytes + * to write + */ + size_free_fifo = I2C_HW_FIFO_SIZE - npcm_i2c_fifo_usage(bus); + while (max_bytes-- && size_free_fifo) { + if (bus->wr_ind < bus->wr_size) + npcm_i2c_wr_byte(bus, bus->wr_buf[bus->wr_ind++]); + else + npcm_i2c_wr_byte(bus, 0xFF); + size_free_fifo = I2C_HW_FIFO_SIZE - npcm_i2c_fifo_usage(bus); + } +} + +/* + * npcm_i2c_set_fifo: + * configure the FIFO before using it. If nread is -1 RX FIFO will not be + * configured. same for nwrite + */ +static void npcm_i2c_set_fifo(struct npcm_i2c *bus, int nread, int nwrite) +{ + u8 rxf_ctl = 0; + + if (!bus->fifo_use) + return; + npcm_i2c_select_bank(bus, I2C_BANK_1); + npcm_i2c_clear_tx_fifo(bus); + npcm_i2c_clear_rx_fifo(bus); + + /* configure RX FIFO */ + if (nread > 0) { + rxf_ctl = min_t(int, nread, I2C_HW_FIFO_SIZE); + + /* set LAST bit. if LAST is set next FIFO packet is nacked */ + if (nread <= I2C_HW_FIFO_SIZE) + rxf_ctl |= NPCM_I2CRXF_CTL_LAST_PEC; + + /* + * if we are about to read the first byte in blk rd mode, + * don't NACK it. If slave returns zero size HW can't NACK + * it immidiattly, it will read extra byte and then NACK. + */ + if (bus->rd_ind == 0 && bus->read_block_use) { + /* set fifo to read one byte, no last: */ + rxf_ctl = 1; + } + + /* set fifo size: */ + iowrite8(rxf_ctl, bus->reg + NPCM_I2CRXF_CTL); + } + + /* configure TX FIFO */ + if (nwrite > 0) { + if (nwrite > I2C_HW_FIFO_SIZE) + /* data to send is more then FIFO size. 
*/ + iowrite8(I2C_HW_FIFO_SIZE, bus->reg + NPCM_I2CTXF_CTL); + else + iowrite8(nwrite, bus->reg + NPCM_I2CTXF_CTL); + + npcm_i2c_clear_tx_fifo(bus); + } +} + +static void npcm_i2c_read_fifo(struct npcm_i2c *bus, u8 bytes_in_fifo) +{ + u8 data; + + while (bytes_in_fifo--) { + data = npcm_i2c_rd_byte(bus); + if (bus->rd_ind < bus->rd_size) + bus->rd_buf[bus->rd_ind++] = data; + } +} + +static inline void npcm_i2c_clear_master_status(struct npcm_i2c *bus) +{ + u8 val; + + /* Clear NEGACK, STASTR and BER bits */ + val = NPCM_I2CST_BER | NPCM_I2CST_NEGACK | NPCM_I2CST_STASTR; + iowrite8(val, bus->reg + NPCM_I2CST); +} + +static void npcm_i2c_master_abort(struct npcm_i2c *bus) +{ + /* Only current master is allowed to issue a stop condition */ + if (!npcm_i2c_is_master(bus)) + return; + + npcm_i2c_eob_int(bus, true); + npcm_i2c_master_stop(bus); + npcm_i2c_clear_master_status(bus); +} + +static void npcm_i2c_master_fifo_read(struct npcm_i2c *bus) +{ + int rcount; + int fifo_bytes; + enum i2c_state_ind ind = I2C_MASTER_DONE_IND; + + fifo_bytes = npcm_i2c_fifo_usage(bus); + rcount = bus->rd_size - bus->rd_ind; + + /* + * In order not to change the RX_TRH during transaction (we found that + * this might be problematic if it takes too much time to read the FIFO) + * we read the data in the following way. If the number of bytes to + * read == FIFO Size + C (where C < FIFO Size)then first read C bytes + * and in the next int we read rest of the data. + */ + if (rcount < (2 * I2C_HW_FIFO_SIZE) && rcount > I2C_HW_FIFO_SIZE) + fifo_bytes = rcount - I2C_HW_FIFO_SIZE; + + if (rcount <= fifo_bytes) { + /* last bytes are about to be read - end of tx */ + bus->state = I2C_STOP_PENDING; + bus->stop_ind = ind; + npcm_i2c_eob_int(bus, true); + /* Stop should be set before reading last byte. */ + npcm_i2c_master_stop(bus); + npcm_i2c_read_fifo(bus, fifo_bytes); + } else { + npcm_i2c_read_fifo(bus, fifo_bytes); + rcount = bus->rd_size - bus->rd_ind; + npcm_i2c_set_fifo(bus, rcount, -1); + } +} + +static void npcm_i2c_irq_master_handler_write(struct npcm_i2c *bus) +{ + u16 wcount; + + if (bus->fifo_use) + npcm_i2c_clear_tx_fifo(bus); /* clear the TX fifo status bit */ + + /* Master write operation - last byte handling */ + if (bus->wr_ind == bus->wr_size) { + if (bus->fifo_use && npcm_i2c_fifo_usage(bus) > 0) + /* + * No more bytes to send (to add to the FIFO), + * however the FIFO is not empty yet. It is + * still in the middle of tx. Currently there's nothing + * to do except for waiting to the end of the tx + * We will get an int when the FIFO will get empty. + */ + return; + + if (bus->rd_size == 0) { + /* all bytes have been written, in wr only operation */ + npcm_i2c_eob_int(bus, true); + bus->state = I2C_STOP_PENDING; + bus->stop_ind = I2C_MASTER_DONE_IND; + npcm_i2c_master_stop(bus); + /* Clear SDA Status bit (by writing dummy byte) */ + npcm_i2c_wr_byte(bus, 0xFF); + + } else { + /* last write-byte written on previous int - restart */ + npcm_i2c_set_fifo(bus, bus->rd_size, -1); + /* Generate repeated start upon next write to SDA */ + npcm_i2c_master_start(bus); + + /* + * Receiving one byte only - stall after successful + * completion of send address byte. If we NACK here, and + * slave doesn't ACK the address, we might + * unintentionally NACK the next multi-byte read. 
+ */ + if (bus->rd_size == 1) + npcm_i2c_stall_after_start(bus, true); + + /* Next int will occur on read */ + bus->operation = I2C_READ_OPER; + /* send the slave address in read direction */ + npcm_i2c_wr_byte(bus, bus->dest_addr | 0x1); + } + } else { + /* write next byte not last byte and not slave address */ + if (!bus->fifo_use || bus->wr_size == 1) { + npcm_i2c_wr_byte(bus, bus->wr_buf[bus->wr_ind++]); + } else { + wcount = bus->wr_size - bus->wr_ind; + npcm_i2c_set_fifo(bus, -1, wcount); + if (wcount) + npcm_i2c_write_to_fifo_master(bus, wcount); + } + } +} + +static void npcm_i2c_irq_master_handler_read(struct npcm_i2c *bus) +{ + u16 block_extra_bytes_size; + u8 data; + + /* added bytes to the packet: */ + block_extra_bytes_size = bus->read_block_use + bus->PEC_use; + + /* + * Perform master read, distinguishing between last byte and the rest of + * the bytes. The last byte should be read when the clock is stopped + */ + if (bus->rd_ind == 0) { /* first byte handling: */ + if (bus->read_block_use) { + /* first byte in block protocol is the size: */ + data = npcm_i2c_rd_byte(bus); + data = clamp_val(data, 1, I2C_SMBUS_BLOCK_MAX); + bus->rd_size = data + block_extra_bytes_size; + bus->rd_buf[bus->rd_ind++] = data; + + /* clear RX FIFO interrupt status: */ + if (bus->fifo_use) { + data = ioread8(bus->reg + NPCM_I2CFIF_CTS); + data = data | NPCM_I2CFIF_CTS_RXF_TXE; + iowrite8(data, bus->reg + NPCM_I2CFIF_CTS); + } + + npcm_i2c_set_fifo(bus, bus->rd_size - 1, -1); + npcm_i2c_stall_after_start(bus, false); + } else { + npcm_i2c_clear_tx_fifo(bus); + npcm_i2c_master_fifo_read(bus); + } + } else { + if (bus->rd_size == block_extra_bytes_size && + bus->read_block_use) { + bus->state = I2C_STOP_PENDING; + bus->stop_ind = I2C_BLOCK_BYTES_ERR_IND; + bus->cmd_err = -EIO; + npcm_i2c_eob_int(bus, true); + npcm_i2c_master_stop(bus); + npcm_i2c_read_fifo(bus, npcm_i2c_fifo_usage(bus)); + } else { + npcm_i2c_master_fifo_read(bus); + } + } +} + +static void npcm_i2c_irq_handle_nmatch(struct npcm_i2c *bus) +{ + iowrite8(NPCM_I2CST_NMATCH, bus->reg + NPCM_I2CST); + npcm_i2c_nack(bus); + bus->stop_ind = I2C_BUS_ERR_IND; + npcm_i2c_callback(bus, bus->stop_ind, npcm_i2c_get_index(bus)); +} + +/* A NACK has occurred */ +static void npcm_i2c_irq_handle_nack(struct npcm_i2c *bus) +{ + u8 val; + + if (bus->nack_cnt < ULLONG_MAX) + bus->nack_cnt++; + + if (bus->fifo_use) { + /* + * if there are still untransmitted bytes in TX FIFO + * reduce them from wr_ind + */ + if (bus->operation == I2C_WRITE_OPER) + bus->wr_ind -= npcm_i2c_fifo_usage(bus); + + /* clear the FIFO */ + iowrite8(NPCM_I2CFIF_CTS_CLR_FIFO, bus->reg + NPCM_I2CFIF_CTS); + } + + /* In master write operation, got unexpected NACK */ + bus->stop_ind = I2C_NACK_IND; + /* Only current master is allowed to issue Stop Condition */ + if (npcm_i2c_is_master(bus)) { + /* stopping in the middle */ + npcm_i2c_eob_int(bus, false); + npcm_i2c_master_stop(bus); + + /* + * The bus is released from stall only after the SW clears + * NEGACK bit. Then a Stop condition is sent. + */ + npcm_i2c_clear_master_status(bus); + readx_poll_timeout_atomic(ioread8, bus->reg + NPCM_I2CCST, val, + !(val & NPCM_I2CCST_BUSY), 10, 200); + } + bus->state = I2C_IDLE; + + /* + * In Master mode, NACK should be cleared only after STOP. + * In such case, the bus is released from stall only after the + * software clears NACK bit. Then a Stop condition is sent. 
+ */ + npcm_i2c_callback(bus, bus->stop_ind, bus->wr_ind); +} + + /* Master mode: a Bus Error has been identified */ +static void npcm_i2c_irq_handle_ber(struct npcm_i2c *bus) +{ + if (bus->ber_cnt < ULLONG_MAX) + bus->ber_cnt++; + bus->stop_ind = I2C_BUS_ERR_IND; + if (npcm_i2c_is_master(bus)) { + npcm_i2c_master_abort(bus); + } else { + npcm_i2c_clear_master_status(bus); + + /* Clear BB (BUS BUSY) bit */ + iowrite8(NPCM_I2CCST_BB, bus->reg + NPCM_I2CCST); + + bus->cmd_err = -EAGAIN; + npcm_i2c_callback(bus, bus->stop_ind, npcm_i2c_get_index(bus)); + } + bus->state = I2C_IDLE; +} + + /* EOB: a master End Of Busy (meaning STOP completed) */ +static void npcm_i2c_irq_handle_eob(struct npcm_i2c *bus) +{ + npcm_i2c_eob_int(bus, false); + bus->state = I2C_IDLE; + npcm_i2c_callback(bus, bus->stop_ind, bus->rd_ind); +} + +/* Address sent and requested stall occurred (Master mode) */ +static void npcm_i2c_irq_handle_stall_after_start(struct npcm_i2c *bus) +{ + if (npcm_i2c_is_quick(bus)) { + bus->state = I2C_STOP_PENDING; + bus->stop_ind = I2C_MASTER_DONE_IND; + npcm_i2c_eob_int(bus, true); + npcm_i2c_master_stop(bus); + } else if ((bus->rd_size == 1) && !bus->read_block_use) { + /* + * Receiving one byte only - set NACK after ensuring + * slave ACKed the address byte. + */ + npcm_i2c_nack(bus); + } + + /* Reset stall-after-address-byte */ + npcm_i2c_stall_after_start(bus, false); + + /* Clear stall only after setting STOP */ + iowrite8(NPCM_I2CST_STASTR, bus->reg + NPCM_I2CST); +} + +/* SDA status is set - TX or RX, master */ +static void npcm_i2c_irq_handle_sda(struct npcm_i2c *bus, u8 i2cst) +{ + u8 fif_cts; + + if (!npcm_i2c_is_master(bus)) + return; + + if (bus->state == I2C_IDLE) { + bus->stop_ind = I2C_WAKE_UP_IND; + + if (npcm_i2c_is_quick(bus) || bus->read_block_use) + /* + * Need to stall after successful + * completion of sending address byte + */ + npcm_i2c_stall_after_start(bus, true); + else + npcm_i2c_stall_after_start(bus, false); + + /* + * Receiving one byte only - stall after successful completion + * of sending address byte If we NACK here, and slave doesn't + * ACK the address, we might unintentionally NACK the next + * multi-byte read + */ + if (bus->wr_size == 0 && bus->rd_size == 1) + npcm_i2c_stall_after_start(bus, true); + + /* Initiate I2C master tx */ + + /* select bank 1 for FIFO regs */ + npcm_i2c_select_bank(bus, I2C_BANK_1); + + fif_cts = ioread8(bus->reg + NPCM_I2CFIF_CTS); + fif_cts = fif_cts & ~NPCM_I2CFIF_CTS_SLVRSTR; + + /* clear FIFO and relevant status bits. */ + fif_cts = fif_cts | NPCM_I2CFIF_CTS_CLR_FIFO; + iowrite8(fif_cts, bus->reg + NPCM_I2CFIF_CTS); + + /* re-enable */ + fif_cts = fif_cts | NPCM_I2CFIF_CTS_RXF_TXE; + iowrite8(fif_cts, bus->reg + NPCM_I2CFIF_CTS); + + /* + * Configure the FIFO threshold: + * according to the needed # of bytes to read. + * Note: due to HW limitation can't config the rx fifo before it + * got and ACK on the restart. LAST bit will not be reset unless + * RX completed. It will stay set on the next tx. 
+ */ + if (bus->wr_size) + npcm_i2c_set_fifo(bus, -1, bus->wr_size); + else + npcm_i2c_set_fifo(bus, bus->rd_size, -1); + + bus->state = I2C_OPER_STARTED; + + if (npcm_i2c_is_quick(bus) || bus->wr_size) + npcm_i2c_wr_byte(bus, bus->dest_addr); + else + npcm_i2c_wr_byte(bus, bus->dest_addr | BIT(0)); + /* SDA interrupt, after start\restart */ + } else { + if (NPCM_I2CST_XMIT & i2cst) { + bus->operation = I2C_WRITE_OPER; + npcm_i2c_irq_master_handler_write(bus); + } else { + bus->operation = I2C_READ_OPER; + npcm_i2c_irq_master_handler_read(bus); + } + } +} + +static int npcm_i2c_int_master_handler(struct npcm_i2c *bus) +{ + u8 i2cst; + int ret = -EIO; + + i2cst = ioread8(bus->reg + NPCM_I2CST); + + if (FIELD_GET(NPCM_I2CST_NMATCH, i2cst)) { + npcm_i2c_irq_handle_nmatch(bus); + return 0; + } + /* A NACK has occurred */ + if (FIELD_GET(NPCM_I2CST_NEGACK, i2cst)) { + npcm_i2c_irq_handle_nack(bus); + return 0; + } + + /* Master mode: a Bus Error has been identified */ + if (FIELD_GET(NPCM_I2CST_BER, i2cst)) { + npcm_i2c_irq_handle_ber(bus); + return 0; + } + + /* EOB: a master End Of Busy (meaning STOP completed) */ + if ((FIELD_GET(NPCM_I2CCTL1_EOBINTE, + ioread8(bus->reg + NPCM_I2CCTL1)) == 1) && + (FIELD_GET(NPCM_I2CCST3_EO_BUSY, + ioread8(bus->reg + NPCM_I2CCST3)))) { + npcm_i2c_irq_handle_eob(bus); + return 0; + } + + /* Address sent and requested stall occurred (Master mode) */ + if (FIELD_GET(NPCM_I2CST_STASTR, i2cst)) { + npcm_i2c_irq_handle_stall_after_start(bus); + ret = 0; + } + + /* SDA status is set - TX or RX, master */ + if (FIELD_GET(NPCM_I2CST_SDAST, i2cst) || + (bus->fifo_use && + (npcm_i2c_tx_fifo_empty(bus) || npcm_i2c_rx_fifo_full(bus)))) { + npcm_i2c_irq_handle_sda(bus, i2cst); + ret = 0; + } + + return ret; +} + +/* recovery using TGCLK functionality of the module */ +static int npcm_i2c_recovery_tgclk(struct i2c_adapter *_adap) +{ + u8 val; + u8 fif_cts; + bool done = false; + int status = -ENOTRECOVERABLE; + struct npcm_i2c *bus = container_of(_adap, struct npcm_i2c, adap); + /* Allow 3 bytes (27 toggles) to be read from the slave: */ + int iter = 27; + + if ((npcm_i2c_get_SDA(_adap) == 1) && (npcm_i2c_get_SCL(_adap) == 1)) { + dev_dbg(bus->dev, "bus%d recovery skipped, bus not stuck", + bus->num); + npcm_i2c_reset(bus); + return status; + } + + npcm_i2c_int_enable(bus, false); + npcm_i2c_disable(bus); + npcm_i2c_enable(bus); + iowrite8(NPCM_I2CCST_BB, bus->reg + NPCM_I2CCST); + npcm_i2c_clear_tx_fifo(bus); + npcm_i2c_clear_rx_fifo(bus); + iowrite8(0, bus->reg + NPCM_I2CRXF_CTL); + iowrite8(0, bus->reg + NPCM_I2CTXF_CTL); + npcm_i2c_stall_after_start(bus, false); + + /* select bank 1 for FIFO regs */ + npcm_i2c_select_bank(bus, I2C_BANK_1); + + /* clear FIFO and relevant status bits. */ + fif_cts = ioread8(bus->reg + NPCM_I2CFIF_CTS); + fif_cts &= ~NPCM_I2CFIF_CTS_SLVRSTR; + fif_cts |= NPCM_I2CFIF_CTS_CLR_FIFO; + iowrite8(fif_cts, bus->reg + NPCM_I2CFIF_CTS); + npcm_i2c_set_fifo(bus, -1, 0); + + /* Repeat the following sequence until SDA is released */ + do { + /* Issue a single SCL toggle */ + iowrite8(NPCM_I2CCST_TGSCL, bus->reg + NPCM_I2CCST); + usleep_range(20, 30); + /* If SDA line is inactive (high), stop */ + if (npcm_i2c_get_SDA(_adap)) { + done = true; + status = 0; + } + } while (!done && iter--); + + /* If SDA line is released: send start-addr-stop, to re-sync. 
 */ + if (npcm_i2c_get_SDA(_adap)) { + /* Send an address byte in write direction: */ + npcm_i2c_wr_byte(bus, bus->dest_addr); + npcm_i2c_master_start(bus); + /* Wait until START condition is sent */ + status = readx_poll_timeout(npcm_i2c_get_SCL, _adap, val, !val, + 20, 200); + /* If START condition was sent */ + if (npcm_i2c_is_master(bus) > 0) { + usleep_range(20, 30); + npcm_i2c_master_stop(bus); + usleep_range(200, 500); + } + } + npcm_i2c_reset(bus); + npcm_i2c_int_enable(bus, true); + + if ((npcm_i2c_get_SDA(_adap) == 1) && (npcm_i2c_get_SCL(_adap) == 1)) + status = 0; + else + status = -ENOTRECOVERABLE; + if (status) { + if (bus->rec_fail_cnt < ULLONG_MAX) + bus->rec_fail_cnt++; + } else { + if (bus->rec_succ_cnt < ULLONG_MAX) + bus->rec_succ_cnt++; + } + return status; +} + +/* recovery using bit banging functionality of the module */ +static void npcm_i2c_recovery_init(struct i2c_adapter *_adap) +{ + struct npcm_i2c *bus = container_of(_adap, struct npcm_i2c, adap); + struct i2c_bus_recovery_info *rinfo = &bus->rinfo; + + rinfo->recover_bus = npcm_i2c_recovery_tgclk; + + /* + * npcm i2c HW allows direct reading of SCL and SDA. + * However, it does not support setting SCL and SDA directly. + * The recovery function can toggle SCL when SDA is low (but not set). + * Getter functions are used internally, and can be used externally. + */ + rinfo->get_scl = npcm_i2c_get_SCL; + rinfo->get_sda = npcm_i2c_get_SDA; + _adap->bus_recovery_info = rinfo; +} + +/* SCLFRQ min/max field values */ +#define SCLFRQ_MIN 10 +#define SCLFRQ_MAX 511 +#define clk_coef(freq, mul) DIV_ROUND_UP((freq) * (mul), 1000000) + +/* + * npcm_i2c_init_clk: init HW timing parameters. + * NPCM7XX i2c module timing parameters are dependent on module core clk (APB) + * and bus frequency. + * 100kHz bus requires tSCL = 4 * SCLFRQ * tCLK. LT and HT are symmetric. + * 400kHz bus requires asymmetric HT and LT. A different equation is recommended + * by the HW designer, given core clock range (equations in comments below).
+ * + */ +static int npcm_i2c_init_clk(struct npcm_i2c *bus, u32 bus_freq_hz) +{ + u32 k1 = 0; + u32 k2 = 0; + u8 dbnct = 0; + u32 sclfrq = 0; + u8 hldt = 7; + u8 fast_mode = 0; + u32 src_clk_khz; + u32 bus_freq_khz; + + src_clk_khz = bus->apb_clk / 1000; + bus_freq_khz = bus_freq_hz / 1000; + bus->bus_freq = bus_freq_hz; + + /* 100KHz and below: */ + if (bus_freq_hz <= I2C_MAX_STANDARD_MODE_FREQ) { + sclfrq = src_clk_khz / (bus_freq_khz * 4); + + if (sclfrq < SCLFRQ_MIN || sclfrq > SCLFRQ_MAX) + return -EDOM; + + if (src_clk_khz >= 40000) + hldt = 17; + else if (src_clk_khz >= 12500) + hldt = 15; + else + hldt = 7; + } + + /* 400KHz: */ + else if (bus_freq_hz <= I2C_MAX_FAST_MODE_FREQ) { + sclfrq = 0; + fast_mode = I2CCTL3_400K_MODE; + + if (src_clk_khz < 7500) + /* 400KHZ cannot be supported for core clock < 7.5MHz */ + return -EDOM; + + else if (src_clk_khz >= 50000) { + k1 = 80; + k2 = 48; + hldt = 12; + dbnct = 7; + } + + /* Master or Slave with frequency > 25MHz */ + else if (src_clk_khz > 25000) { + hldt = clk_coef(src_clk_khz, 300) + 7; + k1 = clk_coef(src_clk_khz, 1600); + k2 = clk_coef(src_clk_khz, 900); + } + } + + /* 1MHz: */ + else if (bus_freq_hz <= I2C_MAX_FAST_MODE_PLUS_FREQ) { + sclfrq = 0; + fast_mode = I2CCTL3_400K_MODE; + + /* 1MHZ cannot be supported for core clock < 24 MHz */ + if (src_clk_khz < 24000) + return -EDOM; + + k1 = clk_coef(src_clk_khz, 620); + k2 = clk_coef(src_clk_khz, 380); + + /* Core clk > 40 MHz */ + if (src_clk_khz > 40000) { + /* + * Set HLDT: + * SDA hold time: (HLDT-7) * T(CLK) >= 120 + * HLDT = 120/T(CLK) + 7 = 120 * FREQ(CLK) + 7 + */ + hldt = clk_coef(src_clk_khz, 120) + 7; + } else { + hldt = 7; + dbnct = 2; + } + } + + /* Frequency larger than 1 MHz is not supported */ + else + return -EINVAL; + + if (bus_freq_hz >= I2C_MAX_FAST_MODE_FREQ) { + k1 = round_up(k1, 2); + k2 = round_up(k2 + 1, 2); + if (k1 < SCLFRQ_MIN || k1 > SCLFRQ_MAX || + k2 < SCLFRQ_MIN || k2 > SCLFRQ_MAX) + return -EDOM; + } + + /* write sclfrq value. 
bits [6:0] are in I2CCTL2 reg */ + iowrite8(FIELD_PREP(I2CCTL2_SCLFRQ6_0, sclfrq & 0x7F), + bus->reg + NPCM_I2CCTL2); + + /* bits [8:7] are in I2CCTL3 reg */ + iowrite8(fast_mode | FIELD_PREP(I2CCTL3_SCLFRQ8_7, (sclfrq >> 7) & 0x3), + bus->reg + NPCM_I2CCTL3); + + /* Select Bank 0 to access NPCM_I2CCTL4/NPCM_I2CCTL5 */ + npcm_i2c_select_bank(bus, I2C_BANK_0); + + if (bus_freq_hz >= I2C_MAX_FAST_MODE_FREQ) { + /* + * Set SCL Low/High Time: + * k1 = 2 * SCLLT7-0 -> Low Time = k1 / 2 + * k2 = 2 * SCLLT7-0 -> High Time = k2 / 2 + */ + iowrite8(k1 / 2, bus->reg + NPCM_I2CSCLLT); + iowrite8(k2 / 2, bus->reg + NPCM_I2CSCLHT); + + iowrite8(dbnct, bus->reg + NPCM_I2CCTL5); + } + + iowrite8(hldt, bus->reg + NPCM_I2CCTL4); + + /* Return to Bank 1, and stay there by default: */ + npcm_i2c_select_bank(bus, I2C_BANK_1); + + return 0; +} + +static int npcm_i2c_init_module(struct npcm_i2c *bus, enum i2c_mode mode, + u32 bus_freq_hz) +{ + u8 val; + int ret; + + /* Check whether module already enabled or frequency is out of bounds */ + if ((bus->state != I2C_DISABLE && bus->state != I2C_IDLE) || + bus_freq_hz < I2C_FREQ_MIN_HZ || bus_freq_hz > I2C_FREQ_MAX_HZ) + return -EINVAL; + + npcm_i2c_disable(bus); + + /* Configure FIFO mode : */ + if (FIELD_GET(I2C_VER_FIFO_EN, ioread8(bus->reg + I2C_VER))) { + bus->fifo_use = true; + npcm_i2c_select_bank(bus, I2C_BANK_0); + val = ioread8(bus->reg + NPCM_I2CFIF_CTL); + val |= NPCM_I2CFIF_CTL_FIFO_EN; + iowrite8(val, bus->reg + NPCM_I2CFIF_CTL); + npcm_i2c_select_bank(bus, I2C_BANK_1); + } else { + bus->fifo_use = false; + } + + /* Configure I2C module clock frequency */ + ret = npcm_i2c_init_clk(bus, bus_freq_hz); + if (ret) { + dev_err(bus->dev, "npcm_i2c_init_clk failed\n"); + return ret; + } + + /* Enable module (before configuring CTL1) */ + npcm_i2c_enable(bus); + bus->state = I2C_IDLE; + val = ioread8(bus->reg + NPCM_I2CCTL1); + val = (val | NPCM_I2CCTL1_NMINTE) & ~NPCM_I2CCTL1_RWS; + iowrite8(val, bus->reg + NPCM_I2CCTL1); + + npcm_i2c_int_enable(bus, true); + + npcm_i2c_reset(bus); + + return 0; +} + +static int __npcm_i2c_init(struct npcm_i2c *bus, struct platform_device *pdev) +{ + u32 clk_freq_hz; + int ret; + + /* Initialize the internal data structures */ + bus->state = I2C_DISABLE; + bus->master_or_slave = I2C_SLAVE; + bus->int_time_stamp = 0; + + ret = device_property_read_u32(&pdev->dev, "clock-frequency", + &clk_freq_hz); + if (ret) { + dev_info(&pdev->dev, "Could not read clock-frequency property"); + clk_freq_hz = I2C_MAX_STANDARD_MODE_FREQ; + } + + ret = npcm_i2c_init_module(bus, I2C_MASTER, clk_freq_hz); + if (ret) { + dev_err(&pdev->dev, "npcm_i2c_init_module failed\n"); + return ret; + } + + return 0; +} + +static irqreturn_t npcm_i2c_bus_irq(int irq, void *dev_id) +{ + struct npcm_i2c *bus = dev_id; + + if (npcm_i2c_is_master(bus)) + bus->master_or_slave = I2C_MASTER; + + if (bus->master_or_slave == I2C_MASTER) { + bus->int_time_stamp = jiffies; + if (!npcm_i2c_int_master_handler(bus)) + return IRQ_HANDLED; + } + return IRQ_NONE; +} + +static bool npcm_i2c_master_start_xmit(struct npcm_i2c *bus, + u8 slave_addr, u16 nwrite, u16 nread, + u8 *write_data, u8 *read_data, + bool use_PEC, bool use_read_block) +{ + if (bus->state != I2C_IDLE) { + bus->cmd_err = -EBUSY; + return false; + } + bus->dest_addr = slave_addr << 1; + bus->wr_buf = write_data; + bus->wr_size = nwrite; + bus->wr_ind = 0; + bus->rd_buf = read_data; + bus->rd_size = nread; + bus->rd_ind = 0; + bus->PEC_use = 0; + + /* for tx PEC is appended to buffer from i2c IF. 
PEC flag is ignored */ + if (nread) + bus->PEC_use = use_PEC; + + bus->read_block_use = use_read_block; + if (nread && !nwrite) + bus->operation = I2C_READ_OPER; + else + bus->operation = I2C_WRITE_OPER; + if (bus->fifo_use) { + u8 i2cfif_cts; + + npcm_i2c_select_bank(bus, I2C_BANK_1); + /* clear FIFO and relevant status bits. */ + i2cfif_cts = ioread8(bus->reg + NPCM_I2CFIF_CTS); + i2cfif_cts &= ~NPCM_I2CFIF_CTS_SLVRSTR; + i2cfif_cts |= NPCM_I2CFIF_CTS_CLR_FIFO; + iowrite8(i2cfif_cts, bus->reg + NPCM_I2CFIF_CTS); + } + + bus->state = I2C_IDLE; + npcm_i2c_stall_after_start(bus, true); + npcm_i2c_master_start(bus); + return true; +} + +static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, + int num) +{ + struct npcm_i2c *bus = container_of(adap, struct npcm_i2c, adap); + struct i2c_msg *msg0, *msg1; + unsigned long time_left, flags; + u16 nwrite, nread; + u8 *write_data, *read_data; + u8 slave_addr; + int timeout; + int ret = 0; + bool read_block = false; + bool read_PEC = false; + u8 bus_busy; + unsigned long timeout_usec; + + if (bus->state == I2C_DISABLE) { + dev_err(bus->dev, "I2C%d module is disabled", bus->num); + return -EINVAL; + } + + msg0 = &msgs[0]; + slave_addr = msg0->addr; + if (msg0->flags & I2C_M_RD) { /* read */ + nwrite = 0; + write_data = NULL; + read_data = msg0->buf; + if (msg0->flags & I2C_M_RECV_LEN) { + nread = 1; + read_block = true; + if (msg0->flags & I2C_CLIENT_PEC) + read_PEC = true; + } else { + nread = msg0->len; + } + } else { /* write */ + nwrite = msg0->len; + write_data = msg0->buf; + nread = 0; + read_data = NULL; + if (num == 2) { + msg1 = &msgs[1]; + read_data = msg1->buf; + if (msg1->flags & I2C_M_RECV_LEN) { + nread = 1; + read_block = true; + if (msg1->flags & I2C_CLIENT_PEC) + read_PEC = true; + } else { + nread = msg1->len; + read_block = false; + } + } + } + + /* Adaptive TimeOut: astimated time in usec + 100% margin */ + timeout_usec = (2 * 10000 / bus->bus_freq) * (2 + nread + nwrite); + timeout = max(msecs_to_jiffies(35), usecs_to_jiffies(timeout_usec)); + if (nwrite >= 32 * 1024 || nread >= 32 * 1024) { + dev_err(bus->dev, "i2c%d buffer too big\n", bus->num); + return -EINVAL; + } + + time_left = jiffies + msecs_to_jiffies(DEFAULT_STALL_COUNT) + 1; + do { + /* + * we must clear slave address immediately when the bus is not + * busy, so we spinlock it, but we don't keep the lock for the + * entire while since it is too long. 
+ */ + spin_lock_irqsave(&bus->lock, flags); + bus_busy = ioread8(bus->reg + NPCM_I2CCST) & NPCM_I2CCST_BB; + spin_unlock_irqrestore(&bus->lock, flags); + + } while (time_is_after_jiffies(time_left) && bus_busy); + + if (bus_busy) { + iowrite8(NPCM_I2CCST_BB, bus->reg + NPCM_I2CCST); + npcm_i2c_reset(bus); + i2c_recover_bus(adap); + return -EAGAIN; + } + + npcm_i2c_init_params(bus); + bus->dest_addr = slave_addr; + bus->msgs = msgs; + bus->msgs_num = num; + bus->cmd_err = 0; + bus->read_block_use = read_block; + + reinit_completion(&bus->cmd_complete); + if (!npcm_i2c_master_start_xmit(bus, slave_addr, nwrite, nread, + write_data, read_data, read_PEC, + read_block)) + ret = -EBUSY; + + if (ret != -EBUSY) { + time_left = wait_for_completion_timeout(&bus->cmd_complete, + timeout); + + if (time_left == 0) { + if (bus->timeout_cnt < ULLONG_MAX) + bus->timeout_cnt++; + if (bus->master_or_slave == I2C_MASTER) { + i2c_recover_bus(adap); + bus->cmd_err = -EIO; + bus->state = I2C_IDLE; + } + } + } + ret = bus->cmd_err; + + /* if there was BER, check if need to recover the bus: */ + if (bus->cmd_err == -EAGAIN) + ret = i2c_recover_bus(adap); + + return bus->cmd_err; +} + +static u32 npcm_i2c_functionality(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | + I2C_FUNC_SMBUS_EMUL | + I2C_FUNC_SMBUS_BLOCK_DATA | + I2C_FUNC_SMBUS_PEC; +} + +static const struct i2c_adapter_quirks npcm_i2c_quirks = { + .max_read_len = 32768, + .max_write_len = 32768, + .flags = I2C_AQ_COMB_WRITE_THEN_READ, +}; + +static const struct i2c_algorithm npcm_i2c_algo = { + .master_xfer = npcm_i2c_master_xfer, + .functionality = npcm_i2c_functionality, +}; + +/* i2c debugfs directory: used to keep health monitor of i2c devices */ +static struct dentry *npcm_i2c_debugfs_dir; + +static void npcm_i2c_init_debugfs(struct platform_device *pdev, + struct npcm_i2c *bus) +{ + struct dentry *d; + + if (!npcm_i2c_debugfs_dir) + return; + d = debugfs_create_dir(dev_name(&pdev->dev), npcm_i2c_debugfs_dir); + if (IS_ERR_OR_NULL(d)) + return; + debugfs_create_u64("ber_cnt", 0444, d, &bus->ber_cnt); + debugfs_create_u64("nack_cnt", 0444, d, &bus->nack_cnt); + debugfs_create_u64("rec_succ_cnt", 0444, d, &bus->rec_succ_cnt); + debugfs_create_u64("rec_fail_cnt", 0444, d, &bus->rec_fail_cnt); + debugfs_create_u64("timeout_cnt", 0444, d, &bus->timeout_cnt); + + bus->debugfs = d; +} + +static int npcm_i2c_probe_bus(struct platform_device *pdev) +{ + struct npcm_i2c *bus; + struct i2c_adapter *adap; + struct clk *i2c_clk; + static struct regmap *gcr_regmap; + static struct regmap *clk_regmap; + int irq; + int ret; + + bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL); + if (!bus) + return -ENOMEM; + + bus->dev = &pdev->dev; + + bus->num = of_alias_get_id(pdev->dev.of_node, "i2c"); + /* core clk must be acquired to calculate module timing settings */ + i2c_clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(i2c_clk)) + return PTR_ERR(i2c_clk); + bus->apb_clk = clk_get_rate(i2c_clk); + + gcr_regmap = syscon_regmap_lookup_by_compatible("nuvoton,npcm750-gcr"); + if (IS_ERR(gcr_regmap)) + return IS_ERR(gcr_regmap); + regmap_write(gcr_regmap, NPCM_I2CSEGCTL, NPCM_I2CSEGCTL_INIT_VAL); + + clk_regmap = syscon_regmap_lookup_by_compatible("nuvoton,npcm750-clk"); + if (IS_ERR(clk_regmap)) + return IS_ERR(clk_regmap); + + bus->reg = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(bus->reg)) + return PTR_ERR((bus)->reg); + + spin_lock_init(&bus->lock); + init_completion(&bus->cmd_complete); + + adap = &bus->adap; + adap->owner = THIS_MODULE; + 
adap->retries = 3; + adap->timeout = HZ; + adap->algo = &npcm_i2c_algo; + adap->quirks = &npcm_i2c_quirks; + adap->algo_data = bus; + adap->dev.parent = &pdev->dev; + adap->dev.of_node = pdev->dev.of_node; + adap->nr = pdev->id; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + ret = devm_request_irq(bus->dev, irq, npcm_i2c_bus_irq, 0, + dev_name(bus->dev), bus); + if (ret) + return ret; + + ret = __npcm_i2c_init(bus, pdev); + if (ret) + return ret; + + npcm_i2c_recovery_init(adap); + + i2c_set_adapdata(adap, bus); + + snprintf(bus->adap.name, sizeof(bus->adap.name), "npcm_i2c_%d", + bus->num); + ret = i2c_add_numbered_adapter(&bus->adap); + if (ret) + return ret; + + platform_set_drvdata(pdev, bus); + npcm_i2c_init_debugfs(pdev, bus); + return 0; +} + +static int npcm_i2c_remove_bus(struct platform_device *pdev) +{ + unsigned long lock_flags; + struct npcm_i2c *bus = platform_get_drvdata(pdev); + + debugfs_remove_recursive(bus->debugfs); + spin_lock_irqsave(&bus->lock, lock_flags); + npcm_i2c_disable(bus); + spin_unlock_irqrestore(&bus->lock, lock_flags); + i2c_del_adapter(&bus->adap); + return 0; +} + +static const struct of_device_id npcm_i2c_bus_of_table[] = { + { .compatible = "nuvoton,npcm750-i2c", }, + {} +}; +MODULE_DEVICE_TABLE(of, npcm_i2c_bus_of_table); + +static struct platform_driver npcm_i2c_bus_driver = { + .probe = npcm_i2c_probe_bus, + .remove = npcm_i2c_remove_bus, + .driver = { + .name = "nuvoton-i2c", + .of_match_table = npcm_i2c_bus_of_table, + } +}; + +static int __init npcm_i2c_init(void) +{ + npcm_i2c_debugfs_dir = debugfs_create_dir("npcm_i2c", NULL); + platform_driver_register(&npcm_i2c_bus_driver); + return 0; +} +module_init(npcm_i2c_init); + +static void __exit npcm_i2c_exit(void) +{ + platform_driver_unregister(&npcm_i2c_bus_driver); + debugfs_remove_recursive(npcm_i2c_debugfs_dir); +} +module_exit(npcm_i2c_exit); + +MODULE_AUTHOR("Avi Fishman "); +MODULE_AUTHOR("Tali Perry "); +MODULE_AUTHOR("Tyrone Ting "); +MODULE_DESCRIPTION("Nuvoton I2C Bus Driver"); +MODULE_LICENSE("GPL v2"); -- GitLab From f54736925a4f83a67588381d0ead69d19323b526 Mon Sep 17 00:00:00 2001 From: Tali Perry Date: Wed, 27 May 2020 23:08:20 +0300 Subject: [PATCH 1056/1971] i2c: npcm7xx: Add support for slave mode for Nuvoton Add support for slave mode for Nuvoton NPCM BMC I2C controller driver. Signed-off-by: Tali Perry Reviewed-by: Andy Shevchenko Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-npcm7xx.c | 608 ++++++++++++++++++++++++++++++- 1 file changed, 607 insertions(+), 1 deletion(-) diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c index 018abf8dda7e8..a8e75c3484f12 100644 --- a/drivers/i2c/busses/i2c-npcm7xx.c +++ b/drivers/i2c/busses/i2c-npcm7xx.c @@ -71,6 +71,24 @@ enum i2c_state { I2C_STOP_PENDING, }; +#if IS_ENABLED(CONFIG_I2C_SLAVE) +/* Module supports setting multiple own slave addresses */ +enum i2c_addr { + I2C_SLAVE_ADDR1 = 0, + I2C_SLAVE_ADDR2, + I2C_SLAVE_ADDR3, + I2C_SLAVE_ADDR4, + I2C_SLAVE_ADDR5, + I2C_SLAVE_ADDR6, + I2C_SLAVE_ADDR7, + I2C_SLAVE_ADDR8, + I2C_SLAVE_ADDR9, + I2C_SLAVE_ADDR10, + I2C_GC_ADDR, + I2C_ARP_ADDR, +}; +#endif + /* init register and default value required to enable module */ #define NPCM_I2CSEGCTL 0xE4 #define NPCM_I2CSEGCTL_INIT_VAL 0x0333F000 @@ -98,6 +116,21 @@ enum i2c_state { #define NPCM_I2CADDR6 0x16 #define NPCM_I2CADDR10 0x17 +#if IS_ENABLED(CONFIG_I2C_SLAVE) +/* + * npcm_i2caddr array: + * The module supports having multiple own slave addresses. 
+ * Since the addr regs are sprinkled all over the address space, + * use this array to get the address or each register. + */ +#define I2C_NUM_OWN_ADDR 10 +const int npcm_i2caddr[I2C_NUM_OWN_ADDR] = { + NPCM_I2CADDR1, NPCM_I2CADDR2, NPCM_I2CADDR3, NPCM_I2CADDR4, + NPCM_I2CADDR5, NPCM_I2CADDR6, NPCM_I2CADDR7, NPCM_I2CADDR8, + NPCM_I2CADDR9, NPCM_I2CADDR10, +}; +#endif + #define NPCM_I2CCTL4 0x1A #define NPCM_I2CCTL5 0x1B #define NPCM_I2CSCLLT 0x1C /* SCL Low Time */ @@ -265,6 +298,16 @@ struct npcm_i2c { bool read_block_use; unsigned long int_time_stamp; unsigned long bus_freq; /* in Hz */ +#if IS_ENABLED(CONFIG_I2C_SLAVE) + u8 own_slave_addr; + struct i2c_client *slave; + int slv_rd_size; + int slv_rd_ind; + int slv_wr_size; + int slv_wr_ind; + u8 slv_rd_buf[I2C_HW_FIFO_SIZE]; + u8 slv_wr_buf[I2C_HW_FIFO_SIZE]; +#endif struct dentry *debugfs; /* debugfs device directory */ u64 ber_cnt; u64 rec_succ_cnt; @@ -296,6 +339,10 @@ static void npcm_i2c_init_params(struct npcm_i2c *bus) bus->int_time_stamp = 0; bus->PEC_use = false; bus->PEC_mask = 0; +#if IS_ENABLED(CONFIG_I2C_SLAVE) + if (bus->slave) + bus->master_or_slave = I2C_SLAVE; +#endif } static inline void npcm_i2c_wr_byte(struct npcm_i2c *bus, u8 data) @@ -341,6 +388,18 @@ static void npcm_i2c_disable(struct npcm_i2c *bus) { u8 i2cctl2; +#if IS_ENABLED(CONFIG_I2C_SLAVE) + int i; + + /* select bank 0 for I2C addresses */ + npcm_i2c_select_bank(bus, I2C_BANK_0); + + /* Slave addresses removal */ + for (i = I2C_SLAVE_ADDR1; i < I2C_NUM_OWN_ADDR; i++) + iowrite8(0, bus->reg + npcm_i2caddr[i]); + + npcm_i2c_select_bank(bus, I2C_BANK_1); +#endif /* Disable module */ i2cctl2 = ioread8(bus->reg + NPCM_I2CCTL2); i2cctl2 = i2cctl2 & ~I2CCTL2_ENABLE; @@ -504,6 +563,61 @@ static inline void npcm_i2c_nack(struct npcm_i2c *bus) iowrite8(val, bus->reg + NPCM_I2CCTL1); } +#if IS_ENABLED(CONFIG_I2C_SLAVE) +static void npcm_i2c_slave_int_enable(struct npcm_i2c *bus, bool enable) +{ + u8 i2cctl1; + + /* enable interrupt on slave match: */ + i2cctl1 = ioread8(bus->reg + NPCM_I2CCTL1); + i2cctl1 &= ~NPCM_I2CCTL1_RWS; + if (enable) + i2cctl1 |= NPCM_I2CCTL1_NMINTE; + else + i2cctl1 &= ~NPCM_I2CCTL1_NMINTE; + iowrite8(i2cctl1, bus->reg + NPCM_I2CCTL1); +} + +static int npcm_i2c_slave_enable(struct npcm_i2c *bus, enum i2c_addr addr_type, + u8 addr, bool enable) +{ + u8 i2cctl1; + u8 i2cctl3; + u8 sa_reg; + + sa_reg = (addr & 0x7F) | FIELD_PREP(NPCM_I2CADDR_SAEN, enable); + if (addr_type == I2C_GC_ADDR) { + i2cctl1 = ioread8(bus->reg + NPCM_I2CCTL1); + if (enable) + i2cctl1 |= NPCM_I2CCTL1_GCMEN; + else + i2cctl1 &= ~NPCM_I2CCTL1_GCMEN; + iowrite8(i2cctl1, bus->reg + NPCM_I2CCTL1); + return 0; + } + if (addr_type == I2C_ARP_ADDR) { + i2cctl3 = ioread8(bus->reg + NPCM_I2CCTL3); + if (enable) + i2cctl3 |= I2CCTL3_ARPMEN; + else + i2cctl3 &= ~I2CCTL3_ARPMEN; + iowrite8(i2cctl3, bus->reg + NPCM_I2CCTL3); + return 0; + } + if (addr_type >= I2C_ARP_ADDR) + return -EFAULT; + /* select bank 0 for address 3 to 10 */ + if (addr_type > I2C_SLAVE_ADDR2) + npcm_i2c_select_bank(bus, I2C_BANK_0); + /* Set and enable the address */ + iowrite8(sa_reg, bus->reg + npcm_i2caddr[addr_type]); + npcm_i2c_slave_int_enable(bus, enable); + if (addr_type > I2C_SLAVE_ADDR2) + npcm_i2c_select_bank(bus, I2C_BANK_1); + return 0; +} +#endif + static void npcm_i2c_reset(struct npcm_i2c *bus) { /* @@ -511,6 +625,9 @@ static void npcm_i2c_reset(struct npcm_i2c *bus) * is disabled. 
*/ u8 i2cctl1; +#if IS_ENABLED(CONFIG_I2C_SLAVE) + u8 addr; +#endif i2cctl1 = ioread8(bus->reg + NPCM_I2CCTL1); @@ -531,6 +648,13 @@ static void npcm_i2c_reset(struct npcm_i2c *bus) /* Clear all fifo bits: */ iowrite8(NPCM_I2CFIF_CTS_CLR_FIFO, bus->reg + NPCM_I2CFIF_CTS); +#if IS_ENABLED(CONFIG_I2C_SLAVE) + if (bus->slave) { + addr = bus->slave->addr; + npcm_i2c_slave_enable(bus, I2C_SLAVE_ADDR1, addr, true); + } +#endif + bus->state = I2C_IDLE; } @@ -596,6 +720,10 @@ static void npcm_i2c_callback(struct npcm_i2c *bus, } bus->operation = I2C_NO_OPER; +#if IS_ENABLED(CONFIG_I2C_SLAVE) + if (bus->slave) + bus->master_or_slave = I2C_SLAVE; +#endif } static u8 npcm_i2c_fifo_usage(struct npcm_i2c *bus) @@ -707,6 +835,459 @@ static void npcm_i2c_master_abort(struct npcm_i2c *bus) npcm_i2c_clear_master_status(bus); } +#if IS_ENABLED(CONFIG_I2C_SLAVE) +static u8 npcm_i2c_get_slave_addr(struct npcm_i2c *bus, enum i2c_addr addr_type) +{ + u8 slave_add; + + /* select bank 0 for address 3 to 10 */ + if (addr_type > I2C_SLAVE_ADDR2) + npcm_i2c_select_bank(bus, I2C_BANK_0); + + slave_add = ioread8(bus->reg + npcm_i2caddr[(int)addr_type]); + + if (addr_type > I2C_SLAVE_ADDR2) + npcm_i2c_select_bank(bus, I2C_BANK_1); + + return slave_add; +} + +static int npcm_i2c_remove_slave_addr(struct npcm_i2c *bus, u8 slave_add) +{ + int i; + + /* Set the enable bit */ + slave_add |= 0x80; + npcm_i2c_select_bank(bus, I2C_BANK_0); + for (i = I2C_SLAVE_ADDR1; i < I2C_NUM_OWN_ADDR; i++) { + if (ioread8(bus->reg + npcm_i2caddr[i]) == slave_add) + iowrite8(0, bus->reg + npcm_i2caddr[i]); + } + npcm_i2c_select_bank(bus, I2C_BANK_1); + return 0; +} + +static void npcm_i2c_write_fifo_slave(struct npcm_i2c *bus, u16 max_bytes) +{ + /* + * Fill the FIFO, while the FIFO is not full and there are more bytes + * to write + */ + npcm_i2c_clear_fifo_int(bus); + npcm_i2c_clear_tx_fifo(bus); + iowrite8(0, bus->reg + NPCM_I2CTXF_CTL); + while (max_bytes-- && I2C_HW_FIFO_SIZE != npcm_i2c_fifo_usage(bus)) { + if (bus->slv_wr_size <= 0) + break; + bus->slv_wr_ind = bus->slv_wr_ind % I2C_HW_FIFO_SIZE; + npcm_i2c_wr_byte(bus, bus->slv_wr_buf[bus->slv_wr_ind]); + bus->slv_wr_ind++; + bus->slv_wr_ind = bus->slv_wr_ind % I2C_HW_FIFO_SIZE; + bus->slv_wr_size--; + } +} + +static void npcm_i2c_read_fifo_slave(struct npcm_i2c *bus, u8 bytes_in_fifo) +{ + u8 data; + + if (!bus->slave) + return; + + while (bytes_in_fifo--) { + data = npcm_i2c_rd_byte(bus); + + bus->slv_rd_ind = bus->slv_rd_ind % I2C_HW_FIFO_SIZE; + bus->slv_rd_buf[bus->slv_rd_ind] = data; + bus->slv_rd_ind++; + + /* 1st byte is length in block protocol: */ + if (bus->slv_rd_ind == 1 && bus->read_block_use) + bus->slv_rd_size = data + bus->PEC_use + 1; + } +} + +static int npcm_i2c_slave_get_wr_buf(struct npcm_i2c *bus) +{ + int i; + u8 value; + int ind; + int ret = bus->slv_wr_ind; + + /* fill a cyclic buffer */ + for (i = 0; i < I2C_HW_FIFO_SIZE; i++) { + if (bus->slv_wr_size >= I2C_HW_FIFO_SIZE) + break; + i2c_slave_event(bus->slave, I2C_SLAVE_READ_REQUESTED, &value); + ind = (bus->slv_wr_ind + bus->slv_wr_size) % I2C_HW_FIFO_SIZE; + bus->slv_wr_buf[ind] = value; + bus->slv_wr_size++; + i2c_slave_event(bus->slave, I2C_SLAVE_READ_PROCESSED, &value); + } + return I2C_HW_FIFO_SIZE - ret; +} + +static void npcm_i2c_slave_send_rd_buf(struct npcm_i2c *bus) +{ + int i; + + for (i = 0; i < bus->slv_rd_ind; i++) + i2c_slave_event(bus->slave, I2C_SLAVE_WRITE_RECEIVED, + &bus->slv_rd_buf[i]); + /* + * once we send bytes up, need to reset the counter of the wr buf + * got data from master 
(new offset in device), ignore wr fifo: + */ + if (bus->slv_rd_ind) { + bus->slv_wr_size = 0; + bus->slv_wr_ind = 0; + } + + bus->slv_rd_ind = 0; + bus->slv_rd_size = bus->adap.quirks->max_read_len; + + npcm_i2c_clear_fifo_int(bus); + npcm_i2c_clear_rx_fifo(bus); +} + +static void npcm_i2c_slave_receive(struct npcm_i2c *bus, u16 nread, + u8 *read_data) +{ + bus->state = I2C_OPER_STARTED; + bus->operation = I2C_READ_OPER; + bus->slv_rd_size = nread; + bus->slv_rd_ind = 0; + + iowrite8(0, bus->reg + NPCM_I2CTXF_CTL); + iowrite8(I2C_HW_FIFO_SIZE, bus->reg + NPCM_I2CRXF_CTL); + npcm_i2c_clear_tx_fifo(bus); + npcm_i2c_clear_rx_fifo(bus); +} + +static void npcm_i2c_slave_xmit(struct npcm_i2c *bus, u16 nwrite, + u8 *write_data) +{ + if (nwrite == 0) + return; + + bus->state = I2C_OPER_STARTED; + bus->operation = I2C_WRITE_OPER; + + /* get the next buffer */ + npcm_i2c_slave_get_wr_buf(bus); + npcm_i2c_write_fifo_slave(bus, nwrite); +} + +/* + * npcm_i2c_slave_wr_buf_sync: + * currently slave IF only supports single byte operations. + * in order to utilyze the npcm HW FIFO, the driver will ask for 16 bytes + * at a time, pack them in buffer, and then transmit them all together + * to the FIFO and onward to the bus. + * NACK on read will be once reached to bus->adap->quirks->max_read_len. + * sending a NACK wherever the backend requests for it is not supported. + * the next two functions allow reading to local buffer before writing it all + * to the HW FIFO. + */ +static void npcm_i2c_slave_wr_buf_sync(struct npcm_i2c *bus) +{ + int left_in_fifo; + + left_in_fifo = FIELD_GET(NPCM_I2CTXF_STS_TX_BYTES, + ioread8(bus->reg + NPCM_I2CTXF_STS)); + + /* fifo already full: */ + if (left_in_fifo >= I2C_HW_FIFO_SIZE || + bus->slv_wr_size >= I2C_HW_FIFO_SIZE) + return; + + /* update the wr fifo index back to the untransmitted bytes: */ + bus->slv_wr_ind = bus->slv_wr_ind - left_in_fifo; + bus->slv_wr_size = bus->slv_wr_size + left_in_fifo; + + if (bus->slv_wr_ind < 0) + bus->slv_wr_ind += I2C_HW_FIFO_SIZE; +} + +static void npcm_i2c_slave_rd_wr(struct npcm_i2c *bus) +{ + if (NPCM_I2CST_XMIT & ioread8(bus->reg + NPCM_I2CST)) { + /* + * Slave got an address match with direction bit 1 so it should + * transmit data. Write till the master will NACK + */ + bus->operation = I2C_WRITE_OPER; + npcm_i2c_slave_xmit(bus, bus->adap.quirks->max_write_len, + bus->slv_wr_buf); + } else { + /* + * Slave got an address match with direction bit 0 so it should + * receive data. + * this module does not support saying no to bytes. + * it will always ACK. + */ + bus->operation = I2C_READ_OPER; + npcm_i2c_read_fifo_slave(bus, npcm_i2c_fifo_usage(bus)); + bus->stop_ind = I2C_SLAVE_RCV_IND; + npcm_i2c_slave_send_rd_buf(bus); + npcm_i2c_slave_receive(bus, bus->adap.quirks->max_read_len, + bus->slv_rd_buf); + } +} + +static irqreturn_t npcm_i2c_int_slave_handler(struct npcm_i2c *bus) +{ + u8 val; + irqreturn_t ret = IRQ_NONE; + u8 i2cst = ioread8(bus->reg + NPCM_I2CST); + + /* Slave: A NACK has occurred */ + if (NPCM_I2CST_NEGACK & i2cst) { + bus->stop_ind = I2C_NACK_IND; + npcm_i2c_slave_wr_buf_sync(bus); + if (bus->fifo_use) + /* clear the FIFO */ + iowrite8(NPCM_I2CFIF_CTS_CLR_FIFO, + bus->reg + NPCM_I2CFIF_CTS); + + /* In slave write, NACK is OK, otherwise it is a problem */ + bus->stop_ind = I2C_NO_STATUS_IND; + bus->operation = I2C_NO_OPER; + bus->own_slave_addr = 0xFF; + + /* + * Slave has to wait for STOP to decide this is the end + * of the transaction. 
tx is not yet considered as done + */ + iowrite8(NPCM_I2CST_NEGACK, bus->reg + NPCM_I2CST); + + ret = IRQ_HANDLED; + } + + /* Slave mode: a Bus Error (BER) has been identified */ + if (NPCM_I2CST_BER & i2cst) { + /* + * Check whether bus arbitration or Start or Stop during data + * xfer bus arbitration problem should not result in recovery + */ + bus->stop_ind = I2C_BUS_ERR_IND; + + /* wait for bus busy before clear fifo */ + iowrite8(NPCM_I2CFIF_CTS_CLR_FIFO, bus->reg + NPCM_I2CFIF_CTS); + + bus->state = I2C_IDLE; + + /* + * in BER case we might get 2 interrupts: one for slave one for + * master ( for a channel which is master\slave switching) + */ + if (completion_done(&bus->cmd_complete) == false) { + bus->cmd_err = -EIO; + complete(&bus->cmd_complete); + } + bus->own_slave_addr = 0xFF; + iowrite8(NPCM_I2CST_BER, bus->reg + NPCM_I2CST); + ret = IRQ_HANDLED; + } + + /* A Slave Stop Condition has been identified */ + if (NPCM_I2CST_SLVSTP & i2cst) { + u8 bytes_in_fifo = npcm_i2c_fifo_usage(bus); + + bus->stop_ind = I2C_SLAVE_DONE_IND; + + if (bus->operation == I2C_READ_OPER) + npcm_i2c_read_fifo_slave(bus, bytes_in_fifo); + + /* if the buffer is empty nothing will be sent */ + npcm_i2c_slave_send_rd_buf(bus); + + /* Slave done transmitting or receiving */ + bus->stop_ind = I2C_NO_STATUS_IND; + + /* + * Note, just because we got here, it doesn't mean we through + * away the wr buffer. + * we keep it until the next received offset. + */ + bus->operation = I2C_NO_OPER; + bus->own_slave_addr = 0xFF; + i2c_slave_event(bus->slave, I2C_SLAVE_STOP, 0); + iowrite8(NPCM_I2CST_SLVSTP, bus->reg + NPCM_I2CST); + if (bus->fifo_use) { + npcm_i2c_clear_fifo_int(bus); + npcm_i2c_clear_rx_fifo(bus); + npcm_i2c_clear_tx_fifo(bus); + + iowrite8(NPCM_I2CFIF_CTS_CLR_FIFO, + bus->reg + NPCM_I2CFIF_CTS); + } + bus->state = I2C_IDLE; + ret = IRQ_HANDLED; + } + + /* restart condition occurred and Rx-FIFO was not empty */ + if (bus->fifo_use && FIELD_GET(NPCM_I2CFIF_CTS_SLVRSTR, + ioread8(bus->reg + NPCM_I2CFIF_CTS))) { + bus->stop_ind = I2C_SLAVE_RESTART_IND; + bus->master_or_slave = I2C_SLAVE; + if (bus->operation == I2C_READ_OPER) + npcm_i2c_read_fifo_slave(bus, npcm_i2c_fifo_usage(bus)); + bus->operation = I2C_WRITE_OPER; + iowrite8(0, bus->reg + NPCM_I2CRXF_CTL); + val = NPCM_I2CFIF_CTS_CLR_FIFO | NPCM_I2CFIF_CTS_SLVRSTR | + NPCM_I2CFIF_CTS_RXF_TXE; + iowrite8(val, bus->reg + NPCM_I2CFIF_CTS); + npcm_i2c_slave_rd_wr(bus); + ret = IRQ_HANDLED; + } + + /* A Slave Address Match has been identified */ + if (NPCM_I2CST_NMATCH & i2cst) { + u8 info = 0; + + /* Address match automatically implies slave mode */ + bus->master_or_slave = I2C_SLAVE; + npcm_i2c_clear_fifo_int(bus); + npcm_i2c_clear_rx_fifo(bus); + npcm_i2c_clear_tx_fifo(bus); + iowrite8(0, bus->reg + NPCM_I2CTXF_CTL); + iowrite8(I2C_HW_FIFO_SIZE, bus->reg + NPCM_I2CRXF_CTL); + if (NPCM_I2CST_XMIT & i2cst) { + bus->operation = I2C_WRITE_OPER; + } else { + i2c_slave_event(bus->slave, I2C_SLAVE_WRITE_REQUESTED, + &info); + bus->operation = I2C_READ_OPER; + } + if (bus->own_slave_addr == 0xFF) { + /* Check which type of address match */ + val = ioread8(bus->reg + NPCM_I2CCST); + if (NPCM_I2CCST_MATCH & val) { + u16 addr; + enum i2c_addr eaddr; + u8 i2ccst2; + u8 i2ccst3; + + i2ccst3 = ioread8(bus->reg + NPCM_I2CCST3); + i2ccst2 = ioread8(bus->reg + NPCM_I2CCST2); + + /* + * the i2c module can response to 10 own SA. + * check which one was addressed by the master. + * repond to the first one. 
+ */ + addr = ((i2ccst3 & 0x07) << 7) | + (i2ccst2 & 0x7F); + info = ffs(addr); + eaddr = (enum i2c_addr)info; + addr = npcm_i2c_get_slave_addr(bus, eaddr); + addr &= 0x7F; + bus->own_slave_addr = addr; + if (bus->PEC_mask & BIT(info)) + bus->PEC_use = true; + else + bus->PEC_use = false; + } else { + if (NPCM_I2CCST_GCMATCH & val) + bus->own_slave_addr = 0; + if (NPCM_I2CCST_ARPMATCH & val) + bus->own_slave_addr = 0x61; + } + } else { + /* + * Slave match can happen in two options: + * 1. Start, SA, read (slave read without further ado) + * 2. Start, SA, read, data, restart, SA, read, ... + * (slave read in fragmented mode) + * 3. Start, SA, write, data, restart, SA, read, .. + * (regular write-read mode) + */ + if ((bus->state == I2C_OPER_STARTED && + bus->operation == I2C_READ_OPER && + bus->stop_ind == I2C_SLAVE_XMIT_IND) || + bus->stop_ind == I2C_SLAVE_RCV_IND) { + /* slave tx after slave rx w/o STOP */ + bus->stop_ind = I2C_SLAVE_RESTART_IND; + } + } + + if (NPCM_I2CST_XMIT & i2cst) + bus->stop_ind = I2C_SLAVE_XMIT_IND; + else + bus->stop_ind = I2C_SLAVE_RCV_IND; + bus->state = I2C_SLAVE_MATCH; + npcm_i2c_slave_rd_wr(bus); + iowrite8(NPCM_I2CST_NMATCH, bus->reg + NPCM_I2CST); + ret = IRQ_HANDLED; + } + + /* Slave SDA status is set - tx or rx */ + if ((NPCM_I2CST_SDAST & i2cst) || + (bus->fifo_use && + (npcm_i2c_tx_fifo_empty(bus) || npcm_i2c_rx_fifo_full(bus)))) { + npcm_i2c_slave_rd_wr(bus); + iowrite8(NPCM_I2CST_SDAST, bus->reg + NPCM_I2CST); + ret = IRQ_HANDLED; + } /* SDAST */ + + return ret; +} + +static int npcm_i2c_reg_slave(struct i2c_client *client) +{ + unsigned long lock_flags; + struct npcm_i2c *bus = i2c_get_adapdata(client->adapter); + + bus->slave = client; + + if (!bus->slave) + return -EINVAL; + + if (client->flags & I2C_CLIENT_TEN) + return -EAFNOSUPPORT; + + spin_lock_irqsave(&bus->lock, lock_flags); + + npcm_i2c_init_params(bus); + bus->slv_rd_size = 0; + bus->slv_wr_size = 0; + bus->slv_rd_ind = 0; + bus->slv_wr_ind = 0; + if (client->flags & I2C_CLIENT_PEC) + bus->PEC_use = true; + + dev_info(bus->dev, "i2c%d register slave SA=0x%x, PEC=%d\n", bus->num, + client->addr, bus->PEC_use); + + npcm_i2c_slave_enable(bus, I2C_SLAVE_ADDR1, client->addr, true); + npcm_i2c_clear_fifo_int(bus); + npcm_i2c_clear_rx_fifo(bus); + npcm_i2c_clear_tx_fifo(bus); + npcm_i2c_slave_int_enable(bus, true); + + spin_unlock_irqrestore(&bus->lock, lock_flags); + return 0; +} + +static int npcm_i2c_unreg_slave(struct i2c_client *client) +{ + struct npcm_i2c *bus = client->adapter->algo_data; + unsigned long lock_flags; + + spin_lock_irqsave(&bus->lock, lock_flags); + if (!bus->slave) { + spin_unlock_irqrestore(&bus->lock, lock_flags); + return -EINVAL; + } + npcm_i2c_slave_int_enable(bus, false); + npcm_i2c_remove_slave_addr(bus, client->addr); + bus->slave = NULL; + spin_unlock_irqrestore(&bus->lock, lock_flags); + return 0; +} +#endif /* CONFIG_I2C_SLAVE */ + static void npcm_i2c_master_fifo_read(struct npcm_i2c *bus) { int rcount; @@ -1372,6 +1953,9 @@ static int __npcm_i2c_init(struct npcm_i2c *bus, struct platform_device *pdev) bus->state = I2C_DISABLE; bus->master_or_slave = I2C_SLAVE; bus->int_time_stamp = 0; +#if IS_ENABLED(CONFIG_I2C_SLAVE) + bus->slave = NULL; +#endif ret = device_property_read_u32(&pdev->dev, "clock-frequency", &clk_freq_hz); @@ -1401,6 +1985,12 @@ static irqreturn_t npcm_i2c_bus_irq(int irq, void *dev_id) if (!npcm_i2c_int_master_handler(bus)) return IRQ_HANDLED; } +#if IS_ENABLED(CONFIG_I2C_SLAVE) + if (bus->slave) { + bus->master_or_slave = I2C_SLAVE; + 
return npcm_i2c_int_slave_handler(bus); + } +#endif return IRQ_NONE; } @@ -1520,6 +2110,11 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, */ spin_lock_irqsave(&bus->lock, flags); bus_busy = ioread8(bus->reg + NPCM_I2CCST) & NPCM_I2CCST_BB; +#if IS_ENABLED(CONFIG_I2C_SLAVE) + if (!bus_busy && bus->slave) + iowrite8((bus->slave->addr & 0x7F), + bus->reg + NPCM_I2CADDR1); +#endif spin_unlock_irqrestore(&bus->lock, flags); } while (time_is_after_jiffies(time_left) && bus_busy); @@ -1564,6 +2159,12 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, if (bus->cmd_err == -EAGAIN) ret = i2c_recover_bus(adap); +#if IS_ENABLED(CONFIG_I2C_SLAVE) + /* reenable slave if it was enabled */ + if (bus->slave) + iowrite8((bus->slave->addr & 0x7F) | NPCM_I2CADDR_SAEN, + bus->reg + NPCM_I2CADDR1); +#endif return bus->cmd_err; } @@ -1572,7 +2173,8 @@ static u32 npcm_i2c_functionality(struct i2c_adapter *adap) return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SMBUS_BLOCK_DATA | - I2C_FUNC_SMBUS_PEC; + I2C_FUNC_SMBUS_PEC | + I2C_FUNC_SLAVE; } static const struct i2c_adapter_quirks npcm_i2c_quirks = { @@ -1584,6 +2186,10 @@ static const struct i2c_adapter_quirks npcm_i2c_quirks = { static const struct i2c_algorithm npcm_i2c_algo = { .master_xfer = npcm_i2c_master_xfer, .functionality = npcm_i2c_functionality, +#if IS_ENABLED(CONFIG_I2C_SLAVE) + .reg_slave = npcm_i2c_reg_slave, + .unreg_slave = npcm_i2c_unreg_slave, +#endif }; /* i2c debugfs directory: used to keep health monitor of i2c devices */ -- GitLab From 646e84deb4496e2a884ca87fa960f1c614da882a Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 19 Feb 2020 09:23:34 -0500 Subject: [PATCH 1057/1971] binfmt_elf: don't bother with __{put,copy_to}_user() Signed-off-by: Al Viro --- fs/binfmt_elf.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 13f25e241ac46..dfccd095a2897 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -202,7 +202,7 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec, size_t len = strlen(k_platform) + 1; u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len); - if (__copy_to_user(u_platform, k_platform, len)) + if (copy_to_user(u_platform, k_platform, len)) return -EFAULT; } @@ -215,7 +215,7 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec, size_t len = strlen(k_base_platform) + 1; u_base_platform = (elf_addr_t __user *)STACK_ALLOC(p, len); - if (__copy_to_user(u_base_platform, k_base_platform, len)) + if (copy_to_user(u_base_platform, k_base_platform, len)) return -EFAULT; } @@ -225,7 +225,7 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec, get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes)); u_rand_bytes = (elf_addr_t __user *) STACK_ALLOC(p, sizeof(k_rand_bytes)); - if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes))) + if (copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes))) return -EFAULT; /* Create the ELF interpreter info */ @@ -308,21 +308,21 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec, return -EFAULT; /* Now, let's put argc (and argv, envp if appropriate) on the stack */ - if (__put_user(argc, sp++)) + if (put_user(argc, sp++)) return -EFAULT; /* Populate list of argv pointers back to argv strings. 
*/ p = mm->arg_end = mm->arg_start; while (argc-- > 0) { size_t len; - if (__put_user((elf_addr_t)p, sp++)) + if (put_user((elf_addr_t)p, sp++)) return -EFAULT; len = strnlen_user((void __user *)p, MAX_ARG_STRLEN); if (!len || len > MAX_ARG_STRLEN) return -EINVAL; p += len; } - if (__put_user(0, sp++)) + if (put_user(0, sp++)) return -EFAULT; mm->arg_end = p; @@ -330,14 +330,14 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec, mm->env_end = mm->env_start = p; while (envc-- > 0) { size_t len; - if (__put_user((elf_addr_t)p, sp++)) + if (put_user((elf_addr_t)p, sp++)) return -EFAULT; len = strnlen_user((void __user *)p, MAX_ARG_STRLEN); if (!len || len > MAX_ARG_STRLEN) return -EINVAL; p += len; } - if (__put_user(0, sp++)) + if (put_user(0, sp++)) return -EFAULT; mm->env_end = p; -- GitLab From 0abb013e2e73c40bd196413b49651b29e1b7dafb Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 19 Feb 2020 09:28:34 -0500 Subject: [PATCH 1058/1971] binfmt_elf_fdpic: don't use __... uaccess primitives Signed-off-by: Al Viro --- fs/binfmt_elf_fdpic.c | 31 +++++++++++++++++++------------ 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index 240f666635437..6dff383dbe947 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -537,7 +537,7 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm, platform_len = strlen(k_platform) + 1; sp -= platform_len; u_platform = (char __user *) sp; - if (__copy_to_user(u_platform, k_platform, platform_len) != 0) + if (copy_to_user(u_platform, k_platform, platform_len) != 0) return -EFAULT; } @@ -552,7 +552,7 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm, platform_len = strlen(k_base_platform) + 1; sp -= platform_len; u_base_platform = (char __user *) sp; - if (__copy_to_user(u_base_platform, k_base_platform, platform_len) != 0) + if (copy_to_user(u_base_platform, k_base_platform, platform_len) != 0) return -EFAULT; } @@ -604,11 +604,13 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm, /* put the ELF interpreter info on the stack */ #define NEW_AUX_ENT(id, val) \ do { \ - struct { unsigned long _id, _val; } __user *ent; \ + struct { unsigned long _id, _val; } __user *ent, v; \ \ ent = (void __user *) csp; \ - __put_user((id), &ent[nr]._id); \ - __put_user((val), &ent[nr]._val); \ + v._id = (id); \ + v._val = (val); \ + if (copy_to_user(ent + nr, &v, sizeof(v))) \ + return -EFAULT; \ nr++; \ } while (0) @@ -675,7 +677,8 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm, /* stack argc */ csp -= sizeof(unsigned long); - __put_user(bprm->argc, (unsigned long __user *) csp); + if (put_user(bprm->argc, (unsigned long __user *) csp)) + return -EFAULT; BUG_ON(csp != sp); @@ -689,25 +692,29 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm, p = (char __user *) current->mm->arg_start; for (loop = bprm->argc; loop > 0; loop--) { - __put_user((elf_caddr_t) p, argv++); + if (put_user((elf_caddr_t) p, argv++)) + return -EFAULT; len = strnlen_user(p, MAX_ARG_STRLEN); if (!len || len > MAX_ARG_STRLEN) return -EINVAL; p += len; } - __put_user(NULL, argv); + if (put_user(NULL, argv)) + return -EFAULT; current->mm->arg_end = (unsigned long) p; /* fill in the envv[] array */ current->mm->env_start = (unsigned long) p; for (loop = bprm->envc; loop > 0; loop--) { - __put_user((elf_caddr_t)(unsigned long) p, envp++); + if (put_user((elf_caddr_t)(unsigned long) p, envp++)) + return -EFAULT; len = strnlen_user(p, MAX_ARG_STRLEN); if 
(!len || len > MAX_ARG_STRLEN) return -EINVAL; p += len; } - __put_user(NULL, envp); + if (put_user(NULL, envp)) + return -EFAULT; current->mm->env_end = (unsigned long) p; mm->start_stack = (unsigned long) sp; @@ -849,8 +856,8 @@ static int elf_fdpic_map_file(struct elf_fdpic_params *params, tmp = phdr->p_memsz / sizeof(Elf32_Dyn); dyn = (Elf32_Dyn __user *)params->dynamic_addr; - __get_user(d_tag, &dyn[tmp - 1].d_tag); - if (d_tag != 0) + if (get_user(d_tag, &dyn[tmp - 1].d_tag) || + d_tag != 0) goto dynamic_error; break; } -- GitLab From 8861fd576ecf96450f42f3eb4b56cad5bf12188a Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 19 Feb 2020 09:32:55 -0500 Subject: [PATCH 1059/1971] binfmt_flat: don't use __put_user() ... and check the return value Signed-off-by: Al Viro --- fs/binfmt_flat.c | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c index 831a2b25ba79f..7b663ed5247be 100644 --- a/fs/binfmt_flat.c +++ b/fs/binfmt_flat.c @@ -138,35 +138,40 @@ static int create_flat_tables(struct linux_binprm *bprm, unsigned long arg_start current->mm->start_stack = (unsigned long)sp & -FLAT_STACK_ALIGN; sp = (unsigned long __user *)current->mm->start_stack; - __put_user(bprm->argc, sp++); + if (put_user(bprm->argc, sp++)) + return -EFAULT; if (IS_ENABLED(CONFIG_BINFMT_FLAT_ARGVP_ENVP_ON_STACK)) { unsigned long argv, envp; argv = (unsigned long)(sp + 2); envp = (unsigned long)(sp + 2 + bprm->argc + 1); - __put_user(argv, sp++); - __put_user(envp, sp++); + if (put_user(argv, sp++) || put_user(envp, sp++)) + return -EFAULT; } current->mm->arg_start = (unsigned long)p; for (i = bprm->argc; i > 0; i--) { - __put_user((unsigned long)p, sp++); + if (put_user((unsigned long)p, sp++)) + return -EFAULT; len = strnlen_user(p, MAX_ARG_STRLEN); if (!len || len > MAX_ARG_STRLEN) return -EINVAL; p += len; } - __put_user(0, sp++); + if (put_user(0, sp++)) + return -EFAULT; current->mm->arg_end = (unsigned long)p; current->mm->env_start = (unsigned long) p; for (i = bprm->envc; i > 0; i--) { - __put_user((unsigned long)p, sp++); + if (put_user((unsigned long)p, sp++)) + return -EFAULT; len = strnlen_user(p, MAX_ARG_STRLEN); if (!len || len > MAX_ARG_STRLEN) return -EINVAL; p += len; } - __put_user(0, sp++); + if (put_user(0, sp++)) + return -EFAULT; current->mm->env_end = (unsigned long)p; return 0; @@ -998,7 +1003,8 @@ static int load_flat_binary(struct linux_binprm *bprm) unsigned long __user *sp; current->mm->start_stack -= sizeof(unsigned long); sp = (unsigned long __user *)current->mm->start_stack; - __put_user(start_addr, sp); + if (put_user(start_addr, sp)) + return -EFAULT; start_addr = libinfo.lib_list[i].entry; } } -- GitLab From c120f3b81ede0e3d2cf34d457d7c628306760ff1 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 16 Feb 2020 17:32:57 -0500 Subject: [PATCH 1060/1971] x86: switch cp_stat64() to unsafe_put_user() Signed-off-by: Al Viro --- arch/x86/kernel/sys_ia32.c | 40 +++++++++++++++++++++----------------- 1 file changed, 22 insertions(+), 18 deletions(-) diff --git a/arch/x86/kernel/sys_ia32.c b/arch/x86/kernel/sys_ia32.c index ab03fede1422c..f8d65c99feb80 100644 --- a/arch/x86/kernel/sys_ia32.c +++ b/arch/x86/kernel/sys_ia32.c @@ -135,26 +135,30 @@ static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat) typeof(ubuf->st_gid) gid = 0; SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid)); SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid)); - if (!access_ok(ubuf, sizeof(struct stat64)) || - 
__put_user(huge_encode_dev(stat->dev), &ubuf->st_dev) || - __put_user(stat->ino, &ubuf->__st_ino) || - __put_user(stat->ino, &ubuf->st_ino) || - __put_user(stat->mode, &ubuf->st_mode) || - __put_user(stat->nlink, &ubuf->st_nlink) || - __put_user(uid, &ubuf->st_uid) || - __put_user(gid, &ubuf->st_gid) || - __put_user(huge_encode_dev(stat->rdev), &ubuf->st_rdev) || - __put_user(stat->size, &ubuf->st_size) || - __put_user(stat->atime.tv_sec, &ubuf->st_atime) || - __put_user(stat->atime.tv_nsec, &ubuf->st_atime_nsec) || - __put_user(stat->mtime.tv_sec, &ubuf->st_mtime) || - __put_user(stat->mtime.tv_nsec, &ubuf->st_mtime_nsec) || - __put_user(stat->ctime.tv_sec, &ubuf->st_ctime) || - __put_user(stat->ctime.tv_nsec, &ubuf->st_ctime_nsec) || - __put_user(stat->blksize, &ubuf->st_blksize) || - __put_user(stat->blocks, &ubuf->st_blocks)) + if (!user_write_access_begin(ubuf, sizeof(struct stat64))) return -EFAULT; + unsafe_put_user(huge_encode_dev(stat->dev), &ubuf->st_dev, Efault); + unsafe_put_user(stat->ino, &ubuf->__st_ino, Efault); + unsafe_put_user(stat->ino, &ubuf->st_ino, Efault); + unsafe_put_user(stat->mode, &ubuf->st_mode, Efault); + unsafe_put_user(stat->nlink, &ubuf->st_nlink, Efault); + unsafe_put_user(uid, &ubuf->st_uid, Efault); + unsafe_put_user(gid, &ubuf->st_gid, Efault); + unsafe_put_user(huge_encode_dev(stat->rdev), &ubuf->st_rdev, Efault); + unsafe_put_user(stat->size, &ubuf->st_size, Efault); + unsafe_put_user(stat->atime.tv_sec, &ubuf->st_atime, Efault); + unsafe_put_user(stat->atime.tv_nsec, &ubuf->st_atime_nsec, Efault); + unsafe_put_user(stat->mtime.tv_sec, &ubuf->st_mtime, Efault); + unsafe_put_user(stat->mtime.tv_nsec, &ubuf->st_mtime_nsec, Efault); + unsafe_put_user(stat->ctime.tv_sec, &ubuf->st_ctime, Efault); + unsafe_put_user(stat->ctime.tv_nsec, &ubuf->st_ctime_nsec, Efault); + unsafe_put_user(stat->blksize, &ubuf->st_blksize, Efault); + unsafe_put_user(stat->blocks, &ubuf->st_blocks, Efault); + user_access_end(); return 0; +Efault: + user_write_access_end(); + return -EFAULT; } COMPAT_SYSCALL_DEFINE2(ia32_stat64, const char __user *, filename, -- GitLab From 86977da9cb71c8293263c630ac920dd1537de9e5 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 16 Feb 2020 19:39:36 -0500 Subject: [PATCH 1061/1971] TEST_ACCESS_OK _never_ had been checked anywhere Once upon a time the predecessor of that thing (TEST_VERIFY_AREA) used to be. However, that had been gone for years now (and the patch that introduced TEST_ACCESS_OK has not touched any ifdefs - they got gradually removed later). Just bury it... Signed-off-by: Al Viro --- arch/x86/include/asm/pgtable_32.h | 7 ------- 1 file changed, 7 deletions(-) diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h index 0dca7f7aeff2b..fb10f2f8f4f02 100644 --- a/arch/x86/include/asm/pgtable_32.h +++ b/arch/x86/include/asm/pgtable_32.h @@ -32,13 +32,6 @@ extern pmd_t initial_pg_pmd[]; void paging_init(void); void sync_initial_page_table(void); -/* - * Define this if things work differently on an i386 and an i486: - * it will (on an i486) warn about kernel memory accesses that are - * done without a 'access_ok( ..)' - */ -#undef TEST_ACCESS_OK - #ifdef CONFIG_X86_PAE # include #else -- GitLab From 5ea75ae6ae60d13dfa35fd5d2e2a81824cba6662 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 18 Feb 2020 17:30:05 -0500 Subject: [PATCH 1062/1971] user_regset_copyout_zero(): use clear_user() that's the only caller of __clear_user() in generic code, and it's not hot enough to bother with skipping access_ok(). 
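For reference, the difference between the two helpers is just the range check; a rough sketch of the usual relationship (the real definitions are per-architecture and differ in detail):

        static inline unsigned long clear_user(void __user *to, unsigned long n)
        {
                if (access_ok(to, n))                 /* checked variant validates the range */
                        return __clear_user(to, n);   /* unchecked worker, no access_ok() */
                return n;                             /* report every byte as uncleared */
        }

Since user_regset_copyout_zero() is not on a hot path, paying for the extra access_ok() is cheaper than relying on every caller having already done it.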
Signed-off-by: Al Viro --- include/linux/regset.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/regset.h b/include/linux/regset.h index bf02437797384..46d6ae68c4550 100644 --- a/include/linux/regset.h +++ b/include/linux/regset.h @@ -320,7 +320,7 @@ static inline int user_regset_copyout_zero(unsigned int *pos, if (*kbuf) { memset(*kbuf, 0, copy); *kbuf += copy; - } else if (__clear_user(*ubuf, copy)) + } else if (clear_user(*ubuf, copy)) return -EFAULT; else *ubuf += copy; -- GitLab From 9eb41c521465f62332dfddcd399412fdff9c062b Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 18 Feb 2020 17:32:46 -0500 Subject: [PATCH 1063/1971] x86: kvm_hv_set_msr(): use __put_user() instead of 32bit __clear_user() Signed-off-by: Al Viro --- arch/x86/kvm/hyperv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index bcefa9d4e57ef..b85b211d46763 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -1129,7 +1129,7 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) * only, there can be valuable data in the rest which needs * to be preserved e.g. on migration. */ - if (__clear_user((void __user *)addr, sizeof(u32))) + if (__put_user(0, (u32 __user *)addr)) return 1; hv_vcpu->hv_vapic = data; kvm_vcpu_mark_page_dirty(vcpu, gfn); -- GitLab From b7e4b65f3fe92abbf4a1f57987a54c820969aebd Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 8 May 2020 00:16:31 -0400 Subject: [PATCH 1064/1971] bpf: make bpf_check_uarg_tail_zero() use check_zeroed_user() ... rather than open-coding it, and badly, at that. Acked-by: Alexei Starovoitov Signed-off-by: Al Viro --- kernel/bpf/syscall.c | 25 ++++++------------------- 1 file changed, 6 insertions(+), 19 deletions(-) diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 64783da342020..41ba746ecbc28 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -67,32 +67,19 @@ int bpf_check_uarg_tail_zero(void __user *uaddr, size_t expected_size, size_t actual_size) { - unsigned char __user *addr; - unsigned char __user *end; - unsigned char val; - int err; + unsigned char __user *addr = uaddr + expected_size; + int res; if (unlikely(actual_size > PAGE_SIZE)) /* silly large */ return -E2BIG; - if (unlikely(!access_ok(uaddr, actual_size))) - return -EFAULT; - if (actual_size <= expected_size) return 0; - addr = uaddr + expected_size; - end = uaddr + actual_size; - - for (; addr < end; addr++) { - err = get_user(val, addr); - if (err) - return err; - if (val) - return -E2BIG; - } - - return 0; + res = check_zeroed_user(addr, actual_size - expected_size); + if (res < 0) + return res; + return res ? 0 : -E2BIG; } const struct bpf_map_ops bpf_map_offload_ops = { -- GitLab From daceabf1b494c9e40a93fcc323b1258f557f65a1 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Wed, 3 Jun 2020 10:21:24 -0500 Subject: [PATCH 1065/1971] tracing/doc: Fix ascii-art in histogram-design.rst This fixes the Sphinx parallel build error when building htmldocs: docutils.utils.SystemMessage: /home/sfr/next/next/Documentation/trace/histogram-design.rst:219: (SEVERE/4) Unexpected section title. It also fixes a bunch of other warnings I noticed when fixing the above, caused by mixing ascii-art and text. 
Link: https://lkml.kernel.org/r/69c291c76964642a417e5dd170d183ba6b552010.camel@kernel.org Reported-by: Stephen Rothwell Tested-by: Stephen Rothwell Signed-off-by: Tom Zanussi Signed-off-by: Steven Rostedt (VMware) --- Documentation/trace/histogram-design.rst | 48 ++++++++++++------------ 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/Documentation/trace/histogram-design.rst b/Documentation/trace/histogram-design.rst index 06f5c7e5f2eed..eef840043da9c 100644 --- a/Documentation/trace/histogram-design.rst +++ b/Documentation/trace/histogram-design.rst @@ -14,7 +14,7 @@ tracing_map.c. Note: All the ftrace histogram command examples assume the working directory is the ftrace /tracing directory. For example:: - # cd /sys/kernel/debug/tracing + # cd /sys/kernel/debug/tracing Also, the histogram output displayed for those commands will be generally be truncated - only enough to make the point is displayed. @@ -107,7 +107,7 @@ for the hitcount and one key field for the pid key. Below that is a diagram of a run-time snapshot of what the tracing_map might look like for a given run. It attempts to show the relationships between the hist_data fields and the tracing_map -elements for a couple hypothetical keys and values. +elements for a couple hypothetical keys and values.:: +------------------+ | hist_data | @@ -141,20 +141,20 @@ elements for a couple hypothetical keys and values. | | | | +--------------+ | | n_keys = n_fields - n_vals | | - | | + The hist_data n_vals and n_fields delineate the extent of the fields[] | | array and separate keys from values for the rest of the code. | | - | | + Below is a run-time representation of the tracing_map part of the | | histogram, with pointers from various parts of the fields[] array | | to corresponding parts of the tracing_map. | | - | | + The tracing_map consists of an array of tracing_map_entrys and a set | | of preallocated tracing_map_elts (abbreviated below as map_entry and | | map_elt). The total number of map_entrys in the hist_data.map array = | | map->max_elts (actually map->map_size but only max_elts of those are | | used. This is a property required by the map_insert() algorithm). | | - | | + If a map_entry is unused, meaning no key has yet hashed into it, its | | .key value is 0 and its .val pointer is NULL. Once a map_entry has | | been claimed, the .key value contains the key's hash value and the | | @@ -163,11 +163,11 @@ for each key or value in the map_elt.fields[] array. There is an | | entry in the map_elt.fields[] array corresponding to each hist_field | | in the histogram, and this is where the continually aggregated sums | | corresponding to each histogram value are kept. | | - | | + The diagram attempts to show the relationship between the | | hist_data.fields[] and the map_elt.fields[] with the links drawn | | -between diagrams:: | | - | | +between diagrams:: + +-----------+ | | | hist_data | | | +-----------+ | | @@ -380,7 +380,7 @@ entry, ts0, corresponding to the ts0 variable in the sched_waking trigger above. sched_waking histogram ----------------------- +----------------------:: +------------------+ | hist_data |<-------------------------------------------------------+ @@ -439,25 +439,25 @@ sched_waking histogram +-----------------+ | | | n_keys = n_fields - n_vals | | | | | | - | | | + This is very similar to the basic case. 
In the above diagram, we can | | | see a new .flags member has been added to the struct hist_field | | | struct, and a new entry added to hist_data.fields representing the ts0 | | | variable. For a normal val hist_field, .flags is just 0 (modulo | | | modifier flags), but if the value is defined as a variable, the .flags | | | contains a set FL_VAR bit. | | | - | | | + As you can see, the ts0 entry's .var.idx member contains the index | | | into the tracing_map_elts' .vars[] array containing variable values. | | | This idx is used whenever the value of the variable is set or read. | | | The map_elt.vars idx assigned to the given variable is assigned and | | | saved in .var.idx by create_tracing_map_fields() after it calls | | | tracing_map_add_var(). | | | - | | | + Below is a representation of the histogram at run-time, which | | | populates the map, along with correspondence to the above hist_data and | | | hist_field data structures. | | | - | | | + The diagram attempts to show the relationship between the | | | hist_data.fields[] and the map_elt.fields[] and map_elt.vars[] with | | | the links drawn between diagrams. For each of the map_elts, you can | | | @@ -465,8 +465,8 @@ see that the .fields[] members point to the .sum or .offset of a key | | | or val and the .vars[] members point to the value of a variable. The | | | arrows between the two diagrams show the linkages between those | | | tracing_map members and the field definitions in the corresponding | | | -hist_data fields[] members. | | | - | | | +hist_data fields[] members.:: + +-----------+ | | | | hist_data | | | | +-----------+ | | | @@ -564,27 +564,27 @@ hist_data fields[] members. | | | | unused | | | | | | | +---------------+ | | - | | + For each used map entry, there's a map_elt pointing to an array of | | .vars containing the current value of the variables associated with | | that histogram entry. So in the above, the timestamp associated with | | pid 999 is 113345679876, and the timestamp variable in the same | | .var.idx for pid 4444 is 213499240729. | | - | | + sched_switch histogram | | ---------------------- | | - | | + The sched_switch histogram paired with the above sched_waking | | histogram is shown below. The most important aspect of the | | sched_switch histogram is that it references a variable on the | | sched_waking histogram above. | | - | | + The histogram diagram is very similar to the others so far displayed, | | but it adds variable references. You can see the normal hitcount and | | key fields along with a new wakeup_lat variable implemented in the | | same way as the sched_waking ts0 variable, but in addition there's an | | entry with the new FL_VAR_REF (short for HIST_FIELD_FL_VAR_REF) flag. | | - | | + Associated with the new var ref field are a couple of new hist_field | | members, var.hist_data and var_ref_idx. For a variable reference, the | | var.hist_data goes with the var.idx, which together uniquely identify | | @@ -593,10 +593,10 @@ just the index into the var_ref_vals[] array that caches the values of | | each variable whenever a hist trigger is updated. Those resulting | | values are then finally accessed by other code such as trace action | | code that uses the var_ref_idx values to assign param values. 
| | - | | + The diagram below describes the situation for the sched_switch | | -histogram referred to before: | | - | | +histogram referred to before:: + # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0' >> | | events/sched/sched_switch/trigger | | | | -- GitLab From 520da69d265a91c6536c63851cbb8a53946974f0 Mon Sep 17 00:00:00 2001 From: Yuxuan Shui Date: Wed, 27 May 2020 04:08:02 +0100 Subject: [PATCH 1066/1971] ovl: initialize error in ovl_copy_xattr In ovl_copy_xattr, if all the xattrs to be copied are overlayfs private xattrs, the copy loop will terminate without assigning anything to the error variable, thus returning an uninitialized value. If ovl_copy_xattr is called from ovl_clear_empty, this uninitialized error value is put into a pointer by ERR_PTR(), causing potential invalid memory accesses down the line. This commit initialize error with 0. This is the correct value because when there's no xattr to copy, because all xattrs are private, ovl_copy_xattr should succeed. This bug is discovered with the help of INIT_STACK_ALL and clang. Signed-off-by: Yuxuan Shui Link: https://bugs.chromium.org/p/chromium/issues/detail?id=1050405 Fixes: 0956254a2d5b ("ovl: don't copy up opaqueness") Cc: stable@vger.kernel.org # v4.8 Signed-off-by: Alexander Potapenko Signed-off-by: Miklos Szeredi --- fs/overlayfs/copy_up.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c index 66004534bd40c..79dd052c7dbf5 100644 --- a/fs/overlayfs/copy_up.c +++ b/fs/overlayfs/copy_up.c @@ -47,7 +47,7 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new) { ssize_t list_size, size, value_size = 0; char *buf, *name, *value = NULL; - int uninitialized_var(error); + int error = 0; size_t slen; if (!(old->d_inode->i_opflags & IOP_XATTR) || -- GitLab From 08f4c7c86d4cf125389dce9d94423024549f9b02 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Thu, 4 Jun 2020 10:48:19 +0200 Subject: [PATCH 1067/1971] ovl: add accessor for ofs->upper_mnt Next patch will remove ofs->upper_mnt, so add an accessor function for this field. Signed-off-by: Miklos Szeredi --- fs/overlayfs/export.c | 6 +++--- fs/overlayfs/inode.c | 4 ++-- fs/overlayfs/namei.c | 2 +- fs/overlayfs/ovl_entry.h | 5 +++++ fs/overlayfs/readdir.c | 2 +- fs/overlayfs/super.c | 40 ++++++++++++++++++++-------------------- fs/overlayfs/util.c | 6 +++--- 7 files changed, 35 insertions(+), 30 deletions(-) diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c index 321e24bf5c60e..8f4286450f92a 100644 --- a/fs/overlayfs/export.c +++ b/fs/overlayfs/export.c @@ -204,7 +204,7 @@ static int ovl_check_encode_origin(struct dentry *dentry) * ovl_connect_layer() will try to make origin's layer "connected" by * copying up a "connectable" ancestor. 
*/ - if (d_is_dir(dentry) && ofs->upper_mnt) + if (d_is_dir(dentry) && ovl_upper_mnt(ofs)) return ovl_connect_layer(dentry); /* Lower file handle for indexed and non-upper dir/non-dir */ @@ -677,10 +677,10 @@ static struct dentry *ovl_upper_fh_to_d(struct super_block *sb, struct dentry *dentry; struct dentry *upper; - if (!ofs->upper_mnt) + if (!ovl_upper_mnt(ofs)) return ERR_PTR(-EACCES); - upper = ovl_decode_real_fh(fh, ofs->upper_mnt, true); + upper = ovl_decode_real_fh(fh, ovl_upper_mnt(ofs), true); if (IS_ERR_OR_NULL(upper)) return upper; diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index f2aaf00821c00..5148c63bbf4e0 100644 --- a/fs/overlayfs/inode.c +++ b/fs/overlayfs/inode.c @@ -456,7 +456,7 @@ int ovl_update_time(struct inode *inode, struct timespec64 *ts, int flags) if (flags & S_ATIME) { struct ovl_fs *ofs = inode->i_sb->s_fs_info; struct path upperpath = { - .mnt = ofs->upper_mnt, + .mnt = ovl_upper_mnt(ofs), .dentry = ovl_upperdentry_dereference(OVL_I(inode)), }; @@ -921,7 +921,7 @@ static bool ovl_hash_bylower(struct super_block *sb, struct dentry *upper, return true; /* Yes, if won't be copied up */ - if (!ofs->upper_mnt) + if (!ovl_upper_mnt(ofs)) return true; /* No, if lower hardlink is or will be broken on copy up */ diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c index 5dc19dcea5a4e..3566282a9199c 100644 --- a/fs/overlayfs/namei.c +++ b/fs/overlayfs/namei.c @@ -489,7 +489,7 @@ struct dentry *ovl_index_upper(struct ovl_fs *ofs, struct dentry *index) if (IS_ERR_OR_NULL(fh)) return ERR_CAST(fh); - upper = ovl_decode_real_fh(fh, ofs->upper_mnt, true); + upper = ovl_decode_real_fh(fh, ovl_upper_mnt(ofs), true); kfree(fh); if (IS_ERR_OR_NULL(upper)) diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h index a8f82fb7ffb49..2da0ff3558246 100644 --- a/fs/overlayfs/ovl_entry.h +++ b/fs/overlayfs/ovl_entry.h @@ -82,6 +82,11 @@ struct ovl_fs { struct dentry *whiteout; }; +static inline struct vfsmount *ovl_upper_mnt(struct ovl_fs *ofs) +{ + return ofs->upper_mnt; +} + static inline struct ovl_fs *OVL_FS(struct super_block *sb) { return (struct ovl_fs *)sb->s_fs_info; diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c index 1621460417bed..6918b98faeb62 100644 --- a/fs/overlayfs/readdir.c +++ b/fs/overlayfs/readdir.c @@ -1120,7 +1120,7 @@ int ovl_indexdir_cleanup(struct ovl_fs *ofs) struct dentry *indexdir = ofs->indexdir; struct dentry *index = NULL; struct inode *dir = indexdir->d_inode; - struct path path = { .mnt = ofs->upper_mnt, .dentry = indexdir }; + struct path path = { .mnt = ovl_upper_mnt(ofs), .dentry = indexdir }; LIST_HEAD(list); struct rb_root root = RB_ROOT; struct ovl_cache_entry *p; diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 60dfb27bc12bc..201be50244943 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -224,7 +224,7 @@ static void ovl_free_fs(struct ovl_fs *ofs) ovl_inuse_unlock(ofs->workbasedir); dput(ofs->workbasedir); if (ofs->upperdir_locked) - ovl_inuse_unlock(ofs->upper_mnt->mnt_root); + ovl_inuse_unlock(ovl_upper_mnt(ofs)->mnt_root); mntput(ofs->upper_mnt); for (i = 1; i < ofs->numlayer; i++) { iput(ofs->layers[i].trap); @@ -258,7 +258,7 @@ static int ovl_sync_fs(struct super_block *sb, int wait) struct super_block *upper_sb; int ret; - if (!ofs->upper_mnt) + if (!ovl_upper_mnt(ofs)) return 0; /* @@ -272,7 +272,7 @@ static int ovl_sync_fs(struct super_block *sb, int wait) if (!wait) return 0; - upper_sb = ofs->upper_mnt->mnt_sb; + upper_sb = ovl_upper_mnt(ofs)->mnt_sb; 
down_read(&upper_sb->s_umount); ret = sync_filesystem(upper_sb); @@ -310,7 +310,7 @@ static int ovl_statfs(struct dentry *dentry, struct kstatfs *buf) /* Will this overlay be forced to mount/remount ro? */ static bool ovl_force_readonly(struct ovl_fs *ofs) { - return (!ofs->upper_mnt || !ofs->workdir); + return (!ovl_upper_mnt(ofs) || !ofs->workdir); } static const char *ovl_redirect_mode_def(void) @@ -372,7 +372,7 @@ static int ovl_remount(struct super_block *sb, int *flags, char *data) return -EROFS; if (*flags & SB_RDONLY && !sb_rdonly(sb)) { - upper_sb = ofs->upper_mnt->mnt_sb; + upper_sb = ovl_upper_mnt(ofs)->mnt_sb; down_read(&upper_sb->s_umount); ret = sync_filesystem(upper_sb); up_read(&upper_sb->s_umount); @@ -669,7 +669,7 @@ static struct dentry *ovl_workdir_create(struct ovl_fs *ofs, const char *name, bool persist) { struct inode *dir = ofs->workbasedir->d_inode; - struct vfsmount *mnt = ofs->upper_mnt; + struct vfsmount *mnt = ovl_upper_mnt(ofs); struct dentry *work; int err; bool retried = false; @@ -1122,7 +1122,7 @@ static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs, if (upper_mnt->mnt_sb->s_flags & SB_NOSEC) sb->s_flags |= SB_NOSEC; - if (ovl_inuse_trylock(ofs->upper_mnt->mnt_root)) { + if (ovl_inuse_trylock(ovl_upper_mnt(ofs)->mnt_root)) { ofs->upperdir_locked = true; } else { err = ovl_report_in_use(ofs, "upperdir"); @@ -1198,7 +1198,7 @@ out_unlock: static int ovl_make_workdir(struct super_block *sb, struct ovl_fs *ofs, struct path *workpath) { - struct vfsmount *mnt = ofs->upper_mnt; + struct vfsmount *mnt = ovl_upper_mnt(ofs); struct dentry *temp; bool rename_whiteout; bool d_type; @@ -1342,7 +1342,7 @@ out: static int ovl_get_indexdir(struct super_block *sb, struct ovl_fs *ofs, struct ovl_entry *oe, struct path *upperpath) { - struct vfsmount *mnt = ofs->upper_mnt; + struct vfsmount *mnt = ovl_upper_mnt(ofs); int err; err = mnt_want_write(mnt); @@ -1398,7 +1398,7 @@ static bool ovl_lower_uuid_ok(struct ovl_fs *ofs, const uuid_t *uuid) { unsigned int i; - if (!ofs->config.nfs_export && !ofs->upper_mnt) + if (!ofs->config.nfs_export && !ovl_upper_mnt(ofs)) return true; for (i = 0; i < ofs->numfs; i++) { @@ -1477,7 +1477,7 @@ static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs, /* idx/fsid 0 are reserved for upper fs even with lower only overlay */ ofs->numfs++; - layers[0].mnt = ofs->upper_mnt; + layers[0].mnt = ovl_upper_mnt(ofs); layers[0].idx = 0; layers[0].fsid = 0; ofs->numlayer = 1; @@ -1494,8 +1494,8 @@ static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs, goto out; } - if (ofs->upper_mnt) { - ofs->fs[0].sb = ofs->upper_mnt->mnt_sb; + if (ovl_upper_mnt(ofs)) { + ofs->fs[0].sb = ovl_upper_mnt(ofs)->mnt_sb; ofs->fs[0].is_lower = false; } @@ -1550,7 +1550,7 @@ static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs, * inode number or a non persistent inode number allocated from a * dedicated range. 
*/ - if (ofs->numfs - !ofs->upper_mnt == 1) { + if (ofs->numfs - !ovl_upper_mnt(ofs) == 1) { if (ofs->config.xino == OVL_XINO_ON) pr_info("\"xino=on\" is useless with all layers on same fs, ignore.\n"); ofs->xino_mode = 0; @@ -1699,8 +1699,8 @@ static int ovl_check_overlapping_layers(struct super_block *sb, { int i, err; - if (ofs->upper_mnt) { - err = ovl_check_layer(sb, ofs, ofs->upper_mnt->mnt_root, + if (ovl_upper_mnt(ofs)) { + err = ovl_check_layer(sb, ofs, ovl_upper_mnt(ofs)->mnt_root, "upperdir"); if (err) return err; @@ -1836,8 +1836,8 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) if (!ofs->workdir) sb->s_flags |= SB_RDONLY; - sb->s_stack_depth = ofs->upper_mnt->mnt_sb->s_stack_depth; - sb->s_time_gran = ofs->upper_mnt->mnt_sb->s_time_gran; + sb->s_stack_depth = ovl_upper_mnt(ofs)->mnt_sb->s_stack_depth; + sb->s_time_gran = ovl_upper_mnt(ofs)->mnt_sb->s_time_gran; } oe = ovl_get_lowerstack(sb, ofs); @@ -1846,7 +1846,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) goto out_err; /* If the upper fs is nonexistent, we mark overlayfs r/o too */ - if (!ofs->upper_mnt) + if (!ovl_upper_mnt(ofs)) sb->s_flags |= SB_RDONLY; if (!(ovl_force_readonly(ofs)) && ofs->config.index) { @@ -1874,7 +1874,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) /* Show index=off in /proc/mounts for forced r/o mount */ if (!ofs->indexdir) { ofs->config.index = false; - if (ofs->upper_mnt && ofs->config.nfs_export) { + if (ovl_upper_mnt(ofs) && ofs->config.nfs_export) { pr_warn("NFS export requires an index dir, falling back to nfs_export=off.\n"); ofs->config.nfs_export = false; } diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c index 33a7c60be3eda..56c1f89f20c9f 100644 --- a/fs/overlayfs/util.c +++ b/fs/overlayfs/util.c @@ -18,13 +18,13 @@ int ovl_want_write(struct dentry *dentry) { struct ovl_fs *ofs = dentry->d_sb->s_fs_info; - return mnt_want_write(ofs->upper_mnt); + return mnt_want_write(ovl_upper_mnt(ofs)); } void ovl_drop_write(struct dentry *dentry) { struct ovl_fs *ofs = dentry->d_sb->s_fs_info; - mnt_drop_write(ofs->upper_mnt); + mnt_drop_write(ovl_upper_mnt(ofs)); } struct dentry *ovl_workdir(struct dentry *dentry) @@ -150,7 +150,7 @@ void ovl_path_upper(struct dentry *dentry, struct path *path) { struct ovl_fs *ofs = dentry->d_sb->s_fs_info; - path->mnt = ofs->upper_mnt; + path->mnt = ovl_upper_mnt(ofs); path->dentry = ovl_dentry_upper(dentry); } -- GitLab From b8e42a651bdee06202ebdd96cff64fdeabd5b1d6 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Thu, 4 Jun 2020 10:48:19 +0200 Subject: [PATCH 1068/1971] ovl: get rid of redundant members in struct ovl_fs ofs->upper_mnt is copied to ->layers[0].mnt and ->layers[0].trap could be used instead of a separate ->upperdir_trap. Split the lowerdir option early to get the number of layers, then allocate the ->layers array, and finally fill the upper and lower layers, as before. Get rid of path_put_init() in ovl_lower_dir(), since the only caller will take care of that. [Colin Ian King] Fix null pointer dereference on null stack pointer on error return found by Coverity. 
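For illustration, the resulting setup order in ovl_fill_super() reduces to roughly the following (error handling and unrelated steps omitted; the full version is in the diff below):

        splitlower = kstrdup(ofs->config.lowerdir, GFP_KERNEL);
        numlower = ovl_split_lowerdirs(splitlower);         /* count layers up front */

        /* one array for upper + lowers; slot 0 is reserved for the upper layer */
        layers = kcalloc(numlower + 1, sizeof(struct ovl_layer), GFP_KERNEL);
        ofs->layers = layers;
        ofs->numlayer = 1;

        err = ovl_get_upper(sb, ofs, &layers[0], &upperpath);   /* fills layers[0] */
        oe = ovl_get_lowerstack(sb, splitlower, numlower, ofs, layers);

With layers[0] holding the upper mount and trap, ovl_upper_mnt() simply returns ofs->layers[0].mnt and the separate upper_mnt and upperdir_trap members go away.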
Signed-off-by: Miklos Szeredi --- fs/overlayfs/ovl_entry.h | 4 +- fs/overlayfs/super.c | 99 +++++++++++++++++++--------------------- 2 files changed, 48 insertions(+), 55 deletions(-) diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h index 2da0ff3558246..b429c80879ee0 100644 --- a/fs/overlayfs/ovl_entry.h +++ b/fs/overlayfs/ovl_entry.h @@ -46,7 +46,6 @@ struct ovl_path { /* private information held for overlayfs's superblock */ struct ovl_fs { - struct vfsmount *upper_mnt; unsigned int numlayer; /* Number of unique fs among layers including upper fs */ unsigned int numfs; @@ -70,7 +69,6 @@ struct ovl_fs { bool workdir_locked; bool share_whiteout; /* Traps in ovl inode cache */ - struct inode *upperdir_trap; struct inode *workbasedir_trap; struct inode *workdir_trap; struct inode *indexdir_trap; @@ -84,7 +82,7 @@ struct ovl_fs { static inline struct vfsmount *ovl_upper_mnt(struct ovl_fs *ofs) { - return ofs->upper_mnt; + return ofs->layers[0].mnt; } static inline struct ovl_fs *OVL_FS(struct super_block *sb) diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 201be50244943..eb81d8760a6a0 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -216,7 +216,6 @@ static void ovl_free_fs(struct ovl_fs *ofs) iput(ofs->workbasedir_trap); iput(ofs->indexdir_trap); iput(ofs->workdir_trap); - iput(ofs->upperdir_trap); dput(ofs->whiteout); dput(ofs->indexdir); dput(ofs->workdir); @@ -225,8 +224,7 @@ static void ovl_free_fs(struct ovl_fs *ofs) dput(ofs->workbasedir); if (ofs->upperdir_locked) ovl_inuse_unlock(ovl_upper_mnt(ofs)->mnt_root); - mntput(ofs->upper_mnt); - for (i = 1; i < ofs->numlayer; i++) { + for (i = 0; i < ofs->numlayer; i++) { iput(ofs->layers[i].trap); mntput(ofs->layers[i].mnt); } @@ -837,11 +835,11 @@ static int ovl_lower_dir(const char *name, struct path *path, err = ovl_mount_dir_noesc(name, path); if (err) - goto out; + return err; err = ovl_check_namelen(path, ofs, name); if (err) - goto out_put; + return err; *stack_depth = max(*stack_depth, path->mnt->mnt_sb->s_stack_depth); @@ -863,11 +861,6 @@ static int ovl_lower_dir(const char *name, struct path *path, ofs->xino_mode = -1; return 0; - -out_put: - path_put_init(path); -out: - return err; } /* Workdir should not be subdir of upperdir and vice versa */ @@ -1074,7 +1067,7 @@ static int ovl_report_in_use(struct ovl_fs *ofs, const char *name) } static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs, - struct path *upperpath) + struct ovl_layer *upper_layer, struct path *upperpath) { struct vfsmount *upper_mnt; int err; @@ -1094,7 +1087,7 @@ static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs, if (err) goto out; - err = ovl_setup_trap(sb, upperpath->dentry, &ofs->upperdir_trap, + err = ovl_setup_trap(sb, upperpath->dentry, &upper_layer->trap, "upperdir"); if (err) goto out; @@ -1108,7 +1101,9 @@ static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs, /* Don't inherit atime flags */ upper_mnt->mnt_flags &= ~(MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME); - ofs->upper_mnt = upper_mnt; + upper_layer->mnt = upper_mnt; + upper_layer->idx = 0; + upper_layer->fsid = 0; /* * Inherit SB_NOSEC flag from upperdir. 
@@ -1458,18 +1453,13 @@ static int ovl_get_fsid(struct ovl_fs *ofs, const struct path *path) } static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs, - struct path *stack, unsigned int numlower) + struct path *stack, unsigned int numlower, + struct ovl_layer *layers) { int err; unsigned int i; - struct ovl_layer *layers; err = -ENOMEM; - layers = kcalloc(numlower + 1, sizeof(struct ovl_layer), GFP_KERNEL); - if (!layers) - goto out; - ofs->layers = layers; - ofs->fs = kcalloc(numlower + 1, sizeof(struct ovl_sb), GFP_KERNEL); if (ofs->fs == NULL) goto out; @@ -1477,11 +1467,6 @@ static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs, /* idx/fsid 0 are reserved for upper fs even with lower only overlay */ ofs->numfs++; - layers[0].mnt = ovl_upper_mnt(ofs); - layers[0].idx = 0; - layers[0].fsid = 0; - ofs->numlayer = 1; - /* * All lower layers that share the same fs as upper layer, use the same * pseudo_dev as upper layer. Allocate fs[0].pseudo_dev even for lower @@ -1579,44 +1564,30 @@ out: } static struct ovl_entry *ovl_get_lowerstack(struct super_block *sb, - struct ovl_fs *ofs) + const char *lower, unsigned int numlower, + struct ovl_fs *ofs, struct ovl_layer *layers) { int err; - char *lowertmp, *lower; struct path *stack = NULL; - unsigned int stacklen, numlower = 0, i; + unsigned int i; struct ovl_entry *oe; - err = -ENOMEM; - lowertmp = kstrdup(ofs->config.lowerdir, GFP_KERNEL); - if (!lowertmp) - goto out_err; - - err = -EINVAL; - stacklen = ovl_split_lowerdirs(lowertmp); - if (stacklen > OVL_MAX_STACK) { - pr_err("too many lower directories, limit is %d\n", - OVL_MAX_STACK); - goto out_err; - } else if (!ofs->config.upperdir && stacklen == 1) { + if (!ofs->config.upperdir && numlower == 1) { pr_err("at least 2 lowerdir are needed while upperdir nonexistent\n"); - goto out_err; + return ERR_PTR(-EINVAL); } else if (!ofs->config.upperdir && ofs->config.nfs_export && ofs->config.redirect_follow) { pr_warn("NFS export requires \"redirect_dir=nofollow\" on non-upper mount, falling back to nfs_export=off.\n"); ofs->config.nfs_export = false; } - err = -ENOMEM; - stack = kcalloc(stacklen, sizeof(struct path), GFP_KERNEL); + stack = kcalloc(numlower, sizeof(struct path), GFP_KERNEL); if (!stack) - goto out_err; + return ERR_PTR(-ENOMEM); err = -EINVAL; - lower = lowertmp; - for (numlower = 0; numlower < stacklen; numlower++) { - err = ovl_lower_dir(lower, &stack[numlower], ofs, - &sb->s_stack_depth); + for (i = 0; i < numlower; i++) { + err = ovl_lower_dir(lower, &stack[i], ofs, &sb->s_stack_depth); if (err) goto out_err; @@ -1630,7 +1601,7 @@ static struct ovl_entry *ovl_get_lowerstack(struct super_block *sb, goto out_err; } - err = ovl_get_layers(sb, ofs, stack, numlower); + err = ovl_get_layers(sb, ofs, stack, numlower, layers); if (err) goto out_err; @@ -1648,7 +1619,6 @@ out: for (i = 0; i < numlower; i++) path_put(&stack[i]); kfree(stack); - kfree(lowertmp); return oe; @@ -1772,7 +1742,10 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) struct dentry *root_dentry; struct ovl_entry *oe; struct ovl_fs *ofs; + struct ovl_layer *layers; struct cred *cred; + char *splitlower = NULL; + unsigned int numlower; int err; sb->s_d_op = &ovl_dentry_operations; @@ -1804,6 +1777,26 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) goto out_err; } + err = -ENOMEM; + splitlower = kstrdup(ofs->config.lowerdir, GFP_KERNEL); + if (!splitlower) + goto out_err; + + numlower = ovl_split_lowerdirs(splitlower); + if (numlower 
> OVL_MAX_STACK) { + pr_err("too many lower directories, limit is %d\n", + OVL_MAX_STACK); + goto out_err; + } + + layers = kcalloc(numlower + 1, sizeof(struct ovl_layer), GFP_KERNEL); + if (!layers) + goto out_err; + + ofs->layers = layers; + /* Layer 0 is reserved for upper even if there's no upper */ + ofs->numlayer = 1; + sb->s_stack_depth = 0; sb->s_maxbytes = MAX_LFS_FILESIZE; atomic_long_set(&ofs->last_ino, 1); @@ -1825,7 +1818,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) goto out_err; } - err = ovl_get_upper(sb, ofs, &upperpath); + err = ovl_get_upper(sb, ofs, &layers[0], &upperpath); if (err) goto out_err; @@ -1840,7 +1833,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) sb->s_time_gran = ovl_upper_mnt(ofs)->mnt_sb->s_time_gran; } - oe = ovl_get_lowerstack(sb, ofs); + oe = ovl_get_lowerstack(sb, splitlower, numlower, ofs, layers); err = PTR_ERR(oe); if (IS_ERR(oe)) goto out_err; @@ -1903,6 +1896,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) goto out_free_oe; mntput(upperpath.mnt); + kfree(splitlower); sb->s_root = root_dentry; @@ -1912,6 +1906,7 @@ out_free_oe: ovl_entry_stack_free(oe); kfree(oe); out_err: + kfree(splitlower); path_put(&upperpath); ovl_free_fs(ofs); out: -- GitLab From df820f8de4e481222b17f9bcee7b909ae8167529 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Thu, 4 Jun 2020 10:48:19 +0200 Subject: [PATCH 1069/1971] ovl: make private mounts longterm Overlayfs is using clone_private_mount() to create internal mounts for underlying layers. These are used for operations requiring a path, such as dentry_open(). Since these private mounts are not in any namespace they are treated as short term, "detached" mounts and mntput() involves taking the global mount_lock, which can result in serious cacheline pingpong. Make these private mounts longterm instead, which trade the penalty on mntput() for a slightly longer shutdown time due to an added RCU grace period when putting these mounts. Introduce a new helper kern_unmount_many() that can take care of multiple longterm mounts with a single RCU grace period. Cc: Al Viro Signed-off-by: Miklos Szeredi --- Documentation/filesystems/porting.rst | 7 +++++++ fs/namespace.c | 16 ++++++++++++++++ fs/overlayfs/super.c | 7 ++++++- include/linux/mount.h | 2 ++ 4 files changed, 31 insertions(+), 1 deletion(-) diff --git a/Documentation/filesystems/porting.rst b/Documentation/filesystems/porting.rst index 26c0939695736..867036aa90b83 100644 --- a/Documentation/filesystems/porting.rst +++ b/Documentation/filesystems/porting.rst @@ -858,3 +858,10 @@ be misspelled d_alloc_anon(). [should've been added in 2016] stale comment in finish_open() nonwithstanding, failure exits in ->atomic_open() instances should *NOT* fput() the file, no matter what. Everything is handled by the caller. + +--- + +**mandatory** + +clone_private_mount() returns a longterm mount now, so the proper destructor of +its result is kern_unmount() or kern_unmount_array(). 
diff --git a/fs/namespace.c b/fs/namespace.c index a28e4db075ede..d53517f1d7417 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1879,6 +1879,9 @@ struct vfsmount *clone_private_mount(const struct path *path) if (IS_ERR(new_mnt)) return ERR_CAST(new_mnt); + /* Longterm mount to be removed by kern_unmount*() */ + new_mnt->mnt_ns = MNT_NS_INTERNAL; + return &new_mnt->mnt; } EXPORT_SYMBOL_GPL(clone_private_mount); @@ -3804,6 +3807,19 @@ void kern_unmount(struct vfsmount *mnt) } EXPORT_SYMBOL(kern_unmount); +void kern_unmount_array(struct vfsmount *mnt[], unsigned int num) +{ + unsigned int i; + + for (i = 0; i < num; i++) + if (mnt[i]) + real_mount(mnt[i])->mnt_ns = NULL; + synchronize_rcu_expedited(); + for (i = 0; i < num; i++) + mntput(mnt[i]); +} +EXPORT_SYMBOL(kern_unmount_array); + bool our_mnt(struct vfsmount *mnt) { return check_mnt(real_mount(mnt)); diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index eb81d8760a6a0..8d8cd46e14827 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -211,6 +211,7 @@ static void ovl_destroy_inode(struct inode *inode) static void ovl_free_fs(struct ovl_fs *ofs) { + struct vfsmount **mounts; unsigned i; iput(ofs->workbasedir_trap); @@ -224,10 +225,14 @@ static void ovl_free_fs(struct ovl_fs *ofs) dput(ofs->workbasedir); if (ofs->upperdir_locked) ovl_inuse_unlock(ovl_upper_mnt(ofs)->mnt_root); + + /* Hack! Reuse ofs->layers as a vfsmount array before freeing it */ + mounts = (struct vfsmount **) ofs->layers; for (i = 0; i < ofs->numlayer; i++) { iput(ofs->layers[i].trap); - mntput(ofs->layers[i].mnt); + mounts[i] = ofs->layers[i].mnt; } + kern_unmount_array(mounts, ofs->numlayer); kfree(ofs->layers); for (i = 0; i < ofs->numfs; i++) free_anon_bdev(ofs->fs[i].pseudo_dev); diff --git a/include/linux/mount.h b/include/linux/mount.h index bf8cc4108b8f9..8de95a0bec8d1 100644 --- a/include/linux/mount.h +++ b/include/linux/mount.h @@ -109,4 +109,6 @@ extern unsigned int sysctl_mount_max; extern bool path_is_mountpoint(const struct path *path); +extern void kern_unmount_array(struct vfsmount *mnt[], unsigned int num); + #endif /* _LINUX_MOUNT_H */ -- GitLab From b778e1ee1afe2784c2a034bb1e0192423097cb36 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Thu, 4 Jun 2020 10:48:19 +0200 Subject: [PATCH 1070/1971] ovl: only pass ->ki_flags to ovl_iocb_to_rwf() Next patch will want to pass a modified set of flags, so... 
Signed-off-by: Miklos Szeredi --- fs/overlayfs/file.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c index 0f83c8dfec461..01820e654a219 100644 --- a/fs/overlayfs/file.c +++ b/fs/overlayfs/file.c @@ -232,9 +232,8 @@ static void ovl_file_accessed(struct file *file) touch_atime(&file->f_path); } -static rwf_t ovl_iocb_to_rwf(struct kiocb *iocb) +static rwf_t ovl_iocb_to_rwf(int ifl) { - int ifl = iocb->ki_flags; rwf_t flags = 0; if (ifl & IOCB_NOWAIT) @@ -296,7 +295,7 @@ static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter) old_cred = ovl_override_creds(file_inode(file)->i_sb); if (is_sync_kiocb(iocb)) { ret = vfs_iter_read(real.file, iter, &iocb->ki_pos, - ovl_iocb_to_rwf(iocb)); + ovl_iocb_to_rwf(iocb->ki_flags)); } else { struct ovl_aio_req *aio_req; @@ -349,7 +348,7 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter) if (is_sync_kiocb(iocb)) { file_start_write(real.file); ret = vfs_iter_write(real.file, iter, &iocb->ki_pos, - ovl_iocb_to_rwf(iocb)); + ovl_iocb_to_rwf(iocb->ki_flags)); file_end_write(real.file); /* Update size */ ovl_copyattr(ovl_inode_real(inode), inode); -- GitLab From 74c6e384e991c5305754e3c79edf768a62b00563 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Thu, 4 Jun 2020 10:48:19 +0200 Subject: [PATCH 1071/1971] ovl: make oip->index bool ovl_get_inode() uses oip->index as a bool value, not as a pointer. Signed-off-by: Miklos Szeredi --- fs/overlayfs/inode.c | 2 +- fs/overlayfs/overlayfs.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index 5148c63bbf4e0..0424ee11210f7 100644 --- a/fs/overlayfs/inode.c +++ b/fs/overlayfs/inode.c @@ -908,7 +908,7 @@ struct inode *ovl_get_trap_inode(struct super_block *sb, struct dentry *dir) * Does overlay inode need to be hashed by lower inode? */ static bool ovl_hash_bylower(struct super_block *sb, struct dentry *upper, - struct dentry *lower, struct dentry *index) + struct dentry *lower, bool index) { struct ovl_fs *ofs = sb->s_fs_info; diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h index ffbb57b2d7f6b..b725c7f15ff49 100644 --- a/fs/overlayfs/overlayfs.h +++ b/fs/overlayfs/overlayfs.h @@ -424,7 +424,7 @@ struct ovl_inode_params { struct inode *newinode; struct dentry *upperdentry; struct ovl_path *lowerpath; - struct dentry *index; + bool index; unsigned int numlower; char *redirect; struct dentry *lowerdata; -- GitLab From b29cb8d645da87d7d1ff28a7317c541ffccfa66c Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Wed, 27 May 2020 15:24:23 +0300 Subject: [PATCH 1072/1971] crypto: omap-aes - avoid spamming console with self tests Running the self test suite for omap-aes with extra tests enabled causes huge spam with the tag message wrong indicators. With self tests, this is fine as there are some tests that purposedly pass bad data to the driver. Also, returning -EBADMSG from the driver is enough, so remove the dev_err message completely. 
Signed-off-by: Tero Kristo Signed-off-by: Herbert Xu --- drivers/crypto/omap-aes-gcm.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/crypto/omap-aes-gcm.c b/drivers/crypto/omap-aes-gcm.c index 32dc00dc570b3..9f937bdc53a7c 100644 --- a/drivers/crypto/omap-aes-gcm.c +++ b/drivers/crypto/omap-aes-gcm.c @@ -77,7 +77,6 @@ static void omap_aes_gcm_done_task(struct omap_aes_dev *dd) tag = (u8 *)rctx->auth_tag; for (i = 0; i < dd->authsize; i++) { if (tag[i]) { - dev_err(dd->dev, "GCM decryption: Tag Message is wrong\n"); ret = -EBADMSG; } } -- GitLab From 8dc43636e39a89ec51188b012a789625fa83bbe0 Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Wed, 27 May 2020 15:24:24 +0300 Subject: [PATCH 1073/1971] crypto: omap-sham - force kernel driver usage for sha algos As the hardware acceleration for the omap-sham algos is not available from userspace, force kernel driver usage. Without this flag in place, openssl 1.1 implementation thinks it can accelerate sha algorithms on omap devices directly from userspace. Signed-off-by: Tero Kristo Signed-off-by: Herbert Xu --- drivers/crypto/omap-sham.c | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 063ad5d03f333..34ecace143607 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -1571,7 +1571,8 @@ static struct ahash_alg algs_sha224_sha256[] = { .cra_name = "sha224", .cra_driver_name = "omap-sha224", .cra_priority = 400, - .cra_flags = CRYPTO_ALG_ASYNC | + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA224_BLOCK_SIZE, .cra_ctxsize = sizeof(struct omap_sham_ctx), @@ -1592,7 +1593,8 @@ static struct ahash_alg algs_sha224_sha256[] = { .cra_name = "sha256", .cra_driver_name = "omap-sha256", .cra_priority = 400, - .cra_flags = CRYPTO_ALG_ASYNC | + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct omap_sham_ctx), @@ -1614,7 +1616,8 @@ static struct ahash_alg algs_sha224_sha256[] = { .cra_name = "hmac(sha224)", .cra_driver_name = "omap-hmac-sha224", .cra_priority = 400, - .cra_flags = CRYPTO_ALG_ASYNC | + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA224_BLOCK_SIZE, .cra_ctxsize = sizeof(struct omap_sham_ctx) + @@ -1637,7 +1640,8 @@ static struct ahash_alg algs_sha224_sha256[] = { .cra_name = "hmac(sha256)", .cra_driver_name = "omap-hmac-sha256", .cra_priority = 400, - .cra_flags = CRYPTO_ALG_ASYNC | + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct omap_sham_ctx) + @@ -1662,7 +1666,8 @@ static struct ahash_alg algs_sha384_sha512[] = { .cra_name = "sha384", .cra_driver_name = "omap-sha384", .cra_priority = 400, - .cra_flags = CRYPTO_ALG_ASYNC | + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA384_BLOCK_SIZE, .cra_ctxsize = sizeof(struct omap_sham_ctx), @@ -1683,7 +1688,8 @@ static struct ahash_alg algs_sha384_sha512[] = { .cra_name = "sha512", .cra_driver_name = "omap-sha512", .cra_priority = 400, - .cra_flags = CRYPTO_ALG_ASYNC | + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA512_BLOCK_SIZE, .cra_ctxsize = sizeof(struct omap_sham_ctx), @@ -1705,7 +1711,8 @@ static struct 
ahash_alg algs_sha384_sha512[] = { .cra_name = "hmac(sha384)", .cra_driver_name = "omap-hmac-sha384", .cra_priority = 400, - .cra_flags = CRYPTO_ALG_ASYNC | + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA384_BLOCK_SIZE, .cra_ctxsize = sizeof(struct omap_sham_ctx) + @@ -1728,7 +1735,8 @@ static struct ahash_alg algs_sha384_sha512[] = { .cra_name = "hmac(sha512)", .cra_driver_name = "omap-hmac-sha512", .cra_priority = 400, - .cra_flags = CRYPTO_ALG_ASYNC | + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA512_BLOCK_SIZE, .cra_ctxsize = sizeof(struct omap_sham_ctx) + -- GitLab From 7e34e0bbc6449458a8de675814ffa22dec003698 Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Wed, 27 May 2020 15:24:25 +0300 Subject: [PATCH 1074/1971] crypto: omap-crypto - fix userspace copied buffer access In case buffers are copied from userspace, directly accessing the page will most likely fail because it hasn't been mapped into the kernel memory space. Fix the issue by forcing a kmap / kunmap within the cleanup functionality. Signed-off-by: Tero Kristo Signed-off-by: Herbert Xu --- drivers/crypto/omap-crypto.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/omap-crypto.c b/drivers/crypto/omap-crypto.c index cc88b7362bc2e..94b2dba90f0d9 100644 --- a/drivers/crypto/omap-crypto.c +++ b/drivers/crypto/omap-crypto.c @@ -178,11 +178,17 @@ static void omap_crypto_copy_data(struct scatterlist *src, amt = min(src->length - srco, dst->length - dsto); amt = min(len, amt); - srcb = sg_virt(src) + srco; - dstb = sg_virt(dst) + dsto; + srcb = kmap_atomic(sg_page(src)) + srco + src->offset; + dstb = kmap_atomic(sg_page(dst)) + dsto + dst->offset; memcpy(dstb, srcb, amt); + if (!PageSlab(sg_page(dst))) + flush_kernel_dcache_page(sg_page(dst)); + + kunmap_atomic(srcb); + kunmap_atomic(dstb); + srco += amt; dsto += amt; len -= amt; -- GitLab From 6395166d7a19019d5e9574eb9ecdaf0028abb887 Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Wed, 27 May 2020 15:24:26 +0300 Subject: [PATCH 1075/1971] crypto: omap-sham - huge buffer access fixes The ctx internal buffer can only hold buflen amount of data, don't try to copy over more than that. Also, initialize the context sg pointer if we only have data in the context internal buffer, this can happen when closing a hash with certain data amounts. 
Signed-off-by: Tero Kristo Signed-off-by: Herbert Xu --- drivers/crypto/omap-sham.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 34ecace143607..5efc66ccef46b 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -750,8 +750,15 @@ static int omap_sham_align_sgs(struct scatterlist *sg, int offset = rctx->offset; int bufcnt = rctx->bufcnt; - if (!sg || !sg->length || !nbytes) + if (!sg || !sg->length || !nbytes) { + if (bufcnt) { + sg_init_table(rctx->sgl, 1); + sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, bufcnt); + rctx->sg = rctx->sgl; + } + return 0; + } new_len = nbytes; @@ -895,7 +902,7 @@ static int omap_sham_prepare_request(struct ahash_request *req, bool update) if (hash_later < 0) hash_later = 0; - if (hash_later) { + if (hash_later && hash_later <= rctx->buflen) { scatterwalk_map_and_copy(rctx->buffer, req->src, req->nbytes - hash_later, -- GitLab From 63832a0c6fe180aa34d2a77052aa501be6281c99 Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Wed, 27 May 2020 15:24:27 +0300 Subject: [PATCH 1076/1971] crypto: omap-sham - fix very small data size handling With very small data sizes, the whole data can end up in the xmit buffer. This code path does not set the sg_len properly which causes the core dma framework to crash. Fix by adding the proper size in place. Also, the data length must be a multiple of block-size, so extend the DMA data size while here. Signed-off-by: Tero Kristo Signed-off-by: Herbert Xu --- drivers/crypto/omap-sham.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 5efc66ccef46b..45bc551ebc209 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -752,9 +752,11 @@ static int omap_sham_align_sgs(struct scatterlist *sg, if (!sg || !sg->length || !nbytes) { if (bufcnt) { + bufcnt = DIV_ROUND_UP(bufcnt, bs) * bs; sg_init_table(rctx->sgl, 1); sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, bufcnt); rctx->sg = rctx->sgl; + rctx->sg_len = 1; } return 0; -- GitLab From 9ef4e6e5e31403860081b995f26dc0d4b26356c2 Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Wed, 27 May 2020 15:24:28 +0300 Subject: [PATCH 1077/1971] crypto: omap-aes - prevent unregistering algorithms twice Most of the OMAP family SoCs contain two instances for AES core, which causes the remove callbacks to be also done twice when driver is removed. Fix the algorithm unregister callbacks to take into account the number of algorithms still registered to avoid removing these twice. 
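The balancing itself is a simple round robin over the probed cores: take the first device on the global list and rotate it to the tail, so the next request is handed to the other instance. The helper added below (omap_sham_find_dev()) caches the choice in the request context and at its heart does:

        spin_lock_bh(&sham.lock);
        dd = list_first_entry(&sham.dev_list, struct omap_sham_dev, list);
        list_move_tail(&dd->list, &sham.dev_list);      /* rotate for the next caller */
        spin_unlock_bh(&sham.lock);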
Signed-off-by: Tero Kristo Signed-off-by: Herbert Xu --- drivers/crypto/omap-aes.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index 824ddf2a66ffd..b5aff20c59009 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -1269,13 +1269,17 @@ static int omap_aes_remove(struct platform_device *pdev) spin_unlock(&list_lock); for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) - for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) + for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) { crypto_unregister_skcipher( &dd->pdata->algs_info[i].algs_list[j]); + dd->pdata->algs_info[i].registered--; + } - for (i = dd->pdata->aead_algs_info->size - 1; i >= 0; i--) { + for (i = dd->pdata->aead_algs_info->registered - 1; i >= 0; i--) { aalg = &dd->pdata->aead_algs_info->algs_list[i]; crypto_unregister_aead(aalg); + dd->pdata->aead_algs_info->registered--; + } crypto_engine_exit(dd->engine); -- GitLab From 281c377872ff5d15d80df25fc4df02d2676c7cde Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Wed, 27 May 2020 15:24:29 +0300 Subject: [PATCH 1078/1971] crypto: omap-sham - add proper load balancing support for multicore The current implementation of the multiple accelerator core support for OMAP SHA does not work properly. It always picks up the first probed accelerator core if this is available, and rest of the book keeping also gets confused if there are two cores available. Add proper load balancing support for SHA, and also fix any bugs related to the multicore support while doing it. Signed-off-by: Tero Kristo Signed-off-by: Herbert Xu --- drivers/crypto/omap-sham.c | 64 ++++++++++++++++++-------------------- 1 file changed, 31 insertions(+), 33 deletions(-) diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 45bc551ebc209..82691a057d2a1 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -168,8 +168,6 @@ struct omap_sham_hmac_ctx { }; struct omap_sham_ctx { - struct omap_sham_dev *dd; - unsigned long flags; /* fallback stuff */ @@ -934,27 +932,35 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd) return 0; } +struct omap_sham_dev *omap_sham_find_dev(struct omap_sham_reqctx *ctx) +{ + struct omap_sham_dev *dd; + + if (ctx->dd) + return ctx->dd; + + spin_lock_bh(&sham.lock); + dd = list_first_entry(&sham.dev_list, struct omap_sham_dev, list); + list_move_tail(&dd->list, &sham.dev_list); + ctx->dd = dd; + spin_unlock_bh(&sham.lock); + + return dd; +} + static int omap_sham_init(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm); struct omap_sham_reqctx *ctx = ahash_request_ctx(req); - struct omap_sham_dev *dd = NULL, *tmp; + struct omap_sham_dev *dd; int bs = 0; - spin_lock_bh(&sham.lock); - if (!tctx->dd) { - list_for_each_entry(tmp, &sham.dev_list, list) { - dd = tmp; - break; - } - tctx->dd = dd; - } else { - dd = tctx->dd; - } - spin_unlock_bh(&sham.lock); + ctx->dd = NULL; - ctx->dd = dd; + dd = omap_sham_find_dev(ctx); + if (!dd) + return -ENODEV; ctx->flags = 0; @@ -1224,8 +1230,7 @@ err1: static int omap_sham_enqueue(struct ahash_request *req, unsigned int op) { struct omap_sham_reqctx *ctx = ahash_request_ctx(req); - struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); - struct omap_sham_dev *dd = tctx->dd; + struct omap_sham_dev *dd = ctx->dd; ctx->op = op; @@ -1235,7 +1240,7 @@ static int omap_sham_enqueue(struct ahash_request 
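The reason is that the list cursor ends up pointing at the list head converted back through container_of(), a valid-looking but bogus pointer, never NULL. The idiomatic termination test is therefore against the head itself, roughly as follows (found() is only a stand-in for the real match condition):

        list_for_each_entry(ndev, &ndevlist, list) {
                if (found(ndev))                /* placeholder for the real test */
                        break;
        }
        if (&ndev->list == &ndevlist)           /* loop completed without a break */
                return NULL;                    /* ndev itself is never NULL here */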
*req, unsigned int op) static int omap_sham_update(struct ahash_request *req) { struct omap_sham_reqctx *ctx = ahash_request_ctx(req); - struct omap_sham_dev *dd = ctx->dd; + struct omap_sham_dev *dd = omap_sham_find_dev(ctx); if (!req->nbytes) return 0; @@ -1328,21 +1333,8 @@ static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key, struct omap_sham_hmac_ctx *bctx = tctx->base; int bs = crypto_shash_blocksize(bctx->shash); int ds = crypto_shash_digestsize(bctx->shash); - struct omap_sham_dev *dd = NULL, *tmp; int err, i; - spin_lock_bh(&sham.lock); - if (!tctx->dd) { - list_for_each_entry(tmp, &sham.dev_list, list) { - dd = tmp; - break; - } - tctx->dd = dd; - } else { - dd = tctx->dd; - } - spin_unlock_bh(&sham.lock); - err = crypto_shash_setkey(tctx->fallback, key, keylen); if (err) return err; @@ -1359,7 +1351,7 @@ static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key, memset(bctx->ipad + keylen, 0, bs - keylen); - if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) { + if (!test_bit(FLAGS_AUTO_XOR, &sham.flags)) { memcpy(bctx->opad, bctx->ipad, bs); for (i = 0; i < bs; i++) { @@ -2171,6 +2163,7 @@ static int omap_sham_probe(struct platform_device *pdev) } dd->flags |= dd->pdata->flags; + sham.flags |= dd->pdata->flags; pm_runtime_use_autosuspend(dev); pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY); @@ -2198,6 +2191,9 @@ static int omap_sham_probe(struct platform_device *pdev) spin_unlock(&sham.lock); for (i = 0; i < dd->pdata->algs_info_size; i++) { + if (dd->pdata->algs_info[i].registered) + break; + for (j = 0; j < dd->pdata->algs_info[i].size; j++) { struct ahash_alg *alg; @@ -2249,9 +2245,11 @@ static int omap_sham_remove(struct platform_device *pdev) list_del(&dd->list); spin_unlock(&sham.lock); for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) - for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) + for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) { crypto_unregister_ahash( &dd->pdata->algs_info[i].algs_list[j]); + dd->pdata->algs_info[i].registered--; + } tasklet_kill(&dd->done_task); pm_runtime_disable(&pdev->dev); -- GitLab From 320bdbd816156f9ca07e5fed7bfb449f2908dda7 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sat, 30 May 2020 15:35:37 +0200 Subject: [PATCH 1079/1971] crypto: cavium/nitrox - Fix 'nitrox_get_first_device()' when ndevlist is fully iterated When a list is completely iterated with 'list_for_each_entry(x, ...)', x is not NULL at the end. While at it, remove a useless initialization of the ndev variable. It is overridden by 'list_for_each_entry'. 
Fixes: f2663872f073 ("crypto: cavium - Register the CNN55XX supported crypto algorithms.") Cc: Signed-off-by: Christophe JAILLET Signed-off-by: Herbert Xu --- drivers/crypto/cavium/nitrox/nitrox_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c index 788c6607078b1..cee2a2713038d 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_main.c +++ b/drivers/crypto/cavium/nitrox/nitrox_main.c @@ -278,7 +278,7 @@ static void nitrox_remove_from_devlist(struct nitrox_device *ndev) struct nitrox_device *nitrox_get_first_device(void) { - struct nitrox_device *ndev = NULL; + struct nitrox_device *ndev; mutex_lock(&devlist_lock); list_for_each_entry(ndev, &ndevlist, list) { @@ -286,7 +286,7 @@ struct nitrox_device *nitrox_get_first_device(void) break; } mutex_unlock(&devlist_lock); - if (!ndev) + if (&ndev->list == &ndevlist) return NULL; refcount_inc(&ndev->refcnt); -- GitLab From d605cbb64252071d6a74b4fbb626e1473d91838d Mon Sep 17 00:00:00 2001 From: Oder Chiou Date: Thu, 4 Jun 2020 15:10:16 +0800 Subject: [PATCH 1080/1971] ASoC: rl6231: Modify the target DMIC clock rate Some DMIC components will not work correctly in the clock rate 3.072MHz. We recommend the clock rate 1.536MHz in the gerenal case. Signed-off-by: Oder Chiou Link: https://lore.kernel.org/r/20200604071016.3981-1-oder_chiou@realtek.com Signed-off-by: Mark Brown --- sound/soc/codecs/rl6231.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sound/soc/codecs/rl6231.c b/sound/soc/codecs/rl6231.c index 2586d1cafc0cd..8c9daf32bab84 100644 --- a/sound/soc/codecs/rl6231.c +++ b/sound/soc/codecs/rl6231.c @@ -80,8 +80,8 @@ int rl6231_calc_dmic_clk(int rate) for (i = 0; i < ARRAY_SIZE(div); i++) { if ((div[i] % 3) == 0) continue; - /* find divider that gives DMIC frequency below 3.072MHz */ - if (3072000 * div[i] >= rate) + /* find divider that gives DMIC frequency below 1.536MHz */ + if (1536000 * div[i] >= rate) return i; } -- GitLab From e396dec46c5600d426b2ca8a01a877928b50d1d9 Mon Sep 17 00:00:00 2001 From: Shengjiu Wang Date: Thu, 4 Jun 2020 14:25:30 +0800 Subject: [PATCH 1081/1971] ASoC: fsl-asoc-card: Defer probe when fail to find codec device Defer probe when fail to find codec device, because the codec device maybe probed later than machine driver. 
Signed-off-by: Shengjiu Wang Link: https://lore.kernel.org/r/1591251930-4111-1-git-send-email-shengjiu.wang@nxp.com Signed-off-by: Mark Brown --- sound/soc/fsl/fsl-asoc-card.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c index cf4feb835743d..00be739008884 100644 --- a/sound/soc/fsl/fsl-asoc-card.c +++ b/sound/soc/fsl/fsl-asoc-card.c @@ -581,7 +581,7 @@ static int fsl_asoc_card_probe(struct platform_device *pdev) if (!fsl_asoc_card_is_ac97(priv) && !codec_dev) { dev_err(&pdev->dev, "failed to find codec device\n"); - ret = -EINVAL; + ret = -EPROBE_DEFER; goto asrc_fail; } -- GitLab From 678916ec54f38406032462dc466fd36cdfea4e3c Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Tue, 2 Jun 2020 18:44:53 +0200 Subject: [PATCH 1082/1971] ASoC: max98390: Fix incorrect printf qualifier MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch addresses a compile warning: sound/soc/codecs/max98390.c:781:3: warning: format ‘%ld’ expects argument of type ‘long int’, but argument 4 has type ‘size_t {aka const unsigned int}’ [-Wformat=] Fixes: a6e3f4f34cdb ("ASoC: max98390: Added Amplifier Driver") Signed-off-by: Takashi Iwai Acked-by: Mark Brown Link: https://lore.kernel.org/r/20200602164453.29925-1-tiwai@suse.de Signed-off-by: Mark Brown --- sound/soc/codecs/max98390.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/soc/codecs/max98390.c b/sound/soc/codecs/max98390.c index b9ce44dda886f..be7cd0aeb6a63 100644 --- a/sound/soc/codecs/max98390.c +++ b/sound/soc/codecs/max98390.c @@ -778,7 +778,7 @@ static int max98390_dsm_init(struct snd_soc_component *component) } dev_dbg(component->dev, - "max98390: param fw size %ld\n", + "max98390: param fw size %zd\n", fw->size); dsm_param = (char *)fw->data; dsm_param += MAX98390_DSM_PAYLOAD_OFFSET; -- GitLab From 97ed3e509ee6d098730d137759c627a8d674cf55 Mon Sep 17 00:00:00 2001 From: Steve Lee Date: Thu, 4 Jun 2020 14:47:31 +0900 Subject: [PATCH 1083/1971] ASoC: max98390: Fix potential crash during param fw loading A malformed firmware file can cause out-of-bounds access and a crash during dsm_param bin loading. - add MIN/MAX param size checks to avoid out-of-bounds access. - read the start addr and size of the param data and check bounds. - add a condition that fw->size > param_size + _PAYLOAD_OFFSET to confirm there is enough data.
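For reference, this is the blob layout the added checks assume, inferred from the diff below (an illustrative summary, not an official firmware format description):

	/*
	 * dsm_param[0..1]: little-endian start register address for the bulk write
	 * dsm_param[2..3]: little-endian parameter size in bytes
	 * dsm_param[16..]: parameter payload (MAX98390_DSM_PAYLOAD_OFFSET)
	 *
	 * Both header fields must be range-checked against fw->size and the
	 * register map before regmap_bulk_write() is allowed to run.
	 */
	param_start_addr = (dsm_param[0] & 0xff) | (dsm_param[1] & 0xff) << 8;
	param_size       = (dsm_param[2] & 0xff) | (dsm_param[3] & 0xff) << 8;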
Signed-off-by: Steve Lee Link: https://lore.kernel.org/r/20200604054731.21140-1-steves.lee@maximintegrated.com Signed-off-by: Mark Brown --- sound/soc/codecs/max98390.c | 24 ++++++++++++++++++++---- sound/soc/codecs/max98390.h | 3 ++- 2 files changed, 22 insertions(+), 5 deletions(-) diff --git a/sound/soc/codecs/max98390.c b/sound/soc/codecs/max98390.c index be7cd0aeb6a63..0d63ebfbff2f9 100644 --- a/sound/soc/codecs/max98390.c +++ b/sound/soc/codecs/max98390.c @@ -754,6 +754,7 @@ static struct snd_soc_dai_driver max98390_dai[] = { static int max98390_dsm_init(struct snd_soc_component *component) { int ret; + int param_size, param_start_addr; char filename[128]; const char *vendor, *product; struct max98390_priv *max98390 = @@ -780,14 +781,29 @@ static int max98390_dsm_init(struct snd_soc_component *component) dev_dbg(component->dev, "max98390: param fw size %zd\n", fw->size); + if (fw->size < MAX98390_DSM_PARAM_MIN_SIZE) { + dev_err(component->dev, + "param fw is invalid.\n"); + goto err_alloc; + } dsm_param = (char *)fw->data; + param_start_addr = (dsm_param[0] & 0xff) | (dsm_param[1] & 0xff) << 8; + param_size = (dsm_param[2] & 0xff) | (dsm_param[3] & 0xff) << 8; + if (param_size > MAX98390_DSM_PARAM_MAX_SIZE || + param_start_addr < DSM_STBASS_HPF_B0_BYTE0 || + fw->size < param_size + MAX98390_DSM_PAYLOAD_OFFSET) { + dev_err(component->dev, + "param fw is invalid.\n"); + goto err_alloc; + } + regmap_write(max98390->regmap, MAX98390_R203A_AMP_EN, 0x80); dsm_param += MAX98390_DSM_PAYLOAD_OFFSET; - regmap_bulk_write(max98390->regmap, DSM_EQ_BQ1_B0_BYTE0, - dsm_param, - fw->size - MAX98390_DSM_PAYLOAD_OFFSET); - release_firmware(fw); + regmap_bulk_write(max98390->regmap, param_start_addr, + dsm_param, param_size); regmap_write(max98390->regmap, MAX98390_R23E1_DSP_GLOBAL_EN, 0x01); +err_alloc: + release_firmware(fw); err: return ret; } diff --git a/sound/soc/codecs/max98390.h b/sound/soc/codecs/max98390.h index f59cb114d9579..5f444e7779b0a 100644 --- a/sound/soc/codecs/max98390.h +++ b/sound/soc/codecs/max98390.h @@ -650,7 +650,8 @@ /* DSM register offset */ #define MAX98390_DSM_PAYLOAD_OFFSET 16 -#define MAX98390_DSM_PAYLOAD_OFFSET_2 495 +#define MAX98390_DSM_PARAM_MAX_SIZE 770 +#define MAX98390_DSM_PARAM_MIN_SIZE 670 struct max98390_priv { struct regmap *regmap; -- GitLab From 138125f74b254fd2e37b3d875faf8e0e8608299b Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 30 May 2020 00:40:25 +0100 Subject: [PATCH 1084/1971] scsi: hpsa: Lift {BIG_,}IOCTL_Command_struct copy{in,out} into hpsa_ioctl() Link: https://lore.kernel.org/r/20200529234028.46373-1-viro@ZenIV.linux.org.uk Acked-by: Don Brace Tested-by: Don Brace Signed-off-by: Al Viro Signed-off-by: Martin K. 
Petersen --- drivers/scsi/hpsa.c | 116 +++++++++++++++++++++----------------------- 1 file changed, 56 insertions(+), 60 deletions(-) diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 1e9302e99d052..3344a06c938ee 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -6358,37 +6358,33 @@ static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp) return 0; } -static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) +static int hpsa_passthru_ioctl(struct ctlr_info *h, + IOCTL_Command_struct *iocommand) { - IOCTL_Command_struct iocommand; struct CommandList *c; char *buff = NULL; u64 temp64; int rc = 0; - if (!argp) - return -EINVAL; if (!capable(CAP_SYS_RAWIO)) return -EPERM; - if (copy_from_user(&iocommand, argp, sizeof(iocommand))) - return -EFAULT; - if ((iocommand.buf_size < 1) && - (iocommand.Request.Type.Direction != XFER_NONE)) { + if ((iocommand->buf_size < 1) && + (iocommand->Request.Type.Direction != XFER_NONE)) { return -EINVAL; } - if (iocommand.buf_size > 0) { - buff = kmalloc(iocommand.buf_size, GFP_KERNEL); + if (iocommand->buf_size > 0) { + buff = kmalloc(iocommand->buf_size, GFP_KERNEL); if (buff == NULL) return -ENOMEM; - if (iocommand.Request.Type.Direction & XFER_WRITE) { + if (iocommand->Request.Type.Direction & XFER_WRITE) { /* Copy the data into the buffer we created */ - if (copy_from_user(buff, iocommand.buf, - iocommand.buf_size)) { + if (copy_from_user(buff, iocommand->buf, + iocommand->buf_size)) { rc = -EFAULT; goto out_kfree; } } else { - memset(buff, 0, iocommand.buf_size); + memset(buff, 0, iocommand->buf_size); } } c = cmd_alloc(h); @@ -6398,23 +6394,23 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) c->scsi_cmd = SCSI_CMD_BUSY; /* Fill in Command Header */ c->Header.ReplyQueue = 0; /* unused in simple mode */ - if (iocommand.buf_size > 0) { /* buffer to fill */ + if (iocommand->buf_size > 0) { /* buffer to fill */ c->Header.SGList = 1; c->Header.SGTotal = cpu_to_le16(1); } else { /* no buffers to fill */ c->Header.SGList = 0; c->Header.SGTotal = cpu_to_le16(0); } - memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN)); + memcpy(&c->Header.LUN, &iocommand->LUN_info, sizeof(c->Header.LUN)); /* Fill in Request block */ - memcpy(&c->Request, &iocommand.Request, + memcpy(&c->Request, &iocommand->Request, sizeof(c->Request)); /* Fill in the scatter gather information */ - if (iocommand.buf_size > 0) { + if (iocommand->buf_size > 0) { temp64 = dma_map_single(&h->pdev->dev, buff, - iocommand.buf_size, DMA_BIDIRECTIONAL); + iocommand->buf_size, DMA_BIDIRECTIONAL); if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) { c->SG[0].Addr = cpu_to_le64(0); c->SG[0].Len = cpu_to_le32(0); @@ -6422,12 +6418,12 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) goto out; } c->SG[0].Addr = cpu_to_le64(temp64); - c->SG[0].Len = cpu_to_le32(iocommand.buf_size); + c->SG[0].Len = cpu_to_le32(iocommand->buf_size); c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */ } rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT); - if (iocommand.buf_size > 0) + if (iocommand->buf_size > 0) hpsa_pci_unmap(h->pdev, c, 1, DMA_BIDIRECTIONAL); check_ioctl_unit_attention(h, c); if (rc) { @@ -6436,16 +6432,12 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) } /* Copy the error information out */ - memcpy(&iocommand.error_info, c->err_info, - sizeof(iocommand.error_info)); - if (copy_to_user(argp, &iocommand, sizeof(iocommand))) { - rc = 
-EFAULT; - goto out; - } - if ((iocommand.Request.Type.Direction & XFER_READ) && - iocommand.buf_size > 0) { + memcpy(&iocommand->error_info, c->err_info, + sizeof(iocommand->error_info)); + if ((iocommand->Request.Type.Direction & XFER_READ) && + iocommand->buf_size > 0) { /* Copy the data out of the buffer we created */ - if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) { + if (copy_to_user(iocommand->buf, buff, iocommand->buf_size)) { rc = -EFAULT; goto out; } @@ -6457,9 +6449,9 @@ out_kfree: return rc; } -static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) +static int hpsa_big_passthru_ioctl(struct ctlr_info *h, + BIG_IOCTL_Command_struct *ioc) { - BIG_IOCTL_Command_struct *ioc; struct CommandList *c; unsigned char **buff = NULL; int *buff_size = NULL; @@ -6470,29 +6462,17 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) u32 sz; BYTE __user *data_ptr; - if (!argp) - return -EINVAL; if (!capable(CAP_SYS_RAWIO)) return -EPERM; - ioc = vmemdup_user(argp, sizeof(*ioc)); - if (IS_ERR(ioc)) { - status = PTR_ERR(ioc); - goto cleanup1; - } + if ((ioc->buf_size < 1) && - (ioc->Request.Type.Direction != XFER_NONE)) { - status = -EINVAL; - goto cleanup1; - } + (ioc->Request.Type.Direction != XFER_NONE)) + return -EINVAL; /* Check kmalloc limits using all SGs */ - if (ioc->malloc_size > MAX_KMALLOC_SIZE) { - status = -EINVAL; - goto cleanup1; - } - if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) { - status = -EINVAL; - goto cleanup1; - } + if (ioc->malloc_size > MAX_KMALLOC_SIZE) + return -EINVAL; + if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) + return -EINVAL; buff = kcalloc(SG_ENTRIES_IN_CMD, sizeof(char *), GFP_KERNEL); if (!buff) { status = -ENOMEM; @@ -6565,10 +6545,6 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) /* Copy the error information out */ memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info)); - if (copy_to_user(argp, ioc, sizeof(*ioc))) { - status = -EFAULT; - goto cleanup0; - } if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) { int i; @@ -6594,7 +6570,6 @@ cleanup1: kfree(buff); } kfree(buff_size); - kvfree(ioc); return status; } @@ -6628,18 +6603,39 @@ static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd, return hpsa_getpciinfo_ioctl(h, argp); case CCISS_GETDRIVVER: return hpsa_getdrivver_ioctl(h, argp); - case CCISS_PASSTHRU: + case CCISS_PASSTHRU: { + IOCTL_Command_struct iocommand; + + if (!argp) + return -EINVAL; + if (copy_from_user(&iocommand, argp, sizeof(iocommand))) + return -EFAULT; if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) return -EAGAIN; - rc = hpsa_passthru_ioctl(h, argp); + rc = hpsa_passthru_ioctl(h, &iocommand); atomic_inc(&h->passthru_cmds_avail); + if (!rc && copy_to_user(argp, &iocommand, sizeof(iocommand))) + rc = -EFAULT; return rc; - case CCISS_BIG_PASSTHRU: + } + case CCISS_BIG_PASSTHRU: { + BIG_IOCTL_Command_struct *ioc; + if (!argp) + return -EINVAL; if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) return -EAGAIN; - rc = hpsa_big_passthru_ioctl(h, argp); + ioc = vmemdup_user(argp, sizeof(*ioc)); + if (IS_ERR(ioc)) { + atomic_inc(&h->passthru_cmds_avail); + return PTR_ERR(ioc); + } + rc = hpsa_big_passthru_ioctl(h, ioc); atomic_inc(&h->passthru_cmds_avail); + if (!rc && copy_to_user(argp, ioc, sizeof(*ioc))) + rc = -EFAULT; + kvfree(ioc); return rc; + } default: return -ENOTTY; } -- GitLab From cb17c1b69b175e3f7ae2ef53e384889cdbae5c0d Mon Sep 17 00:00:00 2001 From: Al Viro Date: 
Sat, 30 May 2020 00:40:26 +0100 Subject: [PATCH 1085/1971] scsi: hpsa: Don't bother with vmalloc for BIG_IOCTL_Command_struct "BIG" in the name refers to the amount of data being transferred, _not_ the size of structure itself; it's 140 or 144 bytes (for 32bit and 64bit hosts resp.). IOCTL_Command_struct is 136 or 144 bytes large... No point whatsoever turning that into dynamic allocation, let alone vmalloc one. Just keep it as local variable... Link: https://lore.kernel.org/r/20200529234028.46373-2-viro@ZenIV.linux.org.uk Acked-by: Don Brace Tested-by: Don Brace Signed-off-by: Al Viro Signed-off-by: Martin K. Petersen --- drivers/scsi/hpsa.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 3344a06c938ee..64fd972721097 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -6619,21 +6619,17 @@ static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd, return rc; } case CCISS_BIG_PASSTHRU: { - BIG_IOCTL_Command_struct *ioc; + BIG_IOCTL_Command_struct ioc; if (!argp) return -EINVAL; + if (copy_from_user(&ioc, argp, sizeof(ioc))) + return -EFAULT; if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) return -EAGAIN; - ioc = vmemdup_user(argp, sizeof(*ioc)); - if (IS_ERR(ioc)) { - atomic_inc(&h->passthru_cmds_avail); - return PTR_ERR(ioc); - } - rc = hpsa_big_passthru_ioctl(h, ioc); + rc = hpsa_big_passthru_ioctl(h, &ioc); atomic_inc(&h->passthru_cmds_avail); - if (!rc && copy_to_user(argp, ioc, sizeof(*ioc))) + if (!rc && copy_to_user(argp, &ioc, sizeof(ioc))) rc = -EFAULT; - kvfree(ioc); return rc; } default: -- GitLab From 10100ffd5f6584298b72f9fe26f32cf02abfb8b0 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 30 May 2020 00:40:27 +0100 Subject: [PATCH 1086/1971] scsi: hpsa: Get rid of compat_alloc_user_space() No need for building a native struct on kernel stack, copying it to userland one, then calling hpsa_ioctl() which copies it back into _another_ instance of the same struct. Link: https://lore.kernel.org/r/20200529234028.46373-3-viro@ZenIV.linux.org.uk Acked-by: Don Brace Tested-by: Don Brace Signed-off-by: Al Viro Signed-off-by: Martin K. 
Petersen --- drivers/scsi/hpsa.c | 80 ++++++++++++++++++++------------------------- 1 file changed, 36 insertions(+), 44 deletions(-) diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 64fd972721097..c7fbe56891ef0 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -254,6 +254,10 @@ static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id); static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id); static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd, void __user *arg); +static int hpsa_passthru_ioctl(struct ctlr_info *h, + IOCTL_Command_struct *iocommand); +static int hpsa_big_passthru_ioctl(struct ctlr_info *h, + BIG_IOCTL_Command_struct *ioc); #ifdef CONFIG_COMPAT static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd, @@ -6217,75 +6221,63 @@ static void cmd_free(struct ctlr_info *h, struct CommandList *c) static int hpsa_ioctl32_passthru(struct scsi_device *dev, unsigned int cmd, void __user *arg) { - IOCTL32_Command_struct __user *arg32 = - (IOCTL32_Command_struct __user *) arg; + struct ctlr_info *h = sdev_to_hba(dev); + IOCTL32_Command_struct __user *arg32 = arg; IOCTL_Command_struct arg64; - IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64)); int err; u32 cp; - memset(&arg64, 0, sizeof(arg64)); - err = 0; - err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, - sizeof(arg64.LUN_info)); - err |= copy_from_user(&arg64.Request, &arg32->Request, - sizeof(arg64.Request)); - err |= copy_from_user(&arg64.error_info, &arg32->error_info, - sizeof(arg64.error_info)); - err |= get_user(arg64.buf_size, &arg32->buf_size); - err |= get_user(cp, &arg32->buf); - arg64.buf = compat_ptr(cp); - err |= copy_to_user(p, &arg64, sizeof(arg64)); + if (!arg) + return -EINVAL; - if (err) + memset(&arg64, 0, sizeof(arg64)); + if (copy_from_user(&arg64, arg32, offsetof(IOCTL_Command_struct, buf))) + return -EFAULT; + if (get_user(cp, &arg32->buf)) return -EFAULT; + arg64.buf = compat_ptr(cp); - err = hpsa_ioctl(dev, CCISS_PASSTHRU, p); + if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) + return -EAGAIN; + err = hpsa_passthru_ioctl(h, &arg64); + atomic_inc(&h->passthru_cmds_avail); if (err) return err; - err |= copy_in_user(&arg32->error_info, &p->error_info, - sizeof(arg32->error_info)); - if (err) + if (copy_to_user(&arg32->error_info, &arg64.error_info, + sizeof(arg32->error_info))) return -EFAULT; - return err; + return 0; } static int hpsa_ioctl32_big_passthru(struct scsi_device *dev, unsigned int cmd, void __user *arg) { - BIG_IOCTL32_Command_struct __user *arg32 = - (BIG_IOCTL32_Command_struct __user *) arg; + struct ctlr_info *h = sdev_to_hba(dev); + BIG_IOCTL32_Command_struct __user *arg32 = arg; BIG_IOCTL_Command_struct arg64; - BIG_IOCTL_Command_struct __user *p = - compat_alloc_user_space(sizeof(arg64)); int err; u32 cp; + if (!arg) + return -EINVAL; memset(&arg64, 0, sizeof(arg64)); - err = 0; - err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, - sizeof(arg64.LUN_info)); - err |= copy_from_user(&arg64.Request, &arg32->Request, - sizeof(arg64.Request)); - err |= copy_from_user(&arg64.error_info, &arg32->error_info, - sizeof(arg64.error_info)); - err |= get_user(arg64.buf_size, &arg32->buf_size); - err |= get_user(arg64.malloc_size, &arg32->malloc_size); - err |= get_user(cp, &arg32->buf); - arg64.buf = compat_ptr(cp); - err |= copy_to_user(p, &arg64, sizeof(arg64)); - - if (err) + if (copy_from_user(&arg64, arg32, + offsetof(BIG_IOCTL32_Command_struct, buf))) return -EFAULT; + if (get_user(cp, &arg32->buf)) + 
return -EFAULT; + arg64.buf = compat_ptr(cp); - err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p); + if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) + return -EAGAIN; + err = hpsa_big_passthru_ioctl(h, &arg64); + atomic_inc(&h->passthru_cmds_avail); if (err) return err; - err |= copy_in_user(&arg32->error_info, &p->error_info, - sizeof(arg32->error_info)); - if (err) + if (copy_to_user(&arg32->error_info, &arg64.error_info, + sizeof(arg32->error_info))) return -EFAULT; - return err; + return 0; } static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd, -- GitLab From 06b43f968db58fd582b09b745338eae5af9f9f5e Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 30 May 2020 00:40:28 +0100 Subject: [PATCH 1087/1971] scsi: hpsa: hpsa_ioctl(): Tidy up a bit Link: https://lore.kernel.org/r/20200529234028.46373-4-viro@ZenIV.linux.org.uk Acked-by: Don Brace Tested-by: Don Brace Signed-off-by: Al Viro Signed-off-by: Martin K. Petersen --- drivers/scsi/hpsa.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index c7fbe56891ef0..81d0414e21175 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -6577,14 +6577,11 @@ static void check_ioctl_unit_attention(struct ctlr_info *h, * ioctl */ static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd, - void __user *arg) + void __user *argp) { - struct ctlr_info *h; - void __user *argp = (void __user *)arg; + struct ctlr_info *h = sdev_to_hba(dev); int rc; - h = sdev_to_hba(dev); - switch (cmd) { case CCISS_DEREGDISK: case CCISS_REGNEWDISK: -- GitLab From 862b2509d157c629dd26d7ac6c6cdbf043d332eb Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Wed, 3 Jun 2020 17:37:08 +0200 Subject: [PATCH 1088/1971] ALSA: usb-audio: Fix inconsistent card PM state after resume When a USB-audio interface gets runtime-suspended via auto-pm feature, the driver suspends all functionality and increment chip->num_suspended_intf. Later on, when the system gets suspended to S3, the driver increments chip->num_suspended_intf again, skips the device changes, and sets the card power state to SNDRV_CTL_POWER_D3hot. In return, when the system gets resumed from S3, the resume callback decrements chip->num_suspended_intf. Since this refcount is still not zero (it's been runtime-suspended), the whole resume is skipped. But there is a small pitfall here. The problem is that the driver doesn't restore the card power state after this resume call, leaving it as SNDRV_CTL_POWER_D3hot. So, even after the system resume finishes, the card instance still appears as if it were system-suspended, and this confuses many ioctl accesses that are blocked unexpectedly. In details, we have two issues behind the scene: one is that the card power state is changed only when the refcount becomes zero, and another is that the prior auto-suspend check is kept in a boolean flag. Although the latter problem is almost negligible since the auto-pm feature is imposed only on the primary interface, but this can be a potential problem on the devices with multiple interfaces. This patch addresses those issues by the following: - Replace chip->autosuspended boolean flag with chip->system_suspend counter - At the first system-suspend, chip->num_suspended_intf is recorded to chip->system_suspend - At system-resume, the card power state is restored when the chip->num_suspended_intf refcount reaches to chip->system_suspend, i.e. 
the state returns to the auto-suspended Also, the patch fixes yet another hidden problem by the code refactoring along with the fixes above: namely, when some resume procedure failed, the driver left chip->num_suspended_intf that was already decreased, and it might lead to the refcount unbalance. In the new code, the refcount decrement is done after the whole resume procedure, and the problem is avoided as well. Fixes: 0662292aec05 ("ALSA: usb-audio: Handle normal and auto-suspend equally") Reported-and-tested-by: Macpaul Lin Cc: Link: https://lore.kernel.org/r/20200603153709.6293-1-tiwai@suse.de Signed-off-by: Takashi Iwai --- sound/usb/card.c | 19 ++++++++++++------- sound/usb/usbaudio.h | 2 +- 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/sound/usb/card.c b/sound/usb/card.c index fd6fd1726ea0b..359f7a04be1ce 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c @@ -843,9 +843,6 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message) if (chip == (void *)-1L) return 0; - chip->autosuspended = !!PMSG_IS_AUTO(message); - if (!chip->autosuspended) - snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot); if (!chip->num_suspended_intf++) { list_for_each_entry(as, &chip->pcm_list, list) { snd_usb_pcm_suspend(as); @@ -858,6 +855,11 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message) snd_usb_mixer_suspend(mixer); } + if (!PMSG_IS_AUTO(message) && !chip->system_suspend) { + snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot); + chip->system_suspend = chip->num_suspended_intf; + } + return 0; } @@ -871,10 +873,10 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume) if (chip == (void *)-1L) return 0; - if (--chip->num_suspended_intf) - return 0; atomic_inc(&chip->active); /* avoid autopm */ + if (chip->num_suspended_intf > 1) + goto out; list_for_each_entry(as, &chip->pcm_list, list) { err = snd_usb_pcm_resume(as); @@ -896,9 +898,12 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume) snd_usbmidi_resume(p); } - if (!chip->autosuspended) + out: + if (chip->num_suspended_intf == chip->system_suspend) { snd_power_change_state(chip->card, SNDRV_CTL_POWER_D0); - chip->autosuspended = 0; + chip->system_suspend = 0; + } + chip->num_suspended_intf--; err_out: atomic_dec(&chip->active); /* allow autopm after this point */ diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h index 1c892c7f14d78..e0ebfb25fbd51 100644 --- a/sound/usb/usbaudio.h +++ b/sound/usb/usbaudio.h @@ -26,7 +26,7 @@ struct snd_usb_audio { struct usb_interface *pm_intf; u32 usb_id; struct mutex mutex; - unsigned int autosuspended:1; + unsigned int system_suspend; atomic_t active; atomic_t shutdown; atomic_t usage_count; -- GitLab From d56f5136b01020155b6b0a29f69d924687529bee Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Thu, 4 Jun 2020 15:16:52 +0200 Subject: [PATCH 1089/1971] KVM: let kvm_destroy_vm_debugfs clean up vCPU debugfs directories After commit 63d0434 ("KVM: x86: move kvm_create_vcpu_debugfs after last failure point") we are creating the pre-vCPU debugfs files after the creation of the vCPU file descriptor. This makes it possible for userspace to reach kvm_vcpu_release before kvm_create_vcpu_debugfs has finished. The vcpu->debugfs_dentry then does not have any associated inode anymore, and this causes a NULL-pointer dereference in debugfs_create_file. 
The solution is simply to avoid removing the files; they are cleaned up when the VM file descriptor is closed (and that must be after KVM_CREATE_VCPU returns). We can stop storing the dentry in struct kvm_vcpu too, because it is not needed anywhere after kvm_create_vcpu_debugfs returns. Reported-by: syzbot+705f4401d5a93a59b87d@syzkaller.appspotmail.com Fixes: 63d04348371b ("KVM: x86: move kvm_create_vcpu_debugfs after last failure point") Signed-off-by: Paolo Bonzini --- arch/arm64/kvm/arm.c | 5 ----- arch/x86/kvm/debugfs.c | 10 +++++----- include/linux/kvm_host.h | 3 +-- virt/kvm/kvm_main.c | 8 ++++---- 4 files changed, 10 insertions(+), 16 deletions(-) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 7a57381c05e8f..45276ed50dd6f 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -144,11 +144,6 @@ out_fail_alloc: return ret; } -int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu) -{ - return 0; -} - vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) { return VM_FAULT_SIGBUS; diff --git a/arch/x86/kvm/debugfs.c b/arch/x86/kvm/debugfs.c index 018aebce33ff4..7e818d64bb4d7 100644 --- a/arch/x86/kvm/debugfs.c +++ b/arch/x86/kvm/debugfs.c @@ -43,22 +43,22 @@ static int vcpu_get_tsc_scaling_frac_bits(void *data, u64 *val) DEFINE_SIMPLE_ATTRIBUTE(vcpu_tsc_scaling_frac_fops, vcpu_get_tsc_scaling_frac_bits, NULL, "%llu\n"); -void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu) +void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry) { - debugfs_create_file("tsc-offset", 0444, vcpu->debugfs_dentry, vcpu, + debugfs_create_file("tsc-offset", 0444, debugfs_dentry, vcpu, &vcpu_tsc_offset_fops); if (lapic_in_kernel(vcpu)) debugfs_create_file("lapic_timer_advance_ns", 0444, - vcpu->debugfs_dentry, vcpu, + debugfs_dentry, vcpu, &vcpu_timer_advance_ns_fops); if (kvm_has_tsc_control) { debugfs_create_file("tsc-scaling-ratio", 0444, - vcpu->debugfs_dentry, vcpu, + debugfs_dentry, vcpu, &vcpu_tsc_scaling_fops); debugfs_create_file("tsc-scaling-ratio-frac-bits", 0444, - vcpu->debugfs_dentry, vcpu, + debugfs_dentry, vcpu, &vcpu_tsc_scaling_frac_fops); } } diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index f43b59b1294c5..d38d6b9c24be0 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -318,7 +318,6 @@ struct kvm_vcpu { bool preempted; bool ready; struct kvm_vcpu_arch arch; - struct dentry *debugfs_dentry; }; static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu) @@ -888,7 +887,7 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu); #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS -void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu); +void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry); #endif int kvm_arch_hardware_enable(void); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 7fa1e38e16597..3577eb84eac0b 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -2973,7 +2973,6 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp) { struct kvm_vcpu *vcpu = filp->private_data; - debugfs_remove_recursive(vcpu->debugfs_dentry); kvm_put_kvm(vcpu->kvm); return 0; } @@ -3000,16 +2999,17 @@ static int create_vcpu_fd(struct kvm_vcpu *vcpu) static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) { #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS + struct dentry *debugfs_dentry; char dir_name[ITOA_MAX_LEN * 2]; if (!debugfs_initialized()) return; snprintf(dir_name, 
sizeof(dir_name), "vcpu%d", vcpu->vcpu_id); - vcpu->debugfs_dentry = debugfs_create_dir(dir_name, - vcpu->kvm->debugfs_dentry); + debugfs_dentry = debugfs_create_dir(dir_name, + vcpu->kvm->debugfs_dentry); - kvm_arch_create_vcpu_debugfs(vcpu); + kvm_arch_create_vcpu_debugfs(vcpu, debugfs_dentry); #endif } -- GitLab From 333ed74689b8fca097574124fef7fa0e3d7f79d4 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 3 Jun 2020 12:16:37 +0100 Subject: [PATCH 1090/1971] scs: Report SCS usage in bytes rather than number of entries Fix the SCS debug usage check so that we report the number of bytes used, rather than the number of entries. Fixes: 5bbaf9d1fcb9 ("scs: Add support for stack usage debugging") Reported-by: Sami Tolvanen Reviewed-by: Kees Cook Signed-off-by: Will Deacon --- kernel/scs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/scs.c b/kernel/scs.c index 222a7a9ad5439..5d4d9bbdec36c 100644 --- a/kernel/scs.c +++ b/kernel/scs.c @@ -74,7 +74,7 @@ static void scs_check_usage(struct task_struct *tsk) for (p = task_scs(tsk); p < __scs_magic(tsk); ++p) { if (!READ_ONCE_NOCHECK(*p)) break; - used++; + used += sizeof(*p); } while (used > curr) { -- GitLab From 5311ebfb612f08ec2a712a86d0af7ee2b423a9fc Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sun, 31 May 2020 13:00:15 +0200 Subject: [PATCH 1091/1971] arm64: debug: mark a function as __init to save some memory 'debug_monitors_init()' is only called via 'postcore_initcall'. It can be marked as __init to save a few bytes of memory. Signed-off-by: Christophe JAILLET Reviewed-by: Douglas Anderson Link: https://lore.kernel.org/r/20200531110015.598607-1-christophe.jaillet@wanadoo.fr Signed-off-by: Will Deacon --- arch/arm64/kernel/debug-monitors.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index 15e80c876d464..5df49366e9abe 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c @@ -130,7 +130,7 @@ static int clear_os_lock(unsigned int cpu) return 0; } -static int debug_monitors_init(void) +static int __init debug_monitors_init(void) { return cpuhp_setup_state(CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING, "arm64/debug_monitors:starting", -- GitLab From f4f6bd93fdab6f03e20fbbb9e5c6d65fc81b1588 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 3 Jun 2020 13:33:03 -0700 Subject: [PATCH 1092/1971] KVM: VMX: Always treat MSR_IA32_PERF_CAPABILITIES as a valid PMU MSR Unconditionally return true when querying the validity of MSR_IA32_PERF_CAPABILITIES so as to defer the validity check to intel_pmu_{get,set}_msr(), which can properly give the MSR a pass when the access is initiated from host userspace. The MSR is emulated so there is no underlying hardware dependency to worry about. 
Fixes: 27461da31089a ("KVM: x86/pmu: Support full width counting") Cc: Like Xu Signed-off-by: Sean Christopherson Message-Id: <20200603203303.28545-1-sean.j.christopherson@intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/pmu_intel.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index d33d890b605f8..bdcce65c7a1de 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -181,7 +181,7 @@ static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr) ret = pmu->version > 1; break; case MSR_IA32_PERF_CAPABILITIES: - ret = guest_cpuid_has(vcpu, X86_FEATURE_PDCM); + ret = 1; break; default: ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) || -- GitLab From 65b1891499b1feb74aaab514c3d34080dc6702d8 Mon Sep 17 00:00:00 2001 From: Xiaoyao Li Date: Thu, 4 Jun 2020 12:16:36 +0800 Subject: [PATCH 1093/1971] KVM: x86: Assign correct value to array.maxnent Delay the assignment of array.maxnent to use correct value for the case cpuid->nent > KVM_MAX_CPUID_ENTRIES. Fixes: e53c95e8d41e ("KVM: x86: Encapsulate CPUID entries and metadata in struct") Signed-off-by: Xiaoyao Li Message-Id: <20200604041636.1187-1-xiaoyao.li@intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/cpuid.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 253b8e875ccd8..3d88ddf781d0d 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -426,7 +426,7 @@ EXPORT_SYMBOL_GPL(kvm_set_cpu_caps); struct kvm_cpuid_array { struct kvm_cpuid_entry2 *entries; - const int maxnent; + int maxnent; int nent; }; @@ -870,7 +870,6 @@ int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid, struct kvm_cpuid_array array = { .nent = 0, - .maxnent = cpuid->nent, }; int r, i; @@ -887,6 +886,8 @@ int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid, if (!array.entries) return -ENOMEM; + array.maxnent = cpuid->nent; + for (i = 0; i < ARRAY_SIZE(funcs); i++) { r = get_cpuid_func(&array, funcs[i], type); if (r) -- GitLab From fa44b82eb8318facc56356afa897f3f3105172e1 Mon Sep 17 00:00:00 2001 From: Babu Moger Date: Tue, 12 May 2020 18:59:16 -0500 Subject: [PATCH 1094/1971] KVM: x86: Move MPK feature detection to common code Both Intel and AMD support (MPK) Memory Protection Key feature. Move the feature detection from VMX to the common code. It should work for both the platforms now. Signed-off-by: Babu Moger Message-Id: <158932795627.44260.15144185478040178638.stgit@naples-babu.amd.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/cpuid.c | 9 ++++++++- arch/x86/kvm/vmx/vmx.c | 4 ---- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 3d88ddf781d0d..9ca32d9699388 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -325,7 +325,7 @@ void kvm_set_cpu_caps(void) ); kvm_cpu_cap_mask(CPUID_7_ECX, - F(AVX512VBMI) | F(LA57) | 0 /*PKU*/ | 0 /*OSPKE*/ | F(RDPID) | + F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ | F(RDPID) | F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) | F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) | F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /*WAITPKG*/ @@ -334,6 +334,13 @@ void kvm_set_cpu_caps(void) if (cpuid_ecx(7) & F(LA57)) kvm_cpu_cap_set(X86_FEATURE_LA57); + /* + * PKU not yet implemented for shadow paging and requires OSPKE + * to be set on the host. 
Clear it if that is not the case + */ + if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE)) + kvm_cpu_cap_clear(X86_FEATURE_PKU); + kvm_cpu_cap_mask(CPUID_7_EDX, F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) | diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 170cc76a581f4..0e4f7ff54e6a4 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -7282,10 +7282,6 @@ static __init void vmx_set_cpu_caps(void) if (vmx_pt_mode_is_host_guest()) kvm_cpu_cap_check_and_set(X86_FEATURE_INTEL_PT); - /* PKU is not yet implemented for shadow paging. */ - if (enable_ept && boot_cpu_has(X86_FEATURE_OSPKE)) - kvm_cpu_cap_check_and_set(X86_FEATURE_PKU); - if (vmx_umip_emulated()) kvm_cpu_cap_set(X86_FEATURE_UMIP); -- GitLab From f5641d053d46a9a18fe13f2ecb4a7b4a66d9cdf7 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 3 Jun 2020 15:40:56 -0700 Subject: [PATCH 1095/1971] pwm: Add missing "CONFIG_" prefix MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The IS_ENABLED() use was missing the CONFIG_ prefix which would have lead to skipping this code. Fixes: 3ad1f3a33286 ("pwm: Implement some checks for lowlevel drivers") Signed-off-by: Kees Cook Reviewed-by: Uwe Kleine-König Signed-off-by: Thierry Reding --- drivers/pwm/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index bca04965bfe65..004b2ea9b5fde 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -121,7 +121,7 @@ static int pwm_device_request(struct pwm_device *pwm, const char *label) pwm->chip->ops->get_state(pwm->chip, pwm, &pwm->state); trace_pwm_get(pwm, &pwm->state); - if (IS_ENABLED(PWM_DEBUG)) + if (IS_ENABLED(CONFIG_PWM_DEBUG)) pwm->last = pwm->state; } -- GitLab From 3232dd02af65f2d01be641120d2a710176b0c7a7 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 3 Jun 2020 18:03:22 +0300 Subject: [PATCH 1096/1971] io_uring: fix {SQ,IO}POLL with unsupported opcodes IORING_SETUP_IOPOLL is defined only for read/write, other opcodes should be disallowed, otherwise it'll get an error as below. Also refuse open/close with SQPOLL, as the polling thread wouldn't know which file table to use. RIP: 0010:io_iopoll_getevents+0x111/0x5a0 Call Trace: ? _raw_spin_unlock_irqrestore+0x24/0x40 ? do_send_sig_info+0x64/0x90 io_iopoll_reap_events.part.0+0x5e/0xa0 io_ring_ctx_wait_and_kill+0x132/0x1c0 io_uring_release+0x20/0x30 __fput+0xcd/0x230 ____fput+0xe/0x10 task_work_run+0x67/0xa0 do_exit+0x353/0xb10 ? handle_mm_fault+0xd4/0x200 ? 
syscall_trace_enter+0x18c/0x2c0 do_group_exit+0x43/0xa0 __x64_sys_exit_group+0x18/0x20 do_syscall_64+0x60/0x1e0 entry_SYSCALL_64_after_hwframe+0x44/0xa9 Signed-off-by: Pavel Begunkov [axboe: allow provide/remove buffers and files update] Signed-off-by: Jens Axboe --- fs/io_uring.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/fs/io_uring.c b/fs/io_uring.c index 417b7105c6dc8..c627dd9ce0969 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2766,6 +2766,8 @@ static int __io_splice_prep(struct io_kiocb *req, if (req->flags & REQ_F_NEED_CLEANUP) return 0; + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) + return -EINVAL; sp->file_in = NULL; sp->len = READ_ONCE(sqe->len); @@ -2966,6 +2968,8 @@ static int io_fallocate_prep(struct io_kiocb *req, { if (sqe->ioprio || sqe->buf_index || sqe->rw_flags) return -EINVAL; + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) + return -EINVAL; req->sync.off = READ_ONCE(sqe->off); req->sync.len = READ_ONCE(sqe->addr); @@ -2991,6 +2995,8 @@ static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) const char __user *fname; int ret; + if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL))) + return -EINVAL; if (sqe->ioprio || sqe->buf_index) return -EINVAL; if (req->flags & REQ_F_FIXED_FILE) @@ -3024,6 +3030,8 @@ static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) size_t len; int ret; + if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL))) + return -EINVAL; if (sqe->ioprio || sqe->buf_index) return -EINVAL; if (req->flags & REQ_F_FIXED_FILE) @@ -3263,6 +3271,8 @@ static int io_epoll_ctl_prep(struct io_kiocb *req, #if defined(CONFIG_EPOLL) if (sqe->ioprio || sqe->buf_index) return -EINVAL; + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) + return -EINVAL; req->epoll.epfd = READ_ONCE(sqe->fd); req->epoll.op = READ_ONCE(sqe->len); @@ -3307,6 +3317,8 @@ static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) #if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU) if (sqe->ioprio || sqe->buf_index || sqe->off) return -EINVAL; + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) + return -EINVAL; req->madvise.addr = READ_ONCE(sqe->addr); req->madvise.len = READ_ONCE(sqe->len); @@ -3341,6 +3353,8 @@ static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { if (sqe->ioprio || sqe->buf_index || sqe->addr) return -EINVAL; + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) + return -EINVAL; req->fadvise.offset = READ_ONCE(sqe->off); req->fadvise.len = READ_ONCE(sqe->len); @@ -3374,6 +3388,8 @@ static int io_fadvise(struct io_kiocb *req, bool force_nonblock) static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) + return -EINVAL; if (sqe->ioprio || sqe->buf_index) return -EINVAL; if (req->flags & REQ_F_FIXED_FILE) @@ -3418,6 +3434,8 @@ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) */ req->work.flags |= IO_WQ_WORK_NO_CANCEL; + if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL))) + return -EINVAL; if (sqe->ioprio || sqe->off || sqe->addr || sqe->len || sqe->rw_flags || sqe->buf_index) return -EINVAL; -- GitLab From 25e72d1012b30bdff712b563e6141a4f311d28d6 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 3 Jun 2020 18:03:23 +0300 Subject: [PATCH 1097/1971] io_uring: do build_open_how() only once build_open_how() is just adjusting open_flags/mode. 
Do it once during prep. It looks better than storing raw values for the future. Signed-off-by: Pavel Begunkov Signed-off-by: Jens Axboe --- fs/io_uring.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index c627dd9ce0969..0c5b48467651b 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2993,6 +2993,7 @@ static int io_fallocate(struct io_kiocb *req, bool force_nonblock) static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { const char __user *fname; + u64 flags, mode; int ret; if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL))) @@ -3004,13 +3005,14 @@ static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) if (req->flags & REQ_F_NEED_CLEANUP) return 0; - req->open.dfd = READ_ONCE(sqe->fd); - req->open.how.mode = READ_ONCE(sqe->len); - fname = u64_to_user_ptr(READ_ONCE(sqe->addr)); - req->open.how.flags = READ_ONCE(sqe->open_flags); + mode = READ_ONCE(sqe->len); + flags = READ_ONCE(sqe->open_flags); if (force_o_largefile()) - req->open.how.flags |= O_LARGEFILE; + flags |= O_LARGEFILE; + req->open.how = build_open_how(flags, mode); + req->open.dfd = READ_ONCE(sqe->fd); + fname = u64_to_user_ptr(READ_ONCE(sqe->addr)); req->open.filename = getname(fname); if (IS_ERR(req->open.filename)) { ret = PTR_ERR(req->open.filename); @@ -3104,7 +3106,6 @@ err: static int io_openat(struct io_kiocb *req, bool force_nonblock) { - req->open.how = build_open_how(req->open.how.flags, req->open.how.mode); return io_openat2(req, force_nonblock); } -- GitLab From ec65fea5a8d7a82d3137dd2a44197eb577da111f Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 3 Jun 2020 18:03:24 +0300 Subject: [PATCH 1098/1971] io_uring: deduplicate io_openat{,2}_prep() io_openat_prep() and io_openat2_prep() are identical except for how struct open_how is built. Deduplicate it with a helper. 
Signed-off-by: Pavel Begunkov Signed-off-by: Jens Axboe --- fs/io_uring.c | 55 ++++++++++++++++++--------------------------------- 1 file changed, 19 insertions(+), 36 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 0c5b48467651b..4823a116daf22 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2990,26 +2990,21 @@ static int io_fallocate(struct io_kiocb *req, bool force_nonblock) return 0; } -static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) +static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { const char __user *fname; - u64 flags, mode; int ret; if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL))) return -EINVAL; - if (sqe->ioprio || sqe->buf_index) + if (unlikely(sqe->ioprio || sqe->buf_index)) return -EINVAL; - if (req->flags & REQ_F_FIXED_FILE) + if (unlikely(req->flags & REQ_F_FIXED_FILE)) return -EBADF; - if (req->flags & REQ_F_NEED_CLEANUP) - return 0; - mode = READ_ONCE(sqe->len); - flags = READ_ONCE(sqe->open_flags); - if (force_o_largefile()) - flags |= O_LARGEFILE; - req->open.how = build_open_how(flags, mode); + /* open.how should be already initialised */ + if (!(req->open.how.flags & O_PATH) && force_o_largefile()) + req->open.how.flags |= O_LARGEFILE; req->open.dfd = READ_ONCE(sqe->fd); fname = u64_to_user_ptr(READ_ONCE(sqe->addr)); @@ -3019,33 +3014,33 @@ static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) req->open.filename = NULL; return ret; } - req->open.nofile = rlimit(RLIMIT_NOFILE); req->flags |= REQ_F_NEED_CLEANUP; return 0; } +static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) +{ + u64 flags, mode; + + if (req->flags & REQ_F_NEED_CLEANUP) + return 0; + mode = READ_ONCE(sqe->len); + flags = READ_ONCE(sqe->open_flags); + req->open.how = build_open_how(flags, mode); + return __io_openat_prep(req, sqe); +} + static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { struct open_how __user *how; - const char __user *fname; size_t len; int ret; - if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL))) - return -EINVAL; - if (sqe->ioprio || sqe->buf_index) - return -EINVAL; - if (req->flags & REQ_F_FIXED_FILE) - return -EBADF; if (req->flags & REQ_F_NEED_CLEANUP) return 0; - - req->open.dfd = READ_ONCE(sqe->fd); - fname = u64_to_user_ptr(READ_ONCE(sqe->addr)); how = u64_to_user_ptr(READ_ONCE(sqe->addr2)); len = READ_ONCE(sqe->len); - if (len < OPEN_HOW_SIZE_VER0) return -EINVAL; @@ -3054,19 +3049,7 @@ static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) if (ret) return ret; - if (!(req->open.how.flags & O_PATH) && force_o_largefile()) - req->open.how.flags |= O_LARGEFILE; - - req->open.filename = getname(fname); - if (IS_ERR(req->open.filename)) { - ret = PTR_ERR(req->open.filename); - req->open.filename = NULL; - return ret; - } - - req->open.nofile = rlimit(RLIMIT_NOFILE); - req->flags |= REQ_F_NEED_CLEANUP; - return 0; + return __io_openat_prep(req, sqe); } static int io_openat2(struct io_kiocb *req, bool force_nonblock) -- GitLab From d2b6f48b691ed67569786c332f0173b918d3fd1b Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 3 Jun 2020 18:03:25 +0300 Subject: [PATCH 1099/1971] io_uring: move send/recv IOPOLL check into prep Fail recv/send in case of IORING_SETUP_IOPOLL earlier during prep, so it'd be done only once. 
Removes duplication as well Signed-off-by: Pavel Begunkov Signed-off-by: Jens Axboe --- fs/io_uring.c | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 4823a116daf22..d2bd82387a4ca 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -3556,6 +3556,9 @@ static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) struct io_async_ctx *io = req->io; int ret; + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) + return -EINVAL; + sr->msg_flags = READ_ONCE(sqe->msg_flags); sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr)); sr->len = READ_ONCE(sqe->len); @@ -3585,9 +3588,6 @@ static int io_sendmsg(struct io_kiocb *req, bool force_nonblock) struct socket *sock; int ret; - if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) - return -EINVAL; - sock = sock_from_file(req->file, &ret); if (sock) { struct io_async_ctx io; @@ -3641,9 +3641,6 @@ static int io_send(struct io_kiocb *req, bool force_nonblock) struct socket *sock; int ret; - if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) - return -EINVAL; - sock = sock_from_file(req->file, &ret); if (sock) { struct io_sr_msg *sr = &req->sr_msg; @@ -3796,6 +3793,9 @@ static int io_recvmsg_prep(struct io_kiocb *req, struct io_async_ctx *io = req->io; int ret; + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) + return -EINVAL; + sr->msg_flags = READ_ONCE(sqe->msg_flags); sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr)); sr->len = READ_ONCE(sqe->len); @@ -3824,9 +3824,6 @@ static int io_recvmsg(struct io_kiocb *req, bool force_nonblock) struct socket *sock; int ret, cflags = 0; - if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) - return -EINVAL; - sock = sock_from_file(req->file, &ret); if (sock) { struct io_buffer *kbuf; @@ -3888,9 +3885,6 @@ static int io_recv(struct io_kiocb *req, bool force_nonblock) struct socket *sock; int ret, cflags = 0; - if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) - return -EINVAL; - sock = sock_from_file(req->file, &ret); if (sock) { struct io_sr_msg *sr = &req->sr_msg; -- GitLab From dddb3e26f6d88c5344d28cb5ff9d3d6fa05c4f7a Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 4 Jun 2020 11:27:01 -0600 Subject: [PATCH 1100/1971] io_uring: re-set iov base/len for buffer select retry We already have the buffer selected, but we should set the iter list again. 
Cc: stable@vger.kernel.org # v5.7 Signed-off-by: Jens Axboe --- fs/io_uring.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index d2bd82387a4ca..70f0f2f940fbd 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2363,8 +2363,14 @@ static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov, static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov, bool needs_lock) { - if (req->flags & REQ_F_BUFFER_SELECTED) + if (req->flags & REQ_F_BUFFER_SELECTED) { + struct io_buffer *kbuf; + + kbuf = (struct io_buffer *) (unsigned long) req->rw.addr; + iov[0].iov_base = u64_to_user_ptr(kbuf->addr); + iov[0].iov_len = kbuf->len; return 0; + } if (!req->rw.len) return 0; else if (req->rw.len > 1) -- GitLab From fe2b73dba47fb6d6922df1ad44e83b1754d5ed4d Mon Sep 17 00:00:00 2001 From: Xing Li Date: Sat, 23 May 2020 15:56:28 +0800 Subject: [PATCH 1101/1971] KVM: MIPS: Define KVM_ENTRYHI_ASID to cpu_asid_mask(&boot_cpu_data) The code in decode_config4() of arch/mips/kernel/cpu-probe.c asid_mask = MIPS_ENTRYHI_ASID; if (config4 & MIPS_CONF4_AE) asid_mask |= MIPS_ENTRYHI_ASIDX; set_cpu_asid_mask(c, asid_mask); set asid_mask to cpuinfo->asid_mask. So in order to support variable ASID_MASK, KVM_ENTRYHI_ASID should also be changed to cpu_asid_mask(&boot_cpu_data). Cc: Stable #4.9+ Reviewed-by: Aleksandar Markovic Signed-off-by: Xing Li [Huacai: Change current_cpu_data to boot_cpu_data for optimization] Signed-off-by: Huacai Chen Message-Id: <1590220602-3547-2-git-send-email-chenhc@lemote.com> Signed-off-by: Paolo Bonzini --- arch/mips/include/asm/kvm_host.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index e28b5a946e26c..609fdcd5b24e9 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -277,7 +277,7 @@ enum emulation_result { #define MIPS3_PG_FRAME 0x3fffffc0 #define VPN2_MASK 0xffffe000 -#define KVM_ENTRYHI_ASID MIPS_ENTRYHI_ASID +#define KVM_ENTRYHI_ASID cpu_asid_mask(&boot_cpu_data) #define TLB_IS_GLOBAL(x) ((x).tlb_lo[0] & (x).tlb_lo[1] & ENTRYLO_G) #define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK) #define TLB_ASID(x) ((x).tlb_hi & KVM_ENTRYHI_ASID) -- GitLab From 5816c76dea116a458f1932eefe064e35403248eb Mon Sep 17 00:00:00 2001 From: Xing Li Date: Sat, 23 May 2020 15:56:29 +0800 Subject: [PATCH 1102/1971] KVM: MIPS: Fix VPN2_MASK definition for variable cpu_vmbits If a CPU support more than 32bit vmbits (which is true for 64bit CPUs), VPN2_MASK set to fixed 0xffffe000 will lead to a wrong EntryHi in some functions such as _kvm_mips_host_tlb_inv(). The cpu_vmbits definition of 32bit CPU in cpu-features.h is 31, so we still use the old definition. 
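A worked example makes the difference concrete (cpu_vmbits == 48 is an assumed value, used here only for illustration):

	/* 64-bit guest, cpu_vmbits == 48 (illustrative): */
	VPN2_MASK = GENMASK(47, 13) = 0x0000ffffffffe000	/* bits 47..13 */
	/* old fixed definition */    0x00000000ffffe000	/* bits 31..13 only */

	/* 32-bit case: cpu_vmbits is 31, and GENMASK(30, 13) == 0x7fffe000
	 * would lose bit 31 of VPN2, so the old constant is kept there.
	 */

With the old constant, EntryHi bits above 31 were silently dropped when building or comparing TLB entries, which is what produced the wrong EntryHi mentioned above.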
Cc: Stable Reviewed-by: Aleksandar Markovic Signed-off-by: Xing Li [Huacai: Improve commit messages] Signed-off-by: Huacai Chen Message-Id: <1590220602-3547-3-git-send-email-chenhc@lemote.com> Signed-off-by: Paolo Bonzini --- arch/mips/include/asm/kvm_host.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index 609fdcd5b24e9..31c84d86c8f6e 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -276,7 +276,11 @@ enum emulation_result { #define MIPS3_PG_SHIFT 6 #define MIPS3_PG_FRAME 0x3fffffc0 +#if defined(CONFIG_64BIT) +#define VPN2_MASK GENMASK(cpu_vmbits - 1, 13) +#else #define VPN2_MASK 0xffffe000 +#endif #define KVM_ENTRYHI_ASID cpu_asid_mask(&boot_cpu_data) #define TLB_IS_GLOBAL(x) ((x).tlb_lo[0] & (x).tlb_lo[1] & ENTRYLO_G) #define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK) -- GitLab From 210b4b9112b699df4c33273bf7b02d9debd2f35e Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Sat, 23 May 2020 15:56:30 +0800 Subject: [PATCH 1103/1971] KVM: MIPS: Increase KVM_MAX_VCPUS and KVM_USER_MEM_SLOTS to 16 Loongson-3 based machines can have as many as 16 CPUs, and so does memory slots, so increase KVM_MAX_VCPUS and KVM_USER_MEM_SLOTS to 16. Reviewed-by: Aleksandar Markovic Signed-off-by: Huacai Chen Co-developed-by: Jiaxun Yang Message-Id: <1590220602-3547-4-git-send-email-chenhc@lemote.com> Signed-off-by: Paolo Bonzini --- arch/mips/include/asm/kvm_host.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index 31c84d86c8f6e..bee961997de35 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -78,8 +78,8 @@ #define KVM_REG_MIPS_CP0_KSCRATCH6 MIPS_CP0_64(31, 7) -#define KVM_MAX_VCPUS 8 -#define KVM_USER_MEM_SLOTS 8 +#define KVM_MAX_VCPUS 16 +#define KVM_USER_MEM_SLOTS 16 /* memory slots that does not exposed to userspace */ #define KVM_PRIVATE_MEM_SLOTS 0 -- GitLab From bf10efbb81bf0c7ae6ebac3320b5b40c323463f3 Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Sat, 23 May 2020 15:56:31 +0800 Subject: [PATCH 1104/1971] KVM: MIPS: Add EVENTFD support which is needed by VHOST Add EVENTFD support for KVM/MIPS, which is needed by VHOST. Tested on Loongson-3 platform. 
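To show what the capability is used for, here is a minimal userspace sketch of registering an ioeventfd against a VM fd. It relies only on the generic KVM ABI from <linux/kvm.h>; the doorbell address and length are made-up example values and error handling is reduced to the bare minimum:

	#include <sys/eventfd.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/*
	 * Register an MMIO doorbell: a 4-byte guest write to 'gpa' signals the
	 * eventfd in the kernel without a userspace exit. vhost consumes such
	 * eventfds as its kick mechanism, which is why VHOST needs this support.
	 */
	static int register_doorbell(int vm_fd, __u64 gpa)
	{
		struct kvm_ioeventfd ioev = {
			.addr = gpa,
			.len  = 4,
		};
		int efd = eventfd(0, 0);

		if (efd < 0)
			return -1;
		ioev.fd = efd;
		if (ioctl(vm_fd, KVM_IOEVENTFD, &ioev) < 0)
			return -1;
		return efd;
	}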
Reviewed-by: Aleksandar Markovic Signed-off-by: Huacai Chen Co-developed-by: Jiaxun Yang Message-Id: <1590220602-3547-5-git-send-email-chenhc@lemote.com> Signed-off-by: Paolo Bonzini --- arch/mips/kvm/Kconfig | 1 + arch/mips/kvm/Makefile | 2 +- arch/mips/kvm/trap_emul.c | 3 +++ arch/mips/kvm/vz.c | 3 +++ 4 files changed, 8 insertions(+), 1 deletion(-) diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig index b91d145aa2d57..d697752a57233 100644 --- a/arch/mips/kvm/Kconfig +++ b/arch/mips/kvm/Kconfig @@ -22,6 +22,7 @@ config KVM select EXPORT_UASM select PREEMPT_NOTIFIERS select KVM_GENERIC_DIRTYLOG_READ_PROTECT + select HAVE_KVM_EVENTFD select HAVE_KVM_VCPU_ASYNC_IOCTL select KVM_MMIO select MMU_NOTIFIER diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile index 01affc1d21c53..0a3cef6153fd1 100644 --- a/arch/mips/kvm/Makefile +++ b/arch/mips/kvm/Makefile @@ -2,7 +2,7 @@ # Makefile for KVM support for MIPS # -common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o) +common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o eventfd.o) EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c index 5a11e83dffe67..f464506b1fea7 100644 --- a/arch/mips/kvm/trap_emul.c +++ b/arch/mips/kvm/trap_emul.c @@ -529,6 +529,9 @@ static int kvm_trap_emul_check_extension(struct kvm *kvm, long ext) case KVM_CAP_MIPS_TE: r = 1; break; + case KVM_CAP_IOEVENTFD: + r = 1; + break; default: r = 0; break; diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c index 51f51009a53f5..cc7fdbb9c1082 100644 --- a/arch/mips/kvm/vz.c +++ b/arch/mips/kvm/vz.c @@ -2927,6 +2927,9 @@ static int kvm_vz_check_extension(struct kvm *kvm, long ext) r = 2; break; #endif + case KVM_CAP_IOEVENTFD: + r = 1; + break; default: r = 0; break; -- GitLab From 8bf31295030e35ef1e9e1b25b02041423f8291d3 Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Sat, 23 May 2020 15:56:32 +0800 Subject: [PATCH 1105/1971] KVM: MIPS: Use lddir/ldpte instructions to lookup gpa_mm.pgd Loongson-3 can use lddir/ldpte instuctions to accelerate page table walking, so use them to lookup gpa_mm.pgd. 
Reviewed-by: Aleksandar Markovic Signed-off-by: Huacai Chen Co-developed-by: Jiaxun Yang Message-Id: <1590220602-3547-6-git-send-email-chenhc@lemote.com> Signed-off-by: Paolo Bonzini --- arch/mips/kvm/entry.c | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c index 16e1c93b484f4..fd716942e3029 100644 --- a/arch/mips/kvm/entry.c +++ b/arch/mips/kvm/entry.c @@ -56,6 +56,7 @@ #define C0_BADVADDR 8, 0 #define C0_BADINSTR 8, 1 #define C0_BADINSTRP 8, 2 +#define C0_PGD 9, 7 #define C0_ENTRYHI 10, 0 #define C0_GUESTCTL1 10, 4 #define C0_STATUS 12, 0 @@ -307,7 +308,10 @@ static void *kvm_mips_build_enter_guest(void *addr) #ifdef CONFIG_KVM_MIPS_VZ /* Save normal linux process pgd (VZ guarantees pgd_reg is set) */ - UASM_i_MFC0(&p, K0, c0_kscratch(), pgd_reg); + if (cpu_has_ldpte) + UASM_i_MFC0(&p, K0, C0_PWBASE); + else + UASM_i_MFC0(&p, K0, c0_kscratch(), pgd_reg); UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_pgd), K1); /* @@ -469,8 +473,10 @@ void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler) u32 *p = addr; struct uasm_label labels[2]; struct uasm_reloc relocs[2]; +#ifndef CONFIG_CPU_LOONGSON64 struct uasm_label *l = labels; struct uasm_reloc *r = relocs; +#endif memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); @@ -490,6 +496,16 @@ void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler) */ preempt_disable(); +#ifdef CONFIG_CPU_LOONGSON64 + UASM_i_MFC0(&p, K1, C0_PGD); + uasm_i_lddir(&p, K0, K1, 3); /* global page dir */ +#ifndef __PAGETABLE_PMD_FOLDED + uasm_i_lddir(&p, K1, K0, 1); /* middle page dir */ +#endif + uasm_i_ldpte(&p, K1, 0); /* even */ + uasm_i_ldpte(&p, K1, 1); /* odd */ + uasm_i_tlbwr(&p); +#else /* * Now for the actual refill bit. A lot of this can be common with the * Linux TLB refill handler, however we don't need to handle so many @@ -512,6 +528,7 @@ void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler) build_get_ptep(&p, K0, K1); build_update_entries(&p, K0, K1); build_tlb_write_entry(&p, &l, &r, tlb_random); +#endif preempt_enable(); -- GitLab From 3210e2c279fee1076978b49988acdd935a6f7435 Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Sat, 23 May 2020 15:56:33 +0800 Subject: [PATCH 1106/1971] KVM: MIPS: Introduce and use cpu_guest_has_ldpte Loongson-3 has lddir/ldpte instructions and their related CP0 registers are the same as HTW. So we introduce a cpu_guest_has_ldpte flag and use it to indicate whether we need to save/restore HTW related CP0 registers (PWBase, PWSize, PWField and PWCtl). 
Acked-by: Thomas Bogendoerfer Reviewed-by: Aleksandar Markovic Signed-off-by: Huacai Chen Co-developed-by: Jiaxun Yang Message-Id: <1590220602-3547-7-git-send-email-chenhc@lemote.com> Signed-off-by: Paolo Bonzini --- arch/mips/include/asm/cpu-features.h | 3 +++ arch/mips/kernel/cpu-probe.c | 4 +++- arch/mips/kvm/vz.c | 26 +++++++++++++------------- 3 files changed, 19 insertions(+), 14 deletions(-) diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h index caecbae4b5997..724dfddcab92d 100644 --- a/arch/mips/include/asm/cpu-features.h +++ b/arch/mips/include/asm/cpu-features.h @@ -682,6 +682,9 @@ #ifndef cpu_guest_has_htw #define cpu_guest_has_htw (cpu_data[0].guest.options & MIPS_CPU_HTW) #endif +#ifndef cpu_guest_has_ldpte +#define cpu_guest_has_ldpte (cpu_data[0].guest.options & MIPS_CPU_LDPTE) +#endif #ifndef cpu_guest_has_mvh #define cpu_guest_has_mvh (cpu_data[0].guest.options & MIPS_CPU_MVH) #endif diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 6b93162d7c5a2..f0e7f2d7bd9a5 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -2017,8 +2017,10 @@ static inline void decode_cpucfg(struct cpuinfo_mips *c) if (cfg2 & LOONGSON_CFG2_LEXT2) c->ases |= MIPS_ASE_LOONGSON_EXT2; - if (cfg2 & LOONGSON_CFG2_LSPW) + if (cfg2 & LOONGSON_CFG2_LSPW) { c->options |= MIPS_CPU_LDPTE; + c->guest.options |= MIPS_CPU_LDPTE; + } if (cfg3 & LOONGSON_CFG3_LCAMP) c->ases |= MIPS_ASE_LOONGSON_CAM; diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c index cc7fdbb9c1082..6e1b380e0b9cc 100644 --- a/arch/mips/kvm/vz.c +++ b/arch/mips/kvm/vz.c @@ -1706,7 +1706,7 @@ static unsigned long kvm_vz_num_regs(struct kvm_vcpu *vcpu) ret += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig); if (cpu_guest_has_segments) ret += ARRAY_SIZE(kvm_vz_get_one_regs_segments); - if (cpu_guest_has_htw) + if (cpu_guest_has_htw || cpu_guest_has_ldpte) ret += ARRAY_SIZE(kvm_vz_get_one_regs_htw); if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar) ret += 1 + ARRAY_SIZE(vcpu->arch.maar); @@ -1755,7 +1755,7 @@ static int kvm_vz_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices) return -EFAULT; indices += ARRAY_SIZE(kvm_vz_get_one_regs_segments); } - if (cpu_guest_has_htw) { + if (cpu_guest_has_htw || cpu_guest_has_ldpte) { if (copy_to_user(indices, kvm_vz_get_one_regs_htw, sizeof(kvm_vz_get_one_regs_htw))) return -EFAULT; @@ -1878,17 +1878,17 @@ static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu, *v = read_gc0_segctl2(); break; case KVM_REG_MIPS_CP0_PWBASE: - if (!cpu_guest_has_htw) + if (!cpu_guest_has_htw && !cpu_guest_has_ldpte) return -EINVAL; *v = read_gc0_pwbase(); break; case KVM_REG_MIPS_CP0_PWFIELD: - if (!cpu_guest_has_htw) + if (!cpu_guest_has_htw && !cpu_guest_has_ldpte) return -EINVAL; *v = read_gc0_pwfield(); break; case KVM_REG_MIPS_CP0_PWSIZE: - if (!cpu_guest_has_htw) + if (!cpu_guest_has_htw && !cpu_guest_has_ldpte) return -EINVAL; *v = read_gc0_pwsize(); break; @@ -1896,7 +1896,7 @@ static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu, *v = (long)read_gc0_wired(); break; case KVM_REG_MIPS_CP0_PWCTL: - if (!cpu_guest_has_htw) + if (!cpu_guest_has_htw && !cpu_guest_has_ldpte) return -EINVAL; *v = read_gc0_pwctl(); break; @@ -2101,17 +2101,17 @@ static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu, write_gc0_segctl2(v); break; case KVM_REG_MIPS_CP0_PWBASE: - if (!cpu_guest_has_htw) + if (!cpu_guest_has_htw && !cpu_guest_has_ldpte) return -EINVAL; write_gc0_pwbase(v); break; case KVM_REG_MIPS_CP0_PWFIELD: - if (!cpu_guest_has_htw) + 
if (!cpu_guest_has_htw && !cpu_guest_has_ldpte) return -EINVAL; write_gc0_pwfield(v); break; case KVM_REG_MIPS_CP0_PWSIZE: - if (!cpu_guest_has_htw) + if (!cpu_guest_has_htw && !cpu_guest_has_ldpte) return -EINVAL; write_gc0_pwsize(v); break; @@ -2119,7 +2119,7 @@ static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu, change_gc0_wired(MIPSR6_WIRED_WIRED, v); break; case KVM_REG_MIPS_CP0_PWCTL: - if (!cpu_guest_has_htw) + if (!cpu_guest_has_htw && !cpu_guest_has_ldpte) return -EINVAL; write_gc0_pwctl(v); break; @@ -2580,7 +2580,7 @@ static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu) } /* restore HTW registers */ - if (cpu_guest_has_htw) { + if (cpu_guest_has_htw || cpu_guest_has_ldpte) { kvm_restore_gc0_pwbase(cop0); kvm_restore_gc0_pwfield(cop0); kvm_restore_gc0_pwsize(cop0); @@ -2685,8 +2685,8 @@ static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu) } /* save HTW registers if enabled in guest */ - if (cpu_guest_has_htw && - kvm_read_sw_gc0_config3(cop0) & MIPS_CONF3_PW) { + if (cpu_guest_has_ldpte || (cpu_guest_has_htw && + kvm_read_sw_gc0_config3(cop0) & MIPS_CONF3_PW)) { kvm_save_gc0_pwbase(cop0); kvm_save_gc0_pwfield(cop0); kvm_save_gc0_pwsize(cop0); -- GitLab From 52c07e1cbb6e16a0fe70646b4bc63f714caa0f3b Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Sat, 23 May 2020 15:56:34 +0800 Subject: [PATCH 1107/1971] KVM: MIPS: Use root tlb to control guest's CCA for Loongson-3 KVM guest has two levels of address translation: guest tlb translates GVA to GPA, and root tlb translates GPA to HPA. By default guest's CCA is controlled by guest tlb, but Loongson-3 maintains all cache coherency by hardware (including multi-core coherency and I/O DMA coherency) so it prefers all guest mappings be cacheable mappings. Thus, we use root tlb to control guest's CCA for Loongson-3. Reviewed-by: Aleksandar Markovic Signed-off-by: Huacai Chen Co-developed-by: Jiaxun Yang Message-Id: <1590220602-3547-8-git-send-email-chenhc@lemote.com> Signed-off-by: Paolo Bonzini --- arch/mips/kvm/vz.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c index 6e1b380e0b9cc..6e62154ed09b6 100644 --- a/arch/mips/kvm/vz.c +++ b/arch/mips/kvm/vz.c @@ -2871,6 +2871,12 @@ static int kvm_vz_hardware_enable(void) if (cpu_has_guestctl2) clear_c0_guestctl2(0x3f << 10); +#ifdef CONFIG_CPU_LOONGSON64 + /* Control guest CCA attribute */ + if (cpu_has_csr()) + csr_writel(csr_readl(0xffffffec) | 0x1, 0xffffffec); +#endif + return 0; } -- GitLab From 49bb96003fb19b77a0116d9330bdfd61aa41244b Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Sat, 23 May 2020 15:56:35 +0800 Subject: [PATCH 1108/1971] KVM: MIPS: Let indexed cacheops cause guest exit on Loongson-3 Loongson-3's indexed cache operations need a node-id in the address, but in KVM guest the node-id may be incorrect. So, let indexed cache operations cause guest exit on Loongson-3. 
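The change below reduces to the following decision, shown here as a plain C sketch using the helpers visible in the hunk (the real code lives in kvm_vz_hardware_enable()):

/* Sketch of the policy the hunk below implements. */
static void setup_cache_guest_exit_sketch(void)
{
	if (!cpu_has_guestctl0ext)
		return;

	if (current_cpu_type() == CPU_LOONGSON64)
		/* Indexed CACHE ops need a node id: make them exit to root. */
		clear_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
	else
		/* Other cores can execute guest CACHE ops natively. */
		set_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
}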
Reviewed-by: Aleksandar Markovic Signed-off-by: Huacai Chen Co-developed-by: Jiaxun Yang Message-Id: <1590220602-3547-9-git-send-email-chenhc@lemote.com> Signed-off-by: Paolo Bonzini --- arch/mips/kvm/vz.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c index 6e62154ed09b6..20f69855f1649 100644 --- a/arch/mips/kvm/vz.c +++ b/arch/mips/kvm/vz.c @@ -2853,8 +2853,12 @@ static int kvm_vz_hardware_enable(void) write_c0_guestctl0(MIPS_GCTL0_CP0 | (MIPS_GCTL0_AT_GUEST << MIPS_GCTL0_AT_SHIFT) | MIPS_GCTL0_CG | MIPS_GCTL0_CF); - if (cpu_has_guestctl0ext) - set_c0_guestctl0ext(MIPS_GCTL0EXT_CGI); + if (cpu_has_guestctl0ext) { + if (current_cpu_type() != CPU_LOONGSON64) + set_c0_guestctl0ext(MIPS_GCTL0EXT_CGI); + else + clear_c0_guestctl0ext(MIPS_GCTL0EXT_CGI); + } if (cpu_has_guestid) { write_c0_guestctl1(0); -- GitLab From 3f51d8fcac7a0925fb1222d932cb3baa776b1e79 Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Sat, 23 May 2020 15:56:36 +0800 Subject: [PATCH 1109/1971] KVM: MIPS: Add more types of virtual interrupts In current implementation, MIPS KVM uses IP2, IP3, IP4 and IP7 for external interrupt, two kinds of IPIs and timer interrupt respectively, but Loongson-3 based machines prefer to use IP2, IP3, IP6 and IP7 for two kinds of external interrupts, IPI and timer interrupt. So we define two priority-irq mapping tables: kvm_loongson3_priority_to_irq[] for Loongson-3, and kvm_default_priority_to_irq[] for others. The virtual interrupt infrastructure is updated to deliver all types of interrupts from IP2, IP3, IP4, IP6 and IP7. Reviewed-by: Aleksandar Markovic Signed-off-by: Huacai Chen Co-developed-by: Jiaxun Yang Message-Id: <1590220602-3547-10-git-send-email-chenhc@lemote.com> Signed-off-by: Paolo Bonzini --- arch/mips/kvm/interrupt.c | 93 ++++++--------------------------------- arch/mips/kvm/interrupt.h | 14 +++--- arch/mips/kvm/mips.c | 40 +++++++++++++++-- arch/mips/kvm/vz.c | 53 ++++------------------ 4 files changed, 67 insertions(+), 133 deletions(-) diff --git a/arch/mips/kvm/interrupt.c b/arch/mips/kvm/interrupt.c index 7257e8b6f5a92..d28c2c9c343e7 100644 --- a/arch/mips/kvm/interrupt.c +++ b/arch/mips/kvm/interrupt.c @@ -61,27 +61,8 @@ void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, * the EXC code will be set when we are actually * delivering the interrupt: */ - switch (intr) { - case 2: - kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0)); - /* Queue up an INT exception for the core */ - kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IO); - break; - - case 3: - kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1)); - kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_1); - break; - - case 4: - kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2)); - kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_2); - break; - - default: - break; - } - + kvm_set_c0_guest_cause(vcpu->arch.cop0, 1 << (intr + 8)); + kvm_mips_queue_irq(vcpu, kvm_irq_to_priority(intr)); } void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu, @@ -89,26 +70,8 @@ void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu, { int intr = (int)irq->irq; - switch (intr) { - case -2: - kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0)); - kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IO); - break; - - case -3: - kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1)); - kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1); - break; - - case -4: - kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2)); - kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2); - break; - - default: - break; - } - + 
kvm_clear_c0_guest_cause(vcpu->arch.cop0, 1 << (-intr + 8)); + kvm_mips_dequeue_irq(vcpu, kvm_irq_to_priority(-intr)); } /* Deliver the interrupt of the corresponding priority, if possible. */ @@ -116,50 +79,20 @@ int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority, u32 cause) { int allowed = 0; - u32 exccode; + u32 exccode, ie; struct kvm_vcpu_arch *arch = &vcpu->arch; struct mips_coproc *cop0 = vcpu->arch.cop0; - switch (priority) { - case MIPS_EXC_INT_TIMER: - if ((kvm_read_c0_guest_status(cop0) & ST0_IE) - && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL))) - && (kvm_read_c0_guest_status(cop0) & IE_IRQ5)) { - allowed = 1; - exccode = EXCCODE_INT; - } - break; - - case MIPS_EXC_INT_IO: - if ((kvm_read_c0_guest_status(cop0) & ST0_IE) - && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL))) - && (kvm_read_c0_guest_status(cop0) & IE_IRQ0)) { - allowed = 1; - exccode = EXCCODE_INT; - } - break; - - case MIPS_EXC_INT_IPI_1: - if ((kvm_read_c0_guest_status(cop0) & ST0_IE) - && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL))) - && (kvm_read_c0_guest_status(cop0) & IE_IRQ1)) { - allowed = 1; - exccode = EXCCODE_INT; - } - break; - - case MIPS_EXC_INT_IPI_2: - if ((kvm_read_c0_guest_status(cop0) & ST0_IE) - && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL))) - && (kvm_read_c0_guest_status(cop0) & IE_IRQ2)) { - allowed = 1; - exccode = EXCCODE_INT; - } - break; + if (priority == MIPS_EXC_MAX) + return 0; - default: - break; + ie = 1 << (kvm_priority_to_irq[priority] + 8); + if ((kvm_read_c0_guest_status(cop0) & ST0_IE) + && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL))) + && (kvm_read_c0_guest_status(cop0) & ie)) { + allowed = 1; + exccode = EXCCODE_INT; } /* Are we allowed to deliver the interrupt ??? 
*/ diff --git a/arch/mips/kvm/interrupt.h b/arch/mips/kvm/interrupt.h index 3bf0a49725e81..c3e878ca3e07f 100644 --- a/arch/mips/kvm/interrupt.h +++ b/arch/mips/kvm/interrupt.h @@ -21,11 +21,12 @@ #define MIPS_EXC_NMI 5 #define MIPS_EXC_MCHK 6 #define MIPS_EXC_INT_TIMER 7 -#define MIPS_EXC_INT_IO 8 -#define MIPS_EXC_EXECUTE 9 -#define MIPS_EXC_INT_IPI_1 10 -#define MIPS_EXC_INT_IPI_2 11 -#define MIPS_EXC_MAX 12 +#define MIPS_EXC_INT_IO_1 8 +#define MIPS_EXC_INT_IO_2 9 +#define MIPS_EXC_EXECUTE 10 +#define MIPS_EXC_INT_IPI_1 11 +#define MIPS_EXC_INT_IPI_2 12 +#define MIPS_EXC_MAX 13 /* XXXSL More to follow */ #define C_TI (_ULCAST_(1) << 30) @@ -38,6 +39,9 @@ #define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE (0) #endif +extern u32 *kvm_priority_to_irq; +u32 kvm_irq_to_priority(u32 irq); + void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority); void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority); int kvm_mips_pending_timer(struct kvm_vcpu *vcpu); diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 3b0148c99c0df..6b435c6e5018a 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -490,7 +490,10 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, int intr = (int)irq->irq; struct kvm_vcpu *dvcpu = NULL; - if (intr == 3 || intr == -3 || intr == 4 || intr == -4) + if (intr == kvm_priority_to_irq[MIPS_EXC_INT_IPI_1] || + intr == kvm_priority_to_irq[MIPS_EXC_INT_IPI_2] || + intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_1]) || + intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_2])) kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu, (int)intr); @@ -499,10 +502,10 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, else dvcpu = vcpu->kvm->vcpus[irq->cpu]; - if (intr == 2 || intr == 3 || intr == 4) { + if (intr == 2 || intr == 3 || intr == 4 || intr == 6) { kvm_mips_callbacks->queue_io_int(dvcpu, irq); - } else if (intr == -2 || intr == -3 || intr == -4) { + } else if (intr == -2 || intr == -3 || intr == -4 || intr == -6) { kvm_mips_callbacks->dequeue_io_int(dvcpu, irq); } else { kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__, @@ -1620,6 +1623,34 @@ static struct notifier_block kvm_mips_csr_die_notifier = { .notifier_call = kvm_mips_csr_die_notify, }; +static u32 kvm_default_priority_to_irq[MIPS_EXC_MAX] = { + [MIPS_EXC_INT_TIMER] = C_IRQ5, + [MIPS_EXC_INT_IO_1] = C_IRQ0, + [MIPS_EXC_INT_IPI_1] = C_IRQ1, + [MIPS_EXC_INT_IPI_2] = C_IRQ2, +}; + +static u32 kvm_loongson3_priority_to_irq[MIPS_EXC_MAX] = { + [MIPS_EXC_INT_TIMER] = C_IRQ5, + [MIPS_EXC_INT_IO_1] = C_IRQ0, + [MIPS_EXC_INT_IO_2] = C_IRQ1, + [MIPS_EXC_INT_IPI_1] = C_IRQ4, +}; + +u32 *kvm_priority_to_irq = kvm_default_priority_to_irq; + +u32 kvm_irq_to_priority(u32 irq) +{ + int i; + + for (i = MIPS_EXC_INT_TIMER; i < MIPS_EXC_MAX; i++) { + if (kvm_priority_to_irq[i] == (1 << (irq + 8))) + return i; + } + + return MIPS_EXC_MAX; +} + static int __init kvm_mips_init(void) { int ret; @@ -1638,6 +1669,9 @@ static int __init kvm_mips_init(void) if (ret) return ret; + if (boot_cpu_type() == CPU_LOONGSON64) + kvm_priority_to_irq = kvm_loongson3_priority_to_irq; + register_die_notifier(&kvm_mips_csr_die_notifier); return 0; diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c index 20f69855f1649..45be0b7bc0904 100644 --- a/arch/mips/kvm/vz.c +++ b/arch/mips/kvm/vz.c @@ -225,23 +225,7 @@ static void kvm_vz_queue_io_int_cb(struct kvm_vcpu *vcpu, * interrupts are asynchronous to vcpu execution therefore defer guest * cp0 accesses */ - switch (intr) { - case 2: - kvm_vz_queue_irq(vcpu, 
MIPS_EXC_INT_IO); - break; - - case 3: - kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IPI_1); - break; - - case 4: - kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IPI_2); - break; - - default: - break; - } - + kvm_vz_queue_irq(vcpu, kvm_irq_to_priority(intr)); } static void kvm_vz_dequeue_io_int_cb(struct kvm_vcpu *vcpu, @@ -253,44 +237,22 @@ static void kvm_vz_dequeue_io_int_cb(struct kvm_vcpu *vcpu, * interrupts are asynchronous to vcpu execution therefore defer guest * cp0 accesses */ - switch (intr) { - case -2: - kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IO); - break; - - case -3: - kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1); - break; - - case -4: - kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2); - break; - - default: - break; - } - + kvm_vz_dequeue_irq(vcpu, kvm_irq_to_priority(-intr)); } -static u32 kvm_vz_priority_to_irq[MIPS_EXC_MAX] = { - [MIPS_EXC_INT_TIMER] = C_IRQ5, - [MIPS_EXC_INT_IO] = C_IRQ0, - [MIPS_EXC_INT_IPI_1] = C_IRQ1, - [MIPS_EXC_INT_IPI_2] = C_IRQ2, -}; - static int kvm_vz_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority, u32 cause) { u32 irq = (priority < MIPS_EXC_MAX) ? - kvm_vz_priority_to_irq[priority] : 0; + kvm_priority_to_irq[priority] : 0; switch (priority) { case MIPS_EXC_INT_TIMER: set_gc0_cause(C_TI); break; - case MIPS_EXC_INT_IO: + case MIPS_EXC_INT_IO_1: + case MIPS_EXC_INT_IO_2: case MIPS_EXC_INT_IPI_1: case MIPS_EXC_INT_IPI_2: if (cpu_has_guestctl2) @@ -311,7 +273,7 @@ static int kvm_vz_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority, u32 cause) { u32 irq = (priority < MIPS_EXC_MAX) ? - kvm_vz_priority_to_irq[priority] : 0; + kvm_priority_to_irq[priority] : 0; switch (priority) { case MIPS_EXC_INT_TIMER: @@ -329,7 +291,8 @@ static int kvm_vz_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority, } break; - case MIPS_EXC_INT_IO: + case MIPS_EXC_INT_IO_1: + case MIPS_EXC_INT_IO_2: case MIPS_EXC_INT_IPI_1: case MIPS_EXC_INT_IPI_2: /* Clear GuestCtl2.VIP irq if not using Hardware Clear */ -- GitLab From f21db3090de2c056728dee76d5fb66343aaf6dd1 Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Sat, 23 May 2020 15:56:37 +0800 Subject: [PATCH 1110/1971] KVM: MIPS: Add Loongson-3 Virtual IPI interrupt support This patch add Loongson-3 Virtual IPI interrupt support in the kernel. The current implementation of IPI emulation in QEMU is based on GIC for MIPS, but Loongson-3 doesn't use GIC. Furthermore, IPI emulation in QEMU is too expensive for performance (because of too many context switches between Host and Guest). With current solution, the IPI delay may even cause RCU stall warnings in a multi-core Guest. So, we design a faster solution that emulate IPI interrupt in kernel (only used by Loongson-3 now). 
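A condensed view of the data path the patch adds is sketched below. The names are taken from the patch itself; this shows only the SET-register case of loongson_vipi_write(), and the wrapper function is an illustration rather than the full handler.

/* Sketch: a guest store to a core's SET register becomes guest IRQ 6. */
static void vipi_raise_sketch(struct loongson_kvm_ipi *ipi, int target, u32 bits)
{
	struct kvm_mips_interrupt irq = { .cpu = target, .irq = 6 };

	ipi->ipistate[target].status |= bits;		/* record pending IPI */
	kvm_vcpu_ioctl_interrupt(ipi->kvm->vcpus[target], &irq);
}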
Reviewed-by: Aleksandar Markovic Signed-off-by: Huacai Chen Co-developed-by: Jiaxun Yang Message-Id: <1590220602-3547-11-git-send-email-chenhc@lemote.com> Signed-off-by: Paolo Bonzini --- arch/mips/include/asm/kvm_host.h | 32 +++++ arch/mips/kvm/Makefile | 3 + arch/mips/kvm/emulate.c | 23 +++- arch/mips/kvm/loongson_ipi.c | 214 +++++++++++++++++++++++++++++++ arch/mips/kvm/mips.c | 6 + 5 files changed, 277 insertions(+), 1 deletion(-) create mode 100644 arch/mips/kvm/loongson_ipi.c diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index bee961997de35..c4e6b1e954f5f 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -23,6 +23,8 @@ #include #include +#include + /* MIPS KVM register ids */ #define MIPS_CP0_32(_R, _S) \ (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S))) @@ -183,11 +185,39 @@ struct kvm_vcpu_stat { struct kvm_arch_memory_slot { }; +#ifdef CONFIG_CPU_LOONGSON64 +struct ipi_state { + uint32_t status; + uint32_t en; + uint32_t set; + uint32_t clear; + uint64_t buf[4]; +}; + +struct loongson_kvm_ipi; + +struct ipi_io_device { + int node_id; + struct loongson_kvm_ipi *ipi; + struct kvm_io_device device; +}; + +struct loongson_kvm_ipi { + spinlock_t lock; + struct kvm *kvm; + struct ipi_state ipistate[16]; + struct ipi_io_device dev_ipi[4]; +}; +#endif + struct kvm_arch { /* Guest physical mm */ struct mm_struct gpa_mm; /* Mask of CPUs needing GPA ASID flush */ cpumask_t asid_flush_mask; +#ifdef CONFIG_CPU_LOONGSON64 + struct loongson_kvm_ipi ipi; +#endif }; #define N_MIPS_COPROC_REGS 32 @@ -1135,6 +1165,8 @@ extern int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc, /* Misc */ extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu); extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm); +extern int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, + struct kvm_mips_interrupt *irq); static inline void kvm_arch_hardware_unsetup(void) {} static inline void kvm_arch_sync_events(struct kvm *kvm) {} diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile index 0a3cef6153fd1..506c4ac0ba1c7 100644 --- a/arch/mips/kvm/Makefile +++ b/arch/mips/kvm/Makefile @@ -13,6 +13,9 @@ kvm-objs := $(common-objs-y) mips.o emulate.o entry.o \ fpu.o kvm-objs += hypcall.o kvm-objs += mmu.o +ifdef CONFIG_CPU_LOONGSON64 +kvm-objs += loongson_ipi.o +endif ifdef CONFIG_KVM_MIPS_VZ kvm-objs += vz.o diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c index 7ccf9b096783f..4fa93d6a2ec52 100644 --- a/arch/mips/kvm/emulate.c +++ b/arch/mips/kvm/emulate.c @@ -1600,6 +1600,7 @@ enum emulation_result kvm_mips_emulate_store(union mips_instruction inst, struct kvm_run *run, struct kvm_vcpu *vcpu) { + int r; enum emulation_result er; u32 rt; void *data = run->mmio.data; @@ -1666,9 +1667,18 @@ enum emulation_result kvm_mips_emulate_store(union mips_instruction inst, goto out_fail; } - run->mmio.is_write = 1; vcpu->mmio_needed = 1; + run->mmio.is_write = 1; vcpu->mmio_is_write = 1; + + r = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, + run->mmio.phys_addr, run->mmio.len, data); + + if (!r) { + vcpu->mmio_needed = 0; + return EMULATE_DONE; + } + return EMULATE_DO_MMIO; out_fail: @@ -1681,6 +1691,7 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst, u32 cause, struct kvm_run *run, struct kvm_vcpu *vcpu) { + int r; enum emulation_result er; unsigned long curr_pc; u32 op, rt; @@ -1745,6 +1756,16 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst, run->mmio.is_write = 0; 
vcpu->mmio_is_write = 0; + + r = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, + run->mmio.phys_addr, run->mmio.len, run->mmio.data); + + if (!r) { + kvm_mips_complete_mmio_load(vcpu, run); + vcpu->mmio_needed = 0; + return EMULATE_DONE; + } + return EMULATE_DO_MMIO; } diff --git a/arch/mips/kvm/loongson_ipi.c b/arch/mips/kvm/loongson_ipi.c new file mode 100644 index 0000000000000..3681fc8fba383 --- /dev/null +++ b/arch/mips/kvm/loongson_ipi.c @@ -0,0 +1,214 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Loongson-3 Virtual IPI interrupt support. + * + * Copyright (C) 2019 Loongson Technologies, Inc. All rights reserved. + * + * Authors: Chen Zhu + * Authors: Huacai Chen + */ + +#include + +#define IPI_BASE 0x3ff01000ULL + +#define CORE0_STATUS_OFF 0x000 +#define CORE0_EN_OFF 0x004 +#define CORE0_SET_OFF 0x008 +#define CORE0_CLEAR_OFF 0x00c +#define CORE0_BUF_20 0x020 +#define CORE0_BUF_28 0x028 +#define CORE0_BUF_30 0x030 +#define CORE0_BUF_38 0x038 + +#define CORE1_STATUS_OFF 0x100 +#define CORE1_EN_OFF 0x104 +#define CORE1_SET_OFF 0x108 +#define CORE1_CLEAR_OFF 0x10c +#define CORE1_BUF_20 0x120 +#define CORE1_BUF_28 0x128 +#define CORE1_BUF_30 0x130 +#define CORE1_BUF_38 0x138 + +#define CORE2_STATUS_OFF 0x200 +#define CORE2_EN_OFF 0x204 +#define CORE2_SET_OFF 0x208 +#define CORE2_CLEAR_OFF 0x20c +#define CORE2_BUF_20 0x220 +#define CORE2_BUF_28 0x228 +#define CORE2_BUF_30 0x230 +#define CORE2_BUF_38 0x238 + +#define CORE3_STATUS_OFF 0x300 +#define CORE3_EN_OFF 0x304 +#define CORE3_SET_OFF 0x308 +#define CORE3_CLEAR_OFF 0x30c +#define CORE3_BUF_20 0x320 +#define CORE3_BUF_28 0x328 +#define CORE3_BUF_30 0x330 +#define CORE3_BUF_38 0x338 + +static int loongson_vipi_read(struct loongson_kvm_ipi *ipi, + gpa_t addr, int len, void *val) +{ + uint32_t core = (addr >> 8) & 3; + uint32_t node = (addr >> 44) & 3; + uint32_t id = core + node * 4; + uint64_t offset = addr & 0xff; + void *pbuf; + struct ipi_state *s = &(ipi->ipistate[id]); + + BUG_ON(offset & (len - 1)); + + switch (offset) { + case CORE0_STATUS_OFF: + *(uint64_t *)val = s->status; + break; + + case CORE0_EN_OFF: + *(uint64_t *)val = s->en; + break; + + case CORE0_SET_OFF: + *(uint64_t *)val = 0; + break; + + case CORE0_CLEAR_OFF: + *(uint64_t *)val = 0; + break; + + case CORE0_BUF_20 ... CORE0_BUF_38: + pbuf = (void *)s->buf + (offset - 0x20); + if (len == 8) + *(uint64_t *)val = *(uint64_t *)pbuf; + else /* Assume len == 4 */ + *(uint32_t *)val = *(uint32_t *)pbuf; + break; + + default: + pr_notice("%s with unknown addr %llx\n", __func__, addr); + break; + } + + return 0; +} + +static int loongson_vipi_write(struct loongson_kvm_ipi *ipi, + gpa_t addr, int len, const void *val) +{ + uint32_t core = (addr >> 8) & 3; + uint32_t node = (addr >> 44) & 3; + uint32_t id = core + node * 4; + uint64_t data, offset = addr & 0xff; + void *pbuf; + struct kvm *kvm = ipi->kvm; + struct kvm_mips_interrupt irq; + struct ipi_state *s = &(ipi->ipistate[id]); + + data = *(uint64_t *)val; + BUG_ON(offset & (len - 1)); + + switch (offset) { + case CORE0_STATUS_OFF: + break; + + case CORE0_EN_OFF: + s->en = data; + break; + + case CORE0_SET_OFF: + s->status |= data; + irq.cpu = id; + irq.irq = 6; + kvm_vcpu_ioctl_interrupt(kvm->vcpus[id], &irq); + break; + + case CORE0_CLEAR_OFF: + s->status &= ~data; + if (!s->status) { + irq.cpu = id; + irq.irq = -6; + kvm_vcpu_ioctl_interrupt(kvm->vcpus[id], &irq); + } + break; + + case CORE0_BUF_20 ... 
CORE0_BUF_38: + pbuf = (void *)s->buf + (offset - 0x20); + if (len == 8) + *(uint64_t *)pbuf = (uint64_t)data; + else /* Assume len == 4 */ + *(uint32_t *)pbuf = (uint32_t)data; + break; + + default: + pr_notice("%s with unknown addr %llx\n", __func__, addr); + break; + } + + return 0; +} + +static int kvm_ipi_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, + gpa_t addr, int len, void *val) +{ + unsigned long flags; + struct loongson_kvm_ipi *ipi; + struct ipi_io_device *ipi_device; + + ipi_device = container_of(dev, struct ipi_io_device, device); + ipi = ipi_device->ipi; + + spin_lock_irqsave(&ipi->lock, flags); + loongson_vipi_read(ipi, addr, len, val); + spin_unlock_irqrestore(&ipi->lock, flags); + + return 0; +} + +static int kvm_ipi_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, + gpa_t addr, int len, const void *val) +{ + unsigned long flags; + struct loongson_kvm_ipi *ipi; + struct ipi_io_device *ipi_device; + + ipi_device = container_of(dev, struct ipi_io_device, device); + ipi = ipi_device->ipi; + + spin_lock_irqsave(&ipi->lock, flags); + loongson_vipi_write(ipi, addr, len, val); + spin_unlock_irqrestore(&ipi->lock, flags); + + return 0; +} + +static const struct kvm_io_device_ops kvm_ipi_ops = { + .read = kvm_ipi_read, + .write = kvm_ipi_write, +}; + +void kvm_init_loongson_ipi(struct kvm *kvm) +{ + int i; + unsigned long addr; + struct loongson_kvm_ipi *s; + struct kvm_io_device *device; + + s = &kvm->arch.ipi; + s->kvm = kvm; + spin_lock_init(&s->lock); + + /* + * Initialize IPI device + */ + for (i = 0; i < 4; i++) { + device = &s->dev_ipi[i].device; + kvm_iodevice_init(device, &kvm_ipi_ops); + addr = (((unsigned long)i) << 44) + IPI_BASE; + mutex_lock(&kvm->slots_lock); + kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, addr, 0x400, device); + mutex_unlock(&kvm->slots_lock); + s->dev_ipi[i].ipi = s; + s->dev_ipi[i].node_id = i; + } +} diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 6b435c6e5018a..e4d42e03ec37d 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -129,6 +129,8 @@ int kvm_arch_check_processor_compat(void *opaque) return 0; } +extern void kvm_init_loongson_ipi(struct kvm *kvm); + int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) { switch (type) { @@ -148,6 +150,10 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) if (!kvm->arch.gpa_mm.pgd) return -ENOMEM; +#ifdef CONFIG_CPU_LOONGSON64 + kvm_init_loongson_ipi(kvm); +#endif + return 0; } -- GitLab From 7f2a83f1c2a941ebfee53f504ed5fdbc61cfb333 Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Sat, 23 May 2020 15:56:38 +0800 Subject: [PATCH 1111/1971] KVM: MIPS: Add CPUCFG emulation for Loongson-3 Loongson-3 overrides lwc2 instructions to implement CPUCFG and CSR read/write functions. These instructions all cause guest exit so CSR doesn't benifit KVM guest (and there are always legacy methods to provide the same functions as CSR). So, we only emulate CPUCFG and let it return a reduced feature list (which means the virtual CPU doesn't have any other advanced features, including CSR) in KVM. 
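The exposure policy can be read off the hunk below; as a condensed sketch (masks and return values copied from the patch, function name invented for the example):

/* Sketch: the reduced CPUCFG view a Loongson-3 guest is allowed to see. */
static u32 cpucfg_guest_view_sketch(u32 index, u32 hostcfg)
{
	switch (index) {
	case LOONGSON_CFG0:
		return 0x14c000;		/* fixed value returned by the patch */
	case LOONGSON_CFG1:
		return hostcfg & (LOONGSON_CFG1_FP | LOONGSON_CFG1_MMI |
				  LOONGSON_CFG1_MSA1 | LOONGSON_CFG1_MSA2 |
				  LOONGSON_CFG1_SFBP);
	case LOONGSON_CFG2:
		return hostcfg & (LOONGSON_CFG2_LEXT1 | LOONGSON_CFG2_LEXT2 |
				  LOONGSON_CFG2_LEXT3 | LOONGSON_CFG2_LSPW);
	case LOONGSON_CFG3:
		return hostcfg;
	default:
		return 0;	/* no other advanced features (including CSR) */
	}
}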
Reviewed-by: Aleksandar Markovic Signed-off-by: Huacai Chen Co-developed-by: Jiaxun Yang Message-Id: <1590220602-3547-12-git-send-email-chenhc@lemote.com> Signed-off-by: Paolo Bonzini --- arch/mips/include/asm/kvm_host.h | 3 ++ arch/mips/include/uapi/asm/inst.h | 11 +++++ arch/mips/kvm/mips.c | 1 + arch/mips/kvm/vz.c | 77 +++++++++++++++++++++++++++++++ 4 files changed, 92 insertions(+) diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index c4e6b1e954f5f..9f3bfc8b363de 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -173,6 +173,9 @@ struct kvm_vcpu_stat { u64 vz_ghfc_exits; u64 vz_gpa_exits; u64 vz_resvd_exits; +#ifdef CONFIG_CPU_LOONGSON64 + u64 vz_cpucfg_exits; +#endif #endif u64 halt_successful_poll; u64 halt_attempted_poll; diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h index 98f97c85e0594..43d1faa02933c 100644 --- a/arch/mips/include/uapi/asm/inst.h +++ b/arch/mips/include/uapi/asm/inst.h @@ -1012,6 +1012,16 @@ struct loongson3_lsdc2_format { /* Loongson-3 overridden ldc2/sdc2 Load/Store fo ;)))))) }; +struct loongson3_lscsr_format { /* Loongson-3 CPUCFG&CSR read/write format */ + __BITFIELD_FIELD(unsigned int opcode : 6, + __BITFIELD_FIELD(unsigned int rs : 5, + __BITFIELD_FIELD(unsigned int fr : 5, + __BITFIELD_FIELD(unsigned int rd : 5, + __BITFIELD_FIELD(unsigned int fd : 5, + __BITFIELD_FIELD(unsigned int func : 6, + ;)))))) +}; + /* * MIPS16e instruction formats (16-bit length) */ @@ -1114,6 +1124,7 @@ union mips_instruction { struct mm16_r5_format mm16_r5_format; struct loongson3_lswc2_format loongson3_lswc2_format; struct loongson3_lsdc2_format loongson3_lsdc2_format; + struct loongson3_lscsr_format loongson3_lscsr_format; }; union mips16e_instruction { diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index e4d42e03ec37d..f62667bb54f36 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -67,6 +67,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { VCPU_STAT("vz_ghfc", vz_ghfc_exits), VCPU_STAT("vz_gpa", vz_gpa_exits), VCPU_STAT("vz_resvd", vz_resvd_exits), + VCPU_STAT("vz_cpucfg", vz_cpucfg_exits), #endif VCPU_STAT("halt_successful_poll", halt_successful_poll), VCPU_STAT("halt_attempted_poll", halt_attempted_poll), diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c index 45be0b7bc0904..c09e8f13b2564 100644 --- a/arch/mips/kvm/vz.c +++ b/arch/mips/kvm/vz.c @@ -29,6 +29,7 @@ #include #include "interrupt.h" +#include "loongson_regs.h" #include "trace.h" @@ -1092,6 +1093,77 @@ static enum emulation_result kvm_vz_gpsi_cache(union mips_instruction inst, return EMULATE_FAIL; } +#ifdef CONFIG_CPU_LOONGSON64 +static enum emulation_result kvm_vz_gpsi_lwc2(union mips_instruction inst, + u32 *opc, u32 cause, + struct kvm_run *run, + struct kvm_vcpu *vcpu) +{ + unsigned int rs, rd; + unsigned int hostcfg; + unsigned long curr_pc; + enum emulation_result er = EMULATE_DONE; + + /* + * Update PC and hold onto current PC in case there is + * an error and we want to rollback the PC + */ + curr_pc = vcpu->arch.pc; + er = update_pc(vcpu, cause); + if (er == EMULATE_FAIL) + return er; + + rs = inst.loongson3_lscsr_format.rs; + rd = inst.loongson3_lscsr_format.rd; + switch (inst.loongson3_lscsr_format.fr) { + case 0x8: /* Read CPUCFG */ + ++vcpu->stat.vz_cpucfg_exits; + hostcfg = read_cpucfg(vcpu->arch.gprs[rs]); + + switch (vcpu->arch.gprs[rs]) { + case LOONGSON_CFG0: + vcpu->arch.gprs[rd] = 0x14c000; + break; + case LOONGSON_CFG1: + hostcfg &= (LOONGSON_CFG1_FP | 
LOONGSON_CFG1_MMI | + LOONGSON_CFG1_MSA1 | LOONGSON_CFG1_MSA2 | + LOONGSON_CFG1_SFBP); + vcpu->arch.gprs[rd] = hostcfg; + break; + case LOONGSON_CFG2: + hostcfg &= (LOONGSON_CFG2_LEXT1 | LOONGSON_CFG2_LEXT2 | + LOONGSON_CFG2_LEXT3 | LOONGSON_CFG2_LSPW); + vcpu->arch.gprs[rd] = hostcfg; + break; + case LOONGSON_CFG3: + vcpu->arch.gprs[rd] = hostcfg; + break; + default: + /* Don't export any other advanced features to guest */ + vcpu->arch.gprs[rd] = 0; + break; + } + break; + + default: + kvm_err("lwc2 emulate not impl %d rs %lx @%lx\n", + inst.loongson3_lscsr_format.fr, vcpu->arch.gprs[rs], curr_pc); + er = EMULATE_FAIL; + break; + } + + /* Rollback PC only if emulation was unsuccessful */ + if (er == EMULATE_FAIL) { + kvm_err("[%#lx]%s: unsupported lwc2 instruction 0x%08x 0x%08x\n", + curr_pc, __func__, inst.word, inst.loongson3_lscsr_format.fr); + + vcpu->arch.pc = curr_pc; + } + + return er; +} +#endif + static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc, struct kvm_vcpu *vcpu) { @@ -1120,6 +1192,11 @@ static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc, trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE); er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu); break; +#endif +#ifdef CONFIG_CPU_LOONGSON64 + case lwc2_op: + er = kvm_vz_gpsi_lwc2(inst, opc, cause, run, vcpu); + break; #endif case spec3_op: switch (inst.spec3_format.func) { -- GitLab From 8a5097ee90c25656db23f44520a9dad7983d88fb Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Sat, 23 May 2020 15:56:39 +0800 Subject: [PATCH 1112/1971] KVM: MIPS: Add CONFIG6 and DIAG registers emulation Loongson-3 has CONFIG6 and DIAG registers which need to be emulated. CONFIG6 is mostly used to enable/disable FTLB and SFB, while DIAG is mostly used to flush BTB, ITLB, DTLB, VTLB and FTLB. 
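The write-permission split the patch introduces for CONFIG6 can be sketched as two masks; the bit values are taken from the kvm_vz_config6_guest_wrmask()/kvm_vz_config6_user_wrmask() helpers in the hunks below, and only the wrapper names here are invented.

/* Sketch: which CONFIG6 bits the guest vs. userspace may change. */
static inline unsigned int config6_guest_wrmask_sketch(void)
{
	/* guest: only the Loongson internal/external timer enables */
	return MIPS_CONF6_LOONGSON_INTIMER | MIPS_CONF6_LOONGSON_EXTIMER;
}

static inline unsigned int config6_user_wrmask_sketch(void)
{
	/* userspace (KVM_SET_ONE_REG): additionally SFB enable and FTLB disable */
	return config6_guest_wrmask_sketch() |
	       MIPS_CONF6_LOONGSON_SFBEN | MIPS_CONF6_LOONGSON_FTLBDIS;
}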
Acked-by: Thomas Bogendoerfer Reviewed-by: Aleksandar Markovic Signed-off-by: Huacai Chen Co-developed-by: Jiaxun Yang Message-Id: <1590220602-3547-13-git-send-email-chenhc@lemote.com> Signed-off-by: Paolo Bonzini --- arch/mips/include/asm/kvm_host.h | 7 ++++ arch/mips/include/asm/mipsregs.h | 4 +++ arch/mips/kvm/tlb.c | 41 +++++++++++++++++++++ arch/mips/kvm/vz.c | 62 +++++++++++++++++++++++++++++++- 4 files changed, 113 insertions(+), 1 deletion(-) diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index 9f3bfc8b363de..363e7a89d1738 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -68,9 +68,11 @@ #define KVM_REG_MIPS_CP0_CONFIG3 MIPS_CP0_32(16, 3) #define KVM_REG_MIPS_CP0_CONFIG4 MIPS_CP0_32(16, 4) #define KVM_REG_MIPS_CP0_CONFIG5 MIPS_CP0_32(16, 5) +#define KVM_REG_MIPS_CP0_CONFIG6 MIPS_CP0_32(16, 6) #define KVM_REG_MIPS_CP0_CONFIG7 MIPS_CP0_32(16, 7) #define KVM_REG_MIPS_CP0_MAARI MIPS_CP0_64(17, 2) #define KVM_REG_MIPS_CP0_XCONTEXT MIPS_CP0_64(20, 0) +#define KVM_REG_MIPS_CP0_DIAG MIPS_CP0_32(22, 0) #define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0) #define KVM_REG_MIPS_CP0_KSCRATCH1 MIPS_CP0_64(31, 2) #define KVM_REG_MIPS_CP0_KSCRATCH2 MIPS_CP0_64(31, 3) @@ -258,6 +260,7 @@ struct mips_coproc { #define MIPS_CP0_WATCH_LO 18 #define MIPS_CP0_WATCH_HI 19 #define MIPS_CP0_TLB_XCONTEXT 20 +#define MIPS_CP0_DIAG 22 #define MIPS_CP0_ECC 26 #define MIPS_CP0_CACHE_ERR 27 #define MIPS_CP0_TAG_LO 28 @@ -929,6 +932,10 @@ void kvm_vz_save_guesttlb(struct kvm_mips_tlb *buf, unsigned int index, unsigned int count); void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index, unsigned int count); +#ifdef CONFIG_CPU_LOONGSON64 +void kvm_loongson_clear_guest_vtlb(void); +void kvm_loongson_clear_guest_ftlb(void); +#endif #endif void kvm_mips_suspend_mm(int cpu); diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index 796dbb86575ba..20d6d40c59a42 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -1038,6 +1038,8 @@ /* Disable Branch Return Cache */ #define R10K_DIAG_D_BRC (_ULCAST_(1) << 22) +/* Flush BTB */ +#define LOONGSON_DIAG_BTB (_ULCAST_(1) << 1) /* Flush ITLB */ #define LOONGSON_DIAG_ITLB (_ULCAST_(1) << 2) /* Flush DTLB */ @@ -2874,7 +2876,9 @@ __BUILD_SET_C0(status) __BUILD_SET_C0(cause) __BUILD_SET_C0(config) __BUILD_SET_C0(config5) +__BUILD_SET_C0(config6) __BUILD_SET_C0(config7) +__BUILD_SET_C0(diag) __BUILD_SET_C0(intcontrol) __BUILD_SET_C0(intctl) __BUILD_SET_C0(srsmap) diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c index 5d436c5216cc4..2ecf4f05a9809 100644 --- a/arch/mips/kvm/tlb.c +++ b/arch/mips/kvm/tlb.c @@ -20,6 +20,7 @@ #include #include +#include #include #include #include @@ -622,6 +623,46 @@ void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index, } EXPORT_SYMBOL_GPL(kvm_vz_load_guesttlb); +#ifdef CONFIG_CPU_LOONGSON64 +void kvm_loongson_clear_guest_vtlb(void) +{ + int idx = read_gc0_index(); + + /* Set root GuestID for root probe and write of guest TLB entry */ + set_root_gid_to_guest_gid(); + + write_gc0_index(0); + guest_tlbinvf(); + write_gc0_index(idx); + + clear_root_gid(); + set_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB); +} +EXPORT_SYMBOL_GPL(kvm_loongson_clear_guest_vtlb); + +void kvm_loongson_clear_guest_ftlb(void) +{ + int i; + int idx = read_gc0_index(); + + /* Set root GuestID for root probe and write of guest TLB entry */ + set_root_gid_to_guest_gid(); + + for (i = 
current_cpu_data.tlbsizevtlb; + i < (current_cpu_data.tlbsizevtlb + + current_cpu_data.tlbsizeftlbsets); + i++) { + write_gc0_index(i); + guest_tlbinvf(); + } + write_gc0_index(idx); + + clear_root_gid(); + set_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB); +} +EXPORT_SYMBOL_GPL(kvm_loongson_clear_guest_ftlb); +#endif + #endif /** diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c index c09e8f13b2564..2810d1c8b67f8 100644 --- a/arch/mips/kvm/vz.c +++ b/arch/mips/kvm/vz.c @@ -127,6 +127,11 @@ static inline unsigned int kvm_vz_config5_guest_wrmask(struct kvm_vcpu *vcpu) return mask; } +static inline unsigned int kvm_vz_config6_guest_wrmask(struct kvm_vcpu *vcpu) +{ + return MIPS_CONF6_LOONGSON_INTIMER | MIPS_CONF6_LOONGSON_EXTIMER; +} + /* * VZ optionally allows these additional Config bits to be written by root: * Config: M, [MT] @@ -181,6 +186,12 @@ static inline unsigned int kvm_vz_config5_user_wrmask(struct kvm_vcpu *vcpu) return kvm_vz_config5_guest_wrmask(vcpu) | MIPS_CONF5_MRP; } +static inline unsigned int kvm_vz_config6_user_wrmask(struct kvm_vcpu *vcpu) +{ + return kvm_vz_config6_guest_wrmask(vcpu) | + MIPS_CONF6_LOONGSON_SFBEN | MIPS_CONF6_LOONGSON_FTLBDIS; +} + static gpa_t kvm_vz_gva_to_gpa_cb(gva_t gva) { /* VZ guest has already converted gva to gpa */ @@ -930,7 +941,8 @@ static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst, (sel == 2 || /* SRSCtl */ sel == 3)) || /* SRSMap */ (rd == MIPS_CP0_CONFIG && - (sel == 7)) || /* Config7 */ + (sel == 6 || /* Config6 */ + sel == 7)) || /* Config7 */ (rd == MIPS_CP0_LLADDR && (sel == 2) && /* MAARI */ cpu_guest_has_maar && @@ -938,6 +950,11 @@ static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst, (rd == MIPS_CP0_ERRCTL && (sel == 0))) { /* ErrCtl */ val = cop0->reg[rd][sel]; +#ifdef CONFIG_CPU_LOONGSON64 + } else if (rd == MIPS_CP0_DIAG && + (sel == 0)) { /* Diag */ + val = cop0->reg[rd][sel]; +#endif } else { val = 0; er = EMULATE_FAIL; @@ -1000,9 +1017,40 @@ static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst, cpu_guest_has_maar && !cpu_guest_has_dyn_maar) { kvm_write_maari(vcpu, val); + } else if (rd == MIPS_CP0_CONFIG && + (sel == 6)) { + cop0->reg[rd][sel] = (int)val; } else if (rd == MIPS_CP0_ERRCTL && (sel == 0)) { /* ErrCtl */ /* ignore the written value */ +#ifdef CONFIG_CPU_LOONGSON64 + } else if (rd == MIPS_CP0_DIAG && + (sel == 0)) { /* Diag */ + unsigned long flags; + + local_irq_save(flags); + if (val & LOONGSON_DIAG_BTB) { + /* Flush BTB */ + set_c0_diag(LOONGSON_DIAG_BTB); + } + if (val & LOONGSON_DIAG_ITLB) { + /* Flush ITLB */ + set_c0_diag(LOONGSON_DIAG_ITLB); + } + if (val & LOONGSON_DIAG_DTLB) { + /* Flush DTLB */ + set_c0_diag(LOONGSON_DIAG_DTLB); + } + if (val & LOONGSON_DIAG_VTLB) { + /* Flush VTLB */ + kvm_loongson_clear_guest_vtlb(); + } + if (val & LOONGSON_DIAG_FTLB) { + /* Flush FTLB */ + kvm_loongson_clear_guest_ftlb(); + } + local_irq_restore(flags); +#endif } else { er = EMULATE_FAIL; } @@ -1692,6 +1740,7 @@ static u64 kvm_vz_get_one_regs[] = { KVM_REG_MIPS_CP0_CONFIG3, KVM_REG_MIPS_CP0_CONFIG4, KVM_REG_MIPS_CP0_CONFIG5, + KVM_REG_MIPS_CP0_CONFIG6, #ifdef CONFIG_64BIT KVM_REG_MIPS_CP0_XCONTEXT, #endif @@ -2019,6 +2068,9 @@ static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu, return -EINVAL; *v = read_gc0_config5(); break; + case KVM_REG_MIPS_CP0_CONFIG6: + *v = kvm_read_sw_gc0_config6(cop0); + break; case KVM_REG_MIPS_CP0_MAAR(0) ... 
KVM_REG_MIPS_CP0_MAAR(0x3f): if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar) return -EINVAL; @@ -2288,6 +2340,14 @@ static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu, write_gc0_config5(v); } break; + case KVM_REG_MIPS_CP0_CONFIG6: + cur = kvm_read_sw_gc0_config6(cop0); + change = (cur ^ v) & kvm_vz_config6_user_wrmask(vcpu); + if (change) { + v = cur ^ change; + kvm_write_sw_gc0_config6(cop0, (int)v); + } + break; case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f): if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar) return -EINVAL; -- GitLab From dc6d95b153e78ed70b1b2c04aadffb76bcd2b3ec Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Sat, 23 May 2020 15:56:40 +0800 Subject: [PATCH 1113/1971] KVM: MIPS: Add more MMIO load/store instructions emulation This patch add more MMIO load/store instructions emulation, which can be observed in QXL and some other device drivers: 1, LWL, LWR, LDW, LDR, SWL, SWR, SDL and SDR for all MIPS; 2, GSLBX, GSLHX, GSLWX, GSLDX, GSSBX, GSSHX, GSSWX and GSSDX for Loongson-3. Reviewed-by: Aleksandar Markovic Signed-off-by: Huacai Chen Co-developed-by: Jiaxun Yang Message-Id: <1590220602-3547-14-git-send-email-chenhc@lemote.com> Signed-off-by: Paolo Bonzini --- arch/mips/kvm/emulate.c | 480 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 470 insertions(+), 10 deletions(-) diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c index 4fa93d6a2ec52..5ae82d9251971 100644 --- a/arch/mips/kvm/emulate.c +++ b/arch/mips/kvm/emulate.c @@ -1604,6 +1604,7 @@ enum emulation_result kvm_mips_emulate_store(union mips_instruction inst, enum emulation_result er; u32 rt; void *data = run->mmio.data; + unsigned int imme; unsigned long curr_pc; /* @@ -1661,6 +1662,211 @@ enum emulation_result kvm_mips_emulate_store(union mips_instruction inst, vcpu->arch.gprs[rt], *(u8 *)data); break; + case swl_op: + run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( + vcpu->arch.host_cp0_badvaddr) & (~0x3); + run->mmio.len = 4; + imme = vcpu->arch.host_cp0_badvaddr & 0x3; + switch (imme) { + case 0: + *(u32 *)data = ((*(u32 *)data) & 0xffffff00) | + (vcpu->arch.gprs[rt] >> 24); + break; + case 1: + *(u32 *)data = ((*(u32 *)data) & 0xffff0000) | + (vcpu->arch.gprs[rt] >> 16); + break; + case 2: + *(u32 *)data = ((*(u32 *)data) & 0xff000000) | + (vcpu->arch.gprs[rt] >> 8); + break; + case 3: + *(u32 *)data = vcpu->arch.gprs[rt]; + break; + default: + break; + } + + kvm_debug("[%#lx] OP_SWL: eaddr: %#lx, gpr: %#lx, data: %#x\n", + vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, + vcpu->arch.gprs[rt], *(u32 *)data); + break; + + case swr_op: + run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( + vcpu->arch.host_cp0_badvaddr) & (~0x3); + run->mmio.len = 4; + imme = vcpu->arch.host_cp0_badvaddr & 0x3; + switch (imme) { + case 0: + *(u32 *)data = vcpu->arch.gprs[rt]; + break; + case 1: + *(u32 *)data = ((*(u32 *)data) & 0xff) | + (vcpu->arch.gprs[rt] << 8); + break; + case 2: + *(u32 *)data = ((*(u32 *)data) & 0xffff) | + (vcpu->arch.gprs[rt] << 16); + break; + case 3: + *(u32 *)data = ((*(u32 *)data) & 0xffffff) | + (vcpu->arch.gprs[rt] << 24); + break; + default: + break; + } + + kvm_debug("[%#lx] OP_SWR: eaddr: %#lx, gpr: %#lx, data: %#x\n", + vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, + vcpu->arch.gprs[rt], *(u32 *)data); + break; + + case sdl_op: + run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( + vcpu->arch.host_cp0_badvaddr) & (~0x7); + + run->mmio.len = 8; + imme = vcpu->arch.host_cp0_badvaddr & 0x7; + switch (imme) { + case 0: + *(u64 *)data = ((*(u64 
*)data) & 0xffffffffffffff00) | + ((vcpu->arch.gprs[rt] >> 56) & 0xff); + break; + case 1: + *(u64 *)data = ((*(u64 *)data) & 0xffffffffffff0000) | + ((vcpu->arch.gprs[rt] >> 48) & 0xffff); + break; + case 2: + *(u64 *)data = ((*(u64 *)data) & 0xffffffffff000000) | + ((vcpu->arch.gprs[rt] >> 40) & 0xffffff); + break; + case 3: + *(u64 *)data = ((*(u64 *)data) & 0xffffffff00000000) | + ((vcpu->arch.gprs[rt] >> 32) & 0xffffffff); + break; + case 4: + *(u64 *)data = ((*(u64 *)data) & 0xffffff0000000000) | + ((vcpu->arch.gprs[rt] >> 24) & 0xffffffffff); + break; + case 5: + *(u64 *)data = ((*(u64 *)data) & 0xffff000000000000) | + ((vcpu->arch.gprs[rt] >> 16) & 0xffffffffffff); + break; + case 6: + *(u64 *)data = ((*(u64 *)data) & 0xff00000000000000) | + ((vcpu->arch.gprs[rt] >> 8) & 0xffffffffffffff); + break; + case 7: + *(u64 *)data = vcpu->arch.gprs[rt]; + break; + default: + break; + } + + kvm_debug("[%#lx] OP_SDL: eaddr: %#lx, gpr: %#lx, data: %llx\n", + vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, + vcpu->arch.gprs[rt], *(u64 *)data); + break; + + case sdr_op: + run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( + vcpu->arch.host_cp0_badvaddr) & (~0x7); + + run->mmio.len = 8; + imme = vcpu->arch.host_cp0_badvaddr & 0x7; + switch (imme) { + case 0: + *(u64 *)data = vcpu->arch.gprs[rt]; + break; + case 1: + *(u64 *)data = ((*(u64 *)data) & 0xff) | + (vcpu->arch.gprs[rt] << 8); + break; + case 2: + *(u64 *)data = ((*(u64 *)data) & 0xffff) | + (vcpu->arch.gprs[rt] << 16); + break; + case 3: + *(u64 *)data = ((*(u64 *)data) & 0xffffff) | + (vcpu->arch.gprs[rt] << 24); + break; + case 4: + *(u64 *)data = ((*(u64 *)data) & 0xffffffff) | + (vcpu->arch.gprs[rt] << 32); + break; + case 5: + *(u64 *)data = ((*(u64 *)data) & 0xffffffffff) | + (vcpu->arch.gprs[rt] << 40); + break; + case 6: + *(u64 *)data = ((*(u64 *)data) & 0xffffffffffff) | + (vcpu->arch.gprs[rt] << 48); + break; + case 7: + *(u64 *)data = ((*(u64 *)data) & 0xffffffffffffff) | + (vcpu->arch.gprs[rt] << 56); + break; + default: + break; + } + + kvm_debug("[%#lx] OP_SDR: eaddr: %#lx, gpr: %#lx, data: %llx\n", + vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, + vcpu->arch.gprs[rt], *(u64 *)data); + break; + +#ifdef CONFIG_CPU_LOONGSON64 + case sdc2_op: + rt = inst.loongson3_lsdc2_format.rt; + switch (inst.loongson3_lsdc2_format.opcode1) { + /* + * Loongson-3 overridden sdc2 instructions. 
+ * opcode1 instruction + * 0x0 gssbx: store 1 bytes from GPR + * 0x1 gsshx: store 2 bytes from GPR + * 0x2 gsswx: store 4 bytes from GPR + * 0x3 gssdx: store 8 bytes from GPR + */ + case 0x0: + run->mmio.len = 1; + *(u8 *)data = vcpu->arch.gprs[rt]; + + kvm_debug("[%#lx] OP_GSSBX: eaddr: %#lx, gpr: %#lx, data: %#x\n", + vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, + vcpu->arch.gprs[rt], *(u8 *)data); + break; + case 0x1: + run->mmio.len = 2; + *(u16 *)data = vcpu->arch.gprs[rt]; + + kvm_debug("[%#lx] OP_GSSSHX: eaddr: %#lx, gpr: %#lx, data: %#x\n", + vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, + vcpu->arch.gprs[rt], *(u16 *)data); + break; + case 0x2: + run->mmio.len = 4; + *(u32 *)data = vcpu->arch.gprs[rt]; + + kvm_debug("[%#lx] OP_GSSWX: eaddr: %#lx, gpr: %#lx, data: %#x\n", + vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, + vcpu->arch.gprs[rt], *(u32 *)data); + break; + case 0x3: + run->mmio.len = 8; + *(u64 *)data = vcpu->arch.gprs[rt]; + + kvm_debug("[%#lx] OP_GSSDX: eaddr: %#lx, gpr: %#lx, data: %#llx\n", + vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, + vcpu->arch.gprs[rt], *(u64 *)data); + break; + default: + kvm_err("Godson Exteneded GS-Store not yet supported (inst=0x%08x)\n", + inst.word); + break; + } + break; +#endif default: kvm_err("Store not yet supported (inst=0x%08x)\n", inst.word); @@ -1695,6 +1901,7 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst, enum emulation_result er; unsigned long curr_pc; u32 op, rt; + unsigned int imme; rt = inst.i_format.rt; op = inst.i_format.opcode; @@ -1747,6 +1954,162 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst, run->mmio.len = 1; break; + case lwl_op: + run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( + vcpu->arch.host_cp0_badvaddr) & (~0x3); + + run->mmio.len = 4; + imme = vcpu->arch.host_cp0_badvaddr & 0x3; + switch (imme) { + case 0: + vcpu->mmio_needed = 3; /* 1 byte */ + break; + case 1: + vcpu->mmio_needed = 4; /* 2 bytes */ + break; + case 2: + vcpu->mmio_needed = 5; /* 3 bytes */ + break; + case 3: + vcpu->mmio_needed = 6; /* 4 bytes */ + break; + default: + break; + } + break; + + case lwr_op: + run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( + vcpu->arch.host_cp0_badvaddr) & (~0x3); + + run->mmio.len = 4; + imme = vcpu->arch.host_cp0_badvaddr & 0x3; + switch (imme) { + case 0: + vcpu->mmio_needed = 7; /* 4 bytes */ + break; + case 1: + vcpu->mmio_needed = 8; /* 3 bytes */ + break; + case 2: + vcpu->mmio_needed = 9; /* 2 bytes */ + break; + case 3: + vcpu->mmio_needed = 10; /* 1 byte */ + break; + default: + break; + } + break; + + case ldl_op: + run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( + vcpu->arch.host_cp0_badvaddr) & (~0x7); + + run->mmio.len = 8; + imme = vcpu->arch.host_cp0_badvaddr & 0x7; + switch (imme) { + case 0: + vcpu->mmio_needed = 11; /* 1 byte */ + break; + case 1: + vcpu->mmio_needed = 12; /* 2 bytes */ + break; + case 2: + vcpu->mmio_needed = 13; /* 3 bytes */ + break; + case 3: + vcpu->mmio_needed = 14; /* 4 bytes */ + break; + case 4: + vcpu->mmio_needed = 15; /* 5 bytes */ + break; + case 5: + vcpu->mmio_needed = 16; /* 6 bytes */ + break; + case 6: + vcpu->mmio_needed = 17; /* 7 bytes */ + break; + case 7: + vcpu->mmio_needed = 18; /* 8 bytes */ + break; + default: + break; + } + break; + + case ldr_op: + run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( + vcpu->arch.host_cp0_badvaddr) & (~0x7); + + run->mmio.len = 8; + imme = vcpu->arch.host_cp0_badvaddr & 0x7; + switch (imme) { + case 0: + vcpu->mmio_needed = 19; /* 8 bytes 
*/ + break; + case 1: + vcpu->mmio_needed = 20; /* 7 bytes */ + break; + case 2: + vcpu->mmio_needed = 21; /* 6 bytes */ + break; + case 3: + vcpu->mmio_needed = 22; /* 5 bytes */ + break; + case 4: + vcpu->mmio_needed = 23; /* 4 bytes */ + break; + case 5: + vcpu->mmio_needed = 24; /* 3 bytes */ + break; + case 6: + vcpu->mmio_needed = 25; /* 2 bytes */ + break; + case 7: + vcpu->mmio_needed = 26; /* 1 byte */ + break; + default: + break; + } + break; + +#ifdef CONFIG_CPU_LOONGSON64 + case ldc2_op: + rt = inst.loongson3_lsdc2_format.rt; + switch (inst.loongson3_lsdc2_format.opcode1) { + /* + * Loongson-3 overridden ldc2 instructions. + * opcode1 instruction + * 0x0 gslbx: store 1 bytes from GPR + * 0x1 gslhx: store 2 bytes from GPR + * 0x2 gslwx: store 4 bytes from GPR + * 0x3 gsldx: store 8 bytes from GPR + */ + case 0x0: + run->mmio.len = 1; + vcpu->mmio_needed = 27; /* signed */ + break; + case 0x1: + run->mmio.len = 2; + vcpu->mmio_needed = 28; /* signed */ + break; + case 0x2: + run->mmio.len = 4; + vcpu->mmio_needed = 29; /* signed */ + break; + case 0x3: + run->mmio.len = 8; + vcpu->mmio_needed = 30; /* signed */ + break; + default: + kvm_err("Godson Exteneded GS-Load for float not yet supported (inst=0x%08x)\n", + inst.word); + break; + } + break; +#endif + default: kvm_err("Load not yet supported (inst=0x%08x)\n", inst.word); @@ -2612,28 +2975,125 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, switch (run->mmio.len) { case 8: - *gpr = *(s64 *)run->mmio.data; + switch (vcpu->mmio_needed) { + case 11: + *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffffff) | + (((*(s64 *)run->mmio.data) & 0xff) << 56); + break; + case 12: + *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffff) | + (((*(s64 *)run->mmio.data) & 0xffff) << 48); + break; + case 13: + *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffff) | + (((*(s64 *)run->mmio.data) & 0xffffff) << 40); + break; + case 14: + *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffff) | + (((*(s64 *)run->mmio.data) & 0xffffffff) << 32); + break; + case 15: + *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff) | + (((*(s64 *)run->mmio.data) & 0xffffffffff) << 24); + break; + case 16: + *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff) | + (((*(s64 *)run->mmio.data) & 0xffffffffffff) << 16); + break; + case 17: + *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff) | + (((*(s64 *)run->mmio.data) & 0xffffffffffffff) << 8); + break; + case 18: + case 19: + *gpr = *(s64 *)run->mmio.data; + break; + case 20: + *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff00000000000000) | + ((((*(s64 *)run->mmio.data)) >> 8) & 0xffffffffffffff); + break; + case 21: + *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff000000000000) | + ((((*(s64 *)run->mmio.data)) >> 16) & 0xffffffffffff); + break; + case 22: + *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff0000000000) | + ((((*(s64 *)run->mmio.data)) >> 24) & 0xffffffffff); + break; + case 23: + *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffff00000000) | + ((((*(s64 *)run->mmio.data)) >> 32) & 0xffffffff); + break; + case 24: + *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffff000000) | + ((((*(s64 *)run->mmio.data)) >> 40) & 0xffffff); + break; + case 25: + *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffff0000) | + ((((*(s64 *)run->mmio.data)) >> 48) & 0xffff); + break; + case 26: + *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffffff00) | + ((((*(s64 *)run->mmio.data)) >> 56) & 0xff); + break; + default: + *gpr = 
*(s64 *)run->mmio.data; + } break; case 4: - if (vcpu->mmio_needed == 2) - *gpr = *(s32 *)run->mmio.data; - else + switch (vcpu->mmio_needed) { + case 1: *gpr = *(u32 *)run->mmio.data; + break; + case 2: + *gpr = *(s32 *)run->mmio.data; + break; + case 3: + *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff) | + (((*(s32 *)run->mmio.data) & 0xff) << 24); + break; + case 4: + *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff) | + (((*(s32 *)run->mmio.data) & 0xffff) << 16); + break; + case 5: + *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff) | + (((*(s32 *)run->mmio.data) & 0xffffff) << 8); + break; + case 6: + case 7: + *gpr = *(s32 *)run->mmio.data; + break; + case 8: + *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff000000) | + ((((*(s32 *)run->mmio.data)) >> 8) & 0xffffff); + break; + case 9: + *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff0000) | + ((((*(s32 *)run->mmio.data)) >> 16) & 0xffff); + break; + case 10: + *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff00) | + ((((*(s32 *)run->mmio.data)) >> 24) & 0xff); + break; + default: + *gpr = *(s32 *)run->mmio.data; + } break; case 2: - if (vcpu->mmio_needed == 2) - *gpr = *(s16 *) run->mmio.data; - else + if (vcpu->mmio_needed == 1) *gpr = *(u16 *)run->mmio.data; + else + *gpr = *(s16 *)run->mmio.data; break; case 1: - if (vcpu->mmio_needed == 2) - *gpr = *(s8 *) run->mmio.data; + if (vcpu->mmio_needed == 1) + *gpr = *(u8 *)run->mmio.data; else - *gpr = *(u8 *) run->mmio.data; + *gpr = *(s8 *)run->mmio.data; break; } -- GitLab From 0f78355c450835053fed85828c9d6526594c0921 Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Sat, 23 May 2020 15:56:41 +0800 Subject: [PATCH 1114/1971] KVM: MIPS: Enable KVM support for Loongson-3 This patch enable KVM support for Loongson-3 by selecting HAVE_KVM, but only enable KVM/VZ on Loongson-3A R4+ (because VZ of early processors are incomplete). Besides, Loongson-3 support SMP guests, so we clear the linked load bit of LLAddr in kvm_vz_vcpu_load() if the guest has more than one VCPUs. 
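For completeness, a minimal userspace check that the resulting kernel exposes KVM is sketched below. This is illustrative only and not part of the patch; KVM_VM_MIPS_VZ is the existing machine type from the uapi headers, and the function name is invented.

/* Sketch: open /dev/kvm and create a VZ virtual machine on Loongson-3. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int create_vz_vm_sketch(void)
{
	int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);

	if (kvm < 0)
		return -1;			/* KVM not available */
	return ioctl(kvm, KVM_CREATE_VM, KVM_VM_MIPS_VZ);	/* returns VM fd */
}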
Acked-by: Thomas Bogendoerfer Reviewed-by: Aleksandar Markovic Signed-off-by: Huacai Chen Co-developed-by: Jiaxun Yang Message-Id: <1590220602-3547-15-git-send-email-chenhc@lemote.com> Signed-off-by: Paolo Bonzini --- arch/mips/Kconfig | 1 + arch/mips/kernel/cpu-probe.c | 1 + arch/mips/kvm/vz.c | 2 +- 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 9dc08ee3d6b94..1ad50c01c9701 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -1404,6 +1404,7 @@ config CPU_LOONGSON64 select MIPS_L1_CACHE_SHIFT_6 select GPIOLIB select SWIOTLB + select HAVE_KVM help The Loongson GSx64(GS264/GS464/GS464E/GS464V) series of processor cores implements the MIPS64R2 instruction set with many extensions, diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index f0e7f2d7bd9a5..def1659fe2621 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -2076,6 +2076,7 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) c->writecombine = _CACHE_UNCACHED_ACCELERATED; c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM | MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2); + c->ases &= ~MIPS_ASE_VZ; /* VZ of Loongson-3A2000/3000 is incomplete */ break; case PRID_IMP_LOONGSON_64G: c->cputype = CPU_LOONGSON64; diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c index 2810d1c8b67f8..d9c462c14163f 100644 --- a/arch/mips/kvm/vz.c +++ b/arch/mips/kvm/vz.c @@ -2697,7 +2697,7 @@ static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu) * prevents a SC on the next VCPU from succeeding by matching a LL on * the previous VCPU. */ - if (cpu_guest_has_rw_llb) + if (vcpu->kvm->created_vcpus > 1) write_gc0_lladdr(0); return 0; -- GitLab From 0e96edd9a9c2e75ece7f581d4f75d26b38cd53ba Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 27 May 2020 20:11:21 -0700 Subject: [PATCH 1115/1971] x86/kvm: Remove defunct KVM_DEBUG_FS Kconfig Remove KVM_DEBUG_FS, which can easily be misconstrued as controlling KVM-as-a-host. The sole user of CONFIG_KVM_DEBUG_FS was removed by commit cfd8983f03c7b ("x86, locking/spinlocks: Remove ticket (spin)lock implementation"). Signed-off-by: Sean Christopherson Message-Id: <20200528031121.28904-1-sean.j.christopherson@intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/Kconfig | 8 -------- arch/x86/kernel/kvm.c | 1 - 2 files changed, 9 deletions(-) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 57d1c4e367387..67f6a40b5e93a 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -821,14 +821,6 @@ config PVH This option enables the PVH entry point for guest virtual machines as specified in the x86/HVM direct boot ABI. -config KVM_DEBUG_FS - bool "Enable debug information for KVM Guests in debugfs" - depends on KVM_GUEST && DEBUG_FS - ---help--- - This option enables collection of various statistics for KVM guest. - Statistics are displayed in debugfs filesystem. Enabling this option - may incur significant overhead. 
- config PARAVIRT_TIME_ACCOUNTING bool "Paravirtual steal time accounting" depends on PARAVIRT diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index d6f22a3a1f7da..7e6403a8d8610 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include #include -- GitLab From 7ec28e264f2e52089c14c6f8eba1ce7b6501e59b Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Wed, 3 Jun 2020 13:11:31 +0300 Subject: [PATCH 1116/1971] KVM: Use vmemdup_user() Replace opencoded alloc and copy with vmemdup_user(). Signed-off-by: Denis Efremov Message-Id: <20200603101131.2107303-1-efremov@linux.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/cpuid.c | 17 +++++++---------- virt/kvm/kvm_main.c | 19 ++++++++----------- 2 files changed, 15 insertions(+), 21 deletions(-) diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 9ca32d9699388..8a294f9747aad 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -181,17 +181,14 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu, r = -E2BIG; if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) goto out; - r = -ENOMEM; if (cpuid->nent) { - cpuid_entries = - vmalloc(array_size(sizeof(struct kvm_cpuid_entry), - cpuid->nent)); - if (!cpuid_entries) - goto out; - r = -EFAULT; - if (copy_from_user(cpuid_entries, entries, - cpuid->nent * sizeof(struct kvm_cpuid_entry))) + cpuid_entries = vmemdup_user(entries, + array_size(sizeof(struct kvm_cpuid_entry), + cpuid->nent)); + if (IS_ERR(cpuid_entries)) { + r = PTR_ERR(cpuid_entries); goto out; + } } for (i = 0; i < cpuid->nent; i++) { vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function; @@ -211,8 +208,8 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu, kvm_x86_ops.cpuid_update(vcpu); r = kvm_update_cpuid(vcpu); + kvfree(cpuid_entries); out: - vfree(cpuid_entries); return r; } diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 3577eb84eac0b..4db151f6101ed 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -3746,21 +3746,18 @@ static long kvm_vm_ioctl(struct file *filp, if (routing.flags) goto out; if (routing.nr) { - r = -ENOMEM; - entries = vmalloc(array_size(sizeof(*entries), - routing.nr)); - if (!entries) - goto out; - r = -EFAULT; urouting = argp; - if (copy_from_user(entries, urouting->entries, - routing.nr * sizeof(*entries))) - goto out_free_irq_routing; + entries = vmemdup_user(urouting->entries, + array_size(sizeof(*entries), + routing.nr)); + if (IS_ERR(entries)) { + r = PTR_ERR(entries); + goto out; + } } r = kvm_set_irq_routing(kvm, entries, routing.nr, routing.flags); -out_free_irq_routing: - vfree(entries); + kvfree(entries); break; } #endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */ -- GitLab From 5688fed649f15a232e75148afb0a28864046f44c Mon Sep 17 00:00:00 2001 From: Anthony Yznaga Date: Tue, 2 Jun 2020 13:07:28 -0700 Subject: [PATCH 1117/1971] KVM: x86: remove unnecessary rmap walk of read-only memslots There's no write access to remove. An existing memslot cannot be updated to set or clear KVM_MEM_READONLY, and any mappings established in a newly created or moved read-only memslot will already be read-only. 
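[Editor's note: the vmemdup_user() conversion a couple of patches earlier in this chunk follows a common allocation-plus-copy pattern; the fragment below restates it as a minimal sketch. The names user_ptr and nent are placeholders for illustration, not identifiers taken from the patch.]

	struct kvm_cpuid_entry *copy;

	copy = vmemdup_user(user_ptr, array_size(sizeof(*copy), nent));
	if (IS_ERR(copy))
		return PTR_ERR(copy);
	/* ... use copy ... */
	kvfree(copy);	/* kvfree() handles both kmalloc- and vmalloc-backed memory */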
Signed-off-by: Anthony Yznaga Message-Id: <1591128450-11977-2-git-send-email-anthony.yznaga@oracle.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 9e41b51353401..e11e0494f833a 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -10142,11 +10142,9 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, static void kvm_mmu_slot_apply_flags(struct kvm *kvm, struct kvm_memory_slot *new) { - /* Still write protect RO slot */ - if (new->flags & KVM_MEM_READONLY) { - kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_4K); + /* Nothing to do for RO slots */ + if (new->flags & KVM_MEM_READONLY) return; - } /* * Call kvm_x86_ops dirty logging hooks when they are valid. -- GitLab From 4b44295538b421de2de8b5754472284fe2daa4e9 Mon Sep 17 00:00:00 2001 From: Anthony Yznaga Date: Tue, 2 Jun 2020 13:07:29 -0700 Subject: [PATCH 1118/1971] KVM: x86: avoid unnecessary rmap walks when creating/moving slots On large memory guests it has been observed that creating a memslot for a very large range can take noticeable amount of time. Investigation showed that the time is spent walking the rmaps to update existing sptes to remove write access or set/clear dirty bits to support dirty logging. These rmap walks are unnecessary when creating or moving a memslot. A newly created memslot will not have any existing mappings, and the existing mappings of a moved memslot will have been invalidated and flushed. Any mappings established once the new/moved memslot becomes visible will be set using the properties of the new slot. Signed-off-by: Anthony Yznaga Message-Id: <1591128450-11977-3-git-send-email-anthony.yznaga@oracle.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index e11e0494f833a..f39bdd5fd7cf1 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -10242,7 +10242,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, * * FIXME: const-ify all uses of struct kvm_memory_slot. */ - if (change != KVM_MR_DELETE) + if (change == KVM_MR_FLAGS_ONLY) kvm_mmu_slot_apply_flags(kvm, (struct kvm_memory_slot *) new); /* Free the arrays associated with the old memslot. */ -- GitLab From 3741679ba4b4b207e52587cfbee1a9bba947b15b Mon Sep 17 00:00:00 2001 From: Anthony Yznaga Date: Tue, 2 Jun 2020 13:07:30 -0700 Subject: [PATCH 1119/1971] KVM: x86: minor code refactor and comments fixup around dirty logging Consolidate the code and correct the comments to show that the actions taken to update existing mappings to disable or enable dirty logging are not necessary when creating, moving, or deleting a memslot. 
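[Editor's note: the early-exit logic this refactor arrives at can be summarised in a small, stand-alone sketch. The enum and flag value below are simplified stand-ins for the real KVM definitions, not the kernel's own types.]

enum mr_change { MR_CREATE, MR_DELETE, MR_MOVE, MR_FLAGS_ONLY };
#define MEM_READONLY (1u << 1)

/* Dirty-logging spte fixups are only needed for a flags-only update of a
 * writable slot; CREATE/MOVE/DELETE are already covered by zapping and
 * invalidation elsewhere, and read-only slots never need them. */
static int needs_dirty_logging_update(enum mr_change change, unsigned int new_flags)
{
	if (change != MR_FLAGS_ONLY)
		return 0;
	if (new_flags & MEM_READONLY)
		return 0;
	return 1;
}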
Signed-off-by: Anthony Yznaga Message-Id: <1591128450-11977-4-git-send-email-anthony.yznaga@oracle.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 104 +++++++++++++++++++++------------------------ 1 file changed, 49 insertions(+), 55 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index f39bdd5fd7cf1..13512baf6f6e3 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -10140,41 +10140,65 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, } static void kvm_mmu_slot_apply_flags(struct kvm *kvm, - struct kvm_memory_slot *new) + struct kvm_memory_slot *old, + struct kvm_memory_slot *new, + enum kvm_mr_change change) { - /* Nothing to do for RO slots */ - if (new->flags & KVM_MEM_READONLY) + /* + * Nothing to do for RO slots or CREATE/MOVE/DELETE of a slot. + * See comments below. + */ + if ((change != KVM_MR_FLAGS_ONLY) || (new->flags & KVM_MEM_READONLY)) return; /* - * Call kvm_x86_ops dirty logging hooks when they are valid. - * - * kvm_x86_ops.slot_disable_log_dirty is called when: - * - * - KVM_MR_CREATE with dirty logging is disabled - * - KVM_MR_FLAGS_ONLY with dirty logging is disabled in new flag - * - * The reason is, in case of PML, we need to set D-bit for any slots - * with dirty logging disabled in order to eliminate unnecessary GPA - * logging in PML buffer (and potential PML buffer full VMEXIT). This - * guarantees leaving PML enabled during guest's lifetime won't have - * any additional overhead from PML when guest is running with dirty - * logging disabled for memory slots. + * Dirty logging tracks sptes in 4k granularity, meaning that large + * sptes have to be split. If live migration is successful, the guest + * in the source machine will be destroyed and large sptes will be + * created in the destination. However, if the guest continues to run + * in the source machine (for example if live migration fails), small + * sptes will remain around and cause bad performance. * - * kvm_x86_ops.slot_enable_log_dirty is called when switching new slot - * to dirty logging mode. + * Scan sptes if dirty logging has been stopped, dropping those + * which can be collapsed into a single large-page spte. Later + * page faults will create the large-page sptes. * - * If kvm_x86_ops dirty logging hooks are invalid, use write protect. + * There is no need to do this in any of the following cases: + * CREATE: No dirty mappings will already exist. + * MOVE/DELETE: The old mappings will already have been cleaned up by + * kvm_arch_flush_shadow_memslot() + */ + if ((old->flags & KVM_MEM_LOG_DIRTY_PAGES) && + !(new->flags & KVM_MEM_LOG_DIRTY_PAGES)) + kvm_mmu_zap_collapsible_sptes(kvm, new); + + /* + * Enable or disable dirty logging for the slot. * - * In case of write protect: + * For KVM_MR_DELETE and KVM_MR_MOVE, the shadow pages of the old + * slot have been zapped so no dirty logging updates are needed for + * the old slot. + * For KVM_MR_CREATE and KVM_MR_MOVE, once the new slot is visible + * any mappings that might be created in it will consume the + * properties of the new slot and do not need to be updated here. * - * Write protect all pages for dirty logging. + * When PML is enabled, the kvm_x86_ops dirty logging hooks are + * called to enable/disable dirty logging. * - * All the sptes including the large sptes which point to this - * slot are set to readonly. We can not create any new large - * spte on this slot until the end of the logging. 
+ * When disabling dirty logging with PML enabled, the D-bit is set + * for sptes in the slot in order to prevent unnecessary GPA + * logging in the PML buffer (and potential PML buffer full VMEXIT). + * This guarantees leaving PML enabled for the guest's lifetime + * won't have any additional overhead from PML when the guest is + * running with dirty logging disabled. * + * When enabling dirty logging, large sptes are write-protected + * so they can be split on first write. New large sptes cannot + * be created for this slot until the end of the logging. * See the comments in fast_page_fault(). + * For small sptes, nothing is done if the dirty log is in the + * initial-all-set state. Otherwise, depending on whether pml + * is enabled the D-bit or the W-bit will be cleared. */ if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) { if (kvm_x86_ops.slot_enable_log_dirty) { @@ -10211,39 +10235,9 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, kvm_mmu_calculate_default_mmu_pages(kvm)); /* - * Dirty logging tracks sptes in 4k granularity, meaning that large - * sptes have to be split. If live migration is successful, the guest - * in the source machine will be destroyed and large sptes will be - * created in the destination. However, if the guest continues to run - * in the source machine (for example if live migration fails), small - * sptes will remain around and cause bad performance. - * - * Scan sptes if dirty logging has been stopped, dropping those - * which can be collapsed into a single large-page spte. Later - * page faults will create the large-page sptes. - * - * There is no need to do this in any of the following cases: - * CREATE: No dirty mappings will already exist. - * MOVE/DELETE: The old mappings will already have been cleaned up by - * kvm_arch_flush_shadow_memslot() - */ - if (change == KVM_MR_FLAGS_ONLY && - (old->flags & KVM_MEM_LOG_DIRTY_PAGES) && - !(new->flags & KVM_MEM_LOG_DIRTY_PAGES)) - kvm_mmu_zap_collapsible_sptes(kvm, new); - - /* - * Set up write protection and/or dirty logging for the new slot. - * - * For KVM_MR_DELETE and KVM_MR_MOVE, the shadow pages of old slot have - * been zapped so no dirty logging staff is needed for old slot. For - * KVM_MR_FLAGS_ONLY, the old slot is essentially the same one as the - * new and it's also covered when dealing with the new slot. - * * FIXME: const-ify all uses of struct kvm_memory_slot. */ - if (change == KVM_MR_FLAGS_ONLY) - kvm_mmu_slot_apply_flags(kvm, (struct kvm_memory_slot *) new); + kvm_mmu_slot_apply_flags(kvm, old, (struct kvm_memory_slot *) new, change); /* Free the arrays associated with the old memslot. */ if (change == KVM_MR_MOVE) -- GitLab From e78790f84a5417287965a06cd4dea85df0743935 Mon Sep 17 00:00:00 2001 From: Sahitya Tummala Date: Tue, 2 Jun 2020 18:11:47 +0530 Subject: [PATCH 1120/1971] f2fs: fix retry logic in f2fs_write_cache_pages() In case a compressed file is getting overwritten, the current retry logic doesn't include the current page to be retried now as it sets the new start index as 0 and new end index as writeback_index - 1. This causes the corresponding cluster to be uncompressed and written as normal pages without compression. Fix this by allowing writeback to be retried for the current page as well (in case of compressed page getting retried due to index mismatch with cluster index). So that this cluster can be written compressed in case of overwrite. 
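[Editor's note: the retry flow described above (mirroring commit 64081362e8ff) can be modelled as a small stand-alone C routine. write_page() is a placeholder callback, not an f2fs function; the point is only that the retry pass restarts from page 0 so the page that triggered the retry is written again.]

#include <stdbool.h>
#include <stddef.h>

static void writeback_cyclic(bool (*write_page)(size_t), size_t start, size_t nr_pages)
{
	size_t index = start;	/* remembered cyclic starting point */
	int passes = 0;
	bool retry;

again:
	retry = false;
	for (; index < nr_pages; index++)
		if (!write_page(index))
			retry = true;
	if (retry && passes++ == 0) {
		index = 0;	/* rescan the whole range, including 'start' */
		goto again;
	}
}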
Also, align f2fs_write_cache_pages() according to the change - <64081362e8ff>("mm/page-writeback.c: fix range_cyclic writeback vs writepages deadlock"). Signed-off-by: Sahitya Tummala Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/data.c | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 48a622b95b76e..a65bfc07ddb97 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -2861,7 +2861,6 @@ static int f2fs_write_cache_pages(struct address_space *mapping, pgoff_t index; pgoff_t end; /* Inclusive */ pgoff_t done_index; - int cycled; int range_whole = 0; xa_mark_t tag; int nwritten = 0; @@ -2879,17 +2878,12 @@ static int f2fs_write_cache_pages(struct address_space *mapping, if (wbc->range_cyclic) { writeback_index = mapping->writeback_index; /* prev offset */ index = writeback_index; - if (index == 0) - cycled = 1; - else - cycled = 0; end = -1; } else { index = wbc->range_start >> PAGE_SHIFT; end = wbc->range_end >> PAGE_SHIFT; if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) range_whole = 1; - cycled = 1; /* ignore range_cyclic tests */ } if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) tag = PAGECACHE_TAG_TOWRITE; @@ -3054,12 +3048,13 @@ next: } } #endif - if ((!cycled && !done) || retry) { - cycled = 1; + if (retry) { index = 0; - end = writeback_index - 1; + end = -1; goto retry; } + if (wbc->range_cyclic && !done) + done_index = 0; if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) mapping->writeback_index = done_index; -- GitLab From 01fcb1cbc88effb3493c6197efc96b69b9f4823a Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Fri, 29 May 2020 16:02:58 +0800 Subject: [PATCH 1121/1971] vhost: allow device that does not depend on vhost worker vDPA device currently relays the eventfd via vhost worker. This is inefficient due the latency of wakeup and scheduling, so this patch tries to introduce a use_worker attribute for the vhost device. When use_worker is not set with vhost_dev_init(), vhost won't try to allocate a worker thread and the vhost_poll will be processed directly in the wakeup function. This help for vDPA since it reduces the latency caused by vhost worker. In my testing, it saves 0.2 ms in pings between VMs on a mutual host. Signed-off-by: Zhu Lingshan Signed-off-by: Jason Wang Link: https://lore.kernel.org/r/20200529080303.15449-2-jasowang@redhat.com Signed-off-by: Michael S. 
Tsirkin --- drivers/vhost/net.c | 2 +- drivers/vhost/scsi.c | 2 +- drivers/vhost/vdpa.c | 2 +- drivers/vhost/vhost.c | 38 +++++++++++++++++++++++++------------- drivers/vhost/vhost.h | 2 ++ drivers/vhost/vsock.c | 2 +- 6 files changed, 31 insertions(+), 17 deletions(-) diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 2927f02cc7e1b..bf5e1d81ae250 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -1326,7 +1326,7 @@ static int vhost_net_open(struct inode *inode, struct file *f) } vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX, UIO_MAXIOV + VHOST_NET_BATCH, - VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT, + VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT, true, NULL); vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev); diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index c39952243fd32..0cbaa0b3893df 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -1628,7 +1628,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f) vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick; } vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV, - VHOST_SCSI_WEIGHT, 0, NULL); + VHOST_SCSI_WEIGHT, 0, true, NULL); vhost_scsi_init_inflight(vs, NULL); diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index 0968361e3b774..f22bb316b8c4f 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -694,7 +694,7 @@ static int vhost_vdpa_open(struct inode *inode, struct file *filep) vqs[i] = &v->vqs[i]; vqs[i]->handle_kick = handle_vq_kick; } - vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, + vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false, vhost_vdpa_process_iotlb_msg); dev->iotlb = vhost_iotlb_alloc(0, 0); diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 96d9871fa0cb5..80da9d9662f26 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -166,11 +166,16 @@ static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) { struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait); + struct vhost_work *work = &poll->work; if (!(key_to_poll(key) & poll->mask)) return 0; - vhost_poll_queue(poll); + if (!poll->dev->use_worker) + work->fn(work); + else + vhost_poll_queue(poll); + return 0; } @@ -454,6 +459,7 @@ static size_t vhost_get_desc_size(struct vhost_virtqueue *vq, void vhost_dev_init(struct vhost_dev *dev, struct vhost_virtqueue **vqs, int nvqs, int iov_limit, int weight, int byte_weight, + bool use_worker, int (*msg_handler)(struct vhost_dev *dev, struct vhost_iotlb_msg *msg)) { @@ -471,6 +477,7 @@ void vhost_dev_init(struct vhost_dev *dev, dev->iov_limit = iov_limit; dev->weight = weight; dev->byte_weight = byte_weight; + dev->use_worker = use_worker; dev->msg_handler = msg_handler; init_llist_head(&dev->work_list); init_waitqueue_head(&dev->wait); @@ -549,18 +556,21 @@ long vhost_dev_set_owner(struct vhost_dev *dev) /* No owner, become one */ dev->mm = get_task_mm(current); dev->kcov_handle = kcov_common_handle(); - worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid); - if (IS_ERR(worker)) { - err = PTR_ERR(worker); - goto err_worker; - } + if (dev->use_worker) { + worker = kthread_create(vhost_worker, dev, + "vhost-%d", current->pid); + if (IS_ERR(worker)) { + err = PTR_ERR(worker); + goto err_worker; + } - dev->worker = worker; - wake_up_process(worker); /* avoid contributing to loadavg */ + dev->worker = worker; + wake_up_process(worker); /* avoid contributing to loadavg */ - err = vhost_attach_cgroups(dev); - if (err) - goto err_cgroup; + err = 
vhost_attach_cgroups(dev); + if (err) + goto err_cgroup; + } err = vhost_dev_alloc_iovecs(dev); if (err) @@ -568,8 +578,10 @@ long vhost_dev_set_owner(struct vhost_dev *dev) return 0; err_cgroup: - kthread_stop(worker); - dev->worker = NULL; + if (dev->worker) { + kthread_stop(dev->worker); + dev->worker = NULL; + } err_worker: if (dev->mm) mmput(dev->mm); diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index 60cab4c782296..c8e96a095d3b8 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h @@ -154,6 +154,7 @@ struct vhost_dev { int weight; int byte_weight; u64 kcov_handle; + bool use_worker; int (*msg_handler)(struct vhost_dev *dev, struct vhost_iotlb_msg *msg); }; @@ -161,6 +162,7 @@ struct vhost_dev { bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len); void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs, int iov_limit, int weight, int byte_weight, + bool use_worker, int (*msg_handler)(struct vhost_dev *dev, struct vhost_iotlb_msg *msg)); long vhost_dev_set_owner(struct vhost_dev *dev); diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index fb4e944c4d0d7..a483cec31d5cb 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -632,7 +632,7 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file) vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs), UIO_MAXIOV, VHOST_VSOCK_PKT_WEIGHT, - VHOST_VSOCK_WEIGHT, NULL); + VHOST_VSOCK_WEIGHT, true, NULL); file->private_data = vsock; spin_lock_init(&vsock->send_pkt_list_lock); -- GitLab From 5ce995f313ce56c0c62425c3ddc37c5c50fc33db Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Fri, 29 May 2020 16:02:59 +0800 Subject: [PATCH 1122/1971] vhost: use mmgrab() instead of mmget() for non worker device For the device that doesn't use vhost worker and use_mm(), mmget() is too heavy weight and it may brings troubles for implementing mmap() support for vDPA device. This is because, an reference to the address space was held via mm_get() in vhost_dev_set_owner() and an reference to the file was held in mmap(). This means when process exits, the mm can not be released thus we can not release the file. This patch tries to use mmgrab() instead of mmget(), which allows the address space to be destroy in process exit without releasing the mm structure itself. This is sufficient for vDPA device which pin user pages and does not depend on the address space to work. Signed-off-by: Jason Wang Link: https://lore.kernel.org/r/20200529080303.15449-3-jasowang@redhat.com Signed-off-by: Michael S. Tsirkin --- drivers/vhost/vhost.c | 42 ++++++++++++++++++++++++++++++++++-------- 1 file changed, 34 insertions(+), 8 deletions(-) diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 80da9d9662f26..a40d16bdebb5e 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -541,6 +541,36 @@ bool vhost_dev_has_owner(struct vhost_dev *dev) } EXPORT_SYMBOL_GPL(vhost_dev_has_owner); +static void vhost_attach_mm(struct vhost_dev *dev) +{ + /* No owner, become one */ + if (dev->use_worker) { + dev->mm = get_task_mm(current); + } else { + /* vDPA device does not use worker thead, so there's + * no need to hold the address space for mm. This help + * to avoid deadlock in the case of mmap() which may + * held the refcnt of the file and depends on release + * method to remove vma. 
+ */ + dev->mm = current->mm; + mmgrab(dev->mm); + } +} + +static void vhost_detach_mm(struct vhost_dev *dev) +{ + if (!dev->mm) + return; + + if (dev->use_worker) + mmput(dev->mm); + else + mmdrop(dev->mm); + + dev->mm = NULL; +} + /* Caller should have device mutex */ long vhost_dev_set_owner(struct vhost_dev *dev) { @@ -553,8 +583,8 @@ long vhost_dev_set_owner(struct vhost_dev *dev) goto err_mm; } - /* No owner, become one */ - dev->mm = get_task_mm(current); + vhost_attach_mm(dev); + dev->kcov_handle = kcov_common_handle(); if (dev->use_worker) { worker = kthread_create(vhost_worker, dev, @@ -583,9 +613,7 @@ err_cgroup: dev->worker = NULL; } err_worker: - if (dev->mm) - mmput(dev->mm); - dev->mm = NULL; + vhost_detach_mm(dev); dev->kcov_handle = 0; err_mm: return err; @@ -682,9 +710,7 @@ void vhost_dev_cleanup(struct vhost_dev *dev) dev->worker = NULL; dev->kcov_handle = 0; } - if (dev->mm) - mmput(dev->mm); - dev->mm = NULL; + vhost_detach_mm(dev); } EXPORT_SYMBOL_GPL(vhost_dev_cleanup); -- GitLab From c25a26e653a61b93339dcd1b734c889df60b3eef Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Fri, 29 May 2020 16:03:00 +0800 Subject: [PATCH 1123/1971] vdpa: introduce get_vq_notification method This patch introduces a new method in the vdpa_config_ops which reports the physical address and the size of the doorbell for a specific virtqueue. This will be used by the future patches that maps doorbell to userspace. Signed-off-by: Jason Wang Link: https://lore.kernel.org/r/20200529080303.15449-4-jasowang@redhat.com Signed-off-by: Michael S. Tsirkin --- include/linux/vdpa.h | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h index 5453af87a33ee..239db794357c2 100644 --- a/include/linux/vdpa.h +++ b/include/linux/vdpa.h @@ -17,6 +17,16 @@ struct vdpa_callback { void *private; }; +/** + * vDPA notification area + * @addr: base address of the notification area + * @size: size of the notification area + */ +struct vdpa_notification_area { + resource_size_t addr; + resource_size_t size; +}; + /** * vDPA device - representation of a vDPA device * @dev: underlying device @@ -73,6 +83,10 @@ struct vdpa_device { * @vdev: vdpa device * @idx: virtqueue index * Returns virtqueue state (last_avail_idx) + * @get_vq_notification: Get the notification area for a virtqueue + * @vdev: vdpa device + * @idx: virtqueue index + * Returns the notifcation area * @get_vq_align: Get the virtqueue align requirement * for the device * @vdev: vdpa device @@ -162,6 +176,8 @@ struct vdpa_config_ops { bool (*get_vq_ready)(struct vdpa_device *vdev, u16 idx); int (*set_vq_state)(struct vdpa_device *vdev, u16 idx, u64 state); u64 (*get_vq_state)(struct vdpa_device *vdev, u16 idx); + struct vdpa_notification_area + (*get_vq_notification)(struct vdpa_device *vdev, u16 idx); /* Device ops */ u32 (*get_vq_align)(struct vdpa_device *vdev); -- GitLab From ddd89d0a059d8e9740c75a97e0efe9bf07ee51f9 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Fri, 29 May 2020 16:03:01 +0800 Subject: [PATCH 1124/1971] vhost_vdpa: support doorbell mapping via mmap Currently the doorbell is relayed via eventfd which may have significant overhead because of the cost of vmexits or syscall. This patch introduces mmap() based doorbell mapping which can eliminate the overhead caused by vmexit or syscall. To ease the userspace modeling of the doorbell layout (usually virtio-pci), this patch starts from a doorbell per page model. 
Vhost-vdpa only support the hardware doorbell that sit at the boundary of a page and does not share the page with other registers. Doorbell of each virtqueue must be mapped separately, pgoff is the index of the virtqueue. This allows userspace to map a subset of the doorbell which may be useful for the implementation of software assisted virtqueue (control vq) in the future. Signed-off-by: Jason Wang Link: https://lore.kernel.org/r/20200529080303.15449-5-jasowang@redhat.com Signed-off-by: Michael S. Tsirkin --- drivers/vhost/vdpa.c | 59 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 59 insertions(+) diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index f22bb316b8c4f..3d163ec6fb28d 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -739,12 +740,70 @@ static int vhost_vdpa_release(struct inode *inode, struct file *filep) return 0; } +static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf) +{ + struct vhost_vdpa *v = vmf->vma->vm_file->private_data; + struct vdpa_device *vdpa = v->vdpa; + const struct vdpa_config_ops *ops = vdpa->config; + struct vdpa_notification_area notify; + struct vm_area_struct *vma = vmf->vma; + u16 index = vma->vm_pgoff; + + notify = ops->get_vq_notification(vdpa, index); + + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + if (remap_pfn_range(vma, vmf->address & PAGE_MASK, + notify.addr >> PAGE_SHIFT, PAGE_SIZE, + vma->vm_page_prot)) + return VM_FAULT_SIGBUS; + + return VM_FAULT_NOPAGE; +} + +static const struct vm_operations_struct vhost_vdpa_vm_ops = { + .fault = vhost_vdpa_fault, +}; + +static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct vhost_vdpa *v = vma->vm_file->private_data; + struct vdpa_device *vdpa = v->vdpa; + const struct vdpa_config_ops *ops = vdpa->config; + struct vdpa_notification_area notify; + int index = vma->vm_pgoff; + + if (vma->vm_end - vma->vm_start != PAGE_SIZE) + return -EINVAL; + if ((vma->vm_flags & VM_SHARED) == 0) + return -EINVAL; + if (vma->vm_flags & VM_READ) + return -EINVAL; + if (index > 65535) + return -EINVAL; + if (!ops->get_vq_notification) + return -ENOTSUPP; + + /* To be safe and easily modelled by userspace, We only + * support the doorbell which sits on the page boundary and + * does not share the page with other registers. + */ + notify = ops->get_vq_notification(vdpa, index); + if (notify.addr & (PAGE_SIZE - 1)) + return -EINVAL; + if (vma->vm_end - vma->vm_start != notify.size) + return -ENOTSUPP; + + vma->vm_ops = &vhost_vdpa_vm_ops; + return 0; +} + static const struct file_operations vhost_vdpa_fops = { .owner = THIS_MODULE, .open = vhost_vdpa_open, .release = vhost_vdpa_release, .write_iter = vhost_vdpa_chr_write_iter, .unlocked_ioctl = vhost_vdpa_unlocked_ioctl, + .mmap = vhost_vdpa_mmap, .compat_ioctl = compat_ptr_ioctl, }; -- GitLab From 4b4e4867d92205158c524842f59b1c1caeb969fe Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Thu, 4 Jun 2020 14:47:29 -0400 Subject: [PATCH 1125/1971] vhost_vdpa: disable doorbell mapping for !MMU There could be ways to support doorbell mapping with !MMU, but things like pgprot_noncached are not universally supported. Fixable, but just disable this for now. Signed-off-by: Michael S. 
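[Editor's note: a minimal user-space sketch of the doorbell-per-page model described here, assuming an already-open vhost-vdpa file descriptor. This is illustrative only and not part of the patch.]

#include <sys/mman.h>
#include <unistd.h>

/* Map the doorbell of one virtqueue: the mmap offset encodes the virtqueue
 * index in units of pages, and the mapping must be shared and write-only,
 * matching the checks added in vhost_vdpa_mmap() below. */
static void *map_vq_doorbell(int vdpa_fd, unsigned int vq_index)
{
	long page_size = sysconf(_SC_PAGESIZE);

	return mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, vdpa_fd,
		    (off_t)vq_index * page_size);
}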
Tsirkin --- drivers/vhost/vdpa.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index 3d163ec6fb28d..6ca7660ee6b55 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -740,6 +740,7 @@ static int vhost_vdpa_release(struct inode *inode, struct file *filep) return 0; } +#ifdef CONFIG_MMU static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf) { struct vhost_vdpa *v = vmf->vma->vm_file->private_data; @@ -796,6 +797,7 @@ static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma) vma->vm_ops = &vhost_vdpa_vm_ops; return 0; } +#endif /* CONFIG_MMU */ static const struct file_operations vhost_vdpa_fops = { .owner = THIS_MODULE, @@ -803,7 +805,9 @@ static const struct file_operations vhost_vdpa_fops = { .release = vhost_vdpa_release, .write_iter = vhost_vdpa_chr_write_iter, .unlocked_ioctl = vhost_vdpa_unlocked_ioctl, +#ifdef CONFIG_MMU .mmap = vhost_vdpa_mmap, +#endif /* CONFIG_MMU */ .compat_ioctl = compat_ptr_ioctl, }; -- GitLab From fb69c2c896fc8289b0d9e2c0791472e7cd398bca Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 8 May 2020 10:40:06 -0700 Subject: [PATCH 1126/1971] virtio-balloon: Disable free page reporting if page poison reporting is not enabled We should disable free page reporting if page poisoning is enabled but we cannot report it via the balloon interface. This way we can avoid the possibility of corrupting guest memory. Normally the page poisoning feature should always be present when free page reporting is enabled on the hypervisor, however this allows us to correctly handle a case of the virtio-balloon device being possibly misconfigured. Fixes: 5d757c8d518d ("virtio-balloon: add support for providing free page reports to host") Cc: stable@vger.kernel.org Acked-by: David Hildenbrand Signed-off-by: Alexander Duyck Link: https://lore.kernel.org/r/20200508173732.17877.85060.stgit@localhost.localdomain Signed-off-by: Michael S. Tsirkin --- drivers/virtio/virtio_balloon.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 51086a5afdd41..1f157d2f49526 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -1107,11 +1107,18 @@ static int virtballoon_restore(struct virtio_device *vdev) static int virtballoon_validate(struct virtio_device *vdev) { - /* Tell the host whether we care about poisoned pages. */ + /* + * Inform the hypervisor that our pages are poisoned or + * initialized. If we cannot do that then we should disable + * page reporting as it could potentially change the contents + * of our free pages. + */ if (!want_init_on_free() && (IS_ENABLED(CONFIG_PAGE_POISONING_NO_SANITY) || !page_poisoning_enabled())) __virtio_clear_bit(vdev, VIRTIO_BALLOON_F_PAGE_POISON); + else if (!virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON)) + __virtio_clear_bit(vdev, VIRTIO_BALLOON_F_REPORTING); __virtio_clear_bit(vdev, VIRTIO_F_IOMMU_PLATFORM); return 0; -- GitLab From b02989f37fc5e865ceeee9070907e4493b3a21e2 Mon Sep 17 00:00:00 2001 From: "Longpeng(Mike)" Date: Tue, 2 Jun 2020 15:04:59 +0800 Subject: [PATCH 1127/1971] crypto: virtio: Fix src/dst scatterlist calculation in __virtio_crypto_skcipher_do_req() The system will crash when the users insmod crypto/tcrypt.ko with mode=38 ( testing "cts(cbc(aes))" ). Usually the next entry of one sg will be @sg@ + 1, but if this sg element is part of a chained scatterlist, it could jump to the start of a new scatterlist array. 
Fix it by sg_next() on calculation of src/dst scatterlist. Fixes: dbaf0624ffa5 ("crypto: add virtio-crypto driver") Reported-by: LABBE Corentin Cc: Herbert Xu Cc: "Michael S. Tsirkin" Cc: Jason Wang Cc: "David S. Miller" Cc: virtualization@lists.linux-foundation.org Cc: linux-kernel@vger.kernel.org Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20200123101000.GB24255@Red Signed-off-by: Gonglei Signed-off-by: Longpeng(Mike) Link: https://lore.kernel.org/r/20200602070501.2023-2-longpeng2@huawei.com Signed-off-by: Michael S. Tsirkin --- drivers/crypto/virtio/virtio_crypto_algs.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c index fd045e64972a5..5f82435630094 100644 --- a/drivers/crypto/virtio/virtio_crypto_algs.c +++ b/drivers/crypto/virtio/virtio_crypto_algs.c @@ -350,13 +350,18 @@ __virtio_crypto_skcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req, int err; unsigned long flags; struct scatterlist outhdr, iv_sg, status_sg, **sgs; - int i; u64 dst_len; unsigned int num_out = 0, num_in = 0; int sg_total; uint8_t *iv; + struct scatterlist *sg; src_nents = sg_nents_for_len(req->src, req->cryptlen); + if (src_nents < 0) { + pr_err("Invalid number of src SG.\n"); + return src_nents; + } + dst_nents = sg_nents(req->dst); pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n", @@ -442,12 +447,12 @@ __virtio_crypto_skcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req, vc_sym_req->iv = iv; /* Source data */ - for (i = 0; i < src_nents; i++) - sgs[num_out++] = &req->src[i]; + for (sg = req->src; src_nents; sg = sg_next(sg), src_nents--) + sgs[num_out++] = sg; /* Destination data */ - for (i = 0; i < dst_nents; i++) - sgs[num_out + num_in++] = &req->dst[i]; + for (sg = req->dst; sg; sg = sg_next(sg)) + sgs[num_out + num_in++] = sg; /* Status */ sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status)); -- GitLab From 8c855f0720ff006d75d0a2512c7f6c4f60ff60ee Mon Sep 17 00:00:00 2001 From: "Longpeng(Mike)" Date: Tue, 2 Jun 2020 15:05:00 +0800 Subject: [PATCH 1128/1971] crypto: virtio: Fix use-after-free in virtio_crypto_skcipher_finalize_req() The system'll crash when the users insmod crypto/tcrypto.ko with mode=155 ( testing "authenc(hmac(sha1),cbc(aes))" ). It's caused by reuse the memory of request structure. In crypto_authenc_init_tfm(), the reqsize is set to: [PART 1] sizeof(authenc_request_ctx) + [PART 2] ictx->reqoff + [PART 3] MAX(ahash part, skcipher part) and the 'PART 3' is used by both ahash and skcipher in turn. When the virtio_crypto driver finish skcipher req, it'll call ->complete callback(in crypto_finalize_skcipher_request) and then free its resources whose pointers are recorded in 'skcipher parts'. However, the ->complete is 'crypto_authenc_encrypt_done' in this case, it will use the 'ahash part' of the request and change its content, so virtio_crypto driver will get the wrong pointer after ->complete finish and mistakenly free some other's memory. So the system will crash when these memory will be used again. The resources which need to be cleaned up are not used any more. But the pointers of these resources may be changed in the function "crypto_finalize_skcipher_request". Thus release specific resources before calling this function. Fixes: dbaf0624ffa5 ("crypto: add virtio-crypto driver") Reported-by: LABBE Corentin Cc: Gonglei Cc: Herbert Xu Cc: "Michael S. Tsirkin" Cc: Jason Wang Cc: "David S. 
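[Editor's note: the safe walk over a possibly chained scatterlist, which the fix below adopts, looks roughly like the sketch that follows; use_entry() is a placeholder for whatever the caller does with each element.]

	struct scatterlist *sg;
	int n;

	n = sg_nents_for_len(req->src, req->cryptlen);
	if (n < 0)
		return n;		/* malformed source scatterlist */

	/* Advance with sg_next() so chained tables are followed correctly;
	 * indexing as &req->src[i] can run past the end of a chained array. */
	for (sg = req->src; sg && n; sg = sg_next(sg), n--)
		use_entry(sg);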
Miller" Cc: virtualization@lists.linux-foundation.org Cc: linux-kernel@vger.kernel.org Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20200123101000.GB24255@Red Acked-by: Gonglei Signed-off-by: Longpeng(Mike) Link: https://lore.kernel.org/r/20200602070501.2023-3-longpeng2@huawei.com Signed-off-by: Michael S. Tsirkin --- drivers/crypto/virtio/virtio_crypto_algs.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c index 5f82435630094..52261b6c247ee 100644 --- a/drivers/crypto/virtio/virtio_crypto_algs.c +++ b/drivers/crypto/virtio/virtio_crypto_algs.c @@ -582,10 +582,11 @@ static void virtio_crypto_skcipher_finalize_req( scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - AES_BLOCK_SIZE, AES_BLOCK_SIZE, 0); - crypto_finalize_skcipher_request(vc_sym_req->base.dataq->engine, - req, err); kzfree(vc_sym_req->iv); virtcrypto_clear_request(&vc_sym_req->base); + + crypto_finalize_skcipher_request(vc_sym_req->base.dataq->engine, + req, err); } static struct virtio_crypto_algo virtio_crypto_algs[] = { { -- GitLab From d90ca42012db2863a9a30b564a2ace6016594bda Mon Sep 17 00:00:00 2001 From: "Longpeng(Mike)" Date: Tue, 2 Jun 2020 15:05:01 +0800 Subject: [PATCH 1129/1971] crypto: virtio: Fix dest length calculation in __virtio_crypto_skcipher_do_req() The src/dst length is not aligned with AES_BLOCK_SIZE(which is 16) in some testcases in tcrypto.ko. For example, the src/dst length of one of cts(cbc(aes))'s testcase is 17, the crypto_virtio driver will set @src_data_len=16 but @dst_data_len=17 in this case and get a wrong at then end. SRC: pp pp pp pp pp pp pp pp pp pp pp pp pp pp pp pp pp (17 bytes) EXP: cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc pp (17 bytes) DST: cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc 00 (pollute the last bytes) (pp: plaintext cc:ciphertext) Fix this issue by limit the length of dest buffer. Fixes: dbaf0624ffa5 ("crypto: add virtio-crypto driver") Cc: Gonglei Cc: Herbert Xu Cc: "Michael S. Tsirkin" Cc: Jason Wang Cc: "David S. Miller" Cc: virtualization@lists.linux-foundation.org Cc: linux-kernel@vger.kernel.org Cc: stable@vger.kernel.org Signed-off-by: Longpeng(Mike) Link: https://lore.kernel.org/r/20200602070501.2023-4-longpeng2@huawei.com Signed-off-by: Michael S. Tsirkin --- drivers/crypto/virtio/virtio_crypto_algs.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c index 52261b6c247ee..cb8a6ea2a4bc4 100644 --- a/drivers/crypto/virtio/virtio_crypto_algs.c +++ b/drivers/crypto/virtio/virtio_crypto_algs.c @@ -407,6 +407,7 @@ __virtio_crypto_skcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req, goto free; } + dst_len = min_t(unsigned int, req->cryptlen, dst_len); pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n", req->cryptlen, dst_len); -- GitLab From 002ef18eff43708be1d914cb11dbdd84fb149474 Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Wed, 27 May 2020 20:05:38 +0200 Subject: [PATCH 1130/1971] vhost: (cosmetic) remove a superfluous variable initialisation Even the compiler is able to figure out that in this case the initialisation is superfluous. Signed-off-by: Guennadi Liakhovetski Link: https://lore.kernel.org/r/20200527180541.5570-3-guennadi.liakhovetski@linux.intel.com Signed-off-by: Michael S. 
Tsirkin --- drivers/vhost/vhost.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index a40d16bdebb5e..534d1267b761d 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -920,7 +920,7 @@ static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq, #define vhost_put_user(vq, x, ptr) \ ({ \ - int ret = -EFAULT; \ + int ret; \ if (!vq->iotlb) { \ ret = __put_user(x, ptr); \ } else { \ -- GitLab From 7dd793f37e2ab4ca6585df8cfb1a1fcf44f61248 Mon Sep 17 00:00:00 2001 From: Zhu Lingshan Date: Tue, 12 May 2020 16:00:44 +0800 Subject: [PATCH 1131/1971] ifcvf: move IRQ request/free to status change handlers This commit move IRQ request and free operations from probe() to VIRTIO status change handler to comply with VIRTIO spec. VIRTIO spec 1.1, section 2.1.2 Device Requirements: Device Status Field The device MUST NOT consume buffers or send any used buffer notifications to the driver before DRIVER_OK. Signed-off-by: Zhu Lingshan Link: https://lore.kernel.org/r/1589270444-3669-1-git-send-email-lingshan.zhu@intel.com Signed-off-by: Michael S. Tsirkin Acked-by: Jason Wang --- drivers/vdpa/ifcvf/ifcvf_main.c | 120 +++++++++++++++++++------------- 1 file changed, 73 insertions(+), 47 deletions(-) diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c index abf6a061cab65..d529ed681fe64 100644 --- a/drivers/vdpa/ifcvf/ifcvf_main.c +++ b/drivers/vdpa/ifcvf/ifcvf_main.c @@ -28,6 +28,60 @@ static irqreturn_t ifcvf_intr_handler(int irq, void *arg) return IRQ_HANDLED; } +static void ifcvf_free_irq_vectors(void *data) +{ + pci_free_irq_vectors(data); +} + +static void ifcvf_free_irq(struct ifcvf_adapter *adapter, int queues) +{ + struct pci_dev *pdev = adapter->pdev; + struct ifcvf_hw *vf = &adapter->vf; + int i; + + + for (i = 0; i < queues; i++) + devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]); + + ifcvf_free_irq_vectors(pdev); +} + +static int ifcvf_request_irq(struct ifcvf_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct ifcvf_hw *vf = &adapter->vf; + int vector, i, ret, irq; + + ret = pci_alloc_irq_vectors(pdev, IFCVF_MAX_INTR, + IFCVF_MAX_INTR, PCI_IRQ_MSIX); + if (ret < 0) { + IFCVF_ERR(pdev, "Failed to alloc IRQ vectors\n"); + return ret; + } + + for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) { + snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d\n", + pci_name(pdev), i); + vector = i + IFCVF_MSI_QUEUE_OFF; + irq = pci_irq_vector(pdev, vector); + ret = devm_request_irq(&pdev->dev, irq, + ifcvf_intr_handler, 0, + vf->vring[i].msix_name, + &vf->vring[i]); + if (ret) { + IFCVF_ERR(pdev, + "Failed to request irq for vq %d\n", i); + ifcvf_free_irq(adapter, i); + + return ret; + } + + vf->vring[i].irq = irq; + } + + return 0; +} + static int ifcvf_start_datapath(void *private) { struct ifcvf_hw *vf = ifcvf_private_to_vf(private); @@ -118,17 +172,34 @@ static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status) { struct ifcvf_adapter *adapter; struct ifcvf_hw *vf; + u8 status_old; + int ret; vf = vdpa_to_vf(vdpa_dev); adapter = dev_get_drvdata(vdpa_dev->dev.parent); + status_old = ifcvf_get_status(vf); - if (status == 0) { + if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && + !(status & VIRTIO_CONFIG_S_DRIVER_OK)) { ifcvf_stop_datapath(adapter); + ifcvf_free_irq(adapter, IFCVF_MAX_QUEUE_PAIRS * 2); + } + + if (status == 0) { ifcvf_reset_vring(adapter); return; } - if (status & VIRTIO_CONFIG_S_DRIVER_OK) { + if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && + 
!(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) { + ret = ifcvf_request_irq(adapter); + if (ret) { + status = ifcvf_get_status(vf); + status |= VIRTIO_CONFIG_S_FAILED; + ifcvf_set_status(vf, status); + return; + } + if (ifcvf_start_datapath(adapter) < 0) IFCVF_ERR(adapter->pdev, "Failed to set ifcvf vdpa status %u\n", @@ -284,38 +355,6 @@ static const struct vdpa_config_ops ifc_vdpa_ops = { .set_config_cb = ifcvf_vdpa_set_config_cb, }; -static int ifcvf_request_irq(struct ifcvf_adapter *adapter) -{ - struct pci_dev *pdev = adapter->pdev; - struct ifcvf_hw *vf = &adapter->vf; - int vector, i, ret, irq; - - - for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) { - snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d\n", - pci_name(pdev), i); - vector = i + IFCVF_MSI_QUEUE_OFF; - irq = pci_irq_vector(pdev, vector); - ret = devm_request_irq(&pdev->dev, irq, - ifcvf_intr_handler, 0, - vf->vring[i].msix_name, - &vf->vring[i]); - if (ret) { - IFCVF_ERR(pdev, - "Failed to request irq for vq %d\n", i); - return ret; - } - vf->vring[i].irq = irq; - } - - return 0; -} - -static void ifcvf_free_irq_vectors(void *data) -{ - pci_free_irq_vectors(data); -} - static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct device *dev = &pdev->dev; @@ -349,13 +388,6 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id) return ret; } - ret = pci_alloc_irq_vectors(pdev, IFCVF_MAX_INTR, - IFCVF_MAX_INTR, PCI_IRQ_MSIX); - if (ret < 0) { - IFCVF_ERR(pdev, "Failed to alloc irq vectors\n"); - return ret; - } - ret = devm_add_action_or_reset(dev, ifcvf_free_irq_vectors, pdev); if (ret) { IFCVF_ERR(pdev, @@ -379,12 +411,6 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id) adapter->pdev = pdev; adapter->vdpa.dma_dev = &pdev->dev; - ret = ifcvf_request_irq(adapter); - if (ret) { - IFCVF_ERR(pdev, "Failed to request MSI-X irq\n"); - goto err; - } - ret = ifcvf_init_hw(vf, pdev); if (ret) { IFCVF_ERR(pdev, "Failed to init IFCVF hw\n"); -- GitLab From 18e643cd6c4dcdb09510a4bf739438e7cd1ae1f2 Mon Sep 17 00:00:00 2001 From: Samuel Zou Date: Sat, 9 May 2020 10:20:02 +0800 Subject: [PATCH 1132/1971] vdpasim: Fix some coccinelle warnings Fix below warnings reported by coccicheck: drivers/vdpa/vdpa_sim/vdpa_sim.c:104:1-10: WARNING: Assignment of 0/1 to bool variable drivers/vdpa/vdpa_sim/vdpa_sim.c:164:7-11: WARNING: Unsigned expression compared with zero: read <= 0 drivers/vdpa/vdpa_sim/vdpa_sim.c:169:7-12: WARNING: Unsigned expression compared with zero: write <= 0 1. The 'ready' variable in vdpasim_virtqueue struct is bool type. It is better to initialize vq->ready to false 2. Modify 'read' and 'write' variables type from size_t to ssize_t. And preserve the reverse christmas tree ordering of local variables. Fixes: 2c53d0f64c06 ("vdpasim: vDPA device simulator") Reported-by: Hulk Robot Signed-off-by: Samuel Zou Link: https://lore.kernel.org/r/1588990802-28451-1-git-send-email-zou_wei@huawei.com Signed-off-by: Michael S. 
Tsirkin --- drivers/vdpa/vdpa_sim/vdpa_sim.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c index 01c456f7c1f7b..c7334cc65bb25 100644 --- a/drivers/vdpa/vdpa_sim/vdpa_sim.c +++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c @@ -101,7 +101,7 @@ static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx) static void vdpasim_vq_reset(struct vdpasim_virtqueue *vq) { - vq->ready = 0; + vq->ready = false; vq->desc_addr = 0; vq->driver_addr = 0; vq->device_addr = 0; @@ -131,9 +131,10 @@ static void vdpasim_work(struct work_struct *work) vdpasim, work); struct vdpasim_virtqueue *txq = &vdpasim->vqs[1]; struct vdpasim_virtqueue *rxq = &vdpasim->vqs[0]; - size_t read, write, total_write; - int err; + ssize_t read, write; + size_t total_write; int pkts = 0; + int err; spin_lock(&vdpasim->lock); -- GitLab From 5f1f79bbc9e26fa9412fa9522f957bb8f030c442 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 7 May 2020 16:01:25 +0200 Subject: [PATCH 1133/1971] virtio-mem: Paravirtualized memory hotplug Each virtio-mem device owns exactly one memory region. It is responsible for adding/removing memory from that memory region on request. When the device driver starts up, the requested amount of memory is queried and then plugged to Linux. On request, further memory can be plugged or unplugged. This patch only implements the plugging part. On x86-64, memory can currently be plugged in 4MB ("subblock") granularity. When required, a new memory block will be added (e.g., usually 128MB on x86-64) in order to plug more subblocks. Only x86-64 was tested for now. The online_page callback is used to keep unplugged subblocks offline when onlining memory - similar to the Hyper-V balloon driver. Unplugged pages are marked PG_offline, to tell dump tools (e.g., makedumpfile) to skip them. User space is usually responsible for onlining the added memory. The memory hotplug notifier is used to synchronize virtio-mem activity against memory onlining/offlining. Each virtio-mem device can belong to a NUMA node, which allows us to easily add/remove small chunks of memory to/from a specific NUMA node by using multiple virtio-mem devices. Something that works even when the guest has no idea about the NUMA topology. One way to view virtio-mem is as a "resizable DIMM" or a DIMM with many "sub-DIMMS". This patch directly introduces the basic infrastructure to implement memory unplug. Especially the memory block states and subblock bitmaps will be heavily used there. Notes: - In case memory is to be onlined by user space, we limit the amount of offline memory blocks, to not run out of memory. This is esp. an issue if memory is added faster than it is getting onlined. - Suspend/Hibernate is not supported due to the way virtio-mem devices behave. Limited support might be possible in the future. - Reloading the device driver is not supported. Reviewed-by: Pankaj Gupta Tested-by: Pankaj Gupta Cc: "Michael S. Tsirkin" Cc: Jason Wang Cc: Oscar Salvador Cc: Michal Hocko Cc: Igor Mammedov Cc: Dave Young Cc: Andrew Morton Cc: Dan Williams Cc: Pavel Tatashin Cc: Stefan Hajnoczi Cc: Vlastimil Babka Cc: "Rafael J. Wysocki" Cc: Len Brown Cc: linux-acpi@vger.kernel.org Signed-off-by: David Hildenbrand Link: https://lore.kernel.org/r/20200507140139.17083-2-david@redhat.com Signed-off-by: Michael S. 
Tsirkin --- drivers/virtio/Kconfig | 16 + drivers/virtio/Makefile | 1 + drivers/virtio/virtio_mem.c | 1533 +++++++++++++++++++++++++++++++ include/uapi/linux/virtio_ids.h | 1 + include/uapi/linux/virtio_mem.h | 200 ++++ 5 files changed, 1751 insertions(+) create mode 100644 drivers/virtio/virtio_mem.c create mode 100644 include/uapi/linux/virtio_mem.h diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig index 69a32dfc318a5..d6dde7d2cf762 100644 --- a/drivers/virtio/Kconfig +++ b/drivers/virtio/Kconfig @@ -78,6 +78,22 @@ config VIRTIO_BALLOON If unsure, say M. +config VIRTIO_MEM + tristate "Virtio mem driver" + default m + depends on X86_64 + depends on VIRTIO + depends on MEMORY_HOTPLUG_SPARSE + depends on MEMORY_HOTREMOVE + help + This driver provides access to virtio-mem paravirtualized memory + devices, allowing to hotplug and hotunplug memory. + + This driver was only tested under x86-64, but should theoretically + work on all architectures that support memory hotplug and hotremove. + + If unsure, say M. + config VIRTIO_INPUT tristate "Virtio input driver" depends on VIRTIO diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile index 29a1386ecc03c..4d993791f2d72 100644 --- a/drivers/virtio/Makefile +++ b/drivers/virtio/Makefile @@ -7,3 +7,4 @@ virtio_pci-$(CONFIG_VIRTIO_PCI_LEGACY) += virtio_pci_legacy.o obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o obj-$(CONFIG_VIRTIO_INPUT) += virtio_input.o obj-$(CONFIG_VIRTIO_VDPA) += virtio_vdpa.o +obj-$(CONFIG_VIRTIO_MEM) += virtio_mem.o diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c new file mode 100644 index 0000000000000..5d1dcaa6fc42a --- /dev/null +++ b/drivers/virtio/virtio_mem.c @@ -0,0 +1,1533 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Virtio-mem device driver. + * + * Copyright Red Hat, Inc. 2020 + * + * Author(s): David Hildenbrand + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum virtio_mem_mb_state { + /* Unplugged, not added to Linux. Can be reused later. */ + VIRTIO_MEM_MB_STATE_UNUSED = 0, + /* (Partially) plugged, not added to Linux. Error on add_memory(). */ + VIRTIO_MEM_MB_STATE_PLUGGED, + /* Fully plugged, fully added to Linux, offline. */ + VIRTIO_MEM_MB_STATE_OFFLINE, + /* Partially plugged, fully added to Linux, offline. */ + VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL, + /* Fully plugged, fully added to Linux, online (!ZONE_MOVABLE). */ + VIRTIO_MEM_MB_STATE_ONLINE, + /* Partially plugged, fully added to Linux, online (!ZONE_MOVABLE). */ + VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL, + /* + * Fully plugged, fully added to Linux, online (ZONE_MOVABLE). + * We are not allowed to allocate (unplug) parts of this block that + * are not movable (similar to gigantic pages). We will never allow + * to online OFFLINE_PARTIAL to ZONE_MOVABLE (as they would contain + * unmovable parts). + */ + VIRTIO_MEM_MB_STATE_ONLINE_MOVABLE, + VIRTIO_MEM_MB_STATE_COUNT +}; + +struct virtio_mem { + struct virtio_device *vdev; + + /* We might first have to unplug all memory when starting up. */ + bool unplug_all_required; + + /* Workqueue that processes the plug/unplug requests. */ + struct work_struct wq; + atomic_t config_changed; + + /* Virtqueue for guest->host requests. */ + struct virtqueue *vq; + + /* Wait for a host response to a guest request. */ + wait_queue_head_t host_resp; + + /* Space for one guest request and the host response. 
*/ + struct virtio_mem_req req; + struct virtio_mem_resp resp; + + /* The current size of the device. */ + uint64_t plugged_size; + /* The requested size of the device. */ + uint64_t requested_size; + + /* The device block size (for communicating with the device). */ + uint32_t device_block_size; + /* Physical start address of the memory region. */ + uint64_t addr; + /* Maximum region size in bytes. */ + uint64_t region_size; + + /* The subblock size. */ + uint32_t subblock_size; + /* The number of subblocks per memory block. */ + uint32_t nb_sb_per_mb; + + /* Id of the first memory block of this device. */ + unsigned long first_mb_id; + /* Id of the last memory block of this device. */ + unsigned long last_mb_id; + /* Id of the last usable memory block of this device. */ + unsigned long last_usable_mb_id; + /* Id of the next memory bock to prepare when needed. */ + unsigned long next_mb_id; + + /* Summary of all memory block states. */ + unsigned long nb_mb_state[VIRTIO_MEM_MB_STATE_COUNT]; +#define VIRTIO_MEM_NB_OFFLINE_THRESHOLD 10 + + /* + * One byte state per memory block. + * + * Allocated via vmalloc(). When preparing new blocks, resized + * (alloc+copy+free) when needed (crossing pages with the next mb). + * (when crossing pages). + * + * With 128MB memory blocks, we have states for 512GB of memory in one + * page. + */ + uint8_t *mb_state; + + /* + * $nb_sb_per_mb bit per memory block. Handled similar to mb_state. + * + * With 4MB subblocks, we manage 128GB of memory in one page. + */ + unsigned long *sb_bitmap; + + /* + * Mutex that protects the nb_mb_state, mb_state, and sb_bitmap. + * + * When this lock is held the pointers can't change, ONLINE and + * OFFLINE blocks can't change the state and no subblocks will get + * plugged. + */ + struct mutex hotplug_mutex; + bool hotplug_active; + + /* An error occurred we cannot handle - stop processing requests. */ + bool broken; + + /* The driver is being removed. */ + spinlock_t removal_lock; + bool removing; + + /* Timer for retrying to plug/unplug memory. */ + struct hrtimer retry_timer; +#define VIRTIO_MEM_RETRY_TIMER_MS 30000 + + /* Memory notifier (online/offline events). */ + struct notifier_block memory_notifier; + + /* Next device in the list of virtio-mem devices. */ + struct list_head next; +}; + +/* + * We have to share a single online_page callback among all virtio-mem + * devices. We use RCU to iterate the list in the callback. + */ +static DEFINE_MUTEX(virtio_mem_mutex); +static LIST_HEAD(virtio_mem_devices); + +static void virtio_mem_online_page_cb(struct page *page, unsigned int order); + +/* + * Register a virtio-mem device so it will be considered for the online_page + * callback. + */ +static int register_virtio_mem_device(struct virtio_mem *vm) +{ + int rc = 0; + + /* First device registers the callback. */ + mutex_lock(&virtio_mem_mutex); + if (list_empty(&virtio_mem_devices)) + rc = set_online_page_callback(&virtio_mem_online_page_cb); + if (!rc) + list_add_rcu(&vm->next, &virtio_mem_devices); + mutex_unlock(&virtio_mem_mutex); + + return rc; +} + +/* + * Unregister a virtio-mem device so it will no longer be considered for the + * online_page callback. + */ +static void unregister_virtio_mem_device(struct virtio_mem *vm) +{ + /* Last device unregisters the callback. 
*/ + mutex_lock(&virtio_mem_mutex); + list_del_rcu(&vm->next); + if (list_empty(&virtio_mem_devices)) + restore_online_page_callback(&virtio_mem_online_page_cb); + mutex_unlock(&virtio_mem_mutex); + + synchronize_rcu(); +} + +/* + * Calculate the memory block id of a given address. + */ +static unsigned long virtio_mem_phys_to_mb_id(unsigned long addr) +{ + return addr / memory_block_size_bytes(); +} + +/* + * Calculate the physical start address of a given memory block id. + */ +static unsigned long virtio_mem_mb_id_to_phys(unsigned long mb_id) +{ + return mb_id * memory_block_size_bytes(); +} + +/* + * Calculate the subblock id of a given address. + */ +static unsigned long virtio_mem_phys_to_sb_id(struct virtio_mem *vm, + unsigned long addr) +{ + const unsigned long mb_id = virtio_mem_phys_to_mb_id(addr); + const unsigned long mb_addr = virtio_mem_mb_id_to_phys(mb_id); + + return (addr - mb_addr) / vm->subblock_size; +} + +/* + * Set the state of a memory block, taking care of the state counter. + */ +static void virtio_mem_mb_set_state(struct virtio_mem *vm, unsigned long mb_id, + enum virtio_mem_mb_state state) +{ + const unsigned long idx = mb_id - vm->first_mb_id; + enum virtio_mem_mb_state old_state; + + old_state = vm->mb_state[idx]; + vm->mb_state[idx] = state; + + BUG_ON(vm->nb_mb_state[old_state] == 0); + vm->nb_mb_state[old_state]--; + vm->nb_mb_state[state]++; +} + +/* + * Get the state of a memory block. + */ +static enum virtio_mem_mb_state virtio_mem_mb_get_state(struct virtio_mem *vm, + unsigned long mb_id) +{ + const unsigned long idx = mb_id - vm->first_mb_id; + + return vm->mb_state[idx]; +} + +/* + * Prepare the state array for the next memory block. + */ +static int virtio_mem_mb_state_prepare_next_mb(struct virtio_mem *vm) +{ + unsigned long old_bytes = vm->next_mb_id - vm->first_mb_id + 1; + unsigned long new_bytes = vm->next_mb_id - vm->first_mb_id + 2; + int old_pages = PFN_UP(old_bytes); + int new_pages = PFN_UP(new_bytes); + uint8_t *new_mb_state; + + if (vm->mb_state && old_pages == new_pages) + return 0; + + new_mb_state = vzalloc(new_pages * PAGE_SIZE); + if (!new_mb_state) + return -ENOMEM; + + mutex_lock(&vm->hotplug_mutex); + if (vm->mb_state) + memcpy(new_mb_state, vm->mb_state, old_pages * PAGE_SIZE); + vfree(vm->mb_state); + vm->mb_state = new_mb_state; + mutex_unlock(&vm->hotplug_mutex); + + return 0; +} + +#define virtio_mem_for_each_mb_state(_vm, _mb_id, _state) \ + for (_mb_id = _vm->first_mb_id; \ + _mb_id < _vm->next_mb_id && _vm->nb_mb_state[_state]; \ + _mb_id++) \ + if (virtio_mem_mb_get_state(_vm, _mb_id) == _state) + +/* + * Mark all selected subblocks plugged. + * + * Will not modify the state of the memory block. + */ +static void virtio_mem_mb_set_sb_plugged(struct virtio_mem *vm, + unsigned long mb_id, int sb_id, + int count) +{ + const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id; + + __bitmap_set(vm->sb_bitmap, bit, count); +} + +/* + * Mark all selected subblocks unplugged. + * + * Will not modify the state of the memory block. + */ +static void virtio_mem_mb_set_sb_unplugged(struct virtio_mem *vm, + unsigned long mb_id, int sb_id, + int count) +{ + const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id; + + __bitmap_clear(vm->sb_bitmap, bit, count); +} + +/* + * Test if all selected subblocks are plugged. 
+ */ +static bool virtio_mem_mb_test_sb_plugged(struct virtio_mem *vm, + unsigned long mb_id, int sb_id, + int count) +{ + const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id; + + if (count == 1) + return test_bit(bit, vm->sb_bitmap); + + /* TODO: Helper similar to bitmap_set() */ + return find_next_zero_bit(vm->sb_bitmap, bit + count, bit) >= + bit + count; +} + +/* + * Find the first plugged subblock. Returns vm->nb_sb_per_mb in case there is + * none. + */ +static int virtio_mem_mb_first_plugged_sb(struct virtio_mem *vm, + unsigned long mb_id) +{ + const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb; + + return find_next_bit(vm->sb_bitmap, bit + vm->nb_sb_per_mb, bit) - bit; +} + +/* + * Find the first unplugged subblock. Returns vm->nb_sb_per_mb in case there is + * none. + */ +static int virtio_mem_mb_first_unplugged_sb(struct virtio_mem *vm, + unsigned long mb_id) +{ + const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb; + + return find_next_zero_bit(vm->sb_bitmap, bit + vm->nb_sb_per_mb, bit) - + bit; +} + +/* + * Prepare the subblock bitmap for the next memory block. + */ +static int virtio_mem_sb_bitmap_prepare_next_mb(struct virtio_mem *vm) +{ + const unsigned long old_nb_mb = vm->next_mb_id - vm->first_mb_id; + const unsigned long old_nb_bits = old_nb_mb * vm->nb_sb_per_mb; + const unsigned long new_nb_bits = (old_nb_mb + 1) * vm->nb_sb_per_mb; + int old_pages = PFN_UP(BITS_TO_LONGS(old_nb_bits) * sizeof(long)); + int new_pages = PFN_UP(BITS_TO_LONGS(new_nb_bits) * sizeof(long)); + unsigned long *new_sb_bitmap, *old_sb_bitmap; + + if (vm->sb_bitmap && old_pages == new_pages) + return 0; + + new_sb_bitmap = vzalloc(new_pages * PAGE_SIZE); + if (!new_sb_bitmap) + return -ENOMEM; + + mutex_lock(&vm->hotplug_mutex); + if (new_sb_bitmap) + memcpy(new_sb_bitmap, vm->sb_bitmap, old_pages * PAGE_SIZE); + + old_sb_bitmap = vm->sb_bitmap; + vm->sb_bitmap = new_sb_bitmap; + mutex_unlock(&vm->hotplug_mutex); + + vfree(old_sb_bitmap); + return 0; +} + +/* + * Try to add a memory block to Linux. This will usually only fail + * if out of memory. + * + * Must not be called with the vm->hotplug_mutex held (possible deadlock with + * onlining code). + * + * Will not modify the state of the memory block. + */ +static int virtio_mem_mb_add(struct virtio_mem *vm, unsigned long mb_id) +{ + const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id); + int nid = memory_add_physaddr_to_nid(addr); + + dev_dbg(&vm->vdev->dev, "adding memory block: %lu\n", mb_id); + return add_memory(nid, addr, memory_block_size_bytes()); +} + +/* + * Try to remove a memory block from Linux. Will only fail if the memory block + * is not offline. + * + * Must not be called with the vm->hotplug_mutex held (possible deadlock with + * onlining code). + * + * Will not modify the state of the memory block. + */ +static int virtio_mem_mb_remove(struct virtio_mem *vm, unsigned long mb_id) +{ + const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id); + int nid = memory_add_physaddr_to_nid(addr); + + dev_dbg(&vm->vdev->dev, "removing memory block: %lu\n", mb_id); + return remove_memory(nid, addr, memory_block_size_bytes()); +} + +/* + * Trigger the workqueue so the device can perform its magic. 
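+ * This is a no-op once the device is being removed (vm->removing).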
+ */ +static void virtio_mem_retry(struct virtio_mem *vm) +{ + unsigned long flags; + + spin_lock_irqsave(&vm->removal_lock, flags); + if (!vm->removing) + queue_work(system_freezable_wq, &vm->wq); + spin_unlock_irqrestore(&vm->removal_lock, flags); +} + +/* + * Test if a virtio-mem device overlaps with the given range. Can be called + * from (notifier) callbacks lockless. + */ +static bool virtio_mem_overlaps_range(struct virtio_mem *vm, + unsigned long start, unsigned long size) +{ + unsigned long dev_start = virtio_mem_mb_id_to_phys(vm->first_mb_id); + unsigned long dev_end = virtio_mem_mb_id_to_phys(vm->last_mb_id) + + memory_block_size_bytes(); + + return start < dev_end && dev_start < start + size; +} + +/* + * Test if a virtio-mem device owns a memory block. Can be called from + * (notifier) callbacks lockless. + */ +static bool virtio_mem_owned_mb(struct virtio_mem *vm, unsigned long mb_id) +{ + return mb_id >= vm->first_mb_id && mb_id <= vm->last_mb_id; +} + +static int virtio_mem_notify_going_online(struct virtio_mem *vm, + unsigned long mb_id, + enum zone_type zone) +{ + switch (virtio_mem_mb_get_state(vm, mb_id)) { + case VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL: + /* + * We won't allow to online a partially plugged memory block + * to the MOVABLE zone - it would contain unmovable parts. + */ + if (zone == ZONE_MOVABLE) { + dev_warn_ratelimited(&vm->vdev->dev, + "memory block has holes, MOVABLE not supported\n"); + return NOTIFY_BAD; + } + return NOTIFY_OK; + case VIRTIO_MEM_MB_STATE_OFFLINE: + return NOTIFY_OK; + default: + break; + } + dev_warn_ratelimited(&vm->vdev->dev, + "memory block onlining denied\n"); + return NOTIFY_BAD; +} + +static void virtio_mem_notify_offline(struct virtio_mem *vm, + unsigned long mb_id) +{ + switch (virtio_mem_mb_get_state(vm, mb_id)) { + case VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL: + virtio_mem_mb_set_state(vm, mb_id, + VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL); + break; + case VIRTIO_MEM_MB_STATE_ONLINE: + case VIRTIO_MEM_MB_STATE_ONLINE_MOVABLE: + virtio_mem_mb_set_state(vm, mb_id, + VIRTIO_MEM_MB_STATE_OFFLINE); + break; + default: + BUG(); + break; + } +} + +static void virtio_mem_notify_online(struct virtio_mem *vm, unsigned long mb_id, + enum zone_type zone) +{ + unsigned long nb_offline; + + switch (virtio_mem_mb_get_state(vm, mb_id)) { + case VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL: + BUG_ON(zone == ZONE_MOVABLE); + virtio_mem_mb_set_state(vm, mb_id, + VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL); + break; + case VIRTIO_MEM_MB_STATE_OFFLINE: + if (zone == ZONE_MOVABLE) + virtio_mem_mb_set_state(vm, mb_id, + VIRTIO_MEM_MB_STATE_ONLINE_MOVABLE); + else + virtio_mem_mb_set_state(vm, mb_id, + VIRTIO_MEM_MB_STATE_ONLINE); + break; + default: + BUG(); + break; + } + nb_offline = vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE] + + vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL]; + + /* see if we can add new blocks now that we onlined one block */ + if (nb_offline == VIRTIO_MEM_NB_OFFLINE_THRESHOLD - 1) + virtio_mem_retry(vm); +} + +/* + * This callback will either be called synchronously from add_memory() or + * asynchronously (e.g., triggered via user space). We have to be careful + * with locking when calling add_memory(). 
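+ * The hotplug_mutex is taken in the GOING_ONLINE/GOING_OFFLINE notifications
+ * and released again in the corresponding ONLINE/OFFLINE/CANCEL_* ones.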
+ */ +static int virtio_mem_memory_notifier_cb(struct notifier_block *nb, + unsigned long action, void *arg) +{ + struct virtio_mem *vm = container_of(nb, struct virtio_mem, + memory_notifier); + struct memory_notify *mhp = arg; + const unsigned long start = PFN_PHYS(mhp->start_pfn); + const unsigned long size = PFN_PHYS(mhp->nr_pages); + const unsigned long mb_id = virtio_mem_phys_to_mb_id(start); + enum zone_type zone; + int rc = NOTIFY_OK; + + if (!virtio_mem_overlaps_range(vm, start, size)) + return NOTIFY_DONE; + + /* + * Memory is onlined/offlined in memory block granularity. We cannot + * cross virtio-mem device boundaries and memory block boundaries. Bail + * out if this ever changes. + */ + if (WARN_ON_ONCE(size != memory_block_size_bytes() || + !IS_ALIGNED(start, memory_block_size_bytes()))) + return NOTIFY_BAD; + + /* + * Avoid circular locking lockdep warnings. We lock the mutex + * e.g., in MEM_GOING_ONLINE and unlock it in MEM_ONLINE. The + * blocking_notifier_call_chain() has it's own lock, which gets unlocked + * between both notifier calls and will bail out. False positive. + */ + lockdep_off(); + + switch (action) { + case MEM_GOING_OFFLINE: + mutex_lock(&vm->hotplug_mutex); + if (vm->removing) { + rc = notifier_from_errno(-EBUSY); + mutex_unlock(&vm->hotplug_mutex); + break; + } + vm->hotplug_active = true; + break; + case MEM_GOING_ONLINE: + mutex_lock(&vm->hotplug_mutex); + if (vm->removing) { + rc = notifier_from_errno(-EBUSY); + mutex_unlock(&vm->hotplug_mutex); + break; + } + vm->hotplug_active = true; + zone = page_zonenum(pfn_to_page(mhp->start_pfn)); + rc = virtio_mem_notify_going_online(vm, mb_id, zone); + break; + case MEM_OFFLINE: + virtio_mem_notify_offline(vm, mb_id); + vm->hotplug_active = false; + mutex_unlock(&vm->hotplug_mutex); + break; + case MEM_ONLINE: + zone = page_zonenum(pfn_to_page(mhp->start_pfn)); + virtio_mem_notify_online(vm, mb_id, zone); + vm->hotplug_active = false; + mutex_unlock(&vm->hotplug_mutex); + break; + case MEM_CANCEL_OFFLINE: + case MEM_CANCEL_ONLINE: + if (!vm->hotplug_active) + break; + vm->hotplug_active = false; + mutex_unlock(&vm->hotplug_mutex); + break; + default: + break; + } + + lockdep_on(); + + return rc; +} + +/* + * Set a range of pages PG_offline. + */ +static void virtio_mem_set_fake_offline(unsigned long pfn, + unsigned int nr_pages) +{ + for (; nr_pages--; pfn++) + __SetPageOffline(pfn_to_page(pfn)); +} + +/* + * Clear PG_offline from a range of pages. + */ +static void virtio_mem_clear_fake_offline(unsigned long pfn, + unsigned int nr_pages) +{ + for (; nr_pages--; pfn++) + __ClearPageOffline(pfn_to_page(pfn)); +} + +/* + * Release a range of fake-offline pages to the buddy, effectively + * fake-onlining them. + */ +static void virtio_mem_fake_online(unsigned long pfn, unsigned int nr_pages) +{ + const int order = MAX_ORDER - 1; + int i; + + /* + * We are always called with subblock granularity, which is at least + * aligned to MAX_ORDER - 1. + */ + virtio_mem_clear_fake_offline(pfn, nr_pages); + + for (i = 0; i < nr_pages; i += 1 << order) + generic_online_page(pfn_to_page(pfn + i), order); +} + +static void virtio_mem_online_page_cb(struct page *page, unsigned int order) +{ + const unsigned long addr = page_to_phys(page); + const unsigned long mb_id = virtio_mem_phys_to_mb_id(addr); + struct virtio_mem *vm; + int sb_id; + + /* + * We exploit here that subblocks have at least MAX_ORDER - 1 + * size/alignment and that this callback is is called with such a + * size/alignment. 
So we cannot cross subblocks and therefore + * also not memory blocks. + */ + rcu_read_lock(); + list_for_each_entry_rcu(vm, &virtio_mem_devices, next) { + if (!virtio_mem_owned_mb(vm, mb_id)) + continue; + + sb_id = virtio_mem_phys_to_sb_id(vm, addr); + /* + * If plugged, online the pages, otherwise, set them fake + * offline (PageOffline). + */ + if (virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1)) + generic_online_page(page, order); + else + virtio_mem_set_fake_offline(PFN_DOWN(addr), 1 << order); + rcu_read_unlock(); + return; + } + rcu_read_unlock(); + + /* not virtio-mem memory, but e.g., a DIMM. online it */ + generic_online_page(page, order); +} + +static uint64_t virtio_mem_send_request(struct virtio_mem *vm, + const struct virtio_mem_req *req) +{ + struct scatterlist *sgs[2], sg_req, sg_resp; + unsigned int len; + int rc; + + /* don't use the request residing on the stack (vaddr) */ + vm->req = *req; + + /* out: buffer for request */ + sg_init_one(&sg_req, &vm->req, sizeof(vm->req)); + sgs[0] = &sg_req; + + /* in: buffer for response */ + sg_init_one(&sg_resp, &vm->resp, sizeof(vm->resp)); + sgs[1] = &sg_resp; + + rc = virtqueue_add_sgs(vm->vq, sgs, 1, 1, vm, GFP_KERNEL); + if (rc < 0) + return rc; + + virtqueue_kick(vm->vq); + + /* wait for a response */ + wait_event(vm->host_resp, virtqueue_get_buf(vm->vq, &len)); + + return virtio16_to_cpu(vm->vdev, vm->resp.type); +} + +static int virtio_mem_send_plug_request(struct virtio_mem *vm, uint64_t addr, + uint64_t size) +{ + const uint64_t nb_vm_blocks = size / vm->device_block_size; + const struct virtio_mem_req req = { + .type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_PLUG), + .u.plug.addr = cpu_to_virtio64(vm->vdev, addr), + .u.plug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks), + }; + + if (atomic_read(&vm->config_changed)) + return -EAGAIN; + + switch (virtio_mem_send_request(vm, &req)) { + case VIRTIO_MEM_RESP_ACK: + vm->plugged_size += size; + return 0; + case VIRTIO_MEM_RESP_NACK: + return -EAGAIN; + case VIRTIO_MEM_RESP_BUSY: + return -EBUSY; + case VIRTIO_MEM_RESP_ERROR: + return -EINVAL; + default: + return -ENOMEM; + } +} + +static int virtio_mem_send_unplug_request(struct virtio_mem *vm, uint64_t addr, + uint64_t size) +{ + const uint64_t nb_vm_blocks = size / vm->device_block_size; + const struct virtio_mem_req req = { + .type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_UNPLUG), + .u.unplug.addr = cpu_to_virtio64(vm->vdev, addr), + .u.unplug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks), + }; + + if (atomic_read(&vm->config_changed)) + return -EAGAIN; + + switch (virtio_mem_send_request(vm, &req)) { + case VIRTIO_MEM_RESP_ACK: + vm->plugged_size -= size; + return 0; + case VIRTIO_MEM_RESP_BUSY: + return -EBUSY; + case VIRTIO_MEM_RESP_ERROR: + return -EINVAL; + default: + return -ENOMEM; + } +} + +static int virtio_mem_send_unplug_all_request(struct virtio_mem *vm) +{ + const struct virtio_mem_req req = { + .type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_UNPLUG_ALL), + }; + + switch (virtio_mem_send_request(vm, &req)) { + case VIRTIO_MEM_RESP_ACK: + vm->unplug_all_required = false; + vm->plugged_size = 0; + /* usable region might have shrunk */ + atomic_set(&vm->config_changed, 1); + return 0; + case VIRTIO_MEM_RESP_BUSY: + return -EBUSY; + default: + return -ENOMEM; + } +} + +/* + * Plug selected subblocks. Updates the plugged state, but not the state + * of the memory block. 
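+ * Sends a single plug request covering all selected subblocks and marks them
+ * plugged in the subblock bitmap on success.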
+ */ +static int virtio_mem_mb_plug_sb(struct virtio_mem *vm, unsigned long mb_id, + int sb_id, int count) +{ + const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id) + + sb_id * vm->subblock_size; + const uint64_t size = count * vm->subblock_size; + int rc; + + dev_dbg(&vm->vdev->dev, "plugging memory block: %lu : %i - %i\n", mb_id, + sb_id, sb_id + count - 1); + + rc = virtio_mem_send_plug_request(vm, addr, size); + if (!rc) + virtio_mem_mb_set_sb_plugged(vm, mb_id, sb_id, count); + return rc; +} + +/* + * Unplug selected subblocks. Updates the plugged state, but not the state + * of the memory block. + */ +static int virtio_mem_mb_unplug_sb(struct virtio_mem *vm, unsigned long mb_id, + int sb_id, int count) +{ + const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id) + + sb_id * vm->subblock_size; + const uint64_t size = count * vm->subblock_size; + int rc; + + dev_dbg(&vm->vdev->dev, "unplugging memory block: %lu : %i - %i\n", + mb_id, sb_id, sb_id + count - 1); + + rc = virtio_mem_send_unplug_request(vm, addr, size); + if (!rc) + virtio_mem_mb_set_sb_unplugged(vm, mb_id, sb_id, count); + return rc; +} + +/* + * Unplug the desired number of plugged subblocks of a offline or not-added + * memory block. Will fail if any subblock cannot get unplugged (instead of + * skipping it). + * + * Will not modify the state of the memory block. + * + * Note: can fail after some subblocks were unplugged. + */ +static int virtio_mem_mb_unplug_any_sb(struct virtio_mem *vm, + unsigned long mb_id, uint64_t *nb_sb) +{ + int sb_id, count; + int rc; + + while (*nb_sb) { + sb_id = virtio_mem_mb_first_plugged_sb(vm, mb_id); + if (sb_id >= vm->nb_sb_per_mb) + break; + count = 1; + while (count < *nb_sb && + sb_id + count < vm->nb_sb_per_mb && + virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id + count, + 1)) + count++; + + rc = virtio_mem_mb_unplug_sb(vm, mb_id, sb_id, count); + if (rc) + return rc; + *nb_sb -= count; + } + + return 0; +} + +/* + * Unplug all plugged subblocks of an offline or not-added memory block. + * + * Will not modify the state of the memory block. + * + * Note: can fail after some subblocks were unplugged. + */ +static int virtio_mem_mb_unplug(struct virtio_mem *vm, unsigned long mb_id) +{ + uint64_t nb_sb = vm->nb_sb_per_mb; + + return virtio_mem_mb_unplug_any_sb(vm, mb_id, &nb_sb); +} + +/* + * Prepare tracking data for the next memory block. + */ +static int virtio_mem_prepare_next_mb(struct virtio_mem *vm, + unsigned long *mb_id) +{ + int rc; + + if (vm->next_mb_id > vm->last_usable_mb_id) + return -ENOSPC; + + /* Resize the state array if required. */ + rc = virtio_mem_mb_state_prepare_next_mb(vm); + if (rc) + return rc; + + /* Resize the subblock bitmap if required. */ + rc = virtio_mem_sb_bitmap_prepare_next_mb(vm); + if (rc) + return rc; + + vm->nb_mb_state[VIRTIO_MEM_MB_STATE_UNUSED]++; + *mb_id = vm->next_mb_id++; + return 0; +} + +/* + * Don't add too many blocks that are not onlined yet to avoid running OOM. + */ +static bool virtio_mem_too_many_mb_offline(struct virtio_mem *vm) +{ + unsigned long nb_offline; + + nb_offline = vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE] + + vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL]; + return nb_offline >= VIRTIO_MEM_NB_OFFLINE_THRESHOLD; +} + +/* + * Try to plug the desired number of subblocks and add the memory block + * to Linux. + * + * Will modify the state of the memory block. 
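+ * Called without the hotplug_mutex held, because virtio_mem_mb_add() might
+ * trigger the memory notifiers.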
+ */ +static int virtio_mem_mb_plug_and_add(struct virtio_mem *vm, + unsigned long mb_id, + uint64_t *nb_sb) +{ + const int count = min_t(int, *nb_sb, vm->nb_sb_per_mb); + int rc, rc2; + + if (WARN_ON_ONCE(!count)) + return -EINVAL; + + /* + * Plug the requested number of subblocks before adding it to linux, + * so that onlining will directly online all plugged subblocks. + */ + rc = virtio_mem_mb_plug_sb(vm, mb_id, 0, count); + if (rc) + return rc; + + /* + * Mark the block properly offline before adding it to Linux, + * so the memory notifiers will find the block in the right state. + */ + if (count == vm->nb_sb_per_mb) + virtio_mem_mb_set_state(vm, mb_id, + VIRTIO_MEM_MB_STATE_OFFLINE); + else + virtio_mem_mb_set_state(vm, mb_id, + VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL); + + /* Add the memory block to linux - if that fails, try to unplug. */ + rc = virtio_mem_mb_add(vm, mb_id); + if (rc) { + enum virtio_mem_mb_state new_state = VIRTIO_MEM_MB_STATE_UNUSED; + + dev_err(&vm->vdev->dev, + "adding memory block %lu failed with %d\n", mb_id, rc); + rc2 = virtio_mem_mb_unplug_sb(vm, mb_id, 0, count); + + /* + * TODO: Linux MM does not properly clean up yet in all cases + * where adding of memory failed - especially on -ENOMEM. + */ + if (rc2) + new_state = VIRTIO_MEM_MB_STATE_PLUGGED; + virtio_mem_mb_set_state(vm, mb_id, new_state); + return rc; + } + + *nb_sb -= count; + return 0; +} + +/* + * Try to plug the desired number of subblocks of a memory block that + * is already added to Linux. + * + * Will modify the state of the memory block. + * + * Note: Can fail after some subblocks were successfully plugged. + */ +static int virtio_mem_mb_plug_any_sb(struct virtio_mem *vm, unsigned long mb_id, + uint64_t *nb_sb, bool online) +{ + unsigned long pfn, nr_pages; + int sb_id, count; + int rc; + + if (WARN_ON_ONCE(!*nb_sb)) + return -EINVAL; + + while (*nb_sb) { + sb_id = virtio_mem_mb_first_unplugged_sb(vm, mb_id); + if (sb_id >= vm->nb_sb_per_mb) + break; + count = 1; + while (count < *nb_sb && + sb_id + count < vm->nb_sb_per_mb && + !virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id + count, + 1)) + count++; + + rc = virtio_mem_mb_plug_sb(vm, mb_id, sb_id, count); + if (rc) + return rc; + *nb_sb -= count; + if (!online) + continue; + + /* fake-online the pages if the memory block is online */ + pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) + + sb_id * vm->subblock_size); + nr_pages = PFN_DOWN(count * vm->subblock_size); + virtio_mem_fake_online(pfn, nr_pages); + } + + if (virtio_mem_mb_test_sb_plugged(vm, mb_id, 0, vm->nb_sb_per_mb)) { + if (online) + virtio_mem_mb_set_state(vm, mb_id, + VIRTIO_MEM_MB_STATE_ONLINE); + else + virtio_mem_mb_set_state(vm, mb_id, + VIRTIO_MEM_MB_STATE_OFFLINE); + } + + return rc; +} + +/* + * Try to plug the requested amount of memory. + */ +static int virtio_mem_plug_request(struct virtio_mem *vm, uint64_t diff) +{ + uint64_t nb_sb = diff / vm->subblock_size; + unsigned long mb_id; + int rc; + + if (!nb_sb) + return 0; + + /* Don't race with onlining/offlining */ + mutex_lock(&vm->hotplug_mutex); + + /* Try to plug subblocks of partially plugged online blocks. */ + virtio_mem_for_each_mb_state(vm, mb_id, + VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL) { + rc = virtio_mem_mb_plug_any_sb(vm, mb_id, &nb_sb, true); + if (rc || !nb_sb) + goto out_unlock; + cond_resched(); + } + + /* Try to plug subblocks of partially plugged offline blocks. 
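+ * No fake-onlining is needed for these, the memory blocks are offline.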
*/ + virtio_mem_for_each_mb_state(vm, mb_id, + VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL) { + rc = virtio_mem_mb_plug_any_sb(vm, mb_id, &nb_sb, false); + if (rc || !nb_sb) + goto out_unlock; + cond_resched(); + } + + /* + * We won't be working on online/offline memory blocks from this point, + * so we can't race with memory onlining/offlining. Drop the mutex. + */ + mutex_unlock(&vm->hotplug_mutex); + + /* Try to plug and add unused blocks */ + virtio_mem_for_each_mb_state(vm, mb_id, VIRTIO_MEM_MB_STATE_UNUSED) { + if (virtio_mem_too_many_mb_offline(vm)) + return -ENOSPC; + + rc = virtio_mem_mb_plug_and_add(vm, mb_id, &nb_sb); + if (rc || !nb_sb) + return rc; + cond_resched(); + } + + /* Try to prepare, plug and add new blocks */ + while (nb_sb) { + if (virtio_mem_too_many_mb_offline(vm)) + return -ENOSPC; + + rc = virtio_mem_prepare_next_mb(vm, &mb_id); + if (rc) + return rc; + rc = virtio_mem_mb_plug_and_add(vm, mb_id, &nb_sb); + if (rc) + return rc; + cond_resched(); + } + + return 0; +out_unlock: + mutex_unlock(&vm->hotplug_mutex); + return rc; +} + +/* + * Try to unplug all blocks that couldn't be unplugged before, for example, + * because the hypervisor was busy. + */ +static int virtio_mem_unplug_pending_mb(struct virtio_mem *vm) +{ + unsigned long mb_id; + int rc; + + virtio_mem_for_each_mb_state(vm, mb_id, VIRTIO_MEM_MB_STATE_PLUGGED) { + rc = virtio_mem_mb_unplug(vm, mb_id); + if (rc) + return rc; + virtio_mem_mb_set_state(vm, mb_id, VIRTIO_MEM_MB_STATE_UNUSED); + } + + return 0; +} + +/* + * Update all parts of the config that could have changed. + */ +static void virtio_mem_refresh_config(struct virtio_mem *vm) +{ + const uint64_t phys_limit = 1UL << MAX_PHYSMEM_BITS; + uint64_t new_plugged_size, usable_region_size, end_addr; + + /* the plugged_size is just a reflection of what _we_ did previously */ + virtio_cread(vm->vdev, struct virtio_mem_config, plugged_size, + &new_plugged_size); + if (WARN_ON_ONCE(new_plugged_size != vm->plugged_size)) + vm->plugged_size = new_plugged_size; + + /* calculate the last usable memory block id */ + virtio_cread(vm->vdev, struct virtio_mem_config, + usable_region_size, &usable_region_size); + end_addr = vm->addr + usable_region_size; + end_addr = min(end_addr, phys_limit); + vm->last_usable_mb_id = virtio_mem_phys_to_mb_id(end_addr) - 1; + + /* see if there is a request to change the size */ + virtio_cread(vm->vdev, struct virtio_mem_config, requested_size, + &vm->requested_size); + + dev_info(&vm->vdev->dev, "plugged size: 0x%llx", vm->plugged_size); + dev_info(&vm->vdev->dev, "requested size: 0x%llx", vm->requested_size); +} + +/* + * Workqueue function for handling plug/unplug requests and config updates. + */ +static void virtio_mem_run_wq(struct work_struct *work) +{ + struct virtio_mem *vm = container_of(work, struct virtio_mem, wq); + uint64_t diff; + int rc; + + hrtimer_cancel(&vm->retry_timer); + + if (vm->broken) + return; + +retry: + rc = 0; + + /* Make sure we start with a clean state if there are leftovers. 
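+ * (unplug_all_required is set during initialization if the device still had
+ * memory plugged).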
*/ + if (unlikely(vm->unplug_all_required)) + rc = virtio_mem_send_unplug_all_request(vm); + + if (atomic_read(&vm->config_changed)) { + atomic_set(&vm->config_changed, 0); + virtio_mem_refresh_config(vm); + } + + /* Unplug any leftovers from previous runs */ + if (!rc) + rc = virtio_mem_unplug_pending_mb(vm); + + if (!rc && vm->requested_size != vm->plugged_size) { + if (vm->requested_size > vm->plugged_size) { + diff = vm->requested_size - vm->plugged_size; + rc = virtio_mem_plug_request(vm, diff); + } + /* TODO: try to unplug memory */ + } + + switch (rc) { + case 0: + break; + case -ENOSPC: + /* + * We cannot add any more memory (alignment, physical limit) + * or we have too many offline memory blocks. + */ + break; + case -EBUSY: + /* + * The hypervisor cannot process our request right now + * (e.g., out of memory, migrating). + */ + case -ENOMEM: + /* Out of memory, try again later. */ + hrtimer_start(&vm->retry_timer, + ms_to_ktime(VIRTIO_MEM_RETRY_TIMER_MS), + HRTIMER_MODE_REL); + break; + case -EAGAIN: + /* Retry immediately (e.g., the config changed). */ + goto retry; + default: + /* Unknown error, mark as broken */ + dev_err(&vm->vdev->dev, + "unknown error, marking device broken: %d\n", rc); + vm->broken = true; + } +} + +static enum hrtimer_restart virtio_mem_timer_expired(struct hrtimer *timer) +{ + struct virtio_mem *vm = container_of(timer, struct virtio_mem, + retry_timer); + + virtio_mem_retry(vm); + return HRTIMER_NORESTART; +} + +static void virtio_mem_handle_response(struct virtqueue *vq) +{ + struct virtio_mem *vm = vq->vdev->priv; + + wake_up(&vm->host_resp); +} + +static int virtio_mem_init_vq(struct virtio_mem *vm) +{ + struct virtqueue *vq; + + vq = virtio_find_single_vq(vm->vdev, virtio_mem_handle_response, + "guest-request"); + if (IS_ERR(vq)) + return PTR_ERR(vq); + vm->vq = vq; + + return 0; +} + +/* + * Test if any memory in the range is present in Linux. + */ +static bool virtio_mem_any_memory_present(unsigned long start, + unsigned long size) +{ + const unsigned long start_pfn = PFN_DOWN(start); + const unsigned long end_pfn = PFN_UP(start + size); + unsigned long pfn; + + for (pfn = start_pfn; pfn != end_pfn; pfn++) + if (present_section_nr(pfn_to_section_nr(pfn))) + return true; + + return false; +} + +static int virtio_mem_init(struct virtio_mem *vm) +{ + const uint64_t phys_limit = 1UL << MAX_PHYSMEM_BITS; + + if (!vm->vdev->config->get) { + dev_err(&vm->vdev->dev, "config access disabled\n"); + return -EINVAL; + } + + /* + * We don't want to (un)plug or reuse any memory when in kdump. The + * memory is still accessible (but not mapped). + */ + if (is_kdump_kernel()) { + dev_warn(&vm->vdev->dev, "disabled in kdump kernel\n"); + return -EBUSY; + } + + /* Fetch all properties that can't change. */ + virtio_cread(vm->vdev, struct virtio_mem_config, plugged_size, + &vm->plugged_size); + virtio_cread(vm->vdev, struct virtio_mem_config, block_size, + &vm->device_block_size); + virtio_cread(vm->vdev, struct virtio_mem_config, addr, &vm->addr); + virtio_cread(vm->vdev, struct virtio_mem_config, region_size, + &vm->region_size); + + /* + * If we still have memory plugged, we might have to unplug all + * memory first. However, if somebody simply unloaded the driver + * we would have to reinitialize the old state - something we don't + * support yet. Detect if we have any memory in the area present. 
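+ * (done via virtio_mem_any_memory_present(), which looks for present memory
+ * sections in the usable region).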
+ */ + if (vm->plugged_size) { + uint64_t usable_region_size; + + virtio_cread(vm->vdev, struct virtio_mem_config, + usable_region_size, &usable_region_size); + + if (virtio_mem_any_memory_present(vm->addr, + usable_region_size)) { + dev_err(&vm->vdev->dev, + "reloading the driver is not supported\n"); + return -EINVAL; + } + /* + * Note: it might happen that the device is busy and + * unplugging all memory might take some time. + */ + dev_info(&vm->vdev->dev, "unplugging all memory required\n"); + vm->unplug_all_required = 1; + } + + /* + * We always hotplug memory in memory block granularity. This way, + * we have to wait for exactly one memory block to online. + */ + if (vm->device_block_size > memory_block_size_bytes()) { + dev_err(&vm->vdev->dev, + "The block size is not supported (too big).\n"); + return -EINVAL; + } + + /* bad device setup - warn only */ + if (!IS_ALIGNED(vm->addr, memory_block_size_bytes())) + dev_warn(&vm->vdev->dev, + "The alignment of the physical start address can make some memory unusable.\n"); + if (!IS_ALIGNED(vm->addr + vm->region_size, memory_block_size_bytes())) + dev_warn(&vm->vdev->dev, + "The alignment of the physical end address can make some memory unusable.\n"); + if (vm->addr + vm->region_size > phys_limit) + dev_warn(&vm->vdev->dev, + "Some memory is not addressable. This can make some memory unusable.\n"); + + /* + * Calculate the subblock size: + * - At least MAX_ORDER - 1 / pageblock_order. + * - At least the device block size. + * In the worst case, a single subblock per memory block. + */ + vm->subblock_size = PAGE_SIZE * 1u << max_t(uint32_t, MAX_ORDER - 1, + pageblock_order); + vm->subblock_size = max_t(uint32_t, vm->device_block_size, + vm->subblock_size); + vm->nb_sb_per_mb = memory_block_size_bytes() / vm->subblock_size; + + /* Round up to the next full memory block */ + vm->first_mb_id = virtio_mem_phys_to_mb_id(vm->addr - 1 + + memory_block_size_bytes()); + vm->next_mb_id = vm->first_mb_id; + vm->last_mb_id = virtio_mem_phys_to_mb_id(vm->addr + + vm->region_size) - 1; + + dev_info(&vm->vdev->dev, "start address: 0x%llx", vm->addr); + dev_info(&vm->vdev->dev, "region size: 0x%llx", vm->region_size); + dev_info(&vm->vdev->dev, "device block size: 0x%x", + vm->device_block_size); + dev_info(&vm->vdev->dev, "memory block size: 0x%lx", + memory_block_size_bytes()); + dev_info(&vm->vdev->dev, "subblock size: 0x%x", + vm->subblock_size); + + return 0; +} + +static int virtio_mem_probe(struct virtio_device *vdev) +{ + struct virtio_mem *vm; + int rc = -EINVAL; + + vdev->priv = vm = kzalloc(sizeof(*vm), GFP_KERNEL); + if (!vm) + return -ENOMEM; + + init_waitqueue_head(&vm->host_resp); + vm->vdev = vdev; + INIT_WORK(&vm->wq, virtio_mem_run_wq); + mutex_init(&vm->hotplug_mutex); + INIT_LIST_HEAD(&vm->next); + spin_lock_init(&vm->removal_lock); + hrtimer_init(&vm->retry_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + vm->retry_timer.function = virtio_mem_timer_expired; + + /* register the virtqueue */ + rc = virtio_mem_init_vq(vm); + if (rc) + goto out_free_vm; + + /* initialize the device by querying the config */ + rc = virtio_mem_init(vm); + if (rc) + goto out_del_vq; + + /* register callbacks */ + vm->memory_notifier.notifier_call = virtio_mem_memory_notifier_cb; + rc = register_memory_notifier(&vm->memory_notifier); + if (rc) + goto out_del_vq; + rc = register_virtio_mem_device(vm); + if (rc) + goto out_unreg_mem; + + virtio_device_ready(vdev); + + /* trigger a config update to start processing the requested_size */ + 
atomic_set(&vm->config_changed, 1); + queue_work(system_freezable_wq, &vm->wq); + + return 0; +out_unreg_mem: + unregister_memory_notifier(&vm->memory_notifier); +out_del_vq: + vdev->config->del_vqs(vdev); +out_free_vm: + kfree(vm); + vdev->priv = NULL; + + return rc; +} + +static void virtio_mem_remove(struct virtio_device *vdev) +{ + struct virtio_mem *vm = vdev->priv; + unsigned long mb_id; + int rc; + + /* + * Make sure the workqueue won't be triggered anymore and no memory + * blocks can be onlined/offlined until we're finished here. + */ + mutex_lock(&vm->hotplug_mutex); + spin_lock_irq(&vm->removal_lock); + vm->removing = true; + spin_unlock_irq(&vm->removal_lock); + mutex_unlock(&vm->hotplug_mutex); + + /* wait until the workqueue stopped */ + cancel_work_sync(&vm->wq); + hrtimer_cancel(&vm->retry_timer); + + /* + * After we unregistered our callbacks, user space can online partially + * plugged offline blocks. Make sure to remove them. + */ + virtio_mem_for_each_mb_state(vm, mb_id, + VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL) { + rc = virtio_mem_mb_remove(vm, mb_id); + BUG_ON(rc); + virtio_mem_mb_set_state(vm, mb_id, VIRTIO_MEM_MB_STATE_UNUSED); + } + + /* unregister callbacks */ + unregister_virtio_mem_device(vm); + unregister_memory_notifier(&vm->memory_notifier); + + /* + * There is no way we could reliably remove all memory we have added to + * the system. And there is no way to stop the driver/device from going + * away. Warn at least. + */ + if (vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE] || + vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL] || + vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE] || + vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL] || + vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE_MOVABLE]) + dev_warn(&vdev->dev, "device still has system memory added\n"); + + /* remove all tracking data - no locking needed */ + vfree(vm->mb_state); + vfree(vm->sb_bitmap); + + /* reset the device and cleanup the queues */ + vdev->config->reset(vdev); + vdev->config->del_vqs(vdev); + + kfree(vm); + vdev->priv = NULL; +} + +static void virtio_mem_config_changed(struct virtio_device *vdev) +{ + struct virtio_mem *vm = vdev->priv; + + atomic_set(&vm->config_changed, 1); + virtio_mem_retry(vm); +} + +#ifdef CONFIG_PM_SLEEP +static int virtio_mem_freeze(struct virtio_device *vdev) +{ + /* + * When restarting the VM, all memory is usually unplugged. Don't + * allow to suspend/hibernate. 
+ */ + dev_err(&vdev->dev, "save/restore not supported.\n"); + return -EPERM; +} + +static int virtio_mem_restore(struct virtio_device *vdev) +{ + return -EPERM; +} +#endif + +static struct virtio_device_id virtio_mem_id_table[] = { + { VIRTIO_ID_MEM, VIRTIO_DEV_ANY_ID }, + { 0 }, +}; + +static struct virtio_driver virtio_mem_driver = { + .driver.name = KBUILD_MODNAME, + .driver.owner = THIS_MODULE, + .id_table = virtio_mem_id_table, + .probe = virtio_mem_probe, + .remove = virtio_mem_remove, + .config_changed = virtio_mem_config_changed, +#ifdef CONFIG_PM_SLEEP + .freeze = virtio_mem_freeze, + .restore = virtio_mem_restore, +#endif +}; + +module_virtio_driver(virtio_mem_driver); +MODULE_DEVICE_TABLE(virtio, virtio_mem_id_table); +MODULE_AUTHOR("David Hildenbrand "); +MODULE_DESCRIPTION("Virtio-mem driver"); +MODULE_LICENSE("GPL"); diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h index ecc27a17401a7..b052355ac7a32 100644 --- a/include/uapi/linux/virtio_ids.h +++ b/include/uapi/linux/virtio_ids.h @@ -44,6 +44,7 @@ #define VIRTIO_ID_VSOCK 19 /* virtio vsock transport */ #define VIRTIO_ID_CRYPTO 20 /* virtio crypto */ #define VIRTIO_ID_IOMMU 23 /* virtio IOMMU */ +#define VIRTIO_ID_MEM 24 /* virtio mem */ #define VIRTIO_ID_FS 26 /* virtio filesystem */ #define VIRTIO_ID_PMEM 27 /* virtio pmem */ #define VIRTIO_ID_MAC80211_HWSIM 29 /* virtio mac80211-hwsim */ diff --git a/include/uapi/linux/virtio_mem.h b/include/uapi/linux/virtio_mem.h new file mode 100644 index 0000000000000..1bfade78bdfd7 --- /dev/null +++ b/include/uapi/linux/virtio_mem.h @@ -0,0 +1,200 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ +/* + * Virtio Mem Device + * + * Copyright Red Hat, Inc. 2020 + * + * Authors: + * David Hildenbrand + * + * This header is BSD licensed so anyone can use the definitions + * to implement compatible drivers/servers: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of IBM nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL IBM OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#ifndef _LINUX_VIRTIO_MEM_H +#define _LINUX_VIRTIO_MEM_H + +#include +#include +#include +#include + +/* + * Each virtio-mem device manages a dedicated region in physical address + * space. Each device can belong to a single NUMA node, multiple devices + * for a single NUMA node are possible. A virtio-mem device is like a + * "resizable DIMM" consisting of small memory blocks that can be plugged + * or unplugged. The device driver is responsible for (un)plugging memory + * blocks on demand. + * + * Virtio-mem devices can only operate on their assigned memory region in + * order to (un)plug memory. A device cannot (un)plug memory belonging to + * other devices. + * + * The "region_size" corresponds to the maximum amount of memory that can + * be provided by a device. The "size" corresponds to the amount of memory + * that is currently plugged. "requested_size" corresponds to a request + * from the device to the device driver to (un)plug blocks. The + * device driver should try to (un)plug blocks in order to reach the + * "requested_size". It is impossible to plug more memory than requested. + * + * The "usable_region_size" represents the memory region that can actually + * be used to (un)plug memory. It is always at least as big as the + * "requested_size" and will grow dynamically. It will only shrink when + * explicitly triggered (VIRTIO_MEM_REQ_UNPLUG). + * + * There are no guarantees what will happen if unplugged memory is + * read/written. Such memory should, in general, not be touched. E.g., + * even writing might succeed, but the values will simply be discarded at + * random points in time. + * + * It can happen that the device cannot process a request, because it is + * busy. The device driver has to retry later. + * + * Usually, during system resets all memory will get unplugged, so the + * device driver can start with a clean state. However, in specific + * scenarios (if the device is busy) it can happen that the device still + * has memory plugged. The device driver can request to unplug all memory + * (VIRTIO_MEM_REQ_UNPLUG) - which might take a while to succeed if the + * device is busy. + */ + +/* --- virtio-mem: guest -> host requests --- */ + +/* request to plug memory blocks */ +#define VIRTIO_MEM_REQ_PLUG 0 +/* request to unplug memory blocks */ +#define VIRTIO_MEM_REQ_UNPLUG 1 +/* request to unplug all blocks and shrink the usable size */ +#define VIRTIO_MEM_REQ_UNPLUG_ALL 2 +/* request information about the plugged state of memory blocks */ +#define VIRTIO_MEM_REQ_STATE 3 + +struct virtio_mem_req_plug { + __virtio64 addr; + __virtio16 nb_blocks; +}; + +struct virtio_mem_req_unplug { + __virtio64 addr; + __virtio16 nb_blocks; +}; + +struct virtio_mem_req_state { + __virtio64 addr; + __virtio16 nb_blocks; +}; + +struct virtio_mem_req { + __virtio16 type; + __virtio16 padding[3]; + + union { + struct virtio_mem_req_plug plug; + struct virtio_mem_req_unplug unplug; + struct virtio_mem_req_state state; + } u; +}; + + +/* --- virtio-mem: host -> guest response --- */ + +/* + * Request processed successfully, applicable for + * - VIRTIO_MEM_REQ_PLUG + * - VIRTIO_MEM_REQ_UNPLUG + * - VIRTIO_MEM_REQ_UNPLUG_ALL + * - VIRTIO_MEM_REQ_STATE + */ +#define VIRTIO_MEM_RESP_ACK 0 +/* + * Request denied - e.g. 
trying to plug more than requested, applicable for + * - VIRTIO_MEM_REQ_PLUG + */ +#define VIRTIO_MEM_RESP_NACK 1 +/* + * Request cannot be processed right now, try again later, applicable for + * - VIRTIO_MEM_REQ_PLUG + * - VIRTIO_MEM_REQ_UNPLUG + * - VIRTIO_MEM_REQ_UNPLUG_ALL + */ +#define VIRTIO_MEM_RESP_BUSY 2 +/* + * Error in request (e.g. addresses/alignment), applicable for + * - VIRTIO_MEM_REQ_PLUG + * - VIRTIO_MEM_REQ_UNPLUG + * - VIRTIO_MEM_REQ_STATE + */ +#define VIRTIO_MEM_RESP_ERROR 3 + + +/* State of memory blocks is "plugged" */ +#define VIRTIO_MEM_STATE_PLUGGED 0 +/* State of memory blocks is "unplugged" */ +#define VIRTIO_MEM_STATE_UNPLUGGED 1 +/* State of memory blocks is "mixed" */ +#define VIRTIO_MEM_STATE_MIXED 2 + +struct virtio_mem_resp_state { + __virtio16 state; +}; + +struct virtio_mem_resp { + __virtio16 type; + __virtio16 padding[3]; + + union { + struct virtio_mem_resp_state state; + } u; +}; + +/* --- virtio-mem: configuration --- */ + +struct virtio_mem_config { + /* Block size and alignment. Cannot change. */ + __u32 block_size; + __u32 padding; + /* Start address of the memory region. Cannot change. */ + __u64 addr; + /* Region size (maximum). Cannot change. */ + __u64 region_size; + /* + * Currently usable region size. Can grow up to region_size. Can + * shrink due to VIRTIO_MEM_REQ_UNPLUG_ALL (in which case no config + * update will be sent). + */ + __u64 usable_region_size; + /* + * Currently used size. Changes due to plug/unplug requests, but no + * config updates will be sent. + */ + __u64 plugged_size; + /* Requested size. New plug requests cannot exceed it. Can change. */ + __u64 requested_size; +}; + +#endif /* _LINUX_VIRTIO_MEM_H */ -- GitLab From dfb0b2e46d04d0b58754426184571a959bf30dce Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 7 May 2020 16:01:26 +0200 Subject: [PATCH 1134/1971] MAINTAINERS: Add myself as virtio-mem maintainer Let's make sure patches/bug reports find the right person. Cc: "Michael S. Tsirkin" Signed-off-by: David Hildenbrand Link: https://lore.kernel.org/r/20200507140139.17083-3-david@redhat.com Signed-off-by: Michael S. Tsirkin --- MAINTAINERS | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index b5106b7d6d0c2..88170f86ad4e4 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -17986,6 +17986,13 @@ S: Maintained F: drivers/iommu/virtio-iommu.c F: include/uapi/linux/virtio_iommu.h +VIRTIO MEM DRIVER +M: David Hildenbrand +L: virtualization@lists.linux-foundation.org +S: Maintained +F: drivers/virtio/virtio_mem.c +F: include/uapi/linux/virtio_mem.h + VIRTUAL BOX GUEST DEVICE DRIVER M: Hans de Goede M: Arnd Bergmann -- GitLab From f2af6d3978d74a7891d0f428537b4494498202cb Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 7 May 2020 16:01:27 +0200 Subject: [PATCH 1135/1971] virtio-mem: Allow to specify an ACPI PXM as nid We want to allow to specify (similar as for a DIMM), to which node a virtio-mem device (and, therefore, its memory) belongs. Add a new virtio-mem feature flag and export pxm_to_node, so it can be used in kernel module context. Acked-by: Michal Hocko # for the export Acked-by: "Rafael J. Wysocki" # for the export Acked-by: Pankaj Gupta Tested-by: Pankaj Gupta Cc: "Michael S. 
Tsirkin" Cc: Jason Wang Cc: Oscar Salvador Cc: Michal Hocko Cc: Igor Mammedov Cc: Dave Young Cc: Andrew Morton Cc: Dan Williams Cc: Pavel Tatashin Cc: Stefan Hajnoczi Cc: Vlastimil Babka Cc: Len Brown Cc: linux-acpi@vger.kernel.org Signed-off-by: David Hildenbrand Link: https://lore.kernel.org/r/20200507140139.17083-4-david@redhat.com Signed-off-by: Michael S. Tsirkin --- drivers/acpi/numa/srat.c | 1 + drivers/virtio/virtio_mem.c | 39 +++++++++++++++++++++++++++++++-- include/uapi/linux/virtio_mem.h | 10 ++++++++- 3 files changed, 47 insertions(+), 3 deletions(-) diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c index 47b4969d9b93d..5be5a977da1bf 100644 --- a/drivers/acpi/numa/srat.c +++ b/drivers/acpi/numa/srat.c @@ -35,6 +35,7 @@ int pxm_to_node(int pxm) return NUMA_NO_NODE; return pxm_to_node_map[pxm]; } +EXPORT_SYMBOL(pxm_to_node); int node_to_pxm(int node) { diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c index 5d1dcaa6fc42a..270ddeaec0596 100644 --- a/drivers/virtio/virtio_mem.c +++ b/drivers/virtio/virtio_mem.c @@ -21,6 +21,8 @@ #include #include +#include + enum virtio_mem_mb_state { /* Unplugged, not added to Linux. Can be reused later. */ VIRTIO_MEM_MB_STATE_UNUSED = 0, @@ -72,6 +74,8 @@ struct virtio_mem { /* The device block size (for communicating with the device). */ uint32_t device_block_size; + /* The translated node id. NUMA_NO_NODE in case not specified. */ + int nid; /* Physical start address of the memory region. */ uint64_t addr; /* Maximum region size in bytes. */ @@ -389,7 +393,10 @@ static int virtio_mem_sb_bitmap_prepare_next_mb(struct virtio_mem *vm) static int virtio_mem_mb_add(struct virtio_mem *vm, unsigned long mb_id) { const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id); - int nid = memory_add_physaddr_to_nid(addr); + int nid = vm->nid; + + if (nid == NUMA_NO_NODE) + nid = memory_add_physaddr_to_nid(addr); dev_dbg(&vm->vdev->dev, "adding memory block: %lu\n", mb_id); return add_memory(nid, addr, memory_block_size_bytes()); @@ -407,7 +414,10 @@ static int virtio_mem_mb_add(struct virtio_mem *vm, unsigned long mb_id) static int virtio_mem_mb_remove(struct virtio_mem *vm, unsigned long mb_id) { const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id); - int nid = memory_add_physaddr_to_nid(addr); + int nid = vm->nid; + + if (nid == NUMA_NO_NODE) + nid = memory_add_physaddr_to_nid(addr); dev_dbg(&vm->vdev->dev, "removing memory block: %lu\n", mb_id); return remove_memory(nid, addr, memory_block_size_bytes()); @@ -426,6 +436,17 @@ static void virtio_mem_retry(struct virtio_mem *vm) spin_unlock_irqrestore(&vm->removal_lock, flags); } +static int virtio_mem_translate_node_id(struct virtio_mem *vm, uint16_t node_id) +{ + int node = NUMA_NO_NODE; + +#if defined(CONFIG_ACPI_NUMA) + if (virtio_has_feature(vm->vdev, VIRTIO_MEM_F_ACPI_PXM)) + node = pxm_to_node(node_id); +#endif + return node; +} + /* * Test if a virtio-mem device overlaps with the given range. Can be called * from (notifier) callbacks lockless. 
@@ -1267,6 +1288,7 @@ static bool virtio_mem_any_memory_present(unsigned long start, static int virtio_mem_init(struct virtio_mem *vm) { const uint64_t phys_limit = 1UL << MAX_PHYSMEM_BITS; + uint16_t node_id; if (!vm->vdev->config->get) { dev_err(&vm->vdev->dev, "config access disabled\n"); @@ -1287,6 +1309,9 @@ static int virtio_mem_init(struct virtio_mem *vm) &vm->plugged_size); virtio_cread(vm->vdev, struct virtio_mem_config, block_size, &vm->device_block_size); + virtio_cread(vm->vdev, struct virtio_mem_config, node_id, + &node_id); + vm->nid = virtio_mem_translate_node_id(vm, node_id); virtio_cread(vm->vdev, struct virtio_mem_config, addr, &vm->addr); virtio_cread(vm->vdev, struct virtio_mem_config, region_size, &vm->region_size); @@ -1365,6 +1390,8 @@ static int virtio_mem_init(struct virtio_mem *vm) memory_block_size_bytes()); dev_info(&vm->vdev->dev, "subblock size: 0x%x", vm->subblock_size); + if (vm->nid != NUMA_NO_NODE) + dev_info(&vm->vdev->dev, "nid: %d", vm->nid); return 0; } @@ -1508,12 +1535,20 @@ static int virtio_mem_restore(struct virtio_device *vdev) } #endif +static unsigned int virtio_mem_features[] = { +#if defined(CONFIG_NUMA) && defined(CONFIG_ACPI_NUMA) + VIRTIO_MEM_F_ACPI_PXM, +#endif +}; + static struct virtio_device_id virtio_mem_id_table[] = { { VIRTIO_ID_MEM, VIRTIO_DEV_ANY_ID }, { 0 }, }; static struct virtio_driver virtio_mem_driver = { + .feature_table = virtio_mem_features, + .feature_table_size = ARRAY_SIZE(virtio_mem_features), .driver.name = KBUILD_MODNAME, .driver.owner = THIS_MODULE, .id_table = virtio_mem_id_table, diff --git a/include/uapi/linux/virtio_mem.h b/include/uapi/linux/virtio_mem.h index 1bfade78bdfd7..e0a9dc7397c34 100644 --- a/include/uapi/linux/virtio_mem.h +++ b/include/uapi/linux/virtio_mem.h @@ -83,6 +83,12 @@ * device is busy. */ +/* --- virtio-mem: feature bits --- */ + +/* node_id is an ACPI PXM and is valid */ +#define VIRTIO_MEM_F_ACPI_PXM 0 + + /* --- virtio-mem: guest -> host requests --- */ /* request to plug memory blocks */ @@ -177,7 +183,9 @@ struct virtio_mem_resp { struct virtio_mem_config { /* Block size and alignment. Cannot change. */ __u32 block_size; - __u32 padding; + /* Valid with VIRTIO_MEM_F_ACPI_PXM. Cannot change. */ + __u16 node_id; + __u16 padding; /* Start address of the memory region. Cannot change. */ __u64 addr; /* Region size (maximum). Cannot change. */ -- GitLab From c627ff5d982276908188fae86dbe727ed49c9594 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 7 May 2020 16:01:28 +0200 Subject: [PATCH 1136/1971] virtio-mem: Paravirtualized memory hotunplug part 1 Unplugging subblocks of memory blocks that are offline is easy. All we have to do is watch out for concurrent onlining activity. Tested-by: Pankaj Gupta Cc: "Michael S. Tsirkin" Cc: Jason Wang Cc: Oscar Salvador Cc: Michal Hocko Cc: Igor Mammedov Cc: Dave Young Cc: Andrew Morton Cc: Dan Williams Cc: Pavel Tatashin Cc: Stefan Hajnoczi Cc: Vlastimil Babka Signed-off-by: David Hildenbrand Link: https://lore.kernel.org/r/20200507140139.17083-5-david@redhat.com Signed-off-by: Michael S. 
Tsirkin --- drivers/virtio/virtio_mem.c | 116 +++++++++++++++++++++++++++++++++++- 1 file changed, 114 insertions(+), 2 deletions(-) diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c index 270ddeaec0596..a3ec795be8be3 100644 --- a/drivers/virtio/virtio_mem.c +++ b/drivers/virtio/virtio_mem.c @@ -123,7 +123,7 @@ struct virtio_mem { * * When this lock is held the pointers can't change, ONLINE and * OFFLINE blocks can't change the state and no subblocks will get - * plugged. + * plugged/unplugged. */ struct mutex hotplug_mutex; bool hotplug_active; @@ -280,6 +280,12 @@ static int virtio_mem_mb_state_prepare_next_mb(struct virtio_mem *vm) _mb_id++) \ if (virtio_mem_mb_get_state(_vm, _mb_id) == _state) +#define virtio_mem_for_each_mb_state_rev(_vm, _mb_id, _state) \ + for (_mb_id = _vm->next_mb_id - 1; \ + _mb_id >= _vm->first_mb_id && _vm->nb_mb_state[_state]; \ + _mb_id--) \ + if (virtio_mem_mb_get_state(_vm, _mb_id) == _state) + /* * Mark all selected subblocks plugged. * @@ -325,6 +331,19 @@ static bool virtio_mem_mb_test_sb_plugged(struct virtio_mem *vm, bit + count; } +/* + * Test if all selected subblocks are unplugged. + */ +static bool virtio_mem_mb_test_sb_unplugged(struct virtio_mem *vm, + unsigned long mb_id, int sb_id, + int count) +{ + const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id; + + /* TODO: Helper similar to bitmap_set() */ + return find_next_bit(vm->sb_bitmap, bit + count, bit) >= bit + count; +} + /* * Find the first plugged subblock. Returns vm->nb_sb_per_mb in case there is * none. @@ -513,6 +532,9 @@ static void virtio_mem_notify_offline(struct virtio_mem *vm, BUG(); break; } + + /* trigger the workqueue, maybe we can now unplug memory. */ + virtio_mem_retry(vm); } static void virtio_mem_notify_online(struct virtio_mem *vm, unsigned long mb_id, @@ -1122,6 +1144,94 @@ out_unlock: return rc; } +/* + * Unplug the desired number of plugged subblocks of an offline memory block. + * Will fail if any subblock cannot get unplugged (instead of skipping it). + * + * Will modify the state of the memory block. Might temporarily drop the + * hotplug_mutex. + * + * Note: Can fail after some subblocks were successfully unplugged. + */ +static int virtio_mem_mb_unplug_any_sb_offline(struct virtio_mem *vm, + unsigned long mb_id, + uint64_t *nb_sb) +{ + int rc; + + rc = virtio_mem_mb_unplug_any_sb(vm, mb_id, nb_sb); + + /* some subblocks might have been unplugged even on failure */ + if (!virtio_mem_mb_test_sb_plugged(vm, mb_id, 0, vm->nb_sb_per_mb)) + virtio_mem_mb_set_state(vm, mb_id, + VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL); + if (rc) + return rc; + + if (virtio_mem_mb_test_sb_unplugged(vm, mb_id, 0, vm->nb_sb_per_mb)) { + /* + * Remove the block from Linux - this should never fail. + * Hinder the block from getting onlined by marking it + * unplugged. Temporarily drop the mutex, so + * any pending GOING_ONLINE requests can be serviced/rejected. + */ + virtio_mem_mb_set_state(vm, mb_id, + VIRTIO_MEM_MB_STATE_UNUSED); + + mutex_unlock(&vm->hotplug_mutex); + rc = virtio_mem_mb_remove(vm, mb_id); + BUG_ON(rc); + mutex_lock(&vm->hotplug_mutex); + } + return 0; +} + +/* + * Try to unplug the requested amount of memory. + */ +static int virtio_mem_unplug_request(struct virtio_mem *vm, uint64_t diff) +{ + uint64_t nb_sb = diff / vm->subblock_size; + unsigned long mb_id; + int rc; + + if (!nb_sb) + return 0; + + /* + * We'll drop the mutex a couple of times when it is safe to do so. 
+ * This might result in some blocks switching the state (online/offline) + * and we could miss them in this run - we will retry again later. + */ + mutex_lock(&vm->hotplug_mutex); + + /* Try to unplug subblocks of partially plugged offline blocks. */ + virtio_mem_for_each_mb_state_rev(vm, mb_id, + VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL) { + rc = virtio_mem_mb_unplug_any_sb_offline(vm, mb_id, + &nb_sb); + if (rc || !nb_sb) + goto out_unlock; + cond_resched(); + } + + /* Try to unplug subblocks of plugged offline blocks. */ + virtio_mem_for_each_mb_state_rev(vm, mb_id, + VIRTIO_MEM_MB_STATE_OFFLINE) { + rc = virtio_mem_mb_unplug_any_sb_offline(vm, mb_id, + &nb_sb); + if (rc || !nb_sb) + goto out_unlock; + cond_resched(); + } + + mutex_unlock(&vm->hotplug_mutex); + return 0; +out_unlock: + mutex_unlock(&vm->hotplug_mutex); + return rc; +} + /* * Try to unplug all blocks that couldn't be unplugged before, for example, * because the hypervisor was busy. @@ -1204,8 +1314,10 @@ retry: if (vm->requested_size > vm->plugged_size) { diff = vm->requested_size - vm->plugged_size; rc = virtio_mem_plug_request(vm, diff); + } else { + diff = vm->plugged_size - vm->requested_size; + rc = virtio_mem_unplug_request(vm, diff); } - /* TODO: try to unplug memory */ } switch (rc) { -- GitLab From 255f598507083905995ecab96392770ae03aac7f Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 7 May 2020 16:01:29 +0200 Subject: [PATCH 1137/1971] virtio-mem: Paravirtualized memory hotunplug part 2 We also want to unplug online memory (contained in online memory blocks and, therefore, managed by the buddy), and eventually replug it later. When requested to unplug memory, we use alloc_contig_range() to allocate subblocks in online memory blocks (so we are the owner) and send them to our hypervisor. When requested to plug memory, we can replug such memory using free_contig_range() after asking our hypervisor. We also want to mark all allocated pages PG_offline, so nobody will touch them. To differentiate pages that were never onlined when onlining the memory block from pages allocated via alloc_contig_range(), we use PageDirty(). Based on this flag, virtio_mem_fake_online() can either online the pages for the first time or use free_contig_range(). It is worth noting that there are no guarantees on how much memory can actually get unplugged again. All device memory might completely be fragmented with unmovable data, such that no subblock can get unplugged. We are not touching the ZONE_MOVABLE. If memory is onlined to the ZONE_MOVABLE, it can only get unplugged after that memory was offlined manually by user space. In normal operation, virtio-mem memory is suggested to be onlined to ZONE_NORMAL. In the future, we will try to make unplug more likely to succeed. Add a module parameter to control if online memory shall be touched. As we want to access alloc_contig_range()/free_contig_range() from kernel module context, export the symbols. Note: Whenever virtio-mem uses alloc_contig_range(), all affected pages are on the same node, in the same zone, and contain no holes. Acked-by: Michal Hocko # to export contig range allocator API Tested-by: Pankaj Gupta Cc: "Michael S. 
Tsirkin" Cc: Jason Wang Cc: Oscar Salvador Cc: Michal Hocko Cc: Igor Mammedov Cc: Dave Young Cc: Andrew Morton Cc: Dan Williams Cc: Pavel Tatashin Cc: Stefan Hajnoczi Cc: Vlastimil Babka Cc: Mel Gorman Cc: Mike Rapoport Cc: Alexander Duyck Cc: Alexander Potapenko Signed-off-by: David Hildenbrand Link: https://lore.kernel.org/r/20200507140139.17083-6-david@redhat.com Signed-off-by: Michael S. Tsirkin --- drivers/virtio/Kconfig | 1 + drivers/virtio/virtio_mem.c | 157 ++++++++++++++++++++++++++++++++---- mm/page_alloc.c | 2 + 3 files changed, 146 insertions(+), 14 deletions(-) diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig index d6dde7d2cf762..4c1e14615001c 100644 --- a/drivers/virtio/Kconfig +++ b/drivers/virtio/Kconfig @@ -85,6 +85,7 @@ config VIRTIO_MEM depends on VIRTIO depends on MEMORY_HOTPLUG_SPARSE depends on MEMORY_HOTREMOVE + select CONTIG_ALLOC help This driver provides access to virtio-mem paravirtualized memory devices, allowing to hotplug and hotunplug memory. diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c index a3ec795be8be3..74f0d3cb1d220 100644 --- a/drivers/virtio/virtio_mem.c +++ b/drivers/virtio/virtio_mem.c @@ -23,6 +23,10 @@ #include +static bool unplug_online = true; +module_param(unplug_online, bool, 0644); +MODULE_PARM_DESC(unplug_online, "Try to unplug online memory"); + enum virtio_mem_mb_state { /* Unplugged, not added to Linux. Can be reused later. */ VIRTIO_MEM_MB_STATE_UNUSED = 0, @@ -654,23 +658,35 @@ static int virtio_mem_memory_notifier_cb(struct notifier_block *nb, } /* - * Set a range of pages PG_offline. + * Set a range of pages PG_offline. Remember pages that were never onlined + * (via generic_online_page()) using PageDirty(). */ static void virtio_mem_set_fake_offline(unsigned long pfn, - unsigned int nr_pages) + unsigned int nr_pages, bool onlined) { - for (; nr_pages--; pfn++) - __SetPageOffline(pfn_to_page(pfn)); + for (; nr_pages--; pfn++) { + struct page *page = pfn_to_page(pfn); + + __SetPageOffline(page); + if (!onlined) + SetPageDirty(page); + } } /* - * Clear PG_offline from a range of pages. + * Clear PG_offline from a range of pages. If the pages were never onlined, + * (via generic_online_page()), clear PageDirty(). */ static void virtio_mem_clear_fake_offline(unsigned long pfn, - unsigned int nr_pages) + unsigned int nr_pages, bool onlined) { - for (; nr_pages--; pfn++) - __ClearPageOffline(pfn_to_page(pfn)); + for (; nr_pages--; pfn++) { + struct page *page = pfn_to_page(pfn); + + __ClearPageOffline(page); + if (!onlined) + ClearPageDirty(page); + } } /* @@ -686,10 +702,26 @@ static void virtio_mem_fake_online(unsigned long pfn, unsigned int nr_pages) * We are always called with subblock granularity, which is at least * aligned to MAX_ORDER - 1. */ - virtio_mem_clear_fake_offline(pfn, nr_pages); + for (i = 0; i < nr_pages; i += 1 << order) { + struct page *page = pfn_to_page(pfn + i); - for (i = 0; i < nr_pages; i += 1 << order) - generic_online_page(pfn_to_page(pfn + i), order); + /* + * If the page is PageDirty(), it was kept fake-offline when + * onlining the memory block. Otherwise, it was allocated + * using alloc_contig_range(). All pages in a subblock are + * alike. 
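+ * For pages from alloc_contig_range(), the managed page count was reduced
+ * when unplugging and is re-adjusted below.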
+ */ + if (PageDirty(page)) { + virtio_mem_clear_fake_offline(pfn + i, 1 << order, + false); + generic_online_page(page, order); + } else { + virtio_mem_clear_fake_offline(pfn + i, 1 << order, + true); + free_contig_range(pfn + i, 1 << order); + adjust_managed_page_count(page, 1 << order); + } + } } static void virtio_mem_online_page_cb(struct page *page, unsigned int order) @@ -718,7 +750,8 @@ static void virtio_mem_online_page_cb(struct page *page, unsigned int order) if (virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1)) generic_online_page(page, order); else - virtio_mem_set_fake_offline(PFN_DOWN(addr), 1 << order); + virtio_mem_set_fake_offline(PFN_DOWN(addr), 1 << order, + false); rcu_read_unlock(); return; } @@ -1186,6 +1219,72 @@ static int virtio_mem_mb_unplug_any_sb_offline(struct virtio_mem *vm, return 0; } +/* + * Unplug the desired number of plugged subblocks of an online memory block. + * Will skip subblock that are busy. + * + * Will modify the state of the memory block. + * + * Note: Can fail after some subblocks were successfully unplugged. Can + * return 0 even if subblocks were busy and could not get unplugged. + */ +static int virtio_mem_mb_unplug_any_sb_online(struct virtio_mem *vm, + unsigned long mb_id, + uint64_t *nb_sb) +{ + const unsigned long nr_pages = PFN_DOWN(vm->subblock_size); + unsigned long start_pfn; + int rc, sb_id; + + /* + * TODO: To increase the performance we want to try bigger, consecutive + * subblocks first before falling back to single subblocks. Also, + * we should sense via something like is_mem_section_removable() + * first if it makes sense to go ahead any try to allocate. + */ + for (sb_id = 0; sb_id < vm->nb_sb_per_mb && *nb_sb; sb_id++) { + /* Find the next candidate subblock */ + while (sb_id < vm->nb_sb_per_mb && + !virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1)) + sb_id++; + if (sb_id >= vm->nb_sb_per_mb) + break; + + start_pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) + + sb_id * vm->subblock_size); + rc = alloc_contig_range(start_pfn, start_pfn + nr_pages, + MIGRATE_MOVABLE, GFP_KERNEL); + if (rc == -ENOMEM) + /* whoops, out of memory */ + return rc; + if (rc) + /* memory busy, we can't unplug this chunk */ + continue; + + /* Mark it as fake-offline before unplugging it */ + virtio_mem_set_fake_offline(start_pfn, nr_pages, true); + adjust_managed_page_count(pfn_to_page(start_pfn), -nr_pages); + + /* Try to unplug the allocated memory */ + rc = virtio_mem_mb_unplug_sb(vm, mb_id, sb_id, 1); + if (rc) { + /* Return the memory to the buddy. */ + virtio_mem_fake_online(start_pfn, nr_pages); + return rc; + } + + virtio_mem_mb_set_state(vm, mb_id, + VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL); + *nb_sb -= 1; + } + + /* + * TODO: Once all subblocks of a memory block were unplugged, we want + * to offline the memory block and remove it. + */ + return 0; +} + /* * Try to unplug the requested amount of memory. */ @@ -1225,8 +1324,37 @@ static int virtio_mem_unplug_request(struct virtio_mem *vm, uint64_t diff) cond_resched(); } + if (!unplug_online) { + mutex_unlock(&vm->hotplug_mutex); + return 0; + } + + /* Try to unplug subblocks of partially plugged online blocks. */ + virtio_mem_for_each_mb_state_rev(vm, mb_id, + VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL) { + rc = virtio_mem_mb_unplug_any_sb_online(vm, mb_id, + &nb_sb); + if (rc || !nb_sb) + goto out_unlock; + mutex_unlock(&vm->hotplug_mutex); + cond_resched(); + mutex_lock(&vm->hotplug_mutex); + } + + /* Try to unplug subblocks of plugged online blocks. 
*/ + virtio_mem_for_each_mb_state_rev(vm, mb_id, + VIRTIO_MEM_MB_STATE_ONLINE) { + rc = virtio_mem_mb_unplug_any_sb_online(vm, mb_id, + &nb_sb); + if (rc || !nb_sb) + goto out_unlock; + mutex_unlock(&vm->hotplug_mutex); + cond_resched(); + mutex_lock(&vm->hotplug_mutex); + } + mutex_unlock(&vm->hotplug_mutex); - return 0; + return nb_sb ? -EBUSY : 0; out_unlock: mutex_unlock(&vm->hotplug_mutex); return rc; @@ -1332,7 +1460,8 @@ retry: case -EBUSY: /* * The hypervisor cannot process our request right now - * (e.g., out of memory, migrating). + * (e.g., out of memory, migrating) or we cannot free up + * any memory to unplug it (all plugged memory is busy). */ case -ENOMEM: /* Out of memory, try again later. */ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 13cc653122b73..ce1c9df54eacb 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -8603,6 +8603,7 @@ done: pfn_max_align_up(end), migratetype); return ret; } +EXPORT_SYMBOL(alloc_contig_range); static int __alloc_contig_pages(unsigned long start_pfn, unsigned long nr_pages, gfp_t gfp_mask) @@ -8718,6 +8719,7 @@ void free_contig_range(unsigned long pfn, unsigned int nr_pages) } WARN(count != 0, "%d pages are still in use!\n", count); } +EXPORT_SYMBOL(free_contig_range); /* * The zone indicated has a new number of managed_pages; batch sizes and percpu -- GitLab From aa218795cb5fd583c94fc838dc76b7379dc4976a Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 7 May 2020 16:01:30 +0200 Subject: [PATCH 1138/1971] mm: Allow to offline unmovable PageOffline() pages via MEM_GOING_OFFLINE virtio-mem wants to allow to offline memory blocks of which some parts were unplugged (allocated via alloc_contig_range()), especially, to later offline and remove completely unplugged memory blocks. The important part is that PageOffline() has to remain set until the section is offline, so these pages will never get accessed (e.g., when dumping). The pages should not be handed back to the buddy (which would require clearing PageOffline() and result in issues if offlining fails and the pages are suddenly in the buddy). Let's allow to do that by allowing to isolate any PageOffline() page when offlining. This way, we can reach the memory hotplug notifier MEM_GOING_OFFLINE, where the driver can signal that he is fine with offlining this page by dropping its reference count. PageOffline() pages with a reference count of 0 can then be skipped when offlining the pages (like if they were free, however they are not in the buddy). Anybody who uses PageOffline() pages and does not agree to offline them (e.g., Hyper-V balloon, XEN balloon, VMWare balloon for 2MB pages) will not decrement the reference count and make offlining fail when trying to migrate such an unmovable page. So there should be no observable change. Same applies to balloon compaction users (movable PageOffline() pages), the pages will simply be migrated. Note 1: If offlining fails, a driver has to increment the reference count again in MEM_CANCEL_OFFLINE. Note 2: A driver that makes use of this has to be aware that re-onlining the memory block has to be handled by hooking into onlining code (online_page_callback_t), resetting the page PageOffline() and not giving them to the buddy. 
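As a rough sketch of the contract described in the two notes above (hypothetical driver code, not part of this patch: the my_drv prefix and the my_drv_page_is_unplugged() helper are made up, while the page_ref_dec_and_test()/page_ref_inc() pairing in MEM_GOING_OFFLINE/MEM_CANCEL_OFFLINE is what this patch enables):

#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/page_ref.h>

/* Hypothetical: tells whether the driver logically unplugged this page. */
static bool my_drv_page_is_unplugged(unsigned long pfn);

static int my_drv_memory_notifier_cb(struct notifier_block *nb,
                                     unsigned long action, void *arg)
{
        struct memory_notify *mhp = arg;
        unsigned long pfn;

        switch (action) {
        case MEM_GOING_OFFLINE:
                /* Drop our reference so offlining code can skip these pages. */
                for (pfn = mhp->start_pfn;
                     pfn < mhp->start_pfn + mhp->nr_pages; pfn++) {
                        if (!my_drv_page_is_unplugged(pfn))
                                continue;
                        /* Nobody else should still hold a reference. */
                        WARN_ON(!page_ref_dec_and_test(pfn_to_page(pfn)));
                }
                break;
        case MEM_CANCEL_OFFLINE:
                /* Offlining failed or was canceled: take the reference back. */
                for (pfn = mhp->start_pfn;
                     pfn < mhp->start_pfn + mhp->nr_pages; pfn++) {
                        if (my_drv_page_is_unplugged(pfn))
                                page_ref_inc(pfn_to_page(pfn));
                }
                break;
        }
        return NOTIFY_OK;
}

The actual user of this mechanism, virtio-mem, is wired up in the following patches.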
Reviewed-by: Alexander Duyck Acked-by: Michal Hocko Tested-by: Pankaj Gupta Acked-by: Andrew Morton Cc: Andrew Morton Cc: Juergen Gross Cc: Konrad Rzeszutek Wilk Cc: Pavel Tatashin Cc: Alexander Duyck Cc: Vlastimil Babka Cc: Johannes Weiner Cc: Anthony Yznaga Cc: Michal Hocko Cc: Oscar Salvador Cc: Mel Gorman Cc: Mike Rapoport Cc: Dan Williams Cc: Anshuman Khandual Cc: Qian Cai Cc: Pingfan Liu Signed-off-by: David Hildenbrand Link: https://lore.kernel.org/r/20200507140139.17083-7-david@redhat.com Signed-off-by: Michael S. Tsirkin --- include/linux/page-flags.h | 10 +++++++++ mm/memory_hotplug.c | 44 +++++++++++++++++++++++++++++--------- mm/page_alloc.c | 24 +++++++++++++++++++++ mm/page_isolation.c | 9 ++++++++ 4 files changed, 77 insertions(+), 10 deletions(-) diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 222f6f7b2bb35..6be1aa559b1e4 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -777,6 +777,16 @@ PAGE_TYPE_OPS(Buddy, buddy) * not onlined when onlining the section). * The content of these pages is effectively stale. Such pages should not * be touched (read/write/dump/save) except by their owner. + * + * If a driver wants to allow to offline unmovable PageOffline() pages without + * putting them back to the buddy, it can do so via the memory notifier by + * decrementing the reference count in MEM_GOING_OFFLINE and incrementing the + * reference count in MEM_CANCEL_OFFLINE. When offlining, the PageOffline() + * pages (now with a reference count of zero) are treated like free pages, + * allowing the containing memory block to get offlined. A driver that + * relies on this feature is aware that re-onlining the memory block will + * require to re-set the pages PageOffline() and not giving them to the + * buddy via online_page_callback_t. */ PAGE_TYPE_OPS(Offline, offline) diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index fc0aad0bc1f54..008e4a7ed8bc2 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1224,11 +1224,17 @@ struct zone *test_pages_in_a_zone(unsigned long start_pfn, /* * Scan pfn range [start,end) to find movable/migratable pages (LRU pages, - * non-lru movable pages and hugepages). We scan pfn because it's much - * easier than scanning over linked list. This function returns the pfn - * of the first found movable page if it's found, otherwise 0. + * non-lru movable pages and hugepages). Will skip over most unmovable + * pages (esp., pages that can be skipped when offlining), but bail out on + * definitely unmovable pages. + * + * Returns: + * 0 in case a movable page is found and movable_pfn was updated. + * -ENOENT in case no movable page was found. + * -EBUSY in case a definitely unmovable page was found. */ -static unsigned long scan_movable_pages(unsigned long start, unsigned long end) +static int scan_movable_pages(unsigned long start, unsigned long end, + unsigned long *movable_pfn) { unsigned long pfn; @@ -1240,18 +1246,30 @@ static unsigned long scan_movable_pages(unsigned long start, unsigned long end) continue; page = pfn_to_page(pfn); if (PageLRU(page)) - return pfn; + goto found; if (__PageMovable(page)) - return pfn; + goto found; + + /* + * PageOffline() pages that are not marked __PageMovable() and + * have a reference count > 0 (after MEM_GOING_OFFLINE) are + * definitely unmovable. If their reference count would be 0, + * they could at least be skipped when offlining memory. 
+ */ + if (PageOffline(page) && page_count(page)) + return -EBUSY; if (!PageHuge(page)) continue; head = compound_head(page); if (page_huge_active(head)) - return pfn; + goto found; skip = compound_nr(head) - (page - head); pfn += skip - 1; } + return -ENOENT; +found: + *movable_pfn = pfn; return 0; } @@ -1518,7 +1536,8 @@ static int __ref __offline_pages(unsigned long start_pfn, } do { - for (pfn = start_pfn; pfn;) { + pfn = start_pfn; + do { if (signal_pending(current)) { ret = -EINTR; reason = "signal backoff"; @@ -1528,14 +1547,19 @@ static int __ref __offline_pages(unsigned long start_pfn, cond_resched(); lru_add_drain_all(); - pfn = scan_movable_pages(pfn, end_pfn); - if (pfn) { + ret = scan_movable_pages(pfn, end_pfn, &pfn); + if (!ret) { /* * TODO: fatal migration failures should bail * out */ do_migrate_range(pfn, end_pfn); } + } while (!ret); + + if (ret != -ENOENT) { + reason = "unmovable page"; + goto failed_removal_isolated; } /* diff --git a/mm/page_alloc.c b/mm/page_alloc.c index ce1c9df54eacb..9b1c49c3097dc 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -8372,6 +8372,19 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page, if ((flags & MEMORY_OFFLINE) && PageHWPoison(page)) continue; + /* + * We treat all PageOffline() pages as movable when offlining + * to give drivers a chance to decrement their reference count + * in MEM_GOING_OFFLINE in order to indicate that these pages + * can be offlined as there are no direct references anymore. + * For actually unmovable PageOffline() where the driver does + * not support this, we will fail later when trying to actually + * move these pages that still have a reference count > 0. + * (false negatives in this function only) + */ + if ((flags & MEMORY_OFFLINE) && PageOffline(page)) + continue; + if (__PageMovable(page) || PageLRU(page)) continue; @@ -8792,6 +8805,17 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) offlined_pages++; continue; } + /* + * At this point all remaining PageOffline() pages have a + * reference count of 0 and can simply be skipped. + */ + if (PageOffline(page)) { + BUG_ON(page_count(page)); + BUG_ON(PageBuddy(page)); + pfn++; + offlined_pages++; + continue; + } BUG_ON(page_count(page)); BUG_ON(!PageBuddy(page)); diff --git a/mm/page_isolation.c b/mm/page_isolation.c index 2c11a38d6e875..f6d07c5f0d34d 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c @@ -151,6 +151,7 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages) * a bit mask) * MEMORY_OFFLINE - isolate to offline (!allocate) memory * e.g., skip over PageHWPoison() pages + * and PageOffline() pages. * REPORT_FAILURE - report details about the failure to * isolate the range * @@ -259,6 +260,14 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn, else if ((flags & MEMORY_OFFLINE) && PageHWPoison(page)) /* A HWPoisoned page cannot be also PageBuddy */ pfn++; + else if ((flags & MEMORY_OFFLINE) && PageOffline(page) && + !page_count(page)) + /* + * The responsible driver agreed to skip PageOffline() + * pages when offlining memory by dropping its + * reference in MEM_GOING_OFFLINE. + */ + pfn++; else break; } -- GitLab From 8e5c921ca0cd9aa59386e6be4b86b32f0ba7296b Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 7 May 2020 16:01:31 +0200 Subject: [PATCH 1139/1971] virtio-mem: Allow to offline partially unplugged memory blocks Dropping the reference count of PageOffline() pages during MEM_GOING_ONLINE allows offlining code to skip them. 
However, we also have to clear PG_reserved, because PG_reserved pages get detected as unmovable right away. Take care of restoring the reference count when offlining is canceled. Clarify why we don't have to perform any action when unloading the driver. Also, let's add a warning if anybody is still holding a reference to unplugged pages when offlining. Tested-by: Pankaj Gupta Cc: "Michael S. Tsirkin" Cc: Jason Wang Cc: Oscar Salvador Cc: Michal Hocko Cc: Igor Mammedov Cc: Dave Young Cc: Andrew Morton Cc: Dan Williams Cc: Pavel Tatashin Cc: Stefan Hajnoczi Cc: Vlastimil Babka Signed-off-by: David Hildenbrand Link: https://lore.kernel.org/r/20200507140139.17083-8-david@redhat.com Signed-off-by: Michael S. Tsirkin --- drivers/virtio/virtio_mem.c | 68 ++++++++++++++++++++++++++++++++++++- 1 file changed, 67 insertions(+), 1 deletion(-) diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c index 74f0d3cb1d220..b0b41c73ce89e 100644 --- a/drivers/virtio/virtio_mem.c +++ b/drivers/virtio/virtio_mem.c @@ -572,6 +572,57 @@ static void virtio_mem_notify_online(struct virtio_mem *vm, unsigned long mb_id, virtio_mem_retry(vm); } +static void virtio_mem_notify_going_offline(struct virtio_mem *vm, + unsigned long mb_id) +{ + const unsigned long nr_pages = PFN_DOWN(vm->subblock_size); + struct page *page; + unsigned long pfn; + int sb_id, i; + + for (sb_id = 0; sb_id < vm->nb_sb_per_mb; sb_id++) { + if (virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1)) + continue; + /* + * Drop our reference to the pages so the memory can get + * offlined and add the unplugged pages to the managed + * page counters (so offlining code can correctly subtract + * them again). + */ + pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) + + sb_id * vm->subblock_size); + adjust_managed_page_count(pfn_to_page(pfn), nr_pages); + for (i = 0; i < nr_pages; i++) { + page = pfn_to_page(pfn + i); + if (WARN_ON(!page_ref_dec_and_test(page))) + dump_page(page, "unplugged page referenced"); + } + } +} + +static void virtio_mem_notify_cancel_offline(struct virtio_mem *vm, + unsigned long mb_id) +{ + const unsigned long nr_pages = PFN_DOWN(vm->subblock_size); + unsigned long pfn; + int sb_id, i; + + for (sb_id = 0; sb_id < vm->nb_sb_per_mb; sb_id++) { + if (virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1)) + continue; + /* + * Get the reference we dropped when going offline and + * subtract the unplugged pages from the managed page + * counters. + */ + pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) + + sb_id * vm->subblock_size); + adjust_managed_page_count(pfn_to_page(pfn), -nr_pages); + for (i = 0; i < nr_pages; i++) + page_ref_inc(pfn_to_page(pfn + i)); + } +} + /* * This callback will either be called synchronously from add_memory() or * asynchronously (e.g., triggered via user space). 
We have to be careful @@ -618,6 +669,7 @@ static int virtio_mem_memory_notifier_cb(struct notifier_block *nb, break; } vm->hotplug_active = true; + virtio_mem_notify_going_offline(vm, mb_id); break; case MEM_GOING_ONLINE: mutex_lock(&vm->hotplug_mutex); @@ -642,6 +694,12 @@ static int virtio_mem_memory_notifier_cb(struct notifier_block *nb, mutex_unlock(&vm->hotplug_mutex); break; case MEM_CANCEL_OFFLINE: + if (!vm->hotplug_active) + break; + virtio_mem_notify_cancel_offline(vm, mb_id); + vm->hotplug_active = false; + mutex_unlock(&vm->hotplug_mutex); + break; case MEM_CANCEL_ONLINE: if (!vm->hotplug_active) break; @@ -668,8 +726,11 @@ static void virtio_mem_set_fake_offline(unsigned long pfn, struct page *page = pfn_to_page(pfn); __SetPageOffline(page); - if (!onlined) + if (!onlined) { SetPageDirty(page); + /* FIXME: remove after cleanups */ + ClearPageReserved(page); + } } } @@ -1722,6 +1783,11 @@ static void virtio_mem_remove(struct virtio_device *vdev) BUG_ON(rc); virtio_mem_mb_set_state(vm, mb_id, VIRTIO_MEM_MB_STATE_UNUSED); } + /* + * After we unregistered our callbacks, user space can no longer + * offline partially plugged online memory blocks. No need to worry + * about them. + */ /* unregister callbacks */ unregister_virtio_mem_device(vm); -- GitLab From 08b3acd7a68fc17902e1cb6b146389322840deab Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 7 May 2020 16:01:32 +0200 Subject: [PATCH 1140/1971] mm/memory_hotplug: Introduce offline_and_remove_memory() virtio-mem wants to offline and remove a memory block once it unplugged all subblocks (e.g., using alloc_contig_range()). Let's provide an interface to do that from a driver. virtio-mem already supports to offline partially unplugged memory blocks. Offlining a fully unplugged memory block will not require to migrate any pages. All unplugged subblocks are PageOffline() and have a reference count of 0 - so offlining code will simply skip them. All we need is an interface to offline and remove the memory from kernel module context, where we don't have access to the memory block devices (esp. find_memory_block() and device_offline()) and the device hotplug lock. To keep things simple, allow to only work on a single memory block. Acked-by: Michal Hocko Tested-by: Pankaj Gupta Acked-by: Andrew Morton Cc: Andrew Morton Cc: David Hildenbrand Cc: Oscar Salvador Cc: Michal Hocko Cc: Pavel Tatashin Cc: Wei Yang Cc: Dan Williams Cc: Qian Cai Signed-off-by: David Hildenbrand Link: https://lore.kernel.org/r/20200507140139.17083-9-david@redhat.com Signed-off-by: Michael S. 
Tsirkin --- include/linux/memory_hotplug.h | 1 + mm/memory_hotplug.c | 37 ++++++++++++++++++++++++++++++++++ 2 files changed, 38 insertions(+) diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 93d9ada74ddd6..cb7499843f5c6 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -319,6 +319,7 @@ extern void try_offline_node(int nid); extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages); extern int remove_memory(int nid, u64 start, u64 size); extern void __remove_memory(int nid, u64 start, u64 size); +extern int offline_and_remove_memory(int nid, u64 start, u64 size); #else static inline bool is_mem_section_removable(unsigned long pfn, diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 008e4a7ed8bc2..4acb99aa9bf4c 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1821,4 +1821,41 @@ int remove_memory(int nid, u64 start, u64 size) return rc; } EXPORT_SYMBOL_GPL(remove_memory); + +/* + * Try to offline and remove a memory block. Might take a long time to + * finish in case memory is still in use. Primarily useful for memory devices + * that logically unplugged all memory (so it's no longer in use) and want to + * offline + remove the memory block. + */ +int offline_and_remove_memory(int nid, u64 start, u64 size) +{ + struct memory_block *mem; + int rc = -EINVAL; + + if (!IS_ALIGNED(start, memory_block_size_bytes()) || + size != memory_block_size_bytes()) + return rc; + + lock_device_hotplug(); + mem = find_memory_block(__pfn_to_section(PFN_DOWN(start))); + if (mem) + rc = device_offline(&mem->dev); + /* Ignore if the device is already offline. */ + if (rc > 0) + rc = 0; + + /* + * In case we succeeded to offline the memory block, remove it. + * This cannot fail as it cannot get onlined in the meantime. + */ + if (!rc) { + rc = try_remove_memory(nid, start, size); + WARN_ON_ONCE(rc); + } + unlock_device_hotplug(); + + return rc; +} +EXPORT_SYMBOL_GPL(offline_and_remove_memory); #endif /* CONFIG_MEMORY_HOTREMOVE */ -- GitLab From a573238786f8f16aca6946fc7b804b965e3038e9 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 7 May 2020 16:01:33 +0200 Subject: [PATCH 1141/1971] virtio-mem: Offline and remove completely unplugged memory blocks Let's offline+remove memory blocks once all subblocks are unplugged. We can use the new Linux MM interface for that. As no memory is in use anymore, this shouldn't take a long time and shouldn't fail. There might be corner cases where the offlining could still fail (especially, if another notifier NACKs the offlining request). Acked-by: Pankaj Gupta Tested-by: Pankaj Gupta Cc: "Michael S. Tsirkin" Cc: Jason Wang Cc: Oscar Salvador Cc: Michal Hocko Cc: Igor Mammedov Cc: Dave Young Cc: Andrew Morton Cc: Dan Williams Cc: Pavel Tatashin Cc: Stefan Hajnoczi Cc: Vlastimil Babka Signed-off-by: David Hildenbrand Link: https://lore.kernel.org/r/20200507140139.17083-10-david@redhat.com Signed-off-by: Michael S. Tsirkin --- drivers/virtio/virtio_mem.c | 47 +++++++++++++++++++++++++++++++++---- 1 file changed, 43 insertions(+), 4 deletions(-) diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c index b0b41c73ce89e..a2edb87e5ed86 100644 --- a/drivers/virtio/virtio_mem.c +++ b/drivers/virtio/virtio_mem.c @@ -446,6 +446,28 @@ static int virtio_mem_mb_remove(struct virtio_mem *vm, unsigned long mb_id) return remove_memory(nid, addr, memory_block_size_bytes()); } +/* + * Try to offline and remove a memory block from Linux. 
+ * + * Must not be called with the vm->hotplug_mutex held (possible deadlock with + * onlining code). + * + * Will not modify the state of the memory block. + */ +static int virtio_mem_mb_offline_and_remove(struct virtio_mem *vm, + unsigned long mb_id) +{ + const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id); + int nid = vm->nid; + + if (nid == NUMA_NO_NODE) + nid = memory_add_physaddr_to_nid(addr); + + dev_dbg(&vm->vdev->dev, "offlining and removing memory block: %lu\n", + mb_id); + return offline_and_remove_memory(nid, addr, memory_block_size_bytes()); +} + /* * Trigger the workqueue so the device can perform its magic. */ @@ -537,7 +559,13 @@ static void virtio_mem_notify_offline(struct virtio_mem *vm, break; } - /* trigger the workqueue, maybe we can now unplug memory. */ + /* + * Trigger the workqueue, maybe we can now unplug memory. Also, + * when we offline and remove a memory block, this will re-trigger + * us immediately - which is often nice because the removal of + * the memory block (e.g., memmap) might have freed up memory + * on other memory blocks we manage. + */ virtio_mem_retry(vm); } @@ -1284,7 +1312,8 @@ static int virtio_mem_mb_unplug_any_sb_offline(struct virtio_mem *vm, * Unplug the desired number of plugged subblocks of an online memory block. * Will skip subblock that are busy. * - * Will modify the state of the memory block. + * Will modify the state of the memory block. Might temporarily drop the + * hotplug_mutex. * * Note: Can fail after some subblocks were successfully unplugged. Can * return 0 even if subblocks were busy and could not get unplugged. @@ -1340,9 +1369,19 @@ static int virtio_mem_mb_unplug_any_sb_online(struct virtio_mem *vm, } /* - * TODO: Once all subblocks of a memory block were unplugged, we want - * to offline the memory block and remove it. + * Once all subblocks of a memory block were unplugged, offline and + * remove it. This will usually not fail, as no memory is in use + * anymore - however some other notifiers might NACK the request. */ + if (virtio_mem_mb_test_sb_unplugged(vm, mb_id, 0, vm->nb_sb_per_mb)) { + mutex_unlock(&vm->hotplug_mutex); + rc = virtio_mem_mb_offline_and_remove(vm, mb_id); + mutex_lock(&vm->hotplug_mutex); + if (!rc) + virtio_mem_mb_set_state(vm, mb_id, + VIRTIO_MEM_MB_STATE_UNUSED); + } + return 0; } -- GitLab From 23e77b5dc9cd88709c48ada936c07bdd72c49426 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 7 May 2020 16:01:34 +0200 Subject: [PATCH 1142/1971] virtio-mem: Better retry handling Let's start with a retry interval of 5 seconds and double the time until we reach 5 minutes, in case we keep getting errors. Reset the retry interval in case we succeeded. The two main reasons for having to retry are - The hypervisor is busy and cannot process our request - We cannot reach the desired requested_size (esp., not enough memory can get unplugged because we can't allocate any subblocks). Tested-by: Pankaj Gupta Cc: "Michael S. Tsirkin" Cc: Jason Wang Cc: Oscar Salvador Cc: Michal Hocko Cc: Igor Mammedov Cc: Dave Young Cc: Andrew Morton Cc: Dan Williams Cc: Pavel Tatashin Cc: Stefan Hajnoczi Cc: Vlastimil Babka Signed-off-by: David Hildenbrand Link: https://lore.kernel.org/r/20200507140139.17083-11-david@redhat.com Signed-off-by: Michael S. 
Tsirkin --- drivers/virtio/virtio_mem.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c index a2edb87e5ed86..eb4c16d634e0d 100644 --- a/drivers/virtio/virtio_mem.c +++ b/drivers/virtio/virtio_mem.c @@ -141,7 +141,9 @@ struct virtio_mem { /* Timer for retrying to plug/unplug memory. */ struct hrtimer retry_timer; -#define VIRTIO_MEM_RETRY_TIMER_MS 30000 + unsigned int retry_timer_ms; +#define VIRTIO_MEM_RETRY_TIMER_MIN_MS 50000 +#define VIRTIO_MEM_RETRY_TIMER_MAX_MS 300000 /* Memory notifier (online/offline events). */ struct notifier_block memory_notifier; @@ -1550,6 +1552,7 @@ retry: switch (rc) { case 0: + vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS; break; case -ENOSPC: /* @@ -1565,8 +1568,7 @@ retry: */ case -ENOMEM: /* Out of memory, try again later. */ - hrtimer_start(&vm->retry_timer, - ms_to_ktime(VIRTIO_MEM_RETRY_TIMER_MS), + hrtimer_start(&vm->retry_timer, ms_to_ktime(vm->retry_timer_ms), HRTIMER_MODE_REL); break; case -EAGAIN: @@ -1586,6 +1588,8 @@ static enum hrtimer_restart virtio_mem_timer_expired(struct hrtimer *timer) retry_timer); virtio_mem_retry(vm); + vm->retry_timer_ms = min_t(unsigned int, vm->retry_timer_ms * 2, + VIRTIO_MEM_RETRY_TIMER_MAX_MS); return HRTIMER_NORESTART; } @@ -1754,6 +1758,7 @@ static int virtio_mem_probe(struct virtio_device *vdev) spin_lock_init(&vm->removal_lock); hrtimer_init(&vm->retry_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); vm->retry_timer.function = virtio_mem_timer_expired; + vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS; /* register the virtqueue */ rc = virtio_mem_init_vq(vm); -- GitLab From ebf71552bb0e690cad523ad175e8c4c89a33c333 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 7 May 2020 16:01:35 +0200 Subject: [PATCH 1143/1971] virtio-mem: Add parent resource for all added "System RAM" Let's add a parent resource, named after the virtio device (inspired by drivers/dax/kmem.c). This allows user space to identify which memory belongs to which virtio-mem device. With this change and two virtio-mem devices: :/# cat /proc/iomem 00000000-00000fff : Reserved 00001000-0009fbff : System RAM [...] 140000000-333ffffff : virtio0 140000000-147ffffff : System RAM 148000000-14fffffff : System RAM 150000000-157ffffff : System RAM [...] 334000000-3033ffffff : virtio1 338000000-33fffffff : System RAM 340000000-347ffffff : System RAM 348000000-34fffffff : System RAM [...] Cc: "Michael S. Tsirkin" Cc: Pankaj Gupta Signed-off-by: David Hildenbrand Link: https://lore.kernel.org/r/20200507140139.17083-12-david@redhat.com Signed-off-by: Michael S. Tsirkin Reviewed-by: Pankaj Gupta --- drivers/virtio/virtio_mem.c | 52 ++++++++++++++++++++++++++++++++++++- 1 file changed, 51 insertions(+), 1 deletion(-) diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c index eb4c16d634e0d..80cdb9e6b3c4c 100644 --- a/drivers/virtio/virtio_mem.c +++ b/drivers/virtio/virtio_mem.c @@ -99,6 +99,9 @@ struct virtio_mem { /* Id of the next memory bock to prepare when needed. */ unsigned long next_mb_id; + /* The parent resource for all memory added via this device. */ + struct resource *parent_resource; + /* Summary of all memory block states. 
*/ unsigned long nb_mb_state[VIRTIO_MEM_MB_STATE_COUNT]; #define VIRTIO_MEM_NB_OFFLINE_THRESHOLD 10 @@ -1741,6 +1744,44 @@ static int virtio_mem_init(struct virtio_mem *vm) return 0; } +static int virtio_mem_create_resource(struct virtio_mem *vm) +{ + /* + * When force-unloading the driver and removing the device, we + * could have a garbage pointer. Duplicate the string. + */ + const char *name = kstrdup(dev_name(&vm->vdev->dev), GFP_KERNEL); + + if (!name) + return -ENOMEM; + + vm->parent_resource = __request_mem_region(vm->addr, vm->region_size, + name, IORESOURCE_SYSTEM_RAM); + if (!vm->parent_resource) { + kfree(name); + dev_warn(&vm->vdev->dev, "could not reserve device region\n"); + return -EBUSY; + } + + /* The memory is not actually busy - make add_memory() work. */ + vm->parent_resource->flags &= ~IORESOURCE_BUSY; + return 0; +} + +static void virtio_mem_delete_resource(struct virtio_mem *vm) +{ + const char *name; + + if (!vm->parent_resource) + return; + + name = vm->parent_resource->name; + release_resource(vm->parent_resource); + kfree(vm->parent_resource); + kfree(name); + vm->parent_resource = NULL; +} + static int virtio_mem_probe(struct virtio_device *vdev) { struct virtio_mem *vm; @@ -1770,11 +1811,16 @@ static int virtio_mem_probe(struct virtio_device *vdev) if (rc) goto out_del_vq; + /* create the parent resource for all memory */ + rc = virtio_mem_create_resource(vm); + if (rc) + goto out_del_vq; + /* register callbacks */ vm->memory_notifier.notifier_call = virtio_mem_memory_notifier_cb; rc = register_memory_notifier(&vm->memory_notifier); if (rc) - goto out_del_vq; + goto out_del_resource; rc = register_virtio_mem_device(vm); if (rc) goto out_unreg_mem; @@ -1788,6 +1834,8 @@ static int virtio_mem_probe(struct virtio_device *vdev) return 0; out_unreg_mem: unregister_memory_notifier(&vm->memory_notifier); +out_del_resource: + virtio_mem_delete_resource(vm); out_del_vq: vdev->config->del_vqs(vdev); out_free_vm: @@ -1848,6 +1896,8 @@ static void virtio_mem_remove(struct virtio_device *vdev) vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL] || vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE_MOVABLE]) dev_warn(&vdev->dev, "device still has system memory added\n"); + else + virtio_mem_delete_resource(vm); /* remove all tracking data - no locking needed */ vfree(vm->mb_state); -- GitLab From 3c42e198e668e4040ef5cf3ad60d57765abc08a4 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 7 May 2020 16:01:36 +0200 Subject: [PATCH 1144/1971] virtio-mem: Drop manual check for already present memory Registering our parent resource will fail if any memory is still present (e.g., because somebody unloaded the driver and tries to reload it). No need for the manual check. Move our "unplug all" handling to after registering the resource. Cc: "Michael S. Tsirkin" Cc: Pankaj Gupta Signed-off-by: David Hildenbrand Link: https://lore.kernel.org/r/20200507140139.17083-13-david@redhat.com Signed-off-by: Michael S. Tsirkin --- drivers/virtio/virtio_mem.c | 55 ++++++++----------------------------- 1 file changed, 12 insertions(+), 43 deletions(-) diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c index 80cdb9e6b3c4c..8dd57b61b09bd 100644 --- a/drivers/virtio/virtio_mem.c +++ b/drivers/virtio/virtio_mem.c @@ -1616,23 +1616,6 @@ static int virtio_mem_init_vq(struct virtio_mem *vm) return 0; } -/* - * Test if any memory in the range is present in Linux. 
- */ -static bool virtio_mem_any_memory_present(unsigned long start, - unsigned long size) -{ - const unsigned long start_pfn = PFN_DOWN(start); - const unsigned long end_pfn = PFN_UP(start + size); - unsigned long pfn; - - for (pfn = start_pfn; pfn != end_pfn; pfn++) - if (present_section_nr(pfn_to_section_nr(pfn))) - return true; - - return false; -} - static int virtio_mem_init(struct virtio_mem *vm) { const uint64_t phys_limit = 1UL << MAX_PHYSMEM_BITS; @@ -1664,32 +1647,6 @@ static int virtio_mem_init(struct virtio_mem *vm) virtio_cread(vm->vdev, struct virtio_mem_config, region_size, &vm->region_size); - /* - * If we still have memory plugged, we might have to unplug all - * memory first. However, if somebody simply unloaded the driver - * we would have to reinitialize the old state - something we don't - * support yet. Detect if we have any memory in the area present. - */ - if (vm->plugged_size) { - uint64_t usable_region_size; - - virtio_cread(vm->vdev, struct virtio_mem_config, - usable_region_size, &usable_region_size); - - if (virtio_mem_any_memory_present(vm->addr, - usable_region_size)) { - dev_err(&vm->vdev->dev, - "reloading the driver is not supported\n"); - return -EINVAL; - } - /* - * Note: it might happen that the device is busy and - * unplugging all memory might take some time. - */ - dev_info(&vm->vdev->dev, "unplugging all memory required\n"); - vm->unplug_all_required = 1; - } - /* * We always hotplug memory in memory block granularity. This way, * we have to wait for exactly one memory block to online. @@ -1760,6 +1717,8 @@ static int virtio_mem_create_resource(struct virtio_mem *vm) if (!vm->parent_resource) { kfree(name); dev_warn(&vm->vdev->dev, "could not reserve device region\n"); + dev_info(&vm->vdev->dev, + "reloading the driver is not supported\n"); return -EBUSY; } @@ -1816,6 +1775,16 @@ static int virtio_mem_probe(struct virtio_device *vdev) if (rc) goto out_del_vq; + /* + * If we still have memory plugged, we have to unplug all memory first. + * Registering our parent resource makes sure that this memory isn't + * actually in use (e.g., trying to reload the driver). + */ + if (vm->plugged_size) { + vm->unplug_all_required = 1; + dev_info(&vm->vdev->dev, "unplugging all memory is required\n"); + } + /* register callbacks */ vm->memory_notifier.notifier_call = virtio_mem_memory_notifier_cb; rc = register_memory_notifier(&vm->memory_notifier); -- GitLab From 562e08cd249f98af3a3e0845998f3b27b56b0067 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 7 May 2020 16:01:37 +0200 Subject: [PATCH 1145/1971] virtio-mem: Unplug subblocks right-to-left We unplug blocks right-to-left, let's also unplug subblocks within a block right-to-left. Cc: "Michael S. Tsirkin" Cc: Pankaj Gupta Signed-off-by: David Hildenbrand Link: https://lore.kernel.org/r/20200507140139.17083-14-david@redhat.com Signed-off-by: Michael S. Tsirkin --- drivers/virtio/virtio_mem.c | 38 ++++++++++++++++--------------------- 1 file changed, 16 insertions(+), 22 deletions(-) diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c index 8dd57b61b09bd..a719e1a04ac78 100644 --- a/drivers/virtio/virtio_mem.c +++ b/drivers/virtio/virtio_mem.c @@ -353,18 +353,6 @@ static bool virtio_mem_mb_test_sb_unplugged(struct virtio_mem *vm, return find_next_bit(vm->sb_bitmap, bit + count, bit) >= bit + count; } -/* - * Find the first plugged subblock. Returns vm->nb_sb_per_mb in case there is - * none. 
- */ -static int virtio_mem_mb_first_plugged_sb(struct virtio_mem *vm, - unsigned long mb_id) -{ - const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb; - - return find_next_bit(vm->sb_bitmap, bit + vm->nb_sb_per_mb, bit) - bit; -} - /* * Find the first unplugged subblock. Returns vm->nb_sb_per_mb in case there is * none. @@ -1016,21 +1004,27 @@ static int virtio_mem_mb_unplug_any_sb(struct virtio_mem *vm, int sb_id, count; int rc; + sb_id = vm->nb_sb_per_mb - 1; while (*nb_sb) { - sb_id = virtio_mem_mb_first_plugged_sb(vm, mb_id); - if (sb_id >= vm->nb_sb_per_mb) + /* Find the next candidate subblock */ + while (sb_id >= 0 && + virtio_mem_mb_test_sb_unplugged(vm, mb_id, sb_id, 1)) + sb_id--; + if (sb_id < 0) break; + /* Try to unplug multiple subblocks at a time */ count = 1; - while (count < *nb_sb && - sb_id + count < vm->nb_sb_per_mb && - virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id + count, - 1)) + while (count < *nb_sb && sb_id > 0 && + virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id - 1, 1)) { count++; + sb_id--; + } rc = virtio_mem_mb_unplug_sb(vm, mb_id, sb_id, count); if (rc) return rc; *nb_sb -= count; + sb_id--; } return 0; @@ -1337,12 +1331,12 @@ static int virtio_mem_mb_unplug_any_sb_online(struct virtio_mem *vm, * we should sense via something like is_mem_section_removable() * first if it makes sense to go ahead any try to allocate. */ - for (sb_id = 0; sb_id < vm->nb_sb_per_mb && *nb_sb; sb_id++) { + for (sb_id = vm->nb_sb_per_mb - 1; sb_id >= 0 && *nb_sb; sb_id--) { /* Find the next candidate subblock */ - while (sb_id < vm->nb_sb_per_mb && + while (sb_id >= 0 && !virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1)) - sb_id++; - if (sb_id >= vm->nb_sb_per_mb) + sb_id--; + if (sb_id < 0) break; start_pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) + -- GitLab From 8d4edcfe78c0008d95effc0c90455cee59e18d10 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 7 May 2020 16:01:38 +0200 Subject: [PATCH 1146/1971] virtio-mem: Use -ETXTBSY as error code if the device is busy Let's be able to distinguish if the device or if memory is busy. Cc: "Michael S. Tsirkin" Cc: Pankaj Gupta Signed-off-by: David Hildenbrand Link: https://lore.kernel.org/r/20200507140139.17083-15-david@redhat.com Signed-off-by: Michael S. Tsirkin --- drivers/virtio/virtio_mem.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c index a719e1a04ac78..abd93b778a26a 100644 --- a/drivers/virtio/virtio_mem.c +++ b/drivers/virtio/virtio_mem.c @@ -893,7 +893,7 @@ static int virtio_mem_send_plug_request(struct virtio_mem *vm, uint64_t addr, case VIRTIO_MEM_RESP_NACK: return -EAGAIN; case VIRTIO_MEM_RESP_BUSY: - return -EBUSY; + return -ETXTBSY; case VIRTIO_MEM_RESP_ERROR: return -EINVAL; default: @@ -919,7 +919,7 @@ static int virtio_mem_send_unplug_request(struct virtio_mem *vm, uint64_t addr, vm->plugged_size -= size; return 0; case VIRTIO_MEM_RESP_BUSY: - return -EBUSY; + return -ETXTBSY; case VIRTIO_MEM_RESP_ERROR: return -EINVAL; default: @@ -941,7 +941,7 @@ static int virtio_mem_send_unplug_all_request(struct virtio_mem *vm) atomic_set(&vm->config_changed, 1); return 0; case VIRTIO_MEM_RESP_BUSY: - return -EBUSY; + return -ETXTBSY; default: return -ENOMEM; } @@ -1557,11 +1557,15 @@ retry: * or we have too many offline memory blocks. 
*/ break; - case -EBUSY: + case -ETXTBSY: /* * The hypervisor cannot process our request right now - * (e.g., out of memory, migrating) or we cannot free up - * any memory to unplug it (all plugged memory is busy). + * (e.g., out of memory, migrating); + */ + case -EBUSY: + /* + * We cannot free up any memory to unplug it (all plugged memory + * is busy). */ case -ENOMEM: /* Out of memory, try again later. */ -- GitLab From 72f9525ad76b1ddfe663285805982e9d57c7b2c2 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 7 May 2020 16:01:39 +0200 Subject: [PATCH 1147/1971] virtio-mem: Try to unplug the complete online memory block first Right now, we always try to unplug single subblocks when processing an online memory block. Let's try to unplug the complete online memory block first, in case it is fully plugged and the unplug request is large enough. Fallback to single subblocks in case the memory block cannot get unplugged as a whole. Cc: "Michael S. Tsirkin" Cc: Pankaj Gupta Signed-off-by: David Hildenbrand Link: https://lore.kernel.org/r/20200507140139.17083-16-david@redhat.com Signed-off-by: Michael S. Tsirkin --- drivers/virtio/virtio_mem.c | 88 ++++++++++++++++++++++++------------- 1 file changed, 57 insertions(+), 31 deletions(-) diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c index abd93b778a26a..9e523db3bee17 100644 --- a/drivers/virtio/virtio_mem.c +++ b/drivers/virtio/virtio_mem.c @@ -1307,6 +1307,46 @@ static int virtio_mem_mb_unplug_any_sb_offline(struct virtio_mem *vm, return 0; } +/* + * Unplug the given plugged subblocks of an online memory block. + * + * Will modify the state of the memory block. + */ +static int virtio_mem_mb_unplug_sb_online(struct virtio_mem *vm, + unsigned long mb_id, int sb_id, + int count) +{ + const unsigned long nr_pages = PFN_DOWN(vm->subblock_size) * count; + unsigned long start_pfn; + int rc; + + start_pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) + + sb_id * vm->subblock_size); + rc = alloc_contig_range(start_pfn, start_pfn + nr_pages, + MIGRATE_MOVABLE, GFP_KERNEL); + if (rc == -ENOMEM) + /* whoops, out of memory */ + return rc; + if (rc) + return -EBUSY; + + /* Mark it as fake-offline before unplugging it */ + virtio_mem_set_fake_offline(start_pfn, nr_pages, true); + adjust_managed_page_count(pfn_to_page(start_pfn), -nr_pages); + + /* Try to unplug the allocated memory */ + rc = virtio_mem_mb_unplug_sb(vm, mb_id, sb_id, count); + if (rc) { + /* Return the memory to the buddy. */ + virtio_mem_fake_online(start_pfn, nr_pages); + return rc; + } + + virtio_mem_mb_set_state(vm, mb_id, + VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL); + return 0; +} + /* * Unplug the desired number of plugged subblocks of an online memory block. * Will skip subblock that are busy. @@ -1321,16 +1361,21 @@ static int virtio_mem_mb_unplug_any_sb_online(struct virtio_mem *vm, unsigned long mb_id, uint64_t *nb_sb) { - const unsigned long nr_pages = PFN_DOWN(vm->subblock_size); - unsigned long start_pfn; int rc, sb_id; - /* - * TODO: To increase the performance we want to try bigger, consecutive - * subblocks first before falling back to single subblocks. Also, - * we should sense via something like is_mem_section_removable() - * first if it makes sense to go ahead any try to allocate. - */ + /* If possible, try to unplug the complete block in one shot. 
*/ + if (*nb_sb >= vm->nb_sb_per_mb && + virtio_mem_mb_test_sb_plugged(vm, mb_id, 0, vm->nb_sb_per_mb)) { + rc = virtio_mem_mb_unplug_sb_online(vm, mb_id, 0, + vm->nb_sb_per_mb); + if (!rc) { + *nb_sb -= vm->nb_sb_per_mb; + goto unplugged; + } else if (rc != -EBUSY) + return rc; + } + + /* Fallback to single subblocks. */ for (sb_id = vm->nb_sb_per_mb - 1; sb_id >= 0 && *nb_sb; sb_id--) { /* Find the next candidate subblock */ while (sb_id >= 0 && @@ -1339,34 +1384,15 @@ static int virtio_mem_mb_unplug_any_sb_online(struct virtio_mem *vm, if (sb_id < 0) break; - start_pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) + - sb_id * vm->subblock_size); - rc = alloc_contig_range(start_pfn, start_pfn + nr_pages, - MIGRATE_MOVABLE, GFP_KERNEL); - if (rc == -ENOMEM) - /* whoops, out of memory */ - return rc; - if (rc) - /* memory busy, we can't unplug this chunk */ + rc = virtio_mem_mb_unplug_sb_online(vm, mb_id, sb_id, 1); + if (rc == -EBUSY) continue; - - /* Mark it as fake-offline before unplugging it */ - virtio_mem_set_fake_offline(start_pfn, nr_pages, true); - adjust_managed_page_count(pfn_to_page(start_pfn), -nr_pages); - - /* Try to unplug the allocated memory */ - rc = virtio_mem_mb_unplug_sb(vm, mb_id, sb_id, 1); - if (rc) { - /* Return the memory to the buddy. */ - virtio_mem_fake_online(start_pfn, nr_pages); + else if (rc) return rc; - } - - virtio_mem_mb_set_state(vm, mb_id, - VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL); *nb_sb -= 1; } +unplugged: /* * Once all subblocks of a memory block were unplugged, offline and * remove it. This will usually not fail, as no memory is in use -- GitLab From fce8afd76e3a4d8c59c92f84f8027569fd7031d0 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Fri, 15 May 2020 12:14:02 +0200 Subject: [PATCH 1148/1971] virtio-mem: Don't rely on implicit compiler padding for requests The compiler will add padding after the last member, make that explicit. The size of a request is always 24 bytes. The size of a response always 10 bytes. Add compile-time checks. Cc: "Michael S. Tsirkin" Cc: Pankaj Gupta Cc: teawater Signed-off-by: David Hildenbrand Link: https://lore.kernel.org/r/20200515101402.16597-1-david@redhat.com Signed-off-by: Michael S. 
Tsirkin --- drivers/virtio/virtio_mem.c | 3 +++ include/uapi/linux/virtio_mem.h | 3 +++ 2 files changed, 6 insertions(+) diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c index 9e523db3bee17..f658fe9149beb 100644 --- a/drivers/virtio/virtio_mem.c +++ b/drivers/virtio/virtio_mem.c @@ -1770,6 +1770,9 @@ static int virtio_mem_probe(struct virtio_device *vdev) struct virtio_mem *vm; int rc = -EINVAL; + BUILD_BUG_ON(sizeof(struct virtio_mem_req) != 24); + BUILD_BUG_ON(sizeof(struct virtio_mem_resp) != 10); + vdev->priv = vm = kzalloc(sizeof(*vm), GFP_KERNEL); if (!vm) return -ENOMEM; diff --git a/include/uapi/linux/virtio_mem.h b/include/uapi/linux/virtio_mem.h index e0a9dc7397c34..a455c488a995c 100644 --- a/include/uapi/linux/virtio_mem.h +++ b/include/uapi/linux/virtio_mem.h @@ -103,16 +103,19 @@ struct virtio_mem_req_plug { __virtio64 addr; __virtio16 nb_blocks; + __virtio16 padding[3]; }; struct virtio_mem_req_unplug { __virtio64 addr; __virtio16 nb_blocks; + __virtio16 padding[3]; }; struct virtio_mem_req_state { __virtio64 addr; __virtio16 nb_blocks; + __virtio16 padding[3]; }; struct virtio_mem_req { -- GitLab From 49b23575943c04b6711107cfd08ad2b3ae4e81f5 Mon Sep 17 00:00:00 2001 From: Matthieu Baerts Date: Wed, 3 Jun 2020 21:03:47 +0200 Subject: [PATCH 1149/1971] bpf: Fix unused-var without NETDEVICES A recent commit added new variables only used if CONFIG_NETDEVICES is set. A simple fix would be to only declare these variables if the same condition is valid but Alexei suggested an even simpler solution: since CONFIG_NETDEVICES doesn't change anything in .h I think the best is to remove #ifdef CONFIG_NETDEVICES from net/core/filter.c and rely on sock_bindtoindex() returning ENOPROTOOPT in the extreme case of oddly configured kernels. Fixes: 70c58997c1e8 ("bpf: Allow SO_BINDTODEVICE opt in bpf_setsockopt") Suggested-by: Alexei Starovoitov Signed-off-by: Matthieu Baerts Signed-off-by: Daniel Borkmann Acked-by: Song Liu Link: https://lore.kernel.org/bpf/20200603190347.2310320-1-matthieu.baerts@tessares.net --- net/core/filter.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/net/core/filter.c b/net/core/filter.c index d01a244b5087a..90d2eb77002fc 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -4340,8 +4340,6 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname, } break; case SO_BINDTODEVICE: - ret = -ENOPROTOOPT; -#ifdef CONFIG_NETDEVICES optlen = min_t(long, optlen, IFNAMSIZ - 1); strncpy(devname, optval, optlen); devname[optlen] = 0; @@ -4360,7 +4358,6 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname, dev_put(dev); } ret = sock_bindtoindex(sk, ifindex, false); -#endif break; default: ret = -EINVAL; -- GitLab From e7ed83d6fa1a00d0f2ad0327e73d3ea9e7ea8de1 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 4 Jun 2020 11:54:36 +0300 Subject: [PATCH 1150/1971] bpf: Fix an error code in check_btf_func() This code returns success if the "info_aux" allocation fails but it should return -ENOMEM. 
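The bug class is simple to see in isolation; a hypothetical helper (not the verifier code itself) illustrating why ret has to start out as -ENOMEM when the allocation-failure path just returns ret:

#include <linux/slab.h>
#include <linux/types.h>

static int alloc_func_aux(void **info_aux, u32 nfuncs, size_t elem_size)
{
        int ret = -ENOMEM;      /* before the fix: "int ret = 0;" */

        *info_aux = kcalloc(nfuncs, elem_size, GFP_KERNEL);
        if (!*info_aux)
                return ret;     /* failure now reported as -ENOMEM, not success */

        return 0;
}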
Fixes: 8c1b6e69dcc1 ("bpf: Compare BTF types of functions arguments with actual types") Signed-off-by: Dan Carpenter Signed-off-by: Daniel Borkmann Acked-by: Song Liu Link: https://lore.kernel.org/bpf/20200604085436.GA943001@mwanda --- kernel/bpf/verifier.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 5c7bbaac81ef9..34cde841ab681 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -7552,7 +7552,7 @@ static int check_btf_func(struct bpf_verifier_env *env, const struct btf *btf; void __user *urecord; u32 prev_offset = 0; - int ret = 0; + int ret = -ENOMEM; nfuncs = attr->func_info_cnt; if (!nfuncs) -- GitLab From 6ac92fb5cdff6e5708199f1d5d9d58011ccc76a0 Mon Sep 17 00:00:00 2001 From: Martijn Coenen Date: Thu, 4 Jun 2020 22:25:20 +0200 Subject: [PATCH 1151/1971] loop: Fix wrong masking of status flags In faf1d25440d6, loop_set_status() now assigns lo_status directly from the passed in lo_flags, but then fixes it up by masking out flags that can't be set by LOOP_SET_STATUS; unfortunately the mask was negated. Re-ran all ltp ioctl_loop tests, and they all passed. Pass run of the previously failing one: tst_test.c:1247: INFO: Timeout per run is 0h 05m 00s tst_device.c:88: INFO: Found free device 0 '/dev/loop0' ioctl_loop01.c:49: PASS: /sys/block/loop0/loop/partscan = 0 ioctl_loop01.c:50: PASS: /sys/block/loop0/loop/autoclear = 0 ioctl_loop01.c:51: PASS: /sys/block/loop0/loop/backing_file = '/tmp/ZRJ6H4/test.img' ioctl_loop01.c:65: PASS: get expected lo_flag 12 ioctl_loop01.c:67: PASS: /sys/block/loop0/loop/partscan = 1 ioctl_loop01.c:68: PASS: /sys/block/loop0/loop/autoclear = 1 ioctl_loop01.c:77: PASS: access /dev/loop0p1 succeeds ioctl_loop01.c:83: PASS: access /sys/block/loop0/loop0p1 succeeds Summary: passed 8 failed 0 skipped 0 warnings 0 Fixes: faf1d25440d6 ("loop: Clean up LOOP_SET_STATUS lo_flags handling") Reported-by: Naresh Kamboju Signed-off-by: Martijn Coenen Tested-by: Naresh Kamboju Signed-off-by: Jens Axboe --- drivers/block/loop.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 4212288ab157c..ad63e42478683 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1390,7 +1390,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) goto out_unfreeze; /* Mask out flags that can't be set using LOOP_SET_STATUS. */ - lo->lo_flags &= ~LOOP_SET_STATUS_SETTABLE_FLAGS; + lo->lo_flags &= LOOP_SET_STATUS_SETTABLE_FLAGS; /* For those flags, use the previous values instead */ lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_SETTABLE_FLAGS; /* For flags that can't be cleared, use previous values too */ -- GitLab From d24de76af836260a99ca2ba281a937bd5bc55591 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 Jun 2020 07:14:43 +0200 Subject: [PATCH 1152/1971] block: remove the error argument to the block_bio_complete tracepoint The status can be trivially derived from the bio itself. That also avoid callers like NVMe to incorrectly pass a blk_status_t instead of the errno, and the overhead of translating the blk_status_t to the errno in the I/O completion fast path when no tracing is enabled. 
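A minimal sketch of the idea with a hypothetical helper (the real change is in the block_bio_complete tracepoint and its callers): the errno is derived from bio->bi_status only at the point where the completion is reported, so callers pass just the queue and the bio:

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Mirrors the (q, bio) signature the tracepoint now uses. */
static inline void report_bio_complete(struct request_queue *q,
                                       struct bio *bio)
{
        int error = blk_status_to_errno(bio->bi_status);

        pr_debug("bio complete: dev %u sector %llu error %d\n",
                 bio_dev(bio),
                 (unsigned long long)bio->bi_iter.bi_sector, error);
}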
Fixes: 35fe0d12c8a3 ("nvme: trace bio completion") Signed-off-by: Christoph Hellwig Reviewed-by: Sagi Grimberg Reviewed-by: Chaitanya Kulkarni Signed-off-by: Jens Axboe --- block/bio.c | 3 +-- drivers/nvme/host/nvme.h | 3 +-- include/trace/events/block.h | 6 +++--- kernel/trace/blktrace.c | 6 +++--- 4 files changed, 8 insertions(+), 10 deletions(-) diff --git a/block/bio.c b/block/bio.c index 5235da6434aab..a7366c02c9b57 100644 --- a/block/bio.c +++ b/block/bio.c @@ -1434,8 +1434,7 @@ again: } if (bio->bi_disk && bio_flagged(bio, BIO_TRACE_COMPLETION)) { - trace_block_bio_complete(bio->bi_disk->queue, bio, - blk_status_to_errno(bio->bi_status)); + trace_block_bio_complete(bio->bi_disk->queue, bio); bio_clear_flag(bio, BIO_TRACE_COMPLETION); } diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index fa5c75501049d..c0f4226d32992 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -599,8 +599,7 @@ static inline void nvme_trace_bio_complete(struct request *req, struct nvme_ns *ns = req->q->queuedata; if (req->cmd_flags & REQ_NVME_MPATH) - trace_block_bio_complete(ns->head->disk->queue, - req->bio, status); + trace_block_bio_complete(ns->head->disk->queue, req->bio); } extern struct device_attribute dev_attr_ana_grpid; diff --git a/include/trace/events/block.h b/include/trace/events/block.h index 81b43f5bdf237..1257f26bb887b 100644 --- a/include/trace/events/block.h +++ b/include/trace/events/block.h @@ -261,9 +261,9 @@ TRACE_EVENT(block_bio_bounce, */ TRACE_EVENT(block_bio_complete, - TP_PROTO(struct request_queue *q, struct bio *bio, int error), + TP_PROTO(struct request_queue *q, struct bio *bio), - TP_ARGS(q, bio, error), + TP_ARGS(q, bio), TP_STRUCT__entry( __field( dev_t, dev ) @@ -277,7 +277,7 @@ TRACE_EVENT(block_bio_complete, __entry->dev = bio_dev(bio); __entry->sector = bio->bi_iter.bi_sector; __entry->nr_sector = bio_sectors(bio); - __entry->error = error; + __entry->error = blk_status_to_errno(bio->bi_status); blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); ), diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index ea47f20840870..1e5499414cdf1 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -885,10 +885,10 @@ static void blk_add_trace_bio_bounce(void *ignore, } static void blk_add_trace_bio_complete(void *ignore, - struct request_queue *q, struct bio *bio, - int error) + struct request_queue *q, struct bio *bio) { - blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error); + blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, + blk_status_to_errno(bio->bi_status)); } static void blk_add_trace_bio_backmerge(void *ignore, -- GitLab From 15b81ce5abdc4b502aa31dff2d415b79d2349d2f Mon Sep 17 00:00:00 2001 From: "Ahmed S. Darwish" Date: Wed, 3 Jun 2020 16:49:48 +0200 Subject: [PATCH 1153/1971] block: nr_sects_write(): Disable preemption on seqcount write For optimized block readers not holding a mutex, the "number of sectors" 64-bit value is protected from tearing on 32-bit architectures by a sequence counter. Disable preemption before entering that sequence counter's write side critical section. Otherwise, the read side can preempt the write side section and spin for the entire scheduler tick. If the reader belongs to a real-time scheduling class, it can spin forever and the kernel will livelock. Fixes: c83f6bf98dc1 ("block: add partition resize function to blkpg ioctl") Cc: Signed-off-by: Ahmed S. 
Darwish Reviewed-by: Sebastian Andrzej Siewior Signed-off-by: Jens Axboe --- block/blk.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/block/blk.h b/block/blk.h index aa16e524dc35e..b5d1f0fc6547c 100644 --- a/block/blk.h +++ b/block/blk.h @@ -420,9 +420,11 @@ static inline sector_t part_nr_sects_read(struct hd_struct *part) static inline void part_nr_sects_write(struct hd_struct *part, sector_t size) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) + preempt_disable(); write_seqcount_begin(&part->nr_sects_seq); part->nr_sects = size; write_seqcount_end(&part->nr_sects_seq); + preempt_enable(); #elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION) preempt_disable(); part->nr_sects = size; -- GitLab From 48bc3cd3e07a1486f45d9971c75d6090976c3b1b Mon Sep 17 00:00:00 2001 From: Chaitanya Kulkarni Date: Thu, 4 Jun 2020 00:13:28 -0700 Subject: [PATCH 1154/1971] blktrace: use errno instead of bi_status In blk_add_trace_spliti() blk_add_trace_bio_remap() use blk_status_to_errno() to pass the error instead of pasing the bi_status. This fixes the sparse warning. Signed-off-by: Chaitanya Kulkarni Signed-off-by: Jens Axboe --- kernel/trace/blktrace.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 1e5499414cdf1..e2013bd0e2a63 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -995,8 +995,10 @@ static void blk_add_trace_split(void *ignore, __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf, - BLK_TA_SPLIT, bio->bi_status, sizeof(rpdu), - &rpdu, blk_trace_bio_get_cgid(q, bio)); + BLK_TA_SPLIT, + blk_status_to_errno(bio->bi_status), + sizeof(rpdu), &rpdu, + blk_trace_bio_get_cgid(q, bio)); } rcu_read_unlock(); } @@ -1033,7 +1035,8 @@ static void blk_add_trace_bio_remap(void *ignore, r.sector_from = cpu_to_be64(from); __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, - bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_status, + bio_op(bio), bio->bi_opf, BLK_TA_REMAP, + blk_status_to_errno(bio->bi_status), sizeof(r), &r, blk_trace_bio_get_cgid(q, bio)); rcu_read_unlock(); } -- GitLab From 71df3fd82e7cccec7b749a8607a4662d9f7febdd Mon Sep 17 00:00:00 2001 From: Chaitanya Kulkarni Date: Thu, 4 Jun 2020 00:13:29 -0700 Subject: [PATCH 1155/1971] blktrace: fix endianness in get_pdu_int() In function get_pdu_len() replace variable type from __u64 to __be64. This fixes sparse warning. Signed-off-by: Chaitanya Kulkarni Signed-off-by: Jens Axboe --- kernel/trace/blktrace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index e2013bd0e2a63..f857b4684bebe 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -1256,7 +1256,7 @@ static inline __u16 t_error(const struct trace_entry *ent) static __u64 get_pdu_int(const struct trace_entry *ent, bool has_cg) { - const __u64 *val = pdu_start(ent, has_cg); + const __be64 *val = pdu_start(ent, has_cg); return be64_to_cpu(*val); } -- GitLab From 5aec598c456fe3c1b71a1202cbb42bdc2a643277 Mon Sep 17 00:00:00 2001 From: Chaitanya Kulkarni Date: Thu, 4 Jun 2020 00:13:30 -0700 Subject: [PATCH 1156/1971] blktrace: fix endianness for blk_log_remap() The function blk_log_remap() can be simplified by removing the call to get_pdu_remap() that copies the values into extra variable to print the data, which also fixes the endiannness warning reported by sparse. 
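The pattern behind the simplification, shown with a hypothetical structure that mirrors the on-wire remap record: keep big-endian fields typed __be32/__be64 and convert with be32_to_cpu()/be64_to_cpu() right where they are consumed, instead of copying them into a CPU-endian struct first, so sparse can verify the annotations:

#include <linux/kdev_t.h>
#include <linux/seq_file.h>
#include <linux/types.h>
#include <asm/byteorder.h>

struct example_remap {          /* hypothetical, laid out like the trace pdu */
        __be32 device_from;
        __be32 device_to;
        __be64 sector_from;
};

static void print_remap(struct seq_file *m, const struct example_remap *r)
{
        /* Convert from big-endian exactly where the values are used. */
        seq_printf(m, "<- (%d,%d) %llu\n",
                   MAJOR(be32_to_cpu(r->device_from)),
                   MINOR(be32_to_cpu(r->device_from)),
                   (unsigned long long)be64_to_cpu(r->sector_from));
}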
Signed-off-by: Chaitanya Kulkarni Signed-off-by: Jens Axboe --- kernel/trace/blktrace.c | 19 ++++--------------- 1 file changed, 4 insertions(+), 15 deletions(-) diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index f857b4684bebe..5773f0ba7e765 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -1260,17 +1260,6 @@ static __u64 get_pdu_int(const struct trace_entry *ent, bool has_cg) return be64_to_cpu(*val); } -static void get_pdu_remap(const struct trace_entry *ent, - struct blk_io_trace_remap *r, bool has_cg) -{ - const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg); - __u64 sector_from = __r->sector_from; - - r->device_from = be32_to_cpu(__r->device_from); - r->device_to = be32_to_cpu(__r->device_to); - r->sector_from = be64_to_cpu(sector_from); -} - typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act, bool has_cg); @@ -1410,13 +1399,13 @@ static void blk_log_with_error(struct trace_seq *s, static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) { - struct blk_io_trace_remap r = { .device_from = 0, }; + const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg); - get_pdu_remap(ent, &r, has_cg); trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n", t_sector(ent), t_sec(ent), - MAJOR(r.device_from), MINOR(r.device_from), - (unsigned long long)r.sector_from); + MAJOR(be32_to_cpu(__r->device_from)), + MINOR(be32_to_cpu(__r->device_from)), + be64_to_cpu(__r->sector_from)); } static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) -- GitLab From 79ea1e12c0b8540100e89b32afb9f0e6503fad35 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 4 Jun 2020 12:04:20 +0200 Subject: [PATCH 1157/1971] cfg80211: fix management registrations deadlock Lockdep reports that we may deadlock because we take the RTNL on the work struct, but flush it under RTNL. Clearly, it's correct. In practice, this can happen when doing rfkill on an active device. Fix this by moving the work struct to the wiphy (registered dev) layer, and iterate over all the wdevs inside there. This then means we need to track which one of them has work to do, so we don't update to the driver for all wdevs all the time. Also fix a locking bug I noticed while working on this - the registrations list is iterated as if it was an RCU list, but it isn't handle that way - and we need to lock now for the update flag anyway, so remove the RCU. Fixes: 6cd536fe62ef ("cfg80211: change internal management frame registration API") Reported-by: Markus Theil Reported-and-tested-by: Kenneth R. 
Crudup Signed-off-by: Johannes Berg Link: https://lore.kernel.org/r/20200604120420.b1dc540a7e26.I55dcca56bb5bdc5d7ad66a36a0b42afd7034d8be@changeid Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 5 +++-- net/wireless/core.c | 6 +++--- net/wireless/core.h | 2 ++ net/wireless/mlme.c | 26 +++++++++++++++++++++----- 4 files changed, 29 insertions(+), 10 deletions(-) diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index b58ad1a3f6956..fc7e8807838d9 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -5075,7 +5075,8 @@ struct cfg80211_cqm_config; * by cfg80211 on change_interface * @mgmt_registrations: list of registrations for management frames * @mgmt_registrations_lock: lock for the list - * @mgmt_registrations_update_wk: update work to defer from atomic context + * @mgmt_registrations_need_update: mgmt registrations were updated, + * need to propagate the update to the driver * @mtx: mutex used to lock data in this struct, may be used by drivers * and some API functions require it held * @beacon_interval: beacon interval used on this device for transmitting @@ -5121,7 +5122,7 @@ struct wireless_dev { struct list_head mgmt_registrations; spinlock_t mgmt_registrations_lock; - struct work_struct mgmt_registrations_update_wk; + u8 mgmt_registrations_need_update:1; struct mutex mtx; diff --git a/net/wireless/core.c b/net/wireless/core.c index f0226ae9561ca..c623d9bf5096b 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -497,6 +497,8 @@ use_default_name: INIT_WORK(&rdev->propagate_radar_detect_wk, cfg80211_propagate_radar_detect_wk); INIT_WORK(&rdev->propagate_cac_done_wk, cfg80211_propagate_cac_done_wk); + INIT_WORK(&rdev->mgmt_registrations_update_wk, + cfg80211_mgmt_registrations_update_wk); #ifdef CONFIG_CFG80211_DEFAULT_PS rdev->wiphy.flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; @@ -1047,6 +1049,7 @@ void wiphy_unregister(struct wiphy *wiphy) flush_work(&rdev->sched_scan_stop_wk); flush_work(&rdev->propagate_radar_detect_wk); flush_work(&rdev->propagate_cac_done_wk); + flush_work(&rdev->mgmt_registrations_update_wk); #ifdef CONFIG_PM if (rdev->wiphy.wowlan_config && rdev->ops->set_wakeup) @@ -1108,7 +1111,6 @@ static void __cfg80211_unregister_wdev(struct wireless_dev *wdev, bool sync) rdev->devlist_generation++; cfg80211_mlme_purge_registrations(wdev); - flush_work(&wdev->mgmt_registrations_update_wk); switch (wdev->iftype) { case NL80211_IFTYPE_P2P_DEVICE: @@ -1253,8 +1255,6 @@ void cfg80211_init_wdev(struct cfg80211_registered_device *rdev, spin_lock_init(&wdev->event_lock); INIT_LIST_HEAD(&wdev->mgmt_registrations); spin_lock_init(&wdev->mgmt_registrations_lock); - INIT_WORK(&wdev->mgmt_registrations_update_wk, - cfg80211_mgmt_registrations_update_wk); INIT_LIST_HEAD(&wdev->pmsr_list); spin_lock_init(&wdev->pmsr_lock); INIT_WORK(&wdev->pmsr_free_wk, cfg80211_pmsr_free_wk); diff --git a/net/wireless/core.h b/net/wireless/core.h index e0e5b3ee96998..67b0389fca4dc 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -99,6 +99,8 @@ struct cfg80211_registered_device { struct cfg80211_chan_def cac_done_chandef; struct work_struct propagate_cac_done_wk; + struct work_struct mgmt_registrations_update_wk; + /* must be last because of the way we do wiphy_priv(), * and it should at least be aligned to NETDEV_ALIGN */ struct wiphy wiphy __aligned(NETDEV_ALIGN); diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index 189334314cbae..a6c61a2e65691 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c @@ -440,9 +440,15 @@ static void 
cfg80211_mgmt_registrations_update(struct wireless_dev *wdev) ASSERT_RTNL(); + spin_lock_bh(&wdev->mgmt_registrations_lock); + if (!wdev->mgmt_registrations_need_update) { + spin_unlock_bh(&wdev->mgmt_registrations_lock); + return; + } + rcu_read_lock(); list_for_each_entry_rcu(tmp, &rdev->wiphy.wdev_list, list) { - list_for_each_entry_rcu(reg, &tmp->mgmt_registrations, list) { + list_for_each_entry(reg, &tmp->mgmt_registrations, list) { u32 mask = BIT(le16_to_cpu(reg->frame_type) >> 4); u32 mcast_mask = 0; @@ -460,16 +466,23 @@ static void cfg80211_mgmt_registrations_update(struct wireless_dev *wdev) } rcu_read_unlock(); + wdev->mgmt_registrations_need_update = 0; + spin_unlock_bh(&wdev->mgmt_registrations_lock); + rdev_update_mgmt_frame_registrations(rdev, wdev, &upd); } void cfg80211_mgmt_registrations_update_wk(struct work_struct *wk) { - struct wireless_dev *wdev = container_of(wk, struct wireless_dev, - mgmt_registrations_update_wk); + struct cfg80211_registered_device *rdev; + struct wireless_dev *wdev; + + rdev = container_of(wk, struct cfg80211_registered_device, + mgmt_registrations_update_wk); rtnl_lock(); - cfg80211_mgmt_registrations_update(wdev); + list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) + cfg80211_mgmt_registrations_update(wdev); rtnl_unlock(); } @@ -557,6 +570,7 @@ int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid, nreg->multicast_rx = multicast_rx; list_add(&nreg->list, &wdev->mgmt_registrations); } + wdev->mgmt_registrations_need_update = 1; spin_unlock_bh(&wdev->mgmt_registrations_lock); cfg80211_mgmt_registrations_update(wdev); @@ -585,7 +599,8 @@ void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlportid) list_del(®->list); kfree(reg); - schedule_work(&wdev->mgmt_registrations_update_wk); + wdev->mgmt_registrations_need_update = 1; + schedule_work(&rdev->mgmt_registrations_update_wk); } spin_unlock_bh(&wdev->mgmt_registrations_lock); @@ -608,6 +623,7 @@ void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev) list_del(®->list); kfree(reg); } + wdev->mgmt_registrations_need_update = 1; spin_unlock_bh(&wdev->mgmt_registrations_lock); cfg80211_mgmt_registrations_update(wdev); -- GitLab From 9029537c93b6f7347cf213d4e3b5c935a4d07ac8 Mon Sep 17 00:00:00 2001 From: Jason Yan Date: Fri, 3 Apr 2020 16:15:44 +0800 Subject: [PATCH 1158/1971] mtd: physmap_of_gemini: remove defined but not used symbol 'syscon_match' MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It's not used by anyone now, remove it. 
Fix the following gcc warning: drivers/mtd/maps/physmap-gemini.c:49:34: warning: ‘syscon_match’ defined but not used [-Wunused-const-variable=] static const struct of_device_id syscon_match[] = { ^~~~~~~~~~~~ Reported-by: Hulk Robot Signed-off-by: Jason Yan Reviewed-by: Linus Walleij Signed-off-by: Richard Weinberger --- drivers/mtd/maps/physmap-gemini.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/drivers/mtd/maps/physmap-gemini.c b/drivers/mtd/maps/physmap-gemini.c index a289c8b5cabf8..d4a46e159d38f 100644 --- a/drivers/mtd/maps/physmap-gemini.c +++ b/drivers/mtd/maps/physmap-gemini.c @@ -46,11 +46,6 @@ #define FLASH_PARALLEL_HIGH_PIN_CNT (1 << 20) /* else low pin cnt */ -static const struct of_device_id syscon_match[] = { - { .compatible = "cortina,gemini-syscon" }, - { }, -}; - struct gemini_flash { struct device *dev; struct pinctrl *p; -- GitLab From eb13fa0227417e84aecc3bd9c029d376e33474d3 Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Wed, 29 Apr 2020 09:53:47 -0700 Subject: [PATCH 1159/1971] mtd: parser: cmdline: Support MTD names containing one or more colons Looks like some drivers define MTD names with a colon in it, thus making mtdpart= parsing impossible. Let's fix the parser to gracefully handle that case: the last ':' in a partition definition sequence is considered instead of the first one. Signed-off-by: Boris Brezillon Signed-off-by: Ron Minnich Tested-by: Ron Minnich Signed-off-by: Richard Weinberger --- drivers/mtd/parsers/cmdlinepart.c | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/drivers/mtd/parsers/cmdlinepart.c b/drivers/mtd/parsers/cmdlinepart.c index af712f1519c57..a79e4d866b08a 100644 --- a/drivers/mtd/parsers/cmdlinepart.c +++ b/drivers/mtd/parsers/cmdlinepart.c @@ -226,12 +226,29 @@ static int mtdpart_setup_real(char *s) struct cmdline_mtd_partition *this_mtd; struct mtd_partition *parts; int mtd_id_len, num_parts; - char *p, *mtd_id; + char *p, *mtd_id, *semicol; + + /* + * Replace the first ';' by a NULL char so strrchr can work + * properly. + */ + semicol = strchr(s, ';'); + if (semicol) + *semicol = '\0'; mtd_id = s; - /* fetch */ - p = strchr(s, ':'); + /* + * fetch . We use strrchr to ignore all ':' that could + * be present in the MTD name, only the last one is interpreted + * as an / separator. + */ + p = strrchr(s, ':'); + + /* Restore the ';' now. */ + if (semicol) + *semicol = ';'; + if (!p) { pr_err("no mtd-id\n"); return -EINVAL; -- GitLab From 5788ccf3c84f5587418a80128a3653aa35abf00b Mon Sep 17 00:00:00 2001 From: Xiaoming Ni Date: Tue, 31 Mar 2020 09:31:59 +0800 Subject: [PATCH 1160/1971] mtd: clear cache_state to avoid writing to bad blocks repeatedly The function call process is as follows: mtd_blktrans_work() while (1) do_blktrans_request() mtdblock_writesect() do_cached_write() write_cached_data() /*if cache_state is STATE_DIRTY*/ erase_write() write_cached_data() returns failure without modifying cache_state and cache_offset. So when do_cached_write() is called again, write_cached_data() will be called again to perform erase_write() on the same cache_offset. But if this cache_offset points to a bad block, erase_write() will always return -EIO. Writing to this mtdblk is equivalent to losing the current data, and repeatedly writing to the bad block. Repeatedly writing a bad block has no real benefits, but brings some negative effects: 1 Lost subsequent data 2 Loss of flash device life 3 erase_write() bad blocks are very time-consuming. 
For example: the function do_erase_oneblock() in chips/cfi_cmdset_0020.c or chips/cfi_cmdset_0002.c may take more than 20 seconds to return Therefore, when erase_write() returns -EIO in write_cached_data(), clear cache_state to avoid writing to bad blocks repeatedly. Signed-off-by: Xiaoming Ni Reviewed-by: Miquel Raynal Signed-off-by: Richard Weinberger --- drivers/mtd/mtdblock.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c index 078e0f67377da..32e52d83b961e 100644 --- a/drivers/mtd/mtdblock.c +++ b/drivers/mtd/mtdblock.c @@ -89,8 +89,6 @@ static int write_cached_data (struct mtdblk_dev *mtdblk) ret = erase_write (mtd, mtdblk->cache_offset, mtdblk->cache_size, mtdblk->cache_data); - if (ret) - return ret; /* * Here we could arguably set the cache state to STATE_CLEAN. @@ -98,9 +96,14 @@ static int write_cached_data (struct mtdblk_dev *mtdblk) * be notified if this content is altered on the flash by other * means. Let's declare it empty and leave buffering tasks to * the buffer cache instead. + * + * If this cache_offset points to a bad block, data cannot be + * written to the device. Clear cache_state to avoid writing to + * bad blocks repeatedly. */ - mtdblk->cache_state = STATE_EMPTY; - return 0; + if (ret == 0 || ret == -EIO) + mtdblk->cache_state = STATE_EMPTY; + return ret; } -- GitLab From 5872f1a2e5c783783d51e96468f0ff6aede61182 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 11 May 2020 21:59:51 +0100 Subject: [PATCH 1161/1971] READ_ONCE: Fix comment describing 2x32-bit atomicity READ_ONCE() permits 64-bit accesses on 32-bit architectures, since this crops up in a few places and is generally harmless because either the upper bits are always zero (e.g. for a virtual address or 32-bit time_t) or the architecture provides 64-bit atomicity anyway. Update the corresponding comment above compiletime_assert_rwonce_type(), which incorrectly states that 32-bit x86 provides 64-bit atomicity, and instead reference 32-bit Armv7 with LPAE. Cc: Thomas Gleixner Cc: Peter Zijlstra Reported-by: Jann Horn Signed-off-by: Will Deacon --- include/linux/compiler.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/include/linux/compiler.h b/include/linux/compiler.h index c363d8debc43f..657e4fd38a771 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -332,9 +332,9 @@ static inline void *offset_to_ptr(const int *off) /* * Yes, this permits 64-bit accesses on 32-bit architectures. These will - * actually be atomic in many cases (namely x86), but for others we rely on - * the access being split into 2x32-bit accesses for a 32-bit quantity (e.g. - * a virtual address) and a strong prevailing wind. + * actually be atomic in some cases (namely Armv7 + LPAE), but for others we + * rely on the access being split into 2x32-bit accesses for a 32-bit quantity + * (e.g. a virtual address) and a strong prevailing wind. */ #define compiletime_assert_rwonce_type(t) \ compiletime_assert(__native_word(t) || sizeof(t) == sizeof(long long), \ -- GitLab From 8d4beed7bbc71666de2630b79899c8852c3bf5cd Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 5 Jun 2020 11:05:51 +0100 Subject: [PATCH 1162/1971] compiler-types.h: Include naked type in __pick_integer_type() match __pick_integer_type() checks whether the type of its first argument is compatible with an explicitly signed or unsigned integer type, returning the compatible type if it exists. 
Unfortunately, 'char' is neither compatible with 'signed char' nor 'unsigned char', so add a check against the naked type to allow the __unqual_scalar_typeof() macro to strip qualifiers from char types without an explicit signedness. Reported-by: Rasmus Villemoes Signed-off-by: Will Deacon --- include/linux/compiler_types.h | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 233066c92f6f3..6ed0612bc1432 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -220,9 +220,14 @@ struct ftrace_likely_data { #define __pick_scalar_type(x, type, otherwise) \ __builtin_choose_expr(__same_type(x, type), (type)0, otherwise) +/* + * 'char' is not type-compatible with either 'signed char' or 'unsigned char', + * so we include the naked type here as well as the signed/unsigned variants. + */ #define __pick_integer_type(x, type, otherwise) \ - __pick_scalar_type(x, unsigned type, \ - __pick_scalar_type(x, signed type, otherwise)) + __pick_scalar_type(x, type, \ + __pick_scalar_type(x, unsigned type, \ + __pick_scalar_type(x, signed type, otherwise))) #define __unqual_scalar_typeof(x) typeof( \ __pick_integer_type(x, char, \ -- GitLab From b16d8ecf4fa17e16fff20638364f9bd2205615e7 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 5 Jun 2020 11:19:46 +0100 Subject: [PATCH 1163/1971] compiler.h: Enforce that READ_ONCE_NOCHECK() access size is sizeof(long) READ_ONCE_NOCHECK() unconditionally performs a sizeof(long)-sized access, so enforce that the size of the pointed-to object that we are loading from is the same size as 'long'. Reported-by: Marco Elver Signed-off-by: Will Deacon --- include/linux/compiler.h | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 657e4fd38a771..a0aa56e6b7823 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -254,9 +254,12 @@ unsigned long __read_once_word_nocheck(const void *addr) */ #define READ_ONCE_NOCHECK(x) \ ({ \ - unsigned long __x = __read_once_word_nocheck(&(x)); \ + unsigned long __x; \ + compiletime_assert(sizeof(x) == sizeof(__x), \ + "Unsupported access size for READ_ONCE_NOCHECK()."); \ + __x = __read_once_word_nocheck(&(x)); \ smp_read_barrier_depends(); \ - __x; \ + (typeof(x))__x; \ }) static __no_kasan_or_inline -- GitLab From 1fd76043ecb04b8567a76b106db09ac778e1e2b8 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Wed, 27 May 2020 12:32:36 +0200 Subject: [PATCH 1164/1971] compiler_types.h: Optimize __unqual_scalar_typeof compilation time If the compiler supports C11's _Generic, use it to speed up compilation times of __unqual_scalar_typeof(). GCC version 4.9 or later and all supported versions of Clang support the feature (the oldest supported compiler that doesn't support _Generic is GCC 4.8, for which we use the slower alternative). The non-_Generic variant relies on multiple expansions of __pick_integer_type -> __pick_scalar_type -> __builtin_choose_expr, which increases pre-processed code size, and can cause compile times to increase in files with numerous expansions of READ_ONCE(), or other users of __unqual_scalar_typeof(). Summary of compile-time benchmarking done by Arnd Bergmann: clang-11 gcc-9 this patch 0.78 0.91 ideal 0.76 0.86 See https://lkml.kernel.org/r/CAK8P3a3UYQeXhiufUevz=rwe09WM_vSTCd9W+KvJHJcOeQyWVA@mail.gmail.com Further compile-testing done with: gcc 4.8, 4.9, 5.5, 6.4, 7.5, 8.4; clang 9, 10. 
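As a standalone illustration of why _Generic helps here, the sketch below mirrors the new macro using made-up demo_* names (it is not the kernel code verbatim) and can be built in userspace with gcc -std=gnu11 or clang, i.e. any compiler from the supported range above that provides _Generic:

#include <stdio.h>

#define demo_scalar_cases(type)				\
	unsigned type: (unsigned type)0,		\
	signed type: (signed type)0

#define demo_unqual_typeof(x) __typeof__(		\
	_Generic((x),					\
		 char: (char)0,				\
		 demo_scalar_cases(char),		\
		 demo_scalar_cases(short),		\
		 demo_scalar_cases(int),		\
		 demo_scalar_cases(long),		\
		 demo_scalar_cases(long long),		\
		 default: (x)))

int main(void)
{
	const volatile int cv = 42;

	/* The controlling expression of _Generic loses its qualifiers, so
	 * 'copy' is a plain int and may be modified, unlike __typeof__(cv),
	 * which would be const volatile int. */
	demo_unqual_typeof(cv) copy = cv;

	copy += 1;
	printf("%d\n", copy);
	return 0;
}

The _Generic form avoids the nested __builtin_choose_expr() expansion described above, which is where the preprocessed-size and compile-time win comes from.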
Reported-by: Arnd Bergmann Signed-off-by: Marco Elver Signed-off-by: Borislav Petkov Acked-by: Peter Zijlstra Tested-by: Arnd Bergmann Link: https://lkml.kernel.org/r/20200527103236.148700-1-elver@google.com Link: https://lkml.kernel.org/r/CAK8P3a0RJtbVi1JMsfik=jkHCNFv+DJn_FeDg-YLW+ueQW3tNg@mail.gmail.com [will: tweak new macros to make them a bit more readable] Signed-off-by: Will Deacon --- include/linux/compiler_types.h | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 6ed0612bc1432..31416b60eabff 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -213,7 +213,9 @@ struct ftrace_likely_data { /* * __unqual_scalar_typeof(x) - Declare an unqualified scalar type, leaving * non-scalar types unchanged. - * + */ +#if defined(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 40900 +/* * We build this out of a couple of helper macros in a vain attempt to * help you keep your lunch down while reading it. */ @@ -235,6 +237,25 @@ struct ftrace_likely_data { __pick_integer_type(x, int, \ __pick_integer_type(x, long, \ __pick_integer_type(x, long long, x)))))) +#else +/* + * If supported, prefer C11 _Generic for better compile-times. As above, 'char' + * is not type-compatible with 'signed char', and we define a separate case. + */ +#define __scalar_type_to_expr_cases(type) \ + unsigned type: (unsigned type)0, \ + signed type: (signed type)0 + +#define __unqual_scalar_typeof(x) typeof( \ + _Generic((x), \ + char: (char)0, \ + __scalar_type_to_expr_cases(char), \ + __scalar_type_to_expr_cases(short), \ + __scalar_type_to_expr_cases(int), \ + __scalar_type_to_expr_cases(long), \ + __scalar_type_to_expr_cases(long long), \ + default: (x))) +#endif /* Is this type a native word size -- useful for atomic operations */ #define __native_word(t) \ -- GitLab From b398ace5d2ea0b7f00d9f1ce23c647e289c206ca Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Thu, 28 May 2020 09:43:13 +0200 Subject: [PATCH 1165/1971] compiler_types.h: Use unoptimized __unqual_scalar_typeof for sparse If the file is being checked with sparse, use the unoptimized version of __unqual_scalar_typeof(), since sparse does not support _Generic. Reported-by: kbuild test robot Signed-off-by: Marco Elver Signed-off-by: Borislav Petkov Link: https://lkml.kernel.org/r/202005280727.lXn1VnTw%lkp@intel.com Signed-off-by: Will Deacon --- include/linux/compiler_types.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 31416b60eabff..cd73e3857a879 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -214,7 +214,7 @@ struct ftrace_likely_data { * __unqual_scalar_typeof(x) - Declare an unqualified scalar type, leaving * non-scalar types unchanged. */ -#if defined(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 40900 +#if (defined(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 40900) || defined(__CHECKER__) /* * We build this out of a couple of helper macros in a vain attempt to * help you keep your lunch down while reading it. -- GitLab From 6e801dc411329aff592fbd48fb116183d0acdb00 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 4 Jun 2020 18:12:16 +0100 Subject: [PATCH 1166/1971] ASoC: meson: fix memory leak of links if allocation of ldata fails Currently if the allocation of ldata fails the error return path does not kfree the allocated links object. 
Fix this by adding an error exit return path that performs the necessary kfree'ing. Fixes: 7864a79f37b5 ("ASoC: meson: add axg sound card support") Signed-off-by: Colin Ian King Addresses-Coverity: ("Resource leak") Link: https://lore.kernel.org/r/20200604171216.60043-1-colin.king@canonical.com Signed-off-by: Mark Brown --- sound/soc/meson/meson-card-utils.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/sound/soc/meson/meson-card-utils.c b/sound/soc/meson/meson-card-utils.c index 2ca8c98e204f2..5a4a91c887347 100644 --- a/sound/soc/meson/meson-card-utils.c +++ b/sound/soc/meson/meson-card-utils.c @@ -49,19 +49,26 @@ int meson_card_reallocate_links(struct snd_soc_card *card, links = krealloc(priv->card.dai_link, num_links * sizeof(*priv->card.dai_link), GFP_KERNEL | __GFP_ZERO); + if (!links) + goto err_links; + ldata = krealloc(priv->link_data, num_links * sizeof(*priv->link_data), GFP_KERNEL | __GFP_ZERO); - - if (!links || !ldata) { - dev_err(priv->card.dev, "failed to allocate links\n"); - return -ENOMEM; - } + if (!ldata) + goto err_ldata; priv->card.dai_link = links; priv->link_data = ldata; priv->card.num_links = num_links; return 0; + +err_ldata: + kfree(links); +err_links: + dev_err(priv->card.dev, "failed to allocate links\n"); + return -ENOMEM; + } EXPORT_SYMBOL_GPL(meson_card_reallocate_links); -- GitLab From 8a9144c1cf523221b37dd3393827253c91fcbf54 Mon Sep 17 00:00:00 2001 From: Dan Murphy Date: Tue, 2 Jun 2020 14:35:23 -0500 Subject: [PATCH 1167/1971] dt-bindings: ASoc: Fix tdm-slot documentation spelling error Fix the spelling of 'specified'. Also fix grammarical issue with the use of 'a' over 'an' Signed-off-by: Dan Murphy Link: https://lore.kernel.org/r/20200602193524.30309-1-dmurphy@ti.com Signed-off-by: Mark Brown --- Documentation/devicetree/bindings/sound/tdm-slot.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Documentation/devicetree/bindings/sound/tdm-slot.txt b/Documentation/devicetree/bindings/sound/tdm-slot.txt index 34cf70e2cbc47..4bb513ae62fc6 100644 --- a/Documentation/devicetree/bindings/sound/tdm-slot.txt +++ b/Documentation/devicetree/bindings/sound/tdm-slot.txt @@ -14,8 +14,8 @@ For instance: dai-tdm-slot-tx-mask = <0 1>; dai-tdm-slot-rx-mask = <1 0>; -And for each spcified driver, there could be one .of_xlate_tdm_slot_mask() -to specify a explicit mapping of the channels and the slots. If it's absent +And for each specified driver, there could be one .of_xlate_tdm_slot_mask() +to specify an explicit mapping of the channels and the slots. If it's absent the default snd_soc_of_xlate_tdm_slot_mask() will be used to generating the tx and rx masks. -- GitLab From d82faa0825061aef01b99c3c94f77713b483b060 Mon Sep 17 00:00:00 2001 From: Bob Moore Date: Thu, 4 Jun 2020 13:44:19 -0700 Subject: [PATCH 1168/1971] ACPICA: acpidump: Removed dead code from oslinuxtbl.c ACPICA commit 4d938d048790983b8b4252b0f4aeec59dabb476c ACPICA BZ 1119. Link: https://github.com/acpica/acpica/commit/4d938d04 Signed-off-by: Bob Moore Signed-off-by: Erik Kaneda Signed-off-by: Rafael J. 
Wysocki --- .../acpi/os_specific/service_layers/oslinuxtbl.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c b/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c index 5aaddc79adf77..dd38c2b2e1b45 100644 --- a/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c +++ b/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c @@ -35,7 +35,7 @@ static acpi_status osl_add_table_to_list(char *signature, u32 instance); static acpi_status osl_read_table_from_file(char *filename, acpi_size file_offset, - char *signature, struct acpi_table_header **table); + struct acpi_table_header **table); static acpi_status osl_map_table(acpi_size address, @@ -1184,8 +1184,6 @@ osl_table_name_from_file(char *filename, char *signature, u32 *instance) * * PARAMETERS: filename - File that contains the desired table * file_offset - Offset of the table in file - * signature - Optional ACPI Signature for desired table. - * A null terminated 4-character string. * table - Where a pointer to the table is returned * * RETURN: Status; Table buffer is returned if AE_OK. @@ -1197,7 +1195,7 @@ osl_table_name_from_file(char *filename, char *signature, u32 *instance) static acpi_status osl_read_table_from_file(char *filename, acpi_size file_offset, - char *signature, struct acpi_table_header **table) + struct acpi_table_header **table) { FILE *table_file; struct acpi_table_header header; @@ -1225,6 +1223,8 @@ osl_read_table_from_file(char *filename, goto exit; } +#ifdef ACPI_OBSOLETE_FUNCTIONS + /* If signature is specified, it must match the table */ if (signature) { @@ -1244,6 +1244,7 @@ osl_read_table_from_file(char *filename, goto exit; } } +#endif table_length = ap_get_table_length(&header); if (table_length == 0) { @@ -1366,7 +1367,7 @@ osl_get_customized_table(char *pathname, /* There is no physical address saved for customized tables, use zero */ *address = 0; - status = osl_read_table_from_file(table_filename, 0, NULL, table); + status = osl_read_table_from_file(table_filename, 0, table); return (status); } -- GitLab From f083906fa9c1643e5c16b887cc29099fdfc3e875 Mon Sep 17 00:00:00 2001 From: Erik Kaneda Date: Thu, 4 Jun 2020 13:44:20 -0700 Subject: [PATCH 1169/1971] ACPICA: iASL: add new OperationRegion subtype keyword PlatformRtMechanism ACPICA commit 2c2eefa827bd37297f5f9ca4b263fcba829aaf3f Link: https://github.com/acpica/acpica/commit/2c2eefa8 Signed-off-by: Erik Kaneda Signed-off-by: Bob Moore Signed-off-by: Rafael J. 
Wysocki --- drivers/acpi/acpica/dbdisply.c | 2 ++ drivers/acpi/acpica/utdecode.c | 3 ++- include/acpi/actypes.h | 3 ++- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/acpi/acpica/dbdisply.c b/drivers/acpi/acpica/dbdisply.c index f2df416d0d2df..d41eb9e675000 100644 --- a/drivers/acpi/acpica/dbdisply.c +++ b/drivers/acpi/acpica/dbdisply.c @@ -51,6 +51,8 @@ static acpi_adr_space_type acpi_gbl_space_id_list[] = { ACPI_ADR_SPACE_IPMI, ACPI_ADR_SPACE_GPIO, ACPI_ADR_SPACE_GSBUS, + ACPI_ADR_SPACE_PLATFORM_COMM, + ACPI_ADR_SPACE_PLATFORM_RT, ACPI_ADR_SPACE_DATA_TABLE, ACPI_ADR_SPACE_FIXED_HARDWARE }; diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c index 177ab88d95de5..ed9aedf604a1a 100644 --- a/drivers/acpi/acpica/utdecode.c +++ b/drivers/acpi/acpica/utdecode.c @@ -78,7 +78,8 @@ const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = { "IPMI", /* 0x07 */ "GeneralPurposeIo", /* 0x08 */ "GenericSerialBus", /* 0x09 */ - "PCC" /* 0x0A */ + "PCC", /* 0x0A */ + "PlatformRtMechanism" /* 0x0B */ }; const char *acpi_ut_get_region_name(u8 space_id) diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h index 4defed58ea338..aa236b9e6f246 100644 --- a/include/acpi/actypes.h +++ b/include/acpi/actypes.h @@ -815,8 +815,9 @@ typedef u8 acpi_adr_space_type; #define ACPI_ADR_SPACE_GPIO (acpi_adr_space_type) 8 #define ACPI_ADR_SPACE_GSBUS (acpi_adr_space_type) 9 #define ACPI_ADR_SPACE_PLATFORM_COMM (acpi_adr_space_type) 10 +#define ACPI_ADR_SPACE_PLATFORM_RT (acpi_adr_space_type) 11 -#define ACPI_NUM_PREDEFINED_REGIONS 11 +#define ACPI_NUM_PREDEFINED_REGIONS 12 /* * Special Address Spaces -- GitLab From 34a09bffd9982fdced603643068c39de867ec778 Mon Sep 17 00:00:00 2001 From: Bob Moore Date: Thu, 4 Jun 2020 13:44:21 -0700 Subject: [PATCH 1170/1971] ACPICA: Update version to 20200528 ACPICA commit 30ceafb55b59724f73ae6f2b35523417c4e569ea Version 20200528. Link: https://github.com/acpica/acpica/commit/30ceafb5 Signed-off-by: Bob Moore Signed-off-by: Erik Kaneda Signed-off-by: Rafael J. Wysocki --- include/acpi/acpixf.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index 1dc8d262035b1..459d6981ca964 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -12,7 +12,7 @@ /* Current ACPICA subsystem version in YYYYMMDD format */ -#define ACPI_CA_VERSION 0x20200430 +#define ACPI_CA_VERSION 0x20200528 #include #include -- GitLab From afd8d7c7f93681370f6fc2ec62f2705710eee62d Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sun, 31 May 2020 23:00:59 +0200 Subject: [PATCH 1171/1971] PM: hibernate: Add __init annotation to swsusp_header_init() 'swsusp_header_init()' is only called via 'core_initcall'. It can be marked as __init to save a few bytes of memory. Signed-off-by: Christophe JAILLET [ rjw: Subject ] Signed-off-by: Rafael J. 
Wysocki --- kernel/power/swap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/power/swap.c b/kernel/power/swap.c index ca0fcb5ced714..01e2858b5fe36 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -1590,7 +1590,7 @@ int swsusp_unmark(void) } #endif -static int swsusp_header_init(void) +static int __init swsusp_header_init(void) { swsusp_header = (struct swsusp_header*) __get_free_page(GFP_KERNEL); if (!swsusp_header) -- GitLab From cf6fada71543ceea0f6228ffdc0b85778f3f5a6e Mon Sep 17 00:00:00 2001 From: Xiongfeng Wang Date: Sat, 30 May 2020 10:08:30 +0800 Subject: [PATCH 1172/1971] cpufreq: change '.set_boost' to act on one policy Macro 'for_each_active_policy()' is defined internally. To avoid some cpufreq driver needing this macro to iterate over all the policies in '.set_boost' callback, we redefine '.set_boost' to act on only one policy and pass the policy as an argument. 'cpufreq_boost_trigger_state()' iterates over all the policies to set boost for the system. This is preparation for adding SW BOOST support for CPPC. To protect Boost enable/disable by sysfs from CPU online/offline, add 'cpu_hotplug_lock' before calling '.set_boost' for each CPU. Also move the lock from 'set_boost()' to 'store_cpb()' in acpi_cpufreq. Signed-off-by: Xiongfeng Wang Suggested-by: Viresh Kumar Acked-by: Viresh Kumar [ rjw: Subject & changelog ] Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/acpi-cpufreq.c | 14 +++++---- drivers/cpufreq/cpufreq.c | 57 ++++++++++++++++++---------------- include/linux/cpufreq.h | 2 +- 3 files changed, 40 insertions(+), 33 deletions(-) diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index 289e8ce3fd13d..429e5a36c08a9 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -126,12 +126,12 @@ static void boost_set_msr_each(void *p_en) boost_set_msr(enable); } -static int set_boost(int val) +static int set_boost(struct cpufreq_policy *policy, int val) { - get_online_cpus(); - on_each_cpu(boost_set_msr_each, (void *)(long)val, 1); - put_online_cpus(); - pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis"); + on_each_cpu_mask(policy->cpus, boost_set_msr_each, + (void *)(long)val, 1); + pr_debug("CPU %*pbl: Core Boosting %sabled.\n", + cpumask_pr_args(policy->cpus), val ? 
"en" : "dis"); return 0; } @@ -162,7 +162,9 @@ static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf, if (ret || val > 1) return -EINVAL; - set_boost(val); + get_online_cpus(); + set_boost(policy, val); + put_online_cpus(); return count; } diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index d03f250f68e44..0128de3603dfc 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -2532,34 +2532,29 @@ EXPORT_SYMBOL_GPL(cpufreq_update_limits); /********************************************************************* * BOOST * *********************************************************************/ -static int cpufreq_boost_set_sw(int state) +static int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state) { - struct cpufreq_policy *policy; - - for_each_active_policy(policy) { - int ret; + int ret; - if (!policy->freq_table) - return -ENXIO; + if (!policy->freq_table) + return -ENXIO; - ret = cpufreq_frequency_table_cpuinfo(policy, - policy->freq_table); - if (ret) { - pr_err("%s: Policy frequency update failed\n", - __func__); - return ret; - } - - ret = freq_qos_update_request(policy->max_freq_req, policy->max); - if (ret < 0) - return ret; + ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table); + if (ret) { + pr_err("%s: Policy frequency update failed\n", __func__); + return ret; } + ret = freq_qos_update_request(policy->max_freq_req, policy->max); + if (ret < 0) + return ret; + return 0; } int cpufreq_boost_trigger_state(int state) { + struct cpufreq_policy *policy; unsigned long flags; int ret = 0; @@ -2570,15 +2565,25 @@ int cpufreq_boost_trigger_state(int state) cpufreq_driver->boost_enabled = state; write_unlock_irqrestore(&cpufreq_driver_lock, flags); - ret = cpufreq_driver->set_boost(state); - if (ret) { - write_lock_irqsave(&cpufreq_driver_lock, flags); - cpufreq_driver->boost_enabled = !state; - write_unlock_irqrestore(&cpufreq_driver_lock, flags); - - pr_err("%s: Cannot %s BOOST\n", - __func__, state ? "enable" : "disable"); + get_online_cpus(); + for_each_active_policy(policy) { + ret = cpufreq_driver->set_boost(policy, state); + if (ret) + goto err_reset_state; } + put_online_cpus(); + + return 0; + +err_reset_state: + put_online_cpus(); + + write_lock_irqsave(&cpufreq_driver_lock, flags); + cpufreq_driver->boost_enabled = !state; + write_unlock_irqrestore(&cpufreq_driver_lock, flags); + + pr_err("%s: Cannot %s BOOST\n", + __func__, state ? "enable" : "disable"); return ret; } diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 67d5950bd878c..3494f6763597e 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -367,7 +367,7 @@ struct cpufreq_driver { /* platform specific boost support code */ bool boost_enabled; - int (*set_boost)(int state); + int (*set_boost)(struct cpufreq_policy *policy, int state); }; /* flags */ -- GitLab From 54e74df5d76dea824c7c0c9d1b97150bf9b33793 Mon Sep 17 00:00:00 2001 From: Xiongfeng Wang Date: Sat, 30 May 2020 10:08:31 +0800 Subject: [PATCH 1173/1971] cpufreq: CPPC: add SW BOOST support To add SW BOOST support for CPPC, we need to get the max frequency of boost mode and non-boost mode. ACPI spec 6.2 section 8.4.7.1 describes the following two CPC registers. "Highest performance is the absolute maximum performance an individual processor may reach, assuming ideal conditions. 
This performance level may not be sustainable for long durations, and may only be achievable if other platform components are in a specific state; for example, it may require other processors be in an idle state. Nominal Performance is the maximum sustained performance level of the processor, assuming ideal operating conditions. In absence of an external constraint (power, thermal, etc.) this is the performance level the platform is expected to be able to maintain continuously. All processors are expected to be able to sustain their nominal performance state simultaneously." To add SW BOOST support for CPPC, we can use Highest Performance as the max performance in boost mode and Nominal Performance as the max performance in non-boost mode. If the Highest Performance is greater than the Nominal Performance, we assume SW BOOST is supported. The current CPPC driver does not support SW BOOST and use 'Highest Performance' as the max performance the CPU can achieve. 'Nominal Performance' is used to convert 'performance' to 'frequency'. That means, if firmware enable boost and provide a value for Highest Performance which is greater than Nominal Performance, boost feature is enabled by default. Because SW BOOST is disabled by default, so, after this patch, boost feature is disabled by default even if boost is enabled by firmware. Signed-off-by: Xiongfeng Wang Suggested-by: Viresh Kumar Acked-by: Viresh Kumar [ rjw: Subject ] Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/cppc_cpufreq.c | 39 ++++++++++++++++++++++++++++++++-- 1 file changed, 37 insertions(+), 2 deletions(-) diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index bda0b2406fbab..257d726a4456b 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c @@ -37,6 +37,7 @@ * requested etc. */ static struct cppc_cpudata **all_cpu_data; +static bool boost_supported; struct cppc_workaround_oem_info { char oem_id[ACPI_OEM_ID_SIZE + 1]; @@ -310,7 +311,7 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy) * Section 8.4.7.1.1.5 of ACPI 6.1 spec) */ policy->min = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.lowest_nonlinear_perf); - policy->max = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.highest_perf); + policy->max = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.nominal_perf); /* * Set cpuinfo.min_freq to Lowest to make the full range of performance @@ -318,7 +319,7 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy) * nonlinear perf */ policy->cpuinfo.min_freq = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.lowest_perf); - policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.highest_perf); + policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.nominal_perf); policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu_num); policy->shared_type = cpu->shared_type; @@ -343,6 +344,13 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy) cpu->cur_policy = policy; + /* + * If 'highest_perf' is greater than 'nominal_perf', we assume CPU Boost + * is supported. + */ + if (cpu->perf_caps.highest_perf > cpu->perf_caps.nominal_perf) + boost_supported = true; + /* Set policy->cur to max now. The governors will adjust later. 
*/ policy->cur = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.highest_perf); @@ -410,6 +418,32 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpunum) return cppc_get_rate_from_fbctrs(cpu, fb_ctrs_t0, fb_ctrs_t1); } +static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state) +{ + struct cppc_cpudata *cpudata; + int ret; + + if (!boost_supported) { + pr_err("BOOST not supported by CPU or firmware\n"); + return -EINVAL; + } + + cpudata = all_cpu_data[policy->cpu]; + if (state) + policy->max = cppc_cpufreq_perf_to_khz(cpudata, + cpudata->perf_caps.highest_perf); + else + policy->max = cppc_cpufreq_perf_to_khz(cpudata, + cpudata->perf_caps.nominal_perf); + policy->cpuinfo.max_freq = policy->max; + + ret = freq_qos_update_request(policy->max_freq_req, policy->max); + if (ret < 0) + return ret; + + return 0; +} + static struct cpufreq_driver cppc_cpufreq_driver = { .flags = CPUFREQ_CONST_LOOPS, .verify = cppc_verify_policy, @@ -417,6 +451,7 @@ static struct cpufreq_driver cppc_cpufreq_driver = { .get = cppc_cpufreq_get_rate, .init = cppc_cpufreq_cpu_init, .stop_cpu = cppc_cpufreq_stop_cpu, + .set_boost = cppc_cpufreq_set_boost, .name = "cppc_cpufreq", }; -- GitLab From 523f3ec030aa5bf4818ec8dee35b2646abf367fa Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 3 Jun 2020 11:15:03 +0200 Subject: [PATCH 1174/1971] mac80211: initialize return flags in HE 6 GHz operation parsing Dan points out that if ieee80211_chandef_he_6ghz_oper() succeeds, we don't initialize 'ret'. Initialize it to 0 in this case, since everything went fine and nothing has to be disabled. Reported-by: Dan Carpenter Fixes: 57fa5e85d53c ("mac80211: determine chandef from HE 6 GHz operation") Signed-off-by: Johannes Berg Link: https://lore.kernel.org/r/20200603111500.bd2a5ff37b83.I2c3f338ce343b581db493eb9a0d988d1b626c8fb@changeid Signed-off-by: Johannes Berg --- net/mac80211/mlme.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 5820ef02a5871..b2a9d47cf86dd 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -167,6 +167,8 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT | IEEE80211_STA_DISABLE_HE; + else + ret = 0; vht_chandef = *chandef; goto out; } -- GitLab From 3067bf8c596d59164f48569a2d362de5b4c42f59 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 3 Jun 2020 22:21:16 +0100 Subject: [PATCH 1175/1971] rxrpc: Move the call completion handling out of line Move the handling of call completion out of line so that the next patch can add more code in that area. Signed-off-by: David Howells Reviewed-by: Marc Dionne --- net/rxrpc/ar-internal.h | 119 +++++++++------------------------------- net/rxrpc/recvmsg.c | 74 +++++++++++++++++++++++++ net/rxrpc/sendmsg.c | 8 +-- 3 files changed, 103 insertions(+), 98 deletions(-) diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 9fe264bec70ce..9a2139ebd67d7 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -809,100 +809,6 @@ static inline bool rxrpc_is_client_call(const struct rxrpc_call *call) return !rxrpc_is_service_call(call); } -/* - * Transition a call to the complete state. 
- */ -static inline bool __rxrpc_set_call_completion(struct rxrpc_call *call, - enum rxrpc_call_completion compl, - u32 abort_code, - int error) -{ - if (call->state < RXRPC_CALL_COMPLETE) { - call->abort_code = abort_code; - call->error = error; - call->completion = compl, - call->state = RXRPC_CALL_COMPLETE; - trace_rxrpc_call_complete(call); - wake_up(&call->waitq); - return true; - } - return false; -} - -static inline bool rxrpc_set_call_completion(struct rxrpc_call *call, - enum rxrpc_call_completion compl, - u32 abort_code, - int error) -{ - bool ret; - - write_lock_bh(&call->state_lock); - ret = __rxrpc_set_call_completion(call, compl, abort_code, error); - write_unlock_bh(&call->state_lock); - return ret; -} - -/* - * Record that a call successfully completed. - */ -static inline bool __rxrpc_call_completed(struct rxrpc_call *call) -{ - return __rxrpc_set_call_completion(call, RXRPC_CALL_SUCCEEDED, 0, 0); -} - -static inline bool rxrpc_call_completed(struct rxrpc_call *call) -{ - bool ret; - - write_lock_bh(&call->state_lock); - ret = __rxrpc_call_completed(call); - write_unlock_bh(&call->state_lock); - return ret; -} - -/* - * Record that a call is locally aborted. - */ -static inline bool __rxrpc_abort_call(const char *why, struct rxrpc_call *call, - rxrpc_seq_t seq, - u32 abort_code, int error) -{ - trace_rxrpc_abort(call->debug_id, why, call->cid, call->call_id, seq, - abort_code, error); - return __rxrpc_set_call_completion(call, RXRPC_CALL_LOCALLY_ABORTED, - abort_code, error); -} - -static inline bool rxrpc_abort_call(const char *why, struct rxrpc_call *call, - rxrpc_seq_t seq, u32 abort_code, int error) -{ - bool ret; - - write_lock_bh(&call->state_lock); - ret = __rxrpc_abort_call(why, call, seq, abort_code, error); - write_unlock_bh(&call->state_lock); - return ret; -} - -/* - * Abort a call due to a protocol error. - */ -static inline bool __rxrpc_abort_eproto(struct rxrpc_call *call, - struct sk_buff *skb, - const char *eproto_why, - const char *why, - u32 abort_code) -{ - struct rxrpc_skb_priv *sp = rxrpc_skb(skb); - - trace_rxrpc_rx_eproto(call, sp->hdr.serial, eproto_why); - return rxrpc_abort_call(why, call, sp->hdr.seq, abort_code, -EPROTO); -} - -#define rxrpc_abort_eproto(call, skb, eproto_why, abort_why, abort_code) \ - __rxrpc_abort_eproto((call), (skb), tracepoint_string(eproto_why), \ - (abort_why), (abort_code)) - /* * conn_client.c */ @@ -1101,8 +1007,33 @@ extern const struct seq_operations rxrpc_peer_seq_ops; * recvmsg.c */ void rxrpc_notify_socket(struct rxrpc_call *); +bool __rxrpc_set_call_completion(struct rxrpc_call *, enum rxrpc_call_completion, u32, int); +bool rxrpc_set_call_completion(struct rxrpc_call *, enum rxrpc_call_completion, u32, int); +bool __rxrpc_call_completed(struct rxrpc_call *); +bool rxrpc_call_completed(struct rxrpc_call *); +bool __rxrpc_abort_call(const char *, struct rxrpc_call *, rxrpc_seq_t, u32, int); +bool rxrpc_abort_call(const char *, struct rxrpc_call *, rxrpc_seq_t, u32, int); int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int); +/* + * Abort a call due to a protocol error. 
+ */ +static inline bool __rxrpc_abort_eproto(struct rxrpc_call *call, + struct sk_buff *skb, + const char *eproto_why, + const char *why, + u32 abort_code) +{ + struct rxrpc_skb_priv *sp = rxrpc_skb(skb); + + trace_rxrpc_rx_eproto(call, sp->hdr.serial, eproto_why); + return rxrpc_abort_call(why, call, sp->hdr.seq, abort_code, -EPROTO); +} + +#define rxrpc_abort_eproto(call, skb, eproto_why, abort_why, abort_code) \ + __rxrpc_abort_eproto((call), (skb), tracepoint_string(eproto_why), \ + (abort_why), (abort_code)) + /* * rtt.c */ diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c index 8578c39ec8395..6c4ba4224ddc2 100644 --- a/net/rxrpc/recvmsg.c +++ b/net/rxrpc/recvmsg.c @@ -58,6 +58,80 @@ void rxrpc_notify_socket(struct rxrpc_call *call) _leave(""); } +/* + * Transition a call to the complete state. + */ +bool __rxrpc_set_call_completion(struct rxrpc_call *call, + enum rxrpc_call_completion compl, + u32 abort_code, + int error) +{ + if (call->state < RXRPC_CALL_COMPLETE) { + call->abort_code = abort_code; + call->error = error; + call->completion = compl, + call->state = RXRPC_CALL_COMPLETE; + trace_rxrpc_call_complete(call); + wake_up(&call->waitq); + return true; + } + return false; +} + +bool rxrpc_set_call_completion(struct rxrpc_call *call, + enum rxrpc_call_completion compl, + u32 abort_code, + int error) +{ + bool ret; + + write_lock_bh(&call->state_lock); + ret = __rxrpc_set_call_completion(call, compl, abort_code, error); + write_unlock_bh(&call->state_lock); + return ret; +} + +/* + * Record that a call successfully completed. + */ +bool __rxrpc_call_completed(struct rxrpc_call *call) +{ + return __rxrpc_set_call_completion(call, RXRPC_CALL_SUCCEEDED, 0, 0); +} + +bool rxrpc_call_completed(struct rxrpc_call *call) +{ + bool ret; + + write_lock_bh(&call->state_lock); + ret = __rxrpc_call_completed(call); + write_unlock_bh(&call->state_lock); + return ret; +} + +/* + * Record that a call is locally aborted. + */ +bool __rxrpc_abort_call(const char *why, struct rxrpc_call *call, + rxrpc_seq_t seq, u32 abort_code, int error) +{ + trace_rxrpc_abort(call->debug_id, why, call->cid, call->call_id, seq, + abort_code, error); + return __rxrpc_set_call_completion(call, RXRPC_CALL_LOCALLY_ABORTED, + abort_code, error); +} + +bool rxrpc_abort_call(const char *why, struct rxrpc_call *call, + rxrpc_seq_t seq, u32 abort_code, int error) +{ + bool ret; + + write_lock_bh(&call->state_lock); + ret = __rxrpc_abort_call(why, call, seq, abort_code, error); + write_unlock_bh(&call->state_lock); + return ret; +} + /* * Pass a call terminating message to userspace. */ diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c index 5e9c43d4a314b..5dd9ba000c002 100644 --- a/net/rxrpc/sendmsg.c +++ b/net/rxrpc/sendmsg.c @@ -261,10 +261,10 @@ static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call, case -ENETUNREACH: case -EHOSTUNREACH: case -ECONNREFUSED: - rxrpc_set_call_completion(call, - RXRPC_CALL_LOCAL_ERROR, - 0, ret); - rxrpc_notify_socket(call); + if (rxrpc_set_call_completion(call, + RXRPC_CALL_LOCAL_ERROR, + 0, ret)) + rxrpc_notify_socket(call); goto out; } _debug("need instant resend %d", ret); -- GitLab From 5ac0d62226a07849b1a5233af8c800a19cecab83 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 3 Jun 2020 22:21:16 +0100 Subject: [PATCH 1176/1971] rxrpc: Fix missing notification Under some circumstances, rxrpc will fail a transmit a packet through the underlying UDP socket (ie. UDP sendmsg returns an error). This may result in a call getting stuck. 
In the instance being seen, where AFS tries to send a probe to the Volume Location server, tracepoints show the UDP Tx failure (in this case returing error 99 EADDRNOTAVAIL) and then nothing more: afs_make_vl_call: c=0000015d VL.GetCapabilities rxrpc_call: c=0000015d NWc u=1 sp=rxrpc_kernel_begin_call+0x106/0x170 [rxrpc] a=00000000dd89ee8a rxrpc_call: c=0000015d Gus u=2 sp=rxrpc_new_client_call+0x14f/0x580 [rxrpc] a=00000000e20e4b08 rxrpc_call: c=0000015d SEE u=2 sp=rxrpc_activate_one_channel+0x7b/0x1c0 [rxrpc] a=00000000e20e4b08 rxrpc_call: c=0000015d CON u=2 sp=rxrpc_kernel_begin_call+0x106/0x170 [rxrpc] a=00000000e20e4b08 rxrpc_tx_fail: c=0000015d r=1 ret=-99 CallDataNofrag The problem is that if the initial packet fails and the retransmission timer hasn't been started, the call is set to completed and an error is returned from rxrpc_send_data_packet() to rxrpc_queue_packet(). Though rxrpc_instant_resend() is called, this does nothing because the call is marked completed. So rxrpc_notify_socket() isn't called and the error is passed back up to rxrpc_send_data(), rxrpc_kernel_send_data() and thence to afs_make_call() and afs_vl_get_capabilities() where it is simply ignored because it is assumed that the result of a probe will be collected asynchronously. Fileserver probing is similarly affected via afs_fs_get_capabilities(). Fix this by always issuing a notification in __rxrpc_set_call_completion() if it shifts a call to the completed state, even if an error is also returned to the caller through the function return value. Also put in a little bit of optimisation to avoid taking the call state_lock and disabling softirqs if the call is already in the completed state and remove some now redundant rxrpc_notify_socket() calls. Fixes: f5c17aaeb2ae ("rxrpc: Calls should only have one terminal state") Reported-by: Gerry Seidman Signed-off-by: David Howells Reviewed-by: Marc Dionne --- net/rxrpc/call_event.c | 1 - net/rxrpc/conn_event.c | 7 +++---- net/rxrpc/input.c | 7 ++----- net/rxrpc/peer_event.c | 4 +--- net/rxrpc/recvmsg.c | 21 +++++++++++++-------- net/rxrpc/sendmsg.c | 6 ++---- 6 files changed, 21 insertions(+), 25 deletions(-) diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c index 2a65ac41055f5..61a51c251e1b9 100644 --- a/net/rxrpc/call_event.c +++ b/net/rxrpc/call_event.c @@ -320,7 +320,6 @@ recheck_state: if (call->state == RXRPC_CALL_COMPLETE) { del_timer_sync(&call->timer); - rxrpc_notify_socket(call); goto out_put; } diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c index 06fcff2ebbba1..447f55ca68860 100644 --- a/net/rxrpc/conn_event.c +++ b/net/rxrpc/conn_event.c @@ -173,10 +173,9 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn, else trace_rxrpc_rx_abort(call, serial, conn->abort_code); - if (rxrpc_set_call_completion(call, compl, - conn->abort_code, - conn->error)) - rxrpc_notify_socket(call); + rxrpc_set_call_completion(call, compl, + conn->abort_code, + conn->error); } } diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 3be4177baf707..299ac98e9754e 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -275,7 +275,6 @@ static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun, case RXRPC_CALL_SERVER_AWAIT_ACK: __rxrpc_call_completed(call); - rxrpc_notify_socket(call); state = call->state; break; @@ -1013,9 +1012,8 @@ static void rxrpc_input_abort(struct rxrpc_call *call, struct sk_buff *skb) _proto("Rx ABORT %%%u { %x }", sp->hdr.serial, abort_code); - if (rxrpc_set_call_completion(call, 
RXRPC_CALL_REMOTELY_ABORTED, - abort_code, -ECONNABORTED)) - rxrpc_notify_socket(call); + rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED, + abort_code, -ECONNABORTED); } /* @@ -1102,7 +1100,6 @@ static void rxrpc_input_implicit_end_call(struct rxrpc_sock *rx, spin_lock(&rx->incoming_lock); __rxrpc_disconnect_call(conn, call); spin_unlock(&rx->incoming_lock); - rxrpc_notify_socket(call); } /* diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c index b1449d9718836..4704a8dceced8 100644 --- a/net/rxrpc/peer_event.c +++ b/net/rxrpc/peer_event.c @@ -289,9 +289,7 @@ static void rxrpc_distribute_error(struct rxrpc_peer *peer, int error, hlist_for_each_entry_rcu(call, &peer->error_targets, error_link) { rxrpc_see_call(call); - if (call->state < RXRPC_CALL_COMPLETE && - rxrpc_set_call_completion(call, compl, 0, -error)) - rxrpc_notify_socket(call); + rxrpc_set_call_completion(call, compl, 0, -error); } } diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c index 6c4ba4224ddc2..2989742a4aa11 100644 --- a/net/rxrpc/recvmsg.c +++ b/net/rxrpc/recvmsg.c @@ -73,6 +73,7 @@ bool __rxrpc_set_call_completion(struct rxrpc_call *call, call->state = RXRPC_CALL_COMPLETE; trace_rxrpc_call_complete(call); wake_up(&call->waitq); + rxrpc_notify_socket(call); return true; } return false; @@ -83,11 +84,13 @@ bool rxrpc_set_call_completion(struct rxrpc_call *call, u32 abort_code, int error) { - bool ret; + bool ret = false; - write_lock_bh(&call->state_lock); - ret = __rxrpc_set_call_completion(call, compl, abort_code, error); - write_unlock_bh(&call->state_lock); + if (call->state < RXRPC_CALL_COMPLETE) { + write_lock_bh(&call->state_lock); + ret = __rxrpc_set_call_completion(call, compl, abort_code, error); + write_unlock_bh(&call->state_lock); + } return ret; } @@ -101,11 +104,13 @@ bool __rxrpc_call_completed(struct rxrpc_call *call) bool rxrpc_call_completed(struct rxrpc_call *call) { - bool ret; + bool ret = false; - write_lock_bh(&call->state_lock); - ret = __rxrpc_call_completed(call); - write_unlock_bh(&call->state_lock); + if (call->state < RXRPC_CALL_COMPLETE) { + write_lock_bh(&call->state_lock); + ret = __rxrpc_call_completed(call); + write_unlock_bh(&call->state_lock); + } return ret; } diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c index 5dd9ba000c002..1304b8608f56e 100644 --- a/net/rxrpc/sendmsg.c +++ b/net/rxrpc/sendmsg.c @@ -261,10 +261,8 @@ static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call, case -ENETUNREACH: case -EHOSTUNREACH: case -ECONNREFUSED: - if (rxrpc_set_call_completion(call, - RXRPC_CALL_LOCAL_ERROR, - 0, ret)) - rxrpc_notify_socket(call); + rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, + 0, ret); goto out; } _debug("need instant resend %d", ret); -- GitLab From b80db73dc8be7022adae1b4414a1bebce50fe915 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Fri, 5 Jun 2020 16:20:28 +0200 Subject: [PATCH 1177/1971] KVM: selftests: Fix build with "make ARCH=x86_64" Marcelo reports that kvm selftests fail to build with "make ARCH=x86_64": gcc -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 -fno-stack-protector -fno-PIE -I../../../../tools/include -I../../../../tools/arch/x86_64/include -I../../../../usr/include/ -Iinclude -Ilib -Iinclude/x86_64 -I.. -c lib/kvm_util.c -o /var/tmp/20200604202744-bin/lib/kvm_util.o In file included from lib/kvm_util.c:11: include/x86_64/processor.h:14:10: fatal error: asm/msr-index.h: No such file or directory #include ^~~~~~~~~~~~~~~~~ compilation terminated. 
"make ARCH=x86", however, works. The problem is that arch specific headers for x86_64 live in 'tools/arch/x86/include', not in 'tools/arch/x86_64/include'. Fixes: 66d69e081b52 ("selftests: fix kvm relocatable native/cross builds and installs") Reported-by: Marcelo Bandeira Condotta Signed-off-by: Vitaly Kuznetsov Message-Id: <20200605142028.550068-1-vkuznets@redhat.com> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/Makefile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index b4ff112e5c7ec..4a166588d99f2 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -83,7 +83,11 @@ LIBKVM += $(LIBKVM_$(UNAME_M)) INSTALL_HDR_PATH = $(top_srcdir)/usr LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/ LINUX_TOOL_INCLUDE = $(top_srcdir)/tools/include +ifeq ($(ARCH),x86_64) +LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/tools/arch/x86/include +else LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/tools/arch/$(ARCH)/include +endif CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \ -fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) \ -I$(LINUX_TOOL_ARCH_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude \ -- GitLab From 34d2618d3318bf4832a13ef71c96833dc996f7d5 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 15 May 2020 12:18:14 -0400 Subject: [PATCH 1178/1971] KVM: x86: emulate reserved nops from 0f/18 to 0f/1f Instructions starting with 0f18 up to 0f1f are reserved nops, except those that were assigned to MPX. These include the endbr markers used by CET. List them correctly in the opcode table. Signed-off-by: Paolo Bonzini --- arch/x86/kvm/emulate.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index de5476f8683e9..d0e2825ae6174 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -4800,8 +4800,12 @@ static const struct opcode twobyte_table[256] = { GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_10_0f_11), GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_10_0f_11), N, N, N, N, N, N, - D(ImplicitOps | ModRM | SrcMem | NoAccess), - N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess), + D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 4 * prefetch + 4 * reserved NOP */ + D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N, + D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */ + D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */ + D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */ + D(ImplicitOps | ModRM | SrcMem | NoAccess), /* NOP + 7 * reserved NOP */ /* 0x20 - 0x2F */ DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read), DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read), -- GitLab From 91231e525bb654731f60d6699472e1e98379bb01 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 5 Jun 2020 14:01:34 +0300 Subject: [PATCH 1179/1971] ALSA: emu10k1: delete an unnecessary condition The "val" variable is an unsigned int so it's always <= UINT_MAX. This check is always true so it can be removed. 
Signed-off-by: Dan Carpenter Link: https://lore.kernel.org/r/20200605110134.GC978434@mwanda Signed-off-by: Takashi Iwai --- sound/pci/emu10k1/emu10k1x.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/pci/emu10k1/emu10k1x.c b/sound/pci/emu10k1/emu10k1x.c index ddb7c2ce3f7c9..def8161cde4c2 100644 --- a/sound/pci/emu10k1/emu10k1x.c +++ b/sound/pci/emu10k1/emu10k1x.c @@ -1040,7 +1040,7 @@ static void snd_emu10k1x_proc_reg_write(struct snd_info_entry *entry, if (sscanf(line, "%x %x %x", ®, &channel_id, &val) != 3) continue; - if (reg < 0x49 && val <= 0xffffffff && channel_id <= 2) + if (reg < 0x49 && channel_id <= 2) snd_emu10k1x_ptr_write(emu, reg, channel_id, val); } } -- GitLab From 5a1c9565c07b981449d8432cab2927cbeca73476 Mon Sep 17 00:00:00 2001 From: Luca Ceresoli Date: Wed, 3 Jun 2020 18:21:18 +0200 Subject: [PATCH 1180/1971] power: reset: gpio-poweroff: add missing '\n' in dev_err() dev_err() needs a terminating newline. Signed-off-by: Luca Ceresoli Signed-off-by: Sebastian Reichel --- drivers/power/reset/gpio-poweroff.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/power/reset/gpio-poweroff.c b/drivers/power/reset/gpio-poweroff.c index 6a4bbb5065510..c5067eb753706 100644 --- a/drivers/power/reset/gpio-poweroff.c +++ b/drivers/power/reset/gpio-poweroff.c @@ -54,7 +54,7 @@ static int gpio_poweroff_probe(struct platform_device *pdev) /* If a pm_power_off function has already been added, leave it alone */ if (pm_power_off != NULL) { dev_err(&pdev->dev, - "%s: pm_power_off function already registered", + "%s: pm_power_off function already registered\n", __func__); return -EBUSY; } -- GitLab From 152204dbdcee6df9406f87c81f9591aeaf1ba55b Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Sat, 30 May 2020 19:40:30 +0300 Subject: [PATCH 1181/1971] power: supply: cw2015: Attach OF ID table to the driver It appears that OF ID table was hanging around without being attached to the driver. Attach it properly. Fixes: b4c7715c10c1 ("power: supply: add CellWise cw2015 fuel gauge driver") Reported-by: kbuild test robot Signed-off-by: Andy Shevchenko Acked-By: Tobias Schramm Tested-By: Tobias Schramm Signed-off-by: Sebastian Reichel --- drivers/power/supply/cw2015_battery.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/power/supply/cw2015_battery.c b/drivers/power/supply/cw2015_battery.c index 19f62ea957ee1..0146f1bfc29bb 100644 --- a/drivers/power/supply/cw2015_battery.c +++ b/drivers/power/supply/cw2015_battery.c @@ -734,6 +734,7 @@ MODULE_DEVICE_TABLE(of, cw2015_of_match); static struct i2c_driver cw_bat_driver = { .driver = { .name = "cw2015", + .of_match_table = cw2015_of_match, .pm = &cw_bat_pm_ops, }, .probe_new = cw_bat_probe, -- GitLab From 388d8bdb87e01bcea6d0b2bf797b5f6d7b2401fb Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Thu, 9 Apr 2020 11:40:34 +0100 Subject: [PATCH 1182/1971] tracing: Remove obsolete PREEMPTIRQ_EVENTS kconfig option The PREEMPTIRQ_EVENTS option is unused after commit c3bc8fd637a9 ("tracing: Centralize preemptirq tracepoints and unify their usage"). Remove it. Note that this option is hazardous as it stands. It enables TRACE_IRQFLAGS event on non-preempt configurations without the irqsoff tracer enabled. TRACE_IRQFLAGS as it stands incurs significant overhead on each IRQ entry/exit. This is because trace_hardirqs_[on|off] does all the per-cpu manipulations and NMI checks even if tracing is completely disabled for some insane reason. 
For example, netperf running UDP_STREAM on localhost incurs a 4-6% performance penalty without any tracing if IRQFLAGS is set. It can be put behind a static brach but even the function entry/exit costs a little bit. Link: https://lkml.kernel.org/r/20200409104034.GJ3818@techsingularity.net Signed-off-by: Mel Gorman Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/Kconfig | 9 --------- 1 file changed, 9 deletions(-) diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 75407d5dc83a4..0c82ac2c5688c 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -258,15 +258,6 @@ config TRACE_PREEMPT_TOGGLE Enables hooks which will be called when preemption is first disabled, and last enabled. -config PREEMPTIRQ_EVENTS - bool "Enable trace events for preempt and irq disable/enable" - select TRACE_IRQFLAGS - select TRACE_PREEMPT_TOGGLE if PREEMPTION - select GENERIC_TRACER - default n - help - Enable tracing of disable and enable events for preemption and irqs. - config IRQSOFF_TRACER bool "Interrupts-off Latency Tracer" default n -- GitLab From 5865985416ebb5a0c198a819a098b5cc300ac8a4 Mon Sep 17 00:00:00 2001 From: Steve French Date: Fri, 5 Jun 2020 17:19:46 -0500 Subject: [PATCH 1183/1971] smb3: extend fscache mount volume coherency check It is better to check volume id and creation time, not just the root inode number to verify if the volume has changed when remounting. Reviewed-by: David Howells Signed-off-by: Steve French --- fs/cifs/cache.c | 9 ++------- fs/cifs/fscache.c | 17 +++++++++++++++-- fs/cifs/fscache.h | 9 +++++++++ 3 files changed, 26 insertions(+), 9 deletions(-) diff --git a/fs/cifs/cache.c b/fs/cifs/cache.c index b7420e605b288..0f2adecb94f22 100644 --- a/fs/cifs/cache.c +++ b/fs/cifs/cache.c @@ -53,13 +53,6 @@ const struct fscache_cookie_def cifs_fscache_server_index_def = { .type = FSCACHE_COOKIE_TYPE_INDEX, }; -/* - * Auxiliary data attached to CIFS superblock within the cache - */ -struct cifs_fscache_super_auxdata { - u64 resource_id; /* unique server resource id */ -}; - char *extract_sharename(const char *treename) { const char *src; @@ -98,6 +91,8 @@ fscache_checkaux cifs_fscache_super_check_aux(void *cookie_netfs_data, memset(&auxdata, 0, sizeof(auxdata)); auxdata.resource_id = tcon->resource_id; + auxdata.vol_create_time = tcon->vol_create_time; + auxdata.vol_serial_number = tcon->vol_serial_number; if (memcmp(data, &auxdata, datalen) != 0) return FSCACHE_CHECKAUX_OBSOLETE; diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c index ea6ace9c2417d..da688185403c3 100644 --- a/fs/cifs/fscache.c +++ b/fs/cifs/fscache.c @@ -96,6 +96,7 @@ void cifs_fscache_get_super_cookie(struct cifs_tcon *tcon) { struct TCP_Server_Info *server = tcon->ses->server; char *sharename; + struct cifs_fscache_super_auxdata auxdata; sharename = extract_sharename(tcon->treeName); if (IS_ERR(sharename)) { @@ -104,11 +105,16 @@ void cifs_fscache_get_super_cookie(struct cifs_tcon *tcon) return; } + memset(&auxdata, 0, sizeof(auxdata)); + auxdata.resource_id = tcon->resource_id; + auxdata.vol_create_time = tcon->vol_create_time; + auxdata.vol_serial_number = tcon->vol_serial_number; + tcon->fscache = fscache_acquire_cookie(server->fscache, &cifs_fscache_super_index_def, sharename, strlen(sharename), - &tcon->resource_id, sizeof(tcon->resource_id), + &auxdata, sizeof(auxdata), tcon, 0, true); kfree(sharename); cifs_dbg(FYI, "%s: (0x%p/0x%p)\n", @@ -117,8 +123,15 @@ void cifs_fscache_get_super_cookie(struct cifs_tcon *tcon) void cifs_fscache_release_super_cookie(struct cifs_tcon *tcon) { 
+ struct cifs_fscache_super_auxdata auxdata; + + memset(&auxdata, 0, sizeof(auxdata)); + auxdata.resource_id = tcon->resource_id; + auxdata.vol_create_time = tcon->vol_create_time; + auxdata.vol_serial_number = tcon->vol_serial_number; + cifs_dbg(FYI, "%s: (0x%p)\n", __func__, tcon->fscache); - fscache_relinquish_cookie(tcon->fscache, &tcon->resource_id, false); + fscache_relinquish_cookie(tcon->fscache, &auxdata, false); tcon->fscache = NULL; } diff --git a/fs/cifs/fscache.h b/fs/cifs/fscache.h index 8c0862e413064..1091633d2adbd 100644 --- a/fs/cifs/fscache.h +++ b/fs/cifs/fscache.h @@ -27,6 +27,15 @@ #ifdef CONFIG_CIFS_FSCACHE +/* + * Auxiliary data attached to CIFS superblock within the cache + */ +struct cifs_fscache_super_auxdata { + u64 resource_id; /* unique server resource id */ + __le64 vol_create_time; + u32 vol_serial_number; +} __packed; + /* * Auxiliary data attached to CIFS inode within the cache */ -- GitLab From bb02e6e63d0e71188bc5fe5f4732e66bc8b5dceb Mon Sep 17 00:00:00 2001 From: Zhu Lingshan Date: Fri, 5 Jun 2020 18:27:12 +0800 Subject: [PATCH 1184/1971] ifcvf: ignore continuous setting same status value User space may try to set status of same value multiple times, this patch handles this case more efficiently by ignoring the setting of the same status value. Signed-off-by: Zhu Lingshan Link: https://lore.kernel.org/r/1591352835-22441-3-git-send-email-lingshan.zhu@intel.com Signed-off-by: Michael S. Tsirkin --- drivers/vdpa/ifcvf/ifcvf_main.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c index d529ed681fe64..63a6366b43554 100644 --- a/drivers/vdpa/ifcvf/ifcvf_main.c +++ b/drivers/vdpa/ifcvf/ifcvf_main.c @@ -179,6 +179,9 @@ static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status) adapter = dev_get_drvdata(vdpa_dev->dev.parent); status_old = ifcvf_get_status(vf); + if (status_old == status) + return; + if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK)) { ifcvf_stop_datapath(adapter); -- GitLab From 776f395004d829bbbf18c159ed9beb517a208c71 Mon Sep 17 00:00:00 2001 From: Zhu Lingshan Date: Fri, 5 Jun 2020 18:27:13 +0800 Subject: [PATCH 1185/1971] vhost_vdpa: Support config interrupt in vdpa This commit implements config interrupt support in vhost_vdpa layer. Signed-off-by: Zhu Lingshan Acked-by: Jason Wang Link: https://lore.kernel.org/r/1591352835-22441-4-git-send-email-lingshan.zhu@intel.com Signed-off-by: Michael S. 
Tsirkin --- drivers/vhost/vdpa.c | 47 ++++++++++++++++++++++++++++++++++++++ include/uapi/linux/vhost.h | 4 ++++ 2 files changed, 51 insertions(+) diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index 6ca7660ee6b55..77a0c9fb6cc39 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -22,6 +22,7 @@ #include #include #include +#include #include "vhost.h" @@ -71,6 +72,7 @@ struct vhost_vdpa { int nvqs; int virtio_id; int minor; + struct eventfd_ctx *config_ctx; }; static DEFINE_IDA(vhost_vdpa_ida); @@ -102,6 +104,17 @@ static irqreturn_t vhost_vdpa_virtqueue_cb(void *private) return IRQ_HANDLED; } +static irqreturn_t vhost_vdpa_config_cb(void *private) +{ + struct vhost_vdpa *v = private; + struct eventfd_ctx *config_ctx = v->config_ctx; + + if (config_ctx) + eventfd_signal(config_ctx, 1); + + return IRQ_HANDLED; +} + static void vhost_vdpa_reset(struct vhost_vdpa *v) { struct vdpa_device *vdpa = v->vdpa; @@ -289,6 +302,36 @@ static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp) return 0; } +static void vhost_vdpa_config_put(struct vhost_vdpa *v) +{ + if (v->config_ctx) + eventfd_ctx_put(v->config_ctx); +} + +static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp) +{ + struct vdpa_callback cb; + int fd; + struct eventfd_ctx *ctx; + + cb.callback = vhost_vdpa_config_cb; + cb.private = v->vdpa; + if (copy_from_user(&fd, argp, sizeof(fd))) + return -EFAULT; + + ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd); + swap(ctx, v->config_ctx); + + if (!IS_ERR_OR_NULL(ctx)) + eventfd_ctx_put(ctx); + + if (IS_ERR(v->config_ctx)) + return PTR_ERR(v->config_ctx); + + v->vdpa->config->set_config_cb(v->vdpa, &cb); + + return 0; +} static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd, void __user *argp) { @@ -396,6 +439,9 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep, case VHOST_SET_LOG_FD: r = -ENOIOCTLCMD; break; + case VHOST_VDPA_SET_CONFIG_CALL: + r = vhost_vdpa_set_config_call(v, argp); + break; default: r = vhost_dev_ioctl(&v->vdev, cmd, argp); if (r == -ENOIOCTLCMD) @@ -730,6 +776,7 @@ static int vhost_vdpa_release(struct inode *inode, struct file *filep) vhost_dev_stop(&v->vdev); vhost_vdpa_iotlb_free(v); vhost_vdpa_free_domain(v); + vhost_vdpa_config_put(v); vhost_dev_cleanup(&v->vdev); kfree(v->vdev.vqs); mutex_unlock(&d->mutex); diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h index 9fe72e4b13731..0c2349612e776 100644 --- a/include/uapi/linux/vhost.h +++ b/include/uapi/linux/vhost.h @@ -15,6 +15,8 @@ #include #include +#define VHOST_FILE_UNBIND -1 + /* ioctls */ #define VHOST_VIRTIO 0xAF @@ -140,4 +142,6 @@ /* Get the max ring size. */ #define VHOST_VDPA_GET_VRING_NUM _IOR(VHOST_VIRTIO, 0x76, __u16) +/* Set event fd for config interrupt*/ +#define VHOST_VDPA_SET_CONFIG_CALL _IOW(VHOST_VIRTIO, 0x77, int) #endif -- GitLab From e0136c16fae95dc3762f3912f6f9336f5f57fe81 Mon Sep 17 00:00:00 2001 From: Zhu Lingshan Date: Fri, 5 Jun 2020 18:27:14 +0800 Subject: [PATCH 1186/1971] vhost: replace -1 with VHOST_FILE_UNBIND in ioctls This commit replaces -1 with VHOST_FILE_UNBIND in ioctls since we have added such a macro in the uapi header for vdpa_host. Signed-off-by: Zhu Lingshan Acked-by: Jason Wang Link: https://lore.kernel.org/r/1591352835-22441-5-git-send-email-lingshan.zhu@intel.com Signed-off-by: Michael S. 
Tsirkin --- drivers/vhost/vhost.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 534d1267b761d..172da092107e8 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -1612,7 +1612,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg r = -EFAULT; break; } - eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd); + eventfp = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_fget(f.fd); if (IS_ERR(eventfp)) { r = PTR_ERR(eventfp); break; @@ -1628,7 +1628,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg r = -EFAULT; break; } - ctx = f.fd == -1 ? NULL : eventfd_ctx_fdget(f.fd); + ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd); if (IS_ERR(ctx)) { r = PTR_ERR(ctx); break; @@ -1640,7 +1640,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg r = -EFAULT; break; } - ctx = f.fd == -1 ? NULL : eventfd_ctx_fdget(f.fd); + ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd); if (IS_ERR(ctx)) { r = PTR_ERR(ctx); break; @@ -1765,7 +1765,7 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp) r = get_user(fd, (int __user *)argp); if (r < 0) break; - ctx = fd == -1 ? NULL : eventfd_ctx_fdget(fd); + ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd); if (IS_ERR(ctx)) { r = PTR_ERR(ctx); break; -- GitLab From e7991f376a4dd837f455d9e2112a280617854d12 Mon Sep 17 00:00:00 2001 From: Zhu Lingshan Date: Fri, 5 Jun 2020 18:27:15 +0800 Subject: [PATCH 1187/1971] ifcvf: implement config interrupt in IFCVF This commit implements config interrupt support in IFC VF Signed-off-by: Zhu Lingshan Acked-by: Jason Wang Link: https://lore.kernel.org/r/1591352835-22441-6-git-send-email-lingshan.zhu@intel.com Signed-off-by: Michael S. 
Tsirkin --- drivers/vdpa/ifcvf/ifcvf_base.c | 3 +++ drivers/vdpa/ifcvf/ifcvf_base.h | 4 ++++ drivers/vdpa/ifcvf/ifcvf_main.c | 23 ++++++++++++++++++++++- 3 files changed, 29 insertions(+), 1 deletion(-) diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c index e24371d644b50..94bf0328b68d6 100644 --- a/drivers/vdpa/ifcvf/ifcvf_base.c +++ b/drivers/vdpa/ifcvf/ifcvf_base.c @@ -185,6 +185,9 @@ void ifcvf_set_status(struct ifcvf_hw *hw, u8 status) void ifcvf_reset(struct ifcvf_hw *hw) { + hw->config_cb.callback = NULL; + hw->config_cb.private = NULL; + ifcvf_set_status(hw, 0); /* flush set_status, make sure VF is stopped, reset */ ifcvf_get_status(hw); diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h index e80307092351c..f4554412e607f 100644 --- a/drivers/vdpa/ifcvf/ifcvf_base.h +++ b/drivers/vdpa/ifcvf/ifcvf_base.h @@ -27,6 +27,7 @@ ((1ULL << VIRTIO_NET_F_MAC) | \ (1ULL << VIRTIO_F_ANY_LAYOUT) | \ (1ULL << VIRTIO_F_VERSION_1) | \ + (1ULL << VIRTIO_NET_F_STATUS) | \ (1ULL << VIRTIO_F_ORDER_PLATFORM) | \ (1ULL << VIRTIO_F_IOMMU_PLATFORM) | \ (1ULL << VIRTIO_NET_F_MRG_RXBUF)) @@ -81,6 +82,9 @@ struct ifcvf_hw { void __iomem *net_cfg; struct vring_info vring[IFCVF_MAX_QUEUE_PAIRS * 2]; void __iomem * const *base; + char config_msix_name[256]; + struct vdpa_callback config_cb; + }; struct ifcvf_adapter { diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c index 63a6366b43554..f5a60c14b9799 100644 --- a/drivers/vdpa/ifcvf/ifcvf_main.c +++ b/drivers/vdpa/ifcvf/ifcvf_main.c @@ -18,6 +18,16 @@ #define DRIVER_AUTHOR "Intel Corporation" #define IFCVF_DRIVER_NAME "ifcvf" +static irqreturn_t ifcvf_config_changed(int irq, void *arg) +{ + struct ifcvf_hw *vf = arg; + + if (vf->config_cb.callback) + return vf->config_cb.callback(vf->config_cb.private); + + return IRQ_HANDLED; +} + static irqreturn_t ifcvf_intr_handler(int irq, void *arg) { struct vring_info *vring = arg; @@ -59,6 +69,14 @@ static int ifcvf_request_irq(struct ifcvf_adapter *adapter) return ret; } + snprintf(vf->config_msix_name, 256, "ifcvf[%s]-config\n", + pci_name(pdev)); + vector = 0; + irq = pci_irq_vector(pdev, vector); + ret = devm_request_irq(&pdev->dev, irq, + ifcvf_config_changed, 0, + vf->config_msix_name, vf); + for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) { snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d\n", pci_name(pdev), i); @@ -328,7 +346,10 @@ static void ifcvf_vdpa_set_config(struct vdpa_device *vdpa_dev, static void ifcvf_vdpa_set_config_cb(struct vdpa_device *vdpa_dev, struct vdpa_callback *cb) { - /* We don't support config interrupt */ + struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); + + vf->config_cb.callback = cb->callback; + vf->config_cb.private = cb->private; } /* -- GitLab From 2667a6814cb518f0df6520a00ddea48e8af50bd0 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Thu, 4 Jun 2020 10:39:57 -0500 Subject: [PATCH 1188/1971] i2c: npcm7xx: Remove unnecessary parentheses Remove unnecessary parentheses around _bus_. This issue was found with the help of Coccinelle. Signed-off-by: Gustavo A. R. 
Silva Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-npcm7xx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c index a8e75c3484f12..68951a293aae3 100644 --- a/drivers/i2c/busses/i2c-npcm7xx.c +++ b/drivers/i2c/busses/i2c-npcm7xx.c @@ -2248,7 +2248,7 @@ static int npcm_i2c_probe_bus(struct platform_device *pdev) bus->reg = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(bus->reg)) - return PTR_ERR((bus)->reg); + return PTR_ERR(bus->reg); spin_lock_init(&bus->lock); init_completion(&bus->cmd_complete); -- GitLab From d94ecfc399715f06da347922e7979c088b1d8834 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 5 Jun 2020 19:44:09 +0800 Subject: [PATCH 1189/1971] blk-mq: split out a __blk_mq_get_driver_tag helper Allocation of the driver tag in the case of using a scheduler shares very little code with the "normal" tag allocation. Split out a new helper to streamline this path, and untangle it from the complex normal tag allocation. This way also avoids to fail driver tag allocation because of inactive hctx during cpu hotplug, and fixes potential hang risk. Fixes: bf0beec0607d ("blk-mq: drain I/O when all CPUs in a hctx are offline") Signed-off-by: Ming Lei Signed-off-by: Christoph Hellwig Tested-by: John Garry Cc: Dongli Zhang Cc: Hannes Reinecke Cc: Daniel Wagner Signed-off-by: Jens Axboe --- block/blk-mq-tag.c | 27 +++++++++++++++++++++++++++ block/blk-mq-tag.h | 8 ++++++++ block/blk-mq.c | 29 ----------------------------- block/blk-mq.h | 1 - 4 files changed, 35 insertions(+), 30 deletions(-) diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 96a39d0724a29..cded7fdcad8ef 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -191,6 +191,33 @@ found_tag: return tag + tag_offset; } +bool __blk_mq_get_driver_tag(struct request *rq) +{ + struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags; + unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags; + bool shared = blk_mq_tag_busy(rq->mq_hctx); + int tag; + + if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) { + bt = &rq->mq_hctx->tags->breserved_tags; + tag_offset = 0; + } + + if (!hctx_may_queue(rq->mq_hctx, bt)) + return false; + tag = __sbitmap_queue_get(bt); + if (tag == BLK_MQ_NO_TAG) + return false; + + rq->tag = tag + tag_offset; + if (shared) { + rq->rq_flags |= RQF_MQ_INFLIGHT; + atomic_inc(&rq->mq_hctx->nr_active); + } + rq->mq_hctx->tags->rqs[rq->tag] = rq; + return true; +} + void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx, unsigned int tag) { diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index d38e48f2a0a4a..2e4ef51cdb32a 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -51,6 +51,14 @@ enum { BLK_MQ_TAG_MAX = BLK_MQ_NO_TAG - 1, }; +bool __blk_mq_get_driver_tag(struct request *rq); +static inline bool blk_mq_get_driver_tag(struct request *rq) +{ + if (rq->tag != BLK_MQ_NO_TAG) + return true; + return __blk_mq_get_driver_tag(rq); +} + extern bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *); extern void __blk_mq_tag_idle(struct blk_mq_hw_ctx *); diff --git a/block/blk-mq.c b/block/blk-mq.c index 9a36ac1c1fa1d..4f57d27bfa737 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1052,35 +1052,6 @@ static inline unsigned int queued_to_index(unsigned int queued) return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1); } -bool blk_mq_get_driver_tag(struct request *rq) -{ - struct blk_mq_alloc_data data = { - .q = rq->q, - .hctx = rq->mq_hctx, - 
.flags = BLK_MQ_REQ_NOWAIT, - .cmd_flags = rq->cmd_flags, - }; - bool shared; - - if (rq->tag != BLK_MQ_NO_TAG) - return true; - - if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag)) - data.flags |= BLK_MQ_REQ_RESERVED; - - shared = blk_mq_tag_busy(data.hctx); - rq->tag = blk_mq_get_tag(&data); - if (rq->tag >= 0) { - if (shared) { - rq->rq_flags |= RQF_MQ_INFLIGHT; - atomic_inc(&data.hctx->nr_active); - } - data.hctx->tags->rqs[rq->tag] = rq; - } - - return rq->tag != BLK_MQ_NO_TAG; -} - static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode, int flags, void *key) { diff --git a/block/blk-mq.h b/block/blk-mq.h index a139b06318174..b3ce0f3a2ad2a 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -44,7 +44,6 @@ bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool); void blk_mq_add_to_requeue_list(struct request *rq, bool at_head, bool kick_requeue_list); void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list); -bool blk_mq_get_driver_tag(struct request *rq); struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *start); -- GitLab From 22f614bc0f376e7a1bcf1a2cd912885f4a933045 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Fri, 5 Jun 2020 19:44:10 +0800 Subject: [PATCH 1190/1971] blk-mq: fix blk_mq_all_tag_iter blk_mq_all_tag_iter() is added to iterate all requests, so we should fetch the request from ->static_rqs][] instead of ->rqs[] which is for holding in-flight request only. Fix it by adding flag of BT_TAG_ITER_STATIC_RQS. Fixes: bf0beec0607d ("blk-mq: drain I/O when all CPUs in a hctx are offline") Signed-off-by: Ming Lei Tested-by: John Garry Cc: Dongli Zhang Cc: Hannes Reinecke Cc: Daniel Wagner Cc: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-mq-tag.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index cded7fdcad8ef..44f3d0967cb47 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -296,6 +296,7 @@ struct bt_tags_iter_data { #define BT_TAG_ITER_RESERVED (1 << 0) #define BT_TAG_ITER_STARTED (1 << 1) +#define BT_TAG_ITER_STATIC_RQS (1 << 2) static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data) { @@ -309,9 +310,12 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data) /* * We can hit rq == NULL here, because the tagging functions - * test and set the bit before assining ->rqs[]. + * test and set the bit before assigning ->rqs[]. */ - rq = tags->rqs[bitnr]; + if (iter_data->flags & BT_TAG_ITER_STATIC_RQS) + rq = tags->static_rqs[bitnr]; + else + rq = tags->rqs[bitnr]; if (!rq) return true; if ((iter_data->flags & BT_TAG_ITER_STARTED) && @@ -366,11 +370,13 @@ static void __blk_mq_all_tag_iter(struct blk_mq_tags *tags, * indicates whether or not @rq is a reserved request. Return * true to continue iterating tags, false to stop. * @priv: Will be passed as second argument to @fn. + * + * Caller has to pass the tag map from which requests are allocated. 
*/ void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn, void *priv) { - return __blk_mq_all_tag_iter(tags, fn, priv, 0); + return __blk_mq_all_tag_iter(tags, fn, priv, BT_TAG_ITER_STATIC_RQS); } /** -- GitLab From cdb555397f438592bab00599037c347b700cf397 Mon Sep 17 00:00:00 2001 From: Max Staudt Date: Sun, 7 Jun 2020 20:28:12 +0200 Subject: [PATCH 1191/1971] i2c: icy: Fix build with CONFIG_AMIGA_PCMCIA=n This has been found by the Kernel Test Robot: http://lkml.iu.edu/hypermail/linux/kernel/2006.0/06862.html With CONFIG_AMIGA_PCMCIA=n, io_mm.h does not pull in amigahw.h and ZTWO_VADDR is undefined. Add forgotten include to i2c-icy.c Fixes: 4768e90ecaec ("i2c: Add i2c-icy for I2C on m68k/Amiga") Reported-by: kernel test robot Signed-off-by: Max Staudt Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-icy.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/i2c/busses/i2c-icy.c b/drivers/i2c/busses/i2c-icy.c index 271470f4d8a9b..66c9923fc7665 100644 --- a/drivers/i2c/busses/i2c-icy.c +++ b/drivers/i2c/busses/i2c-icy.c @@ -43,6 +43,7 @@ #include #include +#include #include #include -- GitLab From ba5f9fa0ca85a6137fa35efd3a1256d8bb6bc5ff Mon Sep 17 00:00:00 2001 From: Dong Aisheng Date: Wed, 3 Jun 2020 13:15:42 +0800 Subject: [PATCH 1192/1971] mailbox: imx: Add context save/restore for suspend/resume For "mem" mode suspend on i.MX8 SoCs, MU settings could be lost because its power is off, so save/restore is needed for MU settings during suspend/resume. However, the restore can ONLY be done when MU settings are actually lost, for the scenario of settings NOT lost in "freeze" mode suspend, since there could be still IPC going on multiple CPUs, restoring the MU settings could overwrite the TIE by mistake and cause system freeze, so need to make sure ONLY restore the MU settings when it is powered off, Anson fixes this by checking whether restore is actually needed when resume. Signed-off-by: Dong Aisheng Signed-off-by: Anson Huang Signed-off-by: Jassi Brar --- drivers/mailbox/imx-mailbox.c | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c index bd69ecfb93522..da90a8e1636d5 100644 --- a/drivers/mailbox/imx-mailbox.c +++ b/drivers/mailbox/imx-mailbox.c @@ -67,6 +67,8 @@ struct imx_mu_priv { struct clk *clk; int irq; + u32 xcr; + bool side_b; }; @@ -589,12 +591,45 @@ static const struct of_device_id imx_mu_dt_ids[] = { }; MODULE_DEVICE_TABLE(of, imx_mu_dt_ids); +static int imx_mu_suspend_noirq(struct device *dev) +{ + struct imx_mu_priv *priv = dev_get_drvdata(dev); + + priv->xcr = imx_mu_read(priv, priv->dcfg->xCR); + + return 0; +} + +static int imx_mu_resume_noirq(struct device *dev) +{ + struct imx_mu_priv *priv = dev_get_drvdata(dev); + + /* + * ONLY restore MU when context lost, the TIE could + * be set during noirq resume as there is MU data + * communication going on, and restore the saved + * value will overwrite the TIE and cause MU data + * send failed, may lead to system freeze. This issue + * is observed by testing freeze mode suspend. 
+ */ + if (!imx_mu_read(priv, priv->dcfg->xCR)) + imx_mu_write(priv, priv->xcr, priv->dcfg->xCR); + + return 0; +} + +static const struct dev_pm_ops imx_mu_pm_ops = { + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_mu_suspend_noirq, + imx_mu_resume_noirq) +}; + static struct platform_driver imx_mu_driver = { .probe = imx_mu_probe, .remove = imx_mu_remove, .driver = { .name = "imx_mu", .of_match_table = imx_mu_dt_ids, + .pm = &imx_mu_pm_ops, }, }; module_platform_driver(imx_mu_driver); -- GitLab From bb2b2624dbe27246bd1ee087f2d50f80daa6622c Mon Sep 17 00:00:00 2001 From: Anson Huang Date: Wed, 3 Jun 2020 13:15:43 +0800 Subject: [PATCH 1193/1971] mailbox: imx: Add runtime PM callback to handle MU clocks Some of i.MX8M SoCs have MU clock, they need to be managed in runtime to make sure the MU clock can be off in runtime, add runtime PM callback to handle MU clock. And on i.MX8MP, the MU clock is combined with power domain and runtime PM is enabled for the clock driver, during noirq suspend/resume phase, runtime PM is disabled by device suspend, but the MU context save/restore needs to enable MU clock for register access, calling clock prepare/enable will trigger runtime resume failure and lead to system suspend failed. Actually, the MU context save/restore is ONLY necessary for SCU IPC MU, other MUs especially on i.MX8MP platforms which have MU clock assigned, they need to runtime request/free mailbox channel in the consumer driver, so no need to save/restore MU context for them, hence it can avoid this issue, so the MU context save/restore is ONLY applied to i.MX platforms MU instance without clock present. Signed-off-by: Anson Huang Signed-off-by: Jassi Brar --- drivers/mailbox/imx-mailbox.c | 32 +++++++++++++++++++++++++++++--- 1 file changed, 29 insertions(+), 3 deletions(-) diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c index da90a8e1636d5..080b60849e48d 100644 --- a/drivers/mailbox/imx-mailbox.c +++ b/drivers/mailbox/imx-mailbox.c @@ -536,10 +536,13 @@ static int imx_mu_probe(struct platform_device *pdev) if (ret < 0) goto disable_runtime_pm; + clk_disable_unprepare(priv->clk); + return 0; disable_runtime_pm: pm_runtime_disable(dev); + clk_disable_unprepare(priv->clk); return ret; } @@ -547,7 +550,6 @@ static int imx_mu_remove(struct platform_device *pdev) { struct imx_mu_priv *priv = platform_get_drvdata(pdev); - clk_disable_unprepare(priv->clk); pm_runtime_disable(priv->dev); return 0; @@ -595,7 +597,8 @@ static int imx_mu_suspend_noirq(struct device *dev) { struct imx_mu_priv *priv = dev_get_drvdata(dev); - priv->xcr = imx_mu_read(priv, priv->dcfg->xCR); + if (!priv->clk) + priv->xcr = imx_mu_read(priv, priv->dcfg->xCR); return 0; } @@ -612,15 +615,38 @@ static int imx_mu_resume_noirq(struct device *dev) * send failed, may lead to system freeze. This issue * is observed by testing freeze mode suspend. 
*/ - if (!imx_mu_read(priv, priv->dcfg->xCR)) + if (!imx_mu_read(priv, priv->dcfg->xCR) && !priv->clk) imx_mu_write(priv, priv->xcr, priv->dcfg->xCR); return 0; } +static int imx_mu_runtime_suspend(struct device *dev) +{ + struct imx_mu_priv *priv = dev_get_drvdata(dev); + + clk_disable_unprepare(priv->clk); + + return 0; +} + +static int imx_mu_runtime_resume(struct device *dev) +{ + struct imx_mu_priv *priv = dev_get_drvdata(dev); + int ret; + + ret = clk_prepare_enable(priv->clk); + if (ret) + dev_err(dev, "failed to enable clock\n"); + + return ret; +} + static const struct dev_pm_ops imx_mu_pm_ops = { SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_mu_suspend_noirq, imx_mu_resume_noirq) + SET_RUNTIME_PM_OPS(imx_mu_runtime_suspend, + imx_mu_runtime_resume, NULL) }; static struct platform_driver imx_mu_driver = { -- GitLab From b7b2796b9b31e3dcbd563bcf8f983a79bb81163d Mon Sep 17 00:00:00 2001 From: Anson Huang Date: Wed, 3 Jun 2020 13:15:44 +0800 Subject: [PATCH 1194/1971] mailbox: imx: ONLY IPC MU needs IRQF_NO_SUSPEND flag IPC MU has no power domain assigned and there could be IPC during noirq suspend phase, so IRQF_NO_SUSPEND flag is needed for IPC MU. However, for other MUs, they have power domain assigned and their power will be turned off during noirq suspend phase, but with IRQF_NO_SUSPEND set, their interrupts are NOT disabled even after their power turned off, it will cause system crash when mailbox driver trys to handle pending interrupts but the MU power is already turned off. So, IRQF_NO_SUSPEND flag should ONLY be added to IPC MU which has power domain managed by SCU, then all other MUs' pending interrupts after noirq suspend phase will be handled after system resume. Signed-off-by: Anson Huang Signed-off-by: Jassi Brar --- drivers/mailbox/imx-mailbox.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c index 080b60849e48d..7205b825c8b54 100644 --- a/drivers/mailbox/imx-mailbox.c +++ b/drivers/mailbox/imx-mailbox.c @@ -292,6 +292,7 @@ static int imx_mu_startup(struct mbox_chan *chan) { struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox); struct imx_mu_con_priv *cp = chan->con_priv; + unsigned long irq_flag = IRQF_SHARED; int ret; pm_runtime_get_sync(priv->dev); @@ -302,8 +303,12 @@ static int imx_mu_startup(struct mbox_chan *chan) return 0; } - ret = request_irq(priv->irq, imx_mu_isr, IRQF_SHARED | - IRQF_NO_SUSPEND, cp->irq_desc, chan); + /* IPC MU should be with IRQF_NO_SUSPEND set */ + if (!priv->dev->pm_domain) + irq_flag |= IRQF_NO_SUSPEND; + + ret = request_irq(priv->irq, imx_mu_isr, irq_flag, + cp->irq_desc, chan); if (ret) { dev_err(priv->dev, "Unable to acquire IRQ %d\n", priv->irq); -- GitLab From 0c5086f5699906ec8e31ea6509239489f060f2dc Mon Sep 17 00:00:00 2001 From: Kai-Heng Feng Date: Mon, 8 Jun 2020 14:26:28 +0800 Subject: [PATCH 1195/1971] ALSA: usb-audio: Add vendor, product and profile name for HP Thunderbolt Dock The HP Thunderbolt Dock has two separate USB devices, one is for speaker and one is for headset. Add names for them so userspace can apply UCM settings. 
Signed-off-by: Kai-Heng Feng Cc: Link: https://lore.kernel.org/r/20200608062630.10806-1-kai.heng.feng@canonical.com Signed-off-by: Takashi Iwai --- sound/usb/quirks-table.h | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index 6d6492195bdc7..90d65bfa733dc 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -40,6 +40,26 @@ .ifnum = QUIRK_NO_INTERFACE \ } +/* HP Thunderbolt Dock Audio Headset */ +{ + USB_DEVICE(0x03f0, 0x0269), + .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { + .vendor_name = "HP", + .product_name = "Thunderbolt Dock Audio Headset", + .profile_name = "HP-Thunderbolt-Dock-Audio-Headset", + .ifnum = QUIRK_NO_INTERFACE + } +}, +/* HP Thunderbolt Dock Audio Module */ +{ + USB_DEVICE(0x03f0, 0x0567), + .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { + .vendor_name = "HP", + .product_name = "Thunderbolt Dock Audio Module", + .profile_name = "HP-Thunderbolt-Dock-Audio-Module", + .ifnum = QUIRK_NO_INTERFACE + } +}, /* FTDI devices */ { USB_DEVICE(0x0403, 0xb8d8), -- GitLab From e4b0e41fee948ad327f9c912160aa4b50a6a4340 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Mon, 8 Jun 2020 09:15:13 +0200 Subject: [PATCH 1196/1971] ALSA: usb-audio: Use the new macro for HP Dock rename quirks Replace the open-code with the new QUIRK_DEVICE_PROFILE() macro for simplicity. Fixes: 0c5086f56999 ("ALSA: usb-audio: Add vendor, product and profile name for HP Thunderbolt Dock") Link: https://lore.kernel.org/r/20200608071513.570-1-tiwai@suse.de Signed-off-by: Takashi Iwai --- sound/usb/quirks-table.h | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index 90d65bfa733dc..4ec491011b19c 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -43,22 +43,14 @@ /* HP Thunderbolt Dock Audio Headset */ { USB_DEVICE(0x03f0, 0x0269), - .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { - .vendor_name = "HP", - .product_name = "Thunderbolt Dock Audio Headset", - .profile_name = "HP-Thunderbolt-Dock-Audio-Headset", - .ifnum = QUIRK_NO_INTERFACE - } + QUIRK_DEVICE_PROFILE("HP", "Thunderbolt Dock Audio Headset", + "HP-Thunderbolt-Dock-Audio-Headset"), }, /* HP Thunderbolt Dock Audio Module */ { USB_DEVICE(0x03f0, 0x0567), - .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { - .vendor_name = "HP", - .product_name = "Thunderbolt Dock Audio Module", - .profile_name = "HP-Thunderbolt-Dock-Audio-Module", - .ifnum = QUIRK_NO_INTERFACE - } + QUIRK_DEVICE_PROFILE("HP", "Thunderbolt Dock Audio Module", + "HP-Thunderbolt-Dock-Audio-Module"), }, /* FTDI devices */ { -- GitLab From 2068cf7dfbc69c4097c95af3a0bd943ced155a76 Mon Sep 17 00:00:00 2001 From: youngjun Date: Sun, 7 Jun 2020 02:04:06 -0700 Subject: [PATCH 1197/1971] ovl: remove unnecessary lock check Directory is always locked until "out_unlock" label. So lock check is not needed. 
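For clarity, this is roughly the control flow of ovl_workdir_create() after the change (a condensed sketch of the function modified below, not a literal excerpt): the directory inode is locked unconditionally on entry and released only at the common exit label, so a separate "locked" flag adds nothing.

	inode_lock_nested(dir, I_MUTEX_PARENT);
	retry:
		work = lookup_one_len(name, ofs->workbasedir, strlen(name));
		/* ... create the workdir, possibly jumping back to "retry" once ... */
	out_unlock:
		inode_unlock(dir);	/* dir is still held on every path that reaches here */
		return work;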
Signed-off-by: youngjun Signed-off-by: Miklos Szeredi --- fs/overlayfs/super.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 8d8cd46e14827..91476bc422f96 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -676,11 +676,8 @@ static struct dentry *ovl_workdir_create(struct ovl_fs *ofs, struct dentry *work; int err; bool retried = false; - bool locked = false; inode_lock_nested(dir, I_MUTEX_PARENT); - locked = true; - retry: work = lookup_one_len(name, ofs->workbasedir, strlen(name)); @@ -741,9 +738,7 @@ retry: goto out_err; } out_unlock: - if (locked) - inode_unlock(dir); - + inode_unlock(dir); return work; out_dput: -- GitLab From 327cdb98fb3eed8373777465759c44f8d123c227 Mon Sep 17 00:00:00 2001 From: Flavio Suligoi Date: Fri, 5 Jun 2020 17:41:04 +0200 Subject: [PATCH 1198/1971] doc: networking: wireless: fix wiki website url In the files: - regulatory.rst - mac80211-injection.rst the wiki url is still the old "wireless.kernel.org" instead of the new "wireless.wiki.kernel.org" Signed-off-by: Flavio Suligoi Link: https://lore.kernel.org/r/20200605154112.16277-2-f.suligoi@asem.it Signed-off-by: Johannes Berg --- Documentation/networking/mac80211-injection.rst | 2 +- Documentation/networking/regulatory.rst | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Documentation/networking/mac80211-injection.rst b/Documentation/networking/mac80211-injection.rst index be65f886ff1fe..63ba6611fdff8 100644 --- a/Documentation/networking/mac80211-injection.rst +++ b/Documentation/networking/mac80211-injection.rst @@ -101,6 +101,6 @@ interface), along the following lines::: You can also find a link to a complete inject application here: -http://wireless.kernel.org/en/users/Documentation/packetspammer +https://wireless.wiki.kernel.org/en/users/Documentation/packetspammer Andy Green diff --git a/Documentation/networking/regulatory.rst b/Documentation/networking/regulatory.rst index 8701b91e81ee1..16782a95b74aa 100644 --- a/Documentation/networking/regulatory.rst +++ b/Documentation/networking/regulatory.rst @@ -9,7 +9,7 @@ regulatory infrastructure works. More up to date information can be obtained at the project's web page: -http://wireless.kernel.org/en/developers/Regulatory +https://wireless.wiki.kernel.org/en/developers/Regulatory Keeping regulatory domains in userspace --------------------------------------- @@ -37,7 +37,7 @@ expected regulatory domains will be respected by the kernel. A currently available userspace agent which can accomplish this is CRDA - central regulatory domain agent. Its documented here: -http://wireless.kernel.org/en/developers/Regulatory/CRDA +https://wireless.wiki.kernel.org/en/developers/Regulatory/CRDA Essentially the kernel will send a udev event when it knows it needs a new regulatory domain. A udev rule can be put in place @@ -58,7 +58,7 @@ Who asks for regulatory domains? 
Users can use iw: -http://wireless.kernel.org/en/users/Documentation/iw +https://wireless.wiki.kernel.org/en/users/Documentation/iw An example:: -- GitLab From 97eda66421c44f1449e8d087fd05eab5d466afb7 Mon Sep 17 00:00:00 2001 From: Flavio Suligoi Date: Fri, 5 Jun 2020 17:41:11 +0200 Subject: [PATCH 1199/1971] include: fix wiki website url in netlink interface header The wiki url is still the old "wireless.kernel.org" instead of the new "wireless.wiki.kernel.org" Signed-off-by: Flavio Suligoi Link: https://lore.kernel.org/r/20200605154112.16277-9-f.suligoi@asem.it Signed-off-by: Johannes Berg --- include/uapi/linux/nl80211.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index dad8c8f8581f8..4e6339ab1fcea 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -794,7 +794,7 @@ * various triggers. These triggers can be configured through this * command with the %NL80211_ATTR_WOWLAN_TRIGGERS attribute. For * more background information, see - * http://wireless.kernel.org/en/users/Documentation/WoWLAN. + * https://wireless.wiki.kernel.org/en/users/Documentation/WoWLAN. * The @NL80211_CMD_SET_WOWLAN command can also be used as a notification * from the driver reporting the wakeup reason. In this case, the * @NL80211_ATTR_WOWLAN_TRIGGERS attribute will contain the reason -- GitLab From 59d4bfc1e2c09435d91c980b03f7b72ce6e9f24e Mon Sep 17 00:00:00 2001 From: Flavio Suligoi Date: Fri, 5 Jun 2020 17:41:12 +0200 Subject: [PATCH 1200/1971] net: fix wiki website url mac80211 and wireless files In the files: - net/mac80211/rx.c - net/wireless/Kconfig the wiki url is still the old "wireless.kernel.org" instead of the new "wireless.wiki.kernel.org" Signed-off-by: Flavio Suligoi Link: https://lore.kernel.org/r/20200605154112.16277-10-f.suligoi@asem.it Signed-off-by: Johannes Berg --- net/mac80211/rx.c | 2 +- net/wireless/Kconfig | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 21854a61a2b74..a88ab6fb16f20 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -4694,7 +4694,7 @@ void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, * rate_idx is MCS index, which can be [0-76] * as documented on: * - * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n + * https://wireless.wiki.kernel.org/en/developers/Documentation/ieee80211/802.11n * * Anything else would be some sort of driver or * hardware error. The driver should catch hardware diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig index 813e93644ae7b..d695584870411 100644 --- a/net/wireless/Kconfig +++ b/net/wireless/Kconfig @@ -31,7 +31,7 @@ config CFG80211 For more information refer to documentation on the wireless wiki: - http://wireless.kernel.org/en/developers/Documentation/cfg80211 + https://wireless.wiki.kernel.org/en/developers/Documentation/cfg80211 When built as a module it will be called cfg80211. -- GitLab From b3fb6de7c6019c5d8495c3a115d42a0f118f631c Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Mon, 8 Jun 2020 01:43:22 -0400 Subject: [PATCH 1201/1971] virtio-mem: drop unnecessary initialization rc is initialized to -ENIVAL but that's never used. Drop it. Fixes: 5f1f79bbc9e2 ("virtio-mem: Paravirtualized memory hotplug") Reported-by: kernel test robot Signed-off-by: Michael S. 
Tsirkin Acked-by: David Hildenbrand --- drivers/virtio/virtio_mem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c index f658fe9149beb..2f357142ea5e8 100644 --- a/drivers/virtio/virtio_mem.c +++ b/drivers/virtio/virtio_mem.c @@ -1768,7 +1768,7 @@ static void virtio_mem_delete_resource(struct virtio_mem *vm) static int virtio_mem_probe(struct virtio_device *vdev) { struct virtio_mem *vm; - int rc = -EINVAL; + int rc; BUILD_BUG_ON(sizeof(struct virtio_mem_req) != 24); BUILD_BUG_ON(sizeof(struct virtio_mem_resp) != 10); -- GitLab From d7466a5adbd61d79610981903eec19aaf8ac935d Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 4 Jun 2020 11:37:30 +0100 Subject: [PATCH 1202/1971] drm/i915/gem: Mark the buffer pool as active for the cmdparser If the execbuf is interrupted after building the cmdparser pipeline, and before we commit to submitting the request to HW, we would attempt to clean up the cmdparser early. While we held active references to the vma being parsed and constructed, we did not hold an active reference for the buffer pool itself. The result was that an interrupted execbuf could still have run the cmdparser pipeline, but since the buffer pool was idle, its target vma could have been recycled. Note this problem only occurs if the cmdparser is running async due to pipelined waits on busy fences, and the execbuf is interrupted. Fixes: 686c7c35abc2 ("drm/i915/gem: Asynchronous cmdparser") Fixes: 16e87459673a ("drm/i915/gt: Move the batch buffer pool from the engine to the gt") Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20200604103751.18816-1-chris@chris-wilson.co.uk (cherry picked from commit 57a78ca4eceab1ecb0299fba8a10211289329889) Signed-off-by: Joonas Lahtinen --- .../gpu/drm/i915/gem/i915_gem_execbuffer.c | 56 ++++++++++++++++--- 1 file changed, 48 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index c0d59d48e1986..1d646f5190705 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -1988,6 +1988,38 @@ static const struct dma_fence_work_ops eb_parse_ops = { .release = __eb_parse_release, }; +static inline int +__parser_mark_active(struct i915_vma *vma, + struct intel_timeline *tl, + struct dma_fence *fence) +{ + struct intel_gt_buffer_pool_node *node = vma->private; + + return i915_active_ref(&node->active, tl, fence); +} + +static int +parser_mark_active(struct eb_parse_work *pw, struct intel_timeline *tl) +{ + int err; + + mutex_lock(&tl->mutex); + + err = __parser_mark_active(pw->shadow, tl, &pw->base.dma); + if (err) + goto unlock; + + if (pw->trampoline) { + err = __parser_mark_active(pw->trampoline, tl, &pw->base.dma); + if (err) + goto unlock; + } + +unlock: + mutex_unlock(&tl->mutex); + return err; +} + static int eb_parse_pipeline(struct i915_execbuffer *eb, struct i915_vma *shadow, struct i915_vma *trampoline) @@ -2022,20 +2054,25 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb, pw->shadow = shadow; pw->trampoline = trampoline; + /* Mark active refs early for this worker, in case we get interrupted */ + err = parser_mark_active(pw, eb->context->timeline); + if (err) + goto err_commit; + err = dma_resv_lock_interruptible(pw->batch->resv, NULL); if (err) - goto err_trampoline; + goto err_commit; err = dma_resv_reserve_shared(pw->batch->resv, 1); if (err) - goto 
err_batch_unlock; + goto err_commit_unlock; /* Wait for all writes (and relocs) into the batch to complete */ err = i915_sw_fence_await_reservation(&pw->base.chain, pw->batch->resv, NULL, false, 0, I915_FENCE_GFP); if (err < 0) - goto err_batch_unlock; + goto err_commit_unlock; /* Keep the batch alive and unwritten as we parse */ dma_resv_add_shared_fence(pw->batch->resv, &pw->base.dma); @@ -2050,11 +2087,13 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb, dma_fence_work_commit_imm(&pw->base); return 0; -err_batch_unlock: +err_commit_unlock: dma_resv_unlock(pw->batch->resv); -err_trampoline: - if (trampoline) - i915_active_release(&trampoline->active); +err_commit: + i915_sw_fence_set_error_once(&pw->base.chain, err); + dma_fence_work_commit_imm(&pw->base); + return err; + err_shadow: i915_active_release(&shadow->active); err_batch: @@ -2100,6 +2139,7 @@ static int eb_parse(struct i915_execbuffer *eb) goto err; } i915_gem_object_set_readonly(shadow->obj); + shadow->private = pool; trampoline = NULL; if (CMDPARSER_USES_GGTT(eb->i915)) { @@ -2113,6 +2153,7 @@ static int eb_parse(struct i915_execbuffer *eb) shadow = trampoline; goto err_shadow; } + shadow->private = pool; eb->batch_flags |= I915_DISPATCH_SECURE; } @@ -2129,7 +2170,6 @@ static int eb_parse(struct i915_execbuffer *eb) eb->trampoline = trampoline; eb->batch_start_offset = 0; - shadow->private = pool; return 0; err_trampoline: -- GitLab From 4da1ced1e57c885b714255b130e17845234669ff Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Tue, 2 Jun 2020 18:11:26 +0300 Subject: [PATCH 1203/1971] drm/i915/params: fix i915.reset module param type The reset member in i915_params was previously changed to unsigned, but this failed to change the actual module parameter. Fixes: aae970d8454b ("drm/i915: Mark i915.reset as unsigned") Cc: Chris Wilson Cc: Mika Kuoppala Reviewed-by: Chris Wilson Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20200602151126.25626-1-jani.nikula@intel.com (cherry picked from commit 34becfdb945a5eb819b7c8e4f0ec5cc5952ec68f) Signed-off-by: Joonas Lahtinen --- drivers/gpu/drm/i915/i915_params.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index a3dde770226de..02559da61e6ea 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -65,7 +65,7 @@ i915_param_named_unsafe(vbt_sdvo_panel_type, int, 0400, "Override/Ignore selection of SDVO panel mode in the VBT " "(-2=ignore, -1=auto [default], index in VBT BIOS table)"); -i915_param_named_unsafe(reset, int, 0600, +i915_param_named_unsafe(reset, uint, 0600, "Attempt GPU resets (0=disabled, 1=full gpu reset, 2=engine reset [default])"); i915_param_named_unsafe(vbt_firmware, charp, 0400, -- GitLab From 956ad9d98b73f59e442cc119c98ba1e04e94fe6d Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Thu, 4 Jun 2020 19:22:26 +0200 Subject: [PATCH 1204/1971] ACPI: PM: Avoid using power resources if there are none for D0 As recently reported, some platforms provide a list of power resources for device power state D3hot, through the _PR3 object, but they do not provide a list of power resources for device power state D0. 
Among other things, this causes acpi_device_get_power() to return D3hot as the current state of the device in question if all of the D3hot power resources are "on", because it sees the power_resources flag set and calls acpi_power_get_inferred_state() which finds that D3hot is the shallowest power state with all of the associated power resources turned "on", so that's what it returns. Moreover, that value takes precedence over the acpi_dev_pm_explicit_get() return value, because it means a deeper power state. The device may very well be in D0 physically at that point, however. Moreover, the presence of _PR3 without _PR0 for a given device means that only one D3-level power state can be supported by it. Namely, because there are no power resources to turn "off" when transitioning the device from D0 into D3cold (which should be supported since _PR3 is present), the evaluation of _PS3 should be sufficient to put it straight into D3cold, but this means that the effect of turning "on" the _PR3 power resources is unclear, so it is better to avoid doing that altogether. Consequently, there is no practical way do distinguish D3cold from D3hot for the device in question and the power states of it can be labeled so that D3hot is the deepest supported one (and Linux assumes that putting a device into D3hot via ACPI may cause power to be removed from it anyway, for legacy reasons). To work around the problem described above modify the ACPI enumeration of devices so that power resources are only used for device power management if the list of D0 power resources is not empty and make it mart D3cold as supported only if that is the case and the D3hot list of power resources is not empty too. Fixes: ef85bdbec444 ("ACPI / scan: Consolidate extraction of power resources lists") Link: https://bugzilla.kernel.org/show_bug.cgi?id=205057 Link: https://lore.kernel.org/linux-acpi/20200603194659.185757-1-hdegoede@redhat.com/ Reported-by: Hans de Goede Tested-by: Hans de Goede Tested-by: youling257@gmail.com Cc: 3.10+ # 3.10+ Signed-off-by: Rafael J. Wysocki Reviewed-by: Hans de Goede --- drivers/acpi/device_pm.c | 2 +- drivers/acpi/scan.c | 28 +++++++++++++++++++--------- 2 files changed, 20 insertions(+), 10 deletions(-) diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index 5832bc10aca8d..95e200b618bd2 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -186,7 +186,7 @@ int acpi_device_set_power(struct acpi_device *device, int state) * possibly drop references to the power resources in use. */ state = ACPI_STATE_D3_HOT; - /* If _PR3 is not available, use D3hot as the target state. */ + /* If D3cold is not supported, use D3hot as the target state. 
*/ if (!device->power.states[ACPI_STATE_D3_COLD].flags.valid) target_state = state; } else if (!device->power.states[state].flags.valid) { diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 6d34488953822..1b255e98de4d0 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -919,12 +919,9 @@ static void acpi_bus_init_power_state(struct acpi_device *device, int state) if (buffer.length && package && package->type == ACPI_TYPE_PACKAGE - && package->package.count) { - int err = acpi_extract_power_resources(package, 0, - &ps->resources); - if (!err) - device->power.flags.power_resources = 1; - } + && package->package.count) + acpi_extract_power_resources(package, 0, &ps->resources); + ACPI_FREE(buffer.pointer); } @@ -971,14 +968,27 @@ static void acpi_bus_get_power_flags(struct acpi_device *device) acpi_bus_init_power_state(device, i); INIT_LIST_HEAD(&device->power.states[ACPI_STATE_D3_COLD].resources); - if (!list_empty(&device->power.states[ACPI_STATE_D3_HOT].resources)) - device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1; - /* Set defaults for D0 and D3hot states (always valid) */ + /* Set the defaults for D0 and D3hot (always supported). */ device->power.states[ACPI_STATE_D0].flags.valid = 1; device->power.states[ACPI_STATE_D0].power = 100; device->power.states[ACPI_STATE_D3_HOT].flags.valid = 1; + /* + * Use power resources only if the D0 list of them is populated, because + * some platforms may provide _PR3 only to indicate D3cold support and + * in those cases the power resources list returned by it may be bogus. + */ + if (!list_empty(&device->power.states[ACPI_STATE_D0].resources)) { + device->power.flags.power_resources = 1; + /* + * D3cold is supported if the D3hot list of power resources is + * not empty. + */ + if (!list_empty(&device->power.states[ACPI_STATE_D3_HOT].resources)) + device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1; + } + if (acpi_bus_init_power(device)) device->flags.power_manageable = 0; } -- GitLab From 7a35e515a7055f483f87d12041c41db11b36c9ee Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Fri, 5 Jun 2020 13:59:05 +0200 Subject: [PATCH 1205/1971] KVM: VMX: Properly handle kvm_read/write_guest_virt*() result Syzbot reports the following issue: WARNING: CPU: 0 PID: 6819 at arch/x86/kvm/x86.c:618 kvm_inject_emulated_page_fault+0x210/0x290 arch/x86/kvm/x86.c:618 ... Call Trace: ... RIP: 0010:kvm_inject_emulated_page_fault+0x210/0x290 arch/x86/kvm/x86.c:618 ... nested_vmx_get_vmptr+0x1f9/0x2a0 arch/x86/kvm/vmx/nested.c:4638 handle_vmon arch/x86/kvm/vmx/nested.c:4767 [inline] handle_vmon+0x168/0x3a0 arch/x86/kvm/vmx/nested.c:4728 vmx_handle_exit+0x29c/0x1260 arch/x86/kvm/vmx/vmx.c:6067 'exception' we're trying to inject with kvm_inject_emulated_page_fault() comes from: nested_vmx_get_vmptr() kvm_read_guest_virt() kvm_read_guest_virt_helper() vcpu->arch.walk_mmu->gva_to_gpa() but it is only set when GVA to GPA conversion fails. In case it doesn't but we still fail kvm_vcpu_read_guest_page(), X86EMUL_IO_NEEDED is returned and nested_vmx_get_vmptr() calls kvm_inject_emulated_page_fault() with zeroed 'exception'. This happen when the argument is MMIO. Paolo also noticed that nested_vmx_get_vmptr() is not the only place in KVM code where kvm_read/write_guest_virt*() return result is mishandled. VMX instructions along with INVPCID have the same issue. This was already noticed before, e.g. see commit 541ab2aeb282 ("KVM: x86: work around leak of uninitialized stack contents") but was never fully fixed. 
KVM could've handled the request correctly by going to userspace and performing I/O but there doesn't seem to be a good need for such requests in the first place. Introduce vmx_handle_memory_failure() as an interim solution. Note, nested_vmx_get_vmptr() now has three possible outcomes: OK, PF, KVM_EXIT_INTERNAL_ERROR and callers need to know if userspace exit is needed (for KVM_EXIT_INTERNAL_ERROR) in case of failure. We don't seem to have a good enum describing this tristate, just add "int *ret" to nested_vmx_get_vmptr() interface to pass the information. Reported-by: syzbot+2a7156e11dc199bdbd8a@syzkaller.appspotmail.com Suggested-by: Sean Christopherson Signed-off-by: Vitaly Kuznetsov Message-Id: <20200605115906.532682-1-vkuznets@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/nested.c | 78 +++++++++++++++++++++------------------ arch/x86/kvm/vmx/vmx.c | 34 +++++++++++++++-- arch/x86/kvm/vmx/vmx.h | 2 + 3 files changed, 74 insertions(+), 40 deletions(-) diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 9c74a732b08d8..bcb50724be381 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -4624,19 +4624,24 @@ void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu) } } -static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer) +static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer, + int *ret) { gva_t gva; struct x86_exception e; + int r; if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu), vmcs_read32(VMX_INSTRUCTION_INFO), false, - sizeof(*vmpointer), &gva)) - return 1; + sizeof(*vmpointer), &gva)) { + *ret = 1; + return -EINVAL; + } - if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) { - kvm_inject_emulated_page_fault(vcpu, &e); - return 1; + r = kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e); + if (r != X86EMUL_CONTINUE) { + *ret = vmx_handle_memory_failure(vcpu, r, &e); + return -EINVAL; } return 0; @@ -4764,8 +4769,8 @@ static int handle_vmon(struct kvm_vcpu *vcpu) return 1; } - if (nested_vmx_get_vmptr(vcpu, &vmptr)) - return 1; + if (nested_vmx_get_vmptr(vcpu, &vmptr, &ret)) + return ret; /* * SDM 3: 24.11.5 @@ -4838,12 +4843,13 @@ static int handle_vmclear(struct kvm_vcpu *vcpu) u32 zero = 0; gpa_t vmptr; u64 evmcs_gpa; + int r; if (!nested_vmx_check_permission(vcpu)) return 1; - if (nested_vmx_get_vmptr(vcpu, &vmptr)) - return 1; + if (nested_vmx_get_vmptr(vcpu, &vmptr, &r)) + return r; if (!page_address_valid(vcpu, vmptr)) return nested_vmx_failValid(vcpu, @@ -4902,7 +4908,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu) u64 value; gva_t gva = 0; short offset; - int len; + int len, r; if (!nested_vmx_check_permission(vcpu)) return 1; @@ -4943,10 +4949,9 @@ static int handle_vmread(struct kvm_vcpu *vcpu) instr_info, true, len, &gva)) return 1; /* _system ok, nested_vmx_check_permission has verified cpl=0 */ - if (kvm_write_guest_virt_system(vcpu, gva, &value, len, &e)) { - kvm_inject_emulated_page_fault(vcpu, &e); - return 1; - } + r = kvm_write_guest_virt_system(vcpu, gva, &value, len, &e); + if (r != X86EMUL_CONTINUE) + return vmx_handle_memory_failure(vcpu, r, &e); } return nested_vmx_succeed(vcpu); @@ -4987,7 +4992,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu) unsigned long field; short offset; gva_t gva; - int len; + int len, r; /* * The value to write might be 32 or 64 bits, depending on L1's long @@ -5017,10 +5022,9 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu) if (get_vmx_mem_address(vcpu, exit_qualification, 
instr_info, false, len, &gva)) return 1; - if (kvm_read_guest_virt(vcpu, gva, &value, len, &e)) { - kvm_inject_emulated_page_fault(vcpu, &e); - return 1; - } + r = kvm_read_guest_virt(vcpu, gva, &value, len, &e); + if (r != X86EMUL_CONTINUE) + return vmx_handle_memory_failure(vcpu, r, &e); } field = kvm_register_readl(vcpu, (((instr_info) >> 28) & 0xf)); @@ -5103,12 +5107,13 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); gpa_t vmptr; + int r; if (!nested_vmx_check_permission(vcpu)) return 1; - if (nested_vmx_get_vmptr(vcpu, &vmptr)) - return 1; + if (nested_vmx_get_vmptr(vcpu, &vmptr, &r)) + return r; if (!page_address_valid(vcpu, vmptr)) return nested_vmx_failValid(vcpu, @@ -5170,6 +5175,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu) gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr; struct x86_exception e; gva_t gva; + int r; if (!nested_vmx_check_permission(vcpu)) return 1; @@ -5181,11 +5187,11 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu) true, sizeof(gpa_t), &gva)) return 1; /* *_system ok, nested_vmx_check_permission has verified cpl=0 */ - if (kvm_write_guest_virt_system(vcpu, gva, (void *)¤t_vmptr, - sizeof(gpa_t), &e)) { - kvm_inject_emulated_page_fault(vcpu, &e); - return 1; - } + r = kvm_write_guest_virt_system(vcpu, gva, (void *)¤t_vmptr, + sizeof(gpa_t), &e); + if (r != X86EMUL_CONTINUE) + return vmx_handle_memory_failure(vcpu, r, &e); + return nested_vmx_succeed(vcpu); } @@ -5209,7 +5215,7 @@ static int handle_invept(struct kvm_vcpu *vcpu) struct { u64 eptp, gpa; } operand; - int i; + int i, r; if (!(vmx->nested.msrs.secondary_ctls_high & SECONDARY_EXEC_ENABLE_EPT) || @@ -5236,10 +5242,9 @@ static int handle_invept(struct kvm_vcpu *vcpu) if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu), vmx_instruction_info, false, sizeof(operand), &gva)) return 1; - if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) { - kvm_inject_emulated_page_fault(vcpu, &e); - return 1; - } + r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e); + if (r != X86EMUL_CONTINUE) + return vmx_handle_memory_failure(vcpu, r, &e); /* * Nested EPT roots are always held through guest_mmu, @@ -5291,6 +5296,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu) u64 gla; } operand; u16 vpid02; + int r; if (!(vmx->nested.msrs.secondary_ctls_high & SECONDARY_EXEC_ENABLE_VPID) || @@ -5318,10 +5324,10 @@ static int handle_invvpid(struct kvm_vcpu *vcpu) if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu), vmx_instruction_info, false, sizeof(operand), &gva)) return 1; - if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) { - kvm_inject_emulated_page_fault(vcpu, &e); - return 1; - } + r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e); + if (r != X86EMUL_CONTINUE) + return vmx_handle_memory_failure(vcpu, r, &e); + if (operand.vpid >> 16) return nested_vmx_failValid(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 0e4f7ff54e6a4..08e26a9518c2d 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -1600,6 +1600,32 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu) return 1; } +/* + * Handles kvm_read/write_guest_virt*() result and either injects #PF or returns + * KVM_EXIT_INTERNAL_ERROR for cases not currently handled by KVM. Return value + * indicates whether exit to userspace is needed. 
+ */ +int vmx_handle_memory_failure(struct kvm_vcpu *vcpu, int r, + struct x86_exception *e) +{ + if (r == X86EMUL_PROPAGATE_FAULT) { + kvm_inject_emulated_page_fault(vcpu, e); + return 1; + } + + /* + * In case kvm_read/write_guest_virt*() failed with X86EMUL_IO_NEEDED + * while handling a VMX instruction KVM could've handled the request + * correctly by exiting to userspace and performing I/O but there + * doesn't seem to be a real use-case behind such requests, just return + * KVM_EXIT_INTERNAL_ERROR for now. + */ + vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; + vcpu->run->internal.ndata = 0; + + return 0; +} /* * Recognizes a pending MTF VM-exit and records the nested state for later @@ -5486,6 +5512,7 @@ static int handle_invpcid(struct kvm_vcpu *vcpu) u64 pcid; u64 gla; } operand; + int r; if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) { kvm_queue_exception(vcpu, UD_VECTOR); @@ -5508,10 +5535,9 @@ static int handle_invpcid(struct kvm_vcpu *vcpu) sizeof(operand), &gva)) return 1; - if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) { - kvm_inject_emulated_page_fault(vcpu, &e); - return 1; - } + r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e); + if (r != X86EMUL_CONTINUE) + return vmx_handle_memory_failure(vcpu, r, &e); if (operand.pcid >> 12 != 0) { kvm_inject_gp(vcpu, 0); diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 672c28f17e497..8a83b5edc8204 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -355,6 +355,8 @@ struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr); void pt_update_intercept_for_msr(struct vcpu_vmx *vmx); void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp); int vmx_find_msr_index(struct vmx_msrs *m, u32 msr); +int vmx_handle_memory_failure(struct kvm_vcpu *vcpu, int r, + struct x86_exception *e); #define POSTED_INTR_ON 0 #define POSTED_INTR_SN 1 -- GitLab From 25597f64c2f6fffa367d1e6ff4f62b9a751f9051 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Fri, 5 Jun 2020 13:59:06 +0200 Subject: [PATCH 1206/1971] Revert "KVM: x86: work around leak of uninitialized stack contents" Since handle_vmptrst()/handle_vmread() stopped injecting #PF unconditionally and switched to vmx_handle_memory_failure(), which just kills the guest with KVM_EXIT_INTERNAL_ERROR in case of MMIO access, zeroing 'exception' in kvm_write_guest_virt_system() is not needed anymore. This reverts commit 541ab2aeb28251bf7135c7961f3a6080eebcc705. Signed-off-by: Vitaly Kuznetsov Message-Id: <20200605115906.532682-2-vkuznets@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 13512baf6f6e3..c26dd1363151e 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5647,13 +5647,6 @@ int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val, /* kvm_write_guest_virt_system can pull in tons of pages. */ vcpu->arch.l1tf_flush_l1d = true; - /* - * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED - * is returned, but our callers are not ready for that and they blindly - * call kvm_inject_page_fault. Ensure that they at least do not leak - * uninitialized kernel stack memory into cr2 and error code.
- */ - memset(exception, 0, sizeof(*exception)); return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, PFERR_WRITE_MASK, exception); } -- GitLab From 5ae1452f5ff3084a6a273b9980499620eae20f7e Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Mon, 8 Jun 2020 13:23:45 +0200 Subject: [PATCH 1207/1971] KVM: selftests: Add x86_64/debug_regs to .gitignore Add x86_64/debug_regs to .gitignore. Reported-by: Marcelo Bandeira Condotta Fixes: 449aa906e67e ("KVM: selftests: Add KVM_SET_GUEST_DEBUG test") Signed-off-by: Vitaly Kuznetsov Message-Id: <20200608112346.593513-1-vkuznets@redhat.com> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/.gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore index f159718f90c0a..4527871527484 100644 --- a/tools/testing/selftests/kvm/.gitignore +++ b/tools/testing/selftests/kvm/.gitignore @@ -3,6 +3,7 @@ /s390x/resets /s390x/sync_regs_test /x86_64/cr4_cpuid_sync_test +/x86_64/debug_regs /x86_64/evmcs_test /x86_64/hyperv_cpuid /x86_64/mmio_warning_test -- GitLab From 75ad6e800217220b8ee63904fb472fd5c3819259 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Mon, 8 Jun 2020 13:23:46 +0200 Subject: [PATCH 1208/1971] KVM: selftests: fix vmx_preemption_timer_test build with GCC10 GCC10 fails to build vmx_preemption_timer_test: gcc -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 -fno-stack-protector -fno-PIE -I../../../../tools/include -I../../../../tools/arch/x86/include -I../../../../usr/include/ -Iinclude -Ix86_64 -Iinclude/x86_64 -I.. -pthread -no-pie x86_64/evmcs_test.c ./linux/tools/testing/selftests/kselftest_harness.h ./linux/tools/testing/selftests/kselftest.h ./linux/tools/testing/selftests/kvm/libkvm.a -o ./linux/tools/testing/selftests/kvm/x86_64/evmcs_test /usr/bin/ld: ./linux/tools/testing/selftests/kvm/libkvm.a(vmx.o): ./linux/tools/testing/selftests/kvm/include/x86_64/vmx.h:603: multiple definition of `ctrl_exit_rev'; /tmp/ccMQpvNt.o: ./linux/tools/testing/selftests/kvm/include/x86_64/vmx.h:603: first defined here /usr/bin/ld: ./linux/tools/testing/selftests/kvm/libkvm.a(vmx.o): ./linux/tools/testing/selftests/kvm/include/x86_64/vmx.h:602: multiple definition of `ctrl_pin_rev'; /tmp/ccMQpvNt.o: ./linux/tools/testing/selftests/kvm/include/x86_64/vmx.h:602: first defined here ... ctrl_exit_rev/ctrl_pin_rev/basic variables are only used in vmx_preemption_timer_test.c, just move them there. 
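The root cause is not specific to these selftests: GCC 10 builds with -fno-common by default, so a variable that is defined (rather than merely declared) in a header is emitted by every translation unit that includes it, and the final link fails. A minimal illustration under that assumption (hypothetical file names, not the selftest sources):

/* vmx_defs.h -- shared header (hypothetical) */
int ctrl_exit_rev;		/* a definition: every .c file including this
				 * header now emits its own copy of the symbol */

/*
 * With GCC <= 9 the duplicate tentative definitions were silently merged as
 * "common" symbols; with GCC 10 (-fno-common by default) linking two objects
 * that both include the header fails with:
 *
 *     multiple definition of `ctrl_exit_rev'
 *
 * The fix here simply moves the definitions into the only file that uses
 * them (vmx_preemption_timer_test.c). The general alternative is to keep
 * just a declaration in the header and define the symbol in exactly one
 * .c file:
 */
extern int ctrl_pin_rev;	/* header: declaration only */
/* int ctrl_pin_rev; */		/* one .c file: the single definition */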
Fixes: 8d7fbf01f9af ("KVM: selftests: VMX preemption timer migration test") Reported-by: Marcelo Bandeira Condotta Signed-off-by: Vitaly Kuznetsov Message-Id: <20200608112346.593513-2-vkuznets@redhat.com> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/include/x86_64/vmx.h | 4 ---- .../testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c | 4 ++++ 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/kvm/include/x86_64/vmx.h b/tools/testing/selftests/kvm/include/x86_64/vmx.h index ccff3e6e27048..766af9944294d 100644 --- a/tools/testing/selftests/kvm/include/x86_64/vmx.h +++ b/tools/testing/selftests/kvm/include/x86_64/vmx.h @@ -598,10 +598,6 @@ union vmx_ctrl_msr { }; }; -union vmx_basic basic; -union vmx_ctrl_msr ctrl_pin_rev; -union vmx_ctrl_msr ctrl_exit_rev; - struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva); bool prepare_for_vmx_operation(struct vmx_pages *vmx); void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp); diff --git a/tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c b/tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c index cc72b6188ca79..a7737af1224fe 100644 --- a/tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c +++ b/tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c @@ -31,6 +31,10 @@ bool l2_save_restore_done; static u64 l2_vmx_pt_start; volatile u64 l2_vmx_pt_finish; +union vmx_basic basic; +union vmx_ctrl_msr ctrl_pin_rev; +union vmx_ctrl_msr ctrl_exit_rev; + void l2_guest_code(void) { u64 vmx_pt_delta; -- GitLab From fb7333dfd812062d3d51f377e70c1d3a3788472b Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Mon, 8 Jun 2020 07:11:47 -0400 Subject: [PATCH 1209/1971] KVM: SVM: fix calls to is_intercept is_intercept takes an INTERCEPT_* constant, not SVM_EXIT_*; because of this, the compiler was removing the body of the conditionals, as if is_intercept returned 0. This unveils a latent bug: when clearing the VINTR intercept, int_ctl must also be changed in the L1 VMCB (svm->nested.hsave), just like the intercept itself is also changed in the L1 VMCB. Otherwise V_IRQ remains set and, due to the VINTR intercept being clear, we get a spurious injection of a vector 0 interrupt on the next L2->L1 vmexit. Reported-by: Qian Cai Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/nested.c | 2 +- arch/x86/kvm/svm/svm.c | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index 8a6db11dcb437..6bceafb191084 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -258,7 +258,7 @@ void sync_nested_vmcb_control(struct vcpu_svm *svm) /* Only a few fields of int_ctl are written by the processor. */ mask = V_IRQ_MASK | V_TPR_MASK; if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) && - is_intercept(svm, SVM_EXIT_VINTR)) { + is_intercept(svm, INTERCEPT_VINTR)) { /* * In order to request an interrupt window, L0 is usurping * svm->vmcb->control.int_ctl and possibly setting V_IRQ diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 9e333b91ff782..c8f5e87615d54 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -1378,6 +1378,8 @@ static void svm_clear_vintr(struct vcpu_svm *svm) /* Drop int_ctl fields related to VINTR injection. 
*/ svm->vmcb->control.int_ctl &= mask; if (is_guest_mode(&svm->vcpu)) { + svm->nested.hsave->control.int_ctl &= mask; + WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) != (svm->nested.ctl.int_ctl & V_TPR_MASK)); svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl & ~mask; @@ -1999,7 +2001,7 @@ void svm_set_gif(struct vcpu_svm *svm, bool value) */ if (vgif_enabled(svm)) clr_intercept(svm, INTERCEPT_STGI); - if (is_intercept(svm, SVM_EXIT_VINTR)) + if (is_intercept(svm, INTERCEPT_VINTR)) svm_clear_vintr(svm); enable_gif(svm); -- GitLab From e18035cf5cb3d2bf8e4f4d350a23608bd208b934 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Miros=C5=82aw?= Date: Mon, 8 Jun 2020 12:06:32 +0200 Subject: [PATCH 1210/1971] ALSA: pcm: fix snd_pcm_link() lockdep splat MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add and use snd_pcm_stream_lock_nested() in snd_pcm_link/unlink implementation. The code is fine, but generates a lockdep complaint: ============================================ WARNING: possible recursive locking detected 5.7.1mq+ #381 Tainted: G O -------------------------------------------- pulseaudio/4180 is trying to acquire lock: ffff888402d6f508 (&group->lock){-...}-{2:2}, at: snd_pcm_common_ioctl+0xda8/0xee0 [snd_pcm] but task is already holding lock: ffff8883f7a8cf18 (&group->lock){-...}-{2:2}, at: snd_pcm_common_ioctl+0xe4e/0xee0 [snd_pcm] other info that might help us debug this: Possible unsafe locking scenario: CPU0 ---- lock(&group->lock); lock(&group->lock); *** DEADLOCK *** May be due to missing lock nesting notation 2 locks held by pulseaudio/4180: #0: ffffffffa1a05190 (snd_pcm_link_rwsem){++++}-{3:3}, at: snd_pcm_common_ioctl+0xca0/0xee0 [snd_pcm] #1: ffff8883f7a8cf18 (&group->lock){-...}-{2:2}, at: snd_pcm_common_ioctl+0xe4e/0xee0 [snd_pcm] [...] 
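Lockdep tracks locks by class rather than by instance, so acquiring the group lock of a second, distinct substream while already holding one of the same class is reported as possible recursion unless the inner acquisition carries a nesting annotation. A minimal kernel-style sketch of the pattern the fix introduces (illustrative only; the real helper is snd_pcm_stream_lock_nested(), which also handles the nonatomic/mutex case):

#include <linux/mutex.h>
#include <linux/lockdep.h>

struct group {
	struct mutex lock;
};

/* Link b into a's group. Both locks belong to the same lock class. */
static void link_groups(struct group *a, struct group *b)
{
	mutex_lock(&a->lock);
	/*
	 * A plain mutex_lock(&b->lock) here trips "possible recursive
	 * locking detected" even though a != b; marking the inner lock as
	 * one nesting level deep tells lockdep the recursion is intended.
	 */
	mutex_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);

	/* ... reparent b ... */

	mutex_unlock(&b->lock);
	mutex_unlock(&a->lock);
}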
Cc: stable@vger.kernel.org Fixes: f57f3df03a8e ("ALSA: pcm: More fine-grained PCM link locking") Signed-off-by: Michał Mirosław Link: https://lore.kernel.org/r/37252c65941e58473b1219ca9fab03d48f47e3e3.1591610330.git.mirq-linux@rere.qmqm.pl Signed-off-by: Takashi Iwai --- sound/core/pcm_native.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index aef8602562783..c08732998a425 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -138,6 +138,16 @@ void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream) } EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq); +static void snd_pcm_stream_lock_nested(struct snd_pcm_substream *substream) +{ + struct snd_pcm_group *group = &substream->self_group; + + if (substream->pcm->nonatomic) + mutex_lock_nested(&group->mutex, SINGLE_DEPTH_NESTING); + else + spin_lock_nested(&group->lock, SINGLE_DEPTH_NESTING); +} + /** * snd_pcm_stream_unlock_irq - Unlock the PCM stream * @substream: PCM substream @@ -2194,7 +2204,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd) snd_pcm_stream_unlock_irq(substream); snd_pcm_group_lock_irq(target_group, nonatomic); - snd_pcm_stream_lock(substream1); + snd_pcm_stream_lock_nested(substream1); snd_pcm_group_assign(substream1, target_group); refcount_inc(&target_group->refs); snd_pcm_stream_unlock(substream1); @@ -2210,7 +2220,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd) static void relink_to_local(struct snd_pcm_substream *substream) { - snd_pcm_stream_lock(substream); + snd_pcm_stream_lock_nested(substream); snd_pcm_group_assign(substream, &substream->self_group); snd_pcm_stream_unlock(substream); } -- GitLab From 573fcbfd319ccef26caa3700320242accea7fd5c Mon Sep 17 00:00:00 2001 From: Hui Wang Date: Mon, 8 Jun 2020 19:55:41 +0800 Subject: [PATCH 1211/1971] ALSA: hda/realtek - add a pintbl quirk for several Lenovo machines A couple of Lenovo ThinkCentre machines all have 2 front mics and they use the same codec alc623 and have the same pin config, so add a pintbl entry for those machines to apply the fixup ALC283_FIXUP_HEADSET_MIC. Cc: Signed-off-by: Hui Wang Link: https://lore.kernel.org/r/20200608115541.9531-1-hui.wang@canonical.com Signed-off-by: Takashi Iwai --- sound/pci/hda/patch_realtek.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 0aa778ff7f2b2..6d73f8beadb6e 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -8161,6 +8161,12 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { ALC225_STANDARD_PINS, {0x12, 0xb7a60130}, {0x17, 0x90170110}), + SND_HDA_PIN_QUIRK(0x10ec0623, 0x17aa, "Lenovo", ALC283_FIXUP_HEADSET_MIC, + {0x14, 0x01014010}, + {0x17, 0x90170120}, + {0x18, 0x02a11030}, + {0x19, 0x02a1103f}, + {0x21, 0x0221101f}), {} }; -- GitLab From e649b3f0188f8fd34dd0dde8d43fd3312b902fb2 Mon Sep 17 00:00:00 2001 From: Eiichi Tsukata Date: Sat, 6 Jun 2020 13:26:27 +0900 Subject: [PATCH 1212/1971] KVM: x86: Fix APIC page invalidation race Commit b1394e745b94 ("KVM: x86: fix APIC page invalidation") tried to fix inappropriate APIC page invalidation by re-introducing arch specific kvm_arch_mmu_notifier_invalidate_range() and calling it from kvm_mmu_notifier_invalidate_range_start. 
However, the patch left a possible race where the VMCS APIC address cache is updated *before* it is unmapped: (Invalidator) kvm_mmu_notifier_invalidate_range_start() (Invalidator) kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD) (KVM VCPU) vcpu_enter_guest() (KVM VCPU) kvm_vcpu_reload_apic_access_page() (Invalidator) actually unmap page Because of the above race, there can be a mismatch between the host physical address stored in the APIC_ACCESS_PAGE VMCS field and the host physical address stored in the EPT entry for the APIC GPA (0xfee00000). When this happens, the processor will not trap APIC accesses, and will instead show the raw contents of the APIC-access page. Because Windows OS periodically checks for unexpected modifications to the LAPIC register, this will show up as a BSOD crash with BugCheck CRITICAL_STRUCTURE_CORRUPTION (109), which we are currently seeing in https://bugzilla.redhat.com/show_bug.cgi?id=1751017. The root cause of the issue is that kvm_arch_mmu_notifier_invalidate_range() cannot guarantee that no additional references are taken to the pages in the range before kvm_mmu_notifier_invalidate_range_end(). Fortunately, this case is supported by the MMU notifier API, as documented in include/linux/mmu_notifier.h: * If the subsystem * can't guarantee that no additional references are taken to * the pages in the range, it has to implement the * invalidate_range() notifier to remove any references taken * after invalidate_range_start(). The fix therefore is to reload the APIC-access page field in the VMCS from kvm_mmu_notifier_invalidate_range() instead of ..._range_start(). Cc: stable@vger.kernel.org Fixes: b1394e745b94 ("KVM: x86: fix APIC page invalidation") Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=197951 Signed-off-by: Eiichi Tsukata Message-Id: <20200606042627.61070-1-eiichi.tsukata@nutanix.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 7 ++----- include/linux/kvm_host.h | 4 ++-- virt/kvm/kvm_main.c | 26 ++++++++++++++++---------- 3 files changed, 20 insertions(+), 17 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c26dd1363151e..24de847af52eb 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -8270,9 +8270,8 @@ static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu) kvm_x86_ops.load_eoi_exitmap(vcpu, eoi_exit_bitmap); } -int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, - unsigned long start, unsigned long end, - bool blockable) +void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, + unsigned long start, unsigned long end) { unsigned long apic_address; @@ -8283,8 +8282,6 @@ int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT); if (start <= apic_address && apic_address < end) kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD); - - return 0; } void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index d38d6b9c24be0..e2f82131bb3e9 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1420,8 +1420,8 @@ static inline long kvm_arch_vcpu_async_ioctl(struct file *filp, } #endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */ -int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, - unsigned long start, unsigned long end, bool blockable); +void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, + unsigned long start, unsigned long end); #ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu
*vcpu); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 4db151f6101ed..7b6013f2ba195 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -155,10 +155,9 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm); static unsigned long long kvm_createvm_count; static unsigned long long kvm_active_vms; -__weak int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, - unsigned long start, unsigned long end, bool blockable) +__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, + unsigned long start, unsigned long end) { - return 0; } bool kvm_is_zone_device_pfn(kvm_pfn_t pfn) @@ -384,6 +383,18 @@ static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn) return container_of(mn, struct kvm, mmu_notifier); } +static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + struct kvm *kvm = mmu_notifier_to_kvm(mn); + int idx; + + idx = srcu_read_lock(&kvm->srcu); + kvm_arch_mmu_notifier_invalidate_range(kvm, start, end); + srcu_read_unlock(&kvm->srcu, idx); +} + static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long address, @@ -408,7 +419,6 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, { struct kvm *kvm = mmu_notifier_to_kvm(mn); int need_tlb_flush = 0, idx; - int ret; idx = srcu_read_lock(&kvm->srcu); spin_lock(&kvm->mmu_lock); @@ -425,14 +435,9 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, kvm_flush_remote_tlbs(kvm); spin_unlock(&kvm->mmu_lock); - - ret = kvm_arch_mmu_notifier_invalidate_range(kvm, range->start, - range->end, - mmu_notifier_range_blockable(range)); - srcu_read_unlock(&kvm->srcu, idx); - return ret; + return 0; } static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn, @@ -538,6 +543,7 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn, } static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { + .invalidate_range = kvm_mmu_notifier_invalidate_range, .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start, .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end, .clear_flush_young = kvm_mmu_notifier_clear_flush_young, -- GitLab From 1f2436229bf64ac040f2f5018df059c21fc5526a Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Sun, 7 Jun 2020 17:36:15 -0700 Subject: [PATCH 1213/1971] selftests/bpf: Fix ringbuf selftest sample counting undeterminism Fix test race, in which background poll can get either 5 or 6 samples, depending on timing of notification. Prevent this by open-coding sample triggering and forcing notification for the very last sample only. Also switch to using atomic increments and exchanges for more obviously reliable counting and checking. Additionally, check expected processed sample counters for single-threaded use cases as well. 
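The counting pattern being adopted can be shown in isolation. The following is a minimal standalone model (not the selftest itself; the thread and the values are made up) using the same GCC/Clang __atomic builtins, where the checker atomically reads and resets the counter so that every polling round starts from zero:

#include <pthread.h>
#include <stdio.h>

static int sample_cnt;

static void cnt_inc(int *cnt)
{
	__atomic_add_fetch(cnt, 1, __ATOMIC_SEQ_CST);
}

static int cnt_xchg(int *cnt, int val)
{
	/* Atomically fetch the current value and replace it in one step. */
	return __atomic_exchange_n(cnt, val, __ATOMIC_SEQ_CST);
}

static void *producer(void *arg)
{
	int i;

	(void)arg;
	for (i = 0; i < 2; i++)
		cnt_inc(&sample_cnt);
	return NULL;
}

int main(void)
{
	pthread_t t;
	int cnt;

	pthread_create(&t, NULL, producer, NULL);
	pthread_join(&t, NULL);

	cnt = cnt_xchg(&sample_cnt, 0);		/* read-and-reset */
	printf("expected 2 samples, got %d\n", cnt);
	printf("counter after reset: %d\n", sample_cnt);
	return 0;
}

It builds with a plain "cc -pthread" and prints the expected counts deterministically.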
Fixes: 9a5f25ad30e5 ("selftests/bpf: Fix sample_cnt shared between two threads") Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200608003615.3549991-1-andriin@fb.com --- .../selftests/bpf/prog_tests/ringbuf.c | 42 +++++++++++++++---- 1 file changed, 35 insertions(+), 7 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/ringbuf.c b/tools/testing/selftests/bpf/prog_tests/ringbuf.c index 2bba908dfa637..c1650548433c4 100644 --- a/tools/testing/selftests/bpf/prog_tests/ringbuf.c +++ b/tools/testing/selftests/bpf/prog_tests/ringbuf.c @@ -25,13 +25,23 @@ struct sample { char comm[16]; }; -static volatile int sample_cnt; +static int sample_cnt; + +static void atomic_inc(int *cnt) +{ + __atomic_add_fetch(cnt, 1, __ATOMIC_SEQ_CST); +} + +static int atomic_xchg(int *cnt, int val) +{ + return __atomic_exchange_n(cnt, val, __ATOMIC_SEQ_CST); +} static int process_sample(void *ctx, void *data, size_t len) { struct sample *s = data; - sample_cnt++; + atomic_inc(&sample_cnt); switch (s->seq) { case 0: @@ -76,7 +86,7 @@ void test_ringbuf(void) const size_t rec_sz = BPF_RINGBUF_HDR_SZ + sizeof(struct sample); pthread_t thread; long bg_ret = -1; - int err; + int err, cnt; skel = test_ringbuf__open_and_load(); if (CHECK(!skel, "skel_open_load", "skeleton open&load failed\n")) @@ -116,11 +126,15 @@ void test_ringbuf(void) /* -EDONE is used as an indicator that we are done */ if (CHECK(err != -EDONE, "err_done", "done err: %d\n", err)) goto cleanup; + cnt = atomic_xchg(&sample_cnt, 0); + CHECK(cnt != 2, "cnt", "exp %d samples, got %d\n", 2, cnt); /* we expect extra polling to return nothing */ err = ring_buffer__poll(ringbuf, 0); if (CHECK(err != 0, "extra_samples", "poll result: %d\n", err)) goto cleanup; + cnt = atomic_xchg(&sample_cnt, 0); + CHECK(cnt != 0, "cnt", "exp %d samples, got %d\n", 0, cnt); CHECK(skel->bss->dropped != 0, "err_dropped", "exp %ld, got %ld\n", 0L, skel->bss->dropped); @@ -136,6 +150,8 @@ void test_ringbuf(void) 3L * rec_sz, skel->bss->cons_pos); err = ring_buffer__poll(ringbuf, -1); CHECK(err <= 0, "poll_err", "err %d\n", err); + cnt = atomic_xchg(&sample_cnt, 0); + CHECK(cnt != 2, "cnt", "exp %d samples, got %d\n", 2, cnt); /* start poll in background w/ long timeout */ err = pthread_create(&thread, NULL, poll_thread, (void *)(long)10000); @@ -164,6 +180,8 @@ void test_ringbuf(void) 2L, skel->bss->total); CHECK(skel->bss->discarded != 1, "err_discarded", "exp %ld, got %ld\n", 1L, skel->bss->discarded); + cnt = atomic_xchg(&sample_cnt, 0); + CHECK(cnt != 0, "cnt", "exp %d samples, got %d\n", 0, cnt); /* clear flags to return to "adaptive" notification mode */ skel->bss->flags = 0; @@ -178,10 +196,20 @@ void test_ringbuf(void) if (CHECK(err != EBUSY, "try_join", "err %d\n", err)) goto cleanup; + /* still no samples, because consumer is behind */ + cnt = atomic_xchg(&sample_cnt, 0); + CHECK(cnt != 0, "cnt", "exp %d samples, got %d\n", 0, cnt); + + skel->bss->dropped = 0; + skel->bss->total = 0; + skel->bss->discarded = 0; + + skel->bss->value = 333; + syscall(__NR_getpgid); /* now force notifications */ skel->bss->flags = BPF_RB_FORCE_WAKEUP; - sample_cnt = 0; - trigger_samples(); + skel->bss->value = 777; + syscall(__NR_getpgid); /* now we should get a pending notification */ usleep(50000); @@ -193,8 +221,8 @@ void test_ringbuf(void) goto cleanup; /* 3 rounds, 2 samples each */ - CHECK(sample_cnt != 6, "wrong_sample_cnt", - "expected to see %d samples, got %d\n", 6, sample_cnt); + cnt = atomic_xchg(&sample_cnt, 0); + 
CHECK(cnt != 6, "cnt", "exp %d samples, got %d\n", 6, cnt); /* BPF side did everything right */ CHECK(skel->bss->dropped != 0, "err_dropped", "exp %ld, got %ld\n", -- GitLab From 56965ac7253ece22050cf31ea5d1558e4c49cff2 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 Jun 2020 07:52:34 +0200 Subject: [PATCH 1214/1971] net/sysctl: use cpumask_parse in flow_limit_cpu_sysctl cpumask_parse_user works on __user pointers, so this is wrong now. Fixes: 32927393dc1c ("sysctl: pass kernel pointers to ->proc_handler") Reported-by: build test robot Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- net/core/sysctl_net_core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 0ddb13a6282b0..d14d049af52ae 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -126,7 +126,7 @@ static int flow_limit_cpu_sysctl(struct ctl_table *table, int write, return -ENOMEM; if (write) { - ret = cpumask_parse_user(buffer, *lenp, mask); + ret = cpumask_parse(buffer, mask); if (ret) goto done; -- GitLab From c7388c1f8fb6d8687403e9554c873f9df4512797 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 Jun 2020 07:52:35 +0200 Subject: [PATCH 1215/1971] net/sysctl: remove leftover __user annotations on neigh_proc_dointvec* Remove the leftover __user annotation on the prototypes for neigh_proc_dointvec*. The implementations already got this right, but the headers kept the __user tags around. Fixes: 32927393dc1c ("sysctl: pass kernel pointers to ->proc_handler") Reported-by: build test robot Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- include/net/neighbour.h | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/include/net/neighbour.h b/include/net/neighbour.h index e1476775769c9..81ee17594c329 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -392,13 +392,12 @@ void *neigh_seq_next(struct seq_file *, void *, loff_t *); void neigh_seq_stop(struct seq_file *, void *); int neigh_proc_dointvec(struct ctl_table *ctl, int write, - void __user *buffer, size_t *lenp, loff_t *ppos); + void *buffer, size_t *lenp, loff_t *ppos); int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write, - void __user *buffer, + void *buffer, size_t *lenp, loff_t *ppos); int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write, - void __user *buffer, - size_t *lenp, loff_t *ppos); + void *buffer, size_t *lenp, loff_t *ppos); int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p, proc_handler *proc_handler); -- GitLab From a2541dcb51127dc31934ab93bc99ae7df458e41b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 Jun 2020 07:52:36 +0200 Subject: [PATCH 1216/1971] random: fix an incorrect __user annotation on proc_do_entropy No user pointers for sysctls anymore. 
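For context, after commit 32927393dc1c every ->proc_handler receives a kernel buffer, so a handler prototype now looks roughly like the following kernel-style sketch (the handler name is hypothetical; the actual patch only adjusts the prototype of proc_do_entropy()):

#include <linux/sysctl.h>

/*
 * Post-32927393dc1c prototype: 'buffer' is a kernel pointer, so there is no
 * __user annotation and no copy_{to,from}_user() on it.
 */
static int example_dointvec(struct ctl_table *table, int write,
			    void *buffer, size_t *lenp, loff_t *ppos)
{
	/* Delegate to the generic integer handler, which now has the same
	 * kernel-pointer signature. */
	return proc_dointvec(table, write, buffer, lenp, ppos);
}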
Fixes: 32927393dc1c ("sysctl: pass kernel pointers to ->proc_handler") Reported-by: build test robot Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- drivers/char/random.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/char/random.c b/drivers/char/random.c index 1e0db78b83baa..cf8a43f5eb2a0 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -2087,7 +2087,7 @@ static int proc_do_uuid(struct ctl_table *table, int write, * Return entropy available scaled to integral bits */ static int proc_do_entropy(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos) + void *buffer, size_t *lenp, loff_t *ppos) { struct ctl_table fake_table; int entropy_count; -- GitLab From 7ff0d4490ebadae8037a079a70c5cac13385a808 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 Jun 2020 07:52:37 +0200 Subject: [PATCH 1217/1971] trace: fix an incorrect __user annotation on stack_trace_sysctl No user pointers for sysctls anymore. Fixes: 32927393dc1c ("sysctl: pass kernel pointers to ->proc_handler") Reported-by: build test robot Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- include/linux/ftrace.h | 5 ++--- kernel/trace/trace_stack.c | 5 ++--- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index ddfc377de0d2c..fce81238f304d 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -319,9 +319,8 @@ static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs, extern int stack_tracer_enabled; -int stack_trace_sysctl(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos); +int stack_trace_sysctl(struct ctl_table *table, int write, void *buffer, + size_t *lenp, loff_t *ppos); /* DO NOT MODIFY THIS VARIABLE DIRECTLY! */ DECLARE_PER_CPU(int, disable_stack_tracer); diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index c557f42a93971..98bba4764c527 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c @@ -515,9 +515,8 @@ static const struct file_operations stack_trace_filter_fops = { #endif /* CONFIG_DYNAMIC_FTRACE */ int -stack_trace_sysctl(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos) +stack_trace_sysctl(struct ctl_table *table, int write, void *buffer, + size_t *lenp, loff_t *ppos) { int was_enabled; int ret; -- GitLab From f3a1381ec84ec1fdc8091f9935f97e836792b6a7 Mon Sep 17 00:00:00 2001 From: Sivaprakash Murugesan Date: Mon, 8 Jun 2020 15:07:24 +0530 Subject: [PATCH 1218/1971] dt-bindings: mailbox: Add YAML schemas for QCOM APCS global block Qualcomm APCS global block provides a bunch of generic properties which are required in a device tree. Add YAML schema for these properties. 
Reviewed-by: Rob Herring Signed-off-by: Sivaprakash Murugesan Signed-off-by: Jassi Brar --- .../mailbox/qcom,apcs-kpss-global.txt | 88 ------------------- .../mailbox/qcom,apcs-kpss-global.yaml | 86 ++++++++++++++++++ 2 files changed, 86 insertions(+), 88 deletions(-) delete mode 100644 Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.txt create mode 100644 Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml diff --git a/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.txt b/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.txt deleted file mode 100644 index beec612dbe6a4..0000000000000 --- a/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.txt +++ /dev/null @@ -1,88 +0,0 @@ -Binding for the Qualcomm APCS global block -========================================== - -This binding describes the APCS "global" block found in various Qualcomm -platforms. - -- compatible: - Usage: required - Value type: - Definition: must be one of: - "qcom,msm8916-apcs-kpss-global", - "qcom,msm8996-apcs-hmss-global" - "qcom,msm8998-apcs-hmss-global" - "qcom,qcs404-apcs-apps-global" - "qcom,sc7180-apss-shared" - "qcom,sdm845-apss-shared" - "qcom,sm8150-apss-shared" - "qcom,ipq8074-apcs-apps-global" - -- reg: - Usage: required - Value type: - Definition: must specify the base address and size of the global block - -- clocks: - Usage: required if #clock-names property is present - Value type: - Definition: phandles to the two parent clocks of the clock driver. - -- #mbox-cells: - Usage: required - Value type: - Definition: as described in mailbox.txt, must be 1 - -- #clock-cells: - Usage: optional - Value type: - Definition: as described in clock.txt, must be 0 - -- clock-names: - Usage: required if the platform data based clock driver needs to - retrieve the parent clock names from device tree. - This will requires two mandatory clocks to be defined. - Value type: - Definition: must be "pll" and "aux" - -= EXAMPLE -The following example describes the APCS HMSS found in MSM8996 and part of the -GLINK RPM referencing the "rpm_hlos" doorbell therein. 
- - apcs_glb: mailbox@9820000 { - compatible = "qcom,msm8996-apcs-hmss-global"; - reg = <0x9820000 0x1000>; - - #mbox-cells = <1>; - }; - - rpm-glink { - compatible = "qcom,glink-rpm"; - - interrupts = ; - - qcom,rpm-msg-ram = <&rpm_msg_ram>; - - mboxes = <&apcs_glb 0>; - mbox-names = "rpm_hlos"; - }; - -Below is another example of the APCS binding on MSM8916 platforms: - - apcs: mailbox@b011000 { - compatible = "qcom,msm8916-apcs-kpss-global"; - reg = <0xb011000 0x1000>; - #mbox-cells = <1>; - clocks = <&a53pll>; - #clock-cells = <0>; - }; - -Below is another example of the APCS binding on QCS404 platforms: - - apcs_glb: mailbox@b011000 { - compatible = "qcom,qcs404-apcs-apps-global", "syscon"; - reg = <0x0b011000 0x1000>; - #mbox-cells = <1>; - clocks = <&apcs_hfpll>, <&gcc GCC_GPLL0_AO_OUT_MAIN>; - clock-names = "pll", "aux"; - #clock-cells = <0>; - }; diff --git a/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml b/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml new file mode 100644 index 0000000000000..12eff942708de --- /dev/null +++ b/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml @@ -0,0 +1,86 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: "http://devicetree.org/schemas/mailbox/qcom,apcs-kpss-global.yaml#" +$schema: "http://devicetree.org/meta-schemas/core.yaml#" + +title: Qualcomm APCS global block bindings + +description: + This binding describes the APCS "global" block found in various Qualcomm + platforms. + +maintainers: + - Sivaprakash Murugesan + +properties: + compatible: + enum: + - qcom,ipq8074-apcs-apps-global + - qcom,msm8916-apcs-kpss-global + - qcom,msm8996-apcs-hmss-global + - qcom,msm8998-apcs-hmss-global + - qcom,qcs404-apcs-apps-global + - qcom,sc7180-apss-shared + - qcom,sdm845-apss-shared + - qcom,sm8150-apss-shared + + reg: + maxItems: 1 + + clocks: + description: phandles to the parent clocks of the clock driver + items: + - description: primary pll parent of the clock driver + - description: auxiliary parent + + '#mbox-cells': + const: 1 + + '#clock-cells': + const: 0 + + clock-names: + items: + - const: pll + - const: aux + +required: + - compatible + - reg + - '#mbox-cells' + +additionalProperties: false + +examples: + + # Example apcs with msm8996 + - | + #include + apcs_glb: mailbox@9820000 { + compatible = "qcom,msm8996-apcs-hmss-global"; + reg = <0x9820000 0x1000>; + + #mbox-cells = <1>; + }; + + rpm-glink { + compatible = "qcom,glink-rpm"; + interrupts = ; + qcom,rpm-msg-ram = <&rpm_msg_ram>; + mboxes = <&apcs_glb 0>; + mbox-names = "rpm_hlos"; + }; + + # Example apcs with qcs404 + - | + #define GCC_APSS_AHB_CLK_SRC 1 + #define GCC_GPLL0_AO_OUT_MAIN 123 + apcs: mailbox@b011000 { + compatible = "qcom,qcs404-apcs-apps-global"; + reg = <0x0b011000 0x1000>; + #mbox-cells = <1>; + clocks = <&apcs_hfpll>, <&gcc GCC_GPLL0_AO_OUT_MAIN>; + clock-names = "pll", "aux"; + #clock-cells = <0>; + }; -- GitLab From 91970bef48d68d06b2bb3f464b572ad50941f6a9 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sat, 6 Jun 2020 12:25:50 -0700 Subject: [PATCH 1219/1971] arm64: ftrace: Change CONFIG_FTRACE_WITH_REGS to CONFIG_DYNAMIC_FTRACE_WITH_REGS CONFIG_FTRACE_WITH_REGS does not exist as a Kconfig symbol. 
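The mistake compiles cleanly because IS_ENABLED() evaluates to 0 for any symbol that is not defined, so a misspelled or non-existent CONFIG_ option silently disables the guarded code instead of causing a build error. A small kernel-style sketch of the failure mode (the function is hypothetical, not the ftrace code):

#include <linux/kconfig.h>
#include <linux/printk.h>

static void report_ftrace_regs_support(void)
{
	/*
	 * IS_ENABLED() is pure macro expansion: an undefined symbol yields 0
	 * with no diagnostic, so this branch is silently compiled out.
	 */
	if (IS_ENABLED(CONFIG_FTRACE_WITH_REGS))	/* no such symbol */
		pr_info("never printed\n");

	/* The real option; true when the arch builds ftrace with regs. */
	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		pr_info("ftrace with regs is available\n");
}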
Fixes: 3b23e4991fb6 ("arm64: implement ftrace with regs") Signed-off-by: Joe Perches Acked-by: Mark Rutland Link: https://lore.kernel.org/r/b9b27f2233bd1fa31d72ff937beefdae0e2104e5.camel@perches.com Signed-off-by: Will Deacon --- arch/arm64/kernel/ftrace.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c index 8618faa82e6d2..86a5cf9bc19a1 100644 --- a/arch/arm64/kernel/ftrace.c +++ b/arch/arm64/kernel/ftrace.c @@ -69,7 +69,8 @@ static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr) if (addr == FTRACE_ADDR) return &plt[FTRACE_PLT_IDX]; - if (addr == FTRACE_REGS_ADDR && IS_ENABLED(CONFIG_FTRACE_WITH_REGS)) + if (addr == FTRACE_REGS_ADDR && + IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS)) return &plt[FTRACE_REGS_PLT_IDX]; #endif return NULL; -- GitLab From 961abd78adcb4c72c343fcd9f9dc5e2ebbe9b448 Mon Sep 17 00:00:00 2001 From: Shaokun Zhang Date: Fri, 5 Jun 2020 17:43:41 +0800 Subject: [PATCH 1220/1971] drivers/perf: hisi: Fix wrong value for all counters enable In the L3C uncore PMU driver, bit 16 is used to enable and disable all counters. A wrong value is given in the driver; it still works only because the bit's default value is 1'b1 and each PMU counter has its own control bits too. Let's fix the wrong value. Fixes: 2940bc433370 ("perf: hisi: Add support for HiSilicon SoC L3C PMU driver") Signed-off-by: Shaokun Zhang Cc: Will Deacon Cc: Mark Rutland Link: https://lore.kernel.org/r/1591350221-32275-1-git-send-email-zhangshaokun@hisilicon.com Signed-off-by: Will Deacon --- drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c index 8dd1278bec043..7719ae4e2c561 100644 --- a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c @@ -35,7 +35,7 @@ /* L3C has 8-counters */ #define L3C_NR_COUNTERS 0x8 -#define L3C_PERF_CTRL_EN 0x20000 +#define L3C_PERF_CTRL_EN 0x10000 #define L3C_EVTYPE_NONE 0xff /* -- GitLab From 1eb2f96d0bffb2cca1fb7249ad9b6b4daa1d1d6a Mon Sep 17 00:00:00 2001 From: Chen Zhou Date: Fri, 8 May 2020 20:40:00 +0800 Subject: [PATCH 1221/1971] sunrpc: use kmemdup_nul() in gssp_stringify() It is more efficient to use kmemdup_nul() if the size is known exactly. According to the doc: "Note: Use kmemdup_nul() instead if the size is known exactly." Signed-off-by: Chen Zhou Signed-off-by: J. Bruce Fields --- net/sunrpc/auth_gss/gss_rpc_upcall.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/sunrpc/auth_gss/gss_rpc_upcall.c b/net/sunrpc/auth_gss/gss_rpc_upcall.c index 0349f455a8624..af9c7f43859c4 100644 --- a/net/sunrpc/auth_gss/gss_rpc_upcall.c +++ b/net/sunrpc/auth_gss/gss_rpc_upcall.c @@ -223,7 +223,7 @@ static int gssp_alloc_receive_pages(struct gssx_arg_accept_sec_context *arg) static char *gssp_stringify(struct xdr_netobj *netobj) { - return kstrndup(netobj->data, netobj->len, GFP_KERNEL); + return kmemdup_nul(netobj->data, netobj->len, GFP_KERNEL); } static void gssp_hostbased_service(char **principal) -- GitLab From cc8246debbe4146668acafcfa45358cfe286d03c Mon Sep 17 00:00:00 2001 From: Heinrich Schuchardt Date: Fri, 5 Jun 2020 05:04:05 +0200 Subject: [PATCH 1222/1971] docs: dev-tools: coccinelle: underlines Underline lengths should match the lengths of headings to avoid build warnings with Sphinx.
Signed-off-by: Heinrich Schuchardt Acked-by: Julia Lawall Link: https://lore.kernel.org/r/20200605030405.6479-1-xypron.glpk@gmx.de Signed-off-by: Jonathan Corbet --- Documentation/dev-tools/coccinelle.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Documentation/dev-tools/coccinelle.rst b/Documentation/dev-tools/coccinelle.rst index 00a3409b0c288..70274c3f5f5a0 100644 --- a/Documentation/dev-tools/coccinelle.rst +++ b/Documentation/dev-tools/coccinelle.rst @@ -14,7 +14,7 @@ many uses in kernel development, including the application of complex, tree-wide patches and detection of problematic programming patterns. Getting Coccinelle -------------------- +------------------ The semantic patches included in the kernel use features and options which are provided by Coccinelle version 1.0.0-rc11 and above. @@ -56,7 +56,7 @@ found at: https://github.com/coccinelle/coccinelle/blob/master/install.txt Supplemental documentation ---------------------------- +-------------------------- For supplemental documentation refer to the wiki: @@ -128,7 +128,7 @@ To enable verbose messages set the V= variable, for example:: make coccicheck MODE=report V=1 Coccinelle parallelization ---------------------------- +-------------------------- By default, coccicheck tries to run as parallel as possible. To change the parallelism, set the J= variable. For example, to run across 4 CPUs:: @@ -333,7 +333,7 @@ as an example if requiring at least Coccinelle >= 1.0.5:: // Requires: 1.0.5 Proposing new semantic patches -------------------------------- +------------------------------ New semantic patches can be proposed and submitted by kernel developers. For sake of clarity, they should be organized in the -- GitLab From 99702304745941271c73ad7de18f38d93844929c Mon Sep 17 00:00:00 2001 From: Jonathan Corbet Date: Wed, 3 Jun 2020 16:10:20 -0600 Subject: [PATCH 1223/1971] docs: Update the location of the LF NDA program The link to the Linux Foundation NDA program got broken in one of their web-site thrashups; now that the information is back online, point to its current location. This should last until the next thrashup... Reported-by: Lukas Bulwahn Reviewed-by: Lukas Bulwahn Signed-off-by: Jonathan Corbet --- Documentation/process/3.Early-stage.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/process/3.Early-stage.rst b/Documentation/process/3.Early-stage.rst index be00716071d4d..7067abbd75bce 100644 --- a/Documentation/process/3.Early-stage.rst +++ b/Documentation/process/3.Early-stage.rst @@ -216,7 +216,7 @@ a non-disclosure agreement. The Linux Foundation operates an NDA program designed to help with this sort of situation; more information can be found at: - http://www.linuxfoundation.org/en/NDA_program + https://www.linuxfoundation.org/nda/ This kind of review is often enough to avoid serious problems later on without requiring public disclosure of the project. -- GitLab From a5001484e203377e553326bc8f3f91e4ea80bcc2 Mon Sep 17 00:00:00 2001 From: Lukas Bulwahn Date: Sun, 31 May 2020 20:35:56 +0200 Subject: [PATCH 1224/1971] doc: zh_CN: use doc reference to resolve undefined label warning Documentation generation warns: Documentation/translations/zh_CN/filesystems/debugfs.rst:5: WARNING: undefined label: debugfs_index Use doc reference for files rather than introducing a label to refer to. This resolves the warning above. 
Signed-off-by: Lukas Bulwahn Reviewed-by: Alex Shi Link: https://lore.kernel.org/r/20200531183556.5751-1-lukas.bulwahn@gmail.com Signed-off-by: Jonathan Corbet --- Documentation/translations/zh_CN/filesystems/debugfs.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/translations/zh_CN/filesystems/debugfs.rst b/Documentation/translations/zh_CN/filesystems/debugfs.rst index f8a28793c2777..822c4d42fdf95 100644 --- a/Documentation/translations/zh_CN/filesystems/debugfs.rst +++ b/Documentation/translations/zh_CN/filesystems/debugfs.rst @@ -2,7 +2,7 @@ .. include:: ../disclaimer-zh_CN.rst -:Original: :ref:`Documentation/filesystems/debugfs.txt ` +:Original: :doc:`../../../filesystems/debugfs` ======= Debugfs -- GitLab From b55e45a59cb6a421fc52084ea154e7cf21e6b569 Mon Sep 17 00:00:00 2001 From: Lukas Bulwahn Date: Sun, 31 May 2020 20:56:18 +0200 Subject: [PATCH 1225/1971] docs: it_IT: address invalid reference warnings Documentation generation warns: it_IT/kernel-hacking/hacking.rst: WARNING: unknown document: ../core-api/symbol/namespaces it_IT/process/5.Posting.rst: WARNING: undefined label: it_email_clients it_IT/process/submitting-patches.rst: WARNING: undefined label: it_email_clients it_IT/process/howto.rst: WARNING: undefined label: it_managementstyle Refer to English documentation, as Italian translation does not exist, and add labels for Italian process documents to resolve label references. Signed-off-by: Lukas Bulwahn Link: https://lore.kernel.org/r/20200531185618.7099-1-lukas.bulwahn@gmail.com Signed-off-by: Jonathan Corbet --- Documentation/translations/it_IT/kernel-hacking/hacking.rst | 4 ++-- Documentation/translations/it_IT/process/email-clients.rst | 2 ++ Documentation/translations/it_IT/process/management-style.rst | 2 ++ 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/Documentation/translations/it_IT/kernel-hacking/hacking.rst b/Documentation/translations/it_IT/kernel-hacking/hacking.rst index 6aab27a8d3238..e9a2e92134f0f 100644 --- a/Documentation/translations/it_IT/kernel-hacking/hacking.rst +++ b/Documentation/translations/it_IT/kernel-hacking/hacking.rst @@ -634,7 +634,7 @@ Definita in ``include/linux/export.h`` Questa è una variate di `EXPORT_SYMBOL()` che permette di specificare uno spazio dei nomi. Lo spazio dei nomi è documentato in -:doc:`../core-api/symbol-namespaces` +:doc:`../../../core-api/symbol-namespaces` :c:func:`EXPORT_SYMBOL_NS_GPL()` -------------------------------- @@ -643,7 +643,7 @@ Definita in ``include/linux/export.h`` Questa è una variate di `EXPORT_SYMBOL_GPL()` che permette di specificare uno spazio dei nomi. Lo spazio dei nomi è documentato in -:doc:`../core-api/symbol-namespaces` +:doc:`../../../core-api/symbol-namespaces` Procedure e convenzioni ======================= diff --git a/Documentation/translations/it_IT/process/email-clients.rst b/Documentation/translations/it_IT/process/email-clients.rst index 89abf6d325f23..66d3d65776f70 100644 --- a/Documentation/translations/it_IT/process/email-clients.rst +++ b/Documentation/translations/it_IT/process/email-clients.rst @@ -3,6 +3,8 @@ :Original: :doc:`../../../process/email-clients` :Translator: Alessia Mantegazza +.. 
_it_email_clients: + Informazioni sui programmi di posta elettronica per Linux ========================================================= diff --git a/Documentation/translations/it_IT/process/management-style.rst b/Documentation/translations/it_IT/process/management-style.rst index c709285138a72..76ed074082eab 100644 --- a/Documentation/translations/it_IT/process/management-style.rst +++ b/Documentation/translations/it_IT/process/management-style.rst @@ -3,6 +3,8 @@ :Original: :doc:`../../../process/management-style` :Translator: Alessia Mantegazza +.. _it_managementstyle: + Il modello di gestione del kernel Linux ======================================= -- GitLab From 93431e0607e58a3c997a134adc0fad4fdc147dab Mon Sep 17 00:00:00 2001 From: "Alexander A. Klimov" Date: Tue, 26 May 2020 08:05:44 +0200 Subject: [PATCH 1226/1971] Replace HTTP links with HTTPS ones: documentation Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Deterministic algorithm: For each file: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. Signed-off-by: Alexander A. Klimov Link: https://lore.kernel.org/r/20200526060544.25127-1-grandmaster@al2klimov.de Signed-off-by: Jonathan Corbet --- Documentation/COPYING-logo | 2 +- Documentation/admin-guide/LSM/tomoyo.rst | 16 ++++++++-------- .../admin-guide/acpi/initrd_table_override.rst | 2 +- Documentation/admin-guide/bcache.rst | 4 ++-- Documentation/admin-guide/devices.rst | 2 +- Documentation/admin-guide/initrd.rst | 2 +- Documentation/admin-guide/md.rst | 2 +- Documentation/admin-guide/mono.rst | 4 ++-- Documentation/admin-guide/reporting-bugs.rst | 2 +- Documentation/admin-guide/unicode.rst | 4 ++-- Documentation/conf.py | 2 +- Documentation/core-api/rbtree.rst | 4 ++-- Documentation/dev-tools/gdb-kernel-debugging.rst | 2 +- Documentation/doc-guide/parse-headers.rst | 2 +- .../driver-api/acpi/linuxized-acpica.rst | 6 +++--- Documentation/driver-api/usb/bulk-streams.rst | 4 ++-- .../driver-api/usb/writing_musb_glue_layer.rst | 6 +++--- Documentation/filesystems/path-lookup.txt | 2 +- Documentation/filesystems/seq_file.rst | 4 ++-- Documentation/misc-devices/c2port.txt | 6 +++--- Documentation/process/3.Early-stage.rst | 2 +- Documentation/process/7.AdvancedTopics.rst | 8 ++++---- Documentation/process/8.Conclusion.rst | 14 +++++++------- Documentation/process/adding-syscalls.rst | 4 ++-- Documentation/process/applying-patches.rst | 4 ++-- .../process/volatile-considered-harmful.rst | 4 ++-- Documentation/security/SCTP.rst | 2 +- Documentation/sphinx/kfigure.py | 6 +++--- Documentation/static-keys.txt | 2 +- Documentation/trace/events-msr.rst | 2 +- Documentation/trace/mmiotrace.rst | 2 +- Documentation/vm/ksm.rst | 2 +- Documentation/xz.txt | 6 +++--- scripts/kernel-doc | 2 +- 34 files changed, 69 insertions(+), 69 deletions(-) diff --git a/Documentation/COPYING-logo b/Documentation/COPYING-logo index 296f0f7f67eb2..b21c7cf7d9f65 100644 --- a/Documentation/COPYING-logo +++ b/Documentation/COPYING-logo @@ -9,5 +9,5 @@ scale down to smaller sizes and are better for letterheads or whatever you want to use it for: for the full range of logos take a look at Larry's web-page: - http://www.isc.tamu.edu/~lewing/linux/ + https://www.isc.tamu.edu/~lewing/linux/ diff --git a/Documentation/admin-guide/LSM/tomoyo.rst b/Documentation/admin-guide/LSM/tomoyo.rst index 
e2d6b6e150825..4bc9c2b4da6f3 100644 --- a/Documentation/admin-guide/LSM/tomoyo.rst +++ b/Documentation/admin-guide/LSM/tomoyo.rst @@ -27,29 +27,29 @@ Where is documentation? ======================= User <-> Kernel interface documentation is available at -http://tomoyo.osdn.jp/2.5/policy-specification/index.html . +https://tomoyo.osdn.jp/2.5/policy-specification/index.html . Materials we prepared for seminars and symposiums are available at -http://osdn.jp/projects/tomoyo/docs/?category_id=532&language_id=1 . +https://osdn.jp/projects/tomoyo/docs/?category_id=532&language_id=1 . Below lists are chosen from three aspects. What is TOMOYO? TOMOYO Linux Overview - http://osdn.jp/projects/tomoyo/docs/lca2009-takeda.pdf + https://osdn.jp/projects/tomoyo/docs/lca2009-takeda.pdf TOMOYO Linux: pragmatic and manageable security for Linux - http://osdn.jp/projects/tomoyo/docs/freedomhectaipei-tomoyo.pdf + https://osdn.jp/projects/tomoyo/docs/freedomhectaipei-tomoyo.pdf TOMOYO Linux: A Practical Method to Understand and Protect Your Own Linux Box - http://osdn.jp/projects/tomoyo/docs/PacSec2007-en-no-demo.pdf + https://osdn.jp/projects/tomoyo/docs/PacSec2007-en-no-demo.pdf What can TOMOYO do? Deep inside TOMOYO Linux - http://osdn.jp/projects/tomoyo/docs/lca2009-kumaneko.pdf + https://osdn.jp/projects/tomoyo/docs/lca2009-kumaneko.pdf The role of "pathname based access control" in security. - http://osdn.jp/projects/tomoyo/docs/lfj2008-bof.pdf + https://osdn.jp/projects/tomoyo/docs/lfj2008-bof.pdf History of TOMOYO? Realities of Mainlining - http://osdn.jp/projects/tomoyo/docs/lfj2008.pdf + https://osdn.jp/projects/tomoyo/docs/lfj2008.pdf What is future plan? ==================== diff --git a/Documentation/admin-guide/acpi/initrd_table_override.rst b/Documentation/admin-guide/acpi/initrd_table_override.rst index cbd7682076314..bb24fa6b5fbe6 100644 --- a/Documentation/admin-guide/acpi/initrd_table_override.rst +++ b/Documentation/admin-guide/acpi/initrd_table_override.rst @@ -102,7 +102,7 @@ Where to retrieve userspace tools ================================= iasl and acpixtract are part of Intel's ACPICA project: -http://acpica.org/ +https://acpica.org/ and should be packaged by distributions (for example in the acpica package on SUSE). diff --git a/Documentation/admin-guide/bcache.rst b/Documentation/admin-guide/bcache.rst index c0ce64d75bbf7..1eccf952876d4 100644 --- a/Documentation/admin-guide/bcache.rst +++ b/Documentation/admin-guide/bcache.rst @@ -7,9 +7,9 @@ nice if you could use them as cache... Hence bcache. Wiki and git repositories are at: - - http://bcache.evilpiepirate.org + - https://bcache.evilpiepirate.org - http://evilpiepirate.org/git/linux-bcache.git - - http://evilpiepirate.org/git/bcache-tools.git + - https://evilpiepirate.org/git/bcache-tools.git It's designed around the performance characteristics of SSDs - it only allocates in erase block sized buckets, and it uses a hybrid btree/log to track cached diff --git a/Documentation/admin-guide/devices.rst b/Documentation/admin-guide/devices.rst index d41671aeaef06..035275fedbdd7 100644 --- a/Documentation/admin-guide/devices.rst +++ b/Documentation/admin-guide/devices.rst @@ -17,7 +17,7 @@ Specifically explore the sections titled "CHAR and MISC DRIVERS", and to involve for character and block devices. This document is included by reference into the Filesystem Hierarchy -Standard (FHS). The FHS is available from http://www.pathname.com/fhs/. +Standard (FHS). The FHS is available from https://www.pathname.com/fhs/. 
Allocations marked (68k/Amiga) apply to Linux/68k on the Amiga platform only. Allocations marked (68k/Atari) apply to Linux/68k on diff --git a/Documentation/admin-guide/initrd.rst b/Documentation/admin-guide/initrd.rst index a03dabaaf3a3d..67bbad8806e8c 100644 --- a/Documentation/admin-guide/initrd.rst +++ b/Documentation/admin-guide/initrd.rst @@ -376,7 +376,7 @@ Resources --------- .. [#f1] Almesberger, Werner; "Booting Linux: The History and the Future" - http://www.almesberger.net/cv/papers/ols2k-9.ps.gz + https://www.almesberger.net/cv/papers/ols2k-9.ps.gz .. [#f2] newlib package (experimental), with initrd example https://www.sourceware.org/newlib/ .. [#f3] util-linux: Miscellaneous utilities for Linux diff --git a/Documentation/admin-guide/md.rst b/Documentation/admin-guide/md.rst index 3c51084ffd379..d973d469ffc4e 100644 --- a/Documentation/admin-guide/md.rst +++ b/Documentation/admin-guide/md.rst @@ -5,7 +5,7 @@ Boot time assembly of RAID arrays --------------------------------- Tools that manage md devices can be found at - http://www.kernel.org/pub/linux/utils/raid/ + https://www.kernel.org/pub/linux/utils/raid/ You can boot with your md device with the following kernel command diff --git a/Documentation/admin-guide/mono.rst b/Documentation/admin-guide/mono.rst index 59e6d59f0ed9e..c6dab56800655 100644 --- a/Documentation/admin-guide/mono.rst +++ b/Documentation/admin-guide/mono.rst @@ -12,11 +12,11 @@ other program after you have done the following: a binary package, a source tarball or by installing from Git. Binary packages for several distributions can be found at: - http://www.mono-project.com/download/ + https://www.mono-project.com/download/ Instructions for compiling Mono can be found at: - http://www.mono-project.com/docs/compiling-mono/linux/ + https://www.mono-project.com/docs/compiling-mono/linux/ Once the Mono CLR support has been installed, just check that ``/usr/bin/mono`` (which could be located elsewhere, for example diff --git a/Documentation/admin-guide/reporting-bugs.rst b/Documentation/admin-guide/reporting-bugs.rst index 49ac8dc3594d6..42481ea7b41db 100644 --- a/Documentation/admin-guide/reporting-bugs.rst +++ b/Documentation/admin-guide/reporting-bugs.rst @@ -75,7 +75,7 @@ Tips for reporting bugs If you haven't reported a bug before, please read: - http://www.chiark.greenend.org.uk/~sgtatham/bugs.html + https://www.chiark.greenend.org.uk/~sgtatham/bugs.html http://www.catb.org/esr/faqs/smart-questions.html diff --git a/Documentation/admin-guide/unicode.rst b/Documentation/admin-guide/unicode.rst index 7425a3351321b..290fe83ebe828 100644 --- a/Documentation/admin-guide/unicode.rst +++ b/Documentation/admin-guide/unicode.rst @@ -114,7 +114,7 @@ Unicode practice. This range is now officially managed by the ConScript Unicode Registry. The normative reference is at: - http://www.evertype.com/standards/csur/klingon.html + https://www.evertype.com/standards/csur/klingon.html Klingon has an alphabet of 26 characters, a positional numeric writing system with 10 digits, and is written left-to-right, top-to-bottom. @@ -178,7 +178,7 @@ fictional and artificial scripts has been established by John Cowan and Michael Everson . 
The ConScript Unicode Registry is accessible at: - http://www.evertype.com/standards/csur/ + https://www.evertype.com/standards/csur/ The ranges used fall at the low end of the End User Zone and can hence not be normatively assigned, but it is recommended that people who diff --git a/Documentation/conf.py b/Documentation/conf.py index f6a1bc07c4101..c503188880d95 100644 --- a/Documentation/conf.py +++ b/Documentation/conf.py @@ -538,7 +538,7 @@ epub_exclude_files = ['search.html'] # Grouping the document tree into PDF files. List of tuples # (source start file, target name, title, author, options). # -# See the Sphinx chapter of http://ralsina.me/static/manual.pdf +# See the Sphinx chapter of https://ralsina.me/static/manual.pdf # # FIXME: Do not add the index file here; the result will be too big. Adding # multiple PDF files here actually tries to get the cross-referencing right diff --git a/Documentation/core-api/rbtree.rst b/Documentation/core-api/rbtree.rst index 523d54b600876..6b88837fbf824 100644 --- a/Documentation/core-api/rbtree.rst +++ b/Documentation/core-api/rbtree.rst @@ -36,10 +36,10 @@ This document covers use of the Linux rbtree implementation. For more information on the nature and implementation of Red Black Trees, see: Linux Weekly News article on red-black trees - http://lwn.net/Articles/184495/ + https://lwn.net/Articles/184495/ Wikipedia entry on red-black trees - http://en.wikipedia.org/wiki/Red-black_tree + https://en.wikipedia.org/wiki/Red-black_tree Linux implementation of red-black trees --------------------------------------- diff --git a/Documentation/dev-tools/gdb-kernel-debugging.rst b/Documentation/dev-tools/gdb-kernel-debugging.rst index 19df79286f000..4756f6b3a04e5 100644 --- a/Documentation/dev-tools/gdb-kernel-debugging.rst +++ b/Documentation/dev-tools/gdb-kernel-debugging.rst @@ -24,7 +24,7 @@ Setup - Create a virtual Linux machine for QEMU/KVM (see www.linux-kvm.org and www.qemu.org for more details). For cross-development, - http://landley.net/aboriginal/bin keeps a pool of machine images and + https://landley.net/aboriginal/bin keeps a pool of machine images and toolchains that can be helpful to start from. - Build the kernel with CONFIG_GDB_SCRIPTS enabled, but leave diff --git a/Documentation/doc-guide/parse-headers.rst b/Documentation/doc-guide/parse-headers.rst index 24cfaa15dd81e..ac5d9304a9180 100644 --- a/Documentation/doc-guide/parse-headers.rst +++ b/Documentation/doc-guide/parse-headers.rst @@ -186,7 +186,7 @@ COPYRIGHT Copyright (c) 2016 by Mauro Carvalho Chehab . -License GPLv2: GNU GPL version 2 . +License GPLv2: GNU GPL version 2 . This is free software: you are free to change and redistribute it. There is NO WARRANTY, to the extent permitted by law. diff --git a/Documentation/driver-api/acpi/linuxized-acpica.rst b/Documentation/driver-api/acpi/linuxized-acpica.rst index 0ca8f15385190..6bee033832255 100644 --- a/Documentation/driver-api/acpi/linuxized-acpica.rst +++ b/Documentation/driver-api/acpi/linuxized-acpica.rst @@ -175,9 +175,9 @@ illustrated in the following figure:: B. acpica / master - "master" branch of the git repository at . C. linux-pm / linux-next - "linux-next" branch of the git repository at - . + . D. linux / master - "master" branch of the git repository at - . + . Before the linuxized ACPICA patches are sent to the Linux ACPI community for review, there is a quality assurance build test process to reduce @@ -274,6 +274,6 @@ before they become available from the ACPICA release process. 
a diff file indicating the state of the current divergences:: # git clone https://github.com/acpica/acpica - # git clone http://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git + # git clone https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git # cd acpica # generate/linux/divergences.sh -s ../linux diff --git a/Documentation/driver-api/usb/bulk-streams.rst b/Documentation/driver-api/usb/bulk-streams.rst index 99b515babdeb2..eeefe582f8ffb 100644 --- a/Documentation/driver-api/usb/bulk-streams.rst +++ b/Documentation/driver-api/usb/bulk-streams.rst @@ -9,9 +9,9 @@ device driver to overload a bulk endpoint so that multiple transfers can be queued at once. Streams are defined in sections 4.4.6.4 and 8.12.1.4 of the Universal Serial Bus -3.0 specification at http://www.usb.org/developers/docs/ The USB Attached SCSI +3.0 specification at https://www.usb.org/developers/docs/ The USB Attached SCSI Protocol, which uses streams to queue multiple SCSI commands, can be found on -the T10 website (http://t10.org/). +the T10 website (https://t10.org/). Device-side implications diff --git a/Documentation/driver-api/usb/writing_musb_glue_layer.rst b/Documentation/driver-api/usb/writing_musb_glue_layer.rst index 5bf7152fd76fb..10416cc11cd5a 100644 --- a/Documentation/driver-api/usb/writing_musb_glue_layer.rst +++ b/Documentation/driver-api/usb/writing_musb_glue_layer.rst @@ -707,12 +707,12 @@ cheerful guidance and support. Resources ========= -USB Home Page: http://www.usb.org +USB Home Page: https://www.usb.org -linux-usb Mailing List Archives: http://marc.info/?l=linux-usb +linux-usb Mailing List Archives: https://marc.info/?l=linux-usb USB On-the-Go Basics: -http://www.maximintegrated.com/app-notes/index.mvp/id/1822 +https://www.maximintegrated.com/app-notes/index.mvp/id/1822 :ref:`Writing USB Device Drivers ` diff --git a/Documentation/filesystems/path-lookup.txt b/Documentation/filesystems/path-lookup.txt index 9b8930f589d9e..1aa7ce099f6f5 100644 --- a/Documentation/filesystems/path-lookup.txt +++ b/Documentation/filesystems/path-lookup.txt @@ -375,7 +375,7 @@ common path elements, the more likely they will exist in dentry cache. Papers and other documentation on dcache locking ================================================ -1. Scaling dcache with RCU (http://linuxjournal.com/article.php?sid=7124). +1. Scaling dcache with RCU (https://linuxjournal.com/article.php?sid=7124). 2. http://lse.sourceforge.net/locking/dcache/dcache.html diff --git a/Documentation/filesystems/seq_file.rst b/Documentation/filesystems/seq_file.rst index fab302046b13c..7f7ee06b26939 100644 --- a/Documentation/filesystems/seq_file.rst +++ b/Documentation/filesystems/seq_file.rst @@ -7,7 +7,7 @@ The seq_file Interface Copyright 2003 Jonathan Corbet This file is originally from the LWN.net Driver Porting series at - http://lwn.net/Articles/driver-porting/ + https://lwn.net/Articles/driver-porting/ There are numerous ways for a device driver (or other kernel component) to @@ -57,7 +57,7 @@ Then concatenate the output files out1 and out2 and get the right result. Yes, it is a thoroughly useless module, but the point is to show how the mechanism works without getting lost in other details. (Those wanting to see the full source for this module can find it at -http://lwn.net/Articles/22359/). +https://lwn.net/Articles/22359/). 
Deprecated create_proc_entry ============================ diff --git a/Documentation/misc-devices/c2port.txt b/Documentation/misc-devices/c2port.txt index ea7344465610b..31351b1a5a1fb 100644 --- a/Documentation/misc-devices/c2port.txt +++ b/Documentation/misc-devices/c2port.txt @@ -28,14 +28,14 @@ where the micro controller is connected via special GPIOs pins. References ---------- -The C2 Interface main references are at (http://www.silabs.com) +The C2 Interface main references are at (https://www.silabs.com) Silicon Laboratories site], see: - AN127: FLASH Programming via the C2 Interface at -http://www.silabs.com/Support Documents/TechnicalDocs/an127.pdf +https://www.silabs.com/Support Documents/TechnicalDocs/an127.pdf - C2 Specification at -http://www.silabs.com/pages/DownloadDoc.aspx?FILEURL=Support%20Documents/TechnicalDocs/an127.pdf&src=SearchResults +https://www.silabs.com/pages/DownloadDoc.aspx?FILEURL=Support%20Documents/TechnicalDocs/an127.pdf&src=SearchResults however it implements a two wire serial communication protocol (bit banging) designed to enable in-system programming, debugging, and diff --git a/Documentation/process/3.Early-stage.rst b/Documentation/process/3.Early-stage.rst index 7067abbd75bce..6bfd60d77d1ad 100644 --- a/Documentation/process/3.Early-stage.rst +++ b/Documentation/process/3.Early-stage.rst @@ -46,7 +46,7 @@ and posted this: to communicate user requirements to these people is a waste of time. They are much too "intelligent" to listen to lesser mortals. -(http://lwn.net/Articles/131776/). +(https://lwn.net/Articles/131776/). The reality of the situation was different; the kernel developers were far more concerned about system stability, long-term maintenance, and finding diff --git a/Documentation/process/7.AdvancedTopics.rst b/Documentation/process/7.AdvancedTopics.rst index 172733cff097f..bf7cbfb4caa5b 100644 --- a/Documentation/process/7.AdvancedTopics.rst +++ b/Documentation/process/7.AdvancedTopics.rst @@ -29,9 +29,9 @@ long document in its own right. Instead, the focus here will be on how git fits into the kernel development process in particular. Developers who wish to come up to speed with git will find more information at: - http://git-scm.com/ + https://git-scm.com/ - http://www.kernel.org/pub/software/scm/git/docs/user-manual.html + https://www.kernel.org/pub/software/scm/git/docs/user-manual.html and on various tutorials found on the web. @@ -55,7 +55,7 @@ server with git-daemon is relatively straightforward if you have a system which is accessible to the Internet. Otherwise, free, public hosting sites (Github, for example) are starting to appear on the net. Established developers can get an account on kernel.org, but those are not easy to come -by; see http://kernel.org/faq/ for more information. +by; see https://kernel.org/faq/ for more information. The normal git workflow involves the use of a lot of branches. Each line of development can be separated into a separate "topic branch" and @@ -125,7 +125,7 @@ can affect your ability to get trees pulled in the future. Quoting Linus: to trust things *without* then having to go and check every individual change by hand. -(http://lwn.net/Articles/224135/). +(https://lwn.net/Articles/224135/). 
To avoid this kind of situation, ensure that all patches within a given branch stick closely to the associated topic; a "driver fixes" branch diff --git a/Documentation/process/8.Conclusion.rst b/Documentation/process/8.Conclusion.rst index 8395aa2c1f3ae..b32a402158581 100644 --- a/Documentation/process/8.Conclusion.rst +++ b/Documentation/process/8.Conclusion.rst @@ -16,24 +16,24 @@ distributions runs into internal limits and fails to process the documents properly). Various web sites discuss kernel development at all levels of detail. Your -author would like to humbly suggest http://lwn.net/ as a source; +author would like to humbly suggest https://lwn.net/ as a source; information on many specific kernel topics can be found via the LWN kernel index at: - http://lwn.net/Kernel/Index/ + https://lwn.net/Kernel/Index/ Beyond that, a valuable resource for kernel developers is: - http://kernelnewbies.org/ + https://kernelnewbies.org/ -And, of course, one should not forget http://kernel.org/, the definitive +And, of course, one should not forget https://kernel.org/, the definitive location for kernel release information. There are a number of books on kernel development: Linux Device Drivers, 3rd Edition (Jonathan Corbet, Alessandro Rubini, and Greg Kroah-Hartman). Online at - http://lwn.net/Kernel/LDD3/. + https://lwn.net/Kernel/LDD3/. Linux Kernel Development (Robert Love). @@ -46,9 +46,9 @@ information to be found there. Documentation for git can be found at: - http://www.kernel.org/pub/software/scm/git/docs/ + https://www.kernel.org/pub/software/scm/git/docs/ - http://www.kernel.org/pub/software/scm/git/docs/user-manual.html + https://www.kernel.org/pub/software/scm/git/docs/user-manual.html Conclusion diff --git a/Documentation/process/adding-syscalls.rst b/Documentation/process/adding-syscalls.rst index a6b4a3a5bf3fd..a3ecb236576ca 100644 --- a/Documentation/process/adding-syscalls.rst +++ b/Documentation/process/adding-syscalls.rst @@ -541,9 +541,9 @@ References and Sources :manpage:`syscall(2)` man-page: http://man7.org/linux/man-pages/man2/syscall.2.html#NOTES - Collated emails from Linus Torvalds discussing the problems with ``ioctl()``: - http://yarchive.net/comp/linux/ioctl.html + https://yarchive.net/comp/linux/ioctl.html - "How to not invent kernel interfaces", Arnd Bergmann, - http://www.ukuug.org/events/linux2007/2007/papers/Bergmann.pdf + https://www.ukuug.org/events/linux2007/2007/papers/Bergmann.pdf - LWN article from Michael Kerrisk on avoiding new uses of CAP_SYS_ADMIN: https://lwn.net/Articles/486306/ - Recommendation from Andrew Morton that all related information for a new diff --git a/Documentation/process/applying-patches.rst b/Documentation/process/applying-patches.rst index fbb9297e6360d..2e7017bef4b81 100644 --- a/Documentation/process/applying-patches.rst +++ b/Documentation/process/applying-patches.rst @@ -229,7 +229,7 @@ Although interdiff may save you a step or two you are generally advised to do the additional steps since interdiff can get things wrong in some cases. Another alternative is ``ketchup``, which is a python script for automatic -downloading and applying of patches (http://www.selenic.com/ketchup/). +downloading and applying of patches (https://www.selenic.com/ketchup/). Other nice tools are diffstat, which shows a summary of changes made by a patch; lsdiff, which displays a short listing of affected files in a patch @@ -241,7 +241,7 @@ the patch contains a given regular expression. Where can I download the patches? 
================================= -The patches are available at http://kernel.org/ +The patches are available at https://kernel.org/ Most recent patches are linked from the front page, but they also have specific homes. diff --git a/Documentation/process/volatile-considered-harmful.rst b/Documentation/process/volatile-considered-harmful.rst index 4934e656a6f35..7eb6bd7c92146 100644 --- a/Documentation/process/volatile-considered-harmful.rst +++ b/Documentation/process/volatile-considered-harmful.rst @@ -109,9 +109,9 @@ been properly thought through. References ========== -[1] http://lwn.net/Articles/233481/ +[1] https://lwn.net/Articles/233481/ -[2] http://lwn.net/Articles/233482/ +[2] https://lwn.net/Articles/233482/ Credits ======= diff --git a/Documentation/security/SCTP.rst b/Documentation/security/SCTP.rst index d903eb97fcf32..0bcf6c1245ee9 100644 --- a/Documentation/security/SCTP.rst +++ b/Documentation/security/SCTP.rst @@ -328,7 +328,7 @@ NOTES: label (see **netlabel-config**\(8) helper script for details). 5) The NetLabel SCTP peer labeling rules apply as discussed in the following - set of posts tagged "netlabel" at: http://www.paul-moore.com/blog/t. + set of posts tagged "netlabel" at: https://www.paul-moore.com/blog/t. 6) CIPSO is only supported for IPv4 addressing: ``socket(AF_INET, ...)`` CALIPSO is only supported for IPv6 addressing: ``socket(AF_INET6, ...)`` diff --git a/Documentation/sphinx/kfigure.py b/Documentation/sphinx/kfigure.py index fbfe6693bb60a..788704886eec9 100644 --- a/Documentation/sphinx/kfigure.py +++ b/Documentation/sphinx/kfigure.py @@ -29,7 +29,7 @@ u""" Used tools: - * ``dot(1)``: Graphviz (http://www.graphviz.org). If Graphviz is not + * ``dot(1)``: Graphviz (https://www.graphviz.org). If Graphviz is not available, the DOT language is inserted as literal-block. * SVG to PDF: To generate PDF, you need at least one of this tools: @@ -41,7 +41,7 @@ u""" * generate PDF from SVG / used by PDF (LaTeX) builder * generate SVG (html-builder) and PDF (latex-builder) from DOT files. - DOT: see http://www.graphviz.org/content/dot-language + DOT: see https://www.graphviz.org/content/dot-language """ @@ -182,7 +182,7 @@ def setupTools(app): kernellog.verbose(app, "use dot(1) from: " + dot_cmd) else: kernellog.warn(app, "dot(1) not found, for better output quality install " - "graphviz from http://www.graphviz.org") + "graphviz from https://www.graphviz.org") if convert_cmd: kernellog.verbose(app, "use convert(1) from: " + convert_cmd) else: diff --git a/Documentation/static-keys.txt b/Documentation/static-keys.txt index 9803e14639bfc..38290b9f25eb2 100644 --- a/Documentation/static-keys.txt +++ b/Documentation/static-keys.txt @@ -71,7 +71,7 @@ Solution gcc (v4.5) adds a new 'asm goto' statement that allows branching to a label: -http://gcc.gnu.org/ml/gcc-patches/2009-07/msg01556.html +https://gcc.gnu.org/ml/gcc-patches/2009-07/msg01556.html Using the 'asm goto', we can create branches that are either taken or not taken by default, without the need to check memory. Then, at run-time, we can patch diff --git a/Documentation/trace/events-msr.rst b/Documentation/trace/events-msr.rst index e938aa0b6f4ff..810481e530b6c 100644 --- a/Documentation/trace/events-msr.rst +++ b/Documentation/trace/events-msr.rst @@ -4,7 +4,7 @@ MSR Trace Events The x86 kernel supports tracing most MSR (Model Specific Register) accesses. 
To see the definition of the MSRs on Intel systems please see the SDM -at http://www.intel.com/sdm (Volume 3) +at https://www.intel.com/sdm (Volume 3) Available trace points: diff --git a/Documentation/trace/mmiotrace.rst b/Documentation/trace/mmiotrace.rst index 5116e8ca27b45..fed13eaead891 100644 --- a/Documentation/trace/mmiotrace.rst +++ b/Documentation/trace/mmiotrace.rst @@ -5,7 +5,7 @@ In-kernel memory-mapped I/O tracing Home page and links to optional user space tools: - http://nouveau.freedesktop.org/wiki/MmioTrace + https://nouveau.freedesktop.org/wiki/MmioTrace MMIO tracing was originally developed by Intel around 2003 for their Fault Injection Test Harness. In Dec 2006 - Jan 2007, using the code from Intel, diff --git a/Documentation/vm/ksm.rst b/Documentation/vm/ksm.rst index d32016d9be2cb..d1b7270ad55c9 100644 --- a/Documentation/vm/ksm.rst +++ b/Documentation/vm/ksm.rst @@ -6,7 +6,7 @@ Kernel Samepage Merging KSM is a memory-saving de-duplication feature, enabled by CONFIG_KSM=y, added to the Linux kernel in 2.6.32. See ``mm/ksm.c`` for its implementation, -and http://lwn.net/Articles/306704/ and http://lwn.net/Articles/330589/ +and http://lwn.net/Articles/306704/ and https://lwn.net/Articles/330589/ The userspace interface of KSM is described in :ref:`Documentation/admin-guide/mm/ksm.rst ` diff --git a/Documentation/xz.txt b/Documentation/xz.txt index b2220d03aa509..b2f5ff12a161f 100644 --- a/Documentation/xz.txt +++ b/Documentation/xz.txt @@ -14,13 +14,13 @@ improve compression ratio of executable data. The XZ decompressor in Linux is called XZ Embedded. It supports the LZMA2 filter and optionally also BCJ filters. CRC32 is supported for integrity checking. The home page of XZ Embedded is at -, where you can find the +, where you can find the latest version and also information about using the code outside the Linux kernel. For userspace, XZ Utils provide a zlib-like compression library and a gzip-like command line tool. XZ Utils can be downloaded from -. +. XZ related components in the kernel =================================== @@ -113,7 +113,7 @@ Reporting bugs ============== Before reporting a bug, please check that it's not fixed already -at upstream. See to get the +at upstream. See to get the latest code. Report bugs to or visit #tukaani on diff --git a/scripts/kernel-doc b/scripts/kernel-doc index f68d76dd97bad..b4c963f8364ed 100755 --- a/scripts/kernel-doc +++ b/scripts/kernel-doc @@ -321,7 +321,7 @@ if (defined($ENV{'KBUILD_VERBOSE'})) { # Generated docbook code is inserted in a template at a point where # docbook v3.1 requires a non-zero sequence of RefEntry's; see: -# http://www.oasis-open.org/docbook/documentation/reference/html/refentry.html +# https://www.oasis-open.org/docbook/documentation/reference/html/refentry.html # We keep track of number of generated entries and generate a dummy # if needs be to ensure the expanded template can be postprocessed # into html. -- GitLab From 3699158d3fe31a7739356ce4ad6934d6ede445c7 Mon Sep 17 00:00:00 2001 From: Dejin Zheng Date: Wed, 27 May 2020 22:45:31 +0800 Subject: [PATCH 1227/1971] Documentation: devres: add missing entry for devm_platform_get_and_ioremap_resource() The devm_platform_get_and_ioremap_resource() should be documented in devres.rst. Add the missing entry. 
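For illustration, a typical probe-time use of this helper looks roughly like the sketch below (the foo_* names and the resource index are made up; only the helper's calling convention is taken from the kernel):

    #include <linux/err.h>
    #include <linux/platform_device.h>

    static int foo_probe(struct platform_device *pdev)
    {
            struct resource *res;
            void __iomem *base;

            /* ioremap memory resource 0 and also hand back the resource */
            base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
            if (IS_ERR(base))
                    return PTR_ERR(base);

            /* res is now valid too, e.g. for resource_size(res) */
            return 0;
    }
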
Signed-off-by: Dejin Zheng Link: https://lore.kernel.org/r/20200527144531.9376-1-zhengdejin5@gmail.com Signed-off-by: Jonathan Corbet --- Documentation/driver-api/driver-model/devres.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/driver-api/driver-model/devres.rst b/Documentation/driver-api/driver-model/devres.rst index 46c13780994c4..cff593e467357 100644 --- a/Documentation/driver-api/driver-model/devres.rst +++ b/Documentation/driver-api/driver-model/devres.rst @@ -322,6 +322,7 @@ IOMAP devm_platform_ioremap_resource() : calls devm_ioremap_resource() for platform device devm_platform_ioremap_resource_wc() devm_platform_ioremap_resource_byname() + devm_platform_get_and_ioremap_resource() devm_iounmap() pcim_iomap() pcim_iomap_regions() : do request_region() and iomap() on multiple BARs -- GitLab From efe68c1ca8f49e8c06afd74b699411bfbb8ba1ff Mon Sep 17 00:00:00 2001 From: Bijan Mottahedeh Date: Thu, 4 Jun 2020 18:01:52 -0700 Subject: [PATCH 1228/1971] io_uring: validate the full range of provided buffers for access Account for the number of provided buffers when validating the address range. Signed-off-by: Bijan Mottahedeh Signed-off-by: Jens Axboe --- fs/io_uring.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 70f0f2f940fbd..5431b182b6b00 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -3183,7 +3183,7 @@ static int io_provide_buffers_prep(struct io_kiocb *req, p->addr = READ_ONCE(sqe->addr); p->len = READ_ONCE(sqe->len); - if (!access_ok(u64_to_user_ptr(p->addr), p->len)) + if (!access_ok(u64_to_user_ptr(p->addr), (p->len * p->nbufs))) return -EFAULT; p->bgid = READ_ONCE(sqe->buf_group); -- GitLab From a8c73c1a614f6da6c0b04c393f87447e28cb6de4 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 5 Jun 2020 12:32:03 +0300 Subject: [PATCH 1229/1971] io_uring: use kvfree() in io_sqe_buffer_register() Use kvfree() to free the pages and vmas, since they are allocated by kvmalloc_array() in a loop. 
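The underlying rule: buffers obtained from kvmalloc()/kvmalloc_array() may be backed by either kmalloc() or vmalloc(), so they must always be released with kvfree(). A minimal sketch of the correct pairing (function and variable names are illustrative only):

    #include <linux/mm.h>

    static int alloc_page_array(unsigned long nr_pages)
    {
            struct page **pages;

            pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
            if (!pages)
                    return -ENOMEM;

            /* ... use the array ... */

            kvfree(pages);  /* correct for both kmalloc- and vmalloc-backed memory */
            return 0;
    }
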
Fixes: d4ef647510b1 ("io_uring: avoid page allocation warnings") Signed-off-by: Denis Efremov Signed-off-by: Jens Axboe Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20200605093203.40087-1-efremov@linux.com --- fs/io_uring.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 5431b182b6b00..5e36e78e766e7 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -7171,8 +7171,8 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg, ret = 0; if (!pages || nr_pages > got_pages) { - kfree(vmas); - kfree(pages); + kvfree(vmas); + kvfree(pages); pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL); vmas = kvmalloc_array(nr_pages, -- GitLab From 146f5cdeda153a1c7866cd240d2f464543d368c0 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Sat, 6 Jun 2020 10:37:46 +0200 Subject: [PATCH 1230/1971] docs/memory-barriers.txt/kokr: smp_mb__{before,after}_atomic(): update Documentation Translate this commit to Korean: 39323c64b8a9 ("smp_mb__{before,after}_atomic(): update Documentation") Signed-off-by: SeongJae Park Reviewed-by: Yunjae Lee Link: https://lore.kernel.org/r/20200606083746.20869-1-sjpark@amazon.de Signed-off-by: Jonathan Corbet --- .../translations/ko_KR/memory-barriers.txt | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/Documentation/translations/ko_KR/memory-barriers.txt b/Documentation/translations/ko_KR/memory-barriers.txt index e50fe65413350..34d041d68f783 100644 --- a/Documentation/translations/ko_KR/memory-barriers.txt +++ b/Documentation/translations/ko_KR/memory-barriers.txt @@ -1842,12 +1842,15 @@ Mandatory 배리어들은 SMP 시스템에서도 UP 시스템에서도 SMP 효 (*) smp_mb__before_atomic(); (*) smp_mb__after_atomic(); - 이것들은 값을 리턴하지 않는 (더하기, 빼기, 증가, 감소와 같은) 어토믹 - 함수들을 위한, 특히 그것들이 레퍼런스 카운팅에 사용될 때를 위한 - 함수들입니다. 이 함수들은 메모리 배리어를 내포하고 있지는 않습니다. - - 이것들은 값을 리턴하지 않으며 어토믹한 (set_bit 과 clear_bit 같은) 비트 - 연산에도 사용될 수 있습니다. + 이것들은 메모리 배리어를 내포하지 않는 어토믹 RMW 함수를 사용하지만 코드에 + 메모리 배리어가 필요한 경우를 위한 것들입니다. 메모리 배리어를 내포하지 + 않는 어토믹 RMW 함수들의 예로는 더하기, 빼기, (실패한) 조건적 + 오퍼레이션들, _relaxed 함수들이 있으며, atomic_read 나 atomic_set 은 이에 + 해당되지 않습니다. 메모리 배리어가 필요해지는 흔한 예로는 어토믹 + 오퍼레이션을 사용해 레퍼런스 카운트를 수정하는 경우를 들 수 있습니다. + + 이것들은 또한 (set_bit 과 clear_bit 같은) 메모리 배리어를 내포하지 않는 + 어토믹 RMW bitop 함수들을 위해서도 사용될 수 있습니다. 한 예로, 객체 하나를 무효한 것으로 표시하고 그 객체의 레퍼런스 카운트를 감소시키는 다음 코드를 보세요: -- GitLab From 88d8822d30cc6f668d44a1a7466bed5a1f85f607 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Fri, 5 Jun 2020 08:41:17 +0200 Subject: [PATCH 1231/1971] ALSA: usb-audio: Manage auto-pm of all bundled interfaces Currently USB-audio driver manages the auto-pm of the primary interface although a card may consist of multiple interfaces. This may leave the secondary and other interfaces left running unnecessarily after the auto-suspend. This patch allows the driver managing the auto-pm of all bundled interfaces per card. The chip->pm_intf field is extended as chip->intf[] to contain the array of assigned interfaces, and the runtime-PM is performed to all those interfaces. 
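The snd_usb_autoresume() change below is an instance of the usual acquire-all-or-roll-back idiom; reduced to its core it reads roughly as follows (a simplified sketch with an illustrative function name, leaving out the shutdown/active accounting):

    static int resume_all_interfaces(struct snd_usb_audio *chip)
    {
            int i, err;

            for (i = 0; i < chip->num_interfaces; i++) {
                    err = usb_autopm_get_interface(chip->intf[i]);
                    if (err < 0) {
                            /* roll back the interfaces already resumed */
                            while (--i >= 0)
                                    usb_autopm_put_interface(chip->intf[i]);
                            return err;
                    }
            }
            return 0;
    }
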
Tested-by: Macpaul Lin Link: https://lore.kernel.org/r/20200605064117.28504-1-tiwai@suse.de Signed-off-by: Takashi Iwai --- sound/usb/card.c | 35 ++++++++++++++++++++++++++++++----- sound/usb/usbaudio.h | 4 +++- 2 files changed, 33 insertions(+), 6 deletions(-) diff --git a/sound/usb/card.c b/sound/usb/card.c index 359f7a04be1ce..162bdd6eb4d4b 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c @@ -634,7 +634,6 @@ static int usb_audio_probe(struct usb_interface *intf, id, &chip); if (err < 0) goto __error; - chip->pm_intf = intf; break; } else if (vid[i] != -1 || pid[i] != -1) { dev_info(&dev->dev, @@ -651,6 +650,13 @@ static int usb_audio_probe(struct usb_interface *intf, goto __error; } } + + if (chip->num_interfaces >= MAX_CARD_INTERFACES) { + dev_info(&dev->dev, "Too many interfaces assigned to the single USB-audio card\n"); + err = -EINVAL; + goto __error; + } + dev_set_drvdata(&dev->dev, chip); /* @@ -703,6 +709,7 @@ static int usb_audio_probe(struct usb_interface *intf, } usb_chip[chip->index] = chip; + chip->intf[chip->num_interfaces] = intf; chip->num_interfaces++; usb_set_intfdata(intf, chip); atomic_dec(&chip->active); @@ -818,19 +825,37 @@ void snd_usb_unlock_shutdown(struct snd_usb_audio *chip) int snd_usb_autoresume(struct snd_usb_audio *chip) { + int i, err; + if (atomic_read(&chip->shutdown)) return -EIO; - if (atomic_inc_return(&chip->active) == 1) - return usb_autopm_get_interface(chip->pm_intf); + if (atomic_inc_return(&chip->active) != 1) + return 0; + + for (i = 0; i < chip->num_interfaces; i++) { + err = usb_autopm_get_interface(chip->intf[i]); + if (err < 0) { + /* rollback */ + while (--i >= 0) + usb_autopm_put_interface(chip->intf[i]); + atomic_dec(&chip->active); + return err; + } + } return 0; } void snd_usb_autosuspend(struct snd_usb_audio *chip) { + int i; + if (atomic_read(&chip->shutdown)) return; - if (atomic_dec_and_test(&chip->active)) - usb_autopm_put_interface(chip->pm_intf); + if (!atomic_dec_and_test(&chip->active)) + return; + + for (i = 0; i < chip->num_interfaces; i++) + usb_autopm_put_interface(chip->intf[i]); } static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message) diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h index e0ebfb25fbd51..b91c4c0807eca 100644 --- a/sound/usb/usbaudio.h +++ b/sound/usb/usbaudio.h @@ -19,11 +19,13 @@ struct media_device; struct media_intf_devnode; +#define MAX_CARD_INTERFACES 16 + struct snd_usb_audio { int index; struct usb_device *dev; struct snd_card *card; - struct usb_interface *pm_intf; + struct usb_interface *intf[MAX_CARD_INTERFACES]; u32 usb_id; struct mutex mutex; unsigned int system_suspend; -- GitLab From 951e2736f4b11b58dc44d41964fa17c3527d882a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Miros=C5=82aw?= Date: Mon, 8 Jun 2020 18:50:39 +0200 Subject: [PATCH 1232/1971] ALSA: pcm: disallow linking stream to itself MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Prevent SNDRV_PCM_IOCTL_LINK linking stream to itself - the code can't handle it. Fixed commit is not where bug was introduced, but changes the context significantly. 
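From user space the rejected case is simply passing a stream's own file descriptor to the link ioctl; roughly (a sketch, assuming a playback node at /dev/snd/pcmC0D0p):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sound/asound.h>

    int main(void)
    {
            int fd = open("/dev/snd/pcmC0D0p", O_RDWR);

            if (fd < 0)
                    return 1;

            /* linking a substream to itself is now rejected with EINVAL */
            if (ioctl(fd, SNDRV_PCM_IOCTL_LINK, fd) < 0)
                    perror("SNDRV_PCM_IOCTL_LINK");
            return 0;
    }
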
Cc: stable@vger.kernel.org Fixes: 0888c321de70 ("pcm_native: switch to fdget()/fdput()") Signed-off-by: Michał Mirosław Link: https://lore.kernel.org/r/89c4a2487609a0ed6af3ecf01cc972bdc59a7a2d.1591634956.git.mirq-linux@rere.qmqm.pl Signed-off-by: Takashi Iwai --- sound/core/pcm_native.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index c08732998a425..eeab8850ed76c 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -2176,6 +2176,12 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd) } pcm_file = f.file->private_data; substream1 = pcm_file->substream; + + if (substream == substream1) { + res = -EINVAL; + goto _badf; + } + group = kzalloc(sizeof(*group), GFP_KERNEL); if (!group) { res = -ENOMEM; -- GitLab From 33d077996a87175b155fe88030e8fec7ca76327e Mon Sep 17 00:00:00 2001 From: Stefano Brivio Date: Wed, 3 Jun 2020 01:50:11 +0200 Subject: [PATCH 1233/1971] netfilter: nft_set_rbtree: Don't account for expired elements on insertion While checking the validity of insertion in __nft_rbtree_insert(), we currently ignore conflicting elements and intervals only if they are not active within the next generation. However, if we consider expired elements and intervals as potentially conflicting and overlapping, we'll return error for entries that should be added instead. This is particularly visible with garbage collection intervals that are comparable with the element timeout itself, as reported by Mike Dillinger. Other than the simple issue of denying insertion of valid entries, this might also result in insertion of a single element (opening or closing) out of a given interval. With single entries (that are inserted as intervals of size 1), this leads in turn to the creation of new intervals. For example: # nft add element t s { 192.0.2.1 } # nft list ruleset [...] elements = { 192.0.2.1-255.255.255.255 } Always ignore expired elements active in the next generation, while checking for conflicts. It might be more convenient to introduce a new macro that covers both inactive and expired items, as this type of check also appears quite frequently in other set back-ends. This is however beyond the scope of this fix and can be deferred to a separate patch. Other than the overlap detection cases introduced by commit 7c84d41416d8 ("netfilter: nft_set_rbtree: Detect partial overlaps on insertion"), we also have to cover the original conflict check dealing with conflicts between two intervals of size 1, which was introduced before support for timeout was introduced. This won't return an error to the user as -EEXIST is masked by nft if NLM_F_EXCL is not given, but would result in a silent failure adding the entry. 
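The repeated "active in the next generation and not expired" test is a natural candidate for the helper alluded to above; such an inline (purely hypothetical, not part of this fix) could look like:

    /* hypothetical helper: element is active for genmask and not expired */
    static inline bool nft_set_elem_usable(const struct nft_set_ext *ext,
                                           u8 genmask)
    {
            return nft_set_elem_active(ext, genmask) &&
                   !nft_set_elem_expired(ext);
    }

which would let each branch in __nft_rbtree_insert() collapse its two-condition checks into a single call.
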
Reported-by: Mike Dillinger Cc: # 5.6.x Fixes: 8d8540c4f5e0 ("netfilter: nft_set_rbtree: add timeout support") Fixes: 7c84d41416d8 ("netfilter: nft_set_rbtree: Detect partial overlaps on insertion") Signed-off-by: Stefano Brivio Acked-by: Phil Sutter Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nft_set_rbtree.c | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c index 62f416bc05796..b6aad3fc46c35 100644 --- a/net/netfilter/nft_set_rbtree.c +++ b/net/netfilter/nft_set_rbtree.c @@ -271,12 +271,14 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set, if (nft_rbtree_interval_start(new)) { if (nft_rbtree_interval_end(rbe) && - nft_set_elem_active(&rbe->ext, genmask)) + nft_set_elem_active(&rbe->ext, genmask) && + !nft_set_elem_expired(&rbe->ext)) overlap = false; } else { overlap = nft_rbtree_interval_end(rbe) && nft_set_elem_active(&rbe->ext, - genmask); + genmask) && + !nft_set_elem_expired(&rbe->ext); } } else if (d > 0) { p = &parent->rb_right; @@ -284,9 +286,11 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set, if (nft_rbtree_interval_end(new)) { overlap = nft_rbtree_interval_end(rbe) && nft_set_elem_active(&rbe->ext, - genmask); + genmask) && + !nft_set_elem_expired(&rbe->ext); } else if (nft_rbtree_interval_end(rbe) && - nft_set_elem_active(&rbe->ext, genmask)) { + nft_set_elem_active(&rbe->ext, genmask) && + !nft_set_elem_expired(&rbe->ext)) { overlap = true; } } else { @@ -294,15 +298,18 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set, nft_rbtree_interval_start(new)) { p = &parent->rb_left; - if (nft_set_elem_active(&rbe->ext, genmask)) + if (nft_set_elem_active(&rbe->ext, genmask) && + !nft_set_elem_expired(&rbe->ext)) overlap = false; } else if (nft_rbtree_interval_start(rbe) && nft_rbtree_interval_end(new)) { p = &parent->rb_right; - if (nft_set_elem_active(&rbe->ext, genmask)) + if (nft_set_elem_active(&rbe->ext, genmask) && + !nft_set_elem_expired(&rbe->ext)) overlap = false; - } else if (nft_set_elem_active(&rbe->ext, genmask)) { + } else if (nft_set_elem_active(&rbe->ext, genmask) && + !nft_set_elem_expired(&rbe->ext)) { *ext = &rbe->ext; return -EEXIST; } else { -- GitLab From f8d8b46cd20e3a262c17ba1061640d9c190ad769 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Tue, 5 May 2020 19:03:19 +0900 Subject: [PATCH 1234/1971] scripts/dtc: use pkg-config to include in non-standard path Commit 067c650c456e ("dtc: Use pkg-config to locate libyaml") added 'pkg-config --libs' to link libyaml installed in a non-standard location. yamltree.c includes , but that commit did not add the search path for . If /usr/include/yaml.h does not exist, it fails to build. A user can explicitly pass HOSTCFLAGS to work around it, but the policy is not consistent. There are two ways to deal with libraries in a non-default location. [1] Use HOSTCFLAGS and HOSTLDFLAGS for additional search paths for headers and libraries. They are documented in Documentation/kbuild/kbuild.rst $ make HOSTCFLAGS='-I /include' HOSTLDFLAGS='-L /lib' [2] Use pkg-config 'pkg-config --cflags' for querying the header search path 'pkg-config --libs' for querying the lib and its path If we go with pkg-config, use [2] consistently. Do not mix up [1] and [2]. 
Signed-off-by: Masahiro Yamada Signed-off-by: Rob Herring --- scripts/dtc/Makefile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/scripts/dtc/Makefile b/scripts/dtc/Makefile index ef85f8b7d4a76..0b44917f981c7 100644 --- a/scripts/dtc/Makefile +++ b/scripts/dtc/Makefile @@ -20,6 +20,9 @@ endif HOST_EXTRACFLAGS += -DNO_YAML else dtc-objs += yamltree.o +# To include installed in a non-default path +HOSTCFLAGS_yamltree.o := $(shell pkg-config --cflags yaml-0.1) +# To link libyaml installed in a non-default path HOSTLDLIBS_dtc := $(shell pkg-config yaml-0.1 --libs) endif -- GitLab From 3af73b286ccee493dc055fc58da02b2dc7a5304d Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Mon, 8 Jun 2020 21:08:17 +0300 Subject: [PATCH 1235/1971] io_uring: don't derive close state from ->func Relying on having a specific work.func is dangerous, even if an opcode handler set it itself. E.g. io_wq_assign_next() can modify it. io_close() sets a custom work.func to indicate that __close_fd_get_file() was already called. Fortunately, there is no bugs with io_wq_assign_next() and close yet. Still, do it safe and always be prepared to be called through io_wq_submit_work(). Zero req->close.put_file in prep, and call __close_fd_get_file() IFF it's NULL. Signed-off-by: Pavel Begunkov Signed-off-by: Jens Axboe --- fs/io_uring.c | 50 +++++++++++++++++--------------------------------- 1 file changed, 17 insertions(+), 33 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 5e36e78e766e7..43721f046f031 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -3437,53 +3437,37 @@ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) req->close.fd == req->ctx->ring_fd) return -EBADF; + req->close.put_file = NULL; return 0; } -/* only called when __close_fd_get_file() is done */ -static void __io_close_finish(struct io_kiocb *req) -{ - int ret; - - ret = filp_close(req->close.put_file, req->work.files); - if (ret < 0) - req_set_fail_links(req); - io_cqring_add_event(req, ret); - fput(req->close.put_file); - io_put_req(req); -} - -static void io_close_finish(struct io_wq_work **workptr) -{ - struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work); - - /* not cancellable, don't do io_req_cancelled() */ - __io_close_finish(req); - io_steal_work(req, workptr); -} - static int io_close(struct io_kiocb *req, bool force_nonblock) { + struct io_close *close = &req->close; int ret; - req->close.put_file = NULL; - ret = __close_fd_get_file(req->close.fd, &req->close.put_file); - if (ret < 0) - return (ret == -ENOENT) ? -EBADF : ret; + /* might be already done during nonblock submission */ + if (!close->put_file) { + ret = __close_fd_get_file(close->fd, &close->put_file); + if (ret < 0) + return (ret == -ENOENT) ? -EBADF : ret; + } /* if the file has a flush method, be safe and punt to async */ - if (req->close.put_file->f_op->flush && force_nonblock) { + if (close->put_file->f_op->flush && force_nonblock) { /* avoid grabbing files - we don't need the files */ req->flags |= REQ_F_NO_FILE_TABLE | REQ_F_MUST_PUNT; - req->work.func = io_close_finish; return -EAGAIN; } - /* - * No ->flush(), safely close from here and just punt the - * fput() to async context. 
- */ - __io_close_finish(req); + /* No ->flush() or already async, safely close from here */ + ret = filp_close(close->put_file, req->work.files); + if (ret < 0) + req_set_fail_links(req); + io_cqring_add_event(req, ret); + fput(close->put_file); + close->put_file = NULL; + io_put_req(req); return 0; } -- GitLab From ac45abc0e2a8ed16ecc0eea039fe762ddfefbcad Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Mon, 8 Jun 2020 21:08:18 +0300 Subject: [PATCH 1236/1971] io_uring: remove custom ->func handlers In preparation of getting rid of work.func, this removes almost all custom instances of it, leaving only io_wq_submit_work() and io_link_work_cb(). And the last one will be dealt later. Nothing fancy, just routinely remove *_finish() function and inline what's left. E.g. remove io_fsync_finish() + inline __io_fsync() into io_fsync(). As no users of io_req_cancelled() are left, delete it as well. The patch adds extra switch lookup on cold-ish path, but that's overweighted by nice diffstat and other benefits of the following patches. Signed-off-by: Pavel Begunkov Signed-off-by: Jens Axboe --- fs/io_uring.c | 139 ++++++++++---------------------------------------- 1 file changed, 27 insertions(+), 112 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 43721f046f031..42a90e8831bf7 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2898,23 +2898,15 @@ static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe) return 0; } -static bool io_req_cancelled(struct io_kiocb *req) -{ - if (req->work.flags & IO_WQ_WORK_CANCEL) { - req_set_fail_links(req); - io_cqring_add_event(req, -ECANCELED); - io_put_req(req); - return true; - } - - return false; -} - -static void __io_fsync(struct io_kiocb *req) +static int io_fsync(struct io_kiocb *req, bool force_nonblock) { loff_t end = req->sync.off + req->sync.len; int ret; + /* fsync always requires a blocking context */ + if (force_nonblock) + return -EAGAIN; + ret = vfs_fsync_range(req->file, req->sync.off, end > 0 ? 
end : LLONG_MAX, req->sync.flags & IORING_FSYNC_DATASYNC); @@ -2922,53 +2914,9 @@ static void __io_fsync(struct io_kiocb *req) req_set_fail_links(req); io_cqring_add_event(req, ret); io_put_req(req); -} - -static void io_fsync_finish(struct io_wq_work **workptr) -{ - struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work); - - if (io_req_cancelled(req)) - return; - __io_fsync(req); - io_steal_work(req, workptr); -} - -static int io_fsync(struct io_kiocb *req, bool force_nonblock) -{ - /* fsync always requires a blocking context */ - if (force_nonblock) { - req->work.func = io_fsync_finish; - return -EAGAIN; - } - __io_fsync(req); return 0; } -static void __io_fallocate(struct io_kiocb *req) -{ - int ret; - - current->signal->rlim[RLIMIT_FSIZE].rlim_cur = req->fsize; - ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off, - req->sync.len); - current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY; - if (ret < 0) - req_set_fail_links(req); - io_cqring_add_event(req, ret); - io_put_req(req); -} - -static void io_fallocate_finish(struct io_wq_work **workptr) -{ - struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work); - - if (io_req_cancelled(req)) - return; - __io_fallocate(req); - io_steal_work(req, workptr); -} - static int io_fallocate_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { @@ -2986,13 +2934,20 @@ static int io_fallocate_prep(struct io_kiocb *req, static int io_fallocate(struct io_kiocb *req, bool force_nonblock) { + int ret; + /* fallocate always requiring blocking context */ - if (force_nonblock) { - req->work.func = io_fallocate_finish; + if (force_nonblock) return -EAGAIN; - } - __io_fallocate(req); + current->signal->rlim[RLIMIT_FSIZE].rlim_cur = req->fsize; + ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off, + req->sync.len); + current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY; + if (ret < 0) + req_set_fail_links(req); + io_cqring_add_event(req, ret); + io_put_req(req); return 0; } @@ -3489,38 +3444,20 @@ static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe) return 0; } -static void __io_sync_file_range(struct io_kiocb *req) +static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock) { int ret; + /* sync_file_range always requires a blocking context */ + if (force_nonblock) + return -EAGAIN; + ret = sync_file_range(req->file, req->sync.off, req->sync.len, req->sync.flags); if (ret < 0) req_set_fail_links(req); io_cqring_add_event(req, ret); io_put_req(req); -} - - -static void io_sync_file_range_finish(struct io_wq_work **workptr) -{ - struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work); - - if (io_req_cancelled(req)) - return; - __io_sync_file_range(req); - io_steal_work(req, workptr); -} - -static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock) -{ - /* sync_file_range always requires a blocking context */ - if (force_nonblock) { - req->work.func = io_sync_file_range_finish; - return -EAGAIN; - } - - __io_sync_file_range(req); return 0; } @@ -3942,49 +3879,27 @@ static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) return 0; } -static int __io_accept(struct io_kiocb *req, bool force_nonblock) +static int io_accept(struct io_kiocb *req, bool force_nonblock) { struct io_accept *accept = &req->accept; - unsigned file_flags; + unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0; int ret; - file_flags = force_nonblock ? 
O_NONBLOCK : 0; ret = __sys_accept4_file(req->file, file_flags, accept->addr, accept->addr_len, accept->flags, accept->nofile); if (ret == -EAGAIN && force_nonblock) return -EAGAIN; - if (ret == -ERESTARTSYS) - ret = -EINTR; - if (ret < 0) + if (ret < 0) { + if (ret == -ERESTARTSYS) + ret = -EINTR; req_set_fail_links(req); + } io_cqring_add_event(req, ret); io_put_req(req); return 0; } -static void io_accept_finish(struct io_wq_work **workptr) -{ - struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work); - - if (io_req_cancelled(req)) - return; - __io_accept(req, false); - io_steal_work(req, workptr); -} - -static int io_accept(struct io_kiocb *req, bool force_nonblock) -{ - int ret; - - ret = __io_accept(req, force_nonblock); - if (ret == -EAGAIN && force_nonblock) { - req->work.func = io_accept_finish; - return -EAGAIN; - } - return 0; -} - static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { struct io_connect *conn = &req->connect; -- GitLab From d4c81f38522f3e7f4be1b472ef9988d0ed7f3696 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Mon, 8 Jun 2020 21:08:19 +0300 Subject: [PATCH 1237/1971] io_uring: don't arm a timeout through work.func Remove io_link_work_cb() -- the last custom work.func. Not the prettiest thing, but works. Instead of queueing a linked timeout in io_link_work_cb() mark a request with REQ_F_QUEUE_TIMEOUT and do enqueueing based on the flag in io_wq_submit_work(). Signed-off-by: Pavel Begunkov Signed-off-by: Jens Axboe --- fs/io_uring.c | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 42a90e8831bf7..35d96d2a4c8cb 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -541,6 +541,7 @@ enum { REQ_F_POLLED_BIT, REQ_F_BUFFER_SELECTED_BIT, REQ_F_NO_FILE_TABLE_BIT, + REQ_F_QUEUE_TIMEOUT_BIT, /* not a real bit, just to check we're not overflowing the space */ __REQ_F_LAST_BIT, @@ -596,6 +597,8 @@ enum { REQ_F_BUFFER_SELECTED = BIT(REQ_F_BUFFER_SELECTED_BIT), /* doesn't need file table for this request */ REQ_F_NO_FILE_TABLE = BIT(REQ_F_NO_FILE_TABLE_BIT), + /* needs to queue linked timeout */ + REQ_F_QUEUE_TIMEOUT = BIT(REQ_F_QUEUE_TIMEOUT_BIT), }; struct async_poll { @@ -1580,16 +1583,6 @@ static void io_free_req(struct io_kiocb *req) io_queue_async_work(nxt); } -static void io_link_work_cb(struct io_wq_work **workptr) -{ - struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work); - struct io_kiocb *link; - - link = list_first_entry(&req->link_list, struct io_kiocb, link_list); - io_queue_linked_timeout(link); - io_wq_submit_work(workptr); -} - static void io_wq_assign_next(struct io_wq_work **workptr, struct io_kiocb *nxt) { struct io_kiocb *link; @@ -1601,7 +1594,7 @@ static void io_wq_assign_next(struct io_wq_work **workptr, struct io_kiocb *nxt) *workptr = &nxt->work; link = io_prep_linked_timeout(nxt); if (link) - nxt->work.func = io_link_work_cb; + nxt->flags |= REQ_F_QUEUE_TIMEOUT; } /* @@ -5291,12 +5284,26 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, return 0; } +static void io_arm_async_linked_timeout(struct io_kiocb *req) +{ + struct io_kiocb *link; + + /* link head's timeout is queued in io_queue_async_work() */ + if (!(req->flags & REQ_F_QUEUE_TIMEOUT)) + return; + + link = list_first_entry(&req->link_list, struct io_kiocb, link_list); + io_queue_linked_timeout(link); +} + static void io_wq_submit_work(struct io_wq_work **workptr) { struct io_wq_work *work = *workptr; struct io_kiocb *req = 
container_of(work, struct io_kiocb, work); int ret = 0; + io_arm_async_linked_timeout(req); + /* if NO_CANCEL is set, we must still run the work */ if ((work->flags & (IO_WQ_WORK_CANCEL|IO_WQ_WORK_NO_CANCEL)) == IO_WQ_WORK_CANCEL) { -- GitLab From f5fa38c59cb0b40633dee5cdf7465801be3e4928 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Mon, 8 Jun 2020 21:08:20 +0300 Subject: [PATCH 1238/1971] io_wq: add per-wq work handler instead of per work io_uring is the only user of io-wq, and now it uses only io-wq callback for all its requests, namely io_wq_submit_work(). Instead of storing work->runner callback in each instance of io_wq_work, keep it in io-wq itself. pros: - reduces io_wq_work size - more robust -- ->func won't be invalidated with mem{cpy,set}(req) - helps other work Signed-off-by: Pavel Begunkov Signed-off-by: Jens Axboe --- fs/io-wq.c | 10 ++++++---- fs/io-wq.h | 7 ++++--- fs/io_uring.c | 3 ++- 3 files changed, 12 insertions(+), 8 deletions(-) diff --git a/fs/io-wq.c b/fs/io-wq.c index 4023c98468608..d7dc638f4b8e7 100644 --- a/fs/io-wq.c +++ b/fs/io-wq.c @@ -112,6 +112,7 @@ struct io_wq { unsigned long state; free_work_fn *free_work; + io_wq_work_fn *do_work; struct task_struct *manager; struct user_struct *user; @@ -528,7 +529,7 @@ get_next: hash = io_get_work_hash(work); linked = old_work = work; - linked->func(&linked); + wq->do_work(&linked); linked = (old_work == linked) ? NULL : linked; work = next_hashed; @@ -785,7 +786,7 @@ static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe) struct io_wq_work *old_work = work; work->flags |= IO_WQ_WORK_CANCEL; - work->func(&work); + wq->do_work(&work); work = (work == old_work) ? NULL : work; wq->free_work(old_work); } while (work); @@ -1023,7 +1024,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data) int ret = -ENOMEM, node; struct io_wq *wq; - if (WARN_ON_ONCE(!data->free_work)) + if (WARN_ON_ONCE(!data->free_work || !data->do_work)) return ERR_PTR(-EINVAL); wq = kzalloc(sizeof(*wq), GFP_KERNEL); @@ -1037,6 +1038,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data) } wq->free_work = data->free_work; + wq->do_work = data->do_work; /* caller must already hold a reference to this */ wq->user = data->user; @@ -1093,7 +1095,7 @@ err: bool io_wq_get(struct io_wq *wq, struct io_wq_data *data) { - if (data->free_work != wq->free_work) + if (data->free_work != wq->free_work || data->do_work != wq->do_work) return false; return refcount_inc_not_zero(&wq->use_refs); diff --git a/fs/io-wq.h b/fs/io-wq.h index 5ba12de7572f0..2db24d31fbc56 100644 --- a/fs/io-wq.h +++ b/fs/io-wq.h @@ -85,7 +85,6 @@ static inline void wq_list_del(struct io_wq_work_list *list, struct io_wq_work { struct io_wq_work_node list; - void (*func)(struct io_wq_work **); struct files_struct *files; struct mm_struct *mm; const struct cred *creds; @@ -94,9 +93,9 @@ struct io_wq_work { pid_t task_pid; }; -#define INIT_IO_WORK(work, _func) \ +#define INIT_IO_WORK(work) \ do { \ - *(work) = (struct io_wq_work){ .func = _func }; \ + *(work) = (struct io_wq_work){}; \ } while (0) \ static inline struct io_wq_work *wq_next_work(struct io_wq_work *work) @@ -108,10 +107,12 @@ static inline struct io_wq_work *wq_next_work(struct io_wq_work *work) } typedef void (free_work_fn)(struct io_wq_work *); +typedef void (io_wq_work_fn)(struct io_wq_work **); struct io_wq_data { struct user_struct *user; + io_wq_work_fn *do_work; free_work_fn *free_work; }; diff --git a/fs/io_uring.c b/fs/io_uring.c index 35d96d2a4c8cb..3ffe03194c1ea 
100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -5776,7 +5776,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, refcount_set(&req->refs, 2); req->task = NULL; req->result = 0; - INIT_IO_WORK(&req->work, io_wq_submit_work); + INIT_IO_WORK(&req->work); if (unlikely(req->opcode >= IORING_OP_LAST)) return -EINVAL; @@ -6796,6 +6796,7 @@ static int io_init_wq_offload(struct io_ring_ctx *ctx, data.user = ctx->user; data.free_work = io_free_work; + data.do_work = io_wq_submit_work; if (!(p->flags & IORING_SETUP_ATTACH_WQ)) { /* Do QD, or 4 * CPUS, whatever is smallest */ -- GitLab From de9be772c6794f27d1aa02d90d370b724cb67fd7 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 8 Jun 2020 17:17:27 +0300 Subject: [PATCH 1239/1971] i2c: npcm7xx: Fix a couple of error codes in probe The code here is accidentally returning IS_ERR() which is 1 but it should be returning negative error codes with PTR_ERR(). Fixes: 56a1485b102e ("i2c: npcm7xx: Add Nuvoton NPCM I2C controller driver") Signed-off-by: Dan Carpenter Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-npcm7xx.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c index 68951a293aae3..3528d30ea6502 100644 --- a/drivers/i2c/busses/i2c-npcm7xx.c +++ b/drivers/i2c/busses/i2c-npcm7xx.c @@ -2239,12 +2239,12 @@ static int npcm_i2c_probe_bus(struct platform_device *pdev) gcr_regmap = syscon_regmap_lookup_by_compatible("nuvoton,npcm750-gcr"); if (IS_ERR(gcr_regmap)) - return IS_ERR(gcr_regmap); + return PTR_ERR(gcr_regmap); regmap_write(gcr_regmap, NPCM_I2CSEGCTL, NPCM_I2CSEGCTL_INIT_VAL); clk_regmap = syscon_regmap_lookup_by_compatible("nuvoton,npcm750-clk"); if (IS_ERR(clk_regmap)) - return IS_ERR(clk_regmap); + return PTR_ERR(clk_regmap); bus->reg = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(bus->reg)) -- GitLab From c3829285b2e6a0d5461078d7f6cbb2c2b4bf8c4e Mon Sep 17 00:00:00 2001 From: Stefano Brivio Date: Mon, 8 Jun 2020 10:50:29 +0200 Subject: [PATCH 1240/1971] netfilter: nft_set_pipapo: Disable preemption before getting per-CPU pointer The lkp kernel test robot reports, with CONFIG_DEBUG_PREEMPT enabled: [ 165.316525] BUG: using smp_processor_id() in preemptible [00000000] code: nft/6247 [ 165.319547] caller is nft_pipapo_insert+0x464/0x610 [nf_tables] [ 165.321846] CPU: 1 PID: 6247 Comm: nft Not tainted 5.6.0-rc5-01595-ge32a4dc6512ce3 #1 [ 165.332128] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.12.0-1 04/01/2014 [ 165.334892] Call Trace: [ 165.336435] dump_stack+0x8f/0xcb [ 165.338128] debug_smp_processor_id+0xb2/0xc0 [ 165.340117] nft_pipapo_insert+0x464/0x610 [nf_tables] [ 165.342290] ? nft_trans_alloc_gfp+0x1c/0x60 [nf_tables] [ 165.344420] ? rcu_read_lock_sched_held+0x52/0x80 [ 165.346460] ? nft_trans_alloc_gfp+0x1c/0x60 [nf_tables] [ 165.348543] ? __mmu_interval_notifier_insert+0xa0/0xf0 [ 165.350629] nft_add_set_elem+0x5ff/0xa90 [nf_tables] [ 165.352699] ? __lock_acquire+0x241/0x1400 [ 165.354573] ? __lock_acquire+0x241/0x1400 [ 165.356399] ? reacquire_held_locks+0x12f/0x200 [ 165.358384] ? nf_tables_valid_genid+0x1f/0x40 [nf_tables] [ 165.360502] ? nla_strcmp+0x10/0x50 [ 165.362199] ? nft_table_lookup+0x4f/0xa0 [nf_tables] [ 165.364217] ? nla_strcmp+0x10/0x50 [ 165.365891] ? nf_tables_newsetelem+0xd5/0x150 [nf_tables] [ 165.367997] nf_tables_newsetelem+0xd5/0x150 [nf_tables] [ 165.370083] nfnetlink_rcv_batch+0x4fd/0x790 [nfnetlink] [ 165.372205] ? __lock_acquire+0x241/0x1400 [ 165.374058] ? 
__nla_validate_parse+0x57/0x8a0 [ 165.375989] ? cap_inode_getsecurity+0x230/0x230 [ 165.377954] ? security_capable+0x38/0x50 [ 165.379795] nfnetlink_rcv+0x11d/0x140 [nfnetlink] [ 165.381779] netlink_unicast+0x1b2/0x280 [ 165.383612] netlink_sendmsg+0x351/0x470 [ 165.385439] sock_sendmsg+0x5b/0x60 [ 165.387133] ____sys_sendmsg+0x200/0x280 [ 165.388871] ? copy_msghdr_from_user+0xd9/0x160 [ 165.390805] ___sys_sendmsg+0x88/0xd0 [ 165.392524] ? __might_fault+0x3e/0x90 [ 165.394273] ? sock_getsockopt+0x3d5/0xbb0 [ 165.396021] ? __handle_mm_fault+0x545/0x6a0 [ 165.397822] ? find_held_lock+0x2d/0x90 [ 165.399593] ? __sys_sendmsg+0x5e/0xa0 [ 165.401338] __sys_sendmsg+0x5e/0xa0 [ 165.402979] do_syscall_64+0x60/0x280 [ 165.404680] entry_SYSCALL_64_after_hwframe+0x49/0xbe [ 165.406621] RIP: 0033:0x7ff1fa46e783 [ 165.408299] Code: c7 c0 ff ff ff ff eb bb 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 44 00 00 64 8b 04 25 18 00 00 00 85 c0 75 14 b8 2e 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 55 c3 0f 1f 40 00 48 83 ec 28 89 54 24 1c 48 [ 165.414163] RSP: 002b:00007ffedf59ea78 EFLAGS: 00000246 ORIG_RAX: 000000000000002e [ 165.416804] RAX: ffffffffffffffda RBX: 00007ffedf59fc60 RCX: 00007ff1fa46e783 [ 165.419419] RDX: 0000000000000000 RSI: 00007ffedf59fb10 RDI: 0000000000000005 [ 165.421886] RBP: 00007ffedf59fc10 R08: 00007ffedf59ea54 R09: 0000000000000001 [ 165.424445] R10: 00007ff1fa630c6c R11: 0000000000000246 R12: 0000000000020000 [ 165.426954] R13: 0000000000000280 R14: 0000000000000005 R15: 00007ffedf59ea90 Disable preemption before accessing the lookup scratch area in nft_pipapo_insert(). Reported-by: kernel test robot Analysed-by: Florian Westphal Cc: # 5.6.x Fixes: 3c4287f62044 ("nf_tables: Add set type for arbitrary concatenation of ranges") Signed-off-by: Stefano Brivio Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nft_set_pipapo.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c index 8b5acc6910fd7..8c04388296b02 100644 --- a/net/netfilter/nft_set_pipapo.c +++ b/net/netfilter/nft_set_pipapo.c @@ -1242,7 +1242,9 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set, end += NFT_PIPAPO_GROUPS_PADDED_SIZE(f); } - if (!*this_cpu_ptr(m->scratch) || bsize_max > m->bsize_max) { + if (!*get_cpu_ptr(m->scratch) || bsize_max > m->bsize_max) { + put_cpu_ptr(m->scratch); + err = pipapo_realloc_scratch(m, bsize_max); if (err) return err; @@ -1250,6 +1252,8 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set, this_cpu_write(nft_pipapo_scratch_index, false); m->bsize_max = bsize_max; + } else { + put_cpu_ptr(m->scratch); } *ext2 = &e->ext; -- GitLab From 0e6fbe39bdf71b4e665767bcbf53567a3e6d0623 Mon Sep 17 00:00:00 2001 From: Pooja Trivedi Date: Fri, 5 Jun 2020 16:01:18 +0000 Subject: [PATCH 1241/1971] net/tls(TLS_SW): Add selftest for 'chunked' sendfile test This selftest tests for cases where sendfile's 'count' parameter is provided with a size greater than the intended file size. Motivation: When sendfile is provided with 'count' parameter value that is greater than the size of the file, kTLS example fails to send the file correctly. Last chunk of the file is not sent, and the data integrity is compromised. The reason is that the last chunk has MSG_MORE flag set because of which it gets added to pending records, but is not pushed. Note that if user space were to send SSL_shutdown control message, pending records would get flushed and the issue would not happen. 
So a shutdown control message following sendfile can mask the issue. Signed-off-by: Pooja Trivedi Signed-off-by: Mallesham Jatharkonda Signed-off-by: Josh Tway Reviewed-by: Jakub Kicinski Signed-off-by: David S. Miller --- tools/testing/selftests/net/tls.c | 58 +++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c index c5282e62df75c..b599f1fa99b55 100644 --- a/tools/testing/selftests/net/tls.c +++ b/tools/testing/selftests/net/tls.c @@ -213,6 +213,64 @@ TEST_F(tls, send_then_sendfile) EXPECT_EQ(recv(self->cfd, buf, st.st_size, MSG_WAITALL), st.st_size); } +static void chunked_sendfile(struct __test_metadata *_metadata, + struct _test_data_tls *self, + uint16_t chunk_size, + uint16_t extra_payload_size) +{ + char buf[TLS_PAYLOAD_MAX_LEN]; + uint16_t test_payload_size; + int size = 0; + int ret; + char filename[] = "/tmp/mytemp.XXXXXX"; + int fd = mkstemp(filename); + off_t offset = 0; + + unlink(filename); + ASSERT_GE(fd, 0); + EXPECT_GE(chunk_size, 1); + test_payload_size = chunk_size + extra_payload_size; + ASSERT_GE(TLS_PAYLOAD_MAX_LEN, test_payload_size); + memset(buf, 1, test_payload_size); + size = write(fd, buf, test_payload_size); + EXPECT_EQ(size, test_payload_size); + fsync(fd); + + while (size > 0) { + ret = sendfile(self->fd, fd, &offset, chunk_size); + EXPECT_GE(ret, 0); + size -= ret; + } + + EXPECT_EQ(recv(self->cfd, buf, test_payload_size, MSG_WAITALL), + test_payload_size); + + close(fd); +} + +TEST_F(tls, multi_chunk_sendfile) +{ + chunked_sendfile(_metadata, self, 4096, 4096); + chunked_sendfile(_metadata, self, 4096, 0); + chunked_sendfile(_metadata, self, 4096, 1); + chunked_sendfile(_metadata, self, 4096, 2048); + chunked_sendfile(_metadata, self, 8192, 2048); + chunked_sendfile(_metadata, self, 4096, 8192); + chunked_sendfile(_metadata, self, 8192, 4096); + chunked_sendfile(_metadata, self, 12288, 1024); + chunked_sendfile(_metadata, self, 12288, 2000); + chunked_sendfile(_metadata, self, 15360, 100); + chunked_sendfile(_metadata, self, 15360, 300); + chunked_sendfile(_metadata, self, 1, 4096); + chunked_sendfile(_metadata, self, 2048, 4096); + chunked_sendfile(_metadata, self, 2048, 8192); + chunked_sendfile(_metadata, self, 4096, 8192); + chunked_sendfile(_metadata, self, 1024, 12288); + chunked_sendfile(_metadata, self, 2000, 12288); + chunked_sendfile(_metadata, self, 100, 15360); + chunked_sendfile(_metadata, self, 300, 15360); +} + TEST_F(tls, recv_max) { unsigned int send_len = TLS_PAYLOAD_MAX_LEN; -- GitLab From 3763a24c727ecf236358a81ee749e5fcab1c972a Mon Sep 17 00:00:00 2001 From: Arjun Roy Date: Sun, 7 Jun 2020 18:54:41 -0700 Subject: [PATCH 1242/1971] net-zerocopy: use vm_insert_pages() for tcp rcv zerocopy Use vm_insert_pages() for tcp receive zerocopy. Spin lock cycles (as reported by perf) drop from a couple of percentage points to a fraction of a percent. This results in a roughly 6% increase in efficiency, measured roughly as zerocopy receive count divided by CPU utilization. The intention of this patchset is to reduce atomic ops for tcp zerocopy receives, which normally hits the same spinlock multiple times consecutively. 
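The underlying pattern can be sketched in stand-alone C (illustration only; flush_batch() is a hypothetical stand-in for vm_insert_pages(), which does the real work in this patch): items are queued in a small array and one call flushes the whole batch, so the fixed per-call cost (here, taking the page-table lock) is paid once per batch rather than once per item.

  #include <stdio.h>

  #define BATCH_SIZE 8

  /* Hypothetical stand-in for the expensive call (vm_insert_pages() in the
   * patch): its fixed per-call cost is paid once per flush. */
  static void flush_batch(const int *items, int n)
  {
          printf("flushing %d item(s) starting at %d\n", n, items[0]);
  }

  int main(void)
  {
          int batch[BATCH_SIZE];
          int n = 0;

          for (int item = 0; item < 20; item++) {
                  batch[n++] = item;              /* cheap: just queue the item */
                  if (n == BATCH_SIZE) {          /* batch full: one expensive call */
                          flush_batch(batch, n);
                          n = 0;
                  }
          }
          if (n)                                  /* flush the final partial batch */
                  flush_batch(batch, n);
          return 0;
  }

With BATCH_SIZE 8, the 20 items above trigger three flushes instead of 20 individual calls.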
[akpm@linux-foundation.org: suppress gcc-7.2.0 warning] Link: http://lkml.kernel.org/r/20200128025958.43490-3-arjunroy.kdev@gmail.com Signed-off-by: Arjun Roy Signed-off-by: Eric Dumazet Signed-off-by: Soheil Hassas Yeganeh Cc: David Miller Cc: Matthew Wilcox Cc: Jason Gunthorpe Cc: Stephen Rothwell Signed-off-by: Andrew Morton Signed-off-by: David S. Miller --- net/ipv4/tcp.c | 70 +++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 63 insertions(+), 7 deletions(-) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 15d47d5e79510..ecbba0abd3e5b 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1742,14 +1742,48 @@ int tcp_mmap(struct file *file, struct socket *sock, } EXPORT_SYMBOL(tcp_mmap); +static int tcp_zerocopy_vm_insert_batch(struct vm_area_struct *vma, + struct page **pages, + unsigned long pages_to_map, + unsigned long *insert_addr, + u32 *length_with_pending, + u32 *seq, + struct tcp_zerocopy_receive *zc) +{ + unsigned long pages_remaining = pages_to_map; + int bytes_mapped; + int ret; + + ret = vm_insert_pages(vma, *insert_addr, pages, &pages_remaining); + bytes_mapped = PAGE_SIZE * (pages_to_map - pages_remaining); + /* Even if vm_insert_pages fails, it may have partially succeeded in + * mapping (some but not all of the pages). + */ + *seq += bytes_mapped; + *insert_addr += bytes_mapped; + if (ret) { + /* But if vm_insert_pages did fail, we have to unroll some state + * we speculatively touched before. + */ + const int bytes_not_mapped = PAGE_SIZE * pages_remaining; + *length_with_pending -= bytes_not_mapped; + zc->recv_skip_hint += bytes_not_mapped; + } + return ret; +} + static int tcp_zerocopy_receive(struct sock *sk, struct tcp_zerocopy_receive *zc) { unsigned long address = (unsigned long)zc->address; u32 length = 0, seq, offset, zap_len; + #define PAGE_BATCH_SIZE 8 + struct page *pages[PAGE_BATCH_SIZE]; const skb_frag_t *frags = NULL; struct vm_area_struct *vma; struct sk_buff *skb = NULL; + unsigned long pg_idx = 0; + unsigned long curr_addr; struct tcp_sock *tp; int inq; int ret; @@ -1762,6 +1796,8 @@ static int tcp_zerocopy_receive(struct sock *sk, sock_rps_record_flow(sk); + tp = tcp_sk(sk); + down_read(¤t->mm->mmap_sem); vma = find_vma(current->mm, address); @@ -1771,7 +1807,6 @@ static int tcp_zerocopy_receive(struct sock *sk, } zc->length = min_t(unsigned long, zc->length, vma->vm_end - address); - tp = tcp_sk(sk); seq = tp->copied_seq; inq = tcp_inq(sk); zc->length = min_t(u32, zc->length, inq); @@ -1783,8 +1818,20 @@ static int tcp_zerocopy_receive(struct sock *sk, zc->recv_skip_hint = zc->length; } ret = 0; + curr_addr = address; while (length + PAGE_SIZE <= zc->length) { if (zc->recv_skip_hint < PAGE_SIZE) { + /* If we're here, finish the current batch. 
*/ + if (pg_idx) { + ret = tcp_zerocopy_vm_insert_batch(vma, pages, + pg_idx, + &curr_addr, + &length, + &seq, zc); + if (ret) + goto out; + pg_idx = 0; + } if (skb) { if (zc->recv_skip_hint > 0) break; @@ -1793,7 +1840,6 @@ static int tcp_zerocopy_receive(struct sock *sk, } else { skb = tcp_recv_skb(sk, seq, &offset); } - zc->recv_skip_hint = skb->len - offset; offset -= skb_headlen(skb); if ((int)offset < 0 || skb_has_frag_list(skb)) @@ -1817,14 +1863,24 @@ static int tcp_zerocopy_receive(struct sock *sk, zc->recv_skip_hint -= remaining; break; } - ret = vm_insert_page(vma, address + length, - skb_frag_page(frags)); - if (ret) - break; + pages[pg_idx] = skb_frag_page(frags); + pg_idx++; length += PAGE_SIZE; - seq += PAGE_SIZE; zc->recv_skip_hint -= PAGE_SIZE; frags++; + if (pg_idx == PAGE_BATCH_SIZE) { + ret = tcp_zerocopy_vm_insert_batch(vma, pages, pg_idx, + &curr_addr, &length, + &seq, zc); + if (ret) + goto out; + pg_idx = 0; + } + } + if (pg_idx) { + ret = tcp_zerocopy_vm_insert_batch(vma, pages, pg_idx, + &curr_addr, &length, &seq, + zc); } out: up_read(¤t->mm->mmap_sem); -- GitLab From 8e60eed6b38e464e8c9d68f9caecafaa554dffe0 Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Mon, 8 Jun 2020 18:47:54 +0800 Subject: [PATCH 1243/1971] mptcp: bugfix for RM_ADDR option parsing In MPTCPOPT_RM_ADDR option parsing, the pointer "ptr" pointed to the "Subtype" octet, the pointer "ptr+1" pointed to the "Address ID" octet: +-------+-------+---------------+ |Subtype|(resvd)| Address ID | +-------+-------+---------------+ | | ptr ptr+1 We should set mp_opt->rm_id to the value of "ptr+1", not "ptr". This patch will fix this bug. Fixes: 3df523ab582c ("mptcp: Add ADD_ADDR handling") Signed-off-by: Geliang Tang Reviewed-by: Matthieu Baerts Signed-off-by: David S. Miller --- net/mptcp/options.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/mptcp/options.c b/net/mptcp/options.c index 01f1f4cf4902a..490b92534afc3 100644 --- a/net/mptcp/options.c +++ b/net/mptcp/options.c @@ -273,6 +273,8 @@ static void mptcp_parse_option(const struct sk_buff *skb, if (opsize != TCPOLEN_MPTCP_RM_ADDR_BASE) break; + ptr++; + mp_opt->rm_addr = 1; mp_opt->rm_id = *ptr++; pr_debug("RM_ADDR: id=%d", mp_opt->rm_id); -- GitLab From 8027bc0307ce59759b90679fa5d8b22949586d20 Mon Sep 17 00:00:00 2001 From: tannerlove Date: Mon, 8 Jun 2020 15:37:15 -0400 Subject: [PATCH 1244/1971] selftests/net: in timestamping, strncpy needs to preserve null byte MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If user passed an interface option longer than 15 characters, then device.ifr_name and hwtstamp.ifr_name became non-null-terminated strings. The compiler warned about this: timestamping.c:353:2: warning: ‘strncpy’ specified bound 16 equals \ destination size [-Wstringop-truncation] 353 | strncpy(device.ifr_name, interface, sizeof(device.ifr_name)); Fixes: cb9eff097831 ("net: new user space API for time stamping of incoming and outgoing packets") Signed-off-by: Tanner Love Acked-by: Willem de Bruijn Signed-off-by: David S. 
Miller --- tools/testing/selftests/net/timestamping.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/net/timestamping.c b/tools/testing/selftests/net/timestamping.c index aca3491174a1e..f4bb4fef0f399 100644 --- a/tools/testing/selftests/net/timestamping.c +++ b/tools/testing/selftests/net/timestamping.c @@ -313,10 +313,16 @@ int main(int argc, char **argv) int val; socklen_t len; struct timeval next; + size_t if_len; if (argc < 2) usage(0); interface = argv[1]; + if_len = strlen(interface); + if (if_len >= IFNAMSIZ) { + printf("interface name exceeds IFNAMSIZ\n"); + exit(1); + } for (i = 2; i < argc; i++) { if (!strcasecmp(argv[i], "SO_TIMESTAMP")) @@ -350,12 +356,12 @@ int main(int argc, char **argv) bail("socket"); memset(&device, 0, sizeof(device)); - strncpy(device.ifr_name, interface, sizeof(device.ifr_name)); + memcpy(device.ifr_name, interface, if_len + 1); if (ioctl(sock, SIOCGIFADDR, &device) < 0) bail("getting interface IP address"); memset(&hwtstamp, 0, sizeof(hwtstamp)); - strncpy(hwtstamp.ifr_name, interface, sizeof(hwtstamp.ifr_name)); + memcpy(hwtstamp.ifr_name, interface, if_len + 1); hwtstamp.ifr_data = (void *)&hwconfig; memset(&hwconfig, 0, sizeof(hwconfig)); hwconfig.tx_type = -- GitLab From 0b6d4ca04a86b9dababbb76e58d33c437e127b77 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 4 Jun 2020 21:57:48 -0700 Subject: [PATCH 1245/1971] f2fs: don't return vmalloc() memory from f2fs_kmalloc() kmalloc() returns kmalloc'ed memory, and kvmalloc() returns either kmalloc'ed or vmalloc'ed memory. But the f2fs wrappers, f2fs_kmalloc() and f2fs_kvmalloc(), both return both kinds of memory. It's redundant to have two functions that do the same thing, and also breaking the standard naming convention is causing bugs since people assume it's safe to kfree() memory allocated by f2fs_kmalloc(). See e.g. the various allocations in fs/f2fs/compress.c. Fix this by making f2fs_kmalloc() just use kmalloc(). And to avoid re-introducing the allocation failures that the vmalloc fallback was intended to fix, convert the largest allocations to use f2fs_kvmalloc(). 
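For context, the trap the rename avoids can be sketched as follows (kernel-context fragment, not from the patch; assumes the usual slab/vmalloc headers): memory from kvmalloc() may live in the vmalloc area and must be released with kvfree(), so a helper named like kmalloc() but silently backed by kvmalloc() invites incorrect kfree() calls.

  /* Kernel-context sketch only. */
  void *a = kmalloc(sz, GFP_KERNEL);   /* always slab memory            */
  void *b = kvmalloc(sz, GFP_KERNEL);  /* slab memory OR vmalloc memory */

  kfree(a);    /* fine: a is slab memory                             */
  kvfree(b);   /* required: kfree(b) is a bug if b came from vmalloc */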
Signed-off-by: Eric Biggers Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/checkpoint.c | 4 ++-- fs/f2fs/f2fs.h | 8 +------- fs/f2fs/node.c | 8 ++++---- fs/f2fs/super.c | 2 +- 4 files changed, 8 insertions(+), 14 deletions(-) diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 3dc3ac6fe1432..2360649302516 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -895,8 +895,8 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi) int i; int err; - sbi->ckpt = f2fs_kzalloc(sbi, array_size(blk_size, cp_blks), - GFP_KERNEL); + sbi->ckpt = f2fs_kvzalloc(sbi, array_size(blk_size, cp_blks), + GFP_KERNEL); if (!sbi->ckpt) return -ENOMEM; /* diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index fb180020e175c..7794e2f0d6e8a 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -2997,18 +2997,12 @@ static inline bool f2fs_may_extent_tree(struct inode *inode) static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi, size_t size, gfp_t flags) { - void *ret; - if (time_to_inject(sbi, FAULT_KMALLOC)) { f2fs_show_injection_info(sbi, FAULT_KMALLOC); return NULL; } - ret = kmalloc(size, flags); - if (ret) - return ret; - - return kvmalloc(size, flags); + return kmalloc(size, flags); } static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi, diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index e0bb0f7e0506e..03e24df1c84f5 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -2993,7 +2993,7 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi) return 0; nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8); - nm_i->nat_bits = f2fs_kzalloc(sbi, + nm_i->nat_bits = f2fs_kvzalloc(sbi, nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL); if (!nm_i->nat_bits) return -ENOMEM; @@ -3126,9 +3126,9 @@ static int init_free_nid_cache(struct f2fs_sb_info *sbi) int i; nm_i->free_nid_bitmap = - f2fs_kzalloc(sbi, array_size(sizeof(unsigned char *), - nm_i->nat_blocks), - GFP_KERNEL); + f2fs_kvzalloc(sbi, array_size(sizeof(unsigned char *), + nm_i->nat_blocks), + GFP_KERNEL); if (!nm_i->free_nid_bitmap) return -ENOMEM; diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index a71da699cb2d5..f3c1511695421 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -3033,7 +3033,7 @@ static int init_blkz_info(struct f2fs_sb_info *sbi, int devi) if (nr_sectors & (bdev_zone_sectors(bdev) - 1)) FDEV(devi).nr_blkz++; - FDEV(devi).blkz_seq = f2fs_kzalloc(sbi, + FDEV(devi).blkz_seq = f2fs_kvzalloc(sbi, BITS_TO_LONGS(FDEV(devi).nr_blkz) * sizeof(unsigned long), GFP_KERNEL); -- GitLab From fc3bb095ab02b9e7d89a069ade2cead15c64c504 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Mon, 1 Jun 2020 13:08:05 -0700 Subject: [PATCH 1246/1971] f2fs: avoid utf8_strncasecmp() with unstable name If the dentry name passed to ->d_compare() fits in dentry::d_iname, then it may be concurrently modified by a rename. This can cause undefined behavior (possibly out-of-bounds memory accesses or crashes) in utf8_strncasecmp(), since fs/unicode/ isn't written to handle strings that may be concurrently modified. Fix this by first copying the filename to a stack buffer if needed. This way we get a stable snapshot of the filename. 
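The same snapshot-before-compare pattern, reduced to stand-alone C (illustration only; the kernel code additionally uses barrier() to keep the copy from being optimized away and compares with utf8_strncasecmp()):

  #include <stdio.h>
  #include <string.h>
  #include <strings.h>

  #define NAME_BUF_LEN 40                 /* stands in for DNAME_INLINE_LEN */

  /* Case-insensitive compare of a possibly-unstable name against 'other',
   * done on a private, NUL-terminated snapshot of the bytes. */
  static int compare_snapshot(const char *unstable, size_t len, const char *other)
  {
          char buf[NAME_BUF_LEN];

          if (len > NAME_BUF_LEN - 1)
                  return 1;               /* too long to snapshot: mismatch */
          memcpy(buf, unstable, len);     /* one snapshot, then work on it  */
          buf[len] = '\0';
          return strcasecmp(buf, other) != 0;
  }

  int main(void)
  {
          printf("%d\n", compare_snapshot("README.TXT", 10, "readme.txt")); /* 0 */
          printf("%d\n", compare_snapshot("a.txt", 5, "b.txt"));            /* 1 */
          return 0;
  }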
Fixes: 2c2eb7a300cd ("f2fs: Support case-insensitive file name lookups") Cc: # v5.4+ Cc: Al Viro Cc: Daniel Rosenberg Cc: Gabriel Krisman Bertazi Signed-off-by: Eric Biggers Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/dir.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index 29f70f2295cce..d35976785e8c5 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c @@ -1114,11 +1114,27 @@ static int f2fs_d_compare(const struct dentry *dentry, unsigned int len, const struct inode *dir = READ_ONCE(parent->d_inode); const struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb); struct qstr entry = QSTR_INIT(str, len); + char strbuf[DNAME_INLINE_LEN]; int res; if (!dir || !IS_CASEFOLDED(dir)) goto fallback; + /* + * If the dentry name is stored in-line, then it may be concurrently + * modified by a rename. If this happens, the VFS will eventually retry + * the lookup, so it doesn't matter what ->d_compare() returns. + * However, it's unsafe to call utf8_strncasecmp() with an unstable + * string. Therefore, we have to copy the name into a temporary buffer. + */ + if (len <= DNAME_INLINE_LEN - 1) { + memcpy(strbuf, str, len); + strbuf[len] = 0; + entry.name = strbuf; + /* prevent compiler from optimizing out the temporary buffer */ + barrier(); + } + res = utf8_strncasecmp(sbi->s_encoding, name, &entry); if (res >= 0) return res; -- GitLab From 8626441f05dc45a2f4693ee6863d02456ce39e60 Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Mon, 8 Jun 2020 20:03:16 +0800 Subject: [PATCH 1247/1971] f2fs: handle readonly filesystem in f2fs_ioc_shutdown() If mountpoint is readonly, we should allow shutdowning filesystem successfully, this fixes issue found by generic/599 testcase of xfstest. Signed-off-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/file.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index dfa1ac2d751a0..3268f8dd59bba 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -2232,8 +2232,15 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg) if (in != F2FS_GOING_DOWN_FULLSYNC) { ret = mnt_want_write_file(filp); - if (ret) + if (ret) { + if (ret == -EROFS) { + ret = 0; + f2fs_stop_checkpoint(sbi, false); + set_sbi_flag(sbi, SBI_IS_SHUTDOWN); + trace_f2fs_shutdown(sbi, in, ret); + } return ret; + } } switch (in) { -- GitLab From bc67c5d0ce406522cc846159f6a0dc0180be58d6 Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Mon, 8 Jun 2020 20:03:17 +0800 Subject: [PATCH 1248/1971] f2fs: remove unused parameter of f2fs_put_rpages_mapping() Just cleanup, no logic change. 
Signed-off-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/compress.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c index a53578a892112..1e02a8c106b0a 100644 --- a/fs/f2fs/compress.c +++ b/fs/f2fs/compress.c @@ -89,8 +89,7 @@ static void f2fs_unlock_rpages(struct compress_ctx *cc, int len) f2fs_drop_rpages(cc, len, true); } -static void f2fs_put_rpages_mapping(struct compress_ctx *cc, - struct address_space *mapping, +static void f2fs_put_rpages_mapping(struct address_space *mapping, pgoff_t start, int len) { int i; @@ -942,7 +941,7 @@ retry: if (!PageUptodate(page)) { f2fs_unlock_rpages(cc, i + 1); - f2fs_put_rpages_mapping(cc, mapping, start_idx, + f2fs_put_rpages_mapping(mapping, start_idx, cc->cluster_size); f2fs_destroy_compress_ctx(cc); goto retry; @@ -977,7 +976,7 @@ retry: unlock_pages: f2fs_unlock_rpages(cc, i); release_pages: - f2fs_put_rpages_mapping(cc, mapping, start_idx, i); + f2fs_put_rpages_mapping(mapping, start_idx, i); f2fs_destroy_compress_ctx(cc); return ret; } -- GitLab From 32b6aba85c8dbd1d3a155543986574c6969f0a48 Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Thu, 4 Jun 2020 11:49:43 -0700 Subject: [PATCH 1249/1971] f2fs: add node_io_flag for bio flags likewise data_io_flag This patch adds another way to attach bio flags to node writes. Description: Give a way to attach REQ_META|FUA to node writes given temperature-based bits. Now the bits indicate: * REQ_META | REQ_FUA | * 5 | 4 | 3 | 2 | 1 | 0 | * Cold | Warm | Hot | Cold | Warm | Hot | Signed-off-by: Jaegeuk Kim --- Documentation/ABI/testing/sysfs-fs-f2fs | 9 +++++++++ fs/f2fs/data.c | 24 +++++++++++++++--------- fs/f2fs/f2fs.h | 1 + fs/f2fs/sysfs.c | 2 ++ 4 files changed, 27 insertions(+), 9 deletions(-) diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs index 427f5b45c67f1..4bb93a06d8abc 100644 --- a/Documentation/ABI/testing/sysfs-fs-f2fs +++ b/Documentation/ABI/testing/sysfs-fs-f2fs @@ -333,6 +333,15 @@ Description: Give a way to attach REQ_META|FUA to data writes * 5 | 4 | 3 | 2 | 1 | 0 | * Cold | Warm | Hot | Cold | Warm | Hot | +What: /sys/fs/f2fs//node_io_flag +Date: June 2020 +Contact: "Jaegeuk Kim" +Description: Give a way to attach REQ_META|FUA to node writes + given temperature-based bits. 
Now the bits indicate: + * REQ_META | REQ_FUA | + * 5 | 4 | 3 | 2 | 1 | 0 | + * Cold | Warm | Hot | Cold | Warm | Hot | + What: /sys/fs/f2fs//iostat_period_ms Date: April 2020 Contact: "Daeho Jeong" diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index a65bfc07ddb97..798c325a820dc 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -514,22 +514,28 @@ void f2fs_submit_bio(struct f2fs_sb_info *sbi, __submit_bio(sbi, bio, type); } -static void __attach_data_io_flag(struct f2fs_io_info *fio) +static void __attach_io_flag(struct f2fs_io_info *fio) { struct f2fs_sb_info *sbi = fio->sbi; unsigned int temp_mask = (1 << NR_TEMP_TYPE) - 1; - unsigned int fua_flag = sbi->data_io_flag & temp_mask; - unsigned int meta_flag = (sbi->data_io_flag >> NR_TEMP_TYPE) & - temp_mask; + unsigned int io_flag, fua_flag, meta_flag; + + if (fio->type == DATA) + io_flag = sbi->data_io_flag; + else if (fio->type == NODE) + io_flag = sbi->node_io_flag; + else + return; + + fua_flag = io_flag & temp_mask; + meta_flag = (io_flag >> NR_TEMP_TYPE) & temp_mask; + /* - * data io flag bits per temp: + * data/node io flag bits per temp: * REQ_META | REQ_FUA | * 5 | 4 | 3 | 2 | 1 | 0 | * Cold | Warm | Hot | Cold | Warm | Hot | */ - if (fio->type != DATA) - return; - if ((1 << fio->temp) & meta_flag) fio->op_flags |= REQ_META; if ((1 << fio->temp) & fua_flag) @@ -543,7 +549,7 @@ static void __submit_merged_bio(struct f2fs_bio_info *io) if (!io->bio) return; - __attach_data_io_flag(fio); + __attach_io_flag(fio); bio_set_op_attrs(io->bio, fio->op, fio->op_flags); if (is_read_io(fio->op)) diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 7794e2f0d6e8a..c812fb8e2d9c7 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -1568,6 +1568,7 @@ struct f2fs_sb_info { /* to attach REQ_META|REQ_FUA flags */ unsigned int data_io_flag; + unsigned int node_io_flag; /* For sysfs suppport */ struct kobject s_kobj; diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c index a117ae1f9d5f1..fc4a46b689040 100644 --- a/fs/f2fs/sysfs.c +++ b/fs/f2fs/sysfs.c @@ -554,6 +554,7 @@ F2FS_RW_ATTR(FAULT_INFO_RATE, f2fs_fault_info, inject_rate, inject_rate); F2FS_RW_ATTR(FAULT_INFO_TYPE, f2fs_fault_info, inject_type, inject_type); #endif F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, data_io_flag, data_io_flag); +F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, node_io_flag, node_io_flag); F2FS_GENERAL_RO_ATTR(dirty_segments); F2FS_GENERAL_RO_ATTR(free_segments); F2FS_GENERAL_RO_ATTR(lifetime_write_kbytes); @@ -635,6 +636,7 @@ static struct attribute *f2fs_attrs[] = { ATTR_LIST(inject_type), #endif ATTR_LIST(data_io_flag), + ATTR_LIST(node_io_flag), ATTR_LIST(dirty_segments), ATTR_LIST(free_segments), ATTR_LIST(unusable), -- GitLab From b7b911d59dacb47511a1e604bbfa901beb108305 Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Thu, 4 Jun 2020 16:45:30 -0700 Subject: [PATCH 1250/1971] f2fs: attach IO flags to the missing cases This adds more IOs to attach flags. Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/data.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 798c325a820dc..267b5e76a02b5 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -691,6 +691,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio) if (fio->io_wbc && !is_read_io(fio->op)) wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE); + __attach_io_flag(fio); bio_set_op_attrs(bio, fio->op, fio->op_flags); inc_page_count(fio->sbi, is_read_io(fio->op) ? 
@@ -877,6 +878,7 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio) alloc_new: if (!bio) { bio = __bio_alloc(fio, BIO_MAX_PAGES); + __attach_io_flag(fio); bio_set_op_attrs(bio, fio->op, fio->op_flags); add_bio_entry(fio->sbi, bio, page, fio->temp); -- GitLab From 8cc0072469723459dc6bd7beff81b2b3149f4cf4 Mon Sep 17 00:00:00 2001 From: Chuhong Yuan Date: Wed, 3 Jun 2020 09:27:28 -0700 Subject: [PATCH 1251/1971] xfs: Add the missed xfs_perag_put() for xfs_ifree_cluster() xfs_ifree_cluster() calls xfs_perag_get() at the beginning, but forgets to call xfs_perag_put() in one failed path. Add the missed function call to fix it. Fixes: ce92464c180b ("xfs: make xfs_trans_get_buf return an error code") Signed-off-by: Chuhong Yuan Reviewed-by: Darrick J. Wong Signed-off-by: Darrick J. Wong --- fs/xfs/xfs_inode.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 64f5f9a440aed..88a9e49648021 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -2634,8 +2634,10 @@ xfs_ifree_cluster( error = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno, mp->m_bsize * igeo->blocks_per_cluster, XBF_UNMAPPED, &bp); - if (error) + if (error) { + xfs_perag_put(pag); return error; + } /* * This buffer may not have been correctly initialised as we -- GitLab From d4ff3b2ef901cd451fb8a9ff4623d060a79502cd Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 8 Jun 2020 20:58:29 -0700 Subject: [PATCH 1252/1971] iomap: Fix unsharing of an extent >2GB on a 32-bit machine Widen the type used for counting the number of bytes unshared. Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig Reviewed-by: Darrick J. Wong Signed-off-by: Darrick J. Wong --- fs/iomap/buffered-io.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index 89e21961d1adf..17ed7bb92c6c7 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -909,7 +909,7 @@ iomap_unshare_actor(struct inode *inode, loff_t pos, loff_t length, void *data, struct iomap *iomap, struct iomap *srcmap) { long status = 0; - ssize_t written = 0; + loff_t written = 0; /* don't bother with blocks that are not shared to start with */ if (!(iomap->flags & IOMAP_F_SHARED)) -- GitLab From 0b0430c6a10a22813e3d40c5658ae644acda4303 Mon Sep 17 00:00:00 2001 From: Kenneth D'souza Date: Tue, 9 Jun 2020 10:12:10 +0530 Subject: [PATCH 1253/1971] cifs: Add get_security_type_str function to return sec type. This code is more organized and robust. 
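The robustness claim is easy to demonstrate with a stand-alone sketch (hypothetical enum and values, not the cifs definitions): a switch with a default keeps working when the enum grows, whereas indexing a fixed string array would read out of bounds for any value added later.

  #include <stdio.h>

  enum sectype { UNSPECIFIED, LANMAN, NTLM, NTLMV2, RAWNTLMSSP, KERBEROS, NEWTYPE };

  static const char *sectype_str(enum sectype t)
  {
          switch (t) {
          case LANMAN:     return "LANMAN";
          case NTLM:       return "NTLM";
          case NTLMV2:     return "NTLMv2";
          case RAWNTLMSSP: return "RawNTLMSSP";
          case KERBEROS:   return "Kerberos";
          default:         return "Unknown";  /* unexpected values stay safe */
          }
  }

  int main(void)
  {
          /* A six-entry lookup table would overflow for NEWTYPE;
           * the switch just reports "Unknown". */
          printf("%s\n", sectype_str(KERBEROS));  /* Kerberos */
          printf("%s\n", sectype_str(NEWTYPE));   /* Unknown  */
          return 0;
  }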
Signed-off-by: Kenneth D'souza Signed-off-by: Roberto Bergantinos Corpas Signed-off-by: Steve French --- fs/cifs/cifs_debug.c | 4 +--- fs/cifs/cifsglob.h | 18 ++++++++++++++++++ 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c index 3ad1a98fd567c..fc98b97b396a4 100644 --- a/fs/cifs/cifs_debug.c +++ b/fs/cifs/cifs_debug.c @@ -221,8 +221,6 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v) struct cifs_ses *ses; struct cifs_tcon *tcon; int i, j; - const char *security_types[] = {"Unspecified", "LANMAN", "NTLM", - "NTLMv2", "RawNTLMSSP", "Kerberos"}; seq_puts(m, "Display Internal CIFS Data Structures for Debugging\n" @@ -379,7 +377,7 @@ skip_rdma: } seq_printf(m,"Security type: %s\n", - security_types[server->ops->select_sectype(server, ses->sectype)]); + get_security_type_str(server->ops->select_sectype(server, ses->sectype))); if (server->rdma) seq_printf(m, "RDMA\n\t"); diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index e133bb3e172f9..fad37d61910aa 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -2008,6 +2008,24 @@ extern struct smb_version_values smb302_values; extern struct smb_version_operations smb311_operations; extern struct smb_version_values smb311_values; +static inline char *get_security_type_str(enum securityEnum sectype) +{ + switch (sectype) { + case RawNTLMSSP: + return "RawNTLMSSP"; + case Kerberos: + return "Kerberos"; + case NTLMv2: + return "NTLMv2"; + case NTLM: + return "NTLM"; + case LANMAN: + return "LANMAN"; + default: + return "Unknown"; + } +} + static inline bool is_smb1_server(struct TCP_Server_Info *server) { return strcmp(server->vals->version_string, SMB1_VERSION_STRING) == 0; -- GitLab From 197298a64983e2beaf1a87413daff3044b4f3821 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pali=20Roh=C3=A1r?= Date: Tue, 17 Mar 2020 22:34:33 +0100 Subject: [PATCH 1254/1971] exfat: Simplify exfat_utf8_d_cmp() for code points above U+FFFF MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If two Unicode code points represented in UTF-16 are different then also their UTF-32 representation must be different. Therefore conversion from UTF-32 to UTF-16 is not needed. Signed-off-by: Pali Rohár Signed-off-by: Namjae Jeon --- fs/exfat/namei.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c index a2659a8a68a14..731da41cabbf7 100644 --- a/fs/exfat/namei.c +++ b/fs/exfat/namei.c @@ -185,14 +185,9 @@ static int exfat_utf8_d_cmp(const struct dentry *dentry, unsigned int len, if (u_a <= 0xFFFF && u_b <= 0xFFFF) { if (exfat_toupper(sb, u_a) != exfat_toupper(sb, u_b)) return 1; - } else if (u_a > 0xFFFF && u_b > 0xFFFF) { - if (exfat_low_surrogate(u_a) != - exfat_low_surrogate(u_b) || - exfat_high_surrogate(u_a) != - exfat_high_surrogate(u_b)) - return 1; } else { - return 1; + if (u_a != u_b) + return 1; } } -- GitLab From d1727d55c0327efdba2a5bad7801d509b721fef3 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Fri, 24 Apr 2020 13:31:12 +0900 Subject: [PATCH 1255/1971] exfat: Use a more common logging style Remove the direct use of KERN_ in functions by creating separate exfat_ macros. 
Miscellanea: o Remove several unnecessary terminating newlines in formats o Realign arguments and fit to 80 columns where appropriate Signed-off-by: Joe Perches Signed-off-by: Namjae Jeon --- fs/exfat/balloc.c | 8 +++----- fs/exfat/dir.c | 9 ++++----- fs/exfat/exfat_fs.h | 7 +++++++ fs/exfat/fatent.c | 13 +++++-------- fs/exfat/misc.c | 4 ++-- fs/exfat/namei.c | 26 ++++++++++--------------- fs/exfat/nls.c | 20 ++++++++----------- fs/exfat/super.c | 47 ++++++++++++++++++++------------------------- 8 files changed, 60 insertions(+), 74 deletions(-) diff --git a/fs/exfat/balloc.c b/fs/exfat/balloc.c index 6774a5a6ded82..4055eb00ea9b8 100644 --- a/fs/exfat/balloc.c +++ b/fs/exfat/balloc.c @@ -58,9 +58,8 @@ static int exfat_allocate_bitmap(struct super_block *sb, need_map_size = ((EXFAT_DATA_CLUSTER_COUNT(sbi) - 1) / BITS_PER_BYTE) + 1; if (need_map_size != map_size) { - exfat_msg(sb, KERN_ERR, - "bogus allocation bitmap size(need : %u, cur : %lld)", - need_map_size, map_size); + exfat_err(sb, "bogus allocation bitmap size(need : %u, cur : %lld)", + need_map_size, map_size); /* * Only allowed when bogus allocation * bitmap size is large @@ -192,8 +191,7 @@ void exfat_clear_bitmap(struct inode *inode, unsigned int clu) (1 << sbi->sect_per_clus_bits), GFP_NOFS, 0); if (ret_discard == -EOPNOTSUPP) { - exfat_msg(sb, KERN_ERR, - "discard not supported by device, disabling"); + exfat_err(sb, "discard not supported by device, disabling"); opts->discard = 0; } } diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c index 4b91afb0f0515..53ae965da7ec5 100644 --- a/fs/exfat/dir.c +++ b/fs/exfat/dir.c @@ -720,9 +720,8 @@ static int exfat_dir_readahead(struct super_block *sb, sector_t sec) return 0; if (sec < sbi->data_start_sector) { - exfat_msg(sb, KERN_ERR, - "requested sector is invalid(sect:%llu, root:%llu)", - (unsigned long long)sec, sbi->data_start_sector); + exfat_err(sb, "requested sector is invalid(sect:%llu, root:%llu)", + (unsigned long long)sec, sbi->data_start_sector); return -EIO; } @@ -750,7 +749,7 @@ struct exfat_dentry *exfat_get_dentry(struct super_block *sb, sector_t sec; if (p_dir->dir == DIR_DELETED) { - exfat_msg(sb, KERN_ERR, "abnormal access to deleted dentry\n"); + exfat_err(sb, "abnormal access to deleted dentry"); return NULL; } @@ -853,7 +852,7 @@ struct exfat_entry_set_cache *exfat_get_dentry_set(struct super_block *sb, struct buffer_head *bh; if (p_dir->dir == DIR_DELETED) { - exfat_msg(sb, KERN_ERR, "access to deleted dentry\n"); + exfat_err(sb, "access to deleted dentry"); return NULL; } diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h index d67fb8a6f770c..1ebfb9085f1f7 100644 --- a/fs/exfat/exfat_fs.h +++ b/fs/exfat/exfat_fs.h @@ -505,6 +505,13 @@ void __exfat_fs_error(struct super_block *sb, int report, const char *fmt, ...) fmt, ## args) void exfat_msg(struct super_block *sb, const char *lv, const char *fmt, ...) __printf(3, 4) __cold; +#define exfat_err(sb, fmt, ...) \ + exfat_msg(sb, KERN_ERR, fmt, ##__VA_ARGS__) +#define exfat_warn(sb, fmt, ...) \ + exfat_msg(sb, KERN_WARNING, fmt, ##__VA_ARGS__) +#define exfat_info(sb, fmt, ...) 
\ + exfat_msg(sb, KERN_INFO, fmt, ##__VA_ARGS__) + void exfat_get_entry_time(struct exfat_sb_info *sbi, struct timespec64 *ts, u8 tz, __le16 time, __le16 date, u8 time_ms); void exfat_truncate_atime(struct timespec64 *ts); diff --git a/fs/exfat/fatent.c b/fs/exfat/fatent.c index a855b1769a961..267e5e09eb134 100644 --- a/fs/exfat/fatent.c +++ b/fs/exfat/fatent.c @@ -170,8 +170,7 @@ int exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain) /* check cluster validation */ if (p_chain->dir < 2 && p_chain->dir >= sbi->num_clusters) { - exfat_msg(sb, KERN_ERR, "invalid start cluster (%u)", - p_chain->dir); + exfat_err(sb, "invalid start cluster (%u)", p_chain->dir); return -EIO; } @@ -305,8 +304,7 @@ int exfat_zeroed_cluster(struct inode *dir, unsigned int clu) return 0; release_bhs: - exfat_msg(sb, KERN_ERR, "failed zeroed sect %llu\n", - (unsigned long long)blknr); + exfat_err(sb, "failed zeroed sect %llu\n", (unsigned long long)blknr); for (i = 0; i < n; i++) bforget(bhs[i]); return err; @@ -337,9 +335,8 @@ int exfat_alloc_cluster(struct inode *inode, unsigned int num_alloc, /* find new cluster */ if (hint_clu == EXFAT_EOF_CLUSTER) { if (sbi->clu_srch_ptr < EXFAT_FIRST_CLUSTER) { - exfat_msg(sb, KERN_ERR, - "sbi->clu_srch_ptr is invalid (%u)\n", - sbi->clu_srch_ptr); + exfat_err(sb, "sbi->clu_srch_ptr is invalid (%u)\n", + sbi->clu_srch_ptr); sbi->clu_srch_ptr = EXFAT_FIRST_CLUSTER; } @@ -350,7 +347,7 @@ int exfat_alloc_cluster(struct inode *inode, unsigned int num_alloc, /* check cluster validation */ if (hint_clu < EXFAT_FIRST_CLUSTER && hint_clu >= sbi->num_clusters) { - exfat_msg(sb, KERN_ERR, "hint_cluster is invalid (%u)\n", + exfat_err(sb, "hint_cluster is invalid (%u)", hint_clu); hint_clu = EXFAT_FIRST_CLUSTER; if (p_chain->flags == ALLOC_NO_FAT_CHAIN) { diff --git a/fs/exfat/misc.c b/fs/exfat/misc.c index ebd2cbe3cbc11..ce5e8a1b0726c 100644 --- a/fs/exfat/misc.c +++ b/fs/exfat/misc.c @@ -32,7 +32,7 @@ void __exfat_fs_error(struct super_block *sb, int report, const char *fmt, ...) va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; - exfat_msg(sb, KERN_ERR, "error, %pV\n", &vaf); + exfat_err(sb, "error, %pV", &vaf); va_end(args); } @@ -41,7 +41,7 @@ void __exfat_fs_error(struct super_block *sb, int report, const char *fmt, ...) 
sb->s_id); } else if (opts->errors == EXFAT_ERRORS_RO && !sb_rdonly(sb)) { sb->s_flags |= SB_RDONLY; - exfat_msg(sb, KERN_ERR, "Filesystem has been set read-only"); + exfat_err(sb, "Filesystem has been set read-only"); } } diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c index 731da41cabbf7..585b47b2db3da 100644 --- a/fs/exfat/namei.c +++ b/fs/exfat/namei.c @@ -773,8 +773,8 @@ static struct dentry *exfat_lookup(struct inode *dir, struct dentry *dentry, if (d_unhashed(alias)) { WARN_ON(alias->d_name.hash_len != dentry->d_name.hash_len); - exfat_msg(sb, KERN_INFO, - "rehashed a dentry(%p) in read lookup", alias); + exfat_info(sb, "rehashed a dentry(%p) in read lookup", + alias); d_drop(dentry); d_rehash(alias); } else if (!S_ISDIR(i_mode)) { @@ -819,7 +819,7 @@ static int exfat_unlink(struct inode *dir, struct dentry *dentry) exfat_chain_dup(&cdir, &ei->dir); entry = ei->entry; if (ei->dir.dir == DIR_DELETED) { - exfat_msg(sb, KERN_ERR, "abnormal access to deleted dentry"); + exfat_err(sb, "abnormal access to deleted dentry"); err = -ENOENT; goto unlock; } @@ -974,7 +974,7 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry) entry = ei->entry; if (ei->dir.dir == DIR_DELETED) { - exfat_msg(sb, KERN_ERR, "abnormal access to deleted dentry"); + exfat_err(sb, "abnormal access to deleted dentry"); err = -ENOENT; goto unlock; } @@ -986,9 +986,8 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry) err = exfat_check_dir_empty(sb, &clu_to_free); if (err) { if (err == -EIO) - exfat_msg(sb, KERN_ERR, - "failed to exfat_check_dir_empty : err(%d)", - err); + exfat_err(sb, "failed to exfat_check_dir_empty : err(%d)", + err); goto unlock; } @@ -1009,9 +1008,7 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry) err = exfat_remove_entries(dir, &cdir, entry, 0, num_entries); if (err) { - exfat_msg(sb, KERN_ERR, - "failed to exfat_remove_entries : err(%d)", - err); + exfat_err(sb, "failed to exfat_remove_entries : err(%d)", err); goto unlock; } ei->dir.dir = DIR_DELETED; @@ -1240,8 +1237,7 @@ static int __exfat_rename(struct inode *old_parent_inode, return -EINVAL; if (ei->dir.dir == DIR_DELETED) { - exfat_msg(sb, KERN_ERR, - "abnormal access to deleted source dentry"); + exfat_err(sb, "abnormal access to deleted source dentry"); return -ENOENT; } @@ -1263,8 +1259,7 @@ static int __exfat_rename(struct inode *old_parent_inode, new_ei = EXFAT_I(new_inode); if (new_ei->dir.dir == DIR_DELETED) { - exfat_msg(sb, KERN_ERR, - "abnormal access to deleted target dentry"); + exfat_err(sb, "abnormal access to deleted target dentry"); goto out; } @@ -1426,8 +1421,7 @@ static int exfat_rename(struct inode *old_dir, struct dentry *old_dentry, if (S_ISDIR(new_inode->i_mode)) drop_nlink(new_inode); } else { - exfat_msg(sb, KERN_WARNING, - "abnormal access to an inode dropped"); + exfat_warn(sb, "abnormal access to an inode dropped"); WARN_ON(new_inode->i_nlink == 0); } new_inode->i_ctime = EXFAT_I(new_inode)->i_crtime = diff --git a/fs/exfat/nls.c b/fs/exfat/nls.c index 6d1c3ae130ff1..2178786b708b5 100644 --- a/fs/exfat/nls.c +++ b/fs/exfat/nls.c @@ -503,16 +503,14 @@ static int exfat_utf8_to_utf16(struct super_block *sb, unilen = utf8s_to_utf16s(p_cstring, len, UTF16_HOST_ENDIAN, (wchar_t *)uniname, MAX_NAME_LENGTH + 2); if (unilen < 0) { - exfat_msg(sb, KERN_ERR, - "failed to %s (err : %d) nls len : %d", - __func__, unilen, len); + exfat_err(sb, "failed to %s (err : %d) nls len : %d", + __func__, unilen, len); return unilen; } if (unilen > MAX_NAME_LENGTH) { - exfat_msg(sb, 
KERN_ERR, - "failed to %s (estr:ENAMETOOLONG) nls len : %d, unilen : %d > %d", - __func__, len, unilen, MAX_NAME_LENGTH); + exfat_err(sb, "failed to %s (estr:ENAMETOOLONG) nls len : %d, unilen : %d > %d", + __func__, len, unilen, MAX_NAME_LENGTH); return -ENAMETOOLONG; } @@ -687,9 +685,8 @@ static int exfat_load_upcase_table(struct super_block *sb, bh = sb_bread(sb, sector); if (!bh) { - exfat_msg(sb, KERN_ERR, - "failed to read sector(0x%llx)\n", - (unsigned long long)sector); + exfat_err(sb, "failed to read sector(0x%llx)\n", + (unsigned long long)sector); ret = -EIO; goto free_table; } @@ -722,9 +719,8 @@ static int exfat_load_upcase_table(struct super_block *sb, if (index >= 0xFFFF && utbl_checksum == checksum) return 0; - exfat_msg(sb, KERN_ERR, - "failed to load upcase table (idx : 0x%08x, chksum : 0x%08x, utbl_chksum : 0x%08x)\n", - index, checksum, utbl_checksum); + exfat_err(sb, "failed to load upcase table (idx : 0x%08x, chksum : 0x%08x, utbl_chksum : 0x%08x)", + index, checksum, utbl_checksum); ret = -EINVAL; free_table: exfat_free_upcase_table(sbi); diff --git a/fs/exfat/super.c b/fs/exfat/super.c index a846ff555656c..f9aa1e5dc2381 100644 --- a/fs/exfat/super.c +++ b/fs/exfat/super.c @@ -376,15 +376,13 @@ static struct pbr *exfat_read_pbr_with_logical_sector(struct super_block *sb) if (!is_power_of_2(logical_sect) || logical_sect < 512 || logical_sect > 4096) { - exfat_msg(sb, KERN_ERR, "bogus logical sector size %u", - logical_sect); + exfat_err(sb, "bogus logical sector size %u", logical_sect); return NULL; } if (logical_sect < sb->s_blocksize) { - exfat_msg(sb, KERN_ERR, - "logical sector size too small for device (logical sector size = %u)", - logical_sect); + exfat_err(sb, "logical sector size too small for device (logical sector size = %u)", + logical_sect); return NULL; } @@ -393,15 +391,14 @@ static struct pbr *exfat_read_pbr_with_logical_sector(struct super_block *sb) sbi->pbr_bh = NULL; if (!sb_set_blocksize(sb, logical_sect)) { - exfat_msg(sb, KERN_ERR, - "unable to set blocksize %u", logical_sect); + exfat_err(sb, "unable to set blocksize %u", + logical_sect); return NULL; } sbi->pbr_bh = sb_bread(sb, 0); if (!sbi->pbr_bh) { - exfat_msg(sb, KERN_ERR, - "unable to read boot sector (logical sector size = %lu)", - sb->s_blocksize); + exfat_err(sb, "unable to read boot sector (logical sector size = %lu)", + sb->s_blocksize); return NULL; } @@ -424,7 +421,7 @@ static int __exfat_fill_super(struct super_block *sb) /* read boot sector */ sbi->pbr_bh = sb_bread(sb, 0); if (!sbi->pbr_bh) { - exfat_msg(sb, KERN_ERR, "unable to read boot sector"); + exfat_err(sb, "unable to read boot sector"); return -EIO; } @@ -433,7 +430,7 @@ static int __exfat_fill_super(struct super_block *sb) /* check the validity of PBR */ if (le16_to_cpu((p_pbr->signature)) != PBR_SIGNATURE) { - exfat_msg(sb, KERN_ERR, "invalid boot record signature"); + exfat_err(sb, "invalid boot record signature"); ret = -EINVAL; goto free_bh; } @@ -458,7 +455,7 @@ static int __exfat_fill_super(struct super_block *sb) p_bpb = (struct pbr64 *)p_pbr; if (!p_bpb->bsx.num_fats) { - exfat_msg(sb, KERN_ERR, "bogus number of FAT structure"); + exfat_err(sb, "bogus number of FAT structure"); ret = -EINVAL; goto free_bh; } @@ -488,8 +485,7 @@ static int __exfat_fill_super(struct super_block *sb) if (le16_to_cpu(p_bpb->bsx.vol_flags) & VOL_DIRTY) { sbi->vol_flag |= VOL_DIRTY; - exfat_msg(sb, KERN_WARNING, - "Volume was not properly unmounted. Some data may be corrupt. 
Please run fsck."); + exfat_warn(sb, "Volume was not properly unmounted. Some data may be corrupt. Please run fsck."); } /* exFAT file size is limited by a disk volume size */ @@ -498,19 +494,19 @@ static int __exfat_fill_super(struct super_block *sb) ret = exfat_create_upcase_table(sb); if (ret) { - exfat_msg(sb, KERN_ERR, "failed to load upcase table"); + exfat_err(sb, "failed to load upcase table"); goto free_bh; } ret = exfat_load_bitmap(sb); if (ret) { - exfat_msg(sb, KERN_ERR, "failed to load alloc-bitmap"); + exfat_err(sb, "failed to load alloc-bitmap"); goto free_upcase_table; } ret = exfat_count_used_clusters(sb, &sbi->used_clusters); if (ret) { - exfat_msg(sb, KERN_ERR, "failed to scan clusters"); + exfat_err(sb, "failed to scan clusters"); goto free_alloc_bitmap; } @@ -539,8 +535,7 @@ static int exfat_fill_super(struct super_block *sb, struct fs_context *fc) struct request_queue *q = bdev_get_queue(sb->s_bdev); if (!blk_queue_discard(q)) { - exfat_msg(sb, KERN_WARNING, - "mounting with \"discard\" option, but the device does not support discard"); + exfat_warn(sb, "mounting with \"discard\" option, but the device does not support discard"); opts->discard = 0; } } @@ -555,7 +550,7 @@ static int exfat_fill_super(struct super_block *sb, struct fs_context *fc) err = __exfat_fill_super(sb); if (err) { - exfat_msg(sb, KERN_ERR, "failed to recognize exfat type"); + exfat_err(sb, "failed to recognize exfat type"); goto check_nls_io; } @@ -567,8 +562,8 @@ static int exfat_fill_super(struct super_block *sb, struct fs_context *fc) else { sbi->nls_io = load_nls(sbi->options.iocharset); if (!sbi->nls_io) { - exfat_msg(sb, KERN_ERR, "IO charset %s not found", - sbi->options.iocharset); + exfat_err(sb, "IO charset %s not found", + sbi->options.iocharset); err = -EINVAL; goto free_table; } @@ -581,7 +576,7 @@ static int exfat_fill_super(struct super_block *sb, struct fs_context *fc) root_inode = new_inode(sb); if (!root_inode) { - exfat_msg(sb, KERN_ERR, "failed to allocate root inode."); + exfat_err(sb, "failed to allocate root inode"); err = -ENOMEM; goto free_table; } @@ -590,7 +585,7 @@ static int exfat_fill_super(struct super_block *sb, struct fs_context *fc) inode_set_iversion(root_inode, 1); err = exfat_read_root(root_inode); if (err) { - exfat_msg(sb, KERN_ERR, "failed to initialize root inode."); + exfat_err(sb, "failed to initialize root inode"); goto put_inode; } @@ -599,7 +594,7 @@ static int exfat_fill_super(struct super_block *sb, struct fs_context *fc) sb->s_root = d_make_root(root_inode); if (!sb->s_root) { - exfat_msg(sb, KERN_ERR, "failed to get the root dentry"); + exfat_err(sb, "failed to get the root dentry"); err = -ENOMEM; goto put_inode; } -- GitLab From 31f5acc0aaa319a34308442413ef0b878e9d2c93 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Mon, 6 Apr 2020 11:13:43 +0200 Subject: [PATCH 1256/1971] exfat: Improve wording of EXFAT_DEFAULT_IOCHARSET config option - Use consistent capitalization for "exFAT". - Fix grammar, - Split long sentence. 
Signed-off-by: Geert Uytterhoeven Signed-off-by: Namjae Jeon --- fs/exfat/Kconfig | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/fs/exfat/Kconfig b/fs/exfat/Kconfig index 2d3636dc5b8c3..5a65071b5ecf1 100644 --- a/fs/exfat/Kconfig +++ b/fs/exfat/Kconfig @@ -16,6 +16,7 @@ config EXFAT_DEFAULT_IOCHARSET depends on EXFAT_FS help Set this to the default input/output character set to use for - converting between the encoding is used for user visible filename and - UTF-16 character that exfat filesystem use, and can be overridden with - the "iocharset" mount option for exFAT filesystems. + converting between the encoding that is used for user visible + filenames and the UTF-16 character encoding that the exFAT + filesystem uses. This can be overridden with the "iocharset" mount + option for the exFAT filesystems. -- GitLab From dddf7da3985ee144b46ee287e81e47b9ee8bb980 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pali=20Roh=C3=A1r?= Date: Tue, 17 Mar 2020 23:25:52 +0100 Subject: [PATCH 1257/1971] exfat: Simplify exfat_utf8_d_hash() for code points above U+FFFF MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Function partial_name_hash() takes long type value into which can be stored one Unicode code point. Therefore conversion from UTF-32 to UTF-16 is not needed. Signed-off-by: Pali Rohár Signed-off-by: Namjae Jeon --- fs/exfat/namei.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c index 585b47b2db3da..fa926b9c883ad 100644 --- a/fs/exfat/namei.c +++ b/fs/exfat/namei.c @@ -147,16 +147,10 @@ static int exfat_utf8_d_hash(const struct dentry *dentry, struct qstr *qstr) return charlen; /* - * Convert to UTF-16: code points above U+FFFF are encoded as - * surrogate pairs. * exfat_toupper() works only for code points up to the U+FFFF. */ - if (u > 0xFFFF) { - hash = partial_name_hash(exfat_high_surrogate(u), hash); - hash = partial_name_hash(exfat_low_surrogate(u), hash); - } else { - hash = partial_name_hash(exfat_toupper(sb, u), hash); - } + hash = partial_name_hash(u <= 0xFFFF ? exfat_toupper(sb, u) : u, + hash); } qstr->hash = end_name_hash(hash); -- GitLab From 6778337a7a4e51ec9fa4a76846d34e8a66ca6418 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pali=20Roh=C3=A1r?= Date: Tue, 17 Mar 2020 23:25:54 +0100 Subject: [PATCH 1258/1971] exfat: Remove unused functions exfat_high_surrogate() and exfat_low_surrogate() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit After applying previous two patches, these functions are not used anymore. Signed-off-by: Pali Rohár Signed-off-by: Namjae Jeon --- fs/exfat/exfat_fs.h | 2 -- fs/exfat/nls.c | 13 ------------- 2 files changed, 15 deletions(-) diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h index 1ebfb9085f1f7..3862df6af7386 100644 --- a/fs/exfat/exfat_fs.h +++ b/fs/exfat/exfat_fs.h @@ -492,8 +492,6 @@ int exfat_nls_to_utf16(struct super_block *sb, struct exfat_uni_name *uniname, int *p_lossy); int exfat_create_upcase_table(struct super_block *sb); void exfat_free_upcase_table(struct exfat_sb_info *sbi); -unsigned short exfat_high_surrogate(unicode_t u); -unsigned short exfat_low_surrogate(unicode_t u); /* exfat/misc.c */ void __exfat_fs_error(struct super_block *sb, int report, const char *fmt, ...) 
diff --git a/fs/exfat/nls.c b/fs/exfat/nls.c index 2178786b708b5..1ebda90cbdd7c 100644 --- a/fs/exfat/nls.c +++ b/fs/exfat/nls.c @@ -535,22 +535,9 @@ static int exfat_utf8_to_utf16(struct super_block *sb, return unilen; } -#define PLANE_SIZE 0x00010000 #define SURROGATE_MASK 0xfffff800 #define SURROGATE_PAIR 0x0000d800 #define SURROGATE_LOW 0x00000400 -#define SURROGATE_BITS 0x000003ff - -unsigned short exfat_high_surrogate(unicode_t u) -{ - return ((u - PLANE_SIZE) >> 10) + SURROGATE_PAIR; -} - -unsigned short exfat_low_surrogate(unicode_t u) -{ - return ((u - PLANE_SIZE) & SURROGATE_BITS) | SURROGATE_PAIR | - SURROGATE_LOW; -} static int __exfat_utf16_to_nls(struct super_block *sb, struct exfat_uni_name *p_uniname, unsigned char *p_cstring, -- GitLab From cdc06129a6cea0e4863fa2b34a0c132c6eb7278b Mon Sep 17 00:00:00 2001 From: Jason Yan Date: Thu, 16 Apr 2020 13:34:26 +0900 Subject: [PATCH 1259/1971] exfat: remove the assignment of 0 to bool variable There is no need to init 'sync' in exfat_set_vol_flags(). This also fixes the following coccicheck warning: fs/exfat/super.c:104:6-10: WARNING: Assignment of 0/1 to bool variable Signed-off-by: Jason Yan Signed-off-by: Namjae Jeon --- fs/exfat/super.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/exfat/super.c b/fs/exfat/super.c index f9aa1e5dc2381..c1f47f4071a80 100644 --- a/fs/exfat/super.c +++ b/fs/exfat/super.c @@ -102,7 +102,7 @@ int exfat_set_vol_flags(struct super_block *sb, unsigned short new_flag) { struct exfat_sb_info *sbi = EXFAT_SB(sb); struct pbr64 *bpb = (struct pbr64 *)sbi->pbr_bh->b_data; - bool sync = 0; + bool sync; /* flags are not changed */ if (sbi->vol_flag == new_flag) -- GitLab From ed0f84d30ba65f44bed2739572c7ab0fdeed4004 Mon Sep 17 00:00:00 2001 From: Tetsuhiro Kohada Date: Wed, 22 Apr 2020 08:30:56 +0900 Subject: [PATCH 1260/1971] exfat: replace 'time_ms' with 'time_cs' Replace time_ms with time_cs in the file directory entry structure and related functions. The unit of create_time_ms/modify_time_ms in File Directory Entry are not 'milli-second', but 'centi-second'. The exfat specification uses the term '10ms', but instead use 'cs' as in msdos_fs.h. 
Signed-off-by: Tetsuhiro Kohada Signed-off-by: Namjae Jeon --- fs/exfat/dir.c | 8 ++++---- fs/exfat/exfat_fs.h | 4 ++-- fs/exfat/exfat_raw.h | 4 ++-- fs/exfat/file.c | 2 +- fs/exfat/inode.c | 4 ++-- fs/exfat/misc.c | 18 +++++++++--------- fs/exfat/namei.c | 4 ++-- 7 files changed, 22 insertions(+), 22 deletions(-) diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c index 53ae965da7ec5..b5a237c33d50c 100644 --- a/fs/exfat/dir.c +++ b/fs/exfat/dir.c @@ -137,12 +137,12 @@ static int exfat_readdir(struct inode *inode, struct exfat_dir_entry *dir_entry) ep->dentry.file.create_tz, ep->dentry.file.create_time, ep->dentry.file.create_date, - ep->dentry.file.create_time_ms); + ep->dentry.file.create_time_cs); exfat_get_entry_time(sbi, &dir_entry->mtime, ep->dentry.file.modify_tz, ep->dentry.file.modify_time, ep->dentry.file.modify_date, - ep->dentry.file.modify_time_ms); + ep->dentry.file.modify_time_cs); exfat_get_entry_time(sbi, &dir_entry->atime, ep->dentry.file.access_tz, ep->dentry.file.access_time, @@ -461,12 +461,12 @@ int exfat_init_dir_entry(struct inode *inode, struct exfat_chain *p_dir, &ep->dentry.file.create_tz, &ep->dentry.file.create_time, &ep->dentry.file.create_date, - &ep->dentry.file.create_time_ms); + &ep->dentry.file.create_time_cs); exfat_set_entry_time(sbi, &ts, &ep->dentry.file.modify_tz, &ep->dentry.file.modify_time, &ep->dentry.file.modify_date, - &ep->dentry.file.modify_time_ms); + &ep->dentry.file.modify_time_cs); exfat_set_entry_time(sbi, &ts, &ep->dentry.file.access_tz, &ep->dentry.file.access_time, diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h index 3862df6af7386..294aa7792bc3c 100644 --- a/fs/exfat/exfat_fs.h +++ b/fs/exfat/exfat_fs.h @@ -511,10 +511,10 @@ void exfat_msg(struct super_block *sb, const char *lv, const char *fmt, ...) 
exfat_msg(sb, KERN_INFO, fmt, ##__VA_ARGS__) void exfat_get_entry_time(struct exfat_sb_info *sbi, struct timespec64 *ts, - u8 tz, __le16 time, __le16 date, u8 time_ms); + u8 tz, __le16 time, __le16 date, u8 time_cs); void exfat_truncate_atime(struct timespec64 *ts); void exfat_set_entry_time(struct exfat_sb_info *sbi, struct timespec64 *ts, - u8 *tz, __le16 *time, __le16 *date, u8 *time_ms); + u8 *tz, __le16 *time, __le16 *date, u8 *time_cs); unsigned short exfat_calc_chksum_2byte(void *data, int len, unsigned short chksum, int type); void exfat_update_bh(struct super_block *sb, struct buffer_head *bh, int sync); diff --git a/fs/exfat/exfat_raw.h b/fs/exfat/exfat_raw.h index 2a841010e6490..8d6c64a7546d8 100644 --- a/fs/exfat/exfat_raw.h +++ b/fs/exfat/exfat_raw.h @@ -136,8 +136,8 @@ struct exfat_dentry { __le16 modify_date; __le16 access_time; __le16 access_date; - __u8 create_time_ms; - __u8 modify_time_ms; + __u8 create_time_cs; + __u8 modify_time_cs; __u8 create_tz; __u8 modify_tz; __u8 access_tz; diff --git a/fs/exfat/file.c b/fs/exfat/file.c index c9db8eb0cfc3e..84f3d31a3a555 100644 --- a/fs/exfat/file.c +++ b/fs/exfat/file.c @@ -165,7 +165,7 @@ int __exfat_truncate(struct inode *inode, loff_t new_size) &ep->dentry.file.modify_tz, &ep->dentry.file.modify_time, &ep->dentry.file.modify_date, - &ep->dentry.file.modify_time_ms); + &ep->dentry.file.modify_time_cs); ep->dentry.file.attr = cpu_to_le16(ei->attr); /* File size should be zero if there is no cluster allocated */ diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c index 06887492f54b7..3f367d081cd6d 100644 --- a/fs/exfat/inode.c +++ b/fs/exfat/inode.c @@ -56,12 +56,12 @@ static int __exfat_write_inode(struct inode *inode, int sync) &ep->dentry.file.create_tz, &ep->dentry.file.create_time, &ep->dentry.file.create_date, - &ep->dentry.file.create_time_ms); + &ep->dentry.file.create_time_cs); exfat_set_entry_time(sbi, &inode->i_mtime, &ep->dentry.file.modify_tz, &ep->dentry.file.modify_time, &ep->dentry.file.modify_date, - &ep->dentry.file.modify_time_ms); + &ep->dentry.file.modify_time_cs); exfat_set_entry_time(sbi, &inode->i_atime, &ep->dentry.file.access_tz, &ep->dentry.file.access_time, diff --git a/fs/exfat/misc.c b/fs/exfat/misc.c index ce5e8a1b0726c..ab7f88b1f6d30 100644 --- a/fs/exfat/misc.c +++ b/fs/exfat/misc.c @@ -75,7 +75,7 @@ static void exfat_adjust_tz(struct timespec64 *ts, u8 tz_off) /* Convert a EXFAT time/date pair to a UNIX date (seconds since 1 1 70). */ void exfat_get_entry_time(struct exfat_sb_info *sbi, struct timespec64 *ts, - u8 tz, __le16 time, __le16 date, u8 time_ms) + u8 tz, __le16 time, __le16 date, u8 time_cs) { u16 t = le16_to_cpu(time); u16 d = le16_to_cpu(date); @@ -84,10 +84,10 @@ void exfat_get_entry_time(struct exfat_sb_info *sbi, struct timespec64 *ts, t >> 11, (t >> 5) & 0x003F, (t & 0x001F) << 1); - /* time_ms field represent 0 ~ 199(1990 ms) */ - if (time_ms) { - ts->tv_sec += time_ms / 100; - ts->tv_nsec = (time_ms % 100) * 10 * NSEC_PER_MSEC; + /* time_cs field represent 0 ~ 199cs(1990 ms) */ + if (time_cs) { + ts->tv_sec += time_cs / 100; + ts->tv_nsec = (time_cs % 100) * 10 * NSEC_PER_MSEC; } else ts->tv_nsec = 0; @@ -101,7 +101,7 @@ void exfat_get_entry_time(struct exfat_sb_info *sbi, struct timespec64 *ts, /* Convert linear UNIX date to a EXFAT time/date pair. 
*/ void exfat_set_entry_time(struct exfat_sb_info *sbi, struct timespec64 *ts, - u8 *tz, __le16 *time, __le16 *date, u8 *time_ms) + u8 *tz, __le16 *time, __le16 *date, u8 *time_cs) { struct tm tm; u16 t, d; @@ -113,9 +113,9 @@ void exfat_set_entry_time(struct exfat_sb_info *sbi, struct timespec64 *ts, *time = cpu_to_le16(t); *date = cpu_to_le16(d); - /* time_ms field represent 0 ~ 199(1990 ms) */ - if (time_ms) - *time_ms = (tm.tm_sec & 1) * 100 + + /* time_cs field represent 0 ~ 199cs(1990 ms) */ + if (time_cs) + *time_cs = (tm.tm_sec & 1) * 100 + ts->tv_nsec / (10 * NSEC_PER_MSEC); /* diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c index fa926b9c883ad..48f4df883f3ba 100644 --- a/fs/exfat/namei.c +++ b/fs/exfat/namei.c @@ -689,12 +689,12 @@ static int exfat_find(struct inode *dir, struct qstr *qname, ep->dentry.file.create_tz, ep->dentry.file.create_time, ep->dentry.file.create_date, - ep->dentry.file.create_time_ms); + ep->dentry.file.create_time_cs); exfat_get_entry_time(sbi, &info->mtime, ep->dentry.file.modify_tz, ep->dentry.file.modify_time, ep->dentry.file.modify_date, - ep->dentry.file.modify_time_ms); + ep->dentry.file.modify_time_cs); exfat_get_entry_time(sbi, &info->atime, ep->dentry.file.access_tz, ep->dentry.file.access_time, -- GitLab From 943af1fdacfebe4ff13430655abc460a5e1072f7 Mon Sep 17 00:00:00 2001 From: Tetsuhiro Kohada Date: Wed, 20 May 2020 16:56:41 +0900 Subject: [PATCH 1261/1971] exfat: optimize dir-cache Optimize directory access based on exfat_entry_set_cache. - Hold bh instead of copied d-entry. - Modify bh->data directly instead of the copied d-entry. - Write back the retained bh instead of rescanning the d-entry-set. And - Remove unused cache related definitions. Signed-off-by: Tetsuhiro Kohada Reviewed-by: Sungjong Seo Signed-off-by: Namjae Jeon --- fs/exfat/dir.c | 197 +++++++++++++++++--------------------------- fs/exfat/exfat_fs.h | 27 +++--- fs/exfat/file.c | 15 ++-- fs/exfat/inode.c | 53 +++++------- fs/exfat/namei.c | 14 ++-- 5 files changed, 124 insertions(+), 182 deletions(-) diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c index b5a237c33d50c..2902d285bf204 100644 --- a/fs/exfat/dir.c +++ b/fs/exfat/dir.c @@ -32,35 +32,30 @@ static void exfat_get_uniname_from_ext_entry(struct super_block *sb, struct exfat_chain *p_dir, int entry, unsigned short *uniname) { int i; - struct exfat_dentry *ep; struct exfat_entry_set_cache *es; - es = exfat_get_dentry_set(sb, p_dir, entry, ES_ALL_ENTRIES, &ep); + es = exfat_get_dentry_set(sb, p_dir, entry, ES_ALL_ENTRIES); if (!es) return; - if (es->num_entries < 3) - goto free_es; - - ep += 2; - /* * First entry : file entry * Second entry : stream-extension entry * Third entry : first file-name entry * So, the index of first file-name dentry should start from 2. 
*/ - for (i = 2; i < es->num_entries; i++, ep++) { + for (i = 2; i < es->num_entries; i++) { + struct exfat_dentry *ep = exfat_get_dentry_cached(es, i); + /* end of name entry */ if (exfat_get_entry_type(ep) != TYPE_EXTEND) - goto free_es; + break; exfat_extract_uni_name(ep, uniname); uniname += EXFAT_FILE_NAME_LEN; } -free_es: - kfree(es); + exfat_free_dentry_set(es, false); } /* read a directory entry from the opened directory */ @@ -590,62 +585,33 @@ int exfat_remove_entries(struct inode *inode, struct exfat_chain *p_dir, return 0; } -int exfat_update_dir_chksum_with_entry_set(struct super_block *sb, - struct exfat_entry_set_cache *es, int sync) +void exfat_update_dir_chksum_with_entry_set(struct exfat_entry_set_cache *es) { - struct exfat_sb_info *sbi = EXFAT_SB(sb); - struct buffer_head *bh; - sector_t sec = es->sector; - unsigned int off = es->offset; - int chksum_type = CS_DIR_ENTRY, i, num_entries = es->num_entries; - unsigned int buf_off = (off - es->offset); - unsigned int remaining_byte_in_sector, copy_entries, clu; + int chksum_type = CS_DIR_ENTRY, i; unsigned short chksum = 0; + struct exfat_dentry *ep; - for (i = 0; i < num_entries; i++) { - chksum = exfat_calc_chksum_2byte(&es->entries[i], DENTRY_SIZE, - chksum, chksum_type); + for (i = 0; i < es->num_entries; i++) { + ep = exfat_get_dentry_cached(es, i); + chksum = exfat_calc_chksum_2byte(ep, DENTRY_SIZE, chksum, + chksum_type); chksum_type = CS_DEFAULT; } + ep = exfat_get_dentry_cached(es, 0); + ep->dentry.file.checksum = cpu_to_le16(chksum); + es->modified = true; +} - es->entries[0].dentry.file.checksum = cpu_to_le16(chksum); +void exfat_free_dentry_set(struct exfat_entry_set_cache *es, int sync) +{ + int i; - while (num_entries) { - /* write per sector base */ - remaining_byte_in_sector = (1 << sb->s_blocksize_bits) - off; - copy_entries = min_t(int, - EXFAT_B_TO_DEN(remaining_byte_in_sector), - num_entries); - bh = sb_bread(sb, sec); - if (!bh) - goto err_out; - memcpy(bh->b_data + off, - (unsigned char *)&es->entries[0] + buf_off, - EXFAT_DEN_TO_B(copy_entries)); - exfat_update_bh(sb, bh, sync); - brelse(bh); - num_entries -= copy_entries; - - if (num_entries) { - /* get next sector */ - if (exfat_is_last_sector_in_cluster(sbi, sec)) { - clu = exfat_sector_to_cluster(sbi, sec); - if (es->alloc_flag == ALLOC_NO_FAT_CHAIN) - clu++; - else if (exfat_get_next_cluster(sb, &clu)) - goto err_out; - sec = exfat_cluster_to_sector(sbi, clu); - } else { - sec++; - } - off = 0; - buf_off += EXFAT_DEN_TO_B(copy_entries); - } + for (i = 0; i < es->num_bh; i++) { + if (es->modified) + exfat_update_bh(es->sb, es->bh[i], sync); + brelse(es->bh[i]); } - - return 0; -err_out: - return -EIO; + kfree(es); } static int exfat_walk_fat_chain(struct super_block *sb, @@ -820,34 +786,40 @@ static bool exfat_validate_entry(unsigned int type, } } +struct exfat_dentry *exfat_get_dentry_cached( + struct exfat_entry_set_cache *es, int num) +{ + int off = es->start_off + num * DENTRY_SIZE; + struct buffer_head *bh = es->bh[EXFAT_B_TO_BLK(off, es->sb)]; + char *p = bh->b_data + EXFAT_BLK_OFFSET(off, es->sb); + + return (struct exfat_dentry *)p; +} + /* * Returns a set of dentries for a file or dir. * - * Note that this is a copy (dump) of dentries so that user should - * call write_entry_set() to apply changes made in this entry set - * to the real device. + * Note It provides a direct pointer to bh->data via exfat_get_dentry_cached(). 
+ * User should call exfat_get_dentry_set() after setting 'modified' to apply + * changes made in this entry set to the real device. * * in: * sb+p_dir+entry: indicates a file/dir * type: specifies how many dentries should be included. - * out: - * file_ep: will point the first dentry(= file dentry) on success * return: * pointer of entry set on success, * NULL on failure. */ struct exfat_entry_set_cache *exfat_get_dentry_set(struct super_block *sb, - struct exfat_chain *p_dir, int entry, unsigned int type, - struct exfat_dentry **file_ep) + struct exfat_chain *p_dir, int entry, unsigned int type) { - int ret; + int ret, i, num_bh; unsigned int off, byte_offset, clu = 0; - unsigned int entry_type; sector_t sec; struct exfat_sb_info *sbi = EXFAT_SB(sb); struct exfat_entry_set_cache *es; - struct exfat_dentry *ep, *pos; - unsigned char num_entries; + struct exfat_dentry *ep; + int num_entries; enum exfat_validate_dentry_mode mode = ES_MODE_STARTED; struct buffer_head *bh; @@ -861,11 +833,18 @@ struct exfat_entry_set_cache *exfat_get_dentry_set(struct super_block *sb, if (ret) return NULL; + es = kzalloc(sizeof(*es), GFP_KERNEL); + if (!es) + return NULL; + es->sb = sb; + es->modified = false; + /* byte offset in cluster */ byte_offset = EXFAT_CLU_OFFSET(byte_offset, sbi); /* byte offset in sector */ off = EXFAT_BLK_OFFSET(byte_offset, sb); + es->start_off = off; /* sector offset in cluster */ sec = EXFAT_B_TO_BLK(byte_offset, sb); @@ -873,72 +852,46 @@ struct exfat_entry_set_cache *exfat_get_dentry_set(struct super_block *sb, bh = sb_bread(sb, sec); if (!bh) - return NULL; - - ep = (struct exfat_dentry *)(bh->b_data + off); - entry_type = exfat_get_entry_type(ep); + goto free_es; + es->bh[es->num_bh++] = bh; - if (entry_type != TYPE_FILE && entry_type != TYPE_DIR) - goto release_bh; + ep = exfat_get_dentry_cached(es, 0); + if (!exfat_validate_entry(exfat_get_entry_type(ep), &mode)) + goto free_es; num_entries = type == ES_ALL_ENTRIES ? 
ep->dentry.file.num_ext + 1 : type; - es = kmalloc(struct_size(es, entries, num_entries), GFP_KERNEL); - if (!es) - goto release_bh; - es->num_entries = num_entries; - es->sector = sec; - es->offset = off; - es->alloc_flag = p_dir->flags; - - pos = &es->entries[0]; - - while (num_entries) { - if (!exfat_validate_entry(exfat_get_entry_type(ep), &mode)) - goto free_es; - /* copy dentry */ - memcpy(pos, ep, sizeof(struct exfat_dentry)); - - if (--num_entries == 0) - break; - - if (((off + DENTRY_SIZE) & (sb->s_blocksize - 1)) < - (off & (sb->s_blocksize - 1))) { - /* get the next sector */ - if (exfat_is_last_sector_in_cluster(sbi, sec)) { - if (es->alloc_flag == ALLOC_NO_FAT_CHAIN) - clu++; - else if (exfat_get_next_cluster(sb, &clu)) - goto free_es; - sec = exfat_cluster_to_sector(sbi, clu); - } else { - sec++; - } - - brelse(bh); - bh = sb_bread(sb, sec); - if (!bh) + num_bh = EXFAT_B_TO_BLK_ROUND_UP(off + num_entries * DENTRY_SIZE, sb); + for (i = 1; i < num_bh; i++) { + /* get the next sector */ + if (exfat_is_last_sector_in_cluster(sbi, sec)) { + if (p_dir->flags == ALLOC_NO_FAT_CHAIN) + clu++; + else if (exfat_get_next_cluster(sb, &clu)) goto free_es; - off = 0; - ep = (struct exfat_dentry *)bh->b_data; + sec = exfat_cluster_to_sector(sbi, clu); } else { - ep++; - off += DENTRY_SIZE; + sec++; } - pos++; + + bh = sb_bread(sb, sec); + if (!bh) + goto free_es; + es->bh[es->num_bh++] = bh; } - if (file_ep) - *file_ep = &es->entries[0]; - brelse(bh); + /* validiate cached dentries */ + for (i = 1; i < num_entries; i++) { + ep = exfat_get_dentry_cached(es, i); + if (!exfat_validate_entry(exfat_get_entry_type(ep), &mode)) + goto free_es; + } return es; free_es: - kfree(es); -release_bh: - brelse(bh); + exfat_free_dentry_set(es, false); return NULL; } diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h index 294aa7792bc3c..c84ae9e605085 100644 --- a/fs/exfat/exfat_fs.h +++ b/fs/exfat/exfat_fs.h @@ -71,10 +71,8 @@ enum { #define MAX_NAME_LENGTH 255 /* max len of file name excluding NULL */ #define MAX_VFSNAME_BUF_SIZE ((MAX_NAME_LENGTH + 1) * MAX_CHARSET_SIZE) -#define FAT_CACHE_SIZE 128 -#define FAT_CACHE_HASH_SIZE 64 -#define BUF_CACHE_SIZE 256 -#define BUF_CACHE_HASH_SIZE 64 +/* Enough size to hold 256 dentry (even 512 Byte sector) */ +#define DIR_CACHE_SIZE (256*sizeof(struct exfat_dentry)/512+1) #define EXFAT_HINT_NONE -1 #define EXFAT_MIN_SUBDIR 2 @@ -170,14 +168,12 @@ struct exfat_hint { }; struct exfat_entry_set_cache { - /* sector number that contains file_entry */ - sector_t sector; - /* byte offset in the sector */ - unsigned int offset; - /* flag in stream entry. 01 for cluster chain, 03 for contig. 
*/ - int alloc_flag; + struct super_block *sb; + bool modified; + unsigned int start_off; + int num_bh; + struct buffer_head *bh[DIR_CACHE_SIZE]; unsigned int num_entries; - struct exfat_dentry entries[]; }; struct exfat_dir_entry { @@ -451,8 +447,7 @@ int exfat_remove_entries(struct inode *inode, struct exfat_chain *p_dir, int entry, int order, int num_entries); int exfat_update_dir_chksum(struct inode *inode, struct exfat_chain *p_dir, int entry); -int exfat_update_dir_chksum_with_entry_set(struct super_block *sb, - struct exfat_entry_set_cache *es, int sync); +void exfat_update_dir_chksum_with_entry_set(struct exfat_entry_set_cache *es); int exfat_calc_num_entries(struct exfat_uni_name *p_uniname); int exfat_find_dir_entry(struct super_block *sb, struct exfat_inode_info *ei, struct exfat_chain *p_dir, struct exfat_uni_name *p_uniname, @@ -463,9 +458,11 @@ int exfat_find_location(struct super_block *sb, struct exfat_chain *p_dir, struct exfat_dentry *exfat_get_dentry(struct super_block *sb, struct exfat_chain *p_dir, int entry, struct buffer_head **bh, sector_t *sector); +struct exfat_dentry *exfat_get_dentry_cached(struct exfat_entry_set_cache *es, + int num); struct exfat_entry_set_cache *exfat_get_dentry_set(struct super_block *sb, - struct exfat_chain *p_dir, int entry, unsigned int type, - struct exfat_dentry **file_ep); + struct exfat_chain *p_dir, int entry, unsigned int type); +void exfat_free_dentry_set(struct exfat_entry_set_cache *es, int sync); int exfat_count_dir_entries(struct super_block *sb, struct exfat_chain *p_dir); /* inode.c */ diff --git a/fs/exfat/file.c b/fs/exfat/file.c index 84f3d31a3a555..8e3f0eef45d77 100644 --- a/fs/exfat/file.c +++ b/fs/exfat/file.c @@ -96,11 +96,9 @@ int __exfat_truncate(struct inode *inode, loff_t new_size) unsigned int num_clusters_new, num_clusters_phys; unsigned int last_clu = EXFAT_FREE_CLUSTER; struct exfat_chain clu; - struct exfat_dentry *ep, *ep2; struct super_block *sb = inode->i_sb; struct exfat_sb_info *sbi = EXFAT_SB(sb); struct exfat_inode_info *ei = EXFAT_I(inode); - struct exfat_entry_set_cache *es = NULL; int evict = (ei->dir.dir == DIR_DELETED) ? 
1 : 0; /* check if the given file ID is opened */ @@ -153,12 +151,15 @@ int __exfat_truncate(struct inode *inode, loff_t new_size) /* update the directory entry */ if (!evict) { struct timespec64 ts; + struct exfat_dentry *ep, *ep2; + struct exfat_entry_set_cache *es; es = exfat_get_dentry_set(sb, &(ei->dir), ei->entry, - ES_ALL_ENTRIES, &ep); + ES_ALL_ENTRIES); if (!es) return -EIO; - ep2 = ep + 1; + ep = exfat_get_dentry_cached(es, 0); + ep2 = exfat_get_dentry_cached(es, 1); ts = current_time(inode); exfat_set_entry_time(sbi, &ts, @@ -185,10 +186,8 @@ int __exfat_truncate(struct inode *inode, loff_t new_size) ep2->dentry.stream.start_clu = EXFAT_FREE_CLUSTER; } - if (exfat_update_dir_chksum_with_entry_set(sb, es, - inode_needs_sync(inode))) - return -EIO; - kfree(es); + exfat_update_dir_chksum_with_entry_set(es); + exfat_free_dentry_set(es, inode_needs_sync(inode)); } /* cut off from the FAT chain */ diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c index 3f367d081cd6d..ef7cf7a6d187b 100644 --- a/fs/exfat/inode.c +++ b/fs/exfat/inode.c @@ -19,7 +19,6 @@ static int __exfat_write_inode(struct inode *inode, int sync) { - int ret = -EIO; unsigned long long on_disk_size; struct exfat_dentry *ep, *ep2; struct exfat_entry_set_cache *es = NULL; @@ -43,11 +42,11 @@ static int __exfat_write_inode(struct inode *inode, int sync) exfat_set_vol_flags(sb, VOL_DIRTY); /* get the directory entry of given file or directory */ - es = exfat_get_dentry_set(sb, &(ei->dir), ei->entry, ES_ALL_ENTRIES, - &ep); + es = exfat_get_dentry_set(sb, &(ei->dir), ei->entry, ES_ALL_ENTRIES); if (!es) return -EIO; - ep2 = ep + 1; + ep = exfat_get_dentry_cached(es, 0); + ep2 = exfat_get_dentry_cached(es, 1); ep->dentry.file.attr = cpu_to_le16(exfat_make_attr(inode)); @@ -77,9 +76,9 @@ static int __exfat_write_inode(struct inode *inode, int sync) ep2->dentry.stream.valid_size = cpu_to_le64(on_disk_size); ep2->dentry.stream.size = ep2->dentry.stream.valid_size; - ret = exfat_update_dir_chksum_with_entry_set(sb, es, sync); - kfree(es); - return ret; + exfat_update_dir_chksum_with_entry_set(es); + exfat_free_dentry_set(es, sync); + return 0; } int exfat_write_inode(struct inode *inode, struct writeback_control *wbc) @@ -110,8 +109,6 @@ static int exfat_map_cluster(struct inode *inode, unsigned int clu_offset, int ret, modified = false; unsigned int last_clu; struct exfat_chain new_clu; - struct exfat_dentry *ep; - struct exfat_entry_set_cache *es = NULL; struct super_block *sb = inode->i_sb; struct exfat_sb_info *sbi = EXFAT_SB(sb); struct exfat_inode_info *ei = EXFAT_I(inode); @@ -222,34 +219,28 @@ static int exfat_map_cluster(struct inode *inode, unsigned int clu_offset, num_clusters += num_to_be_allocated; *clu = new_clu.dir; - if (ei->dir.dir != DIR_DELETED) { + if (ei->dir.dir != DIR_DELETED && modified) { + struct exfat_dentry *ep; + struct exfat_entry_set_cache *es; + es = exfat_get_dentry_set(sb, &(ei->dir), ei->entry, - ES_ALL_ENTRIES, &ep); + ES_ALL_ENTRIES); if (!es) return -EIO; /* get stream entry */ - ep++; + ep = exfat_get_dentry_cached(es, 1); /* update directory entry */ - if (modified) { - if (ep->dentry.stream.flags != ei->flags) - ep->dentry.stream.flags = ei->flags; - - if (le32_to_cpu(ep->dentry.stream.start_clu) != - ei->start_clu) - ep->dentry.stream.start_clu = - cpu_to_le32(ei->start_clu); - - ep->dentry.stream.valid_size = - cpu_to_le64(i_size_read(inode)); - ep->dentry.stream.size = - ep->dentry.stream.valid_size; - } - - if (exfat_update_dir_chksum_with_entry_set(sb, es, - inode_needs_sync(inode))) - 
return -EIO; - kfree(es); + ep->dentry.stream.flags = ei->flags; + ep->dentry.stream.start_clu = + cpu_to_le32(ei->start_clu); + ep->dentry.stream.valid_size = + cpu_to_le64(i_size_read(inode)); + ep->dentry.stream.size = + ep->dentry.stream.valid_size; + + exfat_update_dir_chksum_with_entry_set(es); + exfat_free_dentry_set(es, inode_needs_sync(inode)); } /* end of if != DIR_DELETED */ diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c index 48f4df883f3ba..5b0f35329d63e 100644 --- a/fs/exfat/namei.c +++ b/fs/exfat/namei.c @@ -600,8 +600,6 @@ static int exfat_find(struct inode *dir, struct qstr *qname, int ret, dentry, num_entries, count; struct exfat_chain cdir; struct exfat_uni_name uni_name; - struct exfat_dentry *ep, *ep2; - struct exfat_entry_set_cache *es = NULL; struct super_block *sb = dir->i_sb; struct exfat_sb_info *sbi = EXFAT_SB(sb); struct exfat_inode_info *ei = EXFAT_I(dir); @@ -660,10 +658,14 @@ static int exfat_find(struct inode *dir, struct qstr *qname, info->num_subdirs = count; } else { - es = exfat_get_dentry_set(sb, &cdir, dentry, ES_2_ENTRIES, &ep); + struct exfat_dentry *ep, *ep2; + struct exfat_entry_set_cache *es; + + es = exfat_get_dentry_set(sb, &cdir, dentry, ES_2_ENTRIES); if (!es) return -EIO; - ep2 = ep + 1; + ep = exfat_get_dentry_cached(es, 0); + ep2 = exfat_get_dentry_cached(es, 1); info->type = exfat_get_entry_type(ep); info->attr = le16_to_cpu(ep->dentry.file.attr); @@ -681,7 +683,7 @@ static int exfat_find(struct inode *dir, struct qstr *qname, exfat_fs_error(sb, "non-zero size file starts with zero cluster (size : %llu, p_dir : %u, entry : 0x%08x)", i_size_read(dir), ei->dir.dir, ei->entry); - kfree(es); + exfat_free_dentry_set(es, false); return -EIO; } @@ -700,7 +702,7 @@ static int exfat_find(struct inode *dir, struct qstr *qname, ep->dentry.file.access_time, ep->dentry.file.access_date, 0); - kfree(es); + exfat_free_dentry_set(es, false); if (info->type == TYPE_DIR) { exfat_chain_set(&cdir, info->start_clu, -- GitLab From 181a9e8009a8a8bdb19c2e24eeeae7d8e77c8c47 Mon Sep 17 00:00:00 2001 From: Tetsuhiro Kohada Date: Fri, 29 May 2020 19:14:56 +0900 Subject: [PATCH 1262/1971] exfat: redefine PBR as boot_sector Aggregate PBR related definitions and redefine as "boot_sector" to comply with the exFAT specification. And, rename variable names including 'pbr'. 
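As a side note to the dir-cache rework above: exfat_get_dentry_cached() only does index arithmetic over the buffer_heads the entry set already holds. A minimal user-space sketch of that lookup, assuming the 32-byte exFAT dentry size and a 512-byte block; locate_dentry() and the two constants are local to this example, not kernel code:

#include <stdio.h>

#define DENTRY_SIZE	32	/* bytes per exFAT directory entry (assumed above) */
#define BLOCK_SIZE	512	/* smallest exFAT logical sector size */

/*
 * Same arithmetic as exfat_get_dentry_cached(): dentry 'num' of the set
 * sits DENTRY_SIZE * num bytes past the offset of the first dentry, which
 * selects one cached block and a byte offset inside it.
 */
static void locate_dentry(unsigned int start_off, int num,
			  unsigned int *blk, unsigned int *blk_off)
{
	unsigned int off = start_off + num * DENTRY_SIZE;

	*blk = off / BLOCK_SIZE;	/* which bh[] slot */
	*blk_off = off % BLOCK_SIZE;	/* offset within that block */
}

int main(void)
{
	unsigned int blk, off;

	/* file entry starting 480 bytes into the first cached block ... */
	locate_dentry(480, 0, &blk, &off);
	printf("entry 0 -> bh[%u] + %u\n", blk, off);

	/* ... so the stream entry (num = 1) already lives in bh[1] */
	locate_dentry(480, 1, &blk, &off);
	printf("entry 1 -> bh[%u] + %u\n", blk, off);
	return 0;
}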
Signed-off-by: Tetsuhiro Kohada Reviewed-by: Sungjong Seo Signed-off-by: Namjae Jeon --- fs/exfat/exfat_fs.h | 2 +- fs/exfat/exfat_raw.h | 79 +++++++++++++++-------------------------- fs/exfat/super.c | 84 ++++++++++++++++++++++---------------------- 3 files changed, 72 insertions(+), 93 deletions(-) diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h index c84ae9e605085..911f58b93f3d2 100644 --- a/fs/exfat/exfat_fs.h +++ b/fs/exfat/exfat_fs.h @@ -227,7 +227,7 @@ struct exfat_sb_info { unsigned int root_dir; /* root dir cluster */ unsigned int dentries_per_clu; /* num of dentries per cluster */ unsigned int vol_flag; /* volume dirty flag */ - struct buffer_head *pbr_bh; /* buffer_head of PBR sector */ + struct buffer_head *boot_bh; /* buffer_head of BOOT sector */ unsigned int map_clu; /* allocation bitmap start cluster */ unsigned int map_sectors; /* num of allocation bitmap sectors */ diff --git a/fs/exfat/exfat_raw.h b/fs/exfat/exfat_raw.h index 8d6c64a7546d8..07f74190df44b 100644 --- a/fs/exfat/exfat_raw.h +++ b/fs/exfat/exfat_raw.h @@ -8,7 +8,8 @@ #include -#define PBR_SIGNATURE 0xAA55 +#define BOOT_SIGNATURE 0xAA55 +#define EXBOOT_SIGNATURE 0xAA550000 #define EXFAT_MAX_FILE_LEN 255 @@ -55,7 +56,7 @@ /* checksum types */ #define CS_DIR_ENTRY 0 -#define CS_PBR_SECTOR 1 +#define CS_BOOT_SECTOR 1 #define CS_DEFAULT 2 /* file attributes */ @@ -69,57 +70,35 @@ #define ATTR_RWMASK (ATTR_HIDDEN | ATTR_SYSTEM | ATTR_VOLUME | \ ATTR_SUBDIR | ATTR_ARCHIVE) -#define PBR64_JUMP_BOOT_LEN 3 -#define PBR64_OEM_NAME_LEN 8 -#define PBR64_RESERVED_LEN 53 +#define BOOTSEC_JUMP_BOOT_LEN 3 +#define BOOTSEC_FS_NAME_LEN 8 +#define BOOTSEC_OLDBPB_LEN 53 #define EXFAT_FILE_NAME_LEN 15 -/* EXFAT BIOS parameter block (64 bytes) */ -struct bpb64 { - __u8 jmp_boot[PBR64_JUMP_BOOT_LEN]; - __u8 oem_name[PBR64_OEM_NAME_LEN]; - __u8 res_zero[PBR64_RESERVED_LEN]; -} __packed; - -/* EXFAT EXTEND BIOS parameter block (56 bytes) */ -struct bsx64 { - __le64 vol_offset; - __le64 vol_length; - __le32 fat_offset; - __le32 fat_length; - __le32 clu_offset; - __le32 clu_count; - __le32 root_cluster; - __le32 vol_serial; - __u8 fs_version[2]; - __le16 vol_flags; - __u8 sect_size_bits; - __u8 sect_per_clus_bits; - __u8 num_fats; - __u8 phy_drv_no; - __u8 perc_in_use; - __u8 reserved2[7]; -} __packed; - -/* EXFAT PBR[BPB+BSX] (120 bytes) */ -struct pbr64 { - struct bpb64 bpb; - struct bsx64 bsx; -} __packed; - -/* Common PBR[Partition Boot Record] (512 bytes) */ -struct pbr { - union { - __u8 raw[64]; - struct bpb64 f64; - } bpb; - union { - __u8 raw[56]; - struct bsx64 f64; - } bsx; - __u8 boot_code[390]; - __le16 signature; +/* EXFAT: Main and Backup Boot Sector (512 bytes) */ +struct boot_sector { + __u8 jmp_boot[BOOTSEC_JUMP_BOOT_LEN]; + __u8 fs_name[BOOTSEC_FS_NAME_LEN]; + __u8 must_be_zero[BOOTSEC_OLDBPB_LEN]; + __le64 partition_offset; + __le64 vol_length; + __le32 fat_offset; + __le32 fat_length; + __le32 clu_offset; + __le32 clu_count; + __le32 root_cluster; + __le32 vol_serial; + __u8 fs_revision[2]; + __le16 vol_flags; + __u8 sect_size_bits; + __u8 sect_per_clus_bits; + __u8 num_fats; + __u8 drv_sel; + __u8 percent_in_use; + __u8 reserved[7]; + __u8 boot_code[390]; + __le16 signature; } __packed; struct exfat_dentry { diff --git a/fs/exfat/super.c b/fs/exfat/super.c index c1f47f4071a80..e60d28e73ff00 100644 --- a/fs/exfat/super.c +++ b/fs/exfat/super.c @@ -49,7 +49,7 @@ static void exfat_put_super(struct super_block *sb) sync_blockdev(sb->s_bdev); exfat_set_vol_flags(sb, VOL_CLEAN); exfat_free_bitmap(sbi); - 
brelse(sbi->pbr_bh); + brelse(sbi->boot_bh); mutex_unlock(&sbi->s_lock); call_rcu(&sbi->rcu, exfat_delayed_free); @@ -101,7 +101,7 @@ static int exfat_statfs(struct dentry *dentry, struct kstatfs *buf) int exfat_set_vol_flags(struct super_block *sb, unsigned short new_flag) { struct exfat_sb_info *sbi = EXFAT_SB(sb); - struct pbr64 *bpb = (struct pbr64 *)sbi->pbr_bh->b_data; + struct boot_sector *p_boot = (struct boot_sector *)sbi->boot_bh->b_data; bool sync; /* flags are not changed */ @@ -116,18 +116,18 @@ int exfat_set_vol_flags(struct super_block *sb, unsigned short new_flag) if (sb_rdonly(sb)) return 0; - bpb->bsx.vol_flags = cpu_to_le16(new_flag); + p_boot->vol_flags = cpu_to_le16(new_flag); - if (new_flag == VOL_DIRTY && !buffer_dirty(sbi->pbr_bh)) + if (new_flag == VOL_DIRTY && !buffer_dirty(sbi->boot_bh)) sync = true; else sync = false; - set_buffer_uptodate(sbi->pbr_bh); - mark_buffer_dirty(sbi->pbr_bh); + set_buffer_uptodate(sbi->boot_bh); + mark_buffer_dirty(sbi->boot_bh); if (sync) - sync_dirty_buffer(sbi->pbr_bh); + sync_dirty_buffer(sbi->boot_bh); return 0; } @@ -366,13 +366,14 @@ static int exfat_read_root(struct inode *inode) return 0; } -static struct pbr *exfat_read_pbr_with_logical_sector(struct super_block *sb) +static struct boot_sector *exfat_read_boot_with_logical_sector( + struct super_block *sb) { struct exfat_sb_info *sbi = EXFAT_SB(sb); - struct pbr *p_pbr = (struct pbr *) (sbi->pbr_bh)->b_data; + struct boot_sector *p_boot = (struct boot_sector *)sbi->boot_bh->b_data; unsigned short logical_sect = 0; - logical_sect = 1 << p_pbr->bsx.f64.sect_size_bits; + logical_sect = 1 << p_boot->sect_size_bits; if (!is_power_of_2(logical_sect) || logical_sect < 512 || logical_sect > 4096) { @@ -387,49 +388,48 @@ static struct pbr *exfat_read_pbr_with_logical_sector(struct super_block *sb) } if (logical_sect > sb->s_blocksize) { - brelse(sbi->pbr_bh); - sbi->pbr_bh = NULL; + brelse(sbi->boot_bh); + sbi->boot_bh = NULL; if (!sb_set_blocksize(sb, logical_sect)) { exfat_err(sb, "unable to set blocksize %u", logical_sect); return NULL; } - sbi->pbr_bh = sb_bread(sb, 0); - if (!sbi->pbr_bh) { + sbi->boot_bh = sb_bread(sb, 0); + if (!sbi->boot_bh) { exfat_err(sb, "unable to read boot sector (logical sector size = %lu)", sb->s_blocksize); return NULL; } - p_pbr = (struct pbr *)sbi->pbr_bh->b_data; + p_boot = (struct boot_sector *)sbi->boot_bh->b_data; } - return p_pbr; + return p_boot; } /* mount the file system volume */ static int __exfat_fill_super(struct super_block *sb) { int ret; - struct pbr *p_pbr; - struct pbr64 *p_bpb; + struct boot_sector *p_boot; struct exfat_sb_info *sbi = EXFAT_SB(sb); /* set block size to read super block */ sb_min_blocksize(sb, 512); /* read boot sector */ - sbi->pbr_bh = sb_bread(sb, 0); - if (!sbi->pbr_bh) { + sbi->boot_bh = sb_bread(sb, 0); + if (!sbi->boot_bh) { exfat_err(sb, "unable to read boot sector"); return -EIO; } /* PRB is read */ - p_pbr = (struct pbr *)sbi->pbr_bh->b_data; + p_boot = (struct boot_sector *)sbi->boot_bh->b_data; - /* check the validity of PBR */ - if (le16_to_cpu((p_pbr->signature)) != PBR_SIGNATURE) { + /* check the validity of BOOT */ + if (le16_to_cpu((p_boot->signature)) != BOOT_SIGNATURE) { exfat_err(sb, "invalid boot record signature"); ret = -EINVAL; goto free_bh; @@ -437,8 +437,8 @@ static int __exfat_fill_super(struct super_block *sb) /* check logical sector size */ - p_pbr = exfat_read_pbr_with_logical_sector(sb); - if (!p_pbr) { + p_boot = exfat_read_boot_with_logical_sector(sb); + if (!p_boot) { ret = -EIO; 
goto free_bh; } @@ -447,43 +447,43 @@ static int __exfat_fill_super(struct super_block *sb) * res_zero field must be filled with zero to prevent mounting * from FAT volume. */ - if (memchr_inv(p_pbr->bpb.f64.res_zero, 0, - sizeof(p_pbr->bpb.f64.res_zero))) { + if (memchr_inv(p_boot->must_be_zero, 0, + sizeof(p_boot->must_be_zero))) { ret = -EINVAL; goto free_bh; } - p_bpb = (struct pbr64 *)p_pbr; - if (!p_bpb->bsx.num_fats) { + p_boot = (struct boot_sector *)p_boot; + if (!p_boot->num_fats) { exfat_err(sb, "bogus number of FAT structure"); ret = -EINVAL; goto free_bh; } - sbi->sect_per_clus = 1 << p_bpb->bsx.sect_per_clus_bits; - sbi->sect_per_clus_bits = p_bpb->bsx.sect_per_clus_bits; + sbi->sect_per_clus = 1 << p_boot->sect_per_clus_bits; + sbi->sect_per_clus_bits = p_boot->sect_per_clus_bits; sbi->cluster_size_bits = sbi->sect_per_clus_bits + sb->s_blocksize_bits; sbi->cluster_size = 1 << sbi->cluster_size_bits; - sbi->num_FAT_sectors = le32_to_cpu(p_bpb->bsx.fat_length); - sbi->FAT1_start_sector = le32_to_cpu(p_bpb->bsx.fat_offset); - sbi->FAT2_start_sector = p_bpb->bsx.num_fats == 1 ? + sbi->num_FAT_sectors = le32_to_cpu(p_boot->fat_length); + sbi->FAT1_start_sector = le32_to_cpu(p_boot->fat_offset); + sbi->FAT2_start_sector = p_boot->num_fats == 1 ? sbi->FAT1_start_sector : sbi->FAT1_start_sector + sbi->num_FAT_sectors; - sbi->data_start_sector = le32_to_cpu(p_bpb->bsx.clu_offset); - sbi->num_sectors = le64_to_cpu(p_bpb->bsx.vol_length); + sbi->data_start_sector = le32_to_cpu(p_boot->clu_offset); + sbi->num_sectors = le64_to_cpu(p_boot->vol_length); /* because the cluster index starts with 2 */ - sbi->num_clusters = le32_to_cpu(p_bpb->bsx.clu_count) + + sbi->num_clusters = le32_to_cpu(p_boot->clu_count) + EXFAT_RESERVED_CLUSTERS; - sbi->root_dir = le32_to_cpu(p_bpb->bsx.root_cluster); + sbi->root_dir = le32_to_cpu(p_boot->root_cluster); sbi->dentries_per_clu = 1 << (sbi->cluster_size_bits - DENTRY_SIZE_BITS); - sbi->vol_flag = le16_to_cpu(p_bpb->bsx.vol_flags); + sbi->vol_flag = le16_to_cpu(p_boot->vol_flags); sbi->clu_srch_ptr = EXFAT_FIRST_CLUSTER; sbi->used_clusters = EXFAT_CLUSTERS_UNTRACKED; - if (le16_to_cpu(p_bpb->bsx.vol_flags) & VOL_DIRTY) { + if (le16_to_cpu(p_boot->vol_flags) & VOL_DIRTY) { sbi->vol_flag |= VOL_DIRTY; exfat_warn(sb, "Volume was not properly unmounted. Some data may be corrupt. Please run fsck."); } @@ -517,7 +517,7 @@ free_alloc_bitmap: free_upcase_table: exfat_free_upcase_table(sbi); free_bh: - brelse(sbi->pbr_bh); + brelse(sbi->boot_bh); return ret; } @@ -608,7 +608,7 @@ put_inode: free_table: exfat_free_upcase_table(sbi); exfat_free_bitmap(sbi); - brelse(sbi->pbr_bh); + brelse(sbi->boot_bh); check_nls_io: unload_nls(sbi->nls_io); -- GitLab From 33404a159828a97fefada0d8f4cf910873b8e9f1 Mon Sep 17 00:00:00 2001 From: Tetsuhiro Kohada Date: Fri, 29 May 2020 19:14:57 +0900 Subject: [PATCH 1263/1971] exfat: separate the boot sector analysis Separate the boot sector analysis to read_boot_sector(). And add a check for the fs_name field. Furthermore, add a strict consistency check, because overlapping areas can cause serious corruption. 
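The consistency checks guard against boot sector geometry whose regions overlap: every cluster needs a 4-byte FAT entry, and the cluster heap must begin after the FAT region(s). A rough user-space sketch of the same two checks, assuming the fields have already been read out of struct boot_sector; struct exfat_geometry and geometry_is_consistent() are names local to this example:

#include <stdio.h>
#include <stdbool.h>

/* Geometry pulled from struct boot_sector, after endian conversion. */
struct exfat_geometry {
	unsigned int fat_offset;	/* first FAT sector */
	unsigned int fat_length;	/* sectors per FAT */
	unsigned char num_fats;		/* 1 or 2 */
	unsigned int clu_offset;	/* first cluster-heap sector */
	unsigned int clu_count;		/* usable clusters */
	unsigned char sect_size_bits;	/* log2(bytes per sector) */
};

/* Same idea as the checks added to exfat_read_boot_sector(). */
static bool geometry_is_consistent(const struct exfat_geometry *g)
{
	unsigned long long fat_bytes =
		(unsigned long long)g->fat_length << g->sect_size_bits;
	unsigned long long num_clusters = g->clu_count + 2; /* indexes start at 2 */

	/* each cluster needs a 4-byte FAT entry */
	if (fat_bytes < num_clusters * 4) {
		fprintf(stderr, "bogus fat length\n");
		return false;
	}
	/* the cluster heap must not overlap the FAT region(s) */
	if (g->clu_offset <
	    g->fat_offset + (unsigned long long)g->fat_length * g->num_fats) {
		fprintf(stderr, "bogus data start sector\n");
		return false;
	}
	return true;
}

int main(void)
{
	struct exfat_geometry ok  = { 128, 256, 1, 384, 32000, 9 };
	struct exfat_geometry bad = { 128, 16, 1, 100, 32000, 9 }; /* FAT too small */

	printf("ok:  %s\n", geometry_is_consistent(&ok)  ? "consistent" : "rejected");
	printf("bad: %s\n", geometry_is_consistent(&bad) ? "consistent" : "rejected");
	return 0;
}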
Signed-off-by: Tetsuhiro Kohada Reviewed-by: Sungjong Seo Signed-off-by: Namjae Jeon --- fs/exfat/exfat_raw.h | 2 + fs/exfat/super.c | 97 ++++++++++++++++++++++++-------------------- 2 files changed, 56 insertions(+), 43 deletions(-) diff --git a/fs/exfat/exfat_raw.h b/fs/exfat/exfat_raw.h index 07f74190df44b..350ce59cc3247 100644 --- a/fs/exfat/exfat_raw.h +++ b/fs/exfat/exfat_raw.h @@ -10,11 +10,13 @@ #define BOOT_SIGNATURE 0xAA55 #define EXBOOT_SIGNATURE 0xAA550000 +#define STR_EXFAT "EXFAT " /* size should be 8 */ #define EXFAT_MAX_FILE_LEN 255 #define VOL_CLEAN 0x0000 #define VOL_DIRTY 0x0002 +#define ERR_MEDIUM 0x0004 #define EXFAT_EOF_CLUSTER 0xFFFFFFFFu #define EXFAT_BAD_CLUSTER 0xFFFFFFF7u diff --git a/fs/exfat/super.c b/fs/exfat/super.c index e60d28e73ff00..6a1330be5a9a4 100644 --- a/fs/exfat/super.c +++ b/fs/exfat/super.c @@ -366,25 +366,20 @@ static int exfat_read_root(struct inode *inode) return 0; } -static struct boot_sector *exfat_read_boot_with_logical_sector( - struct super_block *sb) +static int exfat_calibrate_blocksize(struct super_block *sb, int logical_sect) { struct exfat_sb_info *sbi = EXFAT_SB(sb); - struct boot_sector *p_boot = (struct boot_sector *)sbi->boot_bh->b_data; - unsigned short logical_sect = 0; - - logical_sect = 1 << p_boot->sect_size_bits; if (!is_power_of_2(logical_sect) || logical_sect < 512 || logical_sect > 4096) { exfat_err(sb, "bogus logical sector size %u", logical_sect); - return NULL; + return -EIO; } if (logical_sect < sb->s_blocksize) { exfat_err(sb, "logical sector size too small for device (logical sector size = %u)", logical_sect); - return NULL; + return -EIO; } if (logical_sect > sb->s_blocksize) { @@ -394,24 +389,20 @@ static struct boot_sector *exfat_read_boot_with_logical_sector( if (!sb_set_blocksize(sb, logical_sect)) { exfat_err(sb, "unable to set blocksize %u", logical_sect); - return NULL; + return -EIO; } sbi->boot_bh = sb_bread(sb, 0); if (!sbi->boot_bh) { exfat_err(sb, "unable to read boot sector (logical sector size = %lu)", sb->s_blocksize); - return NULL; + return -EIO; } - - p_boot = (struct boot_sector *)sbi->boot_bh->b_data; } - return p_boot; + return 0; } -/* mount the file system volume */ -static int __exfat_fill_super(struct super_block *sb) +static int exfat_read_boot_sector(struct super_block *sb) { - int ret; struct boot_sector *p_boot; struct exfat_sb_info *sbi = EXFAT_SB(sb); @@ -424,51 +415,41 @@ static int __exfat_fill_super(struct super_block *sb) exfat_err(sb, "unable to read boot sector"); return -EIO; } - - /* PRB is read */ p_boot = (struct boot_sector *)sbi->boot_bh->b_data; /* check the validity of BOOT */ if (le16_to_cpu((p_boot->signature)) != BOOT_SIGNATURE) { exfat_err(sb, "invalid boot record signature"); - ret = -EINVAL; - goto free_bh; + return -EINVAL; } - - /* check logical sector size */ - p_boot = exfat_read_boot_with_logical_sector(sb); - if (!p_boot) { - ret = -EIO; - goto free_bh; + if (memcmp(p_boot->fs_name, STR_EXFAT, BOOTSEC_FS_NAME_LEN)) { + exfat_err(sb, "invalid fs_name"); /* fs_name may unprintable */ + return -EINVAL; } /* - * res_zero field must be filled with zero to prevent mounting + * must_be_zero field must be filled with zero to prevent mounting * from FAT volume. 
*/ - if (memchr_inv(p_boot->must_be_zero, 0, - sizeof(p_boot->must_be_zero))) { - ret = -EINVAL; - goto free_bh; - } + if (memchr_inv(p_boot->must_be_zero, 0, sizeof(p_boot->must_be_zero))) + return -EINVAL; - p_boot = (struct boot_sector *)p_boot; - if (!p_boot->num_fats) { + if (p_boot->num_fats != 1 && p_boot->num_fats != 2) { exfat_err(sb, "bogus number of FAT structure"); - ret = -EINVAL; - goto free_bh; + return -EINVAL; } sbi->sect_per_clus = 1 << p_boot->sect_per_clus_bits; sbi->sect_per_clus_bits = p_boot->sect_per_clus_bits; - sbi->cluster_size_bits = sbi->sect_per_clus_bits + sb->s_blocksize_bits; + sbi->cluster_size_bits = p_boot->sect_per_clus_bits + + p_boot->sect_size_bits; sbi->cluster_size = 1 << sbi->cluster_size_bits; sbi->num_FAT_sectors = le32_to_cpu(p_boot->fat_length); sbi->FAT1_start_sector = le32_to_cpu(p_boot->fat_offset); - sbi->FAT2_start_sector = p_boot->num_fats == 1 ? - sbi->FAT1_start_sector : - sbi->FAT1_start_sector + sbi->num_FAT_sectors; + sbi->FAT2_start_sector = le32_to_cpu(p_boot->fat_offset); + if (p_boot->num_fats == 2) + sbi->FAT2_start_sector += sbi->num_FAT_sectors; sbi->data_start_sector = le32_to_cpu(p_boot->clu_offset); sbi->num_sectors = le64_to_cpu(p_boot->vol_length); /* because the cluster index starts with 2 */ @@ -483,15 +464,45 @@ static int __exfat_fill_super(struct super_block *sb) sbi->clu_srch_ptr = EXFAT_FIRST_CLUSTER; sbi->used_clusters = EXFAT_CLUSTERS_UNTRACKED; - if (le16_to_cpu(p_boot->vol_flags) & VOL_DIRTY) { - sbi->vol_flag |= VOL_DIRTY; - exfat_warn(sb, "Volume was not properly unmounted. Some data may be corrupt. Please run fsck."); + /* check consistencies */ + if (sbi->num_FAT_sectors << p_boot->sect_size_bits < + sbi->num_clusters * 4) { + exfat_err(sb, "bogus fat length"); + return -EINVAL; + } + if (sbi->data_start_sector < + sbi->FAT1_start_sector + sbi->num_FAT_sectors * p_boot->num_fats) { + exfat_err(sb, "bogus data start sector"); + return -EINVAL; } + if (sbi->vol_flag & VOL_DIRTY) + exfat_warn(sb, "Volume was not properly unmounted. Some data may be corrupt. Please run fsck."); + if (sbi->vol_flag & ERR_MEDIUM) + exfat_warn(sb, "Medium has reported failures. Some data may be lost."); /* exFAT file size is limited by a disk volume size */ sb->s_maxbytes = (u64)(sbi->num_clusters - EXFAT_RESERVED_CLUSTERS) << sbi->cluster_size_bits; + /* check logical sector size */ + if (exfat_calibrate_blocksize(sb, 1 << p_boot->sect_size_bits)) + return -EIO; + + return 0; +} + +/* mount the file system volume */ +static int __exfat_fill_super(struct super_block *sb) +{ + int ret; + struct exfat_sb_info *sbi = EXFAT_SB(sb); + + ret = exfat_read_boot_sector(sb); + if (ret) { + exfat_err(sb, "failed to read boot sector"); + goto free_bh; + } + ret = exfat_create_upcase_table(sb); if (ret) { exfat_err(sb, "failed to load upcase table"); -- GitLab From 476189c0ef3b658de3f6b89fd0fdeb6dc451b564 Mon Sep 17 00:00:00 2001 From: Tetsuhiro Kohada Date: Sun, 31 May 2020 18:30:17 +0900 Subject: [PATCH 1264/1971] exfat: add boot region verification Add Boot-Regions verification specified in exFAT specification. Note that the checksum type is strongly related to the raw structure, so the'u32 'type is used to clarify the number of bits. 
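The boot checksum is a byte-wise rotate-right-by-one accumulation over the eleven boot-region sectors, skipping the VolumeFlags bytes (offsets 106 and 107) and the PercentInUse byte (offset 112) of the main boot sector so the checksum stays valid when those fields change at runtime. A self-contained sketch of the same calculation as exfat_calc_chksum32() below; boot_chksum32() is a stand-alone name used only in this example:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/*
 * 32-bit exFAT checksum: rotate the running value right by one bit, then
 * add the next byte.  For the main boot sector, bytes 106, 107
 * (VolumeFlags) and 112 (PercentInUse) are skipped.
 */
static uint32_t boot_chksum32(const uint8_t *data, size_t len,
			      uint32_t chksum, int is_boot_sector)
{
	for (size_t i = 0; i < len; i++) {
		if (is_boot_sector && (i == 106 || i == 107 || i == 112))
			continue;
		chksum = (chksum << 31) | (chksum >> 1);
		chksum += data[i];
	}
	return chksum;
}

int main(void)
{
	/* sample sector: exFAT jump boot bytes plus the "EXFAT   " fs_name */
	uint8_t sector[512] = { 0xEB, 0x76, 0x90,
				'E', 'X', 'F', 'A', 'T', ' ', ' ', ' ' };
	uint32_t sum = 0;

	/* main boot sector first; the extended boot sectors would follow */
	sum = boot_chksum32(sector, sizeof(sector), sum, 1);
	printf("running checksum after sector 0: 0x%08x\n", sum);
	return 0;
}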
Signed-off-by: Tetsuhiro Kohada Reviewed-by: Sungjong Seo Signed-off-by: Namjae Jeon --- fs/exfat/exfat_fs.h | 1 + fs/exfat/misc.c | 14 +++++++++++++ fs/exfat/super.c | 50 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 65 insertions(+) diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h index 911f58b93f3d2..8c2a70bfaa10d 100644 --- a/fs/exfat/exfat_fs.h +++ b/fs/exfat/exfat_fs.h @@ -514,6 +514,7 @@ void exfat_set_entry_time(struct exfat_sb_info *sbi, struct timespec64 *ts, u8 *tz, __le16 *time, __le16 *date, u8 *time_cs); unsigned short exfat_calc_chksum_2byte(void *data, int len, unsigned short chksum, int type); +u32 exfat_calc_chksum32(void *data, int len, u32 chksum, int type); void exfat_update_bh(struct super_block *sb, struct buffer_head *bh, int sync); void exfat_chain_set(struct exfat_chain *ec, unsigned int dir, unsigned int size, unsigned char flags); diff --git a/fs/exfat/misc.c b/fs/exfat/misc.c index ab7f88b1f6d30..b82d2dd5bd7cb 100644 --- a/fs/exfat/misc.c +++ b/fs/exfat/misc.c @@ -151,6 +151,20 @@ unsigned short exfat_calc_chksum_2byte(void *data, int len, return chksum; } +u32 exfat_calc_chksum32(void *data, int len, u32 chksum, int type) +{ + int i; + u8 *c = (u8 *)data; + + for (i = 0; i < len; i++, c++) { + if (unlikely(type == CS_BOOT_SECTOR && + (i == 106 || i == 107 || i == 112))) + continue; + chksum = ((chksum << 31) | (chksum >> 1)) + *c; + } + return chksum; +} + void exfat_update_bh(struct super_block *sb, struct buffer_head *bh, int sync) { set_bit(EXFAT_SB_DIRTY, &EXFAT_SB(sb)->s_state); diff --git a/fs/exfat/super.c b/fs/exfat/super.c index 6a1330be5a9a4..405717e4e3eac 100644 --- a/fs/exfat/super.c +++ b/fs/exfat/super.c @@ -491,6 +491,50 @@ static int exfat_read_boot_sector(struct super_block *sb) return 0; } +static int exfat_verify_boot_region(struct super_block *sb) +{ + struct buffer_head *bh = NULL; + u32 chksum = 0; + __le32 *p_sig, *p_chksum; + int sn, i; + + /* read boot sector sub-regions */ + for (sn = 0; sn < 11; sn++) { + bh = sb_bread(sb, sn); + if (!bh) + return -EIO; + + if (sn != 0 && sn <= 8) { + /* extended boot sector sub-regions */ + p_sig = (__le32 *)&bh->b_data[sb->s_blocksize - 4]; + if (le32_to_cpu(*p_sig) != EXBOOT_SIGNATURE) + exfat_warn(sb, "Invalid exboot-signature(sector = %d): 0x%08x", + sn, le32_to_cpu(*p_sig)); + } + + chksum = exfat_calc_chksum32(bh->b_data, sb->s_blocksize, + chksum, sn ? 
CS_DEFAULT : CS_BOOT_SECTOR); + brelse(bh); + } + + /* boot checksum sub-regions */ + bh = sb_bread(sb, sn); + if (!bh) + return -EIO; + + for (i = 0; i < sb->s_blocksize; i += sizeof(u32)) { + p_chksum = (__le32 *)&bh->b_data[i]; + if (le32_to_cpu(*p_chksum) != chksum) { + exfat_err(sb, "Invalid boot checksum (boot checksum : 0x%08x, checksum : 0x%08x)", + le32_to_cpu(*p_chksum), chksum); + brelse(bh); + return -EINVAL; + } + } + brelse(bh); + return 0; +} + /* mount the file system volume */ static int __exfat_fill_super(struct super_block *sb) { @@ -503,6 +547,12 @@ static int __exfat_fill_super(struct super_block *sb) goto free_bh; } + ret = exfat_verify_boot_region(sb); + if (ret) { + exfat_err(sb, "invalid boot region"); + goto free_bh; + } + ret = exfat_create_upcase_table(sb); if (ret) { exfat_err(sb, "failed to load upcase table"); -- GitLab From 5875bf287d95314c58add01184f361cc5aa38429 Mon Sep 17 00:00:00 2001 From: Tetsuhiro Kohada Date: Fri, 29 May 2020 19:14:59 +0900 Subject: [PATCH 1265/1971] exfat: standardize checksum calculation To clarify that it is a 16-bit checksum, the parts related to the 16-bit checksum are renamed and change type to u16. Furthermore, replace checksum calculation in exfat_load_upcase_table() with exfat_calc_checksum32(). Signed-off-by: Tetsuhiro Kohada Reviewed-by: Sungjong Seo Signed-off-by: Namjae Jeon --- fs/exfat/dir.c | 12 ++++++------ fs/exfat/exfat_fs.h | 5 ++--- fs/exfat/misc.c | 10 ++++------ fs/exfat/nls.c | 19 +++++++------------ 4 files changed, 19 insertions(+), 27 deletions(-) diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c index 2902d285bf204..de43534aa2997 100644 --- a/fs/exfat/dir.c +++ b/fs/exfat/dir.c @@ -491,7 +491,7 @@ int exfat_update_dir_chksum(struct inode *inode, struct exfat_chain *p_dir, int ret = 0; int i, num_entries; sector_t sector; - unsigned short chksum; + u16 chksum; struct exfat_dentry *ep, *fep; struct buffer_head *fbh, *bh; @@ -500,7 +500,7 @@ int exfat_update_dir_chksum(struct inode *inode, struct exfat_chain *p_dir, return -EIO; num_entries = fep->dentry.file.num_ext + 1; - chksum = exfat_calc_chksum_2byte(fep, DENTRY_SIZE, 0, CS_DIR_ENTRY); + chksum = exfat_calc_chksum16(fep, DENTRY_SIZE, 0, CS_DIR_ENTRY); for (i = 1; i < num_entries; i++) { ep = exfat_get_dentry(sb, p_dir, entry + i, &bh, NULL); @@ -508,7 +508,7 @@ int exfat_update_dir_chksum(struct inode *inode, struct exfat_chain *p_dir, ret = -EIO; goto release_fbh; } - chksum = exfat_calc_chksum_2byte(ep, DENTRY_SIZE, chksum, + chksum = exfat_calc_chksum16(ep, DENTRY_SIZE, chksum, CS_DEFAULT); brelse(bh); } @@ -593,8 +593,8 @@ void exfat_update_dir_chksum_with_entry_set(struct exfat_entry_set_cache *es) for (i = 0; i < es->num_entries; i++) { ep = exfat_get_dentry_cached(es, i); - chksum = exfat_calc_chksum_2byte(ep, DENTRY_SIZE, chksum, - chksum_type); + chksum = exfat_calc_chksum16(ep, DENTRY_SIZE, chksum, + chksum_type); chksum_type = CS_DEFAULT; } ep = exfat_get_dentry_cached(es, 0); @@ -1000,7 +1000,7 @@ rewind: } if (entry_type == TYPE_STREAM) { - unsigned short name_hash; + u16 name_hash; if (step != DIRENT_STEP_STRM) { step = DIRENT_STEP_FILE; diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h index 8c2a70bfaa10d..595f3117f4924 100644 --- a/fs/exfat/exfat_fs.h +++ b/fs/exfat/exfat_fs.h @@ -137,7 +137,7 @@ struct exfat_dentry_namebuf { struct exfat_uni_name { /* +3 for null and for converting */ unsigned short name[MAX_NAME_LENGTH + 3]; - unsigned short name_hash; + u16 name_hash; unsigned char name_len; }; @@ -512,8 +512,7 @@ void 
exfat_get_entry_time(struct exfat_sb_info *sbi, struct timespec64 *ts, void exfat_truncate_atime(struct timespec64 *ts); void exfat_set_entry_time(struct exfat_sb_info *sbi, struct timespec64 *ts, u8 *tz, __le16 *time, __le16 *date, u8 *time_cs); -unsigned short exfat_calc_chksum_2byte(void *data, int len, - unsigned short chksum, int type); +u16 exfat_calc_chksum16(void *data, int len, u16 chksum, int type); u32 exfat_calc_chksum32(void *data, int len, u32 chksum, int type); void exfat_update_bh(struct super_block *sb, struct buffer_head *bh, int sync); void exfat_chain_set(struct exfat_chain *ec, unsigned int dir, diff --git a/fs/exfat/misc.c b/fs/exfat/misc.c index b82d2dd5bd7cb..17d41f3d37092 100644 --- a/fs/exfat/misc.c +++ b/fs/exfat/misc.c @@ -136,17 +136,15 @@ void exfat_truncate_atime(struct timespec64 *ts) ts->tv_nsec = 0; } -unsigned short exfat_calc_chksum_2byte(void *data, int len, - unsigned short chksum, int type) +u16 exfat_calc_chksum16(void *data, int len, u16 chksum, int type) { int i; - unsigned char *c = (unsigned char *)data; + u8 *c = (u8 *)data; for (i = 0; i < len; i++, c++) { - if (((i == 2) || (i == 3)) && (type == CS_DIR_ENTRY)) + if (unlikely(type == CS_DIR_ENTRY && (i == 2 || i == 3))) continue; - chksum = (((chksum & 1) << 15) | ((chksum & 0xFFFE) >> 1)) + - (unsigned short)*c; + chksum = ((chksum << 15) | (chksum >> 1)) + *c; } return chksum; } diff --git a/fs/exfat/nls.c b/fs/exfat/nls.c index 1ebda90cbdd7c..19321773dd078 100644 --- a/fs/exfat/nls.c +++ b/fs/exfat/nls.c @@ -527,7 +527,7 @@ static int exfat_utf8_to_utf16(struct super_block *sb, *uniname = '\0'; p_uniname->name_len = unilen; - p_uniname->name_hash = exfat_calc_chksum_2byte(upname, unilen << 1, 0, + p_uniname->name_hash = exfat_calc_chksum16(upname, unilen << 1, 0, CS_DEFAULT); if (p_lossy) @@ -623,7 +623,7 @@ static int exfat_nls_to_ucs2(struct super_block *sb, *uniname = '\0'; p_uniname->name_len = unilen; - p_uniname->name_hash = exfat_calc_chksum_2byte(upname, unilen << 1, 0, + p_uniname->name_hash = exfat_calc_chksum16(upname, unilen << 1, 0, CS_DEFAULT); if (p_lossy) @@ -655,7 +655,8 @@ static int exfat_load_upcase_table(struct super_block *sb, { struct exfat_sb_info *sbi = EXFAT_SB(sb); unsigned int sect_size = sb->s_blocksize; - unsigned int i, index = 0, checksum = 0; + unsigned int i, index = 0; + u32 chksum = 0; int ret; unsigned char skip = false; unsigned short *upcase_table; @@ -681,13 +682,6 @@ static int exfat_load_upcase_table(struct super_block *sb, for (i = 0; i < sect_size && index <= 0xFFFF; i += 2) { unsigned short uni = get_unaligned_le16(bh->b_data + i); - checksum = ((checksum & 1) ? 0x80000000 : 0) + - (checksum >> 1) + - *(((unsigned char *)bh->b_data) + i); - checksum = ((checksum & 1) ? 
0x80000000 : 0) + - (checksum >> 1) + - *(((unsigned char *)bh->b_data) + (i + 1)); - if (skip) { index += uni; skip = false; @@ -701,13 +695,14 @@ static int exfat_load_upcase_table(struct super_block *sb, } } brelse(bh); + chksum = exfat_calc_chksum32(bh->b_data, i, chksum, CS_DEFAULT); } - if (index >= 0xFFFF && utbl_checksum == checksum) + if (index >= 0xFFFF && utbl_checksum == chksum) return 0; exfat_err(sb, "failed to load upcase table (idx : 0x%08x, chksum : 0x%08x, utbl_chksum : 0x%08x)", - index, checksum, utbl_checksum); + index, chksum, utbl_checksum); ret = -EINVAL; free_table: exfat_free_upcase_table(sbi); -- GitLab From f78059805fb9fc5c343e89f39cf11856d047dd60 Mon Sep 17 00:00:00 2001 From: Namjae Jeon Date: Mon, 1 Jun 2020 09:43:49 +0900 Subject: [PATCH 1266/1971] exfat: remove unnecessary reassignment of p_uniname->name_len kbuild test robot reported : fs/exfat/nls.c:531:22: warning: Variable 'p_uniname->name_len' is reassigned a value before the old one has been used. The reassignment of p_uniname->name_len is not needed and remove it. Reported-by: kbuild test robot Signed-off-by: Namjae Jeon --- fs/exfat/nls.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/fs/exfat/nls.c b/fs/exfat/nls.c index 19321773dd078..c1ec056954974 100644 --- a/fs/exfat/nls.c +++ b/fs/exfat/nls.c @@ -514,8 +514,6 @@ static int exfat_utf8_to_utf16(struct super_block *sb, return -ENAMETOOLONG; } - p_uniname->name_len = unilen & 0xFF; - for (i = 0; i < unilen; i++) { if (*uniname < 0x0020 || exfat_wstrchr(bad_uni_chars, *uniname)) -- GitLab From f341a7d8dcc4e3d01544d7bc145633f062ef6249 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 3 Jun 2020 09:48:36 +0900 Subject: [PATCH 1267/1971] exfat: fix memory leak in exfat_parse_param() butt3rflyh4ck reported memory leak found by syzkaller. A param->string held by exfat_mount_options. BUG: memory leak unreferenced object 0xffff88801972e090 (size 8): comm "syz-executor.2", pid 16298, jiffies 4295172466 (age 14.060s) hex dump (first 8 bytes): 6b 6f 69 38 2d 75 00 00 koi8-u.. backtrace: [<000000005bfe35d6>] kstrdup+0x36/0x70 mm/util.c:60 [<0000000018ed3277>] exfat_parse_param+0x160/0x5e0 fs/exfat/super.c:276 [<000000007680462b>] vfs_parse_fs_param+0x2b4/0x610 fs/fs_context.c:147 [<0000000097c027f2>] vfs_parse_fs_string+0xe6/0x150 fs/fs_context.c:191 [<00000000371bf78f>] generic_parse_monolithic+0x16f/0x1f0 fs/fs_context.c:231 [<000000005ce5eb1b>] do_new_mount fs/namespace.c:2812 [inline] [<000000005ce5eb1b>] do_mount+0x12bb/0x1b30 fs/namespace.c:3141 [<00000000b642040c>] __do_sys_mount fs/namespace.c:3350 [inline] [<00000000b642040c>] __se_sys_mount fs/namespace.c:3327 [inline] [<00000000b642040c>] __x64_sys_mount+0x18f/0x230 fs/namespace.c:3327 [<000000003b024e98>] do_syscall_64+0xf6/0x7d0 arch/x86/entry/common.c:295 [<00000000ce2b698c>] entry_SYSCALL_64_after_hwframe+0x49/0xb3 exfat_free() should call exfat_free_iocharset(), to prevent a leak in case we fail after parsing iocharset= but before calling get_tree_bdev(). Additionally, there's no point copying param->string in exfat_parse_param() - just steal it, leaving NULL in param->string. That's independent from the leak or fix thereof - it's simply avoiding an extra copy. 
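The "steal it" part is a plain ownership transfer: take the buffer the fs_context already allocated and leave NULL behind so the generic teardown cannot free it a second time. A user-space analogue, with struct param and struct opts standing in for fs_parameter and exfat_mount_options (all names local to this example):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct param { char *string; };		/* stands in for struct fs_parameter */
struct opts  { char *iocharset; };	/* stands in for exfat_mount_options */

/* Old behaviour: duplicate the string, leaving two owners to clean up. */
static int set_charset_copy(struct opts *o, struct param *p)
{
	free(o->iocharset);
	o->iocharset = strdup(p->string);
	return o->iocharset ? 0 : -1;
}

/*
 * New behaviour: take the buffer and leave NULL behind, so the caller's
 * teardown cannot free it again - the same move the patch makes with
 * param->string.
 */
static int set_charset_steal(struct opts *o, struct param *p)
{
	free(o->iocharset);
	o->iocharset = p->string;
	p->string = NULL;
	return 0;
}

int main(void)
{
	struct param p = { strdup("koi8-u") };
	struct opts o = { NULL };

	set_charset_copy(&o, &p);	/* o and p each hold an allocation */
	set_charset_steal(&o, &p);	/* o now owns p's buffer, p holds NULL */

	printf("opts owns \"%s\", param->string is %s\n",
	       o.iocharset, p.string ? p.string : "NULL");

	free(o.iocharset);
	free(p.string);			/* no-op: NULL after the steal */
	return 0;
}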
Fixes: 719c1e182916 ("exfat: add super block operations") Cc: stable@vger.kernel.org # v5.7 Reported-by: butt3rflyh4ck Signed-off-by: Al Viro Signed-off-by: Namjae Jeon --- fs/exfat/super.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/fs/exfat/super.c b/fs/exfat/super.c index 405717e4e3eac..e650e65536f84 100644 --- a/fs/exfat/super.c +++ b/fs/exfat/super.c @@ -273,9 +273,8 @@ static int exfat_parse_param(struct fs_context *fc, struct fs_parameter *param) break; case Opt_charset: exfat_free_iocharset(sbi); - opts->iocharset = kstrdup(param->string, GFP_KERNEL); - if (!opts->iocharset) - return -ENOMEM; + opts->iocharset = param->string; + param->string = NULL; break; case Opt_errors: opts->errors = result.uint_32; @@ -686,7 +685,12 @@ static int exfat_get_tree(struct fs_context *fc) static void exfat_free(struct fs_context *fc) { - kfree(fc->s_fs_info); + struct exfat_sb_info *sbi = fc->s_fs_info; + + if (sbi) { + exfat_free_iocharset(sbi); + kfree(sbi); + } } static const struct fs_context_operations exfat_context_ops = { -- GitLab From 29bbb14bfc80dd760b07d2be0a27e610562982e3 Mon Sep 17 00:00:00 2001 From: Namjae Jeon Date: Thu, 4 Jun 2020 08:05:31 +0900 Subject: [PATCH 1268/1971] exfat: fix incorrect update of stream entry in __exfat_truncate() At truncate, there is a problem of incorrect updating in the file entry pointer instead of stream entry. This will cause the problem of overwriting the time field of the file entry to new_size. Fix it to update stream entry. Fixes: 98d917047e8b ("exfat: add file operations") Cc: stable@vger.kernel.org # v5.7 Signed-off-by: Namjae Jeon --- fs/exfat/file.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/fs/exfat/file.c b/fs/exfat/file.c index 8e3f0eef45d77..fce03f3187873 100644 --- a/fs/exfat/file.c +++ b/fs/exfat/file.c @@ -171,11 +171,11 @@ int __exfat_truncate(struct inode *inode, loff_t new_size) /* File size should be zero if there is no cluster allocated */ if (ei->start_clu == EXFAT_EOF_CLUSTER) { - ep->dentry.stream.valid_size = 0; - ep->dentry.stream.size = 0; + ep2->dentry.stream.valid_size = 0; + ep2->dentry.stream.size = 0; } else { - ep->dentry.stream.valid_size = cpu_to_le64(new_size); - ep->dentry.stream.size = ep->dentry.stream.valid_size; + ep2->dentry.stream.valid_size = cpu_to_le64(new_size); + ep2->dentry.stream.size = ep->dentry.stream.valid_size; } if (new_size == 0) { -- GitLab From a949824f01f3b39f737d77aed6cba47aced09319 Mon Sep 17 00:00:00 2001 From: "hyeongseok.kim" Date: Thu, 4 Jun 2020 13:54:28 +0900 Subject: [PATCH 1269/1971] exfat: fix range validation error in alloc and free cluster There is check error in range condition that can never be entered even with invalid input. Replace incorrent checking code with already existing valid checker. 
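The removed condition had the form 'x < 2 && x >= num_clusters', which no value can satisfy, so the invalid-cluster branch was unreachable; the existing helper tests the intended range (a valid exFAT cluster index lies in [2, num_clusters)). A tiny demonstration, using a local stand-in for the kernel's is_valid_cluster():

#include <stdio.h>
#include <stdbool.h>

#define EXFAT_FIRST_CLUSTER 2

/* Local stand-in for the helper the patch switches to. */
static bool is_valid_cluster(unsigned int num_clusters, unsigned int clu)
{
	return clu >= EXFAT_FIRST_CLUSTER && clu < num_clusters;
}

/* The removed check: '< 2' AND '>= num_clusters' is unsatisfiable. */
static bool broken_is_invalid(unsigned int num_clusters, unsigned int clu)
{
	return clu < 2 && clu >= num_clusters;
}

int main(void)
{
	unsigned int num_clusters = 1000;
	unsigned int samples[] = { 0, 1, 2, 999, 1000, 4000000000u };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("clu %10u: broken check says %s, correct check says %s\n",
		       samples[i],
		       broken_is_invalid(num_clusters, samples[i]) ? "invalid" : "ok",
		       is_valid_cluster(num_clusters, samples[i]) ? "valid" : "invalid");
	return 0;
}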
Signed-off-by: hyeongseok.kim Acked-by: Sungjong Seo Signed-off-by: Namjae Jeon --- fs/exfat/fatent.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/exfat/fatent.c b/fs/exfat/fatent.c index 267e5e09eb134..4e5c5c9c0f2df 100644 --- a/fs/exfat/fatent.c +++ b/fs/exfat/fatent.c @@ -169,7 +169,7 @@ int exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain) return 0; /* check cluster validation */ - if (p_chain->dir < 2 && p_chain->dir >= sbi->num_clusters) { + if (!is_valid_cluster(sbi, p_chain->dir)) { exfat_err(sb, "invalid start cluster (%u)", p_chain->dir); return -EIO; } @@ -346,7 +346,7 @@ int exfat_alloc_cluster(struct inode *inode, unsigned int num_alloc, } /* check cluster validation */ - if (hint_clu < EXFAT_FIRST_CLUSTER && hint_clu >= sbi->num_clusters) { + if (!is_valid_cluster(sbi, hint_clu)) { exfat_err(sb, "hint_cluster is invalid (%u)", hint_clu); hint_clu = EXFAT_FIRST_CLUSTER; -- GitLab From fc961522ddbdf00254dd03b677627139cc1f68bc Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 8 Jun 2020 17:16:29 +0300 Subject: [PATCH 1270/1971] exfat: Fix potential use after free in exfat_load_upcase_table() This code calls brelse(bh) and then dereferences "bh" on the next line resulting in a possible use after free. The brelse() should just be moved down a line. Fixes: b676fdbcf4c8 ("exfat: standardize checksum calculation") Signed-off-by: Dan Carpenter Signed-off-by: Namjae Jeon --- fs/exfat/nls.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/exfat/nls.c b/fs/exfat/nls.c index c1ec056954974..57b5a7a4d1f7a 100644 --- a/fs/exfat/nls.c +++ b/fs/exfat/nls.c @@ -692,8 +692,8 @@ static int exfat_load_upcase_table(struct super_block *sb, index++; } } - brelse(bh); chksum = exfat_calc_chksum32(bh->b_data, i, chksum, CS_DEFAULT); + brelse(bh); } if (index >= 0xFFFF && utbl_checksum == chksum) -- GitLab From 7c582bf4ed84f3eb58bdd1f63024a14c17551e7d Mon Sep 17 00:00:00 2001 From: James Morse Date: Fri, 29 May 2020 15:06:54 +0000 Subject: [PATCH 1271/1971] KVM: arm64: Stop writing aarch32's CSSELR into ACTLR aarch32 has pairs of registers to access the high and low parts of 64bit registers. KVM has a union of 64bit sys_regs[] and 32bit copro[]. The 32bit accessors read the high or low part of the 64bit sys_reg[] value through the union. Both sys_reg_descs[] and cp15_regs[] list access_csselr() as the accessor for CSSELR{,_EL1}. access_csselr() is only aware of the 64bit sys_regs[], and expects r->reg to be 'CSSELR_EL1' in the enum, index 2 of the 64bit array. cp15_regs[] uses the 32bit copro[] alias of sys_regs[]. Here CSSELR is c0_CSSELR which is the same location in sys_reg[]. r->reg is 'c0_CSSELR', index 4 in the 32bit array. access_csselr() uses the 32bit r->reg value to access the 64bit array, so reads and write the wrong value. sys_regs[4], is ACTLR_EL1, which is subsequently save/restored when we enter the guest. ACTLR_EL1 is supposed to be read-only for the guest. This register only affects execution at EL1, and the host's value is restored before we return to host EL1. Convert the 32bit register index back to the 64bit version. 
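To make the index mix-up concrete: the 32bit copro[] view addresses 32bit halves of the 64bit sys_regs[] storage, so every aarch32 register index is twice the 64bit one, and the accessor has to divide by two to get back to the sys_regs[] slot. A stand-alone illustration, with enum values taken from the indexes quoted above (2 for CSSELR_EL1, 4 for ACTLR_EL1 and c0_CSSELR):

#include <stdio.h>

/* Slots quoted in the commit message for the 64-bit sys_regs[] array. */
enum { MPIDR_EL1 = 1, CSSELR_EL1 = 2, SCTLR_EL1 = 3, ACTLR_EL1 = 4 };

/* The 32-bit copro[] alias indexes 32-bit halves of the same storage,
 * so each aarch32 index is twice the 64-bit one. */
enum { c0_MPIDR = MPIDR_EL1 * 2, c0_CSSELR = CSSELR_EL1 * 2 };

int main(void)
{
	int reg = c0_CSSELR;	/* what cp15_regs[] hands to the accessor */

	printf("aarch32 index %d used directly hits sys_regs[%d] (ACTLR_EL1!)\n",
	       reg, reg);
	printf("converted back: sys_regs[%d] (CSSELR_EL1)\n", reg / 2);
	return 0;
}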
Suggested-by: Marc Zyngier Signed-off-by: James Morse Signed-off-by: Marc Zyngier Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20200529150656.7339-2-james.morse@arm.com --- arch/arm64/kvm/sys_regs.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index ad1d57501d6d8..12f8d57a3cb8e 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1319,10 +1319,16 @@ static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { + int reg = r->reg; + + /* See the 32bit mapping in kvm_host.h */ + if (p->is_aarch32) + reg = r->reg / 2; + if (p->is_write) - vcpu_write_sys_reg(vcpu, p->regval, r->reg); + vcpu_write_sys_reg(vcpu, p->regval, reg); else - p->regval = vcpu_read_sys_reg(vcpu, r->reg); + p->regval = vcpu_read_sys_reg(vcpu, reg); return true; } -- GitLab From ef5a294be8d0e17b91d23be905f69368b0d3952e Mon Sep 17 00:00:00 2001 From: James Morse Date: Fri, 29 May 2020 15:06:55 +0000 Subject: [PATCH 1272/1971] KVM: arm64: Add emulation for 32bit guests accessing ACTLR2 ACTLR_EL1 is a 64bit register while the 32bit ACTLR is obviously 32bit. For 32bit software, the extra bits are accessible via ACTLR2... which KVM doesn't emulate. Suggested-by: Marc Zyngier Signed-off-by: James Morse Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20200529150656.7339-3-james.morse@arm.com --- arch/arm64/kvm/sys_regs_generic_v8.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/arch/arm64/kvm/sys_regs_generic_v8.c b/arch/arm64/kvm/sys_regs_generic_v8.c index 9cb6b4c8355a9..aa9d356451ebc 100644 --- a/arch/arm64/kvm/sys_regs_generic_v8.c +++ b/arch/arm64/kvm/sys_regs_generic_v8.c @@ -27,6 +27,14 @@ static bool access_actlr(struct kvm_vcpu *vcpu, return ignore_write(vcpu, p); p->regval = vcpu_read_sys_reg(vcpu, ACTLR_EL1); + + if (p->is_aarch32) { + if (r->Op2 & 2) + p->regval = upper_32_bits(p->regval); + else + p->regval = lower_32_bits(p->regval); + } + return true; } @@ -47,6 +55,8 @@ static const struct sys_reg_desc genericv8_cp15_regs[] = { /* ACTLR */ { Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b001), access_actlr }, + { Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b011), + access_actlr }, }; static struct kvm_sys_reg_target_table genericv8_target_table = { -- GitLab From e8679fedd026eb3b4655af83829d9036e32c9b97 Mon Sep 17 00:00:00 2001 From: James Morse Date: Fri, 29 May 2020 15:06:56 +0000 Subject: [PATCH 1273/1971] KVM: arm64: Stop save/restoring ACTLR_EL1 KVM sets HCR_EL2.TACR via HCR_GUEST_FLAGS. This means ACTLR* accesses from the guest are always trapped, and always return the value in the sys_regs array. The guest can't change the value of these registers, so we are save restoring the reset value, which came from the host. Stop save/restoring this register. Keep the storage for this register in sys_regs[] as this is how the value is exposed to user-space, removing it would break migration. 
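Tying this back to the ACTLR2 emulation above: for a 32bit guest, ACTLR returns the low half of the 64bit ACTLR_EL1 value and ACTLR2 (Op2 = 0b011) the high half, selected by bit 1 of Op2. A short sketch with local equivalents of the kernel's upper_32_bits()/lower_32_bits() helpers:

#include <stdint.h>
#include <stdio.h>

/* Local equivalents of the helpers used in access_actlr(). */
static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }
static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }

/* Pick the half an aarch32 guest asked for: ACTLR is Op2=0b001,
 * ACTLR2 is Op2=0b011, so bit 1 of Op2 selects the high word. */
static uint32_t read_actlr_aarch32(uint64_t actlr_el1, unsigned int op2)
{
	return (op2 & 2) ? upper_32_bits(actlr_el1) : lower_32_bits(actlr_el1);
}

int main(void)
{
	uint64_t actlr_el1 = 0x1122334455667788ULL;

	printf("ACTLR  (Op2=001): 0x%08x\n", read_actlr_aarch32(actlr_el1, 1));
	printf("ACTLR2 (Op2=011): 0x%08x\n", read_actlr_aarch32(actlr_el1, 3));
	return 0;
}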
Signed-off-by: James Morse Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20200529150656.7339-4-james.morse@arm.com --- arch/arm64/kvm/hyp/sysreg-sr.c | 2 -- arch/arm64/kvm/sys_regs.c | 2 -- 2 files changed, 4 deletions(-) diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c index 6d2df9fe0b5d2..0d60e6ee96b29 100644 --- a/arch/arm64/kvm/hyp/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/sysreg-sr.c @@ -39,7 +39,6 @@ static void __hyp_text __sysreg_save_el1_state(struct kvm_cpu_context *ctxt) { ctxt->sys_regs[CSSELR_EL1] = read_sysreg(csselr_el1); ctxt->sys_regs[SCTLR_EL1] = read_sysreg_el1(SYS_SCTLR); - ctxt->sys_regs[ACTLR_EL1] = read_sysreg(actlr_el1); ctxt->sys_regs[CPACR_EL1] = read_sysreg_el1(SYS_CPACR); ctxt->sys_regs[TTBR0_EL1] = read_sysreg_el1(SYS_TTBR0); ctxt->sys_regs[TTBR1_EL1] = read_sysreg_el1(SYS_TTBR1); @@ -122,7 +121,6 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt) isb(); } - write_sysreg(ctxt->sys_regs[ACTLR_EL1], actlr_el1); write_sysreg_el1(ctxt->sys_regs[CPACR_EL1], SYS_CPACR); write_sysreg_el1(ctxt->sys_regs[TTBR0_EL1], SYS_TTBR0); write_sysreg_el1(ctxt->sys_regs[TTBR1_EL1], SYS_TTBR1); diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 12f8d57a3cb8e..dfb966f3863a4 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -78,7 +78,6 @@ static bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val) switch (reg) { case CSSELR_EL1: *val = read_sysreg_s(SYS_CSSELR_EL1); break; case SCTLR_EL1: *val = read_sysreg_s(SYS_SCTLR_EL12); break; - case ACTLR_EL1: *val = read_sysreg_s(SYS_ACTLR_EL1); break; case CPACR_EL1: *val = read_sysreg_s(SYS_CPACR_EL12); break; case TTBR0_EL1: *val = read_sysreg_s(SYS_TTBR0_EL12); break; case TTBR1_EL1: *val = read_sysreg_s(SYS_TTBR1_EL12); break; @@ -118,7 +117,6 @@ static bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg) switch (reg) { case CSSELR_EL1: write_sysreg_s(val, SYS_CSSELR_EL1); break; case SCTLR_EL1: write_sysreg_s(val, SYS_SCTLR_EL12); break; - case ACTLR_EL1: write_sysreg_s(val, SYS_ACTLR_EL1); break; case CPACR_EL1: write_sysreg_s(val, SYS_CPACR_EL12); break; case TTBR0_EL1: write_sysreg_s(val, SYS_TTBR0_EL12); break; case TTBR1_EL1: write_sysreg_s(val, SYS_TTBR1_EL12); break; -- GitLab From 21998a351512eba4ed5969006f0c55882d995ada Mon Sep 17 00:00:00 2001 From: Anthony Steinhauser Date: Tue, 19 May 2020 06:40:42 -0700 Subject: [PATCH 1274/1971] x86/speculation: Avoid force-disabling IBPB based on STIBP and enhanced IBRS. When STIBP is unavailable or enhanced IBRS is available, Linux force-disables the IBPB mitigation of Spectre-BTB even when simultaneous multithreading is disabled. While attempts to enable IBPB using prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, ...) fail with EPERM, the seccomp syscall (or its prctl(PR_SET_SECCOMP, ...) equivalent) which are used e.g. by Chromium or OpenSSH succeed with no errors but the application remains silently vulnerable to cross-process Spectre v2 attacks (classical BTB poisoning). At the same time the SYSFS reporting (/sys/devices/system/cpu/vulnerabilities/spectre_v2) displays that IBPB is conditionally enabled when in fact it is unconditionally disabled. STIBP is useful only when SMT is enabled. When SMT is disabled and STIBP is unavailable, it makes no sense to force-disable also IBPB, because IBPB protects against cross-process Spectre-BTB attacks regardless of the SMT state. 
At the same time since missing STIBP was only observed on AMD CPUs, AMD does not recommend using STIBP, but recommends using IBPB, so disabling IBPB because of missing STIBP goes directly against AMD's advice: https://developer.amd.com/wp-content/resources/Architecture_Guidelines_Update_Indirect_Branch_Control.pdf Similarly, enhanced IBRS is designed to protect cross-core BTB poisoning and BTB-poisoning attacks from user space against kernel (and BTB-poisoning attacks from guest against hypervisor), it is not designed to prevent cross-process (or cross-VM) BTB poisoning between processes (or VMs) running on the same core. Therefore, even with enhanced IBRS it is necessary to flush the BTB during context-switches, so there is no reason to force disable IBPB when enhanced IBRS is available. Enable the prctl control of IBPB even when STIBP is unavailable or enhanced IBRS is available. Fixes: 7cc765a67d8e ("x86/speculation: Enable prctl mode for spectre_v2_user") Signed-off-by: Anthony Steinhauser Signed-off-by: Thomas Gleixner Cc: stable@vger.kernel.org --- arch/x86/kernel/cpu/bugs.c | 87 ++++++++++++++++++++++---------------- 1 file changed, 50 insertions(+), 37 deletions(-) diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index ed54b3b21c396..8d57562b1d2c3 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -495,7 +495,9 @@ early_param("nospectre_v1", nospectre_v1_cmdline); static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = SPECTRE_V2_NONE; -static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init = +static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init = + SPECTRE_V2_USER_NONE; +static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init = SPECTRE_V2_USER_NONE; #ifdef CONFIG_RETPOLINE @@ -641,15 +643,6 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd) break; } - /* - * At this point, an STIBP mode other than "off" has been set. - * If STIBP support is not being forced, check if STIBP always-on - * is preferred. - */ - if (mode != SPECTRE_V2_USER_STRICT && - boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON)) - mode = SPECTRE_V2_USER_STRICT_PREFERRED; - /* Initialize Indirect Branch Prediction Barrier */ if (boot_cpu_has(X86_FEATURE_IBPB)) { setup_force_cpu_cap(X86_FEATURE_USE_IBPB); @@ -672,23 +665,36 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd) pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n", static_key_enabled(&switch_mm_always_ibpb) ? "always-on" : "conditional"); + + spectre_v2_user_ibpb = mode; } - /* If enhanced IBRS is enabled no STIBP required */ - if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) + /* + * If enhanced IBRS is enabled or SMT impossible, STIBP is not + * required. + */ + if (!smt_possible || spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) return; /* - * If SMT is not possible or STIBP is not available clear the STIBP - * mode. + * At this point, an STIBP mode other than "off" has been set. + * If STIBP support is not being forced, check if STIBP always-on + * is preferred. + */ + if (mode != SPECTRE_V2_USER_STRICT && + boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON)) + mode = SPECTRE_V2_USER_STRICT_PREFERRED; + + /* + * If STIBP is not available, clear the STIBP mode. 
*/ - if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP)) + if (!boot_cpu_has(X86_FEATURE_STIBP)) mode = SPECTRE_V2_USER_NONE; + + spectre_v2_user_stibp = mode; + set_mode: - spectre_v2_user = mode; - /* Only print the STIBP mode when SMT possible */ - if (smt_possible) - pr_info("%s\n", spectre_v2_user_strings[mode]); + pr_info("%s\n", spectre_v2_user_strings[mode]); } static const char * const spectre_v2_strings[] = { @@ -921,7 +927,7 @@ void cpu_bugs_smt_update(void) { mutex_lock(&spec_ctrl_mutex); - switch (spectre_v2_user) { + switch (spectre_v2_user_stibp) { case SPECTRE_V2_USER_NONE: break; case SPECTRE_V2_USER_STRICT: @@ -1164,14 +1170,16 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) { switch (ctrl) { case PR_SPEC_ENABLE: - if (spectre_v2_user == SPECTRE_V2_USER_NONE) + if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && + spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) return 0; /* * Indirect branch speculation is always disabled in strict * mode. */ - if (spectre_v2_user == SPECTRE_V2_USER_STRICT || - spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED) + if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT || + spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || + spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED) return -EPERM; task_clear_spec_ib_disable(task); task_update_spec_tif(task); @@ -1182,10 +1190,12 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) * Indirect branch speculation is always allowed when * mitigation is force disabled. */ - if (spectre_v2_user == SPECTRE_V2_USER_NONE) + if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && + spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) return -EPERM; - if (spectre_v2_user == SPECTRE_V2_USER_STRICT || - spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED) + if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT || + spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || + spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED) return 0; task_set_spec_ib_disable(task); if (ctrl == PR_SPEC_FORCE_DISABLE) @@ -1216,7 +1226,8 @@ void arch_seccomp_spec_mitigate(struct task_struct *task) { if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP) ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE); - if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP) + if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || + spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) ib_prctl_set(task, PR_SPEC_FORCE_DISABLE); } #endif @@ -1247,22 +1258,24 @@ static int ib_prctl_get(struct task_struct *task) if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) return PR_SPEC_NOT_AFFECTED; - switch (spectre_v2_user) { - case SPECTRE_V2_USER_NONE: + if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && + spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) return PR_SPEC_ENABLE; - case SPECTRE_V2_USER_PRCTL: - case SPECTRE_V2_USER_SECCOMP: + else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT || + spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || + spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED) + return PR_SPEC_DISABLE; + else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL || + spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || + spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL || + spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) { if (task_spec_ib_force_disable(task)) return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; if (task_spec_ib_disable(task)) return PR_SPEC_PRCTL | PR_SPEC_DISABLE; return PR_SPEC_PRCTL | PR_SPEC_ENABLE; - case SPECTRE_V2_USER_STRICT: - case SPECTRE_V2_USER_STRICT_PREFERRED: - return PR_SPEC_DISABLE; - default: + } 
else return PR_SPEC_NOT_AFFECTED; - } } int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) @@ -1501,7 +1514,7 @@ static char *stibp_state(void) if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) return ""; - switch (spectre_v2_user) { + switch (spectre_v2_user_stibp) { case SPECTRE_V2_USER_NONE: return ", STIBP: disabled"; case SPECTRE_V2_USER_STRICT: -- GitLab From dbbe2ad02e9df26e372f38cc3e70dab9222c832e Mon Sep 17 00:00:00 2001 From: Anthony Steinhauser Date: Sun, 5 Jan 2020 12:19:43 -0800 Subject: [PATCH 1275/1971] x86/speculation: Prevent rogue cross-process SSBD shutdown On context switch the change of TIF_SSBD and TIF_SPEC_IB are evaluated to adjust the mitigations accordingly. This is optimized to avoid the expensive MSR write if not needed. This optimization is buggy and allows an attacker to shutdown the SSBD protection of a victim process. The update logic reads the cached base value for the speculation control MSR which has neither the SSBD nor the STIBP bit set. It then OR's the SSBD bit only when TIF_SSBD is different and requests the MSR update. That means if TIF_SSBD of the previous and next task are the same, then the base value is not updated, even if TIF_SSBD is set. The MSR write is not requested. Subsequently if the TIF_STIBP bit differs then the STIBP bit is updated in the base value and the MSR is written with a wrong SSBD value. This was introduced when the per task/process conditional STIPB switching was added on top of the existing SSBD switching. It is exploitable if the attacker creates a process which enforces SSBD and has the contrary value of STIBP than the victim process (i.e. if the victim process enforces STIBP, the attacker process must not enforce it; if the victim process does not enforce STIBP, the attacker process must enforce it) and schedule it on the same core as the victim process. If the victim runs after the attacker the victim becomes vulnerable to Spectre V4. To fix this, update the MSR value independent of the TIF_SSBD difference and dependent on the SSBD mitigation method available. This ensures that a subsequent STIPB initiated MSR write has the correct state of SSBD. [ tglx: Handle X86_FEATURE_VIRT_SSBD & X86_FEATURE_VIRT_SSBD correctly and massaged changelog ] Fixes: 5bfbe3ad5840 ("x86/speculation: Prepare for per task indirect branch speculation control") Signed-off-by: Anthony Steinhauser Signed-off-by: Thomas Gleixner Cc: stable@vger.kernel.org --- arch/x86/kernel/process.c | 28 ++++++++++------------------ 1 file changed, 10 insertions(+), 18 deletions(-) diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 35638f1c5791f..8f4533c1a4ec2 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -545,28 +545,20 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp, lockdep_assert_irqs_disabled(); - /* - * If TIF_SSBD is different, select the proper mitigation - * method. Note that if SSBD mitigation is disabled or permanentely - * enabled this branch can't be taken because nothing can set - * TIF_SSBD. - */ - if (tif_diff & _TIF_SSBD) { - if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) { + /* Handle change of TIF_SSBD depending on the mitigation method. 
*/ + if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) { + if (tif_diff & _TIF_SSBD) amd_set_ssb_virt_state(tifn); - } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) { + } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) { + if (tif_diff & _TIF_SSBD) amd_set_core_ssb_state(tifn); - } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) || - static_cpu_has(X86_FEATURE_AMD_SSBD)) { - msr |= ssbd_tif_to_spec_ctrl(tifn); - updmsr = true; - } + } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) || + static_cpu_has(X86_FEATURE_AMD_SSBD)) { + updmsr |= !!(tif_diff & _TIF_SSBD); + msr |= ssbd_tif_to_spec_ctrl(tifn); } - /* - * Only evaluate TIF_SPEC_IB if conditional STIBP is enabled, - * otherwise avoid the MSR write. - */ + /* Only evaluate TIF_SPEC_IB if conditional STIBP is enabled. */ if (IS_ENABLED(CONFIG_SMP) && static_branch_unlikely(&switch_to_cond_stibp)) { updmsr |= !!(tif_diff & _TIF_SPEC_IB); -- GitLab From 4d8df8cbb9156b0a0ab3f802b80cb5db57acc0bf Mon Sep 17 00:00:00 2001 From: Anthony Steinhauser Date: Sun, 7 Jun 2020 05:44:19 -0700 Subject: [PATCH 1276/1971] x86/speculation: PR_SPEC_FORCE_DISABLE enforcement for indirect branches. Currently, it is possible to enable indirect branch speculation even after it was force-disabled using the PR_SPEC_FORCE_DISABLE option. Moreover, the PR_GET_SPECULATION_CTRL command gives afterwards an incorrect result (force-disabled when it is in fact enabled). This also is inconsistent vs. STIBP and the documention which cleary states that PR_SPEC_FORCE_DISABLE cannot be undone. Fix this by actually enforcing force-disabled indirect branch speculation. PR_SPEC_ENABLE called after PR_SPEC_FORCE_DISABLE now fails with -EPERM as described in the documentation. Fixes: 9137bb27e60e ("x86/speculation: Add prctl() control for indirect branch speculation") Signed-off-by: Anthony Steinhauser Signed-off-by: Thomas Gleixner Cc: stable@vger.kernel.org --- arch/x86/kernel/cpu/bugs.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 8d57562b1d2c3..56f573aa764f4 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -1175,11 +1175,14 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) return 0; /* * Indirect branch speculation is always disabled in strict - * mode. + * mode. It can neither be enabled if it was force-disabled + * by a previous prctl call. + */ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT || spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || - spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED) + spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED || + task_spec_ib_force_disable(task)) return -EPERM; task_clear_spec_ib_disable(task); task_update_spec_tif(task); -- GitLab From d8ad6d39c35d2b44b3d48b787df7f3359381dcbf Mon Sep 17 00:00:00 2001 From: Bob Haarman Date: Tue, 2 Jun 2020 12:30:59 -0700 Subject: [PATCH 1277/1971] x86_64: Fix jiffies ODR violation 'jiffies' and 'jiffies_64' are meant to alias (two different symbols that share the same address). Most architectures make the symbols alias to the same address via a linker script assignment in their arch//kernel/vmlinux.lds.S: jiffies = jiffies_64; which is effectively a definition of jiffies. jiffies and jiffies_64 are both forward declared for all architectures in include/linux/jiffies.h. jiffies_64 is defined in kernel/time/timer.c. x86_64 was peculiar in that it wasn't doing the above linker script assignment, but rather was: 1. 
defining jiffies in arch/x86/kernel/time.c instead via the linker script. 2. overriding the symbol jiffies_64 from kernel/time/timer.c in arch/x86/kernel/vmlinux.lds.s via 'jiffies_64 = jiffies;'. As Fangrui notes: In LLD, symbol assignments in linker scripts override definitions in object files. GNU ld appears to have the same behavior. It would probably make sense for LLD to error "duplicate symbol" but GNU ld is unlikely to adopt for compatibility reasons. This results in an ODR violation (UB), which seems to have survived thus far. Where it becomes harmful is when; 1. -fno-semantic-interposition is used: As Fangrui notes: Clang after LLVM commit 5b22bcc2b70d ("[X86][ELF] Prefer to lower MC_GlobalAddress operands to .Lfoo$local") defaults to -fno-semantic-interposition similar semantics which help -fpic/-fPIC code avoid GOT/PLT when the referenced symbol is defined within the same translation unit. Unlike GCC -fno-semantic-interposition, Clang emits such relocations referencing local symbols for non-pic code as well. This causes references to jiffies to refer to '.Ljiffies$local' when jiffies is defined in the same translation unit. Likewise, references to jiffies_64 become references to '.Ljiffies_64$local' in translation units that define jiffies_64. Because these differ from the names used in the linker script, they will not be rewritten to alias one another. 2. Full LTO Full LTO effectively treats all source files as one translation unit, causing these local references to be produced everywhere. When the linker processes the linker script, there are no longer any references to jiffies_64' anywhere to replace with 'jiffies'. And thus '.Ljiffies$local' and '.Ljiffies_64$local' no longer alias at all. In the process of porting patches enabling Full LTO from arm64 to x86_64, spooky bugs have been observed where the kernel appeared to boot, but init doesn't get scheduled. Avoid the ODR violation by matching other architectures and define jiffies only by linker script. For -fno-semantic-interposition + Full LTO, there is no longer a global definition of jiffies for the compiler to produce a local symbol which the linker script won't ensure aliases to jiffies_64. 
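[ Illustration of the aliasing described above, simplified from the declarations in include/linux/jiffies.h with alignment and section attributes trimmed: jiffies_64 has the only C definition, jiffies exists purely through the linker-script assignment, and both names therefore resolve to the same storage. ]

    /* Sketch of the declarations every architecture sees (simplified). */
    extern u64 jiffies_64;                  /* defined in kernel/time/timer.c */
    extern unsigned long volatile jiffies;  /* defined only by "jiffies = jiffies_64;" in the linker script */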
Fixes: 40747ffa5aa8 ("asmlinkage: Make jiffies visible") Reported-by: Nathan Chancellor Reported-by: Alistair Delva Debugged-by: Nick Desaulniers Debugged-by: Sami Tolvanen Suggested-by: Fangrui Song Signed-off-by: Bob Haarman Signed-off-by: Thomas Gleixner Tested-by: Sedat Dilek # build+boot on Reviewed-by: Andi Kleen Reviewed-by: Josh Poimboeuf Cc: stable@vger.kernel.org Link: https://github.com/ClangBuiltLinux/linux/issues/852 Link: https://lkml.kernel.org/r/20200602193100.229287-1-inglorion@google.com --- arch/x86/kernel/time.c | 4 ---- arch/x86/kernel/vmlinux.lds.S | 4 ++-- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c index 106e7f87f5344..f395729826357 100644 --- a/arch/x86/kernel/time.c +++ b/arch/x86/kernel/time.c @@ -25,10 +25,6 @@ #include #include -#ifdef CONFIG_X86_64 -__visible volatile unsigned long jiffies __cacheline_aligned_in_smp = INITIAL_JIFFIES; -#endif - unsigned long profile_pc(struct pt_regs *regs) { unsigned long pc = instruction_pointer(regs); diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 1bf7e312361f4..7c35556c78272 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -40,13 +40,13 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT) #ifdef CONFIG_X86_32 OUTPUT_ARCH(i386) ENTRY(phys_startup_32) -jiffies = jiffies_64; #else OUTPUT_ARCH(i386:x86-64) ENTRY(phys_startup_64) -jiffies_64 = jiffies; #endif +jiffies = jiffies_64; + #if defined(CONFIG_X86_64) /* * On 64-bit, align RODATA to 2MB so we retain large page mappings for -- GitLab From ef3e40a7ea8dbe2abd0a345032cd7d5023b9684f Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 3 Jun 2020 18:24:01 +0100 Subject: [PATCH 1278/1971] KVM: arm64: Save the host's PtrAuth keys in non-preemptible context When using the PtrAuth feature in a guest, we need to save the host's keys before allowing the guest to program them. For that, we dump them in a per-CPU data structure (the so called host context). But both call sites that do this are in preemptible context, which may end up in disaster should the vcpu thread get preempted before reentering the guest. Instead, save the keys eagerly on each vcpu_load(). This has an increased overhead, but is at least safe. 
Cc: stable@vger.kernel.org Reviewed-by: Mark Rutland Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_emulate.h | 6 ------ arch/arm64/kvm/arm.c | 18 +++++++++++++++++- arch/arm64/kvm/handle_exit.c | 19 ++----------------- 3 files changed, 19 insertions(+), 24 deletions(-) diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index a30b4eec7cb40..977843e4d5fb7 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -112,12 +112,6 @@ static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu) vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK); } -static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu) -{ - if (vcpu_has_ptrauth(vcpu)) - vcpu_ptrauth_disable(vcpu); -} - static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu) { return vcpu->arch.vsesr_el2; diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index d6988401c22a5..152049c5055dc 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -337,6 +337,12 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) preempt_enable(); } +#define __ptrauth_save_key(regs, key) \ +({ \ + regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \ + regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \ +}) + void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { int *last_ran; @@ -370,7 +376,17 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) else vcpu_set_wfx_traps(vcpu); - vcpu_ptrauth_setup_lazy(vcpu); + if (vcpu_has_ptrauth(vcpu)) { + struct kvm_cpu_context *ctxt = vcpu->arch.host_cpu_context; + + __ptrauth_save_key(ctxt->sys_regs, APIA); + __ptrauth_save_key(ctxt->sys_regs, APIB); + __ptrauth_save_key(ctxt->sys_regs, APDA); + __ptrauth_save_key(ctxt->sys_regs, APDB); + __ptrauth_save_key(ctxt->sys_regs, APGA); + + vcpu_ptrauth_disable(vcpu); + } } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index eb194696ef622..065251efa2e6a 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -162,31 +162,16 @@ static int handle_sve(struct kvm_vcpu *vcpu, struct kvm_run *run) return 1; } -#define __ptrauth_save_key(regs, key) \ -({ \ - regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \ - regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \ -}) - /* * Handle the guest trying to use a ptrauth instruction, or trying to access a * ptrauth register. */ void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu) { - struct kvm_cpu_context *ctxt; - - if (vcpu_has_ptrauth(vcpu)) { + if (vcpu_has_ptrauth(vcpu)) vcpu_ptrauth_enable(vcpu); - ctxt = vcpu->arch.host_cpu_context; - __ptrauth_save_key(ctxt->sys_regs, APIA); - __ptrauth_save_key(ctxt->sys_regs, APIB); - __ptrauth_save_key(ctxt->sys_regs, APDA); - __ptrauth_save_key(ctxt->sys_regs, APDB); - __ptrauth_save_key(ctxt->sys_regs, APGA); - } else { + else kvm_inject_undefined(vcpu); - } } /* -- GitLab From 2067028512b60c78961c3e075bb72cc44310355c Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 27 May 2020 19:15:30 -0700 Subject: [PATCH 1279/1971] KVM: selftests: Ignore KVM 5-level paging support for VM_MODE_PXXV48_4K Explicitly set the VA width to 48 bits for the x86_64-only PXXV48_4K VM mode instead of asserting the guest VA width is 48 bits. The fact that KVM supports 5-level paging is irrelevant unless the selftests opt-in to 5-level paging by setting CR4.LA57 for the guest. 
The overzealous assert prevents running the selftests on a kernel with 5-level paging enabled. Incorporate LA57 into the assert instead of removing the assert entirely as a sanity check of KVM's CPUID output. Fixes: 567a9f1e9deb ("KVM: selftests: Introduce VM_MODE_PXXV48_4K") Reported-by: Sergio Perez Gonzalez Cc: Adriana Cervantes Jimenez Cc: Peter Xu Signed-off-by: Sean Christopherson Message-Id: <20200528021530.28091-1-sean.j.christopherson@intel.com> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/lib/kvm_util.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index c9cede5c7d0de..74776ee228f2d 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -195,11 +195,18 @@ struct kvm_vm *_vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm) case VM_MODE_PXXV48_4K: #ifdef __x86_64__ kvm_get_cpu_address_width(&vm->pa_bits, &vm->va_bits); - TEST_ASSERT(vm->va_bits == 48, "Linear address width " - "(%d bits) not supported", vm->va_bits); + /* + * Ignore KVM support for 5-level paging (vm->va_bits == 57), + * it doesn't take effect unless a CR4.LA57 is set, which it + * isn't for this VM_MODE. + */ + TEST_ASSERT(vm->va_bits == 48 || vm->va_bits == 57, + "Linear address width (%d bits) not supported", + vm->va_bits); pr_debug("Guest physical address width detected: %d\n", vm->pa_bits); vm->pgtable_levels = 4; + vm->va_bits = 48; #else TEST_FAIL("VM_MODE_PXXV48_4K not supported on non-x86 platforms"); #endif -- GitLab From 80fbd280beea1bcc298660f9024cbc4bf392c7f7 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 8 Jun 2020 11:02:18 -0700 Subject: [PATCH 1280/1971] KVM: x86: Unexport x86_fpu_cache and make it static Make x86_fpu_cache static now that FPU allocation and destruction is handled entirely by common x86 code. Signed-off-by: Sean Christopherson Message-Id: <20200608180218.20946-1-sean.j.christopherson@intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 1 - arch/x86/kvm/x86.c | 3 +-- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 1da5858501ca3..7030f22212595 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1306,7 +1306,6 @@ struct kvm_arch_async_pf { extern u64 __read_mostly host_efer; extern struct kvm_x86_ops kvm_x86_ops; -extern struct kmem_cache *x86_fpu_cache; #define __KVM_HAVE_ARCH_VM_ALLOC static inline struct kvm *kvm_arch_alloc_vm(void) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 24de847af52eb..87ee76b73d723 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -239,8 +239,7 @@ u64 __read_mostly host_xcr0; u64 __read_mostly supported_xcr0; EXPORT_SYMBOL_GPL(supported_xcr0); -struct kmem_cache *x86_fpu_cache; -EXPORT_SYMBOL_GPL(x86_fpu_cache); +static struct kmem_cache *x86_fpu_cache; static struct kmem_cache *x86_emulator_cache; -- GitLab From 29eb5a3c57f7e06d803bb44a0ce2f9ed79f39cd9 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 4 Jun 2020 11:14:00 +0100 Subject: [PATCH 1281/1971] KVM: arm64: Handle PtrAuth traps early The current way we deal with PtrAuth is a bit heavy handed: - We forcefully save the host's keys on each vcpu_load() - Handling the PtrAuth trap forces us to go all the way back to the exit handling code to just set the HCR bits Overall, this is pretty cumbersome. 
A better approach would be to handle it the same way we deal with the FPSIMD registers: - On vcpu_load() disable PtrAuth for the guest - On first use, save the host's keys, enable PtrAuth in the guest Crucially, this can happen as a fixup, which is done very early on exit. We can then reenter the guest immediately without leaving the hypervisor role. Another thing is that it simplify the rest of the host handling: exiting all the way to the host means that the only possible outcome for this trap is to inject an UNDEF. Reviewed-by: Mark Rutland Signed-off-by: Marc Zyngier --- arch/arm64/kvm/arm.c | 17 +--------- arch/arm64/kvm/handle_exit.c | 17 ++-------- arch/arm64/kvm/hyp/switch.c | 61 ++++++++++++++++++++++++++++++++++++ arch/arm64/kvm/sys_regs.c | 13 +++----- 4 files changed, 70 insertions(+), 38 deletions(-) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 152049c5055dc..14b7472666079 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -337,12 +337,6 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) preempt_enable(); } -#define __ptrauth_save_key(regs, key) \ -({ \ - regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \ - regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \ -}) - void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { int *last_ran; @@ -376,17 +370,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) else vcpu_set_wfx_traps(vcpu); - if (vcpu_has_ptrauth(vcpu)) { - struct kvm_cpu_context *ctxt = vcpu->arch.host_cpu_context; - - __ptrauth_save_key(ctxt->sys_regs, APIA); - __ptrauth_save_key(ctxt->sys_regs, APIB); - __ptrauth_save_key(ctxt->sys_regs, APDA); - __ptrauth_save_key(ctxt->sys_regs, APDB); - __ptrauth_save_key(ctxt->sys_regs, APGA); - + if (vcpu_has_ptrauth(vcpu)) vcpu_ptrauth_disable(vcpu); - } } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index 065251efa2e6a..5a02d4c90559e 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -162,25 +162,14 @@ static int handle_sve(struct kvm_vcpu *vcpu, struct kvm_run *run) return 1; } -/* - * Handle the guest trying to use a ptrauth instruction, or trying to access a - * ptrauth register. - */ -void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu) -{ - if (vcpu_has_ptrauth(vcpu)) - vcpu_ptrauth_enable(vcpu); - else - kvm_inject_undefined(vcpu); -} - /* * Guest usage of a ptrauth instruction (which the guest EL1 did not turn into - * a NOP). + * a NOP). If we get here, it is that we didn't fixup ptrauth on exit, and all + * that we can do is give the guest an UNDEF. 
*/ static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu, struct kvm_run *run) { - kvm_arm_vcpu_ptrauth_trap(vcpu); + kvm_inject_undefined(vcpu); return 1; } diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index c07a45643cd41..d60c2ef0fe8c0 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -490,6 +490,64 @@ static bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu) return true; } +static bool __hyp_text esr_is_ptrauth_trap(u32 esr) +{ + u32 ec = ESR_ELx_EC(esr); + + if (ec == ESR_ELx_EC_PAC) + return true; + + if (ec != ESR_ELx_EC_SYS64) + return false; + + switch (esr_sys64_to_sysreg(esr)) { + case SYS_APIAKEYLO_EL1: + case SYS_APIAKEYHI_EL1: + case SYS_APIBKEYLO_EL1: + case SYS_APIBKEYHI_EL1: + case SYS_APDAKEYLO_EL1: + case SYS_APDAKEYHI_EL1: + case SYS_APDBKEYLO_EL1: + case SYS_APDBKEYHI_EL1: + case SYS_APGAKEYLO_EL1: + case SYS_APGAKEYHI_EL1: + return true; + } + + return false; +} + +#define __ptrauth_save_key(regs, key) \ +({ \ + regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \ + regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \ +}) + +static bool __hyp_text __hyp_handle_ptrauth(struct kvm_vcpu *vcpu) +{ + struct kvm_cpu_context *ctxt; + u64 val; + + if (!vcpu_has_ptrauth(vcpu) || + !esr_is_ptrauth_trap(kvm_vcpu_get_hsr(vcpu))) + return false; + + ctxt = kern_hyp_va(vcpu->arch.host_cpu_context); + __ptrauth_save_key(ctxt->sys_regs, APIA); + __ptrauth_save_key(ctxt->sys_regs, APIB); + __ptrauth_save_key(ctxt->sys_regs, APDA); + __ptrauth_save_key(ctxt->sys_regs, APDB); + __ptrauth_save_key(ctxt->sys_regs, APGA); + + vcpu_ptrauth_enable(vcpu); + + val = read_sysreg(hcr_el2); + val |= (HCR_API | HCR_APK); + write_sysreg(val, hcr_el2); + + return true; +} + /* * Return true when we were able to fixup the guest exit and should return to * the guest, false when we should restore the host state and return to the @@ -524,6 +582,9 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code) if (__hyp_handle_fpsimd(vcpu)) return true; + if (__hyp_handle_ptrauth(vcpu)) + return true; + if (!__populate_fault_info(vcpu)) return true; diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index ad1d57501d6d8..564995084cf87 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1034,16 +1034,13 @@ static bool trap_ptrauth(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *rd) { - kvm_arm_vcpu_ptrauth_trap(vcpu); - /* - * Return false for both cases as we never skip the trapped - * instruction: - * - * - Either we re-execute the same key register access instruction - * after enabling ptrauth. - * - Or an UNDEF is injected as ptrauth is not supported/enabled. + * If we land here, that is because we didn't fixup the access on exit + * by allowing the PtrAuth sysregs. The only way this happens is when + * the guest does not have PtrAuth support enabled. */ + kvm_inject_undefined(vcpu); + return false; } -- GitLab From b990d37fdf6781fd0907ffd14d0dff16b5d58ffa Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sun, 7 Jun 2020 10:55:28 +0100 Subject: [PATCH 1282/1971] KVM: arm64: Stop sparse from moaning at __hyp_this_cpu_ptr Sparse complains that __hyp_this_cpu_ptr() returns something that is flagged noderef and not in the correct address space (both being the result of the __percpu annotation). Pretend that __hyp_this_cpu_ptr() knows what it is doing by forcefully casting the pointer with __kernel __force. 
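[ Rough sketch of what those annotations mean to sparse; the real definitions live in include/linux/compiler_types.h under __CHECKER__, and the address-space numbers below are an assumption for illustration. ]

    /* Illustrative approximations, not the kernel's exact macros. */
    #define __percpu __attribute__((noderef, address_space(3)))  /* not directly dereferenceable */
    #define __kernel __attribute__((address_space(0)))           /* ordinary kernel pointer */
    #define __force  __attribute__((force))                      /* suppress the cast warning */

    /*
     * Hence "(typeof(sym) __kernel __force *)__ptr" tells sparse that the per-cpu
     * symbol, once offset by tpidr_el2, may be treated as a plain kernel pointer.
     */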
Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_asm.h | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 0c9b5fc4ba0ae..d9b7da15dbca4 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -81,12 +81,19 @@ extern u32 __kvm_get_mdcr_el2(void); extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ]; -/* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */ +/* + * Home-grown __this_cpu_{ptr,read} variants that always work at HYP, + * provided that sym is really a *symbol* and not a pointer obtained from + * a data structure. As for SHIFT_PERCPU_PTR(), the creative casting keeps + * sparse quiet. + */ #define __hyp_this_cpu_ptr(sym) \ ({ \ - void *__ptr = hyp_symbol_addr(sym); \ + void *__ptr; \ + __verify_pcpu_ptr(&sym); \ + __ptr = hyp_symbol_addr(sym); \ __ptr += read_sysreg(tpidr_el2); \ - (typeof(&sym))__ptr; \ + (typeof(sym) __kernel __force *)__ptr; \ }) #define __hyp_this_cpu_read(sym) \ -- GitLab From 07da1ffaa1373f99331712faa67a00b5b807dfe8 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 5 Jun 2020 14:08:13 +0100 Subject: [PATCH 1283/1971] KVM: arm64: Remove host_cpu_context member from vcpu structure For very long, we have kept this pointer back to the per-cpu host state, despite having working per-cpu accessors at EL2 for some time now. Recent investigations have shown that this pointer is easy to abuse in preemptible context, which is a sure sign that it would better be gone. Not to mention that a per-cpu pointer is faster to access at all times. Reported-by: Andrew Scull Acked-by: Mark Rutland Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 3 --- arch/arm64/kvm/arm.c | 3 --- arch/arm64/kvm/hyp/debug-sr.c | 4 ++-- arch/arm64/kvm/hyp/switch.c | 6 +++--- arch/arm64/kvm/hyp/sysreg-sr.c | 6 ++++-- arch/arm64/kvm/pmu.c | 8 ++------ 6 files changed, 11 insertions(+), 19 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 59029e90b557e..ada1faa92211b 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -284,9 +284,6 @@ struct kvm_vcpu_arch { struct kvm_guest_debug_arch vcpu_debug_state; struct kvm_guest_debug_arch external_debug_state; - /* Pointer to host CPU context */ - struct kvm_cpu_context *host_cpu_context; - struct thread_info *host_thread_info; /* hyp VA */ struct user_fpsimd_state *host_fpsimd_state; /* hyp VA */ diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 14b7472666079..6ddaa23ef346f 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -340,10 +340,8 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { int *last_ran; - kvm_host_data_t *cpu_data; last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran); - cpu_data = this_cpu_ptr(&kvm_host_data); /* * We might get preempted before the vCPU actually runs, but @@ -355,7 +353,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) } vcpu->cpu = cpu; - vcpu->arch.host_cpu_context = &cpu_data->host_ctxt; kvm_vgic_load(vcpu); kvm_timer_vcpu_load(vcpu); diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c index 0fc9872a14671..e95af204fec74 100644 --- a/arch/arm64/kvm/hyp/debug-sr.c +++ b/arch/arm64/kvm/hyp/debug-sr.c @@ -185,7 +185,7 @@ void __hyp_text __debug_switch_to_guest(struct kvm_vcpu *vcpu) if (!(vcpu->arch.flags & 
KVM_ARM64_DEBUG_DIRTY)) return; - host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context); + host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt; guest_ctxt = &vcpu->arch.ctxt; host_dbg = &vcpu->arch.host_debug_state.regs; guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr); @@ -207,7 +207,7 @@ void __hyp_text __debug_switch_to_host(struct kvm_vcpu *vcpu) if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)) return; - host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context); + host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt; guest_ctxt = &vcpu->arch.ctxt; host_dbg = &vcpu->arch.host_debug_state.regs; guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr); diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index d60c2ef0fe8c0..1853c1788e0c6 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -532,7 +532,7 @@ static bool __hyp_text __hyp_handle_ptrauth(struct kvm_vcpu *vcpu) !esr_is_ptrauth_trap(kvm_vcpu_get_hsr(vcpu))) return false; - ctxt = kern_hyp_va(vcpu->arch.host_cpu_context); + ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt; __ptrauth_save_key(ctxt->sys_regs, APIA); __ptrauth_save_key(ctxt->sys_regs, APIB); __ptrauth_save_key(ctxt->sys_regs, APDA); @@ -703,7 +703,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) struct kvm_cpu_context *guest_ctxt; u64 exit_code; - host_ctxt = vcpu->arch.host_cpu_context; + host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt; host_ctxt->__hyp_running_vcpu = vcpu; guest_ctxt = &vcpu->arch.ctxt; @@ -808,7 +808,7 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) vcpu = kern_hyp_va(vcpu); - host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context); + host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt; host_ctxt->__hyp_running_vcpu = vcpu; guest_ctxt = &vcpu->arch.ctxt; diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c index 6d2df9fe0b5d2..143d7b7358f2c 100644 --- a/arch/arm64/kvm/hyp/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/sysreg-sr.c @@ -265,12 +265,13 @@ void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu) */ void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu) { - struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context; struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt; + struct kvm_cpu_context *host_ctxt; if (!has_vhe()) return; + host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt; __sysreg_save_user_state(host_ctxt); /* @@ -301,12 +302,13 @@ void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu) */ void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu) { - struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context; struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt; + struct kvm_cpu_context *host_ctxt; if (!has_vhe()) return; + host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt; deactivate_traps_vhe_put(); __sysreg_save_el1_state(guest_ctxt); diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c index e71d00bb5271b..b5ae3a5d509e4 100644 --- a/arch/arm64/kvm/pmu.c +++ b/arch/arm64/kvm/pmu.c @@ -163,15 +163,13 @@ static void kvm_vcpu_pmu_disable_el0(unsigned long events) */ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) { - struct kvm_cpu_context *host_ctxt; struct kvm_host_data *host; u32 events_guest, events_host; if (!has_vhe()) return; - host_ctxt = vcpu->arch.host_cpu_context; - host = container_of(host_ctxt, struct kvm_host_data, host_ctxt); + host = this_cpu_ptr(&kvm_host_data); events_guest = host->pmu_events.events_guest; events_host = host->pmu_events.events_host; @@ -184,15 +182,13 @@ void 
kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) */ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) { - struct kvm_cpu_context *host_ctxt; struct kvm_host_data *host; u32 events_guest, events_host; if (!has_vhe()) return; - host_ctxt = vcpu->arch.host_cpu_context; - host = container_of(host_ctxt, struct kvm_host_data, host_ctxt); + host = this_cpu_ptr(&kvm_host_data); events_guest = host->pmu_events.events_guest; events_host = host->pmu_events.events_host; -- GitLab From 544fc7dbbf920a3e64d109c416ee229e8e1763c5 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Mon, 8 Jun 2020 02:03:15 -0400 Subject: [PATCH 1284/1971] virtio_mem: convert device block size into 64bit If subblock size is large (e.g. 1G) 32 bit math involving it can overflow. Rather than try to catch all instances of that, let's tweak block size to 64 bit. It ripples through UAPI which is an ABI change, but it's not too late to make it, and it will allow supporting >4Gbyte blocks while might become necessary down the road. Fixes: 5f1f79bbc9e26 ("virtio-mem: Paravirtualized memory hotplug") Signed-off-by: Michael S. Tsirkin Acked-by: David Hildenbrand --- drivers/virtio/virtio_mem.c | 18 +++++++++--------- include/uapi/linux/virtio_mem.h | 4 ++-- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c index 2f357142ea5e8..50c689f250450 100644 --- a/drivers/virtio/virtio_mem.c +++ b/drivers/virtio/virtio_mem.c @@ -77,7 +77,7 @@ struct virtio_mem { uint64_t requested_size; /* The device block size (for communicating with the device). */ - uint32_t device_block_size; + uint64_t device_block_size; /* The translated node id. NUMA_NO_NODE in case not specified. */ int nid; /* Physical start address of the memory region. */ @@ -86,7 +86,7 @@ struct virtio_mem { uint64_t region_size; /* The subblock size. */ - uint32_t subblock_size; + uint64_t subblock_size; /* The number of subblocks per memory block. */ uint32_t nb_sb_per_mb; @@ -1698,9 +1698,9 @@ static int virtio_mem_init(struct virtio_mem *vm) * - At least the device block size. * In the worst case, a single subblock per memory block. 
*/ - vm->subblock_size = PAGE_SIZE * 1u << max_t(uint32_t, MAX_ORDER - 1, - pageblock_order); - vm->subblock_size = max_t(uint32_t, vm->device_block_size, + vm->subblock_size = PAGE_SIZE * 1ul << max_t(uint32_t, MAX_ORDER - 1, + pageblock_order); + vm->subblock_size = max_t(uint64_t, vm->device_block_size, vm->subblock_size); vm->nb_sb_per_mb = memory_block_size_bytes() / vm->subblock_size; @@ -1713,12 +1713,12 @@ static int virtio_mem_init(struct virtio_mem *vm) dev_info(&vm->vdev->dev, "start address: 0x%llx", vm->addr); dev_info(&vm->vdev->dev, "region size: 0x%llx", vm->region_size); - dev_info(&vm->vdev->dev, "device block size: 0x%x", - vm->device_block_size); + dev_info(&vm->vdev->dev, "device block size: 0x%llx", + (unsigned long long)vm->device_block_size); dev_info(&vm->vdev->dev, "memory block size: 0x%lx", memory_block_size_bytes()); - dev_info(&vm->vdev->dev, "subblock size: 0x%x", - vm->subblock_size); + dev_info(&vm->vdev->dev, "subblock size: 0x%llx", + (unsigned long long)vm->subblock_size); if (vm->nid != NUMA_NO_NODE) dev_info(&vm->vdev->dev, "nid: %d", vm->nid); diff --git a/include/uapi/linux/virtio_mem.h b/include/uapi/linux/virtio_mem.h index a455c488a995c..a9ffe041843c6 100644 --- a/include/uapi/linux/virtio_mem.h +++ b/include/uapi/linux/virtio_mem.h @@ -185,10 +185,10 @@ struct virtio_mem_resp { struct virtio_mem_config { /* Block size and alignment. Cannot change. */ - __u32 block_size; + __u64 block_size; /* Valid with VIRTIO_MEM_F_ACPI_PXM. Cannot change. */ __u16 node_id; - __u16 padding; + __u8 padding[6]; /* Start address of the memory region. Cannot change. */ __u64 addr; /* Region size (maximum). Cannot change. */ -- GitLab From 044e4b09223039e571e6ec540e25552054208765 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Mon, 8 Jun 2020 08:42:09 -0400 Subject: [PATCH 1285/1971] vhost/test: fix up after API change Pass a flag to request kernel thread use. Fixes: 01fcb1cbc88e ("vhost: allow device that does not depend on vhost worker") Signed-off-by: Michael S. Tsirkin --- drivers/vhost/test.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c index 9a3a09005e03b..0466921f4772c 100644 --- a/drivers/vhost/test.c +++ b/drivers/vhost/test.c @@ -120,7 +120,7 @@ static int vhost_test_open(struct inode *inode, struct file *f) vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ]; n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick; vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV, - VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT, NULL); + VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT, true, NULL); f->private_data = n; -- GitLab From 3b8a299a58b2afce464ae11324b59dcf0f1d10a7 Mon Sep 17 00:00:00 2001 From: "Pavel Machek (CIP)" Date: Sat, 6 Jun 2020 17:31:03 +0200 Subject: [PATCH 1286/1971] ASoC: meson: add missing free_irq() in error path free_irq() is missing in case of error, fix that. 
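[ Pieced together from the hunks below, the error handling in axg_fifo_pcm_open() ends up with the usual goto-unwind shape; sketch only, unrelated register setup elided, and the interrupt was requested earlier in the function. ]

    /* Sketch of the resulting unwind order (reassembled from the diff, not verbatim). */
    ret = clk_prepare_enable(fifo->pclk);
    if (ret)
            goto free_irq;

    /* ... status2 / pointer setup ... */

    ret = reset_control_deassert(fifo->arb);
    if (ret)
            goto free_clk;

    return 0;

    free_clk:
            clk_disable_unprepare(fifo->pclk);
    free_irq:
            free_irq(fifo->irq, ss);
            return ret;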
Signed-off-by: Pavel Machek (CIP) Reviewed-by: Jerome Brunet Link: https://lore.kernel.org/r/20200606153103.GA17905@amd Signed-off-by: Mark Brown --- sound/soc/meson/axg-fifo.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/sound/soc/meson/axg-fifo.c b/sound/soc/meson/axg-fifo.c index 2e9b56b29d313..b2e867113226b 100644 --- a/sound/soc/meson/axg-fifo.c +++ b/sound/soc/meson/axg-fifo.c @@ -249,7 +249,7 @@ int axg_fifo_pcm_open(struct snd_soc_component *component, /* Enable pclk to access registers and clock the fifo ip */ ret = clk_prepare_enable(fifo->pclk); if (ret) - return ret; + goto free_irq; /* Setup status2 so it reports the memory pointer */ regmap_update_bits(fifo->map, FIFO_CTRL1, @@ -269,8 +269,14 @@ int axg_fifo_pcm_open(struct snd_soc_component *component, /* Take memory arbitror out of reset */ ret = reset_control_deassert(fifo->arb); if (ret) - clk_disable_unprepare(fifo->pclk); + goto free_clk; + + return 0; +free_clk: + clk_disable_unprepare(fifo->pclk); +free_irq: + free_irq(fifo->irq, ss); return ret; } EXPORT_SYMBOL_GPL(axg_fifo_pcm_open); -- GitLab From 3680c2e9f4254d1f033bf00f540e47a51f8f996b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 25 May 2020 13:49:12 +0100 Subject: [PATCH 1287/1971] drm/i915/display: Only query DP state of a DDI encoder MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Avoid a NULL dereference for a mismatched encoder type, hit when probing state for all encoders. This is a band aid to prevent the OOPS as the right fix is "probably to swap the psr vs infoframes.enable checks, or outright disappear from this function" (Ville). Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/1892 Signed-off-by: Chris Wilson Acked-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200525124912.16019-1-chris@chris-wilson.co.uk (cherry picked from commit 22da5d846d54dd13183b57874b9d5611d583d7c8) Signed-off-by: Joonas Lahtinen --- drivers/gpu/drm/i915/display/intel_dp.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 40d42dcff0b7d..ed9e53c373a76 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -5206,6 +5206,9 @@ void intel_read_dp_sdp(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, unsigned int type) { + if (encoder->type != INTEL_OUTPUT_DDI) + return; + switch (type) { case DP_SDP_VSC: intel_read_dp_vsc_sdp(encoder, crtc_state, -- GitLab From a952f64d14e5f8461f04cd9d729037db9099ddb0 Mon Sep 17 00:00:00 2001 From: Deep Shah Date: Thu, 16 Apr 2020 16:45:20 -0700 Subject: [PATCH 1288/1971] MAINTAINERS: Update PARAVIRT_OPS_INTERFACE and VMWARE_HYPERVISOR_INTERFACE Thomas Hellstrom will be handing over VMware's maintainership of these interfaces to Deep Shah. Signed-off-by: Deep Shah Acked-by: Thomas Hellstrom Acked-by: Juergen Gross Link: https://lore.kernel.org/r/20200416234520.GA1700@prme-mon-cfl-mlw-07 Signed-off-by: Juergen Gross Signed-off-by: Boris Ostrovsky --- MAINTAINERS | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/MAINTAINERS b/MAINTAINERS index ecc0749810b00..775352e48b99c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -12734,7 +12734,7 @@ F: include/uapi/linux/ppdev.h PARAVIRT_OPS INTERFACE M: Juergen Gross -M: Thomas Hellstrom +M: Deep Shah M: "VMware, Inc." 
L: virtualization@lists.linux-foundation.org S: Supported @@ -18029,7 +18029,7 @@ S: Maintained F: drivers/misc/vmw_balloon.c VMWARE HYPERVISOR INTERFACE -M: Thomas Hellstrom +M: Deep Shah M: "VMware, Inc." L: virtualization@lists.linux-foundation.org S: Supported -- GitLab From 2ca068be09bf8e285036603823696140026dcbe7 Mon Sep 17 00:00:00 2001 From: Zhihao Cheng Date: Tue, 2 Jun 2020 09:30:45 +0800 Subject: [PATCH 1289/1971] afs: Fix memory leak in afs_put_sysnames() Fix afs_put_sysnames() to actually free the specified afs_sysnames object after its reference count has been decreased to zero and its contents have been released. Fixes: 6f8880d8e681557 ("afs: Implement @sys substitution handling") Signed-off-by: Zhihao Cheng Signed-off-by: David Howells --- fs/afs/proc.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/afs/proc.c b/fs/afs/proc.c index 22d00cf1913d1..e817fc740ba01 100644 --- a/fs/afs/proc.c +++ b/fs/afs/proc.c @@ -567,6 +567,7 @@ void afs_put_sysnames(struct afs_sysnames *sysnames) if (sysnames->subs[i] != afs_init_sysname && sysnames->subs[i] != sysnames->blank) kfree(sysnames->subs[i]); + kfree(sysnames); } } -- GitLab From 5749ce92c4b707353cbd934dd0518a1966d7988f Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 4 Jun 2020 21:31:39 +0100 Subject: [PATCH 1290/1971] afs: Fix file locking Fix AFS file locking to use the correct vnode pointer and remove a member of the afs_operation struct that is never set, but it is read and followed, causing an oops. This can be triggered by: flock -s /afs/example.com/foo sleep 1 when it calls the kernel to get a file lock. Fixes: e49c7b2f6de7 ("afs: Build an abstraction around an "operation" concept") Reported-by: Dave Botsch Signed-off-by: David Howells Tested-by: Dave Botsch --- fs/afs/flock.c | 2 +- fs/afs/internal.h | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/fs/afs/flock.c b/fs/afs/flock.c index 70e518f7bc19f..71eea2a908c70 100644 --- a/fs/afs/flock.c +++ b/fs/afs/flock.c @@ -71,7 +71,7 @@ static void afs_schedule_lock_extension(struct afs_vnode *vnode) void afs_lock_op_done(struct afs_call *call) { struct afs_operation *op = call->op; - struct afs_vnode *vnode = op->lock.lvnode; + struct afs_vnode *vnode = op->file[0].vnode; if (call->error == 0) { spin_lock(&vnode->lock); diff --git a/fs/afs/internal.h b/fs/afs/internal.h index e1621b0670cc4..519ffb104616e 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -795,7 +795,6 @@ struct afs_operation { struct afs_read *req; } fetch; struct { - struct afs_vnode *lvnode; /* vnode being locked */ afs_lock_type_t type; } lock; struct { -- GitLab From b73287f0b0745961b14e5ebcce92cc8ed24d4d52 Mon Sep 17 00:00:00 2001 From: Pierre-Louis Bossart Date: Mon, 8 Jun 2020 14:44:12 -0500 Subject: [PATCH 1291/1971] ASoC: soc-pcm: dpcm: fix playback/capture checks Recent changes in the ASoC core prevent multi-cpu BE dailinks from being used. DPCM does support multi-cpu DAIs for BE Dailinks, but not for FE. Handle the FE checks first, and make sure all DAIs support the same capabilities within the same dailink. 
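[ For context, a minimal sketch of the dailink shapes these checks operate on, using the same fields and SND_SOC_DAILINK_REG() convention seen elsewhere in this series; the names are made up. A front end (dynamic) may now have only a single CPU DAI, and every CPU DAI of a back end must support each direction the link declares. ]

    /* Illustrative only; the component definitions (SND_SOC_DAILINK_DEF) are omitted. */
    static struct snd_soc_dai_link example_links[] = {
            {       /* front end: exposes the PCM device */
                    .name = "Example FE",
                    .stream_name = "Example FE Audio",
                    .dynamic = 1,
                    .dpcm_playback = 1,
                    SND_SOC_DAILINK_REG(fe_cpu, dummy, platform),
            },
            {       /* back end: no_pcm, declared directions must hold for all CPU DAIs */
                    .name = "Example BE",
                    .stream_name = "Example BE Stream",
                    .no_pcm = 1,
                    .dpcm_playback = 1,
                    .dpcm_capture = 1,
                    SND_SOC_DAILINK_REG(be_cpu, be_codec, platform),
            },
    };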
Fixes: 9b5db059366ae2 ("ASoC: soc-pcm: dpcm: Only allow playback/capture if supported") Signed-off-by: Pierre-Louis Bossart Reviewed-by: Bard Liao Reviewed-by: Guennadi Liakhovetski Reviewed-by: Ranjani Sridharan Reviewed-by: Daniel Baluta BugLink: https://github.com/thesofproject/linux/issues/2031 Link: https://lore.kernel.org/r/20200608194415.4663-2-pierre-louis.bossart@linux.intel.com Signed-off-by: Mark Brown --- sound/soc/soc-pcm.c | 44 ++++++++++++++++++++++++++++++++++---------- 1 file changed, 34 insertions(+), 10 deletions(-) diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index 276505fb9d504..2c114b4542ce4 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c @@ -2789,20 +2789,44 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num) struct snd_pcm *pcm; char new_name[64]; int ret = 0, playback = 0, capture = 0; + int stream; int i; + if (rtd->dai_link->dynamic && rtd->num_cpus > 1) { + dev_err(rtd->dev, + "DPCM doesn't support Multi CPU for Front-Ends yet\n"); + return -EINVAL; + } + if (rtd->dai_link->dynamic || rtd->dai_link->no_pcm) { - cpu_dai = asoc_rtd_to_cpu(rtd, 0); - if (rtd->num_cpus > 1) { - dev_err(rtd->dev, - "DPCM doesn't support Multi CPU yet\n"); - return -EINVAL; + if (rtd->dai_link->dpcm_playback) { + stream = SNDRV_PCM_STREAM_PLAYBACK; + + for_each_rtd_cpu_dais(rtd, i, cpu_dai) + if (!snd_soc_dai_stream_valid(cpu_dai, + stream)) { + dev_err(rtd->card->dev, + "CPU DAI %s for rtd %s does not support playback\n", + cpu_dai->name, + rtd->dai_link->stream_name); + return -EINVAL; + } + playback = 1; + } + if (rtd->dai_link->dpcm_capture) { + stream = SNDRV_PCM_STREAM_CAPTURE; + + for_each_rtd_cpu_dais(rtd, i, cpu_dai) + if (!snd_soc_dai_stream_valid(cpu_dai, + stream)) { + dev_err(rtd->card->dev, + "CPU DAI %s for rtd %s does not support capture\n", + cpu_dai->name, + rtd->dai_link->stream_name); + return -EINVAL; + } + capture = 1; } - - playback = rtd->dai_link->dpcm_playback && - snd_soc_dai_stream_valid(cpu_dai, SNDRV_PCM_STREAM_PLAYBACK); - capture = rtd->dai_link->dpcm_capture && - snd_soc_dai_stream_valid(cpu_dai, SNDRV_PCM_STREAM_CAPTURE); } else { /* Adapt stream for codec2codec links */ int cpu_capture = rtd->dai_link->params ? -- GitLab From 607fa205a7e4dfad28b8a67ab1c985756ddbccb0 Mon Sep 17 00:00:00 2001 From: Bard Liao Date: Mon, 8 Jun 2020 14:44:13 -0500 Subject: [PATCH 1292/1971] ASoC: core: only convert non DPCM link to DPCM link Additional checks for valid DAIs expose a corner case, where existing BE dailinks get modified, e.g. HDMI links are tagged with dpcm_capture=1 even if the DAIs are for playback. This patch makes those changes conditional and flags configuration issues when a BE dailink is has no_pcm=0 but dpcm_playback or dpcm_capture=1 (which makes no sense). As discussed on the alsa-devel mailing list, there are redundant flags for dpcm_playback, dpcm_capture, playback_only, capture_only. This will have to be cleaned-up in a future update. For now only correct and flag problematic configurations. 
Fixes: 218fe9b7ec7f3 ("ASoC: soc-core: Set dpcm_playback / dpcm_capture") Suggested-by: Daniel Baluta Signed-off-by: Bard Liao Signed-off-by: Pierre-Louis Bossart Reviewed-by: Guennadi Liakhovetski Reviewed-by: Daniel Baluta Reviewed-by: Bard Liao Link: https://lore.kernel.org/r/20200608194415.4663-3-pierre-louis.bossart@linux.intel.com Signed-off-by: Mark Brown --- sound/soc/soc-core.c | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index b07eca2c6cccb..7b387202c5dbd 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -1648,9 +1648,25 @@ match: dai_link->platforms->name = component->name; /* convert non BE into BE */ - dai_link->no_pcm = 1; - dai_link->dpcm_playback = 1; - dai_link->dpcm_capture = 1; + if (!dai_link->no_pcm) { + dai_link->no_pcm = 1; + + if (dai_link->dpcm_playback) + dev_warn(card->dev, + "invalid configuration, dailink %s has flags no_pcm=0 and dpcm_playback=1\n", + dai_link->name); + if (dai_link->dpcm_capture) + dev_warn(card->dev, + "invalid configuration, dailink %s has flags no_pcm=0 and dpcm_capture=1\n", + dai_link->name); + + /* convert normal link into DPCM one */ + if (!(dai_link->dpcm_playback || + dai_link->dpcm_capture)) { + dai_link->dpcm_playback = !dai_link->capture_only; + dai_link->dpcm_capture = !dai_link->playback_only; + } + } /* * override any BE fixups -- GitLab From dc261875865539ca91bff9bc44d3e62f811dec1d Mon Sep 17 00:00:00 2001 From: Pierre-Louis Bossart Date: Mon, 8 Jun 2020 14:44:14 -0500 Subject: [PATCH 1293/1971] ASoC: Intel: boards: replace capture_only by dpcm_capture It's not clear why specific FE dailinks use capture_only flags, likely blind copy/paste from Chromebook driver to the other. Replace by dpcm_capture, this will make future alignment and removal of flags easier. 
Signed-off-by: Pierre-Louis Bossart Reviewed-by: Guennadi Liakhovetski Reviewed-by: Daniel Baluta Reviewed-by: Bard Liao Link: https://lore.kernel.org/r/20200608194415.4663-4-pierre-louis.bossart@linux.intel.com Signed-off-by: Mark Brown --- sound/soc/intel/boards/glk_rt5682_max98357a.c | 2 +- sound/soc/intel/boards/kbl_da7219_max98927.c | 4 ++-- sound/soc/intel/boards/kbl_rt5663_max98927.c | 2 +- sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/sound/soc/intel/boards/glk_rt5682_max98357a.c b/sound/soc/intel/boards/glk_rt5682_max98357a.c index 48eda1a8aa6cb..954ab01f695b8 100644 --- a/sound/soc/intel/boards/glk_rt5682_max98357a.c +++ b/sound/soc/intel/boards/glk_rt5682_max98357a.c @@ -407,7 +407,7 @@ static struct snd_soc_dai_link geminilake_dais[] = { .name = "Glk Audio Echo Reference cap", .stream_name = "Echoreference Capture", .init = NULL, - .capture_only = 1, + .dpcm_capture = 1, .nonatomic = 1, .dynamic = 1, SND_SOC_DAILINK_REG(echoref, dummy, platform), diff --git a/sound/soc/intel/boards/kbl_da7219_max98927.c b/sound/soc/intel/boards/kbl_da7219_max98927.c index cc9b5eab8b4a5..e29c31ffd241f 100644 --- a/sound/soc/intel/boards/kbl_da7219_max98927.c +++ b/sound/soc/intel/boards/kbl_da7219_max98927.c @@ -692,7 +692,7 @@ static struct snd_soc_dai_link kabylake_dais[] = { .name = "Kbl Audio Echo Reference cap", .stream_name = "Echoreference Capture", .init = NULL, - .capture_only = 1, + .dpcm_capture = 1, .nonatomic = 1, SND_SOC_DAILINK_REG(echoref, dummy, platform), }, @@ -858,7 +858,7 @@ static struct snd_soc_dai_link kabylake_max98_927_373_dais[] = { .name = "Kbl Audio Echo Reference cap", .stream_name = "Echoreference Capture", .init = NULL, - .capture_only = 1, + .dpcm_capture = 1, .nonatomic = 1, SND_SOC_DAILINK_REG(echoref, dummy, platform), }, diff --git a/sound/soc/intel/boards/kbl_rt5663_max98927.c b/sound/soc/intel/boards/kbl_rt5663_max98927.c index 658a9da3a40fd..09ba55fc36d51 100644 --- a/sound/soc/intel/boards/kbl_rt5663_max98927.c +++ b/sound/soc/intel/boards/kbl_rt5663_max98927.c @@ -672,7 +672,7 @@ static struct snd_soc_dai_link kabylake_dais[] = { .name = "Kbl Audio Echo Reference cap", .stream_name = "Echoreference Capture", .init = NULL, - .capture_only = 1, + .dpcm_capture = 1, .nonatomic = 1, SND_SOC_DAILINK_REG(echoref, dummy, platform), }, diff --git a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c index 1b1f8d7a4ea3f..b34cf6cf11395 100644 --- a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c +++ b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c @@ -566,7 +566,7 @@ static struct snd_soc_dai_link kabylake_dais[] = { .name = "Kbl Audio Echo Reference cap", .stream_name = "Echoreference Capture", .init = NULL, - .capture_only = 1, + .dpcm_capture = 1, .nonatomic = 1, SND_SOC_DAILINK_REG(echoref, dummy, platform), }, -- GitLab From ba4e5abc6c4e173af7c941c03c067263b686665d Mon Sep 17 00:00:00 2001 From: Pierre-Louis Bossart Date: Mon, 8 Jun 2020 14:44:15 -0500 Subject: [PATCH 1294/1971] ASoC: SOF: nocodec: conditionally set dpcm_capture/dpcm_playback flags With additional checks on dailinks, we see errors such as [ 3.000418] sof-nocodec sof-nocodec: CPU DAI DMIC01 Pin for rtd NoCodec-6 does not support playback It's not clear why we set the dpcm_playback and dpcm_capture flags unconditionally, add a check on number of channels for each direction to avoid invalid configurations. 
Fixes: 8017b8fd37bf5e ('ASoC: SOF: Add Nocodec machine driver support') Signed-off-by: Pierre-Louis Bossart Reviewed-by: Guennadi Liakhovetski Reviewed-by: Daniel Baluta Reviewed-by: Bard Liao Link: https://lore.kernel.org/r/20200608194415.4663-5-pierre-louis.bossart@linux.intel.com Signed-off-by: Mark Brown --- sound/soc/sof/nocodec.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/sound/soc/sof/nocodec.c b/sound/soc/sof/nocodec.c index ce053ba8f2e82..d03b5be31255b 100644 --- a/sound/soc/sof/nocodec.c +++ b/sound/soc/sof/nocodec.c @@ -52,8 +52,10 @@ static int sof_nocodec_bes_setup(struct device *dev, links[i].platforms->name = dev_name(dev); links[i].codecs->dai_name = "snd-soc-dummy-dai"; links[i].codecs->name = "snd-soc-dummy"; - links[i].dpcm_playback = 1; - links[i].dpcm_capture = 1; + if (ops->drv[i].playback.channels_min) + links[i].dpcm_playback = 1; + if (ops->drv[i].capture.channels_min) + links[i].dpcm_capture = 1; } card->dai_link = links; -- GitLab From c7f3d43b629b598a2bb9ec3524e844eae7492e7e Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 6 Jun 2020 23:51:15 +0200 Subject: [PATCH 1295/1971] clocksource: Remove obsolete ifdef CONFIG_GENERIC_VDSO_CLOCK_MODE was a transitional config switch which got removed after all architectures got converted to the new storage model. But the removal forgot to remove the #ifdef which guards the vdso_clock_mode sanity check, which effectively disables the sanity check. Remove it now. Fixes: f86fd32db706 ("lib/vdso: Cleanup clock mode storage leftovers") Signed-off-by: Thomas Gleixner Tested-by: Miklos Szeredi Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/20200606221531.845475036@linutronix.de --- kernel/time/clocksource.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index 7cb09c4cf21c8..02441ead3c3bb 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -928,14 +928,12 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq) clocksource_arch_init(cs); -#ifdef CONFIG_GENERIC_VDSO_CLOCK_MODE if (cs->vdso_clock_mode < 0 || cs->vdso_clock_mode >= VDSO_CLOCKMODE_MAX) { pr_warn("clocksource %s registered with invalid VDSO mode %d. Disabling VDSO support.\n", cs->name, cs->vdso_clock_mode); cs->vdso_clock_mode = VDSO_CLOCKMODE_NONE; } -#endif /* Initialize mult/shift and max_idle_ns */ __clocksource_update_freq_scale(cs, scale, freq); -- GitLab From 72ce778007e57e8996b4bebdec738fc5e1145fd2 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 6 Jun 2020 23:51:16 +0200 Subject: [PATCH 1296/1971] lib/vdso: Provide sanity check for cycles (again) The original x86 VDSO implementation checked for the validity of the clock source read by testing whether the returned signed cycles value is less than zero. This check was also used by the vdso read function to signal that the current selected clocksource is not VDSO capable. During the rework of the VDSO code the check was removed and replaced with a check for the clocksource mode being != NONE. This turned out to be a mistake because the check is necessary for paravirt and hyperv clock sources. The reason is that these clock sources have their own internal sequence counter to validate the clocksource at the point of reading it. This is necessary because the hypervisor can invalidate the clocksource asynchronously so a check during the VDSO data update is not sufficient. 
Having a separate indicator for the validity is slower than just validating the cycles value. The check for it being negative turned out to be the fastest implementation and is safe, as it would require an uptime of ~73 years with a 4GHz counter frequency to result in a false positive. Add an optional function to validate the cycles with a default implementation which allows the compiler to optimize it out for architectures which do not require it. Fixes: 5d51bee725cc ("clocksource: Add common vdso clock mode storage") Reported-by: Miklos Szeredi Signed-off-by: Thomas Gleixner Tested-by: Miklos Szeredi Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/20200606221531.963970768@linutronix.de --- lib/vdso/gettimeofday.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c index a2909af4b9240..3bb82a6cc5aa2 100644 --- a/lib/vdso/gettimeofday.c +++ b/lib/vdso/gettimeofday.c @@ -38,6 +38,13 @@ static inline bool vdso_clocksource_ok(const struct vdso_data *vd) } #endif +#ifndef vdso_cycles_ok +static inline bool vdso_cycles_ok(u64 cycles) +{ + return true; +} +#endif + #ifdef CONFIG_TIME_NS static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk, struct __kernel_timespec *ts) @@ -62,6 +69,8 @@ static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk, return -1; cycles = __arch_get_hw_counter(vd->clock_mode); + if (unlikely(!vdso_cycles_ok(cycles))) + return -1; ns = vdso_ts->nsec; last = vd->cycle_last; ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult); @@ -130,6 +139,8 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk, return -1; cycles = __arch_get_hw_counter(vd->clock_mode); + if (unlikely(!vdso_cycles_ok(cycles))) + return -1; ns = vdso_ts->nsec; last = vd->cycle_last; ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult); -- GitLab From 7778d8417b74aded842eeb372961cfc460417fa0 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 6 Jun 2020 23:51:17 +0200 Subject: [PATCH 1297/1971] x86/vdso: Unbreak paravirt VDSO clocks The conversion of the x86 VDSO to the generic clock mode storage broke the paravirt and hyperv clocksource logic. These clock sources have their own internal sequence counter to validate the clocksource at the point of reading it. This is necessary because the hypervisor can invalidate the clocksource asynchronously, so a check during the VDSO data update is not sufficient. If the internal check during read invalidates the clocksource, the read returns U64_MAX. The original code checked this efficiently by testing whether the result (cast to signed) is negative, i.e. bit 63 is set. This was done that way because an extra indicator for the validity had more overhead. The conversion broke this check because the check was replaced by a check for a valid VDSO clock mode. The wreckage manifests itself when the paravirt clock is installed as a valid VDSO clock and then invalidated by the hypervisor at runtime, e.g. after a host suspend/resume cycle. After the invalidation the read function returns U64_MAX, which is used as cycles and makes the clock jump by ~2200 seconds and then go stale until the 2200 seconds have elapsed, at which point it jumps again. The period of this effect depends on the shift/mult pair of the clocksource, and the jumps and staleness are an artifact of undefined but reproducible behaviour of math overflow.
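The bit-63 test described above can be illustrated with a small stand-alone user-space program (an approximation of the kernel check, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	/* Same predicate as the kernel's negative-value check: U64_MAX reads
	 * as -1 when cast to signed, so an invalidated clocksource fails. */
	static int cycles_ok(uint64_t cycles)
	{
		return (int64_t)cycles >= 0;
	}

	int main(void)
	{
		printf("U64_MAX ok? %d\n", cycles_ok(UINT64_MAX));   /* 0 */
		printf("normal  ok? %d\n", cycles_ok(123456789ULL)); /* 1 */
		/* bit 63 is only reached after 2^63 cycles: ~73 years at 4 GHz */
		printf("~%.0f years\n",
		       9223372036854775808.0 / 4e9 / (3600.0 * 24 * 365));
		return 0;
	}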
Implement an x86 version of the new vdso_cycles_ok() inline which adds this check back, and a variant of vdso_clocksource_ok() which lets the compiler optimize it out to avoid the extra conditional. That is suboptimal when the system does not have a VDSO capable clocksource, but that is not the case being optimized for. Fixes: 5d51bee725cc ("clocksource: Add common vdso clock mode storage") Reported-by: Miklos Szeredi Signed-off-by: Thomas Gleixner Tested-by: Miklos Szeredi Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/20200606221532.080560273@linutronix.de --- arch/x86/include/asm/vdso/gettimeofday.h | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/arch/x86/include/asm/vdso/gettimeofday.h b/arch/x86/include/asm/vdso/gettimeofday.h index 9a6dc9b4ec997..fb81fea99093c 100644 --- a/arch/x86/include/asm/vdso/gettimeofday.h +++ b/arch/x86/include/asm/vdso/gettimeofday.h @@ -271,6 +271,24 @@ static __always_inline const struct vdso_data *__arch_get_vdso_data(void) return __vdso_data; } +static inline bool arch_vdso_clocksource_ok(const struct vdso_data *vd) +{ + return true; +} +#define vdso_clocksource_ok arch_vdso_clocksource_ok + +/* + * Clocksource read value validation to handle PV and HyperV clocksources + * which can be invalidated asynchronously and indicate invalidation by + * returning U64_MAX, which can be effectively tested by checking for a + * negative value after casting it to s64. + */ +static inline bool arch_vdso_cycles_ok(u64 cycles) +{ + return (s64)cycles >= 0; +} +#define vdso_cycles_ok arch_vdso_cycles_ok + /* * x86 specific delta calculation. * -- GitLab From 199a5e8fda54ab3c8c6f6bf980c004e97ebf5ccb Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Mon, 8 Jun 2020 22:46:33 +0200 Subject: [PATCH 1298/1971] ASoC: Intel: bytcr_rt5640: Add quirk for Toshiba Encore WT10-A tablet The Toshiba Encore WT10-A tablet almost fully works with the default settings for Bay Trail CR devices. The only issue is that it uses a digital mic connected to the DMIC1 input instead of an analog mic. Add a quirk for this model using the default settings with the input-map replaced with BYT_RT5640_DMIC1_MAP.
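For context, a hedged sketch of how such a DMI quirk table is typically consumed by the driver at probe time (generic shape using the standard dmi_first_match() helper, not necessarily this driver's exact code path):

	/* Walk the quirk table and apply the matching entry's driver_data. */
	const struct dmi_system_id *dmi_id;

	dmi_id = dmi_first_match(byt_rt5640_quirk_table);
	if (dmi_id)
		byt_rt5640_quirk = (unsigned long)dmi_id->driver_data;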
Signed-off-by: Hans de Goede Acked-by: Pierre-Louis Bossart Link: https://lore.kernel.org/r/20200608204634.93407-1-hdegoede@redhat.com Signed-off-by: Mark Brown --- sound/soc/intel/boards/bytcr_rt5640.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c index 30f70bbdf89c7..1fdb70b9e4788 100644 --- a/sound/soc/intel/boards/bytcr_rt5640.c +++ b/sound/soc/intel/boards/bytcr_rt5640.c @@ -754,6 +754,18 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = { BYT_RT5640_JD_NOT_INV | BYT_RT5640_MCLK_EN), }, + { /* Toshiba Encore WT10-A */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "TOSHIBA WT10-A-103"), + }, + .driver_data = (void *)(BYT_RT5640_DMIC1_MAP | + BYT_RT5640_JD_SRC_JD1_IN4P | + BYT_RT5640_OVCD_TH_2000UA | + BYT_RT5640_OVCD_SF_0P75 | + BYT_RT5640_SSP0_AIF2 | + BYT_RT5640_MCLK_EN), + }, { /* Catch-all for generic Insyde tablets, must be last */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Insyde"), -- GitLab From 79d4f823a06796656289f97b922493da5690e46c Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Mon, 8 Jun 2020 22:46:34 +0200 Subject: [PATCH 1299/1971] ASoC: rt5645: Add platform-data for Asus T101HA The Asus T101HA uses the default jack-detect mode 3, but instead of using an analog microphone it is using a DMIC on dmic-data-pin 1, like the Asus T100HA. Note unlike the T100HA its jack-detect is not inverted. Add a DMI quirk with the correct settings for this model. Signed-off-by: Hans de Goede Acked-by: Pierre-Louis Bossart Link: https://lore.kernel.org/r/20200608204634.93407-2-hdegoede@redhat.com Signed-off-by: Mark Brown --- sound/soc/codecs/rt5645.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c index 6ba1849a77b08..e2e1d5b03b381 100644 --- a/sound/soc/codecs/rt5645.c +++ b/sound/soc/codecs/rt5645.c @@ -3625,6 +3625,12 @@ static const struct rt5645_platform_data asus_t100ha_platform_data = { .inv_jd1_1 = true, }; +static const struct rt5645_platform_data asus_t101ha_platform_data = { + .dmic1_data_pin = RT5645_DMIC_DATA_IN2N, + .dmic2_data_pin = RT5645_DMIC2_DISABLE, + .jd_mode = 3, +}; + static const struct rt5645_platform_data lenovo_ideapad_miix_310_pdata = { .jd_mode = 3, .in2_diff = true, @@ -3708,6 +3714,14 @@ static const struct dmi_system_id dmi_platform_data[] = { }, .driver_data = (void *)&asus_t100ha_platform_data, }, + { + .ident = "ASUS T101HA", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "T101HA"), + }, + .driver_data = (void *)&asus_t101ha_platform_data, + }, { .ident = "MINIX Z83-4", .matches = { -- GitLab From 9ca0652596bd924a4023db6b429a0aaaea629826 Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 9 Jun 2020 16:15:45 +0100 Subject: [PATCH 1300/1971] afs: Fix use of BUG() Fix afs_compare_addrs() to use WARN_ON(1) instead of BUG() and return 1 (ie. srx_a > srx_b). There's no point trying to put actual error handling in as this should not occur unless a new transport address type is allowed by AFS. And even if it does, in this particular case, it'll just never match unknown types of addresses. This BUG() was more of a 'you need to add a case here' indicator. 
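The pattern at work here, sketched in isolation with a hypothetical function (not the AFS code itself): warn loudly about the unhandled case but return a harmless, deterministic result instead of crashing the machine.

	static int compare_transport_addrs(unsigned short family_a,
					   unsigned short family_b)
	{
		if (family_a != family_b)
			return (int)family_a - (int)family_b;

		switch (family_a) {
		case AF_INET:
		case AF_INET6:
			return 0;	/* real code compares the address bytes */
		default:
			WARN_ON(1);	/* "add a case here" marker, non-fatal */
			return 1;	/* unknown types never compare equal */
		}
	}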
Reported-by: Kees Cook Signed-off-by: David Howells Reviewed-by: Kees Cook --- fs/afs/vl_alias.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/afs/vl_alias.c b/fs/afs/vl_alias.c index 093895c49c219..136fc6164e009 100644 --- a/fs/afs/vl_alias.c +++ b/fs/afs/vl_alias.c @@ -73,7 +73,8 @@ static int afs_compare_addrs(const struct sockaddr_rxrpc *srx_a, } default: - BUG(); + WARN_ON(1); + diff = 1; } out: -- GitLab From 2062a4e8ae9f486847652927aaf88e21ab8d195d Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:29:56 -0700 Subject: [PATCH 1301/1971] kallsyms/printk: add loglvl to print_ip_sym() Patch series "Add log level to show_stack()", v3. Add a log level argument to show_stack(). Done in three stages: 1. Introducing show_stack_loglvl() for every architecture 2. Migrating old users with an explicit log level 3. Renaming show_stack_loglvl() into show_stack() Justification: - It's a design mistake to move a business-logic decision into a platform implementation detail. - I currently have two patch sets that would benefit from this work: Removing console_loglevel jumps in the sysrq driver [1] Hung task warning before panic [2] - suggested by Tetsuo (but he probably didn't realise what it would involve). - While doing (1) and (2), the backtraces were matched to the headers and other messages for each situation - so there won't be a situation where the backtrace is printed but the headers are missing because they have a lower log level (or the reverse). - As a result, the console_loglevel tricks for kdb in (2) are removed. Least important for upstream, but perhaps still worth noting: every company I've worked at so far has had an off-list patch to print a backtrace with the needed log level (but only for the architecture they cared about). If you have other ideas about how you would benefit from show_stack() with a log level, please reply to this cover letter. See also discussion on v1: https://lore.kernel.org/linux-riscv/20191106083538.z5nlpuf64cigxigh@pathway.suse.cz/ This patch (of 50): print_ip_sym() needs a log level parameter so that it matches the other parts being printed. Otherwise, half of the expected backtrace would be printed while the other half may be missing at some logging levels. The following callees now use the adjusted log level: - microblaze/unwind: the same level as headers & userspace unwind. Note that the pr_debug()'s there are for debugging the unwinder itself. - nds32/traps: symbol addresses are printed with the same log level as backtrace headers. - lockdep: the ip for locking issues is printed with the same log level as the rest of the warning. - sched: the ip where preemption was disabled is printed as an error, like the rest of the message. - ftrace: bug reports are now consistent in the log level being used.
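The per-call-site effect is small; a sketch of one adjusted caller, mirroring the sched hunk further below:

	/* Header and symbolized address now share one severity, so a console
	 * log-level filter either shows both lines or neither. */
	pr_err("Preemption disabled at:");
	print_ip_sym(KERN_ERR, preempt_disable_ip);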
Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Acked-by: Steven Rostedt (VMware) Cc: Albert Ou Cc: Ben Segall Cc: Dietmar Eggemann Cc: Greentime Hu Cc: Greg Kroah-Hartman Cc: Ingo Molnar Cc: James Hogan Cc: Juri Lelli Cc: Mel Gorman Cc: Michal Simek Cc: Palmer Dabbelt Cc: Paul Burton Cc: Paul Walmsley Cc: Peter Zijlstra Cc: Ralf Baechle Cc: Thomas Gleixner Cc: Vincent Chen Cc: Vincent Guittot Cc: Will Deacon Cc: Dmitry Safonov <0x7f454c46@gmail.com> Cc: Dmitry Safonov Cc: Jiri Slaby Cc: Petr Mladek Cc: Sergey Senozhatsky Cc: Ivan Kokshaysky Cc: Matt Turner Cc: Richard Henderson Cc: Vineet Gupta Cc: Russell King Cc: Catalin Marinas Cc: Aurelien Jacquiot Cc: Mark Salter Cc: Guo Ren Cc: Yoshinori Sato Cc: Brian Cain Cc: Fenghua Yu Cc: Tony Luck Cc: Geert Uytterhoeven Cc: Ley Foon Tan Cc: Jonas Bonn Cc: Stafford Horne Cc: Stefan Kristiansson Cc: Helge Deller Cc: "James E.J. Bottomley" Cc: Benjamin Herrenschmidt Cc: Michael Ellerman Cc: Paul Mackerras Cc: Christian Borntraeger Cc: Heiko Carstens Cc: Vasily Gorbik Cc: Rich Felker Cc: "David S. Miller" Cc: Anton Ivanov Cc: Jeff Dike Cc: Richard Weinberger Cc: Guan Xuetao Cc: Borislav Petkov Cc: "H. Peter Anvin" Cc: Chris Zankel Cc: Max Filippov Cc: Len Brown Cc: Pavel Machek Cc: "Rafael J. Wysocki" Cc: "Rafael J. Wysocki" Cc: Daniel Thompson Cc: Douglas Anderson Cc: Jason Wessel Link: http://lkml.kernel.org/r/20200418201944.482088-2-dima@arista.com Signed-off-by: Linus Torvalds --- arch/microblaze/kernel/unwind.c | 2 +- arch/mips/kernel/traps.c | 4 ++-- arch/nds32/kernel/traps.c | 4 ++-- arch/riscv/kernel/stacktrace.c | 2 +- include/linux/kallsyms.h | 4 ++-- kernel/locking/lockdep.c | 4 ++-- kernel/sched/core.c | 6 ++---- kernel/trace/ftrace.c | 8 ++++---- tools/include/linux/kallsyms.h | 2 +- 9 files changed, 17 insertions(+), 19 deletions(-) diff --git a/arch/microblaze/kernel/unwind.c b/arch/microblaze/kernel/unwind.c index 34c270cb11fcb..4241cdd28ee77 100644 --- a/arch/microblaze/kernel/unwind.c +++ b/arch/microblaze/kernel/unwind.c @@ -254,7 +254,7 @@ static void microblaze_unwind_inner(struct task_struct *task, task->comm); break; } else - print_ip_sym(pc); + print_ip_sym(KERN_INFO, pc); } /* Stop when we reach anything not part of the kernel */ diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 22f805a73921d..210fea63de750 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -125,7 +125,7 @@ static void show_raw_backtrace(unsigned long reg29) break; } if (__kernel_text_address(addr)) - print_ip_sym(addr); + print_ip_sym(KERN_DEFAULT, addr); } printk("\n"); } @@ -155,7 +155,7 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs) } printk("Call Trace:\n"); do { - print_ip_sym(pc); + print_ip_sym(KERN_DEFAULT, pc); pc = unwind_stack(task, &sp, pc, &ra); } while (pc); pr_cont("\n"); diff --git a/arch/nds32/kernel/traps.c b/arch/nds32/kernel/traps.c index f4d386b526227..40625760a125e 100644 --- a/arch/nds32/kernel/traps.c +++ b/arch/nds32/kernel/traps.c @@ -108,7 +108,7 @@ static void __dump(struct task_struct *tsk, unsigned long *base_reg) if (__kernel_text_address(ret_addr)) { ret_addr = ftrace_graph_ret_addr( tsk, &graph, ret_addr, NULL); - print_ip_sym(ret_addr); + print_ip_sym(KERN_EMERG, ret_addr); } if (--cnt < 0) break; @@ -124,7 +124,7 @@ static void __dump(struct task_struct *tsk, unsigned long *base_reg) ret_addr = ftrace_graph_ret_addr( tsk, &graph, ret_addr, NULL); - print_ip_sym(ret_addr); + print_ip_sym(KERN_EMERG, ret_addr); } if (--cnt < 0) break; 
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c index 837b9b38f825c..9f1ac258482fd 100644 --- a/arch/riscv/kernel/stacktrace.c +++ b/arch/riscv/kernel/stacktrace.c @@ -99,7 +99,7 @@ void notrace walk_stackframe(struct task_struct *task, static bool print_trace_address(unsigned long pc, void *arg) { - print_ip_sym(pc); + print_ip_sym(KERN_DEFAULT, pc); return false; } diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index 657a83b943f06..98338dc6b5d27 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h @@ -165,9 +165,9 @@ static inline int kallsyms_show_value(void) #endif /*CONFIG_KALLSYMS*/ -static inline void print_ip_sym(unsigned long ip) +static inline void print_ip_sym(const char *loglvl, unsigned long ip) { - printk("[<%px>] %pS\n", (void *) ip, (void *) ip); + printk("%s[<%px>] %pS\n", loglvl, (void *) ip, (void *) ip); } #endif /*_LINUX_KALLSYMS_H*/ diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 4c057dd8e93b2..38cce34d03dc3 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -4424,7 +4424,7 @@ static void print_unlock_imbalance_bug(struct task_struct *curr, curr->comm, task_pid_nr(curr)); print_lockdep_cache(lock); pr_cont(") at:\n"); - print_ip_sym(ip); + print_ip_sym(KERN_WARNING, ip); pr_warn("but there are no more locks to release!\n"); pr_warn("\nother info that might help us debug this:\n"); lockdep_print_held_locks(curr); @@ -5075,7 +5075,7 @@ static void print_lock_contention_bug(struct task_struct *curr, curr->comm, task_pid_nr(curr)); print_lockdep_cache(lock); pr_cont(") at:\n"); - print_ip_sym(ip); + print_ip_sym(KERN_WARNING, ip); pr_warn("but there are no locks held!\n"); pr_warn("\nother info that might help us debug this:\n"); lockdep_print_held_locks(curr); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 8298b2c240ce8..c06da3c3e317d 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3922,8 +3922,7 @@ static noinline void __schedule_bug(struct task_struct *prev) if (IS_ENABLED(CONFIG_DEBUG_PREEMPT) && in_atomic_preempt_off()) { pr_err("Preemption disabled at:"); - print_ip_sym(preempt_disable_ip); - pr_cont("\n"); + print_ip_sym(KERN_ERR, preempt_disable_ip); } if (panic_on_warn) panic("scheduling while atomic\n"); @@ -6871,8 +6870,7 @@ void ___might_sleep(const char *file, int line, int preempt_offset) if (IS_ENABLED(CONFIG_DEBUG_PREEMPT) && !preempt_count_equals(preempt_offset)) { pr_err("Preemption disabled at:"); - print_ip_sym(preempt_disable_ip); - pr_cont("\n"); + print_ip_sym(KERN_ERR, preempt_disable_ip); } dump_stack(); add_taint(TAINT_WARN, LOCKDEP_STILL_OK); diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index b5765aeea698a..7d0ebd1047069 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -2020,12 +2020,12 @@ void ftrace_bug(int failed, struct dyn_ftrace *rec) case -EFAULT: FTRACE_WARN_ON_ONCE(1); pr_info("ftrace faulted on modifying "); - print_ip_sym(ip); + print_ip_sym(KERN_INFO, ip); break; case -EINVAL: FTRACE_WARN_ON_ONCE(1); pr_info("ftrace failed to modify "); - print_ip_sym(ip); + print_ip_sym(KERN_INFO, ip); print_ip_ins(" actual: ", (unsigned char *)ip); pr_cont("\n"); if (ftrace_expected) { @@ -2036,12 +2036,12 @@ void ftrace_bug(int failed, struct dyn_ftrace *rec) case -EPERM: FTRACE_WARN_ON_ONCE(1); pr_info("ftrace faulted on writing "); - print_ip_sym(ip); + print_ip_sym(KERN_INFO, ip); break; default: FTRACE_WARN_ON_ONCE(1); pr_info("ftrace faulted on unknown error "); - 
print_ip_sym(ip); + print_ip_sym(KERN_INFO, ip); } print_bug_type(); if (rec) { diff --git a/tools/include/linux/kallsyms.h b/tools/include/linux/kallsyms.h index 89ca6fe257ccb..efb6c3f5f2a9a 100644 --- a/tools/include/linux/kallsyms.h +++ b/tools/include/linux/kallsyms.h @@ -20,7 +20,7 @@ static inline const char *kallsyms_lookup(unsigned long addr, #include #include -static inline void print_ip_sym(unsigned long ip) +static inline void print_ip_sym(const char *loglvl, unsigned long ip) { char **name; -- GitLab From 8c49a909872ca757a62ccbfca08784c224d20fdc Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:30:01 -0700 Subject: [PATCH 1302/1971] alpha: add show_stack_loglvl() Currently, the log-level of show_stack() depends on a platform realization. It creates situations where the headers are printed with lower log level or higher than the stacktrace (depending on a platform or user). Furthermore, it forces the logic decision from user to an architecture side. In result, some users as sysrq/kdb/etc are doing tricks with temporary rising console_loglevel while printing their messages. And in result it not only may print unwanted messages from other CPUs, but also omit printing at all in the unlucky case where the printk() was deferred. Introducing log-level parameter and KERN_UNSUPPRESSED [1] seems an easier approach than introducing more printk buffers. Also, it will consolidate printings with headers. Introduce show_stack_loglvl(), that eventually will substitute show_stack(). [1]: https://lore.kernel.org/lkml/20190528002412.1625-1-dima@arista.com/T/#u Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Cc: Ivan Kokshaysky Cc: Matt Turner Cc: Richard Henderson Link: http://lkml.kernel.org/r/20200418201944.482088-3-dima@arista.com Signed-off-by: Linus Torvalds --- arch/alpha/kernel/traps.c | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c index f6b9664ac5042..2402f1777f54e 100644 --- a/arch/alpha/kernel/traps.c +++ b/arch/alpha/kernel/traps.c @@ -121,10 +121,10 @@ dik_show_code(unsigned int *pc) } static void -dik_show_trace(unsigned long *sp) +dik_show_trace(unsigned long *sp, const char *loglvl) { long i = 0; - printk("Trace:\n"); + printk("%sTrace:\n", loglvl); while (0x1ff8 & (unsigned long) sp) { extern char _stext[], _etext[]; unsigned long tmp = *sp; @@ -133,24 +133,25 @@ dik_show_trace(unsigned long *sp) continue; if (tmp >= (unsigned long) &_etext) continue; - printk("[<%lx>] %pSR\n", tmp, (void *)tmp); + printk("%s[<%lx>] %pSR\n", loglvl, tmp, (void *)tmp); if (i > 40) { - printk(" ..."); + printk("%s ...", loglvl); break; } } - printk("\n"); + printk("%s\n", loglvl); } static int kstack_depth_to_print = 24; -void show_stack(struct task_struct *task, unsigned long *sp) +void show_stack_loglvl(struct task_struct *task, unsigned long *sp, + const char *loglvl) { unsigned long *stack; int i; /* - * debugging aid: "show_stack(NULL);" prints the + * debugging aid: "show_stack(NULL, NULL, KERN_EMERG);" prints the * back trace for this cpu. 
*/ if(sp==NULL) @@ -163,14 +164,19 @@ void show_stack(struct task_struct *task, unsigned long *sp) if ((i % 4) == 0) { if (i) pr_cont("\n"); - printk(" "); + printk("%s ", loglvl); } else { pr_cont(" "); } pr_cont("%016lx", *stack++); } pr_cont("\n"); - dik_show_trace(sp); + dik_show_trace(sp, loglvl); +} + +void show_stack(struct task_struct *task, unsigned long *sp) +{ + show_stack_loglvl(task, sp, KERN_DEFAULT); } void @@ -184,7 +190,7 @@ die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15) printk("%s(%d): %s %ld\n", current->comm, task_pid_nr(current), str, err); dik_show_regs(regs, r9_15); add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); - dik_show_trace((unsigned long *)(regs+1)); + dik_show_trace((unsigned long *)(regs+1), KERN_DEFAULT); dik_show_code((unsigned int *)regs->pc); if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) { @@ -625,7 +631,7 @@ got_exception: printk("gp = %016lx sp = %p\n", regs->gp, regs+1); dik_show_code((unsigned int *)pc); - dik_show_trace((unsigned long *)(regs+1)); + dik_show_trace((unsigned long *)(regs+1), KERN_DEFAULT); if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) { printk("die_if_kernel recursion detected.\n"); -- GitLab From 8ca4d19932a5649930db29202b285d8e4e96a98a Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:30:04 -0700 Subject: [PATCH 1303/1971] arc: add show_stack_loglvl() Currently, the log-level of show_stack() depends on a platform realization. It creates situations where the headers are printed with lower log level or higher than the stacktrace (depending on a platform or user). Furthermore, it forces the logic decision from user to an architecture side. In result, some users as sysrq/kdb/etc are doing tricks with temporary rising console_loglevel while printing their messages. And in result it not only may print unwanted messages from other CPUs, but also omit printing at all in the unlucky case where the printk() was deferred. Introducing log-level parameter and KERN_UNSUPPRESSED [1] seems an easier approach than introducing more printk buffers. Also, it will consolidate printings with headers. Introduce show_stack_loglvl(), that eventually will substitute show_stack(). As a good side-effect header "Stack Trace:" is now printed with the same log level as the rest of backtrace. 
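Each architecture patch in this series follows the same wrapper shape; roughly (a sketch of the common pattern, where arch_dump_stack() is a placeholder for whatever per-arch unwinder each file already has, and the default level varies by architecture):

	void show_stack_loglvl(struct task_struct *task, unsigned long *sp,
			       const char *loglvl)
	{
		/* arch-specific unwinder, now taking the caller's log level */
		arch_dump_stack(task, sp, loglvl);
	}

	void show_stack(struct task_struct *task, unsigned long *sp)
	{
		show_stack_loglvl(task, sp, KERN_DEFAULT);
	}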
[1]: https://lore.kernel.org/lkml/20190528002412.1625-1-dima@arista.com/T/#u Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Cc: Vineet Gupta Link: http://lkml.kernel.org/r/20200418201944.482088-4-dima@arista.com Signed-off-by: Linus Torvalds --- arch/arc/include/asm/bug.h | 3 ++- arch/arc/kernel/stacktrace.c | 21 +++++++++++++++------ arch/arc/kernel/troubleshoot.c | 2 +- 3 files changed, 18 insertions(+), 8 deletions(-) diff --git a/arch/arc/include/asm/bug.h b/arch/arc/include/asm/bug.h index 0be19fd1a4126..4c453ba96c519 100644 --- a/arch/arc/include/asm/bug.h +++ b/arch/arc/include/asm/bug.h @@ -13,7 +13,8 @@ struct task_struct; void show_regs(struct pt_regs *regs); -void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs); +void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs, + const char *loglvl); void show_kernel_fault_diag(const char *str, struct pt_regs *regs, unsigned long address); void die(const char *str, struct pt_regs *regs, unsigned long address); diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c index 1e440bbfa876b..24f9cd8a12c94 100644 --- a/arch/arc/kernel/stacktrace.c +++ b/arch/arc/kernel/stacktrace.c @@ -158,9 +158,11 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs, /* Call-back which plugs into unwinding core to dump the stack in * case of panic/OOPs/BUG etc */ -static int __print_sym(unsigned int address, void *unused) +static int __print_sym(unsigned int address, void *arg) { - printk(" %pS\n", (void *)address); + const char *loglvl = arg; + + printk("%s %pS\n", loglvl, (void *)address); return 0; } @@ -217,17 +219,24 @@ static int __get_first_nonsched(unsigned int address, void *unused) *------------------------------------------------------------------------- */ -noinline void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs) +noinline void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs, + const char *loglvl) { - pr_info("\nStack Trace:\n"); - arc_unwind_core(tsk, regs, __print_sym, NULL); + printk("%s\nStack Trace:\n", loglvl); + arc_unwind_core(tsk, regs, __print_sym, (void *)loglvl); } EXPORT_SYMBOL(show_stacktrace); /* Expected by sched Code */ +void show_stack_loglvl(struct task_struct *tsk, unsigned long *sp, + const char *loglvl) +{ + show_stacktrace(tsk, NULL, loglvl); +} + void show_stack(struct task_struct *tsk, unsigned long *sp) { - show_stacktrace(tsk, NULL); + show_stack_loglvl(tsk, sp, KERN_DEFAULT); } /* Another API expected by schedular, shows up in "ps" as Wait Channel diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c index 3393558876a9b..3044c8347b746 100644 --- a/arch/arc/kernel/troubleshoot.c +++ b/arch/arc/kernel/troubleshoot.c @@ -240,5 +240,5 @@ void show_kernel_fault_diag(const char *str, struct pt_regs *regs, /* Show stack trace if this Fatality happened in kernel mode */ if (!user_mode(regs)) - show_stacktrace(current, regs); + show_stacktrace(current, regs, KERN_DEFAULT); } -- GitLab From 5489ab50c22771ddcad014968141b0d104d650a3 Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:30:07 -0700 Subject: [PATCH 1304/1971] arm/asm: add loglvl to c_backtrace() Currently, the log-level of show_stack() depends on a platform realization. It creates situations where the headers are printed with lower log level or higher than the stacktrace (depending on a platform or user). Furthermore, it forces the logic decision from user to an architecture side. 
In result, some users as sysrq/kdb/etc are doing tricks with temporary rising console_loglevel while printing their messages. And in result it not only may print unwanted messages from other CPUs, but also omit printing at all in the unlucky case where the printk() was deferred. Introducing log-level parameter and KERN_UNSUPPRESSED [1] seems an easier approach than introducing more printk buffers. Also, it will consolidate printings with headers. Add log level argument to c_backtrace() as a preparation for introducing show_stack_loglvl(). [1]: https://lore.kernel.org/lkml/20190528002412.1625-1-dima@arista.com/T/#u Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Cc: Russell King Cc: Will Deacon Link: http://lkml.kernel.org/r/20200418201944.482088-5-dima@arista.com Signed-off-by: Linus Torvalds --- arch/arm/include/asm/bug.h | 3 ++- arch/arm/include/asm/traps.h | 3 ++- arch/arm/kernel/traps.c | 9 +++++---- arch/arm/kernel/unwind.c | 2 +- arch/arm/lib/backtrace-clang.S | 9 +++++++-- arch/arm/lib/backtrace.S | 14 ++++++++++---- 6 files changed, 27 insertions(+), 13 deletions(-) diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h index deef4d0cb3b50..673c7dd75ab90 100644 --- a/arch/arm/include/asm/bug.h +++ b/arch/arm/include/asm/bug.h @@ -82,7 +82,8 @@ void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *), int sig, int code, const char *name); -extern asmlinkage void c_backtrace(unsigned long fp, int pmode); +extern asmlinkage void c_backtrace(unsigned long fp, int pmode, + const char *loglvl); struct mm_struct; void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr); diff --git a/arch/arm/include/asm/traps.h b/arch/arm/include/asm/traps.h index 172b08ff3760d..987fefb0a4dbc 100644 --- a/arch/arm/include/asm/traps.h +++ b/arch/arm/include/asm/traps.h @@ -29,7 +29,8 @@ static inline int __in_irqentry_text(unsigned long ptr) } extern void __init early_trap_init(void *); -extern void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame); +extern void dump_backtrace_entry(unsigned long where, unsigned long from, + unsigned long frame, const char *loglvl); extern void ptrace_break(struct pt_regs *regs); extern void *vectors_page; diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 316a7687f8133..7bba15fdce9f6 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -62,7 +62,8 @@ __setup("user_debug=", user_debug_setup); static void dump_mem(const char *, const char *, unsigned long, unsigned long); -void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame) +void dump_backtrace_entry(unsigned long where, unsigned long from, + unsigned long frame, const char *loglvl) { unsigned long end = frame + 4 + sizeof(struct pt_regs); @@ -76,7 +77,7 @@ void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long dump_mem("", "Exception stack", frame + 4, end); } -void dump_backtrace_stm(u32 *stack, u32 instruction) +void dump_backtrace_stm(u32 *stack, u32 instruction, const char *loglvl) { char str[80], *p; unsigned int x; @@ -238,7 +239,7 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) pr_cont("\n"); if (ok) - c_backtrace(fp, mode); + c_backtrace(fp, mode, NULL); } #endif @@ -666,7 +667,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs) dump_instr("", regs); if (user_mode(regs)) { __show_regs(regs); - c_backtrace(frame_pointer(regs), processor_mode(regs)); + 
c_backtrace(frame_pointer(regs), processor_mode(regs), NULL); } } #endif diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c index 11a964fd66f47..343cc27b36c4c 100644 --- a/arch/arm/kernel/unwind.c +++ b/arch/arm/kernel/unwind.c @@ -493,7 +493,7 @@ void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk) urc = unwind_frame(&frame); if (urc < 0) break; - dump_backtrace_entry(where, frame.pc, frame.sp - 4); + dump_backtrace_entry(where, frame.pc, frame.sp - 4, NULL); } } diff --git a/arch/arm/lib/backtrace-clang.S b/arch/arm/lib/backtrace-clang.S index 2ff375144b55b..6174c45f53a5d 100644 --- a/arch/arm/lib/backtrace-clang.S +++ b/arch/arm/lib/backtrace-clang.S @@ -17,6 +17,7 @@ #define sv_pc r6 #define mask r7 #define sv_lr r8 +#define loglvl r9 ENTRY(c_backtrace) @@ -99,6 +100,7 @@ ENDPROC(c_backtrace) @ to ensure 8 byte alignment movs frame, r0 @ if frame pointer is zero beq no_frame @ we have no stack frames + mov loglvl, r2 tst r1, #0x10 @ 26 or 32-bit mode? moveq mask, #0xfc000003 movne mask, #0 @ mask for 32-bit @@ -167,6 +169,7 @@ finished_setup: mov r1, sv_lr mov r2, frame bic r1, r1, mask @ mask PC/LR for the mode + mov r3, loglvl bl dump_backtrace_entry /* @@ -183,6 +186,7 @@ finished_setup: ldr r0, [frame] @ locals are stored in @ the preceding frame subeq r0, r0, #4 + mov r2, loglvl bleq dump_backtrace_stm @ dump saved registers /* @@ -196,7 +200,8 @@ finished_setup: bhi for_each_frame 1006: adr r0, .Lbad - mov r1, frame + mov r1, loglvl + mov r2, frame bl printk no_frame: ldmfd sp!, {r4 - r9, fp, pc} ENDPROC(c_backtrace) @@ -209,7 +214,7 @@ ENDPROC(c_backtrace) .long 1005b, 1006b .popsection -.Lbad: .asciz "Backtrace aborted due to bad frame pointer <%p>\n" +.Lbad: .asciz "%sBacktrace aborted due to bad frame pointer <%p>\n" .align .Lopcode: .word 0xe92d4800 >> 11 @ stmfd sp!, {... fp, lr} .word 0x0b000000 @ bl if these bits are set diff --git a/arch/arm/lib/backtrace.S b/arch/arm/lib/backtrace.S index 582925238d65e..872f658638d99 100644 --- a/arch/arm/lib/backtrace.S +++ b/arch/arm/lib/backtrace.S @@ -18,6 +18,7 @@ #define sv_pc r6 #define mask r7 #define offset r8 +#define loglvl r9 ENTRY(c_backtrace) @@ -25,9 +26,10 @@ ENTRY(c_backtrace) ret lr ENDPROC(c_backtrace) #else - stmfd sp!, {r4 - r8, lr} @ Save an extra register so we have a location... + stmfd sp!, {r4 - r9, lr} @ Save an extra register so we have a location... movs frame, r0 @ if frame pointer is zero beq no_frame @ we have no stack frames + mov loglvl, r2 tst r1, #0x10 @ 26 or 32-bit mode? 
ARM( moveq mask, #0xfc000003 ) @@ -73,6 +75,7 @@ for_each_frame: tst frame, mask @ Check for address exceptions ldr r1, [frame, #-4] @ get saved lr mov r2, frame bic r1, r1, mask @ mask PC/LR for the mode + mov r3, loglvl bl dump_backtrace_entry ldr r1, [sv_pc, #-4] @ if stmfd sp!, {args} exists, @@ -80,12 +83,14 @@ for_each_frame: tst frame, mask @ Check for address exceptions teq r3, r1, lsr #11 ldreq r0, [frame, #-8] @ get sp subeq r0, r0, #4 @ point at the last arg + mov r2, loglvl bleq dump_backtrace_stm @ dump saved registers 1004: ldr r1, [sv_pc, #0] @ if stmfd sp!, {..., fp, ip, lr, pc} ldr r3, .Ldsi @ instruction exists, teq r3, r1, lsr #11 subeq r0, frame, #16 + mov r2, loglvl bleq dump_backtrace_stm @ dump saved registers teq sv_fp, #0 @ zero saved fp means @@ -96,9 +101,10 @@ for_each_frame: tst frame, mask @ Check for address exceptions bhi for_each_frame 1006: adr r0, .Lbad - mov r1, frame + mov r1, loglvl + mov r2, frame bl printk -no_frame: ldmfd sp!, {r4 - r8, pc} +no_frame: ldmfd sp!, {r4 - r9, pc} ENDPROC(c_backtrace) .pushsection __ex_table,"a" @@ -109,7 +115,7 @@ ENDPROC(c_backtrace) .long 1004b, 1006b .popsection -.Lbad: .asciz "Backtrace aborted due to bad frame pointer <%p>\n" +.Lbad: .asciz "%sBacktrace aborted due to bad frame pointer <%p>\n" .align .Ldsi: .word 0xe92dd800 >> 11 @ stmfd sp!, {... fp, ip, lr, pc} .word 0xe92d0000 >> 11 @ stmfd sp!, {} -- GitLab From e8d7b7353216586f64f5bbcc024951e3cab10a60 Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:30:10 -0700 Subject: [PATCH 1305/1971] arm: add loglvl to unwind_backtrace() Currently, the log-level of show_stack() depends on a platform realization. It creates situations where the headers are printed with lower log level or higher than the stacktrace (depending on a platform or user). Furthermore, it forces the logic decision from user to an architecture side. In result, some users as sysrq/kdb/etc are doing tricks with temporary rising console_loglevel while printing their messages. And in result it not only may print unwanted messages from other CPUs, but also omit printing at all in the unlucky case where the printk() was deferred. Introducing log-level parameter and KERN_UNSUPPRESSED [1] seems an easier approach than introducing more printk buffers. Also, it will consolidate printings with headers. Add log level argument to unwind_backtrace() as a preparation for introducing show_stack_loglvl(). As a good side-effect arm_syscall() is now printing errors with the same log level as the backtrace. 
[1]: https://lore.kernel.org/lkml/20190528002412.1625-1-dima@arista.com/T/#u Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Cc: Russell King Cc: Will Deacon Link: http://lkml.kernel.org/r/20200418201944.482088-6-dima@arista.com Signed-off-by: Linus Torvalds --- arch/arm/include/asm/unwind.h | 3 ++- arch/arm/kernel/traps.c | 6 +++--- arch/arm/kernel/unwind.c | 5 +++-- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/arch/arm/include/asm/unwind.h b/arch/arm/include/asm/unwind.h index 6e282c33126b6..0f8a3439902d0 100644 --- a/arch/arm/include/asm/unwind.h +++ b/arch/arm/include/asm/unwind.h @@ -36,7 +36,8 @@ extern struct unwind_table *unwind_table_add(unsigned long start, unsigned long text_addr, unsigned long text_size); extern void unwind_table_del(struct unwind_table *tab); -extern void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk); +extern void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk, + const char *loglvl); #endif /* !__ASSEMBLY__ */ diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 7bba15fdce9f6..aef52a401ad50 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -204,7 +204,7 @@ static void dump_instr(const char *lvl, struct pt_regs *regs) #ifdef CONFIG_ARM_UNWIND static inline void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) { - unwind_backtrace(regs, tsk); + unwind_backtrace(regs, tsk, KERN_DEFAULT); } #else static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) @@ -664,10 +664,10 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs) if (user_debug & UDBG_SYSCALL) { pr_err("[%d] %s: arm syscall %d\n", task_pid_nr(current), current->comm, no); - dump_instr("", regs); + dump_instr(KERN_ERR, regs); if (user_mode(regs)) { __show_regs(regs); - c_backtrace(frame_pointer(regs), processor_mode(regs), NULL); + c_backtrace(frame_pointer(regs), processor_mode(regs), KERN_ERR); } } #endif diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c index 343cc27b36c4c..d2bd0df2318d6 100644 --- a/arch/arm/kernel/unwind.c +++ b/arch/arm/kernel/unwind.c @@ -455,7 +455,8 @@ int unwind_frame(struct stackframe *frame) return URC_OK; } -void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk) +void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk, + const char *loglvl) { struct stackframe frame; @@ -493,7 +494,7 @@ void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk) urc = unwind_frame(&frame); if (urc < 0) break; - dump_backtrace_entry(where, frame.pc, frame.sp - 4, NULL); + dump_backtrace_entry(where, frame.pc, frame.sp - 4, loglvl); } } -- GitLab From ee65ca01c62cd89cea6192017806b55c34f1cfda Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:30:13 -0700 Subject: [PATCH 1306/1971] arm: add loglvl to dump_backtrace() Currently, the log-level of show_stack() depends on a platform realization. It creates situations where the headers are printed with lower log level or higher than the stacktrace (depending on a platform or user). Furthermore, it forces the logic decision from user to an architecture side. In result, some users as sysrq/kdb/etc are doing tricks with temporary rising console_loglevel while printing their messages. And in result it not only may print unwanted messages from other CPUs, but also omit printing at all in the unlucky case where the printk() was deferred. Introducing log-level parameter and KERN_UNSUPPRESSED [1] seems an easier approach than introducing more printk buffers. 
Also, it will consolidate printings with headers. Add log level argument to dump_backtrace() as a preparation for introducing show_stack_loglvl(). As a good side-effect __die() now prints not only "Stack:" header with KERN_EMERG, but the backtrace itself. [1]: https://lore.kernel.org/lkml/20190528002412.1625-1-dima@arista.com/T/#u Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Cc: Russell King Cc: Will Deacon Link: http://lkml.kernel.org/r/20200418201944.482088-7-dima@arista.com Signed-off-by: Linus Torvalds --- arch/arm/kernel/traps.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index aef52a401ad50..014fb5ad3a8b5 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -202,17 +202,19 @@ static void dump_instr(const char *lvl, struct pt_regs *regs) } #ifdef CONFIG_ARM_UNWIND -static inline void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) +static inline void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk, + const char *loglvl) { - unwind_backtrace(regs, tsk, KERN_DEFAULT); + unwind_backtrace(regs, tsk, loglvl); } #else -static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) +static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk, + const char *loglvl) { unsigned int fp, mode; int ok = 1; - printk("Backtrace: "); + printk("%sBacktrace: ", loglvl); if (!tsk) tsk = current; @@ -239,13 +241,13 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) pr_cont("\n"); if (ok) - c_backtrace(fp, mode, NULL); + c_backtrace(fp, mode, loglvl); } #endif void show_stack(struct task_struct *tsk, unsigned long *sp) { - dump_backtrace(NULL, tsk); + dump_backtrace(NULL, tsk, KERN_DEFAULT); barrier(); } @@ -289,7 +291,7 @@ static int __die(const char *str, int err, struct pt_regs *regs) if (!user_mode(regs) || in_interrupt()) { dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp, THREAD_SIZE + (unsigned long)task_stack_page(tsk)); - dump_backtrace(regs, tsk); + dump_backtrace(regs, tsk, KERN_EMERG); dump_instr(KERN_EMERG, regs); } -- GitLab From 34135eacae6497d9c7bfca450f6c33e5245c92d2 Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:30:16 -0700 Subject: [PATCH 1307/1971] arm: wire up dump_backtrace_{entry,stm} Currently, the log-level of show_stack() depends on a platform realization. It creates situations where the headers are printed with lower log level or higher than the stacktrace (depending on a platform or user). Furthermore, it forces the logic decision from user to an architecture side. In result, some users as sysrq/kdb/etc are doing tricks with temporary rising console_loglevel while printing their messages. And in result it not only may print unwanted messages from other CPUs, but also omit printing at all in the unlucky case where the printk() was deferred. Introducing log-level parameter and KERN_UNSUPPRESSED [1] seems an easier approach than introducing more printk buffers. Also, it will consolidate printings with headers. Now that c_backtrace() always emits correct loglvl, use it for printing. 
[1]: https://lore.kernel.org/lkml/20190528002412.1625-1-dima@arista.com/T/#u Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Cc: Russell King Cc: Will Deacon Link: http://lkml.kernel.org/r/20200418201944.482088-8-dima@arista.com Signed-off-by: Linus Torvalds --- arch/arm/kernel/traps.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 014fb5ad3a8b5..0aa55dc025a13 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -68,13 +68,15 @@ void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long end = frame + 4 + sizeof(struct pt_regs); #ifdef CONFIG_KALLSYMS - printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from); + printk("%s[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", + loglvl, where, (void *)where, from, (void *)from); #else - printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from); + printk("%sFunction entered at [<%08lx>] from [<%08lx>]\n", + loglvl, where, from); #endif if (in_entry_text(from) && end <= ALIGN(frame, THREAD_SIZE)) - dump_mem("", "Exception stack", frame + 4, end); + dump_mem(loglvl, "Exception stack", frame + 4, end); } void dump_backtrace_stm(u32 *stack, u32 instruction, const char *loglvl) @@ -89,12 +91,12 @@ void dump_backtrace_stm(u32 *stack, u32 instruction, const char *loglvl) if (++x == 6) { x = 0; p = str; - printk("%s\n", str); + printk("%s%s\n", loglvl, str); } } } if (p != str) - printk("%s\n", str); + printk("%s%s\n", loglvl, str); } #ifndef CONFIG_ARM_UNWIND -- GitLab From a4502d04c7dd075846bb70ebfde3843f9df86f98 Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:30:19 -0700 Subject: [PATCH 1308/1971] arm: add show_stack_loglvl() Currently, the log-level of show_stack() depends on a platform realization. It creates situations where the headers are printed with lower log level or higher than the stacktrace (depending on a platform or user). Furthermore, it forces the logic decision from user to an architecture side. In result, some users as sysrq/kdb/etc are doing tricks with temporary rising console_loglevel while printing their messages. And in result it not only may print unwanted messages from other CPUs, but also omit printing at all in the unlucky case where the printk() was deferred. Introducing log-level parameter and KERN_UNSUPPRESSED [1] seems an easier approach than introducing more printk buffers. Also, it will consolidate printings with headers. Introduce show_stack_loglvl(), that eventually will substitute show_stack(). 
[1]: https://lore.kernel.org/lkml/20190528002412.1625-1-dima@arista.com/T/#u Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Cc: Russell King Cc: Will Deacon Link: http://lkml.kernel.org/r/20200418201944.482088-9-dima@arista.com Signed-off-by: Linus Torvalds --- arch/arm/kernel/traps.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 0aa55dc025a13..2ae2c23799ada 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -247,12 +247,18 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk, } #endif -void show_stack(struct task_struct *tsk, unsigned long *sp) +void show_stack_loglvl(struct task_struct *tsk, unsigned long *sp, + const char *loglvl) { - dump_backtrace(NULL, tsk, KERN_DEFAULT); + dump_backtrace(NULL, tsk, loglvl); barrier(); } +void show_stack(struct task_struct *tsk, unsigned long *sp) +{ + show_stack_loglvl(tsk, sp, KERN_DEFAULT); +} + #ifdef CONFIG_PREEMPT #define S_PREEMPT " PREEMPT" #elif defined(CONFIG_PREEMPT_RT) -- GitLab From c76898373f9b71586edaf150190c493ae9ed3e77 Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:30:23 -0700 Subject: [PATCH 1309/1971] arm64: add loglvl to dump_backtrace() Currently, the log-level of show_stack() depends on a platform realization. It creates situations where the headers are printed with lower log level or higher than the stacktrace (depending on a platform or user). Furthermore, it forces the logic decision from user to an architecture side. In result, some users as sysrq/kdb/etc are doing tricks with temporary rising console_loglevel while printing their messages. And in result it not only may print unwanted messages from other CPUs, but also omit printing at all in the unlucky case where the printk() was deferred. Introducing log-level parameter and KERN_UNSUPPRESSED [1] seems an easier approach than introducing more printk buffers. Also, it will consolidate printings with headers. Add log level argument to dump_backtrace() as a preparation for introducing show_stack_loglvl(). 
[1]: https://lore.kernel.org/lkml/20190528002412.1625-1-dima@arista.com/T/#u Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Cc: Catalin Marinas Cc: Russell King Cc: Will Deacon Link: http://lkml.kernel.org/r/20200418201944.482088-10-dima@arista.com Signed-off-by: Linus Torvalds --- arch/arm64/include/asm/stacktrace.h | 3 ++- arch/arm64/kernel/process.c | 2 +- arch/arm64/kernel/traps.c | 15 ++++++++------- 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h index 5017b531a4153..fc7613023c192 100644 --- a/arch/arm64/include/asm/stacktrace.h +++ b/arch/arm64/include/asm/stacktrace.h @@ -64,7 +64,8 @@ struct stackframe { extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame); extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame, int (*fn)(struct stackframe *, void *), void *data); -extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk); +extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk, + const char *loglvl); DECLARE_PER_CPU(unsigned long *, irq_stack_ptr); diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index eade7807e819d..6089638c7d43f 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -306,7 +306,7 @@ void __show_regs(struct pt_regs *regs) void show_regs(struct pt_regs * regs) { __show_regs(regs); - dump_backtrace(regs, NULL); + dump_backtrace(regs, NULL, KERN_DEFAULT); } static void tls_thread_flush(void) diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index d332590f59782..f602aca64baaf 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -53,9 +53,9 @@ static const char *handler[]= { int show_unhandled_signals = 0; -static void dump_backtrace_entry(unsigned long where) +static void dump_backtrace_entry(unsigned long where, const char *loglvl) { - printk(" %pS\n", (void *)where); + printk("%s %pS\n", loglvl, (void *)where); } static void dump_kernel_instr(const char *lvl, struct pt_regs *regs) @@ -83,7 +83,8 @@ static void dump_kernel_instr(const char *lvl, struct pt_regs *regs) printk("%sCode: %s\n", lvl, str); } -void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) +void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk, + const char *loglvl) { struct stackframe frame; int skip = 0; @@ -115,11 +116,11 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) thread_saved_pc(tsk)); } - printk("Call trace:\n"); + printk("%sCall trace:\n", loglvl); do { /* skip until specified stack frame */ if (!skip) { - dump_backtrace_entry(frame.pc); + dump_backtrace_entry(frame.pc, loglvl); } else if (frame.fp == regs->regs[29]) { skip = 0; /* @@ -129,7 +130,7 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) * at which an exception has taken place, use regs->pc * instead. 
*/ - dump_backtrace_entry(regs->pc); + dump_backtrace_entry(regs->pc, loglvl); } } while (!unwind_frame(tsk, &frame)); @@ -138,7 +139,7 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) void show_stack(struct task_struct *tsk, unsigned long *sp) { - dump_backtrace(NULL, tsk); + dump_backtrace(NULL, tsk, KERN_DEFAULT); barrier(); } -- GitLab From c0fe096a8aba4b00c2a928c22b7f6a3ce3e9ca97 Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:30:26 -0700 Subject: [PATCH 1310/1971] arm64: add show_stack_loglvl() Currently, the log-level of show_stack() depends on a platform realization. It creates situations where the headers are printed with lower log level or higher than the stacktrace (depending on a platform or user). Furthermore, it forces the logic decision from user to an architecture side. In result, some users as sysrq/kdb/etc are doing tricks with temporary rising console_loglevel while printing their messages. And in result it not only may print unwanted messages from other CPUs, but also omit printing at all in the unlucky case where the printk() was deferred. Introducing log-level parameter and KERN_UNSUPPRESSED [1] seems an easier approach than introducing more printk buffers. Also, it will consolidate printings with headers. Introduce show_stack_loglvl(), that eventually will substitute show_stack(). [1]: https://lore.kernel.org/lkml/20190528002412.1625-1-dima@arista.com/T/#u Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Cc: Catalin Marinas Cc: Russell King Cc: Will Deacon Link: http://lkml.kernel.org/r/20200418201944.482088-11-dima@arista.com Signed-off-by: Linus Torvalds --- arch/arm64/kernel/traps.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index f602aca64baaf..3621868b2fcc8 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -137,12 +137,18 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk, put_task_stack(tsk); } -void show_stack(struct task_struct *tsk, unsigned long *sp) +void show_stack_loglvl(struct task_struct *tsk, unsigned long *sp, + const char *loglvl) { - dump_backtrace(NULL, tsk, KERN_DEFAULT); + dump_backtrace(NULL, tsk, loglvl); barrier(); } +void show_stack(struct task_struct *tsk, unsigned long *sp) +{ + show_stack_loglvl(tsk, sp, KERN_DEFAULT); +} + #ifdef CONFIG_PREEMPT #define S_PREEMPT " PREEMPT" #elif defined(CONFIG_PREEMPT_RT) -- GitLab From a1eea2efdcaa69275294ea2432114369c14b2b8f Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:30:29 -0700 Subject: [PATCH 1311/1971] c6x: add show_stack_loglvl() Currently, the log-level of show_stack() depends on a platform realization. It creates situations where the headers are printed with lower log level or higher than the stacktrace (depending on a platform or user). Furthermore, it forces the logic decision from user to an architecture side. In result, some users as sysrq/kdb/etc are doing tricks with temporary rising console_loglevel while printing their messages. And in result it not only may print unwanted messages from other CPUs, but also omit printing at all in the unlucky case where the printk() was deferred. Introducing log-level parameter and KERN_UNSUPPRESSED [1] seems an easier approach than introducing more printk buffers. Also, it will consolidate printings with headers. Introduce show_stack_loglvl(), that eventually will substitute show_stack(). 
[1]: https://lore.kernel.org/lkml/20190528002412.1625-1-dima@arista.com/T/#u Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Cc: Aurelien Jacquiot Cc: Mark Salter Link: http://lkml.kernel.org/r/20200418201944.482088-12-dima@arista.com Signed-off-by: Linus Torvalds --- arch/c6x/kernel/traps.c | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/arch/c6x/kernel/traps.c b/arch/c6x/kernel/traps.c index ec61034fdf56c..4afbf48f1ce00 100644 --- a/arch/c6x/kernel/traps.c +++ b/arch/c6x/kernel/traps.c @@ -344,12 +344,13 @@ asmlinkage int process_exception(struct pt_regs *regs) static int kstack_depth_to_print = 48; -static void show_trace(unsigned long *stack, unsigned long *endstack) +static void show_trace(unsigned long *stack, unsigned long *endstack, + const char *loglvl) { unsigned long addr; int i; - pr_debug("Call trace:"); + printk("%sCall trace:", loglvl); i = 0; while (stack + 1 <= endstack) { addr = *stack++; @@ -364,16 +365,17 @@ static void show_trace(unsigned long *stack, unsigned long *endstack) if (__kernel_text_address(addr)) { #ifndef CONFIG_KALLSYMS if (i % 5 == 0) - pr_debug("\n "); + printk("%s\n ", loglvl); #endif - pr_debug(" [<%08lx>] %pS\n", addr, (void *)addr); + printk("%s [<%08lx>] %pS\n", loglvl, addr, (void *)addr); i++; } } - pr_debug("\n"); + printk("%s\n", loglvl); } -void show_stack(struct task_struct *task, unsigned long *stack) +void show_stack_loglvl(struct task_struct *task, unsigned long *stack, + const char *loglvl) { unsigned long *p, *endstack; int i; @@ -398,7 +400,12 @@ void show_stack(struct task_struct *task, unsigned long *stack) pr_cont(" %08lx", *p++); } pr_cont("\n"); - show_trace(stack, endstack); + show_trace(stack, endstack, loglvl); +} + +void show_stack(struct task_struct *task, unsigned long *stack) +{ + show_stack_loglvl(task, stack, KERN_DEBUG); } int is_valid_bugaddr(unsigned long addr) -- GitLab From aeeb59d692c1bafecde3dfc47bfd0958fc96dfad Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:30:32 -0700 Subject: [PATCH 1312/1971] csky: add show_stack_loglvl() Currently, the log-level of show_stack() depends on a platform realization. It creates situations where the headers are printed with lower log level or higher than the stacktrace (depending on a platform or user). Furthermore, it forces the logic decision from user to an architecture side. In result, some users as sysrq/kdb/etc are doing tricks with temporary rising console_loglevel while printing their messages. And in result it not only may print unwanted messages from other CPUs, but also omit printing at all in the unlucky case where the printk() was deferred. Introducing log-level parameter and KERN_UNSUPPRESSED [1] seems an easier approach than introducing more printk buffers. Also, it will consolidate printings with headers. Introduce show_stack_loglvl(), that eventually will substitute show_stack(). 
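A side effect worth noting in the c6x hunk that follows: the old body used pr_debug(), which is compiled out unless DEBUG or dynamic debug is enabled, while the new printk("%s...", loglvl) always reaches the log buffer at whatever level the caller passes (KERN_DEBUG from the new show_stack() wrapper). A small stand-alone sketch of the difference, with illustrative stand-in macros:

#include <stdio.h>

#define KERN_DEBUG "<7>"	/* stand-in for the kernel prefix */

/* Roughly what pr_debug() degrades to when DEBUG is not defined:
 * the arguments still get type-checked, but nothing is emitted. */
#define no_printk(fmt, ...) \
	do { if (0) printf(fmt, ##__VA_ARGS__); } while (0)

#ifdef DEBUG
#define pr_debug(fmt, ...) printf(KERN_DEBUG fmt, ##__VA_ARGS__)
#else
#define pr_debug(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#endif

int main(void)
{
	const char *loglvl = KERN_DEBUG;

	pr_debug("Call trace: (old form, silent unless built with -DDEBUG)\n");
	printf("%sCall trace: (new form, always emitted)\n", loglvl);
	return 0;
}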
[1]: https://lore.kernel.org/lkml/20190528002412.1625-1-dima@arista.com/T/#u Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Cc: Guo Ren Link: http://lkml.kernel.org/r/20200418201944.482088-13-dima@arista.com Signed-off-by: Linus Torvalds --- arch/csky/kernel/stacktrace.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/arch/csky/kernel/stacktrace.c b/arch/csky/kernel/stacktrace.c index 92809e1da723c..ca135f13cc138 100644 --- a/arch/csky/kernel/stacktrace.c +++ b/arch/csky/kernel/stacktrace.c @@ -91,14 +91,21 @@ static void notrace walk_stackframe(struct task_struct *task, static bool print_trace_address(unsigned long pc, void *arg) { - print_ip_sym(pc); + print_ip_sym((const char *)arg, pc); return false; } +void show_stack_loglvl(struct task_struct *task, unsigned long *sp, + const char *loglvl) +{ + pr_cont("Call Trace:\n"); + walk_stackframe(task, NULL, print_trace_address, (void *)loglvl); +} + void show_stack(struct task_struct *task, unsigned long *sp) { pr_cont("Call Trace:\n"); - walk_stackframe(task, NULL, print_trace_address, NULL); + walk_stackframe(task, NULL, print_trace_address, KERN_INFO); } static bool save_wchan(unsigned long pc, void *arg) -- GitLab From 0b2ad0c7ae0fb37dee4e8ae36b887c1d4db1f898 Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:30:35 -0700 Subject: [PATCH 1313/1971] h8300: add show_stack_loglvl() Currently, the log-level of show_stack() depends on a platform realization. It creates situations where the headers are printed with lower log level or higher than the stacktrace (depending on a platform or user). Furthermore, it forces the logic decision from user to an architecture side. In result, some users as sysrq/kdb/etc are doing tricks with temporary rising console_loglevel while printing their messages. And in result it not only may print unwanted messages from other CPUs, but also omit printing at all in the unlucky case where the printk() was deferred. Introducing log-level parameter and KERN_UNSUPPRESSED [1] seems an easier approach than introducing more printk buffers. Also, it will consolidate printings with headers. Introduce show_stack_loglvl(), that eventually will substitute show_stack(). 
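The csky change above also shows the other idiom that recurs in this series: where the unwinder already takes an opaque void *arg for its callback, the level string is simply passed through that pointer and cast back inside the callback (ia64, riscv and openrisc do the same later). A stand-alone sketch of the idiom; the names and the KERN_INFO value are stand-ins, not the kernel's:

#include <stdio.h>

#define KERN_INFO "<6>"	/* stand-in prefix */

typedef int (*frame_fn)(unsigned long pc, void *arg);

/* Walks a fake stack and hands every PC to the callback. */
static void walk_stackframe(const unsigned long *pcs, int n,
			    frame_fn fn, void *arg)
{
	for (int i = 0; i < n; i++)
		if (fn(pcs[i], arg))
			break;
}

/* The callback recovers the log level from the opaque argument. */
static int print_trace_address(unsigned long pc, void *arg)
{
	const char *loglvl = arg;

	printf("%s [<%08lx>]\n", loglvl, pc);
	return 0;	/* keep walking */
}

int main(void)
{
	unsigned long pcs[] = { 0xc0100000, 0xc0100040 };

	printf("%sCall Trace:\n", KERN_INFO);
	walk_stackframe(pcs, 2, print_trace_address, (void *)KERN_INFO);
	return 0;
}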
[1]: https://lore.kernel.org/lkml/20190528002412.1625-1-dima@arista.com/T/#u Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Cc: Yoshinori Sato Link: http://lkml.kernel.org/r/20200418201944.482088-14-dima@arista.com Signed-off-by: Linus Torvalds --- arch/h8300/kernel/traps.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/arch/h8300/kernel/traps.c b/arch/h8300/kernel/traps.c index e47a9e0dc278f..6362446563d6a 100644 --- a/arch/h8300/kernel/traps.c +++ b/arch/h8300/kernel/traps.c @@ -115,7 +115,8 @@ void die(const char *str, struct pt_regs *fp, unsigned long err) static int kstack_depth_to_print = 24; -void show_stack(struct task_struct *task, unsigned long *esp) +void show_stack_loglvl(struct task_struct *task, unsigned long *esp, + const char *loglvl) { unsigned long *stack, addr; int i; @@ -125,17 +126,17 @@ void show_stack(struct task_struct *task, unsigned long *esp) stack = esp; - pr_info("Stack from %08lx:", (unsigned long)stack); + printk("%sStack from %08lx:", loglvl, (unsigned long)stack); for (i = 0; i < kstack_depth_to_print; i++) { if (((unsigned long)stack & (THREAD_SIZE - 1)) >= THREAD_SIZE-4) break; if (i % 8 == 0) - pr_info(" "); + printk("%s ", loglvl); pr_cont(" %08lx", *stack++); } - pr_info("\nCall Trace:\n"); + printk("%s\nCall Trace:\n", loglvl); i = 0; stack = esp; while (((unsigned long)stack & (THREAD_SIZE - 1)) < THREAD_SIZE-4) { @@ -150,10 +151,15 @@ void show_stack(struct task_struct *task, unsigned long *esp) */ if (check_kernel_text(addr)) { if (i % 4 == 0) - pr_info(" "); + printk("%s ", loglvl); pr_cont(" [<%08lx>]", addr); i++; } } - pr_info("\n"); + printk("%s\n", loglvl); +} + +void show_stack(struct task_struct *task, unsigned long *esp) +{ + show_stack_loglvl(task, esp, KERN_INFO); } -- GitLab From d1e9086dd99b2501a692e9dbccdb211ac6e09d14 Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:30:38 -0700 Subject: [PATCH 1314/1971] hexagon: add show_stack_loglvl() Currently, the log-level of show_stack() depends on a platform realization. It creates situations where the headers are printed with lower log level or higher than the stacktrace (depending on a platform or user). Furthermore, it forces the logic decision from user to an architecture side. In result, some users as sysrq/kdb/etc are doing tricks with temporary rising console_loglevel while printing their messages. And in result it not only may print unwanted messages from other CPUs, but also omit printing at all in the unlucky case where the printk() was deferred. Introducing log-level parameter and KERN_UNSUPPRESSED [1] seems an easier approach than introducing more printk buffers. Also, it will consolidate printings with headers. Introduce show_stack_loglvl(), that eventually will substitute show_stack(). As a good side-effect die() now prints the stacktrace with KERN_EMERG aligned with other messages. 
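One detail in the hexagon conversion below: only printk() calls that start a new message grow the loglvl prefix, while fragments emitted with KERN_CONT are left untouched, since a continuation is appended to the record it follows and carries no level of its own. A small user-space sketch of that split, with stand-in macros:

#include <stdio.h>

/* Stand-ins: a level prefix for new messages, nothing for continuations. */
#define KERN_INFO "<6>"
#define KERN_CONT ""

static void report_frame(const char *loglvl, void *fp, int out_of_bounds)
{
	/* New message: carries the caller's level. */
	printf("%s[%p] some_function + 0x10", loglvl, fp);

	/* Continuations extend that message, so no level is attached. */
	if (out_of_bounds)
		printf(KERN_CONT " (FP out of bounds!)");
	printf(KERN_CONT "\n");
}

int main(void)
{
	int x;

	report_frame(KERN_INFO, &x, 1);
	return 0;
}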
[1]: https://lore.kernel.org/lkml/20190528002412.1625-1-dima@arista.com/T/#u Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Acked-by: Brian Cain Link: http://lkml.kernel.org/r/20200418201944.482088-15-dima@arista.com Signed-off-by: Linus Torvalds --- arch/hexagon/kernel/traps.c | 31 +++++++++++++++++-------------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/arch/hexagon/kernel/traps.c b/arch/hexagon/kernel/traps.c index 69c623b14ddd2..a8a3a210d7810 100644 --- a/arch/hexagon/kernel/traps.c +++ b/arch/hexagon/kernel/traps.c @@ -79,7 +79,7 @@ static const char *ex_name(int ex) } static void do_show_stack(struct task_struct *task, unsigned long *fp, - unsigned long ip) + unsigned long ip, const char *loglvl) { int kstack_depth_to_print = 24; unsigned long offset, size; @@ -93,9 +93,8 @@ static void do_show_stack(struct task_struct *task, unsigned long *fp, if (task == NULL) task = current; - printk(KERN_INFO "CPU#%d, %s/%d, Call Trace:\n", - raw_smp_processor_id(), task->comm, - task_pid_nr(task)); + printk("%sCPU#%d, %s/%d, Call Trace:\n", loglvl, raw_smp_processor_id(), + task->comm, task_pid_nr(task)); if (fp == NULL) { if (task == current) { @@ -108,7 +107,7 @@ static void do_show_stack(struct task_struct *task, unsigned long *fp, } if ((((unsigned long) fp) & 0x3) || ((unsigned long) fp < 0x1000)) { - printk(KERN_INFO "-- Corrupt frame pointer %p\n", fp); + printk("%s-- Corrupt frame pointer %p\n", loglvl, fp); return; } @@ -125,8 +124,7 @@ static void do_show_stack(struct task_struct *task, unsigned long *fp, name = kallsyms_lookup(ip, &size, &offset, &modname, tmpstr); - printk(KERN_INFO "[%p] 0x%lx: %s + 0x%lx", fp, ip, name, - offset); + printk("%s[%p] 0x%lx: %s + 0x%lx", loglvl, fp, ip, name, offset); if (((unsigned long) fp < low) || (high < (unsigned long) fp)) printk(KERN_CONT " (FP out of bounds!)"); if (modname) @@ -136,8 +134,7 @@ static void do_show_stack(struct task_struct *task, unsigned long *fp, newfp = (unsigned long *) *fp; if (((unsigned long) newfp) & 0x3) { - printk(KERN_INFO "-- Corrupt frame pointer %p\n", - newfp); + printk("%s-- Corrupt frame pointer %p\n", loglvl, newfp); break; } @@ -147,7 +144,7 @@ static void do_show_stack(struct task_struct *task, unsigned long *fp, + 8); if (regs->syscall_nr != -1) { - printk(KERN_INFO "-- trap0 -- syscall_nr: %ld", + printk("%s-- trap0 -- syscall_nr: %ld", loglvl, regs->syscall_nr); printk(KERN_CONT " psp: %lx elr: %lx\n", pt_psp(regs), pt_elr(regs)); @@ -155,7 +152,7 @@ static void do_show_stack(struct task_struct *task, unsigned long *fp, } else { /* really want to see more ... 
*/ kstack_depth_to_print += 6; - printk(KERN_INFO "-- %s (0x%lx) badva: %lx\n", + printk("%s-- %s (0x%lx) badva: %lx\n", loglvl, ex_name(pt_cause(regs)), pt_cause(regs), pt_badva(regs)); } @@ -178,10 +175,16 @@ static void do_show_stack(struct task_struct *task, unsigned long *fp, } } -void show_stack(struct task_struct *task, unsigned long *fp) +void show_stack_loglvl(struct task_struct *task, unsigned long *fp, + const char *loglvl) { /* Saved link reg is one word above FP */ - do_show_stack(task, fp, 0); + do_show_stack(task, fp, 0, loglvl); +} + +void show_stack(struct task_struct *task, unsigned long *fp) +{ + show_stack_loglvl(task, fp, 0, KERN_INFO); } int die(const char *str, struct pt_regs *regs, long err) @@ -207,7 +210,7 @@ int die(const char *str, struct pt_regs *regs, long err) print_modules(); show_regs(regs); - do_show_stack(current, ®s->r30, pt_elr(regs)); + do_show_stack(current, ®s->r30, pt_elr(regs), KERN_EMERG); bust_spinlocks(0); add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); -- GitLab From c261ad6ee80e337fb01a9cee91241f195c80f3d1 Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:30:41 -0700 Subject: [PATCH 1315/1971] ia64: pass log level as arg into ia64_do_show_stack() Currently, the log-level of show_stack() depends on a platform realization. It creates situations where the headers are printed with lower log level or higher than the stacktrace (depending on a platform or user). Furthermore, it forces the logic decision from user to an architecture side. In result, some users as sysrq/kdb/etc are doing tricks with temporary rising console_loglevel while printing their messages. And in result it not only may print unwanted messages from other CPUs, but also omit printing at all in the unlucky case where the printk() was deferred. Introducing log-level parameter and KERN_UNSUPPRESSED [1] seems an easier approach than introducing more printk buffers. Also, it will consolidate printings with headers. Add log level argument to ia64_do_show_stack() as a preparation to introduce show_stack_loglvl(). Also, make ia64_do_show_stack() static as it's not used outside. 
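Looking back at the hexagon hunk above, the new show_stack() wrapper calls show_stack_loglvl() with four arguments (task, fp, 0, KERN_INFO) although the function was just defined with three; the stray 0 looks like a leftover from the old do_show_stack() call and would not compile as shown. The wrapper was presumably meant to read:

void show_stack(struct task_struct *task, unsigned long *fp)
{
	show_stack_loglvl(task, fp, KERN_INFO);
}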
[1]: https://lore.kernel.org/lkml/20190528002412.1625-1-dima@arista.com/T/#u Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Cc: Fenghua Yu Cc: Tony Luck Link: http://lkml.kernel.org/r/20200418201944.482088-16-dima@arista.com Signed-off-by: Linus Torvalds --- arch/ia64/include/asm/ptrace.h | 1 - arch/ia64/kernel/process.c | 13 +++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/arch/ia64/include/asm/ptrace.h b/arch/ia64/include/asm/ptrace.h index 7ff574d56429c..b3aa460901012 100644 --- a/arch/ia64/include/asm/ptrace.h +++ b/arch/ia64/include/asm/ptrace.h @@ -114,7 +114,6 @@ static inline long regs_return_value(struct pt_regs *regs) struct task_struct; /* forward decl */ struct unw_frame_info; /* forward decl */ - extern void ia64_do_show_stack (struct unw_frame_info *, void *); extern unsigned long ia64_get_user_rbs_end (struct task_struct *, struct pt_regs *, unsigned long *); extern long ia64_peek (struct task_struct *, struct switch_stack *, unsigned long, diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index 10cb9382ab76c..332c6dfe73338 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -64,12 +64,13 @@ EXPORT_SYMBOL(boot_option_idle_override); void (*pm_power_off) (void); EXPORT_SYMBOL(pm_power_off); -void +static void ia64_do_show_stack (struct unw_frame_info *info, void *arg) { unsigned long ip, sp, bsp; + const char *loglvl = arg; - printk("\nCall Trace:\n"); + printk("%s\nCall Trace:\n", loglvl); do { unw_get_ip(info, &ip); if (ip == 0) @@ -77,9 +78,9 @@ ia64_do_show_stack (struct unw_frame_info *info, void *arg) unw_get_sp(info, &sp); unw_get_bsp(info, &bsp); - printk(" [<%016lx>] %pS\n" + printk("%s [<%016lx>] %pS\n" " sp=%016lx bsp=%016lx\n", - ip, (void *)ip, sp, bsp); + loglvl, ip, (void *)ip, sp, bsp); } while (unw_unwind(info) >= 0); } @@ -87,12 +88,12 @@ void show_stack (struct task_struct *task, unsigned long *sp) { if (!task) - unw_init_running(ia64_do_show_stack, NULL); + unw_init_running(ia64_do_show_stack, (void *)KERN_DEFAULT); else { struct unw_frame_info info; unw_init_from_blocked_task(&info, task); - ia64_do_show_stack(&info, NULL); + ia64_do_show_stack(&info, (void *)KERN_DEFAULT); } } -- GitLab From ffdac29e40545cfb21d347c67daeb72541792ec8 Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:30:44 -0700 Subject: [PATCH 1316/1971] ia64: add show_stack_loglvl() Currently, the log-level of show_stack() depends on a platform realization. It creates situations where the headers are printed with lower log level or higher than the stacktrace (depending on a platform or user). Furthermore, it forces the logic decision from user to an architecture side. In result, some users as sysrq/kdb/etc are doing tricks with temporary rising console_loglevel while printing their messages. And in result it not only may print unwanted messages from other CPUs, but also omit printing at all in the unlucky case where the printk() was deferred. Introducing log-level parameter and KERN_UNSUPPRESSED [1] seems an easier approach than introducing more printk buffers. Also, it will consolidate printings with headers. Introduce show_stack_loglvl(), that eventually will substitute show_stack(). 
[1]: https://lore.kernel.org/lkml/20190528002412.1625-1-dima@arista.com/T/#u Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Cc: Fenghua Yu Cc: Tony Luck Link: http://lkml.kernel.org/r/20200418201944.482088-17-dima@arista.com Signed-off-by: Linus Torvalds --- arch/ia64/kernel/process.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index 332c6dfe73338..913d9a01cbf9b 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -85,18 +85,25 @@ ia64_do_show_stack (struct unw_frame_info *info, void *arg) } void -show_stack (struct task_struct *task, unsigned long *sp) +show_stack_loglvl (struct task_struct *task, unsigned long *sp, + const char *loglvl) { if (!task) - unw_init_running(ia64_do_show_stack, (void *)KERN_DEFAULT); + unw_init_running(ia64_do_show_stack, (void *)loglvl); else { struct unw_frame_info info; unw_init_from_blocked_task(&info, task); - ia64_do_show_stack(&info, (void *)KERN_DEFAULT); + ia64_do_show_stack(&info, (void *)loglvl); } } +void +show_stack (struct task_struct *task, unsigned long *sp) +{ + show_stack_loglvl(task, sp, KERN_DEFAULT); +} + void show_regs (struct pt_regs *regs) { -- GitLab From ce23c47a5632612dfaa80bd8f861256a419c0abf Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:30:47 -0700 Subject: [PATCH 1317/1971] m68k: add show_stack_loglvl() Currently, the log-level of show_stack() depends on a platform realization. It creates situations where the headers are printed with lower log level or higher than the stacktrace (depending on a platform or user). Furthermore, it forces the logic decision from user to an architecture side. In result, some users as sysrq/kdb/etc are doing tricks with temporary rising console_loglevel while printing their messages. And in result it not only may print unwanted messages from other CPUs, but also omit printing at all in the unlucky case where the printk() was deferred. Introducing log-level parameter and KERN_UNSUPPRESSED [1] seems an easier approach than introducing more printk buffers. Also, it will consolidate printings with headers. Introduce show_stack_loglvl(), that eventually will substitute show_stack(). 
[1]: https://lore.kernel.org/lkml/20190528002412.1625-1-dima@arista.com/T/#u Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Cc: Geert Uytterhoeven Link: http://lkml.kernel.org/r/20200418201944.482088-18-dima@arista.com Signed-off-by: Linus Torvalds --- arch/m68k/kernel/traps.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c index 344f93d36a9a0..ffcc5ec4fac3b 100644 --- a/arch/m68k/kernel/traps.c +++ b/arch/m68k/kernel/traps.c @@ -811,13 +811,13 @@ asmlinkage void buserr_c(struct frame *fp) static int kstack_depth_to_print = 48; -void show_trace(unsigned long *stack) +static void show_trace(unsigned long *stack, const char *loglvl) { unsigned long *endstack; unsigned long addr; int i; - pr_info("Call Trace:"); + printk("%sCall Trace:", loglvl); addr = (unsigned long)stack + THREAD_SIZE - 1; endstack = (unsigned long *)(addr & -THREAD_SIZE); i = 0; @@ -935,7 +935,8 @@ void show_registers(struct pt_regs *regs) pr_cont("\n"); } -void show_stack(struct task_struct *task, unsigned long *stack) +void show_stack_loglvl(struct task_struct *task, unsigned long *stack, + const char *loglvl) { unsigned long *p; unsigned long *endstack; @@ -949,7 +950,7 @@ void show_stack(struct task_struct *task, unsigned long *stack) } endstack = (unsigned long *)(((unsigned long)stack + THREAD_SIZE - 1) & -THREAD_SIZE); - pr_info("Stack from %08lx:", (unsigned long)stack); + printk("%sStack from %08lx:", loglvl, (unsigned long)stack); p = stack; for (i = 0; i < kstack_depth_to_print; i++) { if (p + 1 > endstack) @@ -959,7 +960,12 @@ void show_stack(struct task_struct *task, unsigned long *stack) pr_cont(" %08lx", *p++); } pr_cont("\n"); - show_trace(stack); + show_trace(stack, loglvl); +} + +void show_stack(struct task_struct *task, unsigned long *stack) +{ + show_stack_loglvl(task, stack, KERN_INFO); } /* -- GitLab From 77530a5277bcab0433d0f68af11e04a5c4d79039 Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:30:50 -0700 Subject: [PATCH 1318/1971] microblaze: add loglvl to microblaze_unwind_inner() Currently, the log-level of show_stack() depends on a platform realization. It creates situations where the headers are printed with lower log level or higher than the stacktrace (depending on a platform or user). Furthermore, it forces the logic decision from user to an architecture side. In result, some users as sysrq/kdb/etc are doing tricks with temporary rising console_loglevel while printing their messages. And in result it not only may print unwanted messages from other CPUs, but also omit printing at all in the unlucky case where the printk() was deferred. Introducing log-level parameter and KERN_UNSUPPRESSED [1] seems an easier approach than introducing more printk buffers. Also, it will consolidate printings with headers. Add log level argument to microblaze_unwind_inner() as a preparation for introducing show_stack_loglvl(). 
[1]: https://lore.kernel.org/lkml/20190528002412.1625-1-dima@arista.com/T/#u Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Cc: Michal Simek Link: http://lkml.kernel.org/r/20200418201944.482088-19-dima@arista.com Signed-off-by: Linus Torvalds --- arch/microblaze/kernel/unwind.c | 38 ++++++++++++++++++++------------- 1 file changed, 23 insertions(+), 15 deletions(-) diff --git a/arch/microblaze/kernel/unwind.c b/arch/microblaze/kernel/unwind.c index 4241cdd28ee77..804bf0c99d8bf 100644 --- a/arch/microblaze/kernel/unwind.c +++ b/arch/microblaze/kernel/unwind.c @@ -154,7 +154,8 @@ static int lookup_prev_stack_frame(unsigned long fp, unsigned long pc, static void microblaze_unwind_inner(struct task_struct *task, unsigned long pc, unsigned long fp, unsigned long leaf_return, - struct stack_trace *trace); + struct stack_trace *trace, + const char *loglvl); /** * unwind_trap - Unwind through a system trap, that stored previous state @@ -162,16 +163,18 @@ static void microblaze_unwind_inner(struct task_struct *task, */ #ifdef CONFIG_MMU static inline void unwind_trap(struct task_struct *task, unsigned long pc, - unsigned long fp, struct stack_trace *trace) + unsigned long fp, struct stack_trace *trace, + const char *loglvl) { /* To be implemented */ } #else static inline void unwind_trap(struct task_struct *task, unsigned long pc, - unsigned long fp, struct stack_trace *trace) + unsigned long fp, struct stack_trace *trace, + const char *loglvl) { const struct pt_regs *regs = (const struct pt_regs *) fp; - microblaze_unwind_inner(task, regs->pc, regs->r1, regs->r15, trace); + microblaze_unwind_inner(task, regs->pc, regs->r1, regs->r15, trace, loglvl); } #endif @@ -184,11 +187,13 @@ static inline void unwind_trap(struct task_struct *task, unsigned long pc, * the caller's return address. * @trace : Where to store stack backtrace (PC values). * NULL == print backtrace to kernel log + * @loglvl : Used for printk log level if (trace == NULL). */ static void microblaze_unwind_inner(struct task_struct *task, unsigned long pc, unsigned long fp, unsigned long leaf_return, - struct stack_trace *trace) + struct stack_trace *trace, + const char *loglvl) { int ofs = 0; @@ -214,11 +219,11 @@ static void microblaze_unwind_inner(struct task_struct *task, const struct pt_regs *regs = (const struct pt_regs *) fp; #endif - pr_info("HW EXCEPTION\n"); + printk("%sHW EXCEPTION\n", loglvl); #ifndef CONFIG_MMU microblaze_unwind_inner(task, regs->r17 - 4, fp + EX_HANDLER_STACK_SIZ, - regs->r15, trace); + regs->r15, trace, loglvl); #endif return; } @@ -228,8 +233,8 @@ static void microblaze_unwind_inner(struct task_struct *task, if ((return_to >= handler->start_addr) && (return_to <= handler->end_addr)) { if (!trace) - pr_info("%s\n", handler->trap_name); - unwind_trap(task, pc, fp, trace); + printk("%s%s\n", loglvl, handler->trap_name); + unwind_trap(task, pc, fp, trace, loglvl); return; } } @@ -248,13 +253,13 @@ static void microblaze_unwind_inner(struct task_struct *task, } else { /* Have we reached userland? 
*/ if (unlikely(pc == task_pt_regs(task)->pc)) { - pr_info("[<%p>] PID %lu [%s]\n", - (void *) pc, + printk("%s[<%p>] PID %lu [%s]\n", + loglvl, (void *) pc, (unsigned long) task->pid, task->comm); break; } else - print_ip_sym(KERN_INFO, pc); + print_ip_sym(loglvl, pc); } /* Stop when we reach anything not part of the kernel */ @@ -285,11 +290,13 @@ static void microblaze_unwind_inner(struct task_struct *task, */ void microblaze_unwind(struct task_struct *task, struct stack_trace *trace) { + const char *loglvl = KERN_INFO; + if (task) { if (task == current) { const struct pt_regs *regs = task_pt_regs(task); microblaze_unwind_inner(task, regs->pc, regs->r1, - regs->r15, trace); + regs->r15, trace, loglvl); } else { struct thread_info *thread_info = (struct thread_info *)(task->stack); @@ -299,7 +306,8 @@ void microblaze_unwind(struct task_struct *task, struct stack_trace *trace) microblaze_unwind_inner(task, (unsigned long) &_switch_to, cpu_context->r1, - cpu_context->r15, trace); + cpu_context->r15, + trace, loglvl); } } else { unsigned long pc, fp; @@ -314,7 +322,7 @@ void microblaze_unwind(struct task_struct *task, struct stack_trace *trace) ); /* Since we are not a leaf function, use leaf_return = 0 */ - microblaze_unwind_inner(current, pc, fp, 0, trace); + microblaze_unwind_inner(current, pc, fp, 0, trace, loglvl); } } -- GitLab From 14b0dd870f6f3b28fad1235b70d1a692db1d6a2f Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:30:53 -0700 Subject: [PATCH 1319/1971] microblaze: add loglvl to microblaze_unwind() Currently, the log-level of show_stack() depends on a platform realization. It creates situations where the headers are printed with lower log level or higher than the stacktrace (depending on a platform or user). Furthermore, it forces the logic decision from user to an architecture side. In result, some users as sysrq/kdb/etc are doing tricks with temporary rising console_loglevel while printing their messages. And in result it not only may print unwanted messages from other CPUs, but also omit printing at all in the unlucky case where the printk() was deferred. Introducing log-level parameter and KERN_UNSUPPRESSED [1] seems an easier approach than introducing more printk buffers. Also, it will consolidate printings with headers. Add log level parameter to microblaze_unwind() as a preparation to add show_stack_loglvl(). 
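In the microblaze hunk below the unwinder serves two purposes: with a non-NULL struct stack_trace it only records PC values, and only with trace == NULL does it print, so the loglvl argument matters solely in the printing case; that is why save_stack_trace() can pass an empty string. A compact user-space sketch of that dual-mode shape (types, sizes and names are illustrative):

#include <stdio.h>

#define KERN_INFO "<6>"

struct stack_trace {
	unsigned long entries[16];
	unsigned int nr_entries;
};

/* Either records the frame (trace != NULL) or prints it (trace == NULL). */
static void unwind_one(unsigned long pc, struct stack_trace *trace,
		       const char *loglvl)
{
	if (trace) {
		if (trace->nr_entries < 16)
			trace->entries[trace->nr_entries++] = pc;
	} else {
		printf("%s [<%08lx>]\n", loglvl, pc);
	}
}

int main(void)
{
	struct stack_trace trace = { .nr_entries = 0 };

	unwind_one(0xc0201000, &trace, "");		/* saving: level unused */
	unwind_one(0xc0201000, NULL, KERN_INFO);	/* printing: level used */
	printf("saved %u entries\n", trace.nr_entries);
	return 0;
}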
[1]: https://lore.kernel.org/lkml/20190528002412.1625-1-dima@arista.com/T/#u Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Cc: Michal Simek Link: http://lkml.kernel.org/r/20200418201944.482088-20-dima@arista.com Signed-off-by: Linus Torvalds --- arch/microblaze/include/asm/unwind.h | 3 ++- arch/microblaze/kernel/stacktrace.c | 4 ++-- arch/microblaze/kernel/traps.c | 2 +- arch/microblaze/kernel/unwind.c | 6 +++--- 4 files changed, 8 insertions(+), 7 deletions(-) diff --git a/arch/microblaze/include/asm/unwind.h b/arch/microblaze/include/asm/unwind.h index c327d673622af..3db81777a8872 100644 --- a/arch/microblaze/include/asm/unwind.h +++ b/arch/microblaze/include/asm/unwind.h @@ -20,7 +20,8 @@ extern struct trap_handler_info microblaze_trap_handlers; extern const char _hw_exception_handler; extern const char ex_handler_unhandled; -void microblaze_unwind(struct task_struct *task, struct stack_trace *trace); +void microblaze_unwind(struct task_struct *task, struct stack_trace *trace, + const char *loglvl); #endif /* __MICROBLAZE_UNWIND_H */ diff --git a/arch/microblaze/kernel/stacktrace.c b/arch/microblaze/kernel/stacktrace.c index b4debe283a79e..b266c4d6ed9df 100644 --- a/arch/microblaze/kernel/stacktrace.c +++ b/arch/microblaze/kernel/stacktrace.c @@ -20,12 +20,12 @@ void save_stack_trace(struct stack_trace *trace) { /* Exclude our helper functions from the trace*/ trace->skip += 2; - microblaze_unwind(NULL, trace); + microblaze_unwind(NULL, trace, ""); } EXPORT_SYMBOL_GPL(save_stack_trace); void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) { - microblaze_unwind(tsk, trace); + microblaze_unwind(tsk, trace, ""); } EXPORT_SYMBOL_GPL(save_stack_trace_tsk); diff --git a/arch/microblaze/kernel/traps.c b/arch/microblaze/kernel/traps.c index 45bbba9d919f9..be726ee120fb5 100644 --- a/arch/microblaze/kernel/traps.c +++ b/arch/microblaze/kernel/traps.c @@ -68,7 +68,7 @@ void show_stack(struct task_struct *task, unsigned long *sp) print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 32, 4, (void *)fp, words_to_show << 2, 0); pr_info("\n\nCall Trace:\n"); - microblaze_unwind(task, NULL); + microblaze_unwind(task, NULL, KERN_INFO); pr_info("\n"); if (!task) diff --git a/arch/microblaze/kernel/unwind.c b/arch/microblaze/kernel/unwind.c index 804bf0c99d8bf..778a761af0a7a 100644 --- a/arch/microblaze/kernel/unwind.c +++ b/arch/microblaze/kernel/unwind.c @@ -287,11 +287,11 @@ static void microblaze_unwind_inner(struct task_struct *task, * @task : Task whose stack we are to unwind (NULL == current) * @trace : Where to store stack backtrace (PC values). * NULL == print backtrace to kernel log + * @loglvl : Used for printk log level if (trace == NULL). */ -void microblaze_unwind(struct task_struct *task, struct stack_trace *trace) +void microblaze_unwind(struct task_struct *task, struct stack_trace *trace, + const char *loglvl) { - const char *loglvl = KERN_INFO; - if (task) { if (task == current) { const struct pt_regs *regs = task_pt_regs(task); -- GitLab From 35f3968b499c6dd026a828933b57ebdb11e74cff Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:30:56 -0700 Subject: [PATCH 1320/1971] microblaze: add show_stack_loglvl() Currently, the log-level of show_stack() depends on a platform realization. It creates situations where the headers are printed with lower log level or higher than the stacktrace (depending on a platform or user). Furthermore, it forces the logic decision from user to an architecture side. 
In result, some users as sysrq/kdb/etc are doing tricks with temporary rising console_loglevel while printing their messages. And in result it not only may print unwanted messages from other CPUs, but also omit printing at all in the unlucky case where the printk() was deferred. Introducing log-level parameter and KERN_UNSUPPRESSED [1] seems an easier approach than introducing more printk buffers. Also, it will consolidate printings with headers. Introduce show_stack_loglvl(), that eventually will substitute show_stack(). [1]: https://lore.kernel.org/lkml/20190528002412.1625-1-dima@arista.com/T/#u Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Cc: Michal Simek Link: http://lkml.kernel.org/r/20200418201944.482088-21-dima@arista.com Signed-off-by: Linus Torvalds --- arch/microblaze/kernel/traps.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/arch/microblaze/kernel/traps.c b/arch/microblaze/kernel/traps.c index be726ee120fb5..149ae534937ef 100644 --- a/arch/microblaze/kernel/traps.c +++ b/arch/microblaze/kernel/traps.c @@ -31,7 +31,8 @@ static int __init kstack_setup(char *s) } __setup("kstack=", kstack_setup); -void show_stack(struct task_struct *task, unsigned long *sp) +void show_stack_loglvl(struct task_struct *task, unsigned long *sp, + const char *loglvl) { unsigned long words_to_show; u32 fp = (u32) sp; @@ -50,7 +51,7 @@ void show_stack(struct task_struct *task, unsigned long *sp) if (kstack_depth_to_print && (words_to_show > kstack_depth_to_print)) words_to_show = kstack_depth_to_print; - pr_info("Kernel Stack:\n"); + printk("%sKernel Stack:\n", loglvl); /* * Make the first line an 'odd' size if necessary to get @@ -65,14 +66,19 @@ void show_stack(struct task_struct *task, unsigned long *sp) words_to_show -= line1_words; } } - print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 32, 4, (void *)fp, + print_hex_dump(loglvl, "", DUMP_PREFIX_ADDRESS, 32, 4, (void *)fp, words_to_show << 2, 0); - pr_info("\n\nCall Trace:\n"); - microblaze_unwind(task, NULL, KERN_INFO); - pr_info("\n"); + printk("%s\n\nCall Trace:\n", loglvl); + microblaze_unwind(task, NULL, loglvl); + printk("%s\n", loglvl); if (!task) task = current; debug_show_held_locks(task); } + +void show_stack(struct task_struct *task, unsigned long *sp) +{ + show_stack_loglvl(task, sp, KERN_INFO); +} -- GitLab From 96f0458a96892dd2f1589a7517724d541e1c4520 Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:30:59 -0700 Subject: [PATCH 1321/1971] mips: add show_stack_loglvl() Currently, the log-level of show_stack() depends on a platform realization. It creates situations where the headers are printed with lower log level or higher than the stacktrace (depending on a platform or user). Furthermore, it forces the logic decision from user to an architecture side. In result, some users as sysrq/kdb/etc are doing tricks with temporary rising console_loglevel while printing their messages. And in result it not only may print unwanted messages from other CPUs, but also omit printing at all in the unlucky case where the printk() was deferred. Introducing log-level parameter and KERN_UNSUPPRESSED [1] seems an easier approach than introducing more printk buffers. Also, it will consolidate printings with headers. Introduce show_stack_loglvl(), that eventually will substitute show_stack(). 
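A build nit in the mips hunk that follows: the new show_stack() wrapper calls show_stack_loglvl(task, sp, KERN_DEFAULT) without a trailing semicolon, so it would not compile as shown (the parisc wrapper later in this series has the same slip). The wrapper was presumably intended to read:

void show_stack(struct task_struct *task, unsigned long *sp)
{
	show_stack_loglvl(task, sp, KERN_DEFAULT);
}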
[1]: https://lore.kernel.org/lkml/20190528002412.1625-1-dima@arista.com/T/#u Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Cc: James Hogan Cc: Paul Burton Cc: Ralf Baechle Link: http://lkml.kernel.org/r/20200418201944.482088-22-dima@arista.com Signed-off-by: Linus Torvalds --- arch/mips/kernel/traps.c | 41 +++++++++++++++++++++++----------------- 1 file changed, 24 insertions(+), 17 deletions(-) diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 210fea63de750..e49040739b61d 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -108,26 +108,26 @@ void (*board_bind_eic_interrupt)(int irq, int regset); void (*board_ebase_setup)(void); void(*board_cache_error_setup)(void); -static void show_raw_backtrace(unsigned long reg29) +static void show_raw_backtrace(unsigned long reg29, const char *loglvl) { unsigned long *sp = (unsigned long *)(reg29 & ~3); unsigned long addr; - printk("Call Trace:"); + printk("%sCall Trace:", loglvl); #ifdef CONFIG_KALLSYMS - printk("\n"); + printk("%s\n", loglvl); #endif while (!kstack_end(sp)) { unsigned long __user *p = (unsigned long __user *)(unsigned long)sp++; if (__get_user(addr, p)) { - printk(" (Bad stack address)"); + printk("%s (Bad stack address)", loglvl); break; } if (__kernel_text_address(addr)) - print_ip_sym(KERN_DEFAULT, addr); + print_ip_sym(loglvl, addr); } - printk("\n"); + printk("%s\n", loglvl); } #ifdef CONFIG_KALLSYMS @@ -140,7 +140,8 @@ static int __init set_raw_show_trace(char *str) __setup("raw_show_trace", set_raw_show_trace); #endif -static void show_backtrace(struct task_struct *task, const struct pt_regs *regs) +static void show_backtrace(struct task_struct *task, const struct pt_regs *regs, + const char *loglvl) { unsigned long sp = regs->regs[29]; unsigned long ra = regs->regs[31]; @@ -150,12 +151,12 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs) task = current; if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) { - show_raw_backtrace(sp); + show_raw_backtrace(sp, loglvl); return; } - printk("Call Trace:\n"); + printk("%sCall Trace:\n", loglvl); do { - print_ip_sym(KERN_DEFAULT, pc); + print_ip_sym(loglvl, pc); pc = unwind_stack(task, &sp, pc, &ra); } while (pc); pr_cont("\n"); @@ -166,19 +167,19 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs) * with at least a bit of error checking ... */ static void show_stacktrace(struct task_struct *task, - const struct pt_regs *regs) + const struct pt_regs *regs, const char *loglvl) { const int field = 2 * sizeof(unsigned long); long stackdata; int i; unsigned long __user *sp = (unsigned long __user *)regs->regs[29]; - printk("Stack :"); + printk("%sStack :", loglvl); i = 0; while ((unsigned long) sp & (PAGE_SIZE - 1)) { if (i && ((i % (64 / field)) == 0)) { pr_cont("\n"); - printk(" "); + printk("%s ", loglvl); } if (i > 39) { pr_cont(" ..."); @@ -194,10 +195,11 @@ static void show_stacktrace(struct task_struct *task, i++; } pr_cont("\n"); - show_backtrace(task, regs); + show_backtrace(task, regs, loglvl); } -void show_stack(struct task_struct *task, unsigned long *sp) +void show_stack_loglvl(struct task_struct *task, unsigned long *sp, + const char *loglvl) { struct pt_regs regs; mm_segment_t old_fs = get_fs(); @@ -221,10 +223,15 @@ void show_stack(struct task_struct *task, unsigned long *sp) * the stack in the kernel (not user) address space. 
*/ set_fs(KERNEL_DS); - show_stacktrace(task, ®s); + show_stacktrace(task, ®s, loglvl); set_fs(old_fs); } +void show_stack(struct task_struct *task, unsigned long *sp) +{ + show_stack_loglvl(task, sp, KERN_DEFAULT) +} + static void show_code(unsigned int __user *pc) { long i; @@ -373,7 +380,7 @@ void show_registers(struct pt_regs *regs) if (!user_mode(regs)) /* Necessary for getting the correct stack content */ set_fs(KERNEL_DS); - show_stacktrace(current, regs); + show_stacktrace(current, regs, KERN_DEFAULT); show_code((unsigned int __user *) regs->cp0_epc); printk("\n"); set_fs(old_fs); -- GitLab From 18a4753f90175a6acfefd4cf3da1bcb163e12216 Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:31:02 -0700 Subject: [PATCH 1322/1971] nds32: add show_stack_loglvl() Currently, the log-level of show_stack() depends on a platform realization. It creates situations where the headers are printed with lower log level or higher than the stacktrace (depending on a platform or user). Furthermore, it forces the logic decision from user to an architecture side. In result, some users as sysrq/kdb/etc are doing tricks with temporary rising console_loglevel while printing their messages. And in result it not only may print unwanted messages from other CPUs, but also omit printing at all in the unlucky case where the printk() was deferred. Introducing log-level parameter and KERN_UNSUPPRESSED [1] seems an easier approach than introducing more printk buffers. Also, it will consolidate printings with headers. Introduce show_stack_loglvl(), that eventually will substitute show_stack(). [1]: https://lore.kernel.org/lkml/20190528002412.1625-1-dima@arista.com/T/#u Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Cc: Greentime Hu Cc: Vincent Chen Link: http://lkml.kernel.org/r/20200418201944.482088-23-dima@arista.com Signed-off-by: Linus Torvalds --- arch/nds32/kernel/traps.c | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/arch/nds32/kernel/traps.c b/arch/nds32/kernel/traps.c index 40625760a125e..90f12582c218a 100644 --- a/arch/nds32/kernel/traps.c +++ b/arch/nds32/kernel/traps.c @@ -97,18 +97,19 @@ static void dump_instr(struct pt_regs *regs) } #define LOOP_TIMES (100) -static void __dump(struct task_struct *tsk, unsigned long *base_reg) +static void __dump(struct task_struct *tsk, unsigned long *base_reg, + const char *loglvl) { unsigned long ret_addr; int cnt = LOOP_TIMES, graph = 0; - pr_emerg("Call Trace:\n"); + printk("%sCall Trace:\n", loglvl); if (!IS_ENABLED(CONFIG_FRAME_POINTER)) { while (!kstack_end(base_reg)) { ret_addr = *base_reg++; if (__kernel_text_address(ret_addr)) { ret_addr = ftrace_graph_ret_addr( tsk, &graph, ret_addr, NULL); - print_ip_sym(KERN_EMERG, ret_addr); + print_ip_sym(loglvl, ret_addr); } if (--cnt < 0) break; @@ -124,17 +125,18 @@ static void __dump(struct task_struct *tsk, unsigned long *base_reg) ret_addr = ftrace_graph_ret_addr( tsk, &graph, ret_addr, NULL); - print_ip_sym(KERN_EMERG, ret_addr); + print_ip_sym(loglvl, ret_addr); } if (--cnt < 0) break; base_reg = (unsigned long *)next_fp; } } - pr_emerg("\n"); + printk("%s\n", loglvl); } -void show_stack(struct task_struct *tsk, unsigned long *sp) +void show_stack_loglvl(struct task_struct *tsk, unsigned long *sp, + const char *loglvl) { unsigned long *base_reg; @@ -151,10 +153,15 @@ void show_stack(struct task_struct *tsk, unsigned long *sp) else __asm__ __volatile__("\tori\t%0, $fp, #0\n":"=r"(base_reg)); } - __dump(tsk, base_reg); + __dump(tsk, base_reg, 
loglvl); barrier(); } +void show_stack(struct task_struct *tsk, unsigned long *sp) +{ + show_stack_loglvl(tsk, sp, KERN_EMERG); +} + DEFINE_SPINLOCK(die_lock); /* -- GitLab From 351dd61c3821fe9b6702205d3a894004a9674295 Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:31:05 -0700 Subject: [PATCH 1323/1971] nios2: add show_stack_loglvl() Currently, the log-level of show_stack() depends on a platform realization. It creates situations where the headers are printed with lower log level or higher than the stacktrace (depending on a platform or user). Furthermore, it forces the logic decision from user to an architecture side. In result, some users as sysrq/kdb/etc are doing tricks with temporary rising console_loglevel while printing their messages. And in result it not only may print unwanted messages from other CPUs, but also omit printing at all in the unlucky case where the printk() was deferred. Introducing log-level parameter and KERN_UNSUPPRESSED [1] seems an easier approach than introducing more printk buffers. Also, it will consolidate printings with headers. Introduce show_stack_loglvl(), that eventually will substitute show_stack(). [1]: https://lore.kernel.org/lkml/20190528002412.1625-1-dima@arista.com/T/#u Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Cc: Ley Foon Tan Link: http://lkml.kernel.org/r/20200418201944.482088-24-dima@arista.com Signed-off-by: Linus Torvalds --- arch/nios2/kernel/traps.c | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/arch/nios2/kernel/traps.c b/arch/nios2/kernel/traps.c index 486db793923c0..08071caa9b36f 100644 --- a/arch/nios2/kernel/traps.c +++ b/arch/nios2/kernel/traps.c @@ -52,12 +52,14 @@ void _exception(int signo, struct pt_regs *regs, int code, unsigned long addr) } /* - * The show_stack is an external API which we do not use ourselves. + * The show_stack(), show_stack_loglvl() are external API + * which we do not use ourselves. */ int kstack_depth_to_print = 48; -void show_stack(struct task_struct *task, unsigned long *stack) +void show_stack_loglvl(struct task_struct *task, unsigned long *stack, + const char *loglvl) { unsigned long *endstack, addr; int i; @@ -72,16 +74,16 @@ void show_stack(struct task_struct *task, unsigned long *stack) addr = (unsigned long) stack; endstack = (unsigned long *) PAGE_ALIGN(addr); - pr_emerg("Stack from %08lx:", (unsigned long)stack); + printk("%sStack from %08lx:", loglvl, (unsigned long)stack); for (i = 0; i < kstack_depth_to_print; i++) { if (stack + 1 > endstack) break; if (i % 8 == 0) - pr_emerg("\n "); - pr_emerg(" %08lx", *stack++); + printk("%s\n ", loglvl); + printk("%s %08lx", loglvl, *stack++); } - pr_emerg("\nCall Trace:"); + printk("%s\nCall Trace:", loglvl); i = 0; while (stack + 1 <= endstack) { addr = *stack++; @@ -97,11 +99,16 @@ void show_stack(struct task_struct *task, unsigned long *stack) (addr <= (unsigned long) _etext))) { if (i % 4 == 0) pr_emerg("\n "); - pr_emerg(" [<%08lx>]", addr); + printk("%s [<%08lx>]", loglvl, addr); i++; } } - pr_emerg("\n"); + printk("%s\n", loglvl); +} + +void show_stack(struct task_struct *task, unsigned long *stack) +{ + show_stack_loglvl(task, stack, KERN_EMERG); } void __init trap_init(void) -- GitLab From 0633032f083a53cd33f1171cca38c8ba835d1eba Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:31:08 -0700 Subject: [PATCH 1324/1971] openrisc: add show_stack_loglvl() Currently, the log-level of show_stack() depends on a platform realization. 
It creates situations where the headers are printed with lower log level or higher than the stacktrace (depending on a platform or user). Furthermore, it forces the logic decision from user to an architecture side. In result, some users as sysrq/kdb/etc are doing tricks with temporary rising console_loglevel while printing their messages. And in result it not only may print unwanted messages from other CPUs, but also omit printing at all in the unlucky case where the printk() was deferred. Introducing log-level parameter and KERN_UNSUPPRESSED [1] seems an easier approach than introducing more printk buffers. Also, it will consolidate printings with headers. Introduce show_stack_loglvl(), that eventually will substitute show_stack(). [1]: https://lore.kernel.org/lkml/20190528002412.1625-1-dima@arista.com/T/#u Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Cc: Jonas Bonn Cc: Stafford Horne Cc: Stefan Kristiansson Link: http://lkml.kernel.org/r/20200418201944.482088-25-dima@arista.com Signed-off-by: Linus Torvalds --- arch/openrisc/kernel/traps.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c index c11aa2e17ce05..3b7978a22d685 100644 --- a/arch/openrisc/kernel/traps.c +++ b/arch/openrisc/kernel/traps.c @@ -41,18 +41,26 @@ unsigned long __user *lwa_addr; void print_trace(void *data, unsigned long addr, int reliable) { - pr_emerg("[<%p>] %s%pS\n", (void *) addr, reliable ? "" : "? ", + const char *loglvl = data; + + printk("%s[<%p>] %s%pS\n", loglvl, (void *) addr, reliable ? "" : "? ", (void *) addr); } /* displays a short stack trace */ -void show_stack(struct task_struct *task, unsigned long *esp) +void show_stack_loglvl(struct task_struct *task, unsigned long *esp, + const char *loglvl) { if (esp == NULL) esp = (unsigned long *)&esp; - pr_emerg("Call trace:\n"); - unwind_stack(NULL, esp, print_trace); + printk("%sCall trace:\n", loglvl); + unwind_stack((void *)loglvl, esp, print_trace); +} + +void show_stack(struct task_struct *task, unsigned long *esp) +{ + show_stack_loglvl(task, esp, KERN_EMERG); } void show_registers(struct pt_regs *regs) -- GitLab From 3481d31bf7473ced5a39fbfd2786b141798b6764 Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:31:11 -0700 Subject: [PATCH 1325/1971] parisc: add show_stack_loglvl() Currently, the log-level of show_stack() depends on a platform realization. It creates situations where the headers are printed with lower log level or higher than the stacktrace (depending on a platform or user). Furthermore, it forces the logic decision from user to an architecture side. In result, some users as sysrq/kdb/etc are doing tricks with temporary rising console_loglevel while printing their messages. And in result it not only may print unwanted messages from other CPUs, but also omit printing at all in the unlucky case where the printk() was deferred. Introducing log-level parameter and KERN_UNSUPPRESSED [1] seems an easier approach than introducing more printk buffers. Also, it will consolidate printings with headers. Introduce show_stack_loglvl(), that eventually will substitute show_stack(). [1]: https://lore.kernel.org/lkml/20190528002412.1625-1-dima@arista.com/T/#u Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Cc: Helge Deller Cc: "James E.J. 
Bottomley" Link: http://lkml.kernel.org/r/20200418201944.482088-26-dima@arista.com Signed-off-by: Linus Torvalds --- arch/parisc/kernel/traps.c | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index 82fc011894889..c2411de3730f2 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c @@ -49,7 +49,7 @@ #include "../math-emu/math-emu.h" /* for handle_fpe() */ static void parisc_show_stack(struct task_struct *task, - struct pt_regs *regs); + struct pt_regs *regs, const char *loglvl); static int printbinary(char *buf, unsigned long x, int nbits) { @@ -155,7 +155,7 @@ void show_regs(struct pt_regs *regs) printk("%s IAOQ[1]: %pS\n", level, (void *) regs->iaoq[1]); printk("%s RP(r2): %pS\n", level, (void *) regs->gr[2]); - parisc_show_stack(current, regs); + parisc_show_stack(current, regs, KERN_DEFAULT); } } @@ -170,37 +170,43 @@ static DEFINE_RATELIMIT_STATE(_hppa_rs, } -static void do_show_stack(struct unwind_frame_info *info) +static void do_show_stack(struct unwind_frame_info *info, const char *loglvl) { int i = 1; - printk(KERN_CRIT "Backtrace:\n"); + printk("%sBacktrace:\n", loglvl); while (i <= MAX_UNWIND_ENTRIES) { if (unwind_once(info) < 0 || info->ip == 0) break; if (__kernel_text_address(info->ip)) { - printk(KERN_CRIT " [<" RFMT ">] %pS\n", - info->ip, (void *) info->ip); + printk("%s [<" RFMT ">] %pS\n", + loglvl, info->ip, (void *) info->ip); i++; } } - printk(KERN_CRIT "\n"); + printk("%s\n", loglvl); } static void parisc_show_stack(struct task_struct *task, - struct pt_regs *regs) + struct pt_regs *regs, const char *loglvl) { struct unwind_frame_info info; unwind_frame_init_task(&info, task, regs); - do_show_stack(&info); + do_show_stack(&info, loglvl); +} + +void show_stack_loglvl(struct task_struct *t, unsigned long *sp, + const char *loglvl) +{ + parisc_show_stack(t, NULL, loglvl); } void show_stack(struct task_struct *t, unsigned long *sp) { - parisc_show_stack(t, NULL); + show_stack_loglvl(t, sp, KERN_CRIT) } int is_valid_bugaddr(unsigned long iaoq) @@ -446,7 +452,7 @@ void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long o /* show_stack(NULL, (unsigned long *)regs->gr[30]); */ struct unwind_frame_info info; unwind_frame_init(&info, current, regs); - do_show_stack(&info); + do_show_stack(&info, KERN_CRIT); } printk("\n"); -- GitLab From b9677a8cf60995acdd1b36ff59e6b437154bff9e Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:31:14 -0700 Subject: [PATCH 1326/1971] powerpc: add show_stack_loglvl() Currently, the log-level of show_stack() depends on a platform realization. It creates situations where the headers are printed with lower log level or higher than the stacktrace (depending on a platform or user). Furthermore, it forces the logic decision from user to an architecture side. In result, some users as sysrq/kdb/etc are doing tricks with temporary rising console_loglevel while printing their messages. And in result it not only may print unwanted messages from other CPUs, but also omit printing at all in the unlucky case where the printk() was deferred. Introducing log-level parameter and KERN_UNSUPPRESSED [1] seems an easier approach than introducing more printk buffers. Also, it will consolidate printings with headers. Introduce show_stack_loglvl(), that eventually will substitute show_stack(). 
[1]: https://lore.kernel.org/lkml/20190528002412.1625-1-dima@arista.com/T/#u Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Acked-by: Michael Ellerman (powerpc) Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Link: http://lkml.kernel.org/r/20200418201944.482088-27-dima@arista.com Signed-off-by: Linus Torvalds --- arch/powerpc/kernel/process.c | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 048d64c4e1158..a456b4454b3fd 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -2063,7 +2063,8 @@ unsigned long get_wchan(struct task_struct *p) static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH; -void show_stack(struct task_struct *tsk, unsigned long *stack) +void show_stack_loglvl(struct task_struct *tsk, unsigned long *stack, + const char *loglvl) { unsigned long sp, ip, lr, newsp; int count = 0; @@ -2088,7 +2089,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) } lr = 0; - printk("Call Trace:\n"); + printk("%sCall Trace:\n", loglvl); do { if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD)) break; @@ -2097,7 +2098,8 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) newsp = stack[0]; ip = stack[STACK_FRAME_LR_SAVE]; if (!firstframe || ip != lr) { - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip); + printk("%s["REG"] ["REG"] %pS", + loglvl, sp, ip, (void *)ip); #ifdef CONFIG_FUNCTION_GRAPH_TRACER ret_addr = ftrace_graph_ret_addr(current, &ftrace_idx, ip, stack); @@ -2119,8 +2121,9 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) struct pt_regs *regs = (struct pt_regs *) (sp + STACK_FRAME_OVERHEAD); lr = regs->link; - printk("--- interrupt: %lx at %pS\n LR = %pS\n", - regs->trap, (void *)regs->nip, (void *)lr); + printk("%s--- interrupt: %lx at %pS\n LR = %pS\n", + loglvl, regs->trap, + (void *)regs->nip, (void *)lr); firstframe = 1; } @@ -2130,6 +2133,11 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) put_task_stack(tsk); } +void show_stack(struct task_struct *tsk, unsigned long *stack) +{ + show_stack_loglvl(tsk, stack, KERN_DEFAULT); +} + #ifdef CONFIG_PPC64 /* Called with hard IRQs off */ void notrace __ppc64_runlatch_on(void) -- GitLab From 0b3d43657489711bf927998fde82b5cc575d9400 Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:31:17 -0700 Subject: [PATCH 1327/1971] riscv: add show_stack_loglvl() Currently, the log-level of show_stack() depends on a platform realization. It creates situations where the headers are printed with lower log level or higher than the stacktrace (depending on a platform or user). Furthermore, it forces the logic decision from user to an architecture side. In result, some users as sysrq/kdb/etc are doing tricks with temporary rising console_loglevel while printing their messages. And in result it not only may print unwanted messages from other CPUs, but also omit printing at all in the unlucky case where the printk() was deferred. Introducing log-level parameter and KERN_UNSUPPRESSED [1] seems an easier approach than introducing more printk buffers. Also, it will consolidate printings with headers. Introduce show_stack_loglvl(), that eventually will substitute show_stack(). 
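In the riscv patch below only the per-frame lines pick up the caller's level; the "Call Trace:" header is still printed with pr_cont() and therefore carries no loglvl prefix. If the header were meant to follow the same convention, a consistent variant might look like the sketch below; this is only an illustration, not what the patch does.

void show_stack_loglvl(struct task_struct *task, unsigned long *sp,
		       const char *loglvl)
{
	/* Print the header at the same level as the frames that follow. */
	printk("%sCall Trace:\n", loglvl);
	walk_stackframe(task, NULL, print_trace_address, (void *)loglvl);
}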
[1]: https://lore.kernel.org/lkml/20190528002412.1625-1-dima@arista.com/T/#u Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Cc: Albert Ou Cc: Palmer Dabbelt Cc: Paul Walmsley Link: http://lkml.kernel.org/r/20200418201944.482088-28-dima@arista.com Signed-off-by: Linus Torvalds --- arch/riscv/kernel/stacktrace.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c index 9f1ac258482fd..aaa64bf007f82 100644 --- a/arch/riscv/kernel/stacktrace.c +++ b/arch/riscv/kernel/stacktrace.c @@ -99,16 +99,23 @@ void notrace walk_stackframe(struct task_struct *task, static bool print_trace_address(unsigned long pc, void *arg) { - print_ip_sym(KERN_DEFAULT, pc); + const char *loglvl = arg; + + print_ip_sym(loglvl, pc); return false; } -void show_stack(struct task_struct *task, unsigned long *sp) +void show_stack_loglvl(struct task_struct *task, unsigned long *sp, + const char *loglvl) { pr_cont("Call Trace:\n"); - walk_stackframe(task, NULL, print_trace_address, NULL); + walk_stackframe(task, NULL, print_trace_address, (void *)loglvl); } +void show_stack(struct task_struct *task, unsigned long *sp) +{ + show_stack_loglvl(task, sp, KERN_DEFAULT); +} static bool save_wchan(unsigned long pc, void *arg) { -- GitLab From 8539c1288ddc23eef0fe71e1bcecc04b3fc6a9ee Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:31:20 -0700 Subject: [PATCH 1328/1971] s390: add show_stack_loglvl() Currently, the log-level of show_stack() depends on a platform realization. It creates situations where the headers are printed with lower log level or higher than the stacktrace (depending on a platform or user). Furthermore, it forces the logic decision from user to an architecture side. In result, some users as sysrq/kdb/etc are doing tricks with temporary rising console_loglevel while printing their messages. And in result it not only may print unwanted messages from other CPUs, but also omit printing at all in the unlucky case where the printk() was deferred. Introducing log-level parameter and KERN_UNSUPPRESSED [1] seems an easier approach than introducing more printk buffers. Also, it will consolidate printings with headers. Introduce show_stack_loglvl(), that eventually will substitute show_stack(). [1]: https://lore.kernel.org/lkml/20190528002412.1625-1-dima@arista.com/T/#u Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Cc: Christian Borntraeger Cc: Heiko Carstens Cc: Vasily Gorbik Link: http://lkml.kernel.org/r/20200418201944.482088-29-dima@arista.com Signed-off-by: Linus Torvalds --- arch/s390/kernel/dumpstack.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c index 2c122d8bab935..887a054919fc1 100644 --- a/arch/s390/kernel/dumpstack.c +++ b/arch/s390/kernel/dumpstack.c @@ -126,18 +126,24 @@ unknown: return -EINVAL; } -void show_stack(struct task_struct *task, unsigned long *stack) +void show_stack_loglvl(struct task_struct *task, unsigned long *stack, + const char *loglvl) { struct unwind_state state; - printk("Call Trace:\n"); + printk("%sCall Trace:\n", loglvl); unwind_for_each_frame(&state, task, NULL, (unsigned long) stack) - printk(state.reliable ? " [<%016lx>] %pSR \n" : - "([<%016lx>] %pSR)\n", - state.ip, (void *) state.ip); + printk(state.reliable ? "%s [<%016lx>] %pSR \n" : + "%s([<%016lx>] %pSR)\n", + loglvl, state.ip, (void *) state.ip); debug_show_held_locks(task ? 
: current); } +void show_stack(struct task_struct *task, unsigned long *stack) +{ + show_stack_loglvl(task, stack, KERN_DEFAULT); +} + static void show_last_breaking_event(struct pt_regs *regs) { printk("Last Breaking-Event-Address:\n"); -- GitLab From ebf0a36a32b25fe6ba8a1b6bd3135432ebd9aa5c Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:31:24 -0700 Subject: [PATCH 1329/1971] sh: add loglvl to dump_mem() Currently, the log-level of show_stack() depends on a platform realization. It creates situations where the headers are printed with lower log level or higher than the stacktrace (depending on a platform or user). Furthermore, it forces the logic decision from user to an architecture side. In result, some users as sysrq/kdb/etc are doing tricks with temporary rising console_loglevel while printing their messages. And in result it not only may print unwanted messages from other CPUs, but also omit printing at all in the unlucky case where the printk() was deferred. Introducing log-level parameter and KERN_UNSUPPRESSED [1] seems an easier approach than introducing more printk buffers. Also, it will consolidate printings with headers. Add log level argument to dump_mem() as a preparation to introduce show_stack_loglvl(). [1]: https://lore.kernel.org/lkml/20190528002412.1625-1-dima@arista.com/T/#u Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Cc: Rich Felker Cc: Yoshinori Sato Link: http://lkml.kernel.org/r/20200418201944.482088-30-dima@arista.com Signed-off-by: Linus Torvalds --- arch/sh/include/asm/kdebug.h | 3 ++- arch/sh/kernel/dumpstack.c | 17 +++++++++-------- arch/sh/kernel/traps.c | 4 ++-- 3 files changed, 13 insertions(+), 11 deletions(-) diff --git a/arch/sh/include/asm/kdebug.h b/arch/sh/include/asm/kdebug.h index 5212f5fcd7520..de8693fabb1d9 100644 --- a/arch/sh/include/asm/kdebug.h +++ b/arch/sh/include/asm/kdebug.h @@ -13,6 +13,7 @@ enum die_val { /* arch/sh/kernel/dumpstack.c */ extern void printk_address(unsigned long address, int reliable); -extern void dump_mem(const char *str, unsigned long bottom, unsigned long top); +extern void dump_mem(const char *str, const char *loglvl, + unsigned long bottom, unsigned long top); #endif /* __ASM_SH_KDEBUG_H */ diff --git a/arch/sh/kernel/dumpstack.c b/arch/sh/kernel/dumpstack.c index 9f1c9c11d62df..6784b914fba01 100644 --- a/arch/sh/kernel/dumpstack.c +++ b/arch/sh/kernel/dumpstack.c @@ -16,30 +16,31 @@ #include #include -void dump_mem(const char *str, unsigned long bottom, unsigned long top) +void dump_mem(const char *str, const char *loglvl, + unsigned long bottom, unsigned long top) { unsigned long p; int i; - printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top); + printk("%s%s(0x%08lx to 0x%08lx)\n", loglvl, str, bottom, top); for (p = bottom & ~31; p < top; ) { - printk("%04lx: ", p & 0xffff); + printk("%s%04lx: ", loglvl, p & 0xffff); for (i = 0; i < 8; i++, p += 4) { unsigned int val; if (p < bottom || p >= top) - printk(" "); + printk("%s ", loglvl); else { if (__get_user(val, (unsigned int __user *)p)) { - printk("\n"); + printk("%s\n", loglvl); return; } - printk("%08x ", val); + printk("%s%08x ", loglvl, val); } } - printk("\n"); + printk("%s\n", loglvl); } } @@ -156,7 +157,7 @@ void show_stack(struct task_struct *tsk, unsigned long *sp) sp = (unsigned long *)tsk->thread.sp; stack = (unsigned long)sp; - dump_mem("Stack: ", stack, THREAD_SIZE + + dump_mem("Stack: ", KERN_DEFAULT, stack, THREAD_SIZE + (unsigned long)task_stack_page(tsk)); show_trace(tsk, sp, NULL); } diff --git 
a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c index 2130381c9d574..a33025451fcd0 100644 --- a/arch/sh/kernel/traps.c +++ b/arch/sh/kernel/traps.c @@ -38,8 +38,8 @@ void die(const char *str, struct pt_regs *regs, long err) task_pid_nr(current), task_stack_page(current) + 1); if (!user_mode(regs) || in_interrupt()) - dump_mem("Stack: ", regs->regs[15], THREAD_SIZE + - (unsigned long)task_stack_page(current)); + dump_mem("Stack: ", KERN_DEFAULT, regs->regs[15], + THREAD_SIZE + (unsigned long)task_stack_page(current)); notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV); -- GitLab From 8b92f34877225c8eb85e3ab7f1177fc248ba26d0 Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:31:27 -0700 Subject: [PATCH 1330/1971] sh: remove needless printk() Currently `data' is always an empty line "". No need for additional printk() call. Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Cc: Rich Felker Cc: Yoshinori Sato Link: http://lkml.kernel.org/r/20200418201944.482088-31-dima@arista.com Signed-off-by: Linus Torvalds --- arch/sh/kernel/dumpstack.c | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/sh/kernel/dumpstack.c b/arch/sh/kernel/dumpstack.c index 6784b914fba01..2c1a78e5776b8 100644 --- a/arch/sh/kernel/dumpstack.c +++ b/arch/sh/kernel/dumpstack.c @@ -118,7 +118,6 @@ static int print_trace_stack(void *data, char *name) */ static void print_trace_address(void *data, unsigned long addr, int reliable) { - printk("%s", (char *)data); printk_address(addr, reliable); } -- GitLab From 2deebe4d56d638269a4a728086d64de5734b460a Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:31:30 -0700 Subject: [PATCH 1331/1971] sh: add loglvl to printk_address() Currently, the log-level of show_stack() depends on a platform realization. It creates situations where the headers are printed with lower log level or higher than the stacktrace (depending on a platform or user). Furthermore, it forces the logic decision from user to an architecture side. In result, some users as sysrq/kdb/etc are doing tricks with temporary rising console_loglevel while printing their messages. And in result it not only may print unwanted messages from other CPUs, but also omit printing at all in the unlucky case where the printk() was deferred. Introducing log-level parameter and KERN_UNSUPPRESSED [1] seems an easier approach than introducing more printk buffers. Also, it will consolidate printings with headers. Add log level argument to printk_address() as a preparation to introduce show_stack_loglvl(). As a good side-effect show_fault_oops() now prints the address with KERN_EMREG as the rest of output, making sure there won't be situation where "PC: " is printed without actual address. 
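Concretely, the fault path now emits both halves of that line at the same level (sketch of the resulting call sites; the diff below passes KERN_ALERT, matching the pr_alert() used for the header):

	pr_alert("PC:");
	printk_address(regs->pc, 1, KERN_ALERT);	/* same level as the "PC:" header */

so a console_loglevel setting that suppresses one of the two lines suppresses both, instead of leaving a dangling "PC:".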
[1]: https://lore.kernel.org/lkml/20190528002412.1625-1-dima@arista.com/T/#u Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Cc: Rich Felker Link: http://lkml.kernel.org/r/20200418201944.482088-32-dima@arista.com Signed-off-by: Linus Torvalds --- arch/sh/include/asm/kdebug.h | 3 ++- arch/sh/kernel/dumpstack.c | 6 +++--- arch/sh/mm/fault.c | 2 +- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/arch/sh/include/asm/kdebug.h b/arch/sh/include/asm/kdebug.h index de8693fabb1d9..960545306afa4 100644 --- a/arch/sh/include/asm/kdebug.h +++ b/arch/sh/include/asm/kdebug.h @@ -12,7 +12,8 @@ enum die_val { }; /* arch/sh/kernel/dumpstack.c */ -extern void printk_address(unsigned long address, int reliable); +extern void printk_address(unsigned long address, int reliable, + const char *loglvl); extern void dump_mem(const char *str, const char *loglvl, unsigned long bottom, unsigned long top); diff --git a/arch/sh/kernel/dumpstack.c b/arch/sh/kernel/dumpstack.c index 2c1a78e5776b8..959064b900555 100644 --- a/arch/sh/kernel/dumpstack.c +++ b/arch/sh/kernel/dumpstack.c @@ -44,9 +44,9 @@ void dump_mem(const char *str, const char *loglvl, } } -void printk_address(unsigned long address, int reliable) +void printk_address(unsigned long address, int reliable, const char *loglvl) { - printk(" [<%p>] %s%pS\n", (void *) address, + printk("%s [<%p>] %s%pS\n", loglvl, (void *) address, reliable ? "" : "? ", (void *) address); } @@ -118,7 +118,7 @@ static int print_trace_stack(void *data, char *name) */ static void print_trace_address(void *data, unsigned long addr, int reliable) { - printk_address(addr, reliable); + printk_address(addr, reliable, (char *)data); } static const struct stacktrace_ops print_trace_ops = { diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c index 7260a1a7fdca5..67d0e739897ce 100644 --- a/arch/sh/mm/fault.c +++ b/arch/sh/mm/fault.c @@ -214,7 +214,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long address) : "paging request", address); pr_alert("PC:"); - printk_address(regs->pc, 1); + printk_address(regs->pc, 1, KERN_ALERT); show_pte(NULL, address); } -- GitLab From 539e786cc37ee5cb6e051ef5eb72b7a7c03022cf Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:31:33 -0700 Subject: [PATCH 1332/1971] sh: add loglvl to show_trace() Currently, the log-level of show_stack() depends on a platform realization. It creates situations where the headers are printed with lower log level or higher than the stacktrace (depending on a platform or user). Furthermore, it forces the logic decision from user to an architecture side. In result, some users as sysrq/kdb/etc are doing tricks with temporary rising console_loglevel while printing their messages. And in result it not only may print unwanted messages from other CPUs, but also omit printing at all in the unlucky case where the printk() was deferred. Introducing log-level parameter and KERN_UNSUPPRESSED [1] seems an easier approach than introducing more printk buffers. Also, it will consolidate printings with headers. Add log level parameter to show_trace() as a preparation to introduce show_stack_loglvl(). 
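For background (this is not part of any patch in the series): every printk() that is not an explicit continuation starts a new log record, and the record's level is taken from the KERN_* prefix found at the front of its formatted text. Passing the prefix through a "%s" argument is therefore equivalent to writing it into the format string, which is what lets the level become a runtime parameter:

	printk(KERN_DEFAULT "Call trace:\n");	/* level fixed at the call site */
	printk("%sCall trace:\n", loglvl);	/* level chosen by the caller */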
[1]: https://lore.kernel.org/lkml/20190528002412.1625-1-dima@arista.com/T/#u Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Cc: Rich Felker Link: http://lkml.kernel.org/r/20200418201944.482088-33-dima@arista.com Signed-off-by: Linus Torvalds --- arch/sh/include/asm/processor_32.h | 2 +- arch/sh/kernel/dumpstack.c | 10 +++++----- arch/sh/kernel/process_32.c | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h index 0e0ecc0132e3b..d44409413418a 100644 --- a/arch/sh/include/asm/processor_32.h +++ b/arch/sh/include/asm/processor_32.h @@ -171,7 +171,7 @@ static __inline__ void enable_fpu(void) #define thread_saved_pc(tsk) (tsk->thread.pc) void show_trace(struct task_struct *tsk, unsigned long *sp, - struct pt_regs *regs); + struct pt_regs *regs, const char *loglvl); #ifdef CONFIG_DUMP_CODE void show_code(struct pt_regs *regs); diff --git a/arch/sh/kernel/dumpstack.c b/arch/sh/kernel/dumpstack.c index 959064b900555..d488a47a1f0fc 100644 --- a/arch/sh/kernel/dumpstack.c +++ b/arch/sh/kernel/dumpstack.c @@ -127,16 +127,16 @@ static const struct stacktrace_ops print_trace_ops = { }; void show_trace(struct task_struct *tsk, unsigned long *sp, - struct pt_regs *regs) + struct pt_regs *regs, const char *loglvl) { if (regs && user_mode(regs)) return; - printk("\nCall trace:\n"); + printk("%s\nCall trace:\n", loglvl); - unwind_stack(tsk, regs, sp, &print_trace_ops, ""); + unwind_stack(tsk, regs, sp, &print_trace_ops, (void *)loglvl); - printk("\n"); + printk("%s\n", loglvl); if (!tsk) tsk = current; @@ -158,5 +158,5 @@ void show_stack(struct task_struct *tsk, unsigned long *sp) stack = (unsigned long)sp; dump_mem("Stack: ", KERN_DEFAULT, stack, THREAD_SIZE + (unsigned long)task_stack_page(tsk)); - show_trace(tsk, sp, NULL); + show_trace(tsk, sp, NULL, KERN_DEFAULT); } diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c index a094633874c37..456cc8d171f72 100644 --- a/arch/sh/kernel/process_32.c +++ b/arch/sh/kernel/process_32.c @@ -59,7 +59,7 @@ void show_regs(struct pt_regs * regs) printk("MACH: %08lx MACL: %08lx GBR : %08lx PR : %08lx\n", regs->mach, regs->macl, regs->gbr, regs->pr); - show_trace(NULL, (unsigned long *)regs->regs[15], regs); + show_trace(NULL, (unsigned long *)regs->regs[15], regs, KERN_DEFAULT); show_code(regs); } -- GitLab From e6e371c4f653db7581072a0a20b32a11a751ebda Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:31:36 -0700 Subject: [PATCH 1333/1971] sh: add show_stack_loglvl() Currently, the log-level of show_stack() depends on a platform realization. It creates situations where the headers are printed with lower log level or higher than the stacktrace (depending on a platform or user). Furthermore, it forces the logic decision from user to an architecture side. In result, some users as sysrq/kdb/etc are doing tricks with temporary rising console_loglevel while printing their messages. And in result it not only may print unwanted messages from other CPUs, but also omit printing at all in the unlucky case where the printk() was deferred. Introducing log-level parameter and KERN_UNSUPPRESSED [1] seems an easier approach than introducing more printk buffers. Also, it will consolidate printings with headers. Introduce show_stack_loglvl(), that eventually will substitute show_stack(). 
[1]: https://lore.kernel.org/lkml/20190528002412.1625-1-dima@arista.com/T/#u Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Cc: Rich Felker Cc: Yoshinori Sato Link: http://lkml.kernel.org/r/20200418201944.482088-34-dima@arista.com Signed-off-by: Linus Torvalds --- arch/sh/kernel/dumpstack.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/arch/sh/kernel/dumpstack.c b/arch/sh/kernel/dumpstack.c index d488a47a1f0fc..cc51e9d746674 100644 --- a/arch/sh/kernel/dumpstack.c +++ b/arch/sh/kernel/dumpstack.c @@ -144,7 +144,8 @@ void show_trace(struct task_struct *tsk, unsigned long *sp, debug_show_held_locks(tsk); } -void show_stack(struct task_struct *tsk, unsigned long *sp) +void show_stack_loglvl(struct task_struct *tsk, unsigned long *sp, + const char *loglvl) { unsigned long stack; @@ -156,7 +157,12 @@ void show_stack(struct task_struct *tsk, unsigned long *sp) sp = (unsigned long *)tsk->thread.sp; stack = (unsigned long)sp; - dump_mem("Stack: ", KERN_DEFAULT, stack, THREAD_SIZE + + dump_mem("Stack: ", loglvl, stack, THREAD_SIZE + (unsigned long)task_stack_page(tsk)); - show_trace(tsk, sp, NULL, KERN_DEFAULT); + show_trace(tsk, sp, NULL, loglvl); +} + +void show_stack(struct task_struct *task, unsigned long *sp) +{ + show_stack_loglvl(task, sp, KERN_DEFAULT); } -- GitLab From 13c6371ae576840bcacc0fc8407d7b11572eb7fb Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:31:39 -0700 Subject: [PATCH 1334/1971] sparc: add show_stack_loglvl() Currently, the log-level of show_stack() depends on a platform realization. It creates situations where the headers are printed with lower log level or higher than the stacktrace (depending on a platform or user). Furthermore, it forces the logic decision from user to an architecture side. In result, some users as sysrq/kdb/etc are doing tricks with temporary rising console_loglevel while printing their messages. And in result it not only may print unwanted messages from other CPUs, but also omit printing at all in the unlucky case where the printk() was deferred. Introducing log-level parameter and KERN_UNSUPPRESSED [1] seems an easier approach than introducing more printk buffers. Also, it will consolidate printings with headers. Introduce show_stack_loglvl(), that eventually will substitute show_stack(). [1]: https://lore.kernel.org/lkml/20190528002412.1625-1-dima@arista.com/T/#u Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Acked-by: David S. Miller Link: http://lkml.kernel.org/r/20200418201944.482088-35-dima@arista.com Signed-off-by: Linus Torvalds --- arch/sparc/kernel/process_32.c | 17 ++++++++++++----- arch/sparc/kernel/traps_64.c | 15 +++++++++++---- 2 files changed, 23 insertions(+), 9 deletions(-) diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c index 26cca65e92465..0b07de5618e50 100644 --- a/arch/sparc/kernel/process_32.c +++ b/arch/sparc/kernel/process_32.c @@ -145,10 +145,12 @@ void show_regs(struct pt_regs *r) } /* - * The show_stack is an external API which we do not use ourselves. + * The show_stack(), show_stack_loglvl() are external APIs which + * we do not use ourselves. * The oops is printed in die_if_kernel. 
*/ -void show_stack(struct task_struct *tsk, unsigned long *_ksp) +void show_stack_loglvl(struct task_struct *tsk, unsigned long *_ksp, + const char *loglvl) { unsigned long pc, fp; unsigned long task_base; @@ -170,11 +172,16 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp) break; rw = (struct reg_window32 *) fp; pc = rw->ins[7]; - printk("[%08lx : ", pc); - printk("%pS ] ", (void *) pc); + printk("%s[%08lx : ", loglvl, pc); + printk("%s%pS ] ", loglvl, (void *) pc); fp = rw->ins[6]; } while (++count < 16); - printk("\n"); + printk("%s\n", loglvl); +} + +void show_stack(struct task_struct *task, unsigned long *sp) +{ + show_stack_loglvl(task, sp, KERN_DEFAULT); } /* diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index 27778b65a965e..8715bc93bd9d8 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -2452,7 +2453,8 @@ static void user_instruction_dump(unsigned int __user *pc) printk("\n"); } -void show_stack(struct task_struct *tsk, unsigned long *_ksp) +void show_stack_loglvl(struct task_struct *tsk, unsigned long *_ksp, + const char *loglvl) { unsigned long fp, ksp; struct thread_info *tp; @@ -2476,7 +2478,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp) fp = ksp + STACK_BIAS; - printk("Call Trace:\n"); + printk("%sCall Trace:\n", loglvl); do { struct sparc_stackf *sf; struct pt_regs *regs; @@ -2497,14 +2499,14 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp) fp = (unsigned long)sf->fp + STACK_BIAS; } - printk(" [%016lx] %pS\n", pc, (void *) pc); + print_ip_sym(loglvl, pc); #ifdef CONFIG_FUNCTION_GRAPH_TRACER if ((pc + 8UL) == (unsigned long) &return_to_handler) { struct ftrace_ret_stack *ret_stack; ret_stack = ftrace_graph_get_ret_stack(tsk, graph); if (ret_stack) { pc = ret_stack->ret; - printk(" [%016lx] %pS\n", pc, (void *) pc); + print_ip_sym(loglvl, pc); graph++; } } @@ -2512,6 +2514,11 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp) } while (++count < 16); } +void show_stack(struct task_struct *tsk, unsigned long *_ksp) +{ + show_stack_loglvl(task, sp, KERN_DEFAULT); +} + static inline struct reg_window *kernel_stack_up(struct reg_window *rw) { unsigned long fp = rw->ins[6]; -- GitLab From 3dd923f39a03dede001afe0edcc08613d5f403e5 Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:31:42 -0700 Subject: [PATCH 1335/1971] um/sysrq: remove needless variable sp `sp' is a needless excercise here. 
Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Cc: Anton Ivanov Cc: Jeff Dike Cc: Richard Weinberger Link: http://lkml.kernel.org/r/20200418201944.482088-36-dima@arista.com Signed-off-by: Linus Torvalds --- arch/um/kernel/sysrq.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/arch/um/kernel/sysrq.c b/arch/um/kernel/sysrq.c index c71b5ef7ea8c3..c831a1c2eb94a 100644 --- a/arch/um/kernel/sysrq.c +++ b/arch/um/kernel/sysrq.c @@ -27,7 +27,6 @@ static const struct stacktrace_ops stackops = { void show_stack(struct task_struct *task, unsigned long *stack) { - unsigned long *sp = stack; struct pt_regs *segv_regs = current->thread.segv_regs; int i; @@ -38,10 +37,9 @@ void show_stack(struct task_struct *task, unsigned long *stack) } if (!stack) - sp = get_stack_pointer(task, segv_regs); + stack = get_stack_pointer(task, segv_regs); pr_info("Stack:\n"); - stack = sp; for (i = 0; i < 3 * STACKSLOTS_PER_LINE; i++) { if (kstack_end(stack)) break; -- GitLab From 1ad87824f4cf16a7f381e1f94943a96bb7a99062 Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:31:45 -0700 Subject: [PATCH 1336/1971] um: add show_stack_loglvl() Currently, the log-level of show_stack() depends on a platform realization. It creates situations where the headers are printed with lower log level or higher than the stacktrace (depending on a platform or user). Furthermore, it forces the logic decision from user to an architecture side. In result, some users as sysrq/kdb/etc are doing tricks with temporary rising console_loglevel while printing their messages. And in result it not only may print unwanted messages from other CPUs, but also omit printing at all in the unlucky case where the printk() was deferred. Introducing log-level parameter and KERN_UNSUPPRESSED [1] seems an easier approach than introducing more printk buffers. Also, it will consolidate printings with headers. Introduce show_stack_loglvl(), that eventually will substitute show_stack(). [1]: https://lore.kernel.org/lkml/20190528002412.1625-1-dima@arista.com/T/#u Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Cc: Anton Ivanov Cc: Jeff Dike Cc: Richard Weinberger Link: http://lkml.kernel.org/r/20200418201944.482088-37-dima@arista.com Signed-off-by: Linus Torvalds --- arch/um/kernel/sysrq.c | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/arch/um/kernel/sysrq.c b/arch/um/kernel/sysrq.c index c831a1c2eb94a..1b54b6431499b 100644 --- a/arch/um/kernel/sysrq.c +++ b/arch/um/kernel/sysrq.c @@ -17,7 +17,9 @@ static void _print_addr(void *data, unsigned long address, int reliable) { - pr_info(" [<%08lx>] %s%pS\n", address, reliable ? "" : "? ", + const char *loglvl = data; + + printk("%s [<%08lx>] %s%pS\n", loglvl, address, reliable ? "" : "? 
", (void *)address); } @@ -25,7 +27,8 @@ static const struct stacktrace_ops stackops = { .address = _print_addr }; -void show_stack(struct task_struct *task, unsigned long *stack) +void show_stack_loglvl(struct task_struct *task, unsigned long *stack, + const char *loglvl) { struct pt_regs *segv_regs = current->thread.segv_regs; int i; @@ -39,17 +42,22 @@ void show_stack(struct task_struct *task, unsigned long *stack) if (!stack) stack = get_stack_pointer(task, segv_regs); - pr_info("Stack:\n"); + printk("%sStack:\n", loglvl); for (i = 0; i < 3 * STACKSLOTS_PER_LINE; i++) { if (kstack_end(stack)) break; if (i && ((i % STACKSLOTS_PER_LINE) == 0)) - pr_cont("\n"); + printk("%s\n", loglvl); pr_cont(" %08lx", *stack++); } - pr_cont("\n"); + printk("%s\n", loglvl); + + printk("%sCall Trace:\n", loglvl); + dump_trace(current, &stackops, (void *)loglvl); + printk("%s\n", loglvl); +} - pr_info("Call Trace:\n"); - dump_trace(current, &stackops, NULL); - pr_info("\n"); +void show_stack(struct task_struct *task, unsigned long *stack) +{ + show_stack_loglvl(task, stack, KERN_INFO); } -- GitLab From ee1e99009e914e4150774f5a717c179545c7699e Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:31:48 -0700 Subject: [PATCH 1337/1971] unicore32: remove unused pmode argument in c_backtrace() The pmode parameter isn't used in assembly - remove it. Second argument will be reused for printk() log level. Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Cc: Guan Xuetao Link: http://lkml.kernel.org/r/20200418201944.482088-38-dima@arista.com Signed-off-by: Linus Torvalds --- arch/unicore32/kernel/setup.h | 2 +- arch/unicore32/kernel/traps.c | 14 +++++--------- 2 files changed, 6 insertions(+), 10 deletions(-) diff --git a/arch/unicore32/kernel/setup.h b/arch/unicore32/kernel/setup.h index e40d3603c7e78..03e70e37f4726 100644 --- a/arch/unicore32/kernel/setup.h +++ b/arch/unicore32/kernel/setup.h @@ -29,7 +29,7 @@ extern void kernel_thread_helper(void); extern void __init early_signal_init(void); extern asmlinkage void __backtrace(void); -extern asmlinkage void c_backtrace(unsigned long fp, int pmode); +extern asmlinkage void c_backtrace(unsigned long fp); extern void __show_regs(struct pt_regs *); diff --git a/arch/unicore32/kernel/traps.c b/arch/unicore32/kernel/traps.c index e24f67283864c..3682a4c5d9274 100644 --- a/arch/unicore32/kernel/traps.c +++ b/arch/unicore32/kernel/traps.c @@ -137,7 +137,7 @@ static void dump_instr(const char *lvl, struct pt_regs *regs) static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) { - unsigned int fp, mode; + unsigned int fp; int ok = 1; printk(KERN_DEFAULT "Backtrace: "); @@ -145,16 +145,12 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) if (!tsk) tsk = current; - if (regs) { + if (regs) fp = regs->UCreg_fp; - mode = processor_mode(regs); - } else if (tsk != current) { + else if (tsk != current) fp = thread_saved_fp(tsk); - mode = 0x10; - } else { + else asm("mov %0, fp" : "=r" (fp) : : "cc"); - mode = 0x10; - } if (!fp) { printk("no frame pointer"); @@ -167,7 +163,7 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) printk("\n"); if (ok) - c_backtrace(fp, mode); + c_backtrace(fp); } void show_stack(struct task_struct *tsk, unsigned long *sp) -- GitLab From de985dd50158fedde5f2916c1bcf949a0acf5cd0 Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:31:51 -0700 Subject: [PATCH 1338/1971] unicore32: add loglvl to c_backtrace() Currently, the log-level of 
show_stack() depends on a platform realization. It creates situations where the headers are printed with lower log level or higher than the stacktrace (depending on a platform or user). Furthermore, it forces the logic decision from user to an architecture side. In result, some users as sysrq/kdb/etc are doing tricks with temporary rising console_loglevel while printing their messages. And in result it not only may print unwanted messages from other CPUs, but also omit printing at all in the unlucky case where the printk() was deferred. Introducing log-level parameter and KERN_UNSUPPRESSED [1] seems an easier approach than introducing more printk buffers. Also, it will consolidate printings with headers. Add log level parameter to c_backtrace() as a preparation for introducing show_stack_loglvl() [1]: https://lore.kernel.org/lkml/20190528002412.1625-1-dima@arista.com/T/#u Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Cc: Guan Xuetao Link: http://lkml.kernel.org/r/20200418201944.482088-39-dima@arista.com Signed-off-by: Linus Torvalds --- arch/unicore32/kernel/setup.h | 2 +- arch/unicore32/kernel/traps.c | 2 +- arch/unicore32/lib/backtrace.S | 24 ++++++++++++++++-------- 3 files changed, 18 insertions(+), 10 deletions(-) diff --git a/arch/unicore32/kernel/setup.h b/arch/unicore32/kernel/setup.h index 03e70e37f4726..9673523231853 100644 --- a/arch/unicore32/kernel/setup.h +++ b/arch/unicore32/kernel/setup.h @@ -29,7 +29,7 @@ extern void kernel_thread_helper(void); extern void __init early_signal_init(void); extern asmlinkage void __backtrace(void); -extern asmlinkage void c_backtrace(unsigned long fp); +extern asmlinkage void c_backtrace(unsigned long fp, const char *loglvl); extern void __show_regs(struct pt_regs *); diff --git a/arch/unicore32/kernel/traps.c b/arch/unicore32/kernel/traps.c index 3682a4c5d9274..2b7d734568659 100644 --- a/arch/unicore32/kernel/traps.c +++ b/arch/unicore32/kernel/traps.c @@ -163,7 +163,7 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) printk("\n"); if (ok) - c_backtrace(fp); + c_backtrace(fp, KERN_DEFAULT); } void show_stack(struct task_struct *tsk, unsigned long *sp) diff --git a/arch/unicore32/lib/backtrace.S b/arch/unicore32/lib/backtrace.S index f303671e2a4e9..6221944b81f37 100644 --- a/arch/unicore32/lib/backtrace.S +++ b/arch/unicore32/lib/backtrace.S @@ -16,6 +16,7 @@ #define sv_fp v5 #define sv_pc v6 #define offset v8 +#define loglvl v9 ENTRY(__backtrace) mov r0, fp @@ -27,10 +28,11 @@ ENTRY(c_backtrace) ENDPROC(__backtrace) ENDPROC(c_backtrace) #else - stm.w (v4 - v8, lr), [sp-] @ Save an extra register + stm.w (v4 - v10, lr), [sp-] @ Save an extra register @ so we have a location... 
mov.a frame, r0 @ if frame pointer is zero beq no_frame @ we have no stack frames + mov loglvl, r1 1: stm.w (pc), [sp-] @ calculate offset of PC stored ldw.w r0, [sp]+, #4 @ by stmfd for this CPU @@ -95,9 +97,10 @@ for_each_frame: bua for_each_frame 1006: adr r0, .Lbad - mov r1, frame + mov r1, loglvl + mov r2, frame b.l printk -no_frame: ldm.w (v4 - v8, pc), [sp]+ +no_frame: ldm.w (v4 - v10, pc), [sp]+ ENDPROC(__backtrace) ENDPROC(c_backtrace) @@ -128,8 +131,11 @@ ENDPROC(c_backtrace) add v7, v7, #1 cxor.a v7, #6 cmoveq v7, #1 - cmoveq r1, #'\n' - cmovne r1, #' ' + bne 201f + adr r0, .Lcr + mov r1, loglvl + b.l printk +201: ldw.w r3, [stack]+, #-4 mov r2, reg csub.a r2, #8 @@ -141,18 +147,20 @@ ENDPROC(c_backtrace) add r2, r2, #0x10 @ so r2 need add 16 201: adr r0, .Lfp + mov r1, loglvl b.l printk 2: sub.a reg, reg, #1 bns 1b cxor.a v7, #0 beq 201f adr r0, .Lcr + mov r1, loglvl b.l printk 201: ldm.w (instr, reg, stack, v7, pc), [sp]+ -.Lfp: .asciz "%cr%d:%08x" -.Lcr: .asciz "\n" -.Lbad: .asciz "Backtrace aborted due to bad frame pointer <%p>\n" +.Lfp: .asciz "%sr%d:%08x " +.Lcr: .asciz "%s\n" +.Lbad: .asciz "%sBacktrace aborted due to bad frame pointer <%p>\n" .align .Ldsi: .word 0x92eec000 >> 14 @ stm.w sp, (... fp, ip, lr, pc) .word 0x92e10000 >> 14 @ stm.w sp, () -- GitLab From 5c0884694f7fd1efc846f720f10d190bf708e461 Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:31:54 -0700 Subject: [PATCH 1339/1971] unicore32: add show_stack_loglvl() Currently, the log-level of show_stack() depends on a platform realization. It creates situations where the headers are printed with lower log level or higher than the stacktrace (depending on a platform or user). Furthermore, it forces the logic decision from user to an architecture side. In result, some users as sysrq/kdb/etc are doing tricks with temporary rising console_loglevel while printing their messages. And in result it not only may print unwanted messages from other CPUs, but also omit printing at all in the unlucky case where the printk() was deferred. Introducing log-level parameter and KERN_UNSUPPRESSED [1] seems an easier approach than introducing more printk buffers. Also, it will consolidate printings with headers. Introduce show_stack_loglvl(), that eventually will substitute show_stack(). As a nice side-effect - print backtrace in __die() with the same log level as the rest of function. 
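With the level threaded all the way down to dump_backtrace(), each caller passes whatever level its surrounding output already uses; in this patch that means (as in the diff below):

	/* oops path in __die(): the neighbouring dumps use KERN_EMERG */
	dump_backtrace(regs, tsk, KERN_EMERG);

	/* plain show_stack() keeps today's behaviour via the wrapper */
	show_stack_loglvl(tsk, sp, KERN_DEFAULT);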
[1]: https://lore.kernel.org/lkml/20190528002412.1625-1-dima@arista.com/T/#u Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Cc: Guan Xuetao Link: http://lkml.kernel.org/r/20200418201944.482088-40-dima@arista.com Signed-off-by: Linus Torvalds --- arch/unicore32/kernel/traps.c | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/arch/unicore32/kernel/traps.c b/arch/unicore32/kernel/traps.c index 2b7d734568659..8b1335997f502 100644 --- a/arch/unicore32/kernel/traps.c +++ b/arch/unicore32/kernel/traps.c @@ -135,12 +135,13 @@ static void dump_instr(const char *lvl, struct pt_regs *regs) set_fs(fs); } -static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) +static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk, + const char *loglvl) { unsigned int fp; int ok = 1; - printk(KERN_DEFAULT "Backtrace: "); + printk("%sBacktrace: ", loglvl); if (!tsk) tsk = current; @@ -153,25 +154,31 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) asm("mov %0, fp" : "=r" (fp) : : "cc"); if (!fp) { - printk("no frame pointer"); + printk("%sno frame pointer", loglvl); ok = 0; } else if (verify_stack(fp)) { - printk("invalid frame pointer 0x%08x", fp); + printk("%sinvalid frame pointer 0x%08x", loglvl, fp); ok = 0; } else if (fp < (unsigned long)end_of_stack(tsk)) - printk("frame pointer underflow"); - printk("\n"); + printk("%sframe pointer underflow", loglvl); + printk("%s\n", loglvl); if (ok) - c_backtrace(fp, KERN_DEFAULT); + c_backtrace(fp, loglvl); } -void show_stack(struct task_struct *tsk, unsigned long *sp) +void show_stack_loglvl(struct task_struct *tsk, unsigned long *sp, + const char *loglvl) { - dump_backtrace(NULL, tsk); + dump_backtrace(NULL, tsk, loglvl); barrier(); } +void show_stack(struct task_struct *tsk, unsigned long *sp) +{ + show_stack_loglvl(tsk, sp, KERN_DEFAULT) +} + static int __die(const char *str, int err, struct thread_info *thread, struct pt_regs *regs) { @@ -196,7 +203,7 @@ static int __die(const char *str, int err, struct thread_info *thread, if (!user_mode(regs) || in_interrupt()) { dump_mem(KERN_EMERG, "Stack: ", regs->UCreg_sp, THREAD_SIZE + (unsigned long)task_stack_page(tsk)); - dump_backtrace(regs, tsk); + dump_backtrace(regs, tsk, KERN_EMERG); dump_instr(KERN_EMERG, regs); } -- GitLab From d46b3df78ad4b4c178f1035a35463cbc0ce768b2 Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:31:57 -0700 Subject: [PATCH 1340/1971] x86: add missing const qualifiers for log_lvl Currently, the log-level of show_stack() depends on a platform realization. It creates situations where the headers are printed with lower log level or higher than the stacktrace (depending on a platform or user). Furthermore, it forces the logic decision from user to an architecture side. In result, some users as sysrq/kdb/etc are doing tricks with temporary rising console_loglevel while printing their messages. And in result it not only may print unwanted messages from other CPUs, but also omit printing at all in the unlucky case where the printk() was deferred. Introducing log-level parameter and KERN_UNSUPPRESSED [1] seems an easier approach than introducing more printk buffers. Also, it will consolidate printings with headers. Keep log_lvl const show_trace_log_lvl() and printk_stack_address() as the new generic show_stack_loglvl() wants to have a proper const qualifier. 
And gcc rightfully produces warnings in case it's not keept: arch/x86/kernel/dumpstack.c: In function `show_stack': arch/x86/kernel/dumpstack.c:294:37: warning: passing argument 4 of `show_trace_log_lv ' discards `const' qualifier from pointer target type [-Wdiscarded-qualifiers] 294 | show_trace_log_lvl(task, NULL, sp, loglvl); | ^~~~~~ arch/x86/kernel/dumpstack.c:163:32: note: expected `char *' but argument is of type `const char *' 163 | unsigned long *stack, char *log_lvl) | ~~~~~~^~~~~~~ [1]: https://lore.kernel.org/lkml/20190528002412.1625-1-dima@arista.com/T/#u Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Cc: Borislav Petkov Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20200418201944.482088-41-dima@arista.com Signed-off-by: Linus Torvalds --- arch/x86/include/asm/stacktrace.h | 2 +- arch/x86/kernel/dumpstack.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h index 14db05086bbf2..5ae5a68e469d3 100644 --- a/arch/x86/include/asm/stacktrace.h +++ b/arch/x86/include/asm/stacktrace.h @@ -87,7 +87,7 @@ get_stack_pointer(struct task_struct *task, struct pt_regs *regs) } void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, - unsigned long *stack, char *log_lvl); + unsigned long *stack, const char *log_lvl); /* The form of the top of the frame on the stack */ struct stack_frame { diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index ae64ec7f752f4..b94bc31a1757b 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -65,7 +65,7 @@ bool in_entry_stack(unsigned long *stack, struct stack_info *info) } static void printk_stack_address(unsigned long address, int reliable, - char *log_lvl) + const char *log_lvl) { touch_nmi_watchdog(); printk("%s %s%pB\n", log_lvl, reliable ? "" : "? ", (void *)address); @@ -160,7 +160,7 @@ static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs, } void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, - unsigned long *stack, char *log_lvl) + unsigned long *stack, const char *log_lvl) { struct unwind_state state; struct stack_info stack_info = {0}; -- GitLab From a832ff02244e36da0bf4bb3a1aec0ce9a23b0bad Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:32:00 -0700 Subject: [PATCH 1341/1971] x86: add show_stack_loglvl() Currently, the log-level of show_stack() depends on a platform realization. It creates situations where the headers are printed with lower log level or higher than the stacktrace (depending on a platform or user). Furthermore, it forces the logic decision from user to an architecture side. In result, some users as sysrq/kdb/etc are doing tricks with temporary rising console_loglevel while printing their messages. And in result it not only may print unwanted messages from other CPUs, but also omit printing at all in the unlucky case where the printk() was deferred. Introducing log-level parameter and KERN_UNSUPPRESSED [1] seems an easier approach than introducing more printk buffers. Also, it will consolidate printings with headers. Introduce show_stack_loglvl(), that eventually will substitute show_stack(). [1]: https://lore.kernel.org/lkml/20190528002412.1625-1-dima@arista.com/T/#u Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Cc: Borislav Petkov Cc: "H. 
Peter Anvin" Cc: Ingo Molnar Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20200418201944.482088-42-dima@arista.com Signed-off-by: Linus Torvalds --- arch/x86/kernel/dumpstack.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index b94bc31a1757b..4396f2cfad197 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -279,7 +279,8 @@ next: } } -void show_stack(struct task_struct *task, unsigned long *sp) +void show_stack_loglvl(struct task_struct *task, unsigned long *sp, + const char *loglvl) { task = task ? : current; @@ -290,7 +291,12 @@ void show_stack(struct task_struct *task, unsigned long *sp) if (!sp && task == current) sp = get_stack_pointer(current, NULL); - show_trace_log_lvl(task, NULL, sp, KERN_DEFAULT); + show_trace_log_lvl(task, NULL, sp, loglvl); +} + +void show_stack(struct task_struct *task, unsigned long *sp) +{ + show_stack_loglvl(task, sp, KERN_DEFAULT); } void show_stack_regs(struct pt_regs *regs) -- GitLab From 47fb70294976cb1ea110f4fc01ae1bc2f450933a Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:32:04 -0700 Subject: [PATCH 1342/1971] xtensa: add loglvl to show_trace() Currently, the log-level of show_stack() depends on a platform realization. It creates situations where the headers are printed with lower log level or higher than the stacktrace (depending on a platform or user). Furthermore, it forces the logic decision from user to an architecture side. In result, some users as sysrq/kdb/etc are doing tricks with temporary rising console_loglevel while printing their messages. And in result it not only may print unwanted messages from other CPUs, but also omit printing at all in the unlucky case where the printk() was deferred. Introducing log-level parameter and KERN_UNSUPPRESSED [1] seems an easier approach than introducing more printk buffers. Also, it will consolidate printings with headers. Add log level argument to show_trace() as a preparation for introducing show_stack_loglvl(). 
[1]: https://lore.kernel.org/lkml/20190528002412.1625-1-dima@arista.com/T/#u [rppt@kernel.org: build fix] Link: http://lkml.kernel.org/r/20200511194534.GA1018386@kernel.org Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Cc: Chris Zankel Cc: Max Filippov Cc: Mike Rapoport Link: http://lkml.kernel.org/r/20200418201944.482088-43-dima@arista.com Signed-off-by: Linus Torvalds --- arch/xtensa/kernel/traps.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c index 0976e27b8d5da..5013b62a9943f 100644 --- a/arch/xtensa/kernel/traps.c +++ b/arch/xtensa/kernel/traps.c @@ -479,18 +479,22 @@ void show_regs(struct pt_regs * regs) static int show_trace_cb(struct stackframe *frame, void *data) { + const char *loglvl = data; + if (kernel_text_address(frame->pc)) - pr_cont(" [<%08lx>] %pB\n", frame->pc, (void *)frame->pc); + printk("%s [<%08lx>] %pB\n", + loglvl, frame->pc, (void *)frame->pc); return 0; } -void show_trace(struct task_struct *task, unsigned long *sp) +static void show_trace(struct task_struct *task, unsigned long *sp, + const char *loglvl) { if (!sp) sp = stack_pointer(task); - pr_info("Call Trace:\n"); - walk_stackframe(sp, show_trace_cb, NULL); + printk("%sCall Trace:\n", loglvl); + walk_stackframe(sp, show_trace_cb, (void *)loglvl); } #define STACK_DUMP_ENTRY_SIZE 4 @@ -511,7 +515,7 @@ void show_stack(struct task_struct *task, unsigned long *sp) print_hex_dump(KERN_INFO, " ", DUMP_PREFIX_NONE, STACK_DUMP_LINE_SIZE, STACK_DUMP_ENTRY_SIZE, sp, len, false); - show_trace(task, sp); + show_trace(task, sp, KERN_INFO); } DEFINE_SPINLOCK(die_lock); -- GitLab From 20da1e8bb06d0e3ccd172de3e193c987e88013cb Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:32:07 -0700 Subject: [PATCH 1343/1971] xtensa: add show_stack_loglvl() Currently, the log-level of show_stack() depends on a platform realization. It creates situations where the headers are printed with lower log level or higher than the stacktrace (depending on a platform or user). Furthermore, it forces the logic decision from user to an architecture side. In result, some users as sysrq/kdb/etc are doing tricks with temporary rising console_loglevel while printing their messages. And in result it not only may print unwanted messages from other CPUs, but also omit printing at all in the unlucky case where the printk() was deferred. Introducing log-level parameter and KERN_UNSUPPRESSED [1] seems an easier approach than introducing more printk buffers. Also, it will consolidate printings with headers. Introduce show_stack_loglvl(), that eventually will substitute show_stack(). 
[1]: https://lore.kernel.org/lkml/20190528002412.1625-1-dima@arista.com/T/#u Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Cc: Chris Zankel Cc: Max Filippov Link: http://lkml.kernel.org/r/20200418201944.482088-44-dima@arista.com Signed-off-by: Linus Torvalds --- arch/xtensa/kernel/traps.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c index 5013b62a9943f..1013acc2e03ea 100644 --- a/arch/xtensa/kernel/traps.c +++ b/arch/xtensa/kernel/traps.c @@ -501,7 +501,8 @@ static void show_trace(struct task_struct *task, unsigned long *sp, #define STACK_DUMP_LINE_SIZE 32 static size_t kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH; -void show_stack(struct task_struct *task, unsigned long *sp) +void show_stack_loglvl(struct task_struct *task, unsigned long *sp, + const char *loglvl) { size_t len; @@ -511,11 +512,16 @@ void show_stack(struct task_struct *task, unsigned long *sp) len = min((-(size_t)sp) & (THREAD_SIZE - STACK_DUMP_ENTRY_SIZE), kstack_depth_to_print * STACK_DUMP_ENTRY_SIZE); - pr_info("Stack:\n"); - print_hex_dump(KERN_INFO, " ", DUMP_PREFIX_NONE, + printk("%sStack:\n", loglvl); + print_hex_dump(loglvl, " ", DUMP_PREFIX_NONE, STACK_DUMP_LINE_SIZE, STACK_DUMP_ENTRY_SIZE, sp, len, false); - show_trace(task, sp, KERN_INFO); + show_trace(task, sp, loglvl); +} + +void show_stack(struct task_struct *task, unsigned long *sp) +{ + show_stack_loglvl(task, sp, KERN_INFO); } DEFINE_SPINLOCK(die_lock); -- GitLab From ab34b46d1a74e98996e67a7da7e5d683ecfd9f57 Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:32:10 -0700 Subject: [PATCH 1344/1971] sysrq: use show_stack_loglvl() Show the stack trace on a CPU with the same log level as "CPU%d" header. Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Cc: Greg Kroah-Hartman Cc: Jiri Slaby Link: http://lkml.kernel.org/r/20200418201944.482088-45-dima@arista.com Signed-off-by: Linus Torvalds --- drivers/tty/sysrq.c | 2 +- include/linux/sched/debug.h | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index 477cdc1e9cf31..7bd935379dece 100644 --- a/drivers/tty/sysrq.c +++ b/drivers/tty/sysrq.c @@ -235,7 +235,7 @@ static void showacpu(void *dummy) raw_spin_lock_irqsave(&show_lock, flags); pr_info("CPU%d:\n", smp_processor_id()); - show_stack(NULL, NULL); + show_stack_loglvl(NULL, NULL, KERN_INFO); raw_spin_unlock_irqrestore(&show_lock, flags); } diff --git a/include/linux/sched/debug.h b/include/linux/sched/debug.h index 95fb9e025247e..373e4e3faf2ad 100644 --- a/include/linux/sched/debug.h +++ b/include/linux/sched/debug.h @@ -31,6 +31,8 @@ extern void show_regs(struct pt_regs *); * trace (or NULL if the entire call-chain of the task should be shown). */ extern void show_stack(struct task_struct *task, unsigned long *sp); +extern void show_stack_loglvl(struct task_struct *task, unsigned long *sp, + const char *loglvl); extern void sched_show_task(struct task_struct *p); -- GitLab From 9ed5b01a36a0e40a7450b8a5caf82e0552c41bb3 Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:32:13 -0700 Subject: [PATCH 1345/1971] x86/amd_gart: print stacktrace for a leak with KERN_ERR It's under CONFIG_IOMMU_LEAK option which is enabled by debug config. Likely the backtrace is worth to be seen - so aligning with log level of error message in iommu_full(). Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Cc: Borislav Petkov Cc: "H. 
Peter Anvin" Cc: Ingo Molnar Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20200418201944.482088-46-dima@arista.com Signed-off-by: Linus Torvalds --- arch/x86/kernel/amd_gart_64.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c index 16133819415c7..9d2c076be37a6 100644 --- a/arch/x86/kernel/amd_gart_64.c +++ b/arch/x86/kernel/amd_gart_64.c @@ -159,7 +159,7 @@ static void dump_leak(void) return; dump = 1; - show_stack(NULL, NULL); + show_stack_loglvl(NULL, NULL, KERN_ERR); debug_dma_dump_mappings(NULL); } #endif -- GitLab From 3f0543780e09d6f475f043f6ce0824106e610fd4 Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:32:16 -0700 Subject: [PATCH 1346/1971] power: use show_stack_loglvl() Aligning with other watchdog messages just before panic - use KERN_EMERG. Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Acked-by: Rafael J. Wysocki Cc: Greg Kroah-Hartman Cc: Len Brown Cc: Pavel Machek Link: http://lkml.kernel.org/r/20200418201944.482088-47-dima@arista.com Signed-off-by: Linus Torvalds --- drivers/base/power/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index bb98b813554fd..be5af89bbff16 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -519,7 +519,7 @@ static void dpm_watchdog_handler(struct timer_list *t) struct dpm_watchdog *wd = from_timer(wd, t, timer); dev_emerg(wd->dev, "**** DPM device timeout ****\n"); - show_stack(wd->tsk, NULL); + show_stack_loglvl(wd->tsk, NULL, KERN_EMERG); panic("%s %s: unrecoverable failure\n", dev_driver_string(wd->dev), dev_name(wd->dev)); } -- GitLab From 77819daf247aad16beaeb537ae77d1d6d0697ca2 Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:32:19 -0700 Subject: [PATCH 1347/1971] kdb: don't play with console_loglevel Print the stack trace with KERN_EMERG - it should be always visible. Playing with console_loglevel is a bad idea as there may be more messages printed than wanted. Also the stack trace might be not printed at all if printk() was deferred and console_loglevel was raised back before the trace got flushed. Unfortunately, after rebasing on commit 2277b492582d ("kdb: Fix stack crawling on 'running' CPUs that aren't the master"), kdb_show_stack() uses now kdb_dump_stack_on_cpu(), which for now won't be converted as it uses dump_stack() instead of show_stack(). Convert for now the branch that uses show_stack() and remove console_loglevel exercise from that case. 
Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Reviewed-by: Douglas Anderson Acked-by: Daniel Thompson Cc: Jason Wessel Link: http://lkml.kernel.org/r/20200418201944.482088-48-dima@arista.com Signed-off-by: Linus Torvalds --- kernel/debug/kdb/kdb_bt.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/kernel/debug/kdb/kdb_bt.c b/kernel/debug/kdb/kdb_bt.c index 3de0cc780c164..43f5dcd2b9ac7 100644 --- a/kernel/debug/kdb/kdb_bt.c +++ b/kernel/debug/kdb/kdb_bt.c @@ -21,17 +21,18 @@ static void kdb_show_stack(struct task_struct *p, void *addr) { - int old_lvl = console_loglevel; - - console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH; kdb_trap_printk++; - if (!addr && kdb_task_has_cpu(p)) + if (!addr && kdb_task_has_cpu(p)) { + int old_lvl = console_loglevel; + + console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH; kdb_dump_stack_on_cpu(kdb_process_cpu(p)); - else - show_stack(p, addr); + console_loglevel = old_lvl; + } else { + show_stack_loglvl(p, addr, KERN_EMERG); + } - console_loglevel = old_lvl; kdb_trap_printk--; } -- GitLab From 8ba09b1dc131ff9bb530967c593e298c600f72c0 Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:32:23 -0700 Subject: [PATCH 1348/1971] sched: print stack trace with KERN_INFO Aligning with other messages printed in sched_show_task() - use KERN_INFO to print the backtrace. Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Cc: Ben Segall Cc: Dietmar Eggemann Cc: Ingo Molnar Cc: Juri Lelli Cc: Mel Gorman Cc: Peter Zijlstra Cc: Steven Rostedt Cc: Vincent Guittot Link: http://lkml.kernel.org/r/20200418201944.482088-49-dima@arista.com Signed-off-by: Linus Torvalds --- kernel/sched/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index c06da3c3e317d..c68a6e7b306fe 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -6025,7 +6025,7 @@ void sched_show_task(struct task_struct *p) (unsigned long)task_thread_info(p)->flags); print_worker_info(KERN_INFO, p); - show_stack(p, NULL); + show_stack_loglvl(p, NULL, KERN_INFO); put_task_stack(p); } EXPORT_SYMBOL_GPL(sched_show_task); -- GitLab From fe1993a001094a596576334c56e7a10e4cd69e6c Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:32:26 -0700 Subject: [PATCH 1349/1971] kernel: use show_stack_loglvl() Align the last users of show_stack() by KERN_DEFAULT as the surrounding headers/messages. 
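For the two remaining generic users that is (as in the diff below):

	show_stack_loglvl(NULL, NULL, KERN_DEFAULT);	/* lib/dump_stack.c, __dump_stack() */
	show_stack_loglvl(task, NULL, KERN_DEFAULT);	/* kernel/locking/rtmutex-debug.c */

which completes the picture from the previous patches: sysrq and sched_show_task() ask for KERN_INFO, the DPM watchdog and kdb for KERN_EMERG, the GART leak dump for KERN_ERR, and everything else stays at KERN_DEFAULT.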
Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Cc: Ingo Molnar Cc: Peter Zijlstra Cc: Will Deacon Link: http://lkml.kernel.org/r/20200418201944.482088-50-dima@arista.com Signed-off-by: Linus Torvalds --- kernel/locking/rtmutex-debug.c | 2 +- lib/dump_stack.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/locking/rtmutex-debug.c b/kernel/locking/rtmutex-debug.c index fd4fe1f5b458d..5e63d6e8a2230 100644 --- a/kernel/locking/rtmutex-debug.c +++ b/kernel/locking/rtmutex-debug.c @@ -125,7 +125,7 @@ void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter) printk("\n%s/%d's [blocked] stackdump:\n\n", task->comm, task_pid_nr(task)); - show_stack(task, NULL); + show_stack_loglvl(task, NULL, KERN_DEFAULT); printk("\n%s/%d's [current] stackdump:\n\n", current->comm, task_pid_nr(current)); dump_stack(); diff --git a/lib/dump_stack.c b/lib/dump_stack.c index 33ffbf3088539..5595e8962cf6b 100644 --- a/lib/dump_stack.c +++ b/lib/dump_stack.c @@ -74,7 +74,7 @@ void show_regs_print_info(const char *log_lvl) static void __dump_stack(void) { dump_stack_print_info(KERN_DEFAULT); - show_stack(NULL, NULL); + show_stack_loglvl(NULL, NULL, KERN_DEFAULT); } /** -- GitLab From 9cb8f069deeed708bf19486d5893e297dc467ae0 Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Mon, 8 Jun 2020 21:32:29 -0700 Subject: [PATCH 1350/1971] kernel: rename show_stack_loglvl() => show_stack() Now the last users of show_stack() got converted to use an explicit log level, show_stack_loglvl() can drop it's redundant suffix and become once again well known show_stack(). Signed-off-by: Dmitry Safonov Signed-off-by: Andrew Morton Link: http://lkml.kernel.org/r/20200418201944.482088-51-dima@arista.com Signed-off-by: Linus Torvalds --- arch/alpha/kernel/traps.c | 8 +------- arch/arc/kernel/stacktrace.c | 8 +------- arch/arm/kernel/traps.c | 8 +------- arch/arm64/kernel/traps.c | 8 +------- arch/c6x/kernel/traps.c | 7 +------ arch/csky/kernel/ptrace.c | 4 ++-- arch/csky/kernel/stacktrace.c | 9 +-------- arch/h8300/kernel/traps.c | 8 +------- arch/hexagon/kernel/traps.c | 8 +------- arch/ia64/kernel/mca.c | 2 +- arch/ia64/kernel/process.c | 11 ++--------- arch/m68k/kernel/traps.c | 11 +++-------- arch/microblaze/kernel/traps.c | 8 +------- arch/mips/kernel/traps.c | 8 +------- arch/nds32/kernel/traps.c | 8 +------- arch/nios2/kernel/traps.c | 12 +++--------- arch/openrisc/kernel/traps.c | 10 ++-------- arch/parisc/kernel/traps.c | 8 +------- arch/powerpc/kernel/process.c | 11 +++-------- arch/powerpc/kernel/stacktrace.c | 2 +- arch/riscv/kernel/stacktrace.c | 8 +------- arch/s390/kernel/dumpstack.c | 9 ++------- arch/sh/kernel/dumpstack.c | 8 +------- arch/sparc/kernel/process_32.c | 11 ++--------- arch/sparc/kernel/process_64.c | 2 +- arch/sparc/kernel/traps_64.c | 8 +------- arch/um/drivers/mconsole_kern.c | 2 +- arch/um/kernel/sysrq.c | 7 +------ arch/unicore32/kernel/traps.c | 7 +------ arch/x86/kernel/amd_gart_64.c | 2 +- arch/x86/kernel/dumpstack.c | 7 +------ arch/xtensa/kernel/traps.c | 10 ++-------- drivers/base/power/main.c | 2 +- drivers/tty/sysrq.c | 2 +- include/linux/sched/debug.h | 5 ++--- kernel/debug/kdb/kdb_bt.c | 2 +- kernel/locking/rtmutex-debug.c | 2 +- kernel/sched/core.c | 2 +- lib/dump_stack.c | 2 +- 39 files changed, 52 insertions(+), 205 deletions(-) diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c index 2402f1777f54e..8383ccfaccdc2 100644 --- a/arch/alpha/kernel/traps.c +++ b/arch/alpha/kernel/traps.c @@ -144,8 +144,7 @@ dik_show_trace(unsigned long 
*sp, const char *loglvl) static int kstack_depth_to_print = 24; -void show_stack_loglvl(struct task_struct *task, unsigned long *sp, - const char *loglvl) +void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl) { unsigned long *stack; int i; @@ -174,11 +173,6 @@ void show_stack_loglvl(struct task_struct *task, unsigned long *sp, dik_show_trace(sp, loglvl); } -void show_stack(struct task_struct *task, unsigned long *sp) -{ - show_stack_loglvl(task, sp, KERN_DEFAULT); -} - void die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15) { diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c index 24f9cd8a12c94..feba91c9d969c 100644 --- a/arch/arc/kernel/stacktrace.c +++ b/arch/arc/kernel/stacktrace.c @@ -228,17 +228,11 @@ noinline void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs, EXPORT_SYMBOL(show_stacktrace); /* Expected by sched Code */ -void show_stack_loglvl(struct task_struct *tsk, unsigned long *sp, - const char *loglvl) +void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl) { show_stacktrace(tsk, NULL, loglvl); } -void show_stack(struct task_struct *tsk, unsigned long *sp) -{ - show_stack_loglvl(tsk, sp, KERN_DEFAULT); -} - /* Another API expected by schedular, shows up in "ps" as Wait Channel * Of course just returning schedule( ) would be pointless so unwind until * the function is not in schedular code diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 2ae2c23799ada..65a3b1e754802 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -247,18 +247,12 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk, } #endif -void show_stack_loglvl(struct task_struct *tsk, unsigned long *sp, - const char *loglvl) +void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl) { dump_backtrace(NULL, tsk, loglvl); barrier(); } -void show_stack(struct task_struct *tsk, unsigned long *sp) -{ - show_stack_loglvl(tsk, sp, KERN_DEFAULT); -} - #ifdef CONFIG_PREEMPT #define S_PREEMPT " PREEMPT" #elif defined(CONFIG_PREEMPT_RT) diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 3621868b2fcc8..3fd9e2731e0dd 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -137,18 +137,12 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk, put_task_stack(tsk); } -void show_stack_loglvl(struct task_struct *tsk, unsigned long *sp, - const char *loglvl) +void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl) { dump_backtrace(NULL, tsk, loglvl); barrier(); } -void show_stack(struct task_struct *tsk, unsigned long *sp) -{ - show_stack_loglvl(tsk, sp, KERN_DEFAULT); -} - #ifdef CONFIG_PREEMPT #define S_PREEMPT " PREEMPT" #elif defined(CONFIG_PREEMPT_RT) diff --git a/arch/c6x/kernel/traps.c b/arch/c6x/kernel/traps.c index 4afbf48f1ce00..2b9121c755be1 100644 --- a/arch/c6x/kernel/traps.c +++ b/arch/c6x/kernel/traps.c @@ -374,7 +374,7 @@ static void show_trace(unsigned long *stack, unsigned long *endstack, printk("%s\n", loglvl); } -void show_stack_loglvl(struct task_struct *task, unsigned long *stack, +void show_stack(struct task_struct *task, unsigned long *stack, const char *loglvl) { unsigned long *p, *endstack; @@ -403,11 +403,6 @@ void show_stack_loglvl(struct task_struct *task, unsigned long *stack, show_trace(stack, endstack, loglvl); } -void show_stack(struct task_struct *task, unsigned long *stack) -{ - show_stack_loglvl(task, stack, KERN_DEBUG); -} - 
int is_valid_bugaddr(unsigned long addr) { return __kernel_text_address(addr); diff --git a/arch/csky/kernel/ptrace.c b/arch/csky/kernel/ptrace.c index 5a82230bddf98..bbd801f86eb57 100644 --- a/arch/csky/kernel/ptrace.c +++ b/arch/csky/kernel/ptrace.c @@ -344,7 +344,7 @@ asmlinkage void syscall_trace_exit(struct pt_regs *regs) trace_sys_exit(regs, syscall_get_return_value(current, regs)); } -extern void show_stack(struct task_struct *task, unsigned long *stack); +extern void show_stack(struct task_struct *task, unsigned long *stack, const char *loglvl); void show_regs(struct pt_regs *fp) { unsigned long *sp; @@ -420,6 +420,6 @@ void show_regs(struct pt_regs *fp) } pr_cont("\n"); - show_stack(NULL, (unsigned long *)fp->regs[4]); + show_stack(NULL, (unsigned long *)fp->regs[4], KERN_INFO); return; } diff --git a/arch/csky/kernel/stacktrace.c b/arch/csky/kernel/stacktrace.c index ca135f13cc138..16ae20a0af342 100644 --- a/arch/csky/kernel/stacktrace.c +++ b/arch/csky/kernel/stacktrace.c @@ -95,19 +95,12 @@ static bool print_trace_address(unsigned long pc, void *arg) return false; } -void show_stack_loglvl(struct task_struct *task, unsigned long *sp, - const char *loglvl) +void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl) { pr_cont("Call Trace:\n"); walk_stackframe(task, NULL, print_trace_address, (void *)loglvl); } -void show_stack(struct task_struct *task, unsigned long *sp) -{ - pr_cont("Call Trace:\n"); - walk_stackframe(task, NULL, print_trace_address, KERN_INFO); -} - static bool save_wchan(unsigned long pc, void *arg) { if (!in_sched_functions(pc)) { diff --git a/arch/h8300/kernel/traps.c b/arch/h8300/kernel/traps.c index 6362446563d6a..5d8b969cd8f34 100644 --- a/arch/h8300/kernel/traps.c +++ b/arch/h8300/kernel/traps.c @@ -115,8 +115,7 @@ void die(const char *str, struct pt_regs *fp, unsigned long err) static int kstack_depth_to_print = 24; -void show_stack_loglvl(struct task_struct *task, unsigned long *esp, - const char *loglvl) +void show_stack(struct task_struct *task, unsigned long *esp, const char *loglvl) { unsigned long *stack, addr; int i; @@ -158,8 +157,3 @@ void show_stack_loglvl(struct task_struct *task, unsigned long *esp, } printk("%s\n", loglvl); } - -void show_stack(struct task_struct *task, unsigned long *esp) -{ - show_stack_loglvl(task, esp, KERN_INFO); -} diff --git a/arch/hexagon/kernel/traps.c b/arch/hexagon/kernel/traps.c index a8a3a210d7810..904134b37232f 100644 --- a/arch/hexagon/kernel/traps.c +++ b/arch/hexagon/kernel/traps.c @@ -175,18 +175,12 @@ static void do_show_stack(struct task_struct *task, unsigned long *fp, } } -void show_stack_loglvl(struct task_struct *task, unsigned long *fp, - const char *loglvl) +void show_stack(struct task_struct *task, unsigned long *fp, const char *loglvl) { /* Saved link reg is one word above FP */ do_show_stack(task, fp, 0, loglvl); } -void show_stack(struct task_struct *task, unsigned long *fp) -{ - show_stack_loglvl(task, fp, 0, KERN_INFO); -} - int die(const char *str, struct pt_regs *regs, long err) { static struct { diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 6fb54dfa1350d..2703f7795672d 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -1631,7 +1631,7 @@ default_monarch_init_process(struct notifier_block *self, unsigned long val, voi if (read_trylock(&tasklist_lock)) { do_each_thread (g, t) { printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm); - show_stack(t, NULL); + show_stack(t, NULL, KERN_DEFAULT); } while_each_thread (g, t); 
read_unlock(&tasklist_lock); } diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index 913d9a01cbf9b..96dfb9e4b16fb 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -85,8 +85,7 @@ ia64_do_show_stack (struct unw_frame_info *info, void *arg) } void -show_stack_loglvl (struct task_struct *task, unsigned long *sp, - const char *loglvl) +show_stack (struct task_struct *task, unsigned long *sp, const char *loglvl) { if (!task) unw_init_running(ia64_do_show_stack, (void *)loglvl); @@ -98,12 +97,6 @@ show_stack_loglvl (struct task_struct *task, unsigned long *sp, } } -void -show_stack (struct task_struct *task, unsigned long *sp) -{ - show_stack_loglvl(task, sp, KERN_DEFAULT); -} - void show_regs (struct pt_regs *regs) { @@ -158,7 +151,7 @@ show_regs (struct pt_regs *regs) ((i == sof - 1) || (i % 3) == 2) ? "\n" : " "); } } else - show_stack(NULL, NULL); + show_stack(NULL, NULL, KERN_DEFAULT); } /* local support for deprecated console_print */ diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c index ffcc5ec4fac3b..df6fc782754f7 100644 --- a/arch/m68k/kernel/traps.c +++ b/arch/m68k/kernel/traps.c @@ -916,7 +916,7 @@ void show_registers(struct pt_regs *regs) default: pr_cont("\n"); } - show_stack(NULL, (unsigned long *)addr); + show_stack(NULL, (unsigned long *)addr, KERN_INFO); pr_info("Code:"); set_fs(KERNEL_DS); @@ -935,8 +935,8 @@ void show_registers(struct pt_regs *regs) pr_cont("\n"); } -void show_stack_loglvl(struct task_struct *task, unsigned long *stack, - const char *loglvl) +void show_stack(struct task_struct *task, unsigned long *stack, + const char *loglvl) { unsigned long *p; unsigned long *endstack; @@ -963,11 +963,6 @@ void show_stack_loglvl(struct task_struct *task, unsigned long *stack, show_trace(stack, loglvl); } -void show_stack(struct task_struct *task, unsigned long *stack) -{ - show_stack_loglvl(task, stack, KERN_INFO); -} - /* * The vector number returned in the frame pointer may also contain * the "fs" (Fault Status) bits on ColdFire. 
These are in the bottom diff --git a/arch/microblaze/kernel/traps.c b/arch/microblaze/kernel/traps.c index 149ae534937ef..94b6fe93147d5 100644 --- a/arch/microblaze/kernel/traps.c +++ b/arch/microblaze/kernel/traps.c @@ -31,8 +31,7 @@ static int __init kstack_setup(char *s) } __setup("kstack=", kstack_setup); -void show_stack_loglvl(struct task_struct *task, unsigned long *sp, - const char *loglvl) +void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl) { unsigned long words_to_show; u32 fp = (u32) sp; @@ -77,8 +76,3 @@ void show_stack_loglvl(struct task_struct *task, unsigned long *sp, debug_show_held_locks(task); } - -void show_stack(struct task_struct *task, unsigned long *sp) -{ - show_stack_loglvl(task, sp, KERN_INFO); -} diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index e49040739b61d..320797bd03f64 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -198,8 +198,7 @@ static void show_stacktrace(struct task_struct *task, show_backtrace(task, regs, loglvl); } -void show_stack_loglvl(struct task_struct *task, unsigned long *sp, - const char *loglvl) +void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl) { struct pt_regs regs; mm_segment_t old_fs = get_fs(); @@ -227,11 +226,6 @@ void show_stack_loglvl(struct task_struct *task, unsigned long *sp, set_fs(old_fs); } -void show_stack(struct task_struct *task, unsigned long *sp) -{ - show_stack_loglvl(task, sp, KERN_DEFAULT) -} - static void show_code(unsigned int __user *pc) { long i; diff --git a/arch/nds32/kernel/traps.c b/arch/nds32/kernel/traps.c index 90f12582c218a..6a9772ba73927 100644 --- a/arch/nds32/kernel/traps.c +++ b/arch/nds32/kernel/traps.c @@ -135,8 +135,7 @@ static void __dump(struct task_struct *tsk, unsigned long *base_reg, printk("%s\n", loglvl); } -void show_stack_loglvl(struct task_struct *tsk, unsigned long *sp, - const char *loglvl) +void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl) { unsigned long *base_reg; @@ -157,11 +156,6 @@ void show_stack_loglvl(struct task_struct *tsk, unsigned long *sp, barrier(); } -void show_stack(struct task_struct *tsk, unsigned long *sp) -{ - show_stack_loglvl(tsk, sp, KERN_EMERG); -} - DEFINE_SPINLOCK(die_lock); /* diff --git a/arch/nios2/kernel/traps.c b/arch/nios2/kernel/traps.c index 08071caa9b36f..b172da4eb1a95 100644 --- a/arch/nios2/kernel/traps.c +++ b/arch/nios2/kernel/traps.c @@ -52,14 +52,13 @@ void _exception(int signo, struct pt_regs *regs, int code, unsigned long addr) } /* - * The show_stack(), show_stack_loglvl() are external API - * which we do not use ourselves. + * The show_stack() is external API which we do not use ourselves. 
*/ int kstack_depth_to_print = 48; -void show_stack_loglvl(struct task_struct *task, unsigned long *stack, - const char *loglvl) +void show_stack(struct task_struct *task, unsigned long *stack, + const char *loglvl) { unsigned long *endstack, addr; int i; @@ -106,11 +105,6 @@ void show_stack_loglvl(struct task_struct *task, unsigned long *stack, printk("%s\n", loglvl); } -void show_stack(struct task_struct *task, unsigned long *stack) -{ - show_stack_loglvl(task, stack, KERN_EMERG); -} - void __init trap_init(void) { /* Nothing to do here */ diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c index 3b7978a22d685..3022b0ad142cc 100644 --- a/arch/openrisc/kernel/traps.c +++ b/arch/openrisc/kernel/traps.c @@ -48,8 +48,7 @@ void print_trace(void *data, unsigned long addr, int reliable) } /* displays a short stack trace */ -void show_stack_loglvl(struct task_struct *task, unsigned long *esp, - const char *loglvl) +void show_stack(struct task_struct *task, unsigned long *esp, const char *loglvl) { if (esp == NULL) esp = (unsigned long *)&esp; @@ -58,11 +57,6 @@ void show_stack_loglvl(struct task_struct *task, unsigned long *esp, unwind_stack((void *)loglvl, esp, print_trace); } -void show_stack(struct task_struct *task, unsigned long *esp) -{ - show_stack_loglvl(task, esp, KERN_EMERG); -} - void show_registers(struct pt_regs *regs) { int i; @@ -104,7 +98,7 @@ void show_registers(struct pt_regs *regs) if (in_kernel) { printk("\nStack: "); - show_stack(NULL, (unsigned long *)esp); + show_stack(NULL, (unsigned long *)esp, KERN_EMERG); printk("\nCode: "); if (regs->pc < PAGE_OFFSET) diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index c2411de3730f2..0a89899f154a5 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c @@ -198,17 +198,11 @@ static void parisc_show_stack(struct task_struct *task, do_show_stack(&info, loglvl); } -void show_stack_loglvl(struct task_struct *t, unsigned long *sp, - const char *loglvl) +void show_stack(struct task_struct *t, unsigned long *sp, const char *loglvl) { parisc_show_stack(t, NULL, loglvl); } -void show_stack(struct task_struct *t, unsigned long *sp) -{ - show_stack_loglvl(t, sp, KERN_CRIT) -} - int is_valid_bugaddr(unsigned long iaoq) { return 1; diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index a456b4454b3fd..99d619f81cb52 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -1456,7 +1456,7 @@ void show_regs(struct pt_regs * regs) printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip); printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link); #endif - show_stack(current, (unsigned long *) regs->gpr[1]); + show_stack(current, (unsigned long *) regs->gpr[1], KERN_DEFAULT); if (!user_mode(regs)) show_instructions(regs); } @@ -2063,8 +2063,8 @@ unsigned long get_wchan(struct task_struct *p) static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH; -void show_stack_loglvl(struct task_struct *tsk, unsigned long *stack, - const char *loglvl) +void show_stack(struct task_struct *tsk, unsigned long *stack, + const char *loglvl) { unsigned long sp, ip, lr, newsp; int count = 0; @@ -2133,11 +2133,6 @@ void show_stack_loglvl(struct task_struct *tsk, unsigned long *stack, put_task_stack(tsk); } -void show_stack(struct task_struct *tsk, unsigned long *stack) -{ - show_stack_loglvl(tsk, stack, KERN_DEFAULT); -} - #ifdef CONFIG_PPC64 /* Called with hard IRQs off */ void notrace __ppc64_runlatch_on(void) diff --git a/arch/powerpc/kernel/stacktrace.c 
b/arch/powerpc/kernel/stacktrace.c index c477b8585a297..b6440657ef92d 100644 --- a/arch/powerpc/kernel/stacktrace.c +++ b/arch/powerpc/kernel/stacktrace.c @@ -260,7 +260,7 @@ static void raise_backtrace_ipi(cpumask_t *mask) pr_cont(" current pointer corrupt? (%px)\n", p->__current); pr_warn("Back trace of paca->saved_r1 (0x%016llx) (possibly stale):\n", p->saved_r1); - show_stack(p->__current, (unsigned long *)p->saved_r1); + show_stack(p->__current, (unsigned long *)p->saved_r1, KERN_WARNING); } } diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c index aaa64bf007f82..595342910c3f6 100644 --- a/arch/riscv/kernel/stacktrace.c +++ b/arch/riscv/kernel/stacktrace.c @@ -105,18 +105,12 @@ static bool print_trace_address(unsigned long pc, void *arg) return false; } -void show_stack_loglvl(struct task_struct *task, unsigned long *sp, - const char *loglvl) +void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl) { pr_cont("Call Trace:\n"); walk_stackframe(task, NULL, print_trace_address, (void *)loglvl); } -void show_stack(struct task_struct *task, unsigned long *sp) -{ - show_stack_loglvl(task, sp, KERN_DEFAULT); -} - static bool save_wchan(unsigned long pc, void *arg) { if (!in_sched_functions(pc)) { diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c index 887a054919fc1..0dc4b258b98d5 100644 --- a/arch/s390/kernel/dumpstack.c +++ b/arch/s390/kernel/dumpstack.c @@ -126,7 +126,7 @@ unknown: return -EINVAL; } -void show_stack_loglvl(struct task_struct *task, unsigned long *stack, +void show_stack(struct task_struct *task, unsigned long *stack, const char *loglvl) { struct unwind_state state; @@ -139,11 +139,6 @@ void show_stack_loglvl(struct task_struct *task, unsigned long *stack, debug_show_held_locks(task ? : current); } -void show_stack(struct task_struct *task, unsigned long *stack) -{ - show_stack_loglvl(task, stack, KERN_DEFAULT); -} - static void show_last_breaking_event(struct pt_regs *regs) { printk("Last Breaking-Event-Address:\n"); @@ -181,7 +176,7 @@ void show_regs(struct pt_regs *regs) show_registers(regs); /* Show stack backtrace if pt_regs is from kernel mode */ if (!user_mode(regs)) - show_stack(NULL, (unsigned long *) regs->gprs[15]); + show_stack(NULL, (unsigned long *) regs->gprs[15], KERN_DEFAULT); show_last_breaking_event(regs); } diff --git a/arch/sh/kernel/dumpstack.c b/arch/sh/kernel/dumpstack.c index cc51e9d746674..a13c045804ed1 100644 --- a/arch/sh/kernel/dumpstack.c +++ b/arch/sh/kernel/dumpstack.c @@ -144,8 +144,7 @@ void show_trace(struct task_struct *tsk, unsigned long *sp, debug_show_held_locks(tsk); } -void show_stack_loglvl(struct task_struct *tsk, unsigned long *sp, - const char *loglvl) +void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl) { unsigned long stack; @@ -161,8 +160,3 @@ void show_stack_loglvl(struct task_struct *tsk, unsigned long *sp, (unsigned long)task_stack_page(tsk)); show_trace(tsk, sp, NULL, loglvl); } - -void show_stack(struct task_struct *task, unsigned long *sp) -{ - show_stack_loglvl(task, sp, KERN_DEFAULT); -} diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c index 0b07de5618e50..65c0d5207b0cd 100644 --- a/arch/sparc/kernel/process_32.c +++ b/arch/sparc/kernel/process_32.c @@ -145,12 +145,10 @@ void show_regs(struct pt_regs *r) } /* - * The show_stack(), show_stack_loglvl() are external APIs which - * we do not use ourselves. + * The show_stack() is external API which we do not use ourselves. 
* The oops is printed in die_if_kernel. */ -void show_stack_loglvl(struct task_struct *tsk, unsigned long *_ksp, - const char *loglvl) +void show_stack(struct task_struct *tsk, unsigned long *_ksp, const char *loglvl) { unsigned long pc, fp; unsigned long task_base; @@ -179,11 +177,6 @@ void show_stack_loglvl(struct task_struct *tsk, unsigned long *_ksp, printk("%s\n", loglvl); } -void show_stack(struct task_struct *task, unsigned long *sp) -{ - show_stack_loglvl(task, sp, KERN_DEFAULT); -} - /* * Free current thread data structures etc.. */ diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index 423011e609828..5a4d9c8f89033 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c @@ -195,7 +195,7 @@ void show_regs(struct pt_regs *regs) regs->u_regs[15]); printk("RPC: <%pS>\n", (void *) regs->u_regs[15]); show_regwindow(regs); - show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]); + show_stack(current, (unsigned long *)regs->u_regs[UREG_FP], KERN_DEFAULT); } union global_cpu_snapshot global_cpu_snapshot[NR_CPUS]; diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index 8715bc93bd9d8..96d92f1075514 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c @@ -2453,8 +2453,7 @@ static void user_instruction_dump(unsigned int __user *pc) printk("\n"); } -void show_stack_loglvl(struct task_struct *tsk, unsigned long *_ksp, - const char *loglvl) +void show_stack(struct task_struct *tsk, unsigned long *_ksp, const char *loglvl) { unsigned long fp, ksp; struct thread_info *tp; @@ -2514,11 +2513,6 @@ void show_stack_loglvl(struct task_struct *tsk, unsigned long *_ksp, } while (++count < 16); } -void show_stack(struct task_struct *tsk, unsigned long *_ksp) -{ - show_stack_loglvl(task, sp, KERN_DEFAULT); -} - static inline struct reg_window *kernel_stack_up(struct reg_window *rw) { unsigned long fp = rw->ins[6]; diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c index 30575bd929752..a2e680f7d39f2 100644 --- a/arch/um/drivers/mconsole_kern.c +++ b/arch/um/drivers/mconsole_kern.c @@ -648,7 +648,7 @@ static void stack_proc(void *arg) { struct task_struct *task = arg; - show_stack(task, NULL); + show_stack(task, NULL, KERN_INFO); } /* diff --git a/arch/um/kernel/sysrq.c b/arch/um/kernel/sysrq.c index 1b54b6431499b..acbc879d27733 100644 --- a/arch/um/kernel/sysrq.c +++ b/arch/um/kernel/sysrq.c @@ -27,7 +27,7 @@ static const struct stacktrace_ops stackops = { .address = _print_addr }; -void show_stack_loglvl(struct task_struct *task, unsigned long *stack, +void show_stack(struct task_struct *task, unsigned long *stack, const char *loglvl) { struct pt_regs *segv_regs = current->thread.segv_regs; @@ -56,8 +56,3 @@ void show_stack_loglvl(struct task_struct *task, unsigned long *stack, dump_trace(current, &stackops, (void *)loglvl); printk("%s\n", loglvl); } - -void show_stack(struct task_struct *task, unsigned long *stack) -{ - show_stack_loglvl(task, stack, KERN_INFO); -} diff --git a/arch/unicore32/kernel/traps.c b/arch/unicore32/kernel/traps.c index 8b1335997f502..a3ac01df1a2e4 100644 --- a/arch/unicore32/kernel/traps.c +++ b/arch/unicore32/kernel/traps.c @@ -167,18 +167,13 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk, c_backtrace(fp, loglvl); } -void show_stack_loglvl(struct task_struct *tsk, unsigned long *sp, +void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl) { dump_backtrace(NULL, tsk, loglvl); barrier(); } -void 
show_stack(struct task_struct *tsk, unsigned long *sp) -{ - show_stack_loglvl(tsk, sp, KERN_DEFAULT) -} - static int __die(const char *str, int err, struct thread_info *thread, struct pt_regs *regs) { diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c index 9d2c076be37a6..5f816861f5d20 100644 --- a/arch/x86/kernel/amd_gart_64.c +++ b/arch/x86/kernel/amd_gart_64.c @@ -159,7 +159,7 @@ static void dump_leak(void) return; dump = 1; - show_stack_loglvl(NULL, NULL, KERN_ERR); + show_stack(NULL, NULL, KERN_ERR); debug_dma_dump_mappings(NULL); } #endif diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index 4396f2cfad197..456511b2284ea 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -279,7 +279,7 @@ next: } } -void show_stack_loglvl(struct task_struct *task, unsigned long *sp, +void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl) { task = task ? : current; @@ -294,11 +294,6 @@ void show_stack_loglvl(struct task_struct *task, unsigned long *sp, show_trace_log_lvl(task, NULL, sp, loglvl); } -void show_stack(struct task_struct *task, unsigned long *sp) -{ - show_stack_loglvl(task, sp, KERN_DEFAULT); -} - void show_stack_regs(struct pt_regs *regs) { show_trace_log_lvl(current, regs, NULL, KERN_DEFAULT); diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c index 1013acc2e03ea..e880460741d2b 100644 --- a/arch/xtensa/kernel/traps.c +++ b/arch/xtensa/kernel/traps.c @@ -501,8 +501,7 @@ static void show_trace(struct task_struct *task, unsigned long *sp, #define STACK_DUMP_LINE_SIZE 32 static size_t kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH; -void show_stack_loglvl(struct task_struct *task, unsigned long *sp, - const char *loglvl) +void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl) { size_t len; @@ -519,11 +518,6 @@ void show_stack_loglvl(struct task_struct *task, unsigned long *sp, show_trace(task, sp, loglvl); } -void show_stack(struct task_struct *task, unsigned long *sp) -{ - show_stack_loglvl(task, sp, KERN_INFO); -} - DEFINE_SPINLOCK(die_lock); void die(const char * str, struct pt_regs * regs, long err) @@ -540,7 +534,7 @@ void die(const char * str, struct pt_regs * regs, long err) pr_info("%s: sig: %ld [#%d]%s\n", str, err, ++die_counter, pr); show_regs(regs); if (!user_mode(regs)) - show_stack(NULL, (unsigned long*)regs->areg[1]); + show_stack(NULL, (unsigned long *)regs->areg[1], KERN_INFO); add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); spin_unlock_irq(&die_lock); diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index be5af89bbff16..9dd85bea40260 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -519,7 +519,7 @@ static void dpm_watchdog_handler(struct timer_list *t) struct dpm_watchdog *wd = from_timer(wd, t, timer); dev_emerg(wd->dev, "**** DPM device timeout ****\n"); - show_stack_loglvl(wd->tsk, NULL, KERN_EMERG); + show_stack(wd->tsk, NULL, KERN_EMERG); panic("%s %s: unrecoverable failure\n", dev_driver_string(wd->dev), dev_name(wd->dev)); } diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index 7bd935379dece..7c95afa905a08 100644 --- a/drivers/tty/sysrq.c +++ b/drivers/tty/sysrq.c @@ -235,7 +235,7 @@ static void showacpu(void *dummy) raw_spin_lock_irqsave(&show_lock, flags); pr_info("CPU%d:\n", smp_processor_id()); - show_stack_loglvl(NULL, NULL, KERN_INFO); + show_stack(NULL, NULL, KERN_INFO); raw_spin_unlock_irqrestore(&show_lock, flags); } diff --git a/include/linux/sched/debug.h 
b/include/linux/sched/debug.h index 373e4e3faf2ad..00c45a0e6abe1 100644 --- a/include/linux/sched/debug.h +++ b/include/linux/sched/debug.h @@ -30,9 +30,8 @@ extern void show_regs(struct pt_regs *); * task), SP is the stack pointer of the first frame that should be shown in the back * trace (or NULL if the entire call-chain of the task should be shown). */ -extern void show_stack(struct task_struct *task, unsigned long *sp); -extern void show_stack_loglvl(struct task_struct *task, unsigned long *sp, - const char *loglvl); +extern void show_stack(struct task_struct *task, unsigned long *sp, + const char *loglvl); extern void sched_show_task(struct task_struct *p); diff --git a/kernel/debug/kdb/kdb_bt.c b/kernel/debug/kdb/kdb_bt.c index 43f5dcd2b9ac7..18e03aba2cfc7 100644 --- a/kernel/debug/kdb/kdb_bt.c +++ b/kernel/debug/kdb/kdb_bt.c @@ -30,7 +30,7 @@ static void kdb_show_stack(struct task_struct *p, void *addr) kdb_dump_stack_on_cpu(kdb_process_cpu(p)); console_loglevel = old_lvl; } else { - show_stack_loglvl(p, addr, KERN_EMERG); + show_stack(p, addr, KERN_EMERG); } kdb_trap_printk--; diff --git a/kernel/locking/rtmutex-debug.c b/kernel/locking/rtmutex-debug.c index 5e63d6e8a2230..36e69100e8e06 100644 --- a/kernel/locking/rtmutex-debug.c +++ b/kernel/locking/rtmutex-debug.c @@ -125,7 +125,7 @@ void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter) printk("\n%s/%d's [blocked] stackdump:\n\n", task->comm, task_pid_nr(task)); - show_stack_loglvl(task, NULL, KERN_DEFAULT); + show_stack(task, NULL, KERN_DEFAULT); printk("\n%s/%d's [current] stackdump:\n\n", current->comm, task_pid_nr(current)); dump_stack(); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index c68a6e7b306fe..8f360326861ec 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -6025,7 +6025,7 @@ void sched_show_task(struct task_struct *p) (unsigned long)task_thread_info(p)->flags); print_worker_info(KERN_INFO, p); - show_stack_loglvl(p, NULL, KERN_INFO); + show_stack(p, NULL, KERN_INFO); put_task_stack(p); } EXPORT_SYMBOL_GPL(sched_show_task); diff --git a/lib/dump_stack.c b/lib/dump_stack.c index 5595e8962cf6b..a00ee6eedc7c3 100644 --- a/lib/dump_stack.c +++ b/lib/dump_stack.c @@ -74,7 +74,7 @@ void show_regs_print_info(const char *log_lvl) static void __dump_stack(void) { dump_stack_print_info(KERN_DEFAULT); - show_stack_loglvl(NULL, NULL, KERN_DEFAULT); + show_stack(NULL, NULL, KERN_DEFAULT); } /** -- GitLab From e31cf2f4ca422ac9b14ecc4a1295b8977a20f812 Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Mon, 8 Jun 2020 21:32:33 -0700 Subject: [PATCH 1351/1971] mm: don't include asm/pgtable.h if linux/mm.h is already included Patch series "mm: consolidate definitions of page table accessors", v2. The low level page table accessors (pXY_index(), pXY_offset()) are duplicated across all architectures and sometimes more than once. For instance, we have 31 definition of pgd_offset() for 25 supported architectures. Most of these definitions are actually identical and typically it boils down to, e.g. static inline unsigned long pmd_index(unsigned long address) { return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1); } static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) { return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address); } These definitions can be shared among 90% of the arches provided XYZ_SHIFT, PTRS_PER_XYZ and xyz_page_vaddr() are defined. For architectures that really need a custom version there is always possibility to override the generic version with the usual ifdefs magic. 
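As a rough illustration of that "ifdefs magic" (a sketch only; the guard name and exact placement are assumptions, not a quote of the final include/linux/pgtable.h), the generic accessor can be compiled in only when an architecture has not already supplied its own:

	#ifndef pmd_offset
	/*
	 * Generic version: works for any architecture that provides
	 * PMD_SHIFT, PTRS_PER_PMD (used by pmd_index()) and
	 * pud_page_vaddr(), per the pattern quoted above.
	 */
	static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
	{
		return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
	}
	/* mark pmd_offset as defined so later #ifndef checks skip it */
	#define pmd_offset pmd_offset
	#endif

An architecture that really needs a custom pmd_offset() defines both the function and the same-named macro before this point, and the generic definition is then skipped.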
These patches introduce include/linux/pgtable.h that replaces include/asm-generic/pgtable.h and add the definitions of the page table accessors to the new header. This patch (of 12): The linux/mm.h header includes to allow inlining of the functions involving page table manipulations, e.g. pte_alloc() and pmd_alloc(). So, there is no point to explicitly include in the files that include . The include statements in such cases are remove with a simple loop: for f in $(git grep -l "include ") ; do sed -i -e '/include / d' $f done Signed-off-by: Mike Rapoport Signed-off-by: Andrew Morton Cc: Arnd Bergmann Cc: Borislav Petkov Cc: Brian Cain Cc: Catalin Marinas Cc: Chris Zankel Cc: "David S. Miller" Cc: Geert Uytterhoeven Cc: Greentime Hu Cc: Greg Ungerer Cc: Guan Xuetao Cc: Guo Ren Cc: Heiko Carstens Cc: Helge Deller Cc: Ingo Molnar Cc: Ley Foon Tan Cc: Mark Salter Cc: Matthew Wilcox Cc: Matt Turner Cc: Max Filippov Cc: Michael Ellerman Cc: Michal Simek Cc: Mike Rapoport Cc: Nick Hu Cc: Paul Walmsley Cc: Richard Weinberger Cc: Rich Felker Cc: Russell King Cc: Stafford Horne Cc: Thomas Bogendoerfer Cc: Thomas Gleixner Cc: Tony Luck Cc: Vincent Chen Cc: Vineet Gupta Cc: Will Deacon Cc: Yoshinori Sato Link: http://lkml.kernel.org/r/20200514170327.31389-1-rppt@kernel.org Link: http://lkml.kernel.org/r/20200514170327.31389-2-rppt@kernel.org Signed-off-by: Linus Torvalds --- arch/alpha/boot/bootp.c | 1 - arch/alpha/boot/bootpz.c | 1 - arch/alpha/boot/main.c | 1 - arch/alpha/include/asm/io.h | 1 - arch/alpha/kernel/process.c | 1 - arch/alpha/kernel/ptrace.c | 1 - arch/alpha/kernel/setup.c | 1 - arch/alpha/kernel/smp.c | 1 - arch/alpha/kernel/sys_alcor.c | 1 - arch/alpha/kernel/sys_cabriolet.c | 1 - arch/alpha/kernel/sys_dp264.c | 1 - arch/alpha/kernel/sys_eb64p.c | 1 - arch/alpha/kernel/sys_eiger.c | 1 - arch/alpha/kernel/sys_jensen.c | 1 - arch/alpha/kernel/sys_marvel.c | 1 - arch/alpha/kernel/sys_miata.c | 1 - arch/alpha/kernel/sys_mikasa.c | 1 - arch/alpha/kernel/sys_nautilus.c | 1 - arch/alpha/kernel/sys_noritake.c | 1 - arch/alpha/kernel/sys_rawhide.c | 1 - arch/alpha/kernel/sys_ruffian.c | 1 - arch/alpha/kernel/sys_rx164.c | 1 - arch/alpha/kernel/sys_sable.c | 1 - arch/alpha/kernel/sys_sio.c | 1 - arch/alpha/kernel/sys_sx164.c | 1 - arch/alpha/kernel/sys_takara.c | 1 - arch/alpha/kernel/sys_titan.c | 1 - arch/alpha/kernel/sys_wildfire.c | 1 - arch/alpha/mm/init.c | 1 - arch/arm/kernel/machine_kexec.c | 1 - arch/arm/kernel/module.c | 1 - arch/arm/kernel/ptrace.c | 1 - arch/arm/kernel/smp.c | 1 - arch/arm/mach-ebsa110/core.c | 1 - arch/arm/mach-footbridge/common.c | 1 - arch/arm/mach-imx/mm-imx21.c | 1 - arch/arm/mach-imx/mm-imx27.c | 1 - arch/arm/mach-imx/mm-imx3.c | 1 - arch/arm/mach-iop32x/i2c.c | 1 - arch/arm/mach-iop32x/iq31244.c | 1 - arch/arm/mach-iop32x/iq80321.c | 1 - arch/arm/mach-iop32x/n2100.c | 1 - arch/arm/mach-ixp4xx/common.c | 1 - arch/arm/mach-sa1100/assabet.c | 1 - arch/arm/mm/copypage-v4mc.c | 1 - arch/arm/mm/copypage-v6.c | 1 - arch/arm/mm/copypage-xscale.c | 1 - arch/arm/mm/dump.c | 1 - arch/arm/mm/fault-armv.c | 1 - arch/arm/mm/fault.c | 1 - arch/arm/mm/pageattr.c | 1 - arch/arm64/kernel/hibernate.c | 1 - arch/arm64/kernel/ptrace.c | 1 - arch/arm64/kernel/smp.c | 1 - arch/arm64/mm/dump.c | 1 - arch/arm64/mm/fault.c | 1 - arch/arm64/mm/kasan_init.c | 1 - arch/arm64/mm/pageattr.c | 1 - arch/csky/kernel/module.c | 1 - arch/csky/kernel/ptrace.c | 1 - arch/csky/mm/init.c | 1 - arch/csky/mm/tlb.c | 1 - arch/h8300/kernel/process.c | 1 - arch/h8300/kernel/setup.c | 1 - 
arch/h8300/kernel/signal.c | 1 - arch/h8300/mm/fault.c | 1 - arch/h8300/mm/init.c | 1 - arch/h8300/mm/memory.c | 1 - arch/hexagon/mm/vm_fault.c | 1 - arch/ia64/kernel/efi.c | 1 - arch/ia64/kernel/ptrace.c | 1 - arch/ia64/kernel/smp.c | 1 - arch/ia64/kernel/smpboot.c | 1 - arch/ia64/mm/contig.c | 1 - arch/ia64/mm/fault.c | 1 - arch/m68k/68000/timers.c | 1 - arch/m68k/amiga/config.c | 1 - arch/m68k/apollo/config.c | 1 - arch/m68k/atari/atasound.c | 1 - arch/m68k/atari/stram.c | 1 - arch/m68k/bvme6000/config.c | 1 - arch/m68k/kernel/process.c | 1 - arch/m68k/kernel/ptrace.c | 1 - arch/m68k/kernel/setup_no.c | 1 - arch/m68k/kernel/signal.c | 1 - arch/m68k/kernel/uboot.c | 1 - arch/m68k/mac/config.c | 1 - arch/m68k/mm/mcfmmu.c | 1 - arch/m68k/mm/sun3kmap.c | 1 - arch/m68k/mm/sun3mmu.c | 1 - arch/m68k/mvme147/config.c | 1 - arch/m68k/mvme16x/config.c | 1 - arch/m68k/q40/config.c | 1 - arch/m68k/sun3/config.c | 1 - arch/m68k/sun3/dvma.c | 1 - arch/m68k/sun3/mmu_emu.c | 1 - arch/m68k/sun3/sun3dvma.c | 1 - arch/m68k/sun3x/dvma.c | 1 - arch/m68k/sun3x/prom.c | 1 - arch/microblaze/kernel/signal.c | 1 - arch/microblaze/mm/fault.c | 1 - arch/mips/fw/arc/memory.c | 1 - arch/mips/include/asm/mach-generic/floppy.h | 1 - arch/mips/include/asm/mach-jazz/floppy.h | 1 - arch/mips/jazz/jazzdma.c | 1 - arch/mips/kernel/module.c | 1 - arch/mips/kernel/process.c | 1 - arch/mips/kernel/ptrace.c | 1 - arch/mips/kernel/ptrace32.c | 1 - arch/mips/kernel/smp-bmips.c | 1 - arch/mips/kernel/traps.c | 1 - arch/mips/kvm/tlb.c | 1 - arch/mips/lib/dump_tlb.c | 1 - arch/mips/lib/r3k_dump_tlb.c | 1 - arch/mips/mm/c-octeon.c | 1 - arch/mips/mm/c-r3k.c | 1 - arch/mips/mm/c-r4k.c | 1 - arch/mips/mm/c-tx39.c | 1 - arch/mips/mm/init.c | 1 - arch/mips/mm/page.c | 1 - arch/mips/mm/pgtable-32.c | 1 - arch/mips/mm/pgtable-64.c | 1 - arch/mips/mm/sc-ip22.c | 1 - arch/mips/mm/sc-mips.c | 1 - arch/mips/mm/sc-r5k.c | 1 - arch/mips/mm/tlb-r3k.c | 1 - arch/mips/mm/tlb-r4k.c | 1 - arch/mips/sgi-ip27/ip27-init.c | 1 - arch/mips/sgi-ip27/ip27-timer.c | 1 - arch/mips/sgi-ip32/ip32-memory.c | 1 - arch/nds32/mm/fault.c | 1 - arch/nds32/mm/proc.c | 1 - arch/nios2/kernel/module.c | 1 - arch/nios2/mm/init.c | 1 - arch/nios2/mm/pgtable.c | 1 - arch/nios2/mm/tlb.c | 1 - arch/openrisc/include/asm/tlbflush.h | 1 - arch/openrisc/kernel/asm-offsets.c | 1 - arch/openrisc/kernel/process.c | 1 - arch/openrisc/kernel/ptrace.c | 1 - arch/openrisc/kernel/setup.c | 1 - arch/openrisc/kernel/traps.c | 1 - arch/openrisc/mm/init.c | 1 - arch/openrisc/mm/tlb.c | 1 - arch/parisc/include/asm/mmu_context.h | 1 - arch/parisc/kernel/module.c | 1 - arch/parisc/kernel/ptrace.c | 1 - arch/parisc/kernel/smp.c | 1 - arch/parisc/mm/init.c | 1 - arch/powerpc/include/asm/io.h | 1 - arch/powerpc/kernel/asm-offsets.c | 1 - arch/powerpc/kernel/process.c | 1 - arch/powerpc/kernel/signal_32.c | 1 - arch/powerpc/kernel/signal_64.c | 1 - arch/powerpc/kernel/traps.c | 1 - arch/powerpc/kernel/vdso.c | 1 - arch/powerpc/lib/code-patching.c | 1 - arch/powerpc/mm/book3s64/hash_hugetlbpage.c | 1 - arch/powerpc/mm/book3s64/hash_pgtable.c | 1 - arch/powerpc/mm/book3s64/radix_hugetlbpage.c | 1 - arch/powerpc/mm/book3s64/radix_pgtable.c | 1 - arch/powerpc/mm/fault.c | 1 - arch/powerpc/mm/hugetlbpage.c | 1 - arch/powerpc/mm/init_32.c | 1 - arch/powerpc/mm/init_64.c | 1 - arch/powerpc/mm/mem.c | 1 - arch/powerpc/mm/nohash/40x.c | 1 - arch/powerpc/mm/nohash/fsl_booke.c | 1 - arch/powerpc/mm/pgtable_32.c | 1 - arch/powerpc/mm/pgtable_64.c | 1 - arch/powerpc/mm/ptdump/hashpagetable.c | 1 - 
arch/powerpc/mm/ptdump/ptdump.c | 1 - arch/powerpc/perf/callchain.c | 1 - arch/powerpc/perf/callchain_32.c | 1 - arch/powerpc/perf/callchain_64.c | 1 - arch/powerpc/platforms/8xx/cpm1.c | 1 - arch/powerpc/platforms/8xx/micropatch.c | 1 - arch/powerpc/platforms/cell/setup.c | 1 - arch/powerpc/platforms/chrp/setup.c | 1 - arch/powerpc/platforms/maple/setup.c | 1 - arch/powerpc/platforms/maple/time.c | 1 - arch/powerpc/platforms/powermac/setup.c | 1 - arch/powerpc/platforms/powermac/time.c | 1 - arch/powerpc/platforms/pseries/setup.c | 1 - arch/powerpc/sysdev/cpm2.c | 1 - arch/powerpc/xmon/xmon.c | 1 - arch/riscv/kernel/setup.c | 1 - arch/riscv/mm/init.c | 1 - arch/s390/include/asm/tlbflush.h | 1 - arch/s390/kernel/machine_kexec.c | 1 - arch/s390/kernel/ptrace.c | 1 - arch/s390/kernel/vdso.c | 1 - arch/s390/mm/dump_pagetables.c | 1 - arch/s390/mm/fault.c | 1 - arch/s390/mm/init.c | 1 - arch/s390/mm/pageattr.c | 1 - arch/s390/mm/pgtable.c | 1 - arch/s390/mm/vmem.c | 1 - arch/sh/kernel/machine_kexec.c | 1 - arch/sh/kernel/ptrace_32.c | 1 - arch/sh/kernel/signal_32.c | 1 - arch/sh/mm/cache-sh3.c | 1 - arch/sh/mm/cache-sh4.c | 1 - arch/sh/mm/cache-sh7705.c | 1 - arch/sh/mm/nommu.c | 1 - arch/sparc/kernel/leon_smp.c | 1 - arch/sparc/kernel/process_32.c | 1 - arch/sparc/kernel/process_64.c | 1 - arch/sparc/kernel/ptrace_32.c | 1 - arch/sparc/kernel/ptrace_64.c | 1 - arch/sparc/kernel/setup_32.c | 1 - arch/sparc/kernel/setup_64.c | 1 - arch/sparc/kernel/signal32.c | 1 - arch/sparc/kernel/signal_32.c | 1 - arch/sparc/kernel/signal_64.c | 1 - arch/sparc/kernel/smp_32.c | 1 - arch/sparc/kernel/smp_64.c | 1 - arch/sparc/kernel/traps_64.c | 1 - arch/sparc/mm/fault_32.c | 1 - arch/sparc/mm/fault_64.c | 1 - arch/sparc/mm/hugetlbpage.c | 1 - arch/sparc/mm/init_32.c | 1 - arch/sparc/mm/init_64.c | 1 - arch/sparc/mm/io-unit.c | 1 - arch/sparc/mm/iommu.c | 1 - arch/sparc/mm/tlb.c | 1 - arch/um/kernel/process.c | 1 - arch/um/kernel/skas/mmu.c | 1 - arch/um/kernel/skas/uaccess.c | 1 - arch/um/kernel/tlb.c | 1 - arch/um/kernel/trap.c | 1 - arch/um/kernel/um_arch.c | 1 - arch/unicore32/kernel/module.c | 1 - arch/unicore32/mm/fault.c | 1 - arch/x86/include/asm/iomap.h | 1 - arch/x86/include/asm/xen/page.h | 1 - arch/x86/kernel/alternative.c | 1 - arch/x86/kernel/amd_gart_64.c | 1 - arch/x86/kernel/doublefault_32.c | 1 - arch/x86/kernel/machine_kexec_32.c | 1 - arch/x86/kernel/machine_kexec_64.c | 1 - arch/x86/kernel/module.c | 1 - arch/x86/kernel/process_32.c | 1 - arch/x86/kernel/process_64.c | 1 - arch/x86/kernel/ptrace.c | 1 - arch/x86/kernel/tboot.c | 1 - arch/x86/mm/dump_pagetables.c | 1 - arch/x86/mm/init_32.c | 1 - arch/x86/mm/init_64.c | 1 - arch/x86/mm/kasan_init_64.c | 1 - arch/x86/mm/pat/cpa-test.c | 1 - arch/x86/mm/pat/memtype.c | 1 - arch/x86/mm/pgtable.c | 1 - arch/x86/mm/pgtable_32.c | 1 - arch/x86/mm/pti.c | 1 - arch/x86/platform/efi/efi_64.c | 1 - arch/x86/xen/enlighten_pv.c | 1 - arch/x86/xen/grant-table.c | 1 - arch/xtensa/kernel/process.c | 1 - arch/xtensa/kernel/ptrace.c | 1 - arch/xtensa/kernel/setup.c | 1 - drivers/char/agp/frontend.c | 1 - drivers/char/agp/generic.c | 1 - drivers/char/bsr.c | 1 - drivers/char/mspec.c | 1 - drivers/gpu/drm/i915/i915_mm.c | 1 - drivers/infiniband/sw/rdmavt/mmap.c | 1 - drivers/infiniband/sw/rxe/rxe_mmap.c | 1 - drivers/media/platform/davinci/vpbe_display.c | 1 - drivers/media/v4l2-core/v4l2-common.c | 1 - drivers/misc/sgi-gru/grufault.c | 1 - drivers/net/ethernet/sun/sunhme.c | 1 - drivers/sbus/char/flash.c | 1 - drivers/sbus/char/uctrl.c | 1 - 
drivers/scsi/a2091.c | 1 - drivers/scsi/a3000.c | 1 - drivers/scsi/gvp11.c | 1 - drivers/scsi/lasi700.c | 1 - drivers/scsi/mvme147.c | 1 - drivers/scsi/sni_53c710.c | 1 - drivers/video/console/newport_con.c | 1 - drivers/video/fbdev/acornfb.c | 1 - drivers/video/fbdev/atafb.c | 1 - drivers/video/fbdev/cirrusfb.c | 1 - drivers/video/fbdev/cyber2000fb.c | 1 - drivers/video/fbdev/fb-puv3.c | 1 - drivers/video/fbdev/hitfb.c | 1 - drivers/video/fbdev/neofb.c | 1 - drivers/video/fbdev/q40fb.c | 1 - drivers/video/fbdev/savage/savagefb_driver.c | 1 - drivers/xen/balloon.c | 1 - drivers/xen/grant-table.c | 1 - drivers/xen/privcmd.c | 1 - drivers/xen/xenbus/xenbus_probe.c | 1 - drivers/xen/xenbus/xenbus_probe_backend.c | 1 - drivers/xen/xenbus/xenbus_probe_frontend.c | 1 - fs/proc/array.c | 1 - fs/proc/meminfo.c | 1 - fs/proc/nommu.c | 1 - fs/proc/vmcore.c | 1 - include/linux/dax.h | 1 - init/init_task.c | 1 - kernel/exit.c | 1 - kernel/fork.c | 1 - kernel/power/snapshot.c | 1 - lib/ioremap.c | 1 - mm/debug_vm_pgtable.c | 1 - mm/gup.c | 1 - mm/hugetlb.c | 1 - mm/memory.c | 1 - mm/page_io.c | 1 - mm/shmem.c | 1 - mm/sparse-vmemmap.c | 1 - mm/sparse.c | 1 - mm/swap_state.c | 1 - mm/swapfile.c | 1 - mm/vmacache.c | 1 - sound/core/sgbuf.c | 1 - virt/kvm/kvm_main.c | 1 - 319 files changed, 319 deletions(-) diff --git a/arch/alpha/boot/bootp.c b/arch/alpha/boot/bootp.c index 95c0359f48589..00266e6e1b714 100644 --- a/arch/alpha/boot/bootp.c +++ b/arch/alpha/boot/bootp.c @@ -16,7 +16,6 @@ #include #include -#include #include #include diff --git a/arch/alpha/boot/bootpz.c b/arch/alpha/boot/bootpz.c index 99b8d7dc344bf..43af71835adf8 100644 --- a/arch/alpha/boot/bootpz.c +++ b/arch/alpha/boot/bootpz.c @@ -18,7 +18,6 @@ #include #include -#include #include #include diff --git a/arch/alpha/boot/main.c b/arch/alpha/boot/main.c index 8f5ed86109703..e5347a0800086 100644 --- a/arch/alpha/boot/main.c +++ b/arch/alpha/boot/main.c @@ -14,7 +14,6 @@ #include #include -#include #include diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h index d1ed5a8133c5d..13bea465f1c06 100644 --- a/arch/alpha/include/asm/io.h +++ b/arch/alpha/include/asm/io.h @@ -7,7 +7,6 @@ #include #include #include -#include #include #include diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c index 48b81d015d8a8..b45f0b0d6511b 100644 --- a/arch/alpha/kernel/process.c +++ b/arch/alpha/kernel/process.c @@ -37,7 +37,6 @@ #include #include #include -#include #include #include diff --git a/arch/alpha/kernel/ptrace.c b/arch/alpha/kernel/ptrace.c index cb8d599e72d66..8c43212ae38e6 100644 --- a/arch/alpha/kernel/ptrace.c +++ b/arch/alpha/kernel/ptrace.c @@ -19,7 +19,6 @@ #include #include -#include #include #include "proto.h" diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c index 6fa802c495b46..f5c42a8fcf9c8 100644 --- a/arch/alpha/kernel/setup.c +++ b/arch/alpha/kernel/setup.c @@ -55,7 +55,6 @@ static struct notifier_block alpha_panic_block = { }; #include -#include #include #include #include diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c index 52995bf413fea..631cc17410d15 100644 --- a/arch/alpha/kernel/smp.c +++ b/arch/alpha/kernel/smp.c @@ -36,7 +36,6 @@ #include #include -#include #include #include #include diff --git a/arch/alpha/kernel/sys_alcor.c b/arch/alpha/kernel/sys_alcor.c index ce5430056f65f..e063b3857b3d5 100644 --- a/arch/alpha/kernel/sys_alcor.c +++ b/arch/alpha/kernel/sys_alcor.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include diff --git 
a/arch/alpha/kernel/sys_cabriolet.c b/arch/alpha/kernel/sys_cabriolet.c index 0aa6a27d0e2f8..47459b73cdb7e 100644 --- a/arch/alpha/kernel/sys_cabriolet.c +++ b/arch/alpha/kernel/sys_cabriolet.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c index d335086218207..9fb445d7dca52 100644 --- a/arch/alpha/kernel/sys_dp264.c +++ b/arch/alpha/kernel/sys_dp264.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/alpha/kernel/sys_eb64p.c b/arch/alpha/kernel/sys_eb64p.c index 1cdfe55fb9873..3c43fd3475266 100644 --- a/arch/alpha/kernel/sys_eb64p.c +++ b/arch/alpha/kernel/sys_eb64p.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/alpha/kernel/sys_eiger.c b/arch/alpha/kernel/sys_eiger.c index 016f79251141a..bf99dcfd40c4e 100644 --- a/arch/alpha/kernel/sys_eiger.c +++ b/arch/alpha/kernel/sys_eiger.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/alpha/kernel/sys_jensen.c b/arch/alpha/kernel/sys_jensen.c index d0d44f543d77c..0a2ab6cb18db4 100644 --- a/arch/alpha/kernel/sys_jensen.c +++ b/arch/alpha/kernel/sys_jensen.c @@ -25,7 +25,6 @@ #include #include #include -#include #include #include "proto.h" diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c index 533899a4a1a1d..83d6c53d6d4d1 100644 --- a/arch/alpha/kernel/sys_marvel.c +++ b/arch/alpha/kernel/sys_marvel.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/alpha/kernel/sys_miata.c b/arch/alpha/kernel/sys_miata.c index 702292af2225d..e1bee8f84c587 100644 --- a/arch/alpha/kernel/sys_miata.c +++ b/arch/alpha/kernel/sys_miata.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include diff --git a/arch/alpha/kernel/sys_mikasa.c b/arch/alpha/kernel/sys_mikasa.c index 3af4f94113e17..7690dfd57cb6b 100644 --- a/arch/alpha/kernel/sys_mikasa.c +++ b/arch/alpha/kernel/sys_mikasa.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c index 32850e45834b0..53adf43dcd44f 100644 --- a/arch/alpha/kernel/sys_nautilus.c +++ b/arch/alpha/kernel/sys_nautilus.c @@ -40,7 +40,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/alpha/kernel/sys_noritake.c b/arch/alpha/kernel/sys_noritake.c index b106f327f7652..47f3ce4f719ad 100644 --- a/arch/alpha/kernel/sys_noritake.c +++ b/arch/alpha/kernel/sys_noritake.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/alpha/kernel/sys_rawhide.c b/arch/alpha/kernel/sys_rawhide.c index b76f65d0e8b54..b5846ffdadce5 100644 --- a/arch/alpha/kernel/sys_rawhide.c +++ b/arch/alpha/kernel/sys_rawhide.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include diff --git a/arch/alpha/kernel/sys_ruffian.c b/arch/alpha/kernel/sys_ruffian.c index d330740119602..4b1c8d85c4f08 100644 --- a/arch/alpha/kernel/sys_ruffian.c +++ b/arch/alpha/kernel/sys_ruffian.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include diff --git a/arch/alpha/kernel/sys_rx164.c b/arch/alpha/kernel/sys_rx164.c index 4d85eaeb44aa4..94046f9aea08d 100644 --- a/arch/alpha/kernel/sys_rx164.c +++ b/arch/alpha/kernel/sys_rx164.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include diff 
--git a/arch/alpha/kernel/sys_sable.c b/arch/alpha/kernel/sys_sable.c index 3cf0d32da5d80..930005b2f630d 100644 --- a/arch/alpha/kernel/sys_sable.c +++ b/arch/alpha/kernel/sys_sable.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include diff --git a/arch/alpha/kernel/sys_sio.c b/arch/alpha/kernel/sys_sio.c index a6bdc1da47adb..7c420d8dac53d 100644 --- a/arch/alpha/kernel/sys_sio.c +++ b/arch/alpha/kernel/sys_sio.c @@ -25,7 +25,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/alpha/kernel/sys_sx164.c b/arch/alpha/kernel/sys_sx164.c index 17cc203176c80..dd9de84b630c3 100644 --- a/arch/alpha/kernel/sys_sx164.c +++ b/arch/alpha/kernel/sys_sx164.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/alpha/kernel/sys_takara.c b/arch/alpha/kernel/sys_takara.c index e230c68640883..9e2adb69bc74e 100644 --- a/arch/alpha/kernel/sys_takara.c +++ b/arch/alpha/kernel/sys_takara.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c index c8390d8de1400..b1f3b4fcf99b2 100644 --- a/arch/alpha/kernel/sys_titan.c +++ b/arch/alpha/kernel/sys_titan.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/alpha/kernel/sys_wildfire.c b/arch/alpha/kernel/sys_wildfire.c index 2191bde161fde..2c54d707142ab 100644 --- a/arch/alpha/kernel/sys_wildfire.c +++ b/arch/alpha/kernel/sys_wildfire.c @@ -20,7 +20,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c index 667cd21393b5b..3c42b3147fd6f 100644 --- a/arch/alpha/mm/init.c +++ b/arch/alpha/mm/init.c @@ -24,7 +24,6 @@ #include #include -#include #include #include #include diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c index 76300f3813e89..974b6c64d3e6f 100644 --- a/arch/arm/kernel/machine_kexec.c +++ b/arch/arm/kernel/machine_kexec.c @@ -10,7 +10,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c index af0a8500a24ea..e15444b25ca05 100644 --- a/arch/arm/kernel/module.c +++ b/arch/arm/kernel/module.c @@ -17,7 +17,6 @@ #include #include -#include #include #include #include diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index 4cc6a7eff6359..d0f7c8896c967 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -25,7 +25,6 @@ #include #include -#include #include #define CREATE_TRACE_POINTS diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 46e1be9e57a81..9a6432557871f 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -37,7 +37,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/arm/mach-ebsa110/core.c b/arch/arm/mach-ebsa110/core.c index 575b2e2b6759f..5960e3dfd2bfc 100644 --- a/arch/arm/mach-ebsa110/core.c +++ b/arch/arm/mach-ebsa110/core.c @@ -17,7 +17,6 @@ #include #include #include -#include #include #include diff --git a/arch/arm/mach-footbridge/common.c b/arch/arm/mach-footbridge/common.c index 015f75d1c98d0..eee095f0e2f6c 100644 --- a/arch/arm/mach-footbridge/common.c +++ b/arch/arm/mach-footbridge/common.c @@ -14,7 +14,6 @@ #include #include