diff --git a/Documentation/devicetree/bindings/hwmon/rpi-poe-fan.txt b/Documentation/devicetree/bindings/hwmon/rpi-poe-fan.txt new file mode 100644 index 0000000000000000000000000000000000000000..c71f8569a4dc96c6d791e4e4d9d12f5e4b6fb0fe --- /dev/null +++ b/Documentation/devicetree/bindings/hwmon/rpi-poe-fan.txt @@ -0,0 +1,55 @@ +Bindings for the Raspberry Pi PoE HAT fan + +Required properties: +- compatible : "raspberrypi,rpi-poe-fan" +- firmware : Reference to the RPi firmware device node +- pwms : the PWM that is used to control the PWM fan +- cooling-levels : PWM duty cycle values in a range from 0 to 255 + which correspond to thermal cooling states + +Example: + fan0: rpi-poe-fan@0 { + compatible = "raspberrypi,rpi-poe-fan"; + firmware = <&firmware>; + cooling-min-state = <0>; + cooling-max-state = <3>; + #cooling-cells = <2>; + cooling-levels = <0 50 150 255>; + status = "okay"; + }; + + thermal-zones { + cpu_thermal: cpu-thermal { + trips { + threshold: trip-point@0 { + temperature = <45000>; + hysteresis = <5000>; + type = "active"; + }; + target: trip-point@1 { + temperature = <50000>; + hysteresis = <2000>; + type = "active"; + }; + cpu_hot: cpu_hot@0 { + temperature = <55000>; + hysteresis = <2000>; + type = "active"; + }; + }; + cooling-maps { + map0 { + trip = <&threshold>; + cooling-device = <&fan0 0 1>; + }; + map1 { + trip = <&target>; + cooling-device = <&fan0 1 2>; + }; + map2 { + trip = <&cpu_hot>; + cooling-device = <&fan0 2 3>; + }; + }; + }; + }; diff --git a/Documentation/hwmon/ina2xx b/Documentation/hwmon/ina2xx index cfd31d94c8727251179a0c2afcc4f9b2a4580156..f8bf14055c2f38593b78d3c74545835080ba0c91 100644 --- a/Documentation/hwmon/ina2xx +++ b/Documentation/hwmon/ina2xx @@ -32,7 +32,7 @@ Supported chips: Datasheet: Publicly available at the Texas Instruments website http://www.ti.com/ -Author: Lothar Felten +Author: Lothar Felten Description ----------- diff --git a/Documentation/hwmon/rpi-poe-fan b/Documentation/hwmon/rpi-poe-fan new file mode 100644 index 0000000000000000000000000000000000000000..9182ab6339933895071b7de828f33a49c8f709e3 --- /dev/null +++ b/Documentation/hwmon/rpi-poe-fan @@ -0,0 +1,15 @@ +Kernel driver rpi-poe-fan +===================== + +This driver enables the use of the Raspberry Pi PoE HAT fan. + +Author: Serge Schneider + +Description +----------- + +The driver implements a simple interface for driving the Raspberry Pi PoE +(Power over Ethernet) HAT fan. The driver passes commands to the Raspberry Pi +firmware through the mailbox property interface. The firmware then forwards +the commands to the board over I2C on the ID_EEPROM pins. The driver exposes +the fan to the user space through the hwmon sysfs interface. diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index d499676890d8ecf71de4b02f724c011132145f4f..a054b5ad410ad8ffe2402182fa9851815a37860f 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -133,14 +133,11 @@ min_adv_mss - INTEGER IP Fragmentation: -ipfrag_high_thresh - INTEGER - Maximum memory used to reassemble IP fragments. When - ipfrag_high_thresh bytes of memory is allocated for this purpose, - the fragment handler will toss packets until ipfrag_low_thresh - is reached. This also serves as a maximum limit to namespaces - different from the initial one. - -ipfrag_low_thresh - INTEGER +ipfrag_high_thresh - LONG INTEGER + Maximum memory used to reassemble IP fragments. 
+ +ipfrag_low_thresh - LONG INTEGER + (Obsolete since linux-4.17) Maximum memory used to reassemble IP fragments before the kernel begins to remove incomplete fragment queues to free up resources. The kernel still accepts new fragments for defragmentation. diff --git a/Makefile b/Makefile index e69d0d0917427e014675799d6873820d806fe08c..cc0e65a8d7bf3973b64ebd8c7d656d81dd228511 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 14 -SUBLEVEL = 66 +SUBLEVEL = 74 EXTRAVERSION = NAME = Petit Gorille @@ -357,9 +357,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \ else if [ -x /bin/bash ]; then echo /bin/bash; \ else echo sh; fi ; fi) -HOST_LFS_CFLAGS := $(shell getconf LFS_CFLAGS) -HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS) -HOST_LFS_LIBS := $(shell getconf LFS_LIBS) +HOST_LFS_CFLAGS := $(shell getconf LFS_CFLAGS 2>/dev/null) +HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS 2>/dev/null) +HOST_LFS_LIBS := $(shell getconf LFS_LIBS 2>/dev/null) HOSTCC = gcc HOSTCXX = g++ @@ -490,9 +490,13 @@ KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) endif RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register +RETPOLINE_VDSO_CFLAGS_GCC := -mindirect-branch=thunk-inline -mindirect-branch-register RETPOLINE_CFLAGS_CLANG := -mretpoline-external-thunk +RETPOLINE_VDSO_CFLAGS_CLANG := -mretpoline RETPOLINE_CFLAGS := $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG))) +RETPOLINE_VDSO_CFLAGS := $(call cc-option,$(RETPOLINE_VDSO_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_VDSO_CFLAGS_CLANG))) export RETPOLINE_CFLAGS +export RETPOLINE_VDSO_CFLAGS ifeq ($(config-targets),1) # =========================================================================== diff --git a/arch/Kconfig b/arch/Kconfig index ff8ae8cdaaf799fd1fc332cdc60afb3bfd590af8..7c61084792097d21f419c8be31804d9fcd4a670c 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -337,6 +337,9 @@ config HAVE_ARCH_JUMP_LABEL config HAVE_RCU_TABLE_FREE bool +config HAVE_RCU_TABLE_INVALIDATE + bool + config ARCH_HAVE_NMI_SAFE_CMPXCHG bool diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index a48976dc9bcd0ad8638aadfd927e0480f3816c3e..918c3938ef6652a24c84edffeb5f3727afba23af 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -530,24 +530,19 @@ SYSCALL_DEFINE4(osf_mount, unsigned long, typenr, const char __user *, path, SYSCALL_DEFINE1(osf_utsname, char __user *, name) { int error; + char tmp[5 * 32]; down_read(&uts_sem); - error = -EFAULT; - if (copy_to_user(name + 0, utsname()->sysname, 32)) - goto out; - if (copy_to_user(name + 32, utsname()->nodename, 32)) - goto out; - if (copy_to_user(name + 64, utsname()->release, 32)) - goto out; - if (copy_to_user(name + 96, utsname()->version, 32)) - goto out; - if (copy_to_user(name + 128, utsname()->machine, 32)) - goto out; + memcpy(tmp + 0 * 32, utsname()->sysname, 32); + memcpy(tmp + 1 * 32, utsname()->nodename, 32); + memcpy(tmp + 2 * 32, utsname()->release, 32); + memcpy(tmp + 3 * 32, utsname()->version, 32); + memcpy(tmp + 4 * 32, utsname()->machine, 32); + up_read(&uts_sem); - error = 0; - out: - up_read(&uts_sem); - return error; + if (copy_to_user(name, tmp, sizeof(tmp))) + return -EFAULT; + return 0; } SYSCALL_DEFINE0(getpagesize) @@ -567,18 +562,21 @@ SYSCALL_DEFINE2(osf_getdomainname, char __user *, name, int, namelen) { int len, err = 0; char *kname; + char tmp[32]; - if (namelen > 32) + if (namelen < 0 || namelen > 32) namelen = 
32; down_read(&uts_sem); kname = utsname()->domainname; len = strnlen(kname, namelen); - if (copy_to_user(name, kname, min(len + 1, namelen))) - err = -EFAULT; + len = min(len + 1, namelen); + memcpy(tmp, kname, len); up_read(&uts_sem); - return err; + if (copy_to_user(name, tmp, len)) + return -EFAULT; + return 0; } /* @@ -739,13 +737,14 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count) }; unsigned long offset; const char *res; - long len, err = -EINVAL; + long len; + char tmp[__NEW_UTS_LEN + 1]; offset = command-1; if (offset >= ARRAY_SIZE(sysinfo_table)) { /* Digital UNIX has a few unpublished interfaces here */ printk("sysinfo(%d)", command); - goto out; + return -EINVAL; } down_read(&uts_sem); @@ -753,13 +752,11 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count) len = strlen(res)+1; if ((unsigned long)len > (unsigned long)count) len = count; - if (copy_to_user(buf, res, len)) - err = -EFAULT; - else - err = 0; + memcpy(tmp, res, len); up_read(&uts_sem); - out: - return err; + if (copy_to_user(buf, tmp, len)) + return -EFAULT; + return 0; } SYSCALL_DEFINE5(osf_getsysinfo, unsigned long, op, void __user *, buffer, diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 5c8caf85c35054722402d86f58033820d3de7640..8ff06609068038541f608df562170b3429193f6b 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -45,6 +45,9 @@ config ARC select HAVE_KERNEL_GZIP select HAVE_KERNEL_LZMA +config ARCH_HAS_CACHE_LINE_SIZE + def_bool y + config MIGHT_HAVE_PCI bool diff --git a/arch/arc/Makefile b/arch/arc/Makefile index d37f49d6a27f40f65d3e34bd3e2df5343a97d1e4..6c1b20dd76ad902655d7317eb44580923d98c690 100644 --- a/arch/arc/Makefile +++ b/arch/arc/Makefile @@ -16,7 +16,7 @@ endif KBUILD_DEFCONFIG := nsim_700_defconfig -cflags-y += -fno-common -pipe -fno-builtin -D__linux__ +cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__ cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7 cflags-$(CONFIG_ISA_ARCV2) += -mcpu=archs @@ -140,16 +140,3 @@ dtbs: scripts archclean: $(Q)$(MAKE) $(clean)=$(boot) - -# Hacks to enable final link due to absence of link-time branch relexation -# and gcc choosing optimal(shorter) branches at -O3 -# -# vineetg Feb 2010: -mlong-calls switched off for overall kernel build -# However lib/decompress_inflate.o (.init.text) calls -# zlib_inflate_workspacesize (.text) causing relocation errors. 
-# Thus forcing all exten calls in this file to be long calls -export CFLAGS_decompress_inflate.o = -mmedium-calls -export CFLAGS_initramfs.o = -mmedium-calls -ifdef CONFIG_SMP -export CFLAGS_core.o = -mmedium-calls -endif diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig index a8242362e55199e550f4da319f1f5f2bd33f23c2..ece78630d71124cd1557cd987a3f6021c2383486 100644 --- a/arch/arc/configs/axs101_defconfig +++ b/arch/arc/configs/axs101_defconfig @@ -1,5 +1,4 @@ CONFIG_DEFAULT_HOSTNAME="ARCLinux" -# CONFIG_SWAP is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y # CONFIG_CROSS_MEMORY_ATTACH is not set diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig index ef3c31cd77378c0fcfed9f4e97a05ba1460c5410..240c9251a7d4a19d7745fcb4aeafd1d26c657442 100644 --- a/arch/arc/configs/axs103_defconfig +++ b/arch/arc/configs/axs103_defconfig @@ -1,5 +1,4 @@ CONFIG_DEFAULT_HOSTNAME="ARCLinux" -# CONFIG_SWAP is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y # CONFIG_CROSS_MEMORY_ATTACH is not set diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig index 1757ac9cecbc10fb189673d23a76ad5137e9ed9f..af54b96abee04a16c0eb65a2690c45c9edaee560 100644 --- a/arch/arc/configs/axs103_smp_defconfig +++ b/arch/arc/configs/axs103_smp_defconfig @@ -1,5 +1,4 @@ CONFIG_DEFAULT_HOSTNAME="ARCLinux" -# CONFIG_SWAP is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y # CONFIG_CROSS_MEMORY_ATTACH is not set diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h index 8486f328cc5d2aea087812ab19be949001e64b73..ff7d3232764a29a41503a213d3bd385e232acf42 100644 --- a/arch/arc/include/asm/cache.h +++ b/arch/arc/include/asm/cache.h @@ -48,7 +48,9 @@ }) /* Largest line length for either L1 or L2 is 128 bytes */ -#define ARCH_DMA_MINALIGN 128 +#define SMP_CACHE_BYTES 128 +#define cache_line_size() SMP_CACHE_BYTES +#define ARCH_DMA_MINALIGN SMP_CACHE_BYTES extern void arc_cache_init(void); extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len); diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h index d5da2115d78a678e343da2abec51f6c8efbbe0a4..03d6bb0f4e13a2dd49708f12dfb6f2f4788bf8ae 100644 --- a/arch/arc/include/asm/delay.h +++ b/arch/arc/include/asm/delay.h @@ -17,8 +17,11 @@ #ifndef __ASM_ARC_UDELAY_H #define __ASM_ARC_UDELAY_H +#include #include /* HZ */ +extern unsigned long loops_per_jiffy; + static inline void __delay(unsigned long loops) { __asm__ __volatile__( diff --git a/arch/arc/include/asm/mach_desc.h b/arch/arc/include/asm/mach_desc.h index c28e6c347b4900217ad48053c69679bb3da8b607..871f3cb16af9f2ec58c76192ffc098d914588b9d 100644 --- a/arch/arc/include/asm/mach_desc.h +++ b/arch/arc/include/asm/mach_desc.h @@ -34,9 +34,7 @@ struct machine_desc { const char *name; const char **dt_compat; void (*init_early)(void); -#ifdef CONFIG_SMP void (*init_per_cpu)(unsigned int); -#endif void (*init_machine)(void); void (*init_late)(void); diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c index 538b36afe89e7c9871e2c37d2322d839a9b27a26..62b185057c040157132386aaee6ff56eaaebcf25 100644 --- a/arch/arc/kernel/irq.c +++ b/arch/arc/kernel/irq.c @@ -31,10 +31,10 @@ void __init init_IRQ(void) /* a SMP H/w block could do IPI IRQ request here */ if (plat_smp_ops.init_per_cpu) plat_smp_ops.init_per_cpu(smp_processor_id()); +#endif if (machine_desc->init_per_cpu) machine_desc->init_per_cpu(smp_processor_id()); -#endif } /* diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c index 
5ac3b547453fd5b4b5393fdc10dfff34a047941f..4674541eba3fd019a51aeb02db27b2bc04569412 100644 --- a/arch/arc/kernel/process.c +++ b/arch/arc/kernel/process.c @@ -47,7 +47,8 @@ SYSCALL_DEFINE0(arc_gettls) SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new) { struct pt_regs *regs = current_pt_regs(); - int uval = -EFAULT; + u32 uval; + int ret; /* * This is only for old cores lacking LLOCK/SCOND, which by defintion @@ -60,23 +61,47 @@ SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new) /* Z indicates to userspace if operation succeded */ regs->status32 &= ~STATUS_Z_MASK; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) - return -EFAULT; + ret = access_ok(VERIFY_WRITE, uaddr, sizeof(*uaddr)); + if (!ret) + goto fail; +again: preempt_disable(); - if (__get_user(uval, uaddr)) - goto done; + ret = __get_user(uval, uaddr); + if (ret) + goto fault; - if (uval == expected) { - if (!__put_user(new, uaddr)) - regs->status32 |= STATUS_Z_MASK; - } + if (uval != expected) + goto out; -done: - preempt_enable(); + ret = __put_user(new, uaddr); + if (ret) + goto fault; + + regs->status32 |= STATUS_Z_MASK; +out: + preempt_enable(); return uval; + +fault: + preempt_enable(); + + if (unlikely(ret != -EFAULT)) + goto fail; + + down_read(¤t->mm->mmap_sem); + ret = fixup_user_fault(current, current->mm, (unsigned long) uaddr, + FAULT_FLAG_WRITE, NULL); + up_read(¤t->mm->mmap_sem); + + if (likely(!ret)) + goto again; + +fail: + force_sig(SIGSEGV, current); + return ret; } #ifdef CONFIG_ISA_ARCV2 diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c index eee924dfffa6e1baf08221ee5e7e2cd23937d782..d14499500106df2fc25ac18d28425b62fa56086e 100644 --- a/arch/arc/mm/cache.c +++ b/arch/arc/mm/cache.c @@ -1035,7 +1035,7 @@ void flush_cache_mm(struct mm_struct *mm) void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr, unsigned long pfn) { - unsigned int paddr = pfn << PAGE_SHIFT; + phys_addr_t paddr = pfn << PAGE_SHIFT; u_vaddr &= PAGE_MASK; @@ -1055,8 +1055,9 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long u_vaddr) { /* TBD: do we really need to clear the kernel mapping */ - __flush_dcache_page(page_address(page), u_vaddr); - __flush_dcache_page(page_address(page), page_address(page)); + __flush_dcache_page((phys_addr_t)page_address(page), u_vaddr); + __flush_dcache_page((phys_addr_t)page_address(page), + (phys_addr_t)page_address(page)); } diff --git a/arch/arc/plat-eznps/include/plat/ctop.h b/arch/arc/plat-eznps/include/plat/ctop.h index 0c7d11022d0f8875256e64162d2ee0f1f0aa85e1..4f6a1673b3a6eaacc80473108dec4cc8c1e4236c 100644 --- a/arch/arc/plat-eznps/include/plat/ctop.h +++ b/arch/arc/plat-eznps/include/plat/ctop.h @@ -21,6 +21,7 @@ #error "Incorrect ctop.h include" #endif +#include #include /* core auxiliary registers */ @@ -143,6 +144,15 @@ struct nps_host_reg_gim_p_int_dst { }; /* AUX registers definition */ +struct nps_host_reg_aux_dpc { + union { + struct { + u32 ien:1, men:1, hen:1, reserved:29; + }; + u32 value; + }; +}; + struct nps_host_reg_aux_udmc { union { struct { diff --git a/arch/arc/plat-eznps/mtm.c b/arch/arc/plat-eznps/mtm.c index 2388de3d09ef9e71f710ec4625e20a9e85cbdc1e..ed0077ef666eb7bdb8930bb2b52eddbe46f0b946 100644 --- a/arch/arc/plat-eznps/mtm.c +++ b/arch/arc/plat-eznps/mtm.c @@ -15,6 +15,8 @@ */ #include +#include +#include #include #include #include @@ -157,10 +159,10 @@ void mtm_enable_core(unsigned int cpu) /* Verify and set the value of the mtm hs counter */ static int __init set_mtm_hs_ctr(char 
*ctr_str) { - long hs_ctr; + int hs_ctr; int ret; - ret = kstrtol(ctr_str, 0, &hs_ctr); + ret = kstrtoint(ctr_str, 0, &hs_ctr); if (ret || hs_ctr > MT_HS_CNT_MAX || hs_ctr < MT_HS_CNT_MIN) { pr_err("** Invalid @nps_mtm_hs_ctr [%d] needs to be [%d:%d] (incl)\n", diff --git a/arch/arm/boot/dts/am3517.dtsi b/arch/arm/boot/dts/am3517.dtsi index 00da3f2c4072d4d6a3a370e88b1acfa046b58efd..4b57094a0356e1bb12c2aaef249228859344baea 100644 --- a/arch/arm/boot/dts/am3517.dtsi +++ b/arch/arm/boot/dts/am3517.dtsi @@ -87,6 +87,11 @@ }; }; +/* Table Table 5-79 of the TRM shows 480ab000 is reserved */ +&usb_otg_hs { + status = "disabled"; +}; + &iva { status = "disabled"; }; diff --git a/arch/arm/boot/dts/am437x-sk-evm.dts b/arch/arm/boot/dts/am437x-sk-evm.dts index 2c6bf0684f5055bfe014bd4e433f4cd035e0924a..094fd0ea91a03d1889e7d6fa7c55a61859e3f269 100644 --- a/arch/arm/boot/dts/am437x-sk-evm.dts +++ b/arch/arm/boot/dts/am437x-sk-evm.dts @@ -535,6 +535,8 @@ touchscreen-size-x = <480>; touchscreen-size-y = <272>; + + wakeup-source; }; tlv320aic3106: tlv320aic3106@1b { diff --git a/arch/arm/boot/dts/armada-385-synology-ds116.dts b/arch/arm/boot/dts/armada-385-synology-ds116.dts index 31510eb56f108e8a8708bb6e4a513fb137982c13..874189b4d21866e38a8c8960a66b4ef5916efd7e 100644 --- a/arch/arm/boot/dts/armada-385-synology-ds116.dts +++ b/arch/arm/boot/dts/armada-385-synology-ds116.dts @@ -170,7 +170,7 @@ 3700 5 3900 6 4000 7>; - cooling-cells = <2>; + #cooling-cells = <2>; }; gpio-leds { diff --git a/arch/arm/boot/dts/bcm-cygnus.dtsi b/arch/arm/boot/dts/bcm-cygnus.dtsi index 9a9902974b1b86d16c0a9e8280c0fa6da0de8222..8b2c65cd61a2951dd5e170b81c2230ce449d3cf0 100644 --- a/arch/arm/boot/dts/bcm-cygnus.dtsi +++ b/arch/arm/boot/dts/bcm-cygnus.dtsi @@ -216,7 +216,7 @@ reg = <0x18008000 0x100>; #address-cells = <1>; #size-cells = <0>; - interrupts = ; + interrupts = ; clock-frequency = <100000>; status = "disabled"; }; @@ -245,7 +245,7 @@ reg = <0x1800b000 0x100>; #address-cells = <1>; #size-cells = <0>; - interrupts = ; + interrupts = ; clock-frequency = <100000>; status = "disabled"; }; @@ -256,7 +256,7 @@ #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; - interrupt-map = <0 0 0 0 &gic GIC_SPI 100 IRQ_TYPE_NONE>; + interrupt-map = <0 0 0 0 &gic GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>; linux,pci-domain = <0>; @@ -278,10 +278,10 @@ compatible = "brcm,iproc-msi"; msi-controller; interrupt-parent = <&gic>; - interrupts = , - , - , - ; + interrupts = , + , + , + ; }; }; @@ -291,7 +291,7 @@ #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; - interrupt-map = <0 0 0 0 &gic GIC_SPI 106 IRQ_TYPE_NONE>; + interrupt-map = <0 0 0 0 &gic GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>; linux,pci-domain = <1>; @@ -313,10 +313,10 @@ compatible = "brcm,iproc-msi"; msi-controller; interrupt-parent = <&gic>; - interrupts = , - , - , - ; + interrupts = , + , + , + ; }; }; diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi index d5f5e92e74889d25fbbc0a853ef9d3e0ea6e7bae..1792192001a22da9e563963d7c715e019d5accde 100644 --- a/arch/arm/boot/dts/bcm-nsp.dtsi +++ b/arch/arm/boot/dts/bcm-nsp.dtsi @@ -391,7 +391,7 @@ reg = <0x38000 0x50>; #address-cells = <1>; #size-cells = <0>; - interrupts = ; + interrupts = ; clock-frequency = <100000>; dma-coherent; status = "disabled"; @@ -496,7 +496,7 @@ #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; - interrupt-map = <0 0 0 0 &gic GIC_SPI 131 IRQ_TYPE_NONE>; + interrupt-map = <0 0 0 0 &gic GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>; linux,pci-domain = <0>; @@ -519,10 +519,10 @@ compatible = 
"brcm,iproc-msi"; msi-controller; interrupt-parent = <&gic>; - interrupts = , - , - , - ; + interrupts = , + , + , + ; brcm,pcie-msi-inten; }; }; @@ -533,7 +533,7 @@ #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; - interrupt-map = <0 0 0 0 &gic GIC_SPI 137 IRQ_TYPE_NONE>; + interrupt-map = <0 0 0 0 &gic GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>; linux,pci-domain = <1>; @@ -556,10 +556,10 @@ compatible = "brcm,iproc-msi"; msi-controller; interrupt-parent = <&gic>; - interrupts = , - , - , - ; + interrupts = , + , + , + ; brcm,pcie-msi-inten; }; }; @@ -570,7 +570,7 @@ #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; - interrupt-map = <0 0 0 0 &gic GIC_SPI 143 IRQ_TYPE_NONE>; + interrupt-map = <0 0 0 0 &gic GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>; linux,pci-domain = <2>; @@ -593,10 +593,10 @@ compatible = "brcm,iproc-msi"; msi-controller; interrupt-parent = <&gic>; - interrupts = , - , - , - ; + interrupts = , + , + , + ; brcm,pcie-msi-inten; }; }; diff --git a/arch/arm/boot/dts/bcm270x.dtsi b/arch/arm/boot/dts/bcm270x.dtsi index 05828277ff8929af8d71d2143bf64236f2321591..f086ff0deec55a1eb5bd2c8b52b48c61c836910c 100644 --- a/arch/arm/boot/dts/bcm270x.dtsi +++ b/arch/arm/boot/dts/bcm270x.dtsi @@ -152,6 +152,13 @@ regulator-max-microvolt = <3300000>; regulator-always-on; }; + + __overrides__ { + cam0-pwdn-ctrl; + cam0-pwdn; + cam0-led-ctrl; + cam0-led; + }; }; /* Configure and use the auxilliary interrupt controller */ diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi index 045b9bb857f9a030491bc3bdbf82ae67bb841b73..501877e87a5b8fb2f1eb7341211c6215c1aa8be8 100644 --- a/arch/arm/boot/dts/bcm5301x.dtsi +++ b/arch/arm/boot/dts/bcm5301x.dtsi @@ -365,7 +365,7 @@ i2c0: i2c@18009000 { compatible = "brcm,iproc-i2c"; reg = <0x18009000 0x50>; - interrupts = ; + interrupts = ; #address-cells = <1>; #size-cells = <0>; clock-frequency = <100000>; diff --git a/arch/arm/boot/dts/da850.dtsi b/arch/arm/boot/dts/da850.dtsi index 8a15f7193c829bcea8cda3553d0622a4c9222406..77dd62e260db682c457afc18d181d1b4466e685f 100644 --- a/arch/arm/boot/dts/da850.dtsi +++ b/arch/arm/boot/dts/da850.dtsi @@ -518,11 +518,7 @@ gpio-controller; #gpio-cells = <2>; reg = <0x226000 0x1000>; - interrupts = <42 IRQ_TYPE_EDGE_BOTH - 43 IRQ_TYPE_EDGE_BOTH 44 IRQ_TYPE_EDGE_BOTH - 45 IRQ_TYPE_EDGE_BOTH 46 IRQ_TYPE_EDGE_BOTH - 47 IRQ_TYPE_EDGE_BOTH 48 IRQ_TYPE_EDGE_BOTH - 49 IRQ_TYPE_EDGE_BOTH 50 IRQ_TYPE_EDGE_BOTH>; + interrupts = <42 43 44 45 46 47 48 49 50>; ti,ngpio = <144>; ti,davinci-gpio-unbanked = <0>; status = "disabled"; diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index 02a136a4661aa1ed172e4926a7a8dabad9e271b9..a5bd8f0205e8e58f719a519685e8709df431bfc9 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi @@ -1818,7 +1818,7 @@ }; }; - dcan1: can@481cc000 { + dcan1: can@4ae3c000 { compatible = "ti,dra7-d_can"; ti,hwmods = "dcan1"; reg = <0x4ae3c000 0x2000>; @@ -1828,7 +1828,7 @@ status = "disabled"; }; - dcan2: can@481d0000 { + dcan2: can@48480000 { compatible = "ti,dra7-d_can"; ti,hwmods = "dcan2"; reg = <0x48480000 0x2000>; diff --git a/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi b/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi index eeb7679fd348a4932d123a8bd7ec76fb5107cc63..849eb3443cde2712705bbbe6c74679ddb81095a8 100644 --- a/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi +++ b/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi @@ -644,7 +644,7 @@ dsa,member = <0 0>; eeprom-length = <512>; interrupt-parent = <&gpio6>; - interrupts = <3 IRQ_TYPE_EDGE_FALLING>; + interrupts = <3 
IRQ_TYPE_LEVEL_LOW>; interrupt-controller; #interrupt-cells = <2>; diff --git a/arch/arm/boot/dts/imx7d.dtsi b/arch/arm/boot/dts/imx7d.dtsi index 4d308d17f040c71157db72a8d81abf8de9dd4d8e..119b63ffb0fec4c55ef4c424343e032dd951da37 100644 --- a/arch/arm/boot/dts/imx7d.dtsi +++ b/arch/arm/boot/dts/imx7d.dtsi @@ -144,10 +144,14 @@ interrupt-names = "msi"; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0x7>; - interrupt-map = <0 0 0 1 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>, - <0 0 0 2 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>, - <0 0 0 3 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>, - <0 0 0 4 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>; + /* + * Reference manual lists pci irqs incorrectly + * Real hardware ordering is same as imx6: D+MSI, C, B, A + */ + interrupt-map = <0 0 0 1 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>, + <0 0 0 2 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>, + <0 0 0 3 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>, + <0 0 0 4 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clks IMX7D_PCIE_CTRL_ROOT_CLK>, <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>, <&clks IMX7D_PCIE_PHY_ROOT_CLK>; diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi index 379b4a03cfe2f7b92b2c3bd630d204be12a337d9..2d20f60947b951bd1181cef7326a76f82213f45c 100644 --- a/arch/arm/boot/dts/ls1021a.dtsi +++ b/arch/arm/boot/dts/ls1021a.dtsi @@ -84,6 +84,7 @@ device_type = "cpu"; reg = <0xf01>; clocks = <&clockgen 1 0>; + #cooling-cells = <2>; }; }; diff --git a/arch/arm/boot/dts/mt7623.dtsi b/arch/arm/boot/dts/mt7623.dtsi index 1853573235724419002fd7617ddb9bf0e9a20fc3..028cf4a5887fcecdff631a2c922e2eec0776258d 100644 --- a/arch/arm/boot/dts/mt7623.dtsi +++ b/arch/arm/boot/dts/mt7623.dtsi @@ -98,6 +98,7 @@ compatible = "arm,cortex-a7"; reg = <0x1>; operating-points-v2 = <&cpu_opp_table>; + #cooling-cells = <2>; clock-frequency = <1300000000>; }; @@ -106,6 +107,7 @@ compatible = "arm,cortex-a7"; reg = <0x2>; operating-points-v2 = <&cpu_opp_table>; + #cooling-cells = <2>; clock-frequency = <1300000000>; }; @@ -114,6 +116,7 @@ compatible = "arm,cortex-a7"; reg = <0x3>; operating-points-v2 = <&cpu_opp_table>; + #cooling-cells = <2>; clock-frequency = <1300000000>; }; }; diff --git a/arch/arm/boot/dts/omap4-droid4-xt894.dts b/arch/arm/boot/dts/omap4-droid4-xt894.dts index 8b93d37310f28ba4c10ad81d35f8419914ef4633..bad690b23081bba82637ad6f05122da34a71b0cc 100644 --- a/arch/arm/boot/dts/omap4-droid4-xt894.dts +++ b/arch/arm/boot/dts/omap4-droid4-xt894.dts @@ -314,7 +314,7 @@ &mmc2 { vmmc-supply = <&vsdio>; bus-width = <8>; - non-removable; + ti,non-removable; }; &mmc3 { diff --git a/arch/arm/boot/dts/overlays/Makefile b/arch/arm/boot/dts/overlays/Makefile index bc15f265604091f45360161a5833f0fd78a876dc..7cdd0195e7eb854f009bef926b241e849585c48a 100644 --- a/arch/arm/boot/dts/overlays/Makefile +++ b/arch/arm/boot/dts/overlays/Makefile @@ -7,6 +7,7 @@ dtbo-$(CONFIG_ARCH_BCM2835) += \ ads1115.dtbo \ ads7846.dtbo \ adv7282m.dtbo \ + adv728x-m.dtbo \ akkordion-iqdacplus.dtbo \ allo-boss-dac-pcm512x-audio.dtbo \ allo-digione.dtbo \ @@ -16,6 +17,7 @@ dtbo-$(CONFIG_ARCH_BCM2835) += \ applepi-dac.dtbo \ at86rf233.dtbo \ audioinjector-addons.dtbo \ + audioinjector-ultra.dtbo \ audioinjector-wm8731-audio.dtbo \ audremap.dtbo \ balena-fin.dtbo \ @@ -33,6 +35,7 @@ dtbo-$(CONFIG_ARCH_BCM2835) += \ fe-pi-audio.dtbo \ goodix.dtbo \ googlevoicehat-soundcard.dtbo \ + gpio-fan.dtbo \ gpio-ir.dtbo \ gpio-ir-tx.dtbo \ gpio-key.dtbo \ diff --git a/arch/arm/boot/dts/overlays/README b/arch/arm/boot/dts/overlays/README index 
0c6afe53dbdecfecdbcd0135b9b853d459d5ff19..57fa9d83e85d97fca691340e8d925e4ff6cd1203 100644 --- a/arch/arm/boot/dts/overlays/README +++ b/arch/arm/boot/dts/overlays/README @@ -274,6 +274,19 @@ Info: Analog Devices ADV7282M analogue video to CSI2 bridge. Load: dtoverlay=adv7282m,= Params: i2c_pins_28_29 Use pins 28&29 for the I2C instead of 44&45. This is required for Pi B+, 2, 0, and 0W. + addr Overrides the I2C address (default 0x21) + + +Name: adv728x-m +Info: Analog Devices ADV728[0|1|2]-M analogue video to CSI2 bridges. + This is a wrapper for adv7282m, and defaults to ADV7282M. +Load: dtoverlay=adv728x-m,= +Params: i2c_pins_28_29 Use pins 28&29 for the I2C instead of 44&45. + This is required for Pi B+, 2, 0, and 0W. + addr Overrides the I2C address (default 0x21) + adv7280m Select ADV7280-M. + adv7281m Select ADV7281-M. + adv7281ma Select ADV7281-MA. Name: akkordion-iqdacplus @@ -398,6 +411,12 @@ Params: non-stop-clocks Keeps the clocks running even when the stream is paused or stopped (default off) +Name: audioinjector-ultra +Info: Configures the audioinjector.net ultra soundcard +Load: dtoverlay=audioinjector-ultra +Params: + + Name: audioinjector-wm8731-audio Info: Configures the audioinjector.net audio add on soundcard Load: dtoverlay=audioinjector-wm8731-audio @@ -544,6 +563,14 @@ Load: dtoverlay=googlevoicehat-soundcard Params: +Name: gpio-fan +Info: Configure a GPIO pin to control a cooling fan. +Load: dtoverlay=gpio-fan,= +Params: gpiopin GPIO used to control the fan (default 12) + temp Temperature at which the fan switches on, in + millicelcius (default 55000) + + Name: gpio-ir Info: Use GPIO pin as rc-core style infrared receiver input. The rc-core- based gpio_ir_recv driver maps received keys directly to a @@ -1256,15 +1283,7 @@ Info: Omnivision OV5647 camera module. Uses Unicam 1, which is the standard camera connector on most Pi variants. Load: dtoverlay=ov5647,= -Params: cam0-pwdn GPIO used to control the sensor powerdown line. - - cam0-led GPIO used to control the sensor led - Both these fields should be automatically filled - in by the firmware to reflect the default GPIO - configuration of the particular Pi variant in - use. - - i2c_pins_28_29 Use pins 28&29 for the I2C instead of 44&45. +Params: i2c_pins_28_29 Use pins 28&29 for the I2C instead of 44&45. This is required for Pi B+, 2, 0, and 0W. 
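
Illustrative aside (not part of the patch): the gpio-fan overlay documented in the README hunks above would typically be enabled from /boot/config.txt with a line such as the following, where the values shown are simply the documented defaults:

    dtoverlay=gpio-fan,gpiopin=12,temp=55000
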
diff --git a/arch/arm/boot/dts/overlays/adv7282m-overlay.dts b/arch/arm/boot/dts/overlays/adv7282m-overlay.dts index 51e7451d4d9b8d42e13c16c8625b0427061aa6fb..fa5fede220bde5b1f8c685c4a77aca9efdc7a57c 100644 --- a/arch/arm/boot/dts/overlays/adv7282m-overlay.dts +++ b/arch/arm/boot/dts/overlays/adv7282m-overlay.dts @@ -12,13 +12,13 @@ #size-cells = <0>; status = "okay"; - adv7282: adv7282@21 { + adv728x: adv728x@21 { compatible = "adi,adv7282-m"; reg = <0x21>; status = "okay"; clock-frequency = <24000000>; port { - adv7282_0: endpoint { + adv728x_0: endpoint { remote-endpoint = <&csi1_ep>; clock-lanes = <0>; data-lanes = <1>; @@ -42,7 +42,7 @@ #address-cells = <1>; #size-cells = <0>; csi1_ep: endpoint { - remote-endpoint = <&adv7282_0>; + remote-endpoint = <&adv728x_0>; }; }; }; @@ -70,6 +70,7 @@ }; __overrides__ { - i2c_pins_28_29 = <0>,"+2-3"; + i2c_pins_28_29 = <0>,"+2-3"; + addr = <&adv728x>,"reg:0"; }; }; diff --git a/arch/arm/boot/dts/overlays/adv728x-m-overlay.dts b/arch/arm/boot/dts/overlays/adv728x-m-overlay.dts new file mode 100644 index 0000000000000000000000000000000000000000..f6415bba85596ab3b6110bce814171ca096e8e2b --- /dev/null +++ b/arch/arm/boot/dts/overlays/adv728x-m-overlay.dts @@ -0,0 +1,36 @@ +// Definitions for Analog Devices ADV728[0|1|2]-M video to CSI2 bridges on VC +// I2C bus + +#include "adv7282m-overlay.dts" + +/{ + compatible = "brcm,bcm2708"; + + // Fragment numbers deliberately high to avoid conflicts with the + // included adv7282m overlay file. + + fragment@101 { + target = <&adv728x>; + __dormant__ { + compatible = "adi,adv7280-m"; + }; + }; + fragment@102 { + target = <&adv728x>; + __dormant__ { + compatible = "adi,adv7281-m"; + }; + }; + fragment@103 { + target = <&adv728x>; + __dormant__ { + compatible = "adi,adv7281-ma"; + }; + }; + + __overrides__ { + adv7280m = <0>, "+101"; + adv7281m = <0>, "+102"; + adv7281ma = <0>, "+103"; + }; +}; diff --git a/arch/arm/boot/dts/overlays/audioinjector-ultra-overlay.dts b/arch/arm/boot/dts/overlays/audioinjector-ultra-overlay.dts new file mode 100644 index 0000000000000000000000000000000000000000..280e983bbf190ff371a3a33cdf59dcf760cf2c25 --- /dev/null +++ b/arch/arm/boot/dts/overlays/audioinjector-ultra-overlay.dts @@ -0,0 +1,71 @@ +// Definitions for audioinjector.net audio add on soundcard +/dts-v1/; +/plugin/; + +/ { + compatible = "brcm,bcm2708"; + + fragment@0 { + target = <&i2s>; + __overlay__ { + status = "okay"; + }; + }; + + fragment@1 { + target = <&i2c1>; + __overlay__ { + #address-cells = <1>; + #size-cells = <0>; + status = "okay"; + + cs4265: cs4265@4e { + #sound-dai-cells = <0>; + compatible = "cirrus,cs4265"; + reg = <0x4e>; + reset-gpios = <&gpio 5 0>; + status = "okay"; + }; + }; + }; + + fragment@2 { + target = <&sound>; + __overlay__ { + compatible = "simple-audio-card"; + i2s-controller = <&i2s>; + status = "okay"; + + simple-audio-card,name = "audioinjector-ultra"; + + simple-audio-card,widgets = + "Line", "OUTPUTS", + "Line", "INPUTS"; + + simple-audio-card,routing = + "OUTPUTS","LINEOUTL", + "OUTPUTS","LINEOUTR", + "OUTPUTS","SPDIFOUT", + "LINEINL","INPUTS", + "LINEINR","INPUTS", + "MICL","INPUTS", + "MICR","INPUTS"; + + simple-audio-card,format = "i2s"; + + simple-audio-card,bitclock-master = <&sound_master>; + simple-audio-card,frame-master = <&sound_master>; + + simple-audio-card,cpu { + sound-dai = <&i2s>; + dai-tdm-slot-num = <2>; + dai-tdm-slot-width = <32>; + }; + + sound_master: simple-audio-card,codec { + sound-dai = <&cs4265>; + system-clock-frequency = <12288000>; + }; + }; + }; 
+}; diff --git a/arch/arm/boot/dts/overlays/gpio-fan-overlay.dts b/arch/arm/boot/dts/overlays/gpio-fan-overlay.dts new file mode 100644 index 0000000000000000000000000000000000000000..66fd9950c34be0b8642af6eebc35e2863a54d1db --- /dev/null +++ b/arch/arm/boot/dts/overlays/gpio-fan-overlay.dts @@ -0,0 +1,71 @@ +/* + * Overlay for the Raspberry Pi GPIO Fan @ BCM GPIO12. + * Optional parameters: + * - "gpiopin" - default GPIO12 + * - "temp" - default 55000 + * Requires: + * - kernel configurations: CONFIG_SENSORS_GPIO_FAN=m and CONFIG_SENSORS_PWM_FAN=m; + * - kernel rebuid; + * - DC Fan connected to GPIO via a N-MOSFET (2N7002) + * + * ┌─────────────────────┐ + * │Fan negative terminal│ + * └┬────────────────────┘ + * │ + * │──┘ + * [GPIO12]──────┤ │<─┐ 2N7002 + * │──┤ + * │ + * ─┴─ + * GND + * + * sudo dtc -W no-unit_address_vs_reg -@ -I dts -O dtb -o /boot/overlays/gpio-fan.dtbo gpio-fan.dts + * sudo nano /boot/config.txt add "dtoverlay=gpio-fan" or "dtoverlay=gpio-fan,gpiopin=12,temp=45000" + * or + * sudo sh -c "echo '\n# Enable PI GPIO-Fan\ndtoverlay=gpio-fan\n' >> /boot/config.txt" + * sudo sh -c "echo '\n# Enable PI GPIO-Fan\ndtoverlay=gpio-fan,gpiopin=12\n' >> /boot/config.txt" + * + */ +/dts-v1/; +/plugin/; + +/ { + compatible = "brcm,bcm2708"; + + fragment@0 { + target-path = "/"; + __overlay__ { + fan0: gpio-fan@0 { + compatible = "gpio-fan"; + gpios = <&gpio 12 1>; + gpio-fan,speed-map = <0 0>, + <5000 1>; + #cooling-cells = <2>; + }; + }; + }; + + fragment@1 { + target = <&cpu_thermal>; + polling-delay = <2000>; /* milliseconds */ + __overlay__ { + trips { + cpu_hot: trip-point@0 { + temperature = <55000>; /* (millicelsius) Fan started at 55°C */ + hysteresis = <5000>; /* (millicelsius) Fan stopped at 50°C */ + type = "active"; + }; + }; + cooling-maps { + map0 { + trip = <&cpu_hot>; + cooling-device = <&fan0 1 1>; + }; + }; + }; + }; + __overrides__ { + gpiopin = <&fan0>,"gpios:4", <&fan0>,"brcm,pins:0"; + temp = <&cpu_hot>,"temperature:0"; + }; +}; diff --git a/arch/arm/boot/dts/overlays/ov5647-overlay.dts b/arch/arm/boot/dts/overlays/ov5647-overlay.dts index c92306a0076b043b9be0c6a0f489c0b3aa0ef67b..8996e0ed0bb79f5aabfad9db7e379a136708c7ab 100644 --- a/arch/arm/boot/dts/overlays/ov5647-overlay.dts +++ b/arch/arm/boot/dts/overlays/ov5647-overlay.dts @@ -78,9 +78,17 @@ }; }; + fragment@5 { + target-path="/__overrides__"; + __overlay__ { + cam0-pwdn-ctrl = <&ov5647>,"pwdn-gpios:0"; + cam0-pwdn = <&ov5647>,"pwdn-gpios:4"; + cam0-led-ctrl = <&ov5647>,"pwdn-gpios:12"; + cam0-led = <&ov5647>,"pwdn-gpios:16"; + }; + }; + __overrides__ { i2c_pins_28_29 = <0>,"+4-5"; - cam0-pwdn = <&ov5647>,"pwdn-gpios:4"; - cam0-led = <&ov5647>,"pwdn-gpios:16"; }; }; diff --git a/arch/arm/boot/dts/overlays/rpi-poe-overlay.dts b/arch/arm/boot/dts/overlays/rpi-poe-overlay.dts index 9137279bb9237e569b2b7b80d6e7690b839a999d..0a32fff036a7cc4788471663535893312f50b362 100644 --- a/arch/arm/boot/dts/overlays/rpi-poe-overlay.dts +++ b/arch/arm/boot/dts/overlays/rpi-poe-overlay.dts @@ -11,7 +11,7 @@ target-path = "/"; __overlay__ { fan0: rpi-poe-fan@0 { - compatible = "rpi-poe-fan"; + compatible = "raspberrypi,rpi-poe-fan"; firmware = <&firmware>; cooling-min-state = <0>; cooling-max-state = <3>; diff --git a/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts b/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts index 4dc0b347b1eed522e696ecb85b98f8a9f936ab74..c2dc9d09484abd41d43d13a7a70b0f986a926e19 100644 --- a/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts +++ 
b/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts @@ -189,6 +189,8 @@ regulator-max-microvolt = <2950000>; regulator-boot-on; + regulator-system-load = <200000>; + regulator-allow-set-load; }; l21 { diff --git a/arch/arm/boot/dts/tegra30-cardhu.dtsi b/arch/arm/boot/dts/tegra30-cardhu.dtsi index 92a9740c533f217a10cd6fb4421d2fc6871d015d..3b1db7b9ec502c444aedc438c89fda6777347ad6 100644 --- a/arch/arm/boot/dts/tegra30-cardhu.dtsi +++ b/arch/arm/boot/dts/tegra30-cardhu.dtsi @@ -206,6 +206,7 @@ #address-cells = <1>; #size-cells = <0>; reg = <0x70>; + reset-gpio = <&gpio TEGRA_GPIO(BB, 0) GPIO_ACTIVE_LOW>; }; }; diff --git a/arch/arm/configs/bcm2709_defconfig b/arch/arm/configs/bcm2709_defconfig index 1258c0ca7e089b8e26938094cc0c72028b582a8e..c1e55cd96649b7fb0406850a383770006de4d134 100644 --- a/arch/arm/configs/bcm2709_defconfig +++ b/arch/arm/configs/bcm2709_defconfig @@ -654,6 +654,7 @@ CONFIG_BATTERY_DS2760=m CONFIG_BATTERY_GAUGE_LTC2941=m CONFIG_HWMON=m CONFIG_SENSORS_DS1621=m +CONFIG_SENSORS_GPIO_FAN=m CONFIG_SENSORS_JC42=m CONFIG_SENSORS_LM75=m CONFIG_SENSORS_RPI_POE_FAN=m @@ -912,6 +913,7 @@ CONFIG_SND_PISOUND=m CONFIG_SND_SOC_ADAU1701=m CONFIG_SND_SOC_ADAU7002=m CONFIG_SND_SOC_AK4554=m +CONFIG_SND_SOC_CS4265=m CONFIG_SND_SOC_CS4271_I2C=m CONFIG_SND_SOC_SPDIF=m CONFIG_SND_SOC_WM8804_I2C=m @@ -925,6 +927,7 @@ CONFIG_HID_APPLE=m CONFIG_HID_ASUS=m CONFIG_HID_BELKIN=m CONFIG_HID_BETOP_FF=m +CONFIG_HID_BIGBEN_FF=m CONFIG_HID_CHERRY=m CONFIG_HID_CHICONY=m CONFIG_HID_CYPRESS=m diff --git a/arch/arm/configs/bcmrpi_defconfig b/arch/arm/configs/bcmrpi_defconfig index c5cccdb6bcdd89d8b4fbea60466552706ce49a19..eea3751a12e24cff42f67704e93dc59c59a71153 100644 --- a/arch/arm/configs/bcmrpi_defconfig +++ b/arch/arm/configs/bcmrpi_defconfig @@ -648,6 +648,7 @@ CONFIG_BATTERY_DS2760=m CONFIG_BATTERY_GAUGE_LTC2941=m CONFIG_HWMON=m CONFIG_SENSORS_DS1621=m +CONFIG_SENSORS_GPIO_FAN=m CONFIG_SENSORS_JC42=m CONFIG_SENSORS_LM75=m CONFIG_SENSORS_RPI_POE_FAN=m @@ -906,6 +907,7 @@ CONFIG_SND_PISOUND=m CONFIG_SND_SOC_ADAU1701=m CONFIG_SND_SOC_ADAU7002=m CONFIG_SND_SOC_AK4554=m +CONFIG_SND_SOC_CS4265=m CONFIG_SND_SOC_CS4271_I2C=m CONFIG_SND_SOC_SPDIF=m CONFIG_SND_SOC_WM8804_I2C=m @@ -919,6 +921,7 @@ CONFIG_HID_APPLE=m CONFIG_HID_ASUS=m CONFIG_HID_BELKIN=m CONFIG_HID_BETOP_FF=m +CONFIG_HID_BIGBEN_FF=m CONFIG_HID_CHERRY=m CONFIG_HID_CHICONY=m CONFIG_HID_CYPRESS=m diff --git a/arch/arm/configs/imx_v4_v5_defconfig b/arch/arm/configs/imx_v4_v5_defconfig index ca0f13cafe387e0a0009a8fd9ad97d87f8b2b62a..6e5a3d9c23e1027bd9a3c136b8610e9a641f6192 100644 --- a/arch/arm/configs/imx_v4_v5_defconfig +++ b/arch/arm/configs/imx_v4_v5_defconfig @@ -144,9 +144,11 @@ CONFIG_USB_STORAGE=y CONFIG_USB_CHIPIDEA=y CONFIG_USB_CHIPIDEA_UDC=y CONFIG_USB_CHIPIDEA_HOST=y +CONFIG_USB_CHIPIDEA_ULPI=y CONFIG_NOP_USB_XCEIV=y CONFIG_USB_GADGET=y CONFIG_USB_ETH=m +CONFIG_USB_ULPI_BUS=y CONFIG_MMC=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c index 25f12118c364839c7f8337852789a30c22655e8e..2f6ac1afa8046769e23f8118bad4babc22cc94ce 100644 --- a/arch/arm/mach-davinci/board-da850-evm.c +++ b/arch/arm/mach-davinci/board-da850-evm.c @@ -773,7 +773,7 @@ static struct gpiod_lookup_table mmc_gpios_table = { GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_CD_PIN, "cd", GPIO_ACTIVE_LOW), GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_WP_PIN, "wp", - GPIO_ACTIVE_LOW), + GPIO_ACTIVE_HIGH), }, }; diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c 
index b529ba04ed16656aeefc80eb5a4bb95119b6753d..eafa26d9f692d5a1ac654bc4167497d68c44dd3e 100644 --- a/arch/arm/mach-exynos/suspend.c +++ b/arch/arm/mach-exynos/suspend.c @@ -209,6 +209,7 @@ static int __init exynos_pmu_irq_init(struct device_node *node, NULL); if (!domain) { iounmap(pmu_base_addr); + pmu_base_addr = NULL; return -ENOMEM; } diff --git a/arch/arm/mach-hisi/hotplug.c b/arch/arm/mach-hisi/hotplug.c index a129aae7260286fcaf77bc110bce3dea71fb679e..909bb24937812028a706fe865de275b228d1f53d 100644 --- a/arch/arm/mach-hisi/hotplug.c +++ b/arch/arm/mach-hisi/hotplug.c @@ -148,13 +148,20 @@ static int hi3xxx_hotplug_init(void) struct device_node *node; node = of_find_compatible_node(NULL, NULL, "hisilicon,sysctrl"); - if (node) { - ctrl_base = of_iomap(node, 0); - id = HI3620_CTRL; - return 0; + if (!node) { + id = ERROR_CTRL; + return -ENOENT; } - id = ERROR_CTRL; - return -ENOENT; + + ctrl_base = of_iomap(node, 0); + of_node_put(node); + if (!ctrl_base) { + id = ERROR_CTRL; + return -ENOMEM; + } + + id = HI3620_CTRL; + return 0; } void hi3xxx_set_cpu(int cpu, bool enable) @@ -173,11 +180,15 @@ static bool hix5hd2_hotplug_init(void) struct device_node *np; np = of_find_compatible_node(NULL, NULL, "hisilicon,cpuctrl"); - if (np) { - ctrl_base = of_iomap(np, 0); - return true; - } - return false; + if (!np) + return false; + + ctrl_base = of_iomap(np, 0); + of_node_put(np); + if (!ctrl_base) + return false; + + return true; } void hix5hd2_set_cpu(int cpu, bool enable) @@ -219,10 +230,10 @@ void hip01_set_cpu(int cpu, bool enable) if (!ctrl_base) { np = of_find_compatible_node(NULL, NULL, "hisilicon,hip01-sysctrl"); - if (np) - ctrl_base = of_iomap(np, 0); - else - BUG(); + BUG_ON(!np); + ctrl_base = of_iomap(np, 0); + of_node_put(np); + BUG_ON(!ctrl_base); } if (enable) { diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c index 27a78c80e5b17352aafc4924d04694217bfa5c15..73d5d72dfc3e520894a85875de94791fac84236d 100644 --- a/arch/arm/mach-mvebu/pmsu.c +++ b/arch/arm/mach-mvebu/pmsu.c @@ -116,8 +116,8 @@ void mvebu_pmsu_set_cpu_boot_addr(int hw_cpu, void *boot_addr) PMSU_BOOT_ADDR_REDIRECT_OFFSET(hw_cpu)); } -extern unsigned char mvebu_boot_wa_start; -extern unsigned char mvebu_boot_wa_end; +extern unsigned char mvebu_boot_wa_start[]; +extern unsigned char mvebu_boot_wa_end[]; /* * This function sets up the boot address workaround needed for SMP @@ -130,7 +130,7 @@ int mvebu_setup_boot_addr_wa(unsigned int crypto_eng_target, phys_addr_t resume_addr_reg) { void __iomem *sram_virt_base; - u32 code_len = &mvebu_boot_wa_end - &mvebu_boot_wa_start; + u32 code_len = mvebu_boot_wa_end - mvebu_boot_wa_start; mvebu_mbus_del_window(BOOTROM_BASE, BOOTROM_SIZE); mvebu_mbus_add_window_by_id(crypto_eng_target, crypto_eng_attribute, diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c index 7cc060865c929b655078f45bf7bea7f83b9bf739..ac4d2f030b874f46d0864782f0ce2acc9f097845 100644 --- a/arch/arm/mach-omap2/omap-smp.c +++ b/arch/arm/mach-omap2/omap-smp.c @@ -109,6 +109,45 @@ void omap5_erratum_workaround_801819(void) static inline void omap5_erratum_workaround_801819(void) { } #endif +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR +/* + * Configure ACR and enable ACTLR[0] (Enable invalidates of BTB with + * ICIALLU) to activate the workaround for secondary Core. + * NOTE: it is assumed that the primary core's configuration is done + * by the boot loader (kernel will detect a misconfiguration and complain + * if this is not done). 
+ * + * In General Purpose(GP) devices, ACR bit settings can only be done + * by ROM code in "secure world" using the smc call and there is no + * option to update the "firmware" on such devices. This also works for + * High security(HS) devices, as a backup option in case the + * "update" is not done in the "security firmware". + */ +static void omap5_secondary_harden_predictor(void) +{ + u32 acr, acr_mask; + + asm volatile ("mrc p15, 0, %0, c1, c0, 1" : "=r" (acr)); + + /* + * ACTLR[0] (Enable invalidates of BTB with ICIALLU) + */ + acr_mask = BIT(0); + + /* Do we already have it done.. if yes, skip expensive smc */ + if ((acr & acr_mask) == acr_mask) + return; + + acr |= acr_mask; + omap_smc1(OMAP5_DRA7_MON_SET_ACR_INDEX, acr); + + pr_debug("%s: ARM ACR setup for CVE_2017_5715 applied on CPU%d\n", + __func__, smp_processor_id()); +} +#else +static inline void omap5_secondary_harden_predictor(void) { } +#endif + static void omap4_secondary_init(unsigned int cpu) { /* @@ -131,6 +170,8 @@ static void omap4_secondary_init(unsigned int cpu) set_cntfreq(); /* Configure ACR to disable streaming WA for 801819 */ omap5_erratum_workaround_801819(); + /* Enable ACR to allow for ICUALLU workaround */ + omap5_secondary_harden_predictor(); } /* diff --git a/arch/arm/mach-omap2/omap_hwmod_reset.c b/arch/arm/mach-omap2/omap_hwmod_reset.c index b68f9c0aff0b594d97af0c004c2525a7072197f0..d5ddba00bb7310bf0120902e2837dbddc4e90713 100644 --- a/arch/arm/mach-omap2/omap_hwmod_reset.c +++ b/arch/arm/mach-omap2/omap_hwmod_reset.c @@ -92,11 +92,13 @@ static void omap_rtc_wait_not_busy(struct omap_hwmod *oh) */ void omap_hwmod_rtc_unlock(struct omap_hwmod *oh) { - local_irq_disable(); + unsigned long flags; + + local_irq_save(flags); omap_rtc_wait_not_busy(oh); omap_hwmod_write(OMAP_RTC_KICK0_VALUE, oh, OMAP_RTC_KICK0_REG); omap_hwmod_write(OMAP_RTC_KICK1_VALUE, oh, OMAP_RTC_KICK1_REG); - local_irq_enable(); + local_irq_restore(flags); } /** @@ -110,9 +112,11 @@ void omap_hwmod_rtc_unlock(struct omap_hwmod *oh) */ void omap_hwmod_rtc_lock(struct omap_hwmod *oh) { - local_irq_disable(); + unsigned long flags; + + local_irq_save(flags); omap_rtc_wait_not_busy(oh); omap_hwmod_write(0x0, oh, OMAP_RTC_KICK0_REG); omap_hwmod_write(0x0, oh, OMAP_RTC_KICK1_REG); - local_irq_enable(); + local_irq_restore(flags); } diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c index 9c10248fadccc2d03ef3b3bcbddbe0b43347f158..4e8c2116808ecf3d36d36653184dc89d2941885e 100644 --- a/arch/arm/mach-pxa/irq.c +++ b/arch/arm/mach-pxa/irq.c @@ -185,7 +185,7 @@ static int pxa_irq_suspend(void) { int i; - for (i = 0; i < pxa_internal_irq_nr / 32; i++) { + for (i = 0; i < DIV_ROUND_UP(pxa_internal_irq_nr, 32); i++) { void __iomem *base = irq_base(i); saved_icmr[i] = __raw_readl(base + ICMR); @@ -204,7 +204,7 @@ static void pxa_irq_resume(void) { int i; - for (i = 0; i < pxa_internal_irq_nr / 32; i++) { + for (i = 0; i < DIV_ROUND_UP(pxa_internal_irq_nr, 32); i++) { void __iomem *base = irq_base(i); __raw_writel(saved_icmr[i], base + ICMR); diff --git a/arch/arm/mach-rockchip/Kconfig b/arch/arm/mach-rockchip/Kconfig index a4065966881ae44d318ec4193d7091269541970c..57f0bc4cd9b89889c3b6f80b17f41f3b5eb0def8 100644 --- a/arch/arm/mach-rockchip/Kconfig +++ b/arch/arm/mach-rockchip/Kconfig @@ -18,6 +18,7 @@ config ARCH_ROCKCHIP select ARM_GLOBAL_TIMER select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK select ZONE_DMA if ARM_LPAE + select PM help Support for Rockchip's Cortex-A9 Single-to-Quad-Core-SoCs containing the RK2928, RK30xx and RK31xx series. 
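
Editorial note, not part of the patch: the omap_hwmod_rtc_unlock()/omap_hwmod_rtc_lock() hunks above replace unconditional local_irq_disable()/local_irq_enable() with local_irq_save()/local_irq_restore(), so a caller that already runs with interrupts disabled does not get them silently re-enabled on exit. A minimal sketch of that pattern, using a hypothetical helper and counter purely for illustration:

	#include <linux/irqflags.h>

	/* Hypothetical example of the save/restore pattern (not from this patch). */
	static void update_shared_counter(unsigned long *counter)
	{
		unsigned long flags;

		local_irq_save(flags);		/* record current IRQ state, then disable */
		(*counter)++;			/* critical section */
		local_irq_restore(flags);	/* put back the caller's IRQ state */
	}
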
diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c index 52d1cd14fda45d961ece2d0c4d417d986a372de9..091e9a3c2dcb10d39ae877c2d586384b4ddc52fe 100644 --- a/arch/arm/probes/kprobes/core.c +++ b/arch/arm/probes/kprobes/core.c @@ -291,8 +291,8 @@ void __kprobes kprobe_handler(struct pt_regs *regs) break; case KPROBE_REENTER: /* A nested probe was hit in FIQ, it is a BUG */ - pr_warn("Unrecoverable kprobe detected at %p.\n", - p->addr); + pr_warn("Unrecoverable kprobe detected.\n"); + dump_kprobe(p); /* fall through */ default: /* impossible cases */ diff --git a/arch/arm/probes/kprobes/test-core.c b/arch/arm/probes/kprobes/test-core.c index 1c98a87786ca768adb622f1720df442a272bfb75..a10d7187ad2c51ecc07f6dc2334d7cd5482cb35a 100644 --- a/arch/arm/probes/kprobes/test-core.c +++ b/arch/arm/probes/kprobes/test-core.c @@ -1517,7 +1517,6 @@ fail: print_registers(&result_regs); if (mem) { - pr_err("current_stack=%p\n", current_stack); pr_err("expected_memory:\n"); print_memory(expected_memory, mem_size); pr_err("result_memory:\n"); diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 6ccd878c32c2e926cf8d9a6917e4d89ac5dc941a..f3c5213ea0ec6f25d0f3a117035352ff3e0de66e 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -694,7 +694,6 @@ config NEED_PER_CPU_EMBED_FIRST_CHUNK config HOLES_IN_ZONE def_bool y - depends on NUMA source kernel/Kconfig.preempt source kernel/Kconfig.hz diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms index e57169584af075eca0e0a2c1900fd64fe886bbb3..19ff97b7efe6971d69f95fbded84d989b1c7591e 100644 --- a/arch/arm64/Kconfig.platforms +++ b/arch/arm64/Kconfig.platforms @@ -142,6 +142,7 @@ config ARCH_ROCKCHIP select GPIOLIB select PINCTRL select PINCTRL_ROCKCHIP + select PM select ROCKCHIP_TIMER help This enables support for the ARMv8 based Rockchip chipsets, diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-mali.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-mali.dtsi index f06cc234693b76511a27a33c140caf8c8a270c49..379abc3d82fe56836a2603a4e446eee3835895be 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-mali.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-mali.dtsi @@ -7,7 +7,7 @@ &apb { mali: gpu@c0000 { - compatible = "amlogic,meson-gxbb-mali", "arm,mali-450"; + compatible = "amlogic,meson-gxl-mali", "arm,mali-450"; reg = <0x0 0xc0000 0x0 0x40000>; interrupts = , , diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi index 35c8457e3d1f23d32c3db46cbf43f95a3e1ef795..0b72094bcf5a2f63ba4e6229b8f67e1084814496 100644 --- a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi +++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi @@ -118,7 +118,7 @@ #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; - interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 281 IRQ_TYPE_NONE>; + interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 281 IRQ_TYPE_LEVEL_HIGH>; linux,pci-domain = <0>; @@ -149,7 +149,7 @@ #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; - interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 305 IRQ_TYPE_NONE>; + interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>; linux,pci-domain = <4>; @@ -566,7 +566,7 @@ reg = <0x66080000 0x100>; #address-cells = <1>; #size-cells = <0>; - interrupts = ; + interrupts = ; clock-frequency = <100000>; status = "disabled"; }; @@ -594,7 +594,7 @@ reg = <0x660b0000 0x100>; #address-cells = <1>; #size-cells = <0>; - interrupts = ; + interrupts = ; clock-frequency = <100000>; status = "disabled"; }; diff --git 
a/arch/arm64/boot/dts/broadcom/stingray/bcm958742k.dts b/arch/arm64/boot/dts/broadcom/stingray/bcm958742k.dts index eb6f08cdbd796c3d764393f9e2e70db2129b0e28..77efa28c4dd53db718b22e64569385f6d92c2feb 100644 --- a/arch/arm64/boot/dts/broadcom/stingray/bcm958742k.dts +++ b/arch/arm64/boot/dts/broadcom/stingray/bcm958742k.dts @@ -43,6 +43,10 @@ enet-phy-lane-swap; }; +&sdio0 { + mmc-ddr-1_8v; +}; + &uart2 { status = "okay"; }; diff --git a/arch/arm64/boot/dts/broadcom/stingray/bcm958742t.dts b/arch/arm64/boot/dts/broadcom/stingray/bcm958742t.dts index 5084b037320fd9cb65133ca929517062a245af3b..55ba495ef56e1f54b518483bc9e5369fcb03b441 100644 --- a/arch/arm64/boot/dts/broadcom/stingray/bcm958742t.dts +++ b/arch/arm64/boot/dts/broadcom/stingray/bcm958742t.dts @@ -42,3 +42,7 @@ &gphy0 { enet-phy-lane-swap; }; + +&sdio0 { + mmc-ddr-1_8v; +}; diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi index e6f75c633623cf45b9efc67844149551eb343e83..2b76293b51c838d79c03589ec40ce5cdc9479a42 100644 --- a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi +++ b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi @@ -409,7 +409,7 @@ reg = <0x000b0000 0x100>; #address-cells = <1>; #size-cells = <0>; - interrupts = ; + interrupts = ; clock-frequency = <100000>; status = "disabled"; }; @@ -453,7 +453,7 @@ reg = <0x000e0000 0x100>; #address-cells = <1>; #size-cells = <0>; - interrupts = ; + interrupts = ; clock-frequency = <100000>; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi index 1d63e6b879de7cc51784077be9b138f9f5635c8e..b6b44fdf7face8b00690bede37d09d59debed242 100644 --- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi +++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi @@ -187,7 +187,7 @@ led@6 { label = "apq8016-sbc:blue:bt"; gpios = <&pm8916_mpps 3 GPIO_ACTIVE_HIGH>; - linux,default-trigger = "bt"; + linux,default-trigger = "bluetooth-power"; default-state = "off"; }; }; diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi index 61da6e65900b2414c09a8d257f891772556f3e8e..3cc449425a0388530a9e755808cbe6dddee38f89 100644 --- a/arch/arm64/boot/dts/qcom/msm8916.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi @@ -1132,14 +1132,14 @@ port@0 { reg = <0>; - etf_out: endpoint { + etf_in: endpoint { slave-mode; remote-endpoint = <&funnel0_out>; }; }; port@1 { reg = <0>; - etf_in: endpoint { + etf_out: endpoint { remote-endpoint = <&replicator_in>; }; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi index d70e409e2b0cbd7a2a1d07f2c84e86475fe8fd02..efac2202b16ec10301a40dea1b10ef464bad8e1a 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi @@ -331,7 +331,7 @@ reg = <0x0 0xff120000 0x0 0x100>; interrupts = ; clocks = <&cru SCLK_UART1>, <&cru PCLK_UART1>; - clock-names = "sclk_uart", "pclk_uart"; + clock-names = "baudclk", "apb_pclk"; dmas = <&dmac 4>, <&dmac 5>; #dma-cells = <2>; pinctrl-names = "default"; diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi index a29c279b6e8e4c484b14c77047d1b8436103f6b3..dba6f0ff810639c3f22f43f744c896617243562b 100644 --- a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi +++ b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi @@ -55,6 +55,7 @@ clocks = <&sys_clk 32>; enable-method = "psci"; operating-points-v2 = <&cluster0_opp>; + #cooling-cells = <2>; }; cpu2: 
cpu@100 { @@ -73,6 +74,7 @@ clocks = <&sys_clk 33>; enable-method = "psci"; operating-points-v2 = <&cluster1_opp>; + #cooling-cells = <2>; }; }; diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h index ea9bb4e0e9bbd002e8dec644bcb03dbb25f55f72..e40f8a2df5457d63cbd81694b0545ea6bfb823cf 100644 --- a/arch/arm64/include/asm/cache.h +++ b/arch/arm64/include/asm/cache.h @@ -20,9 +20,14 @@ #define CTR_L1IP_SHIFT 14 #define CTR_L1IP_MASK 3 +#define CTR_DMINLINE_SHIFT 16 +#define CTR_IMINLINE_SHIFT 0 #define CTR_CWG_SHIFT 24 #define CTR_CWG_MASK 15 +#define CTR_CACHE_MINLINE_MASK \ + (0xf << CTR_DMINLINE_SHIFT | 0xf << CTR_IMINLINE_SHIFT) + #define CTR_L1IP(ctr) (((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK) #define ICACHE_POLICY_VPIPT 0 diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index 76c0d23ca161fb8a02f05b4ef47d088c2e1e4726..7d6425d426acba13d639fb562eee64b56a4f22fc 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -44,7 +44,8 @@ #define ARM64_HARDEN_BRANCH_PREDICTOR 24 #define ARM64_HARDEN_BP_POST_GUEST_EXIT 25 #define ARM64_SSBD 26 +#define ARM64_MISMATCHED_CACHE_TYPE 27 -#define ARM64_NCAPS 27 +#define ARM64_NCAPS 28 #endif /* __ASM_CPUCAPS_H */ diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index e5df3fce00082a3aa6d1711188e6d1055012dc9d..2b55aee7c051844b7ac705f75b85c62301748697 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -42,6 +42,11 @@ void kvm_inject_vabt(struct kvm_vcpu *vcpu); void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr); void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr); +static inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu) +{ + return !(vcpu->arch.hcr_el2 & HCR_RW); +} + static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu) { vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS; diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index eccdb28b4a39c98c3d55686bd7647a7e3732acc3..3d6d7fae45de523401e0acee29dee4c4b647e29a 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -16,6 +16,8 @@ * along with this program. If not, see . 
*/ +#include +#include #include #include #include @@ -45,12 +47,18 @@ is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope) } static bool -has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry, - int scope) +has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry, + int scope) { + u64 mask = CTR_CACHE_MINLINE_MASK; + + /* Skip matching the min line sizes for cache type check */ + if (entry->capability == ARM64_MISMATCHED_CACHE_TYPE) + mask ^= arm64_ftr_reg_ctrel0.strict_mask; + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); - return (read_cpuid_cachetype() & arm64_ftr_reg_ctrel0.strict_mask) != - (arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask); + return (read_cpuid_cachetype() & mask) != + (arm64_ftr_reg_ctrel0.sys_val & mask); } static int cpu_enable_trap_ctr_access(void *__unused) @@ -511,7 +519,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = { { .desc = "Mismatched cache line size", .capability = ARM64_MISMATCHED_CACHE_LINE_SIZE, - .matches = has_mismatched_cache_line_size, + .matches = has_mismatched_cache_type, + .def_scope = SCOPE_LOCAL_CPU, + .enable = cpu_enable_trap_ctr_access, + }, + { + .desc = "Mismatched cache type", + .capability = ARM64_MISMATCHED_CACHE_TYPE, + .matches = has_mismatched_cache_type, .def_scope = SCOPE_LOCAL_CPU, .enable = cpu_enable_trap_ctr_access, }, diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 376cf12edf0c5c39201073fd161f64221f5a3aea..003dd39225a0a90cf988d8d91622ec31cc50c5f1 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -180,14 +180,14 @@ static const struct arm64_ftr_bits ftr_ctr[] = { ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 28, 1, 1), /* IDC */ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0), /* CWG */ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 20, 4, 0), /* ERG */ - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1), /* DminLine */ + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1), /* * Linux can handle differing I-cache policies. Userspace JITs will * make use of *minLine. * If we have differing I-cache policies, report it as the weakest - VIPT. 
*/ ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_VIPT), /* L1Ip */ - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* IminLine */ + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IMINLINE_SHIFT, 4, 0), ARM64_FTR_END, }; diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index d849d9804011df6eabb143ed68da8d260dd5074d..22a5921562c7f84b9e6d25b8e3479269720780fe 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c @@ -275,7 +275,7 @@ static int __kprobes reenter_kprobe(struct kprobe *p, break; case KPROBE_HIT_SS: case KPROBE_REENTER: - pr_warn("Unrecoverable kprobe detected at %p.\n", p->addr); + pr_warn("Unrecoverable kprobe detected.\n"); dump_kprobe(p); BUG(); break; diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index edaf346d13d5fe31f4ec1a10d5a9647e46d92274..34d915b6974be24747cefed9bbff8b6c04882444 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -274,19 +274,22 @@ static int ptrace_hbp_set_event(unsigned int note_type, switch (note_type) { case NT_ARM_HW_BREAK: - if (idx < ARM_MAX_BRP) { - tsk->thread.debug.hbp_break[idx] = bp; - err = 0; - } + if (idx >= ARM_MAX_BRP) + goto out; + idx = array_index_nospec(idx, ARM_MAX_BRP); + tsk->thread.debug.hbp_break[idx] = bp; + err = 0; break; case NT_ARM_HW_WATCH: - if (idx < ARM_MAX_WRP) { - tsk->thread.debug.hbp_watch[idx] = bp; - err = 0; - } + if (idx >= ARM_MAX_WRP) + goto out; + idx = array_index_nospec(idx, ARM_MAX_WRP); + tsk->thread.debug.hbp_watch[idx] = bp; + err = 0; break; } +out: return err; } diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 9f7195a5773ee66138bc5170508f8c251f501e47..b7ad41d7b6eeafe57179c1adfa6a525d644f7c75 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -214,7 +214,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) * This is the secondary CPU boot entry. We're using this CPUs * idle thread stack, but a set of temporary page tables. */ -asmlinkage void secondary_start_kernel(void) +asmlinkage notrace void secondary_start_kernel(void) { struct mm_struct *mm = &init_mm; unsigned int cpu; diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index 811f04c5760e40c5fd36adc6cb931921cbfa850a..76d27edf33cbffe4b30f72104554b5f1d32b8332 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -57,6 +57,45 @@ static u64 core_reg_offset_from_id(u64 id) return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE); } +static int validate_core_offset(const struct kvm_one_reg *reg) +{ + u64 off = core_reg_offset_from_id(reg->id); + int size; + + switch (off) { + case KVM_REG_ARM_CORE_REG(regs.regs[0]) ... + KVM_REG_ARM_CORE_REG(regs.regs[30]): + case KVM_REG_ARM_CORE_REG(regs.sp): + case KVM_REG_ARM_CORE_REG(regs.pc): + case KVM_REG_ARM_CORE_REG(regs.pstate): + case KVM_REG_ARM_CORE_REG(sp_el1): + case KVM_REG_ARM_CORE_REG(elr_el1): + case KVM_REG_ARM_CORE_REG(spsr[0]) ... + KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]): + size = sizeof(__u64); + break; + + case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ... 
+ KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]): + size = sizeof(__uint128_t); + break; + + case KVM_REG_ARM_CORE_REG(fp_regs.fpsr): + case KVM_REG_ARM_CORE_REG(fp_regs.fpcr): + size = sizeof(__u32); + break; + + default: + return -EINVAL; + } + + if (KVM_REG_SIZE(reg->id) == size && + IS_ALIGNED(off, size / sizeof(__u32))) + return 0; + + return -EINVAL; +} + static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) { /* @@ -76,6 +115,9 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs) return -ENOENT; + if (validate_core_offset(reg)) + return -EINVAL; + if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id))) return -EFAULT; @@ -98,6 +140,9 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs) return -ENOENT; + if (validate_core_offset(reg)) + return -EINVAL; + if (KVM_REG_SIZE(reg->id) > sizeof(tmp)) return -EINVAL; @@ -107,17 +152,25 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) } if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) { - u32 mode = (*(u32 *)valp) & COMPAT_PSR_MODE_MASK; + u64 mode = (*(u64 *)valp) & COMPAT_PSR_MODE_MASK; switch (mode) { case COMPAT_PSR_MODE_USR: + if (!system_supports_32bit_el0()) + return -EINVAL; + break; case COMPAT_PSR_MODE_FIQ: case COMPAT_PSR_MODE_IRQ: case COMPAT_PSR_MODE_SVC: case COMPAT_PSR_MODE_ABT: case COMPAT_PSR_MODE_UND: + if (!vcpu_el1_is_32bit(vcpu)) + return -EINVAL; + break; case PSR_MODE_EL0t: case PSR_MODE_EL1t: case PSR_MODE_EL1h: + if (vcpu_el1_is_32bit(vcpu)) + return -EINVAL; break; default: err = -EINVAL; diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 614af886b7ef4f7348470f8746ea822e8665401a..58470b151bc3fac017024d40abd6308cebefc24a 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -629,13 +629,14 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size, size >> PAGE_SHIFT); return NULL; } - if (!coherent) - __dma_flush_area(page_to_virt(page), iosize); - addr = dma_common_contiguous_remap(page, size, VM_USERMAP, prot, __builtin_return_address(0)); - if (!addr) { + if (addr) { + memset(addr, 0, size); + if (!coherent) + __dma_flush_area(page_to_virt(page), iosize); + } else { iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs); dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT); diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 1190d90e01e6dc1c16556c486b13ddfcfc63aa8d..caa295cd5d09aede3219042ae200fe2ef0b9e164 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -287,7 +287,11 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max) #ifdef CONFIG_HAVE_ARCH_PFN_VALID int pfn_valid(unsigned long pfn) { - return memblock_is_map_memory(pfn << PAGE_SHIFT); + phys_addr_t addr = pfn << PAGE_SHIFT; + + if ((addr >> PAGE_SHIFT) != pfn) + return 0; + return memblock_is_map_memory(addr); } EXPORT_SYMBOL(pfn_valid); #endif diff --git a/arch/m68k/include/asm/mcf_pgalloc.h b/arch/m68k/include/asm/mcf_pgalloc.h index 8b707c249026032ac8bef80c6f1666c105d9b50d..12fe700632f458ea632a18bb9cdccd6660efd241 100644 --- a/arch/m68k/include/asm/mcf_pgalloc.h +++ b/arch/m68k/include/asm/mcf_pgalloc.h @@ -44,6 +44,7 @@ extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address) static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page, unsigned long address) { + 
pgtable_page_dtor(page); __free_page(page); } @@ -74,8 +75,9 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, return page; } -extern inline void pte_free(struct mm_struct *mm, struct page *page) +static inline void pte_free(struct mm_struct *mm, struct page *page) { + pgtable_page_dtor(page); __free_page(page); } diff --git a/arch/mips/Makefile b/arch/mips/Makefile index a96d97a806c9906e826ae8a1086be7eb7a8ff65f..5977884b008e6d798755dbf98ae5d29fda1b015c 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -155,15 +155,11 @@ cflags-$(CONFIG_CPU_R4300) += -march=r4300 -Wa,--trap cflags-$(CONFIG_CPU_VR41XX) += -march=r4100 -Wa,--trap cflags-$(CONFIG_CPU_R4X00) += -march=r4600 -Wa,--trap cflags-$(CONFIG_CPU_TX49XX) += -march=r4600 -Wa,--trap -cflags-$(CONFIG_CPU_MIPS32_R1) += $(call cc-option,-march=mips32,-mips32 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \ - -Wa,-mips32 -Wa,--trap -cflags-$(CONFIG_CPU_MIPS32_R2) += $(call cc-option,-march=mips32r2,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \ - -Wa,-mips32r2 -Wa,--trap +cflags-$(CONFIG_CPU_MIPS32_R1) += -march=mips32 -Wa,--trap +cflags-$(CONFIG_CPU_MIPS32_R2) += -march=mips32r2 -Wa,--trap cflags-$(CONFIG_CPU_MIPS32_R6) += -march=mips32r6 -Wa,--trap -modd-spreg -cflags-$(CONFIG_CPU_MIPS64_R1) += $(call cc-option,-march=mips64,-mips64 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \ - -Wa,-mips64 -Wa,--trap -cflags-$(CONFIG_CPU_MIPS64_R2) += $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \ - -Wa,-mips64r2 -Wa,--trap +cflags-$(CONFIG_CPU_MIPS64_R1) += -march=mips64 -Wa,--trap +cflags-$(CONFIG_CPU_MIPS64_R2) += -march=mips64r2 -Wa,--trap cflags-$(CONFIG_CPU_MIPS64_R6) += -march=mips64r6 -Wa,--trap cflags-$(CONFIG_CPU_R5000) += -march=r5000 -Wa,--trap cflags-$(CONFIG_CPU_R5432) += $(call cc-option,-march=r5400,-march=r5000) \ diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c index f206dafbb0a35f57ccd64bfbefd24bbe112ff4f7..26a058d58d37b2ae10ca634b8025d6a01d0b6346 100644 --- a/arch/mips/ath79/setup.c +++ b/arch/mips/ath79/setup.c @@ -40,6 +40,7 @@ static char ath79_sys_type[ATH79_SYS_TYPE_LEN]; static void ath79_restart(char *command) { + local_irq_disable(); ath79_device_reset_set(AR71XX_RESET_FULL_CHIP); for (;;) if (cpu_wait) diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c index 8c9cbf13d32a0a471bc6f2653bbb3c459b1b2c2c..6054d49e608eec038e1bbd49599bc783270aa09a 100644 --- a/arch/mips/bcm47xx/setup.c +++ b/arch/mips/bcm47xx/setup.c @@ -212,12 +212,6 @@ static int __init bcm47xx_cpu_fixes(void) */ if (bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4706) cpu_wait = NULL; - - /* - * BCM47XX Erratum "R10: PCIe Transactions Periodically Fail" - * Enable ExternalSync for sync instruction to take effect - */ - set_c0_config7(MIPS_CONF7_ES); break; #endif } diff --git a/arch/mips/boot/Makefile b/arch/mips/boot/Makefile index c22da16d67b82f7752f6ba39291e2acd4e18fb47..5c7bfa8478e7522a854c93f89e4990170e58eb3b 100644 --- a/arch/mips/boot/Makefile +++ b/arch/mips/boot/Makefile @@ -118,10 +118,12 @@ ifeq ($(ADDR_BITS),64) itb_addr_cells = 2 endif +targets += vmlinux.its.S + quiet_cmd_its_cat = CAT $@ - cmd_its_cat = cat $^ >$@ + cmd_its_cat = cat $(filter-out $(PHONY), $^) >$@ -$(obj)/vmlinux.its.S: $(addprefix $(srctree)/arch/mips/$(PLATFORM)/,$(ITS_INPUTS)) +$(obj)/vmlinux.its.S: $(addprefix $(srctree)/arch/mips/$(PLATFORM)/,$(ITS_INPUTS)) FORCE $(call if_changed,its_cat) quiet_cmd_cpp_its_S = ITS $@ diff --git a/arch/mips/cavium-octeon/octeon-platform.c 
b/arch/mips/cavium-octeon/octeon-platform.c index 8505db478904b10855eabf266f75ff9dd93e3699..1d92efb82c3729173e01d64b660f961bc42dd9b8 100644 --- a/arch/mips/cavium-octeon/octeon-platform.c +++ b/arch/mips/cavium-octeon/octeon-platform.c @@ -322,6 +322,7 @@ static int __init octeon_ehci_device_init(void) return 0; pd = of_find_device_by_node(ehci_node); + of_node_put(ehci_node); if (!pd) return 0; @@ -384,6 +385,7 @@ static int __init octeon_ohci_device_init(void) return 0; pd = of_find_device_by_node(ohci_node); + of_node_put(ohci_node); if (!pd) return 0; diff --git a/arch/mips/generic/init.c b/arch/mips/generic/init.c index 5ba6fcc26fa726aff50cc3a64315855eb935c1b0..94a78dbbc91f78acb51a8f092ae084646f1c2182 100644 --- a/arch/mips/generic/init.c +++ b/arch/mips/generic/init.c @@ -204,6 +204,7 @@ void __init arch_init_irq(void) "mti,cpu-interrupt-controller"); if (!cpu_has_veic && !intc_node) mips_cpu_irq_init(); + of_node_put(intc_node); irqchip_init(); } diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h index cea8ad864b3f6f416cb45687bfbcb5bd882933a7..57b34257be2bf03826bbf332b4461e834e1f791d 100644 --- a/arch/mips/include/asm/io.h +++ b/arch/mips/include/asm/io.h @@ -141,14 +141,14 @@ static inline void * phys_to_virt(unsigned long address) /* * ISA I/O bus memory addresses are 1:1 with the physical address. */ -static inline unsigned long isa_virt_to_bus(volatile void * address) +static inline unsigned long isa_virt_to_bus(volatile void *address) { - return (unsigned long)address - PAGE_OFFSET; + return virt_to_phys(address); } -static inline void * isa_bus_to_virt(unsigned long address) +static inline void *isa_bus_to_virt(unsigned long address) { - return (void *)(address + PAGE_OFFSET); + return phys_to_virt(address); } #define isa_page_to_bus page_to_phys diff --git a/arch/mips/include/asm/mach-ath79/ath79.h b/arch/mips/include/asm/mach-ath79/ath79.h index 441faa92c3cd488ac97685a1930597f7449a88f4..6e6c0fead776d22b94b444a7ff3c9af91daa9935 100644 --- a/arch/mips/include/asm/mach-ath79/ath79.h +++ b/arch/mips/include/asm/mach-ath79/ath79.h @@ -134,6 +134,7 @@ static inline u32 ath79_pll_rr(unsigned reg) static inline void ath79_reset_wr(unsigned reg, u32 val) { __raw_writel(val, ath79_reset_base + reg); + (void) __raw_readl(ath79_reset_base + reg); /* flush */ } static inline u32 ath79_reset_rr(unsigned reg) diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index 60c787d943b059ed33561bb127e60ce07ef0198c..a6810923b3f0214dfa36eab4a28320a65bba7790 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -680,8 +680,6 @@ #define MIPS_CONF7_WII (_ULCAST_(1) << 31) #define MIPS_CONF7_RPS (_ULCAST_(1) << 2) -/* ExternalSync */ -#define MIPS_CONF7_ES (_ULCAST_(1) << 8) #define MIPS_CONF7_IAR (_ULCAST_(1) << 10) #define MIPS_CONF7_AR (_ULCAST_(1) << 16) @@ -2747,7 +2745,6 @@ __BUILD_SET_C0(status) __BUILD_SET_C0(cause) __BUILD_SET_C0(config) __BUILD_SET_C0(config5) -__BUILD_SET_C0(config7) __BUILD_SET_C0(intcontrol) __BUILD_SET_C0(intctl) __BUILD_SET_C0(srsmap) diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h index 95b8c471f572b07d8d313fbadc96d5f1b71a035a..eb1f6030ab853d9990a6e59695db438c48c8bc70 100644 --- a/arch/mips/include/asm/processor.h +++ b/arch/mips/include/asm/processor.h @@ -141,7 +141,7 @@ struct mips_fpu_struct { #define NUM_DSP_REGS 6 -typedef __u32 dspreg_t; +typedef unsigned long dspreg_t; struct mips_dsp_state { dspreg_t dspr[NUM_DSP_REGS]; @@ -388,7 +388,20 @@ 
unsigned long get_wchan(struct task_struct *p); #define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[29]) #define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status) +#ifdef CONFIG_CPU_LOONGSON3 +/* + * Loongson-3's SFB (Store-Fill-Buffer) may buffer writes indefinitely when a + * tight read loop is executed, because reads take priority over writes & the + * hardware (incorrectly) doesn't ensure that writes will eventually occur. + * + * Since spin loops of any kind should have a cpu_relax() in them, force an SFB + * flush from cpu_relax() such that any pending writes will become visible as + * expected. + */ +#define cpu_relax() smp_mb() +#else #define cpu_relax() barrier() +#endif /* * Return_address is a replacement for __builtin_return_address(count) diff --git a/arch/mips/jz4740/Platform b/arch/mips/jz4740/Platform index 28448d358c10d42c6ed5600ed3a5e5d67919e7e5..a2a5a85ea1f9360d3ff90e7e0419c423b6a0fbdf 100644 --- a/arch/mips/jz4740/Platform +++ b/arch/mips/jz4740/Platform @@ -1,4 +1,4 @@ platform-$(CONFIG_MACH_INGENIC) += jz4740/ cflags-$(CONFIG_MACH_INGENIC) += -I$(srctree)/arch/mips/include/asm/mach-jz4740 load-$(CONFIG_MACH_INGENIC) += 0xffffffff80010000 -zload-$(CONFIG_MACH_INGENIC) += 0xffffffff80600000 +zload-$(CONFIG_MACH_INGENIC) += 0xffffffff81000000 diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index e058cd300713d19bf656bc243f3f6cec9728a962..efffdf2464ab76d85b7b5e35d089323c70f0164f 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -847,7 +847,7 @@ long arch_ptrace(struct task_struct *child, long request, goto out; } dregs = __get_dsp_regs(child); - tmp = (unsigned long) (dregs[addr - DSP_BASE]); + tmp = dregs[addr - DSP_BASE]; break; } case DSP_CONTROL: diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c index 89026d33a07bf7027400e56fa49fcb65da21ea60..6990240785f6c0cd8dbcfeaed237502739f82fe8 100644 --- a/arch/mips/kernel/ptrace32.c +++ b/arch/mips/kernel/ptrace32.c @@ -141,7 +141,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, goto out; } dregs = __get_dsp_regs(child); - tmp = (unsigned long) (dregs[addr - DSP_BASE]); + tmp = dregs[addr - DSP_BASE]; break; } case DSP_CONTROL: diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c index 019035d7225c4fd942c96c6628b6605f8d2af1b4..8f845f6e5f4266568288969b9b19b7357b86598b 100644 --- a/arch/mips/kernel/vdso.c +++ b/arch/mips/kernel/vdso.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -20,6 +21,7 @@ #include #include +#include #include /* Kernel-provided data used by the VDSO. */ @@ -128,12 +130,30 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) vvar_size = gic_size + PAGE_SIZE; size = vvar_size + image->size; + /* + * Find a region that's large enough for us to perform the + * colour-matching alignment below. + */ + if (cpu_has_dc_aliases) + size += shm_align_mask + 1; + base = get_unmapped_area(NULL, 0, size, 0, 0); if (IS_ERR_VALUE(base)) { ret = base; goto out; } + /* + * If we suffer from dcache aliasing, ensure that the VDSO data page + * mapping is coloured the same as the kernel's mapping of that memory. + * This ensures that when the kernel updates the VDSO data userland + * will observe it without requiring cache invalidations. 
+ */ + if (cpu_has_dc_aliases) { + base = __ALIGN_MASK(base, shm_align_mask); + base += ((unsigned long)&vdso_data - gic_size) & shm_align_mask; + } + data_addr = base + gic_size; vdso_addr = data_addr + PAGE_SIZE; diff --git a/arch/mips/lib/multi3.c b/arch/mips/lib/multi3.c index 111ad475aa0cdd111ebe362af928414d9499f666..4c2483f410c26b5b65ded652c1b0694b06a9bf92 100644 --- a/arch/mips/lib/multi3.c +++ b/arch/mips/lib/multi3.c @@ -4,12 +4,12 @@ #include "libgcc.h" /* - * GCC 7 suboptimally generates __multi3 calls for mips64r6, so for that - * specific case only we'll implement it here. + * GCC 7 & older can suboptimally generate __multi3 calls for mips64r6, so for + * that specific case only we implement that intrinsic here. * * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82981 */ -#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ == 7) +#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ < 8) /* multiply 64-bit values, low 64-bits returned */ static inline long long notrace dmulu(long long a, long long b) diff --git a/arch/mips/loongson64/common/cs5536/cs5536_ohci.c b/arch/mips/loongson64/common/cs5536/cs5536_ohci.c index f7c905e50dc415e21eb258b08a69cc61525bd67b..92dc6bafc1271795b3c66b7c21467f3c22214fd0 100644 --- a/arch/mips/loongson64/common/cs5536/cs5536_ohci.c +++ b/arch/mips/loongson64/common/cs5536/cs5536_ohci.c @@ -138,7 +138,7 @@ u32 pci_ohci_read_reg(int reg) break; case PCI_OHCI_INT_REG: _rdmsr(DIVIL_MSR_REG(PIC_YSEL_LOW), &hi, &lo); - if ((lo & 0x00000f00) == CS5536_USB_INTR) + if (((lo >> PIC_YSEL_LOW_USB_SHIFT) & 0xf) == CS5536_USB_INTR) conf_data = 1; break; default: diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index e12dfa48b478dd3ec51369236bb84040c044bd82..a5893b2cdc0e051bb55f69a021c3d4a742309d7b 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -835,7 +835,8 @@ static void r4k_flush_icache_user_range(unsigned long start, unsigned long end) static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size) { /* Catch bad driver code */ - BUG_ON(size == 0); + if (WARN_ON(size == 0)) + return; preempt_disable(); if (cpu_has_inclusive_pcaches) { @@ -871,7 +872,8 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size) static void r4k_dma_cache_inv(unsigned long addr, unsigned long size) { /* Catch bad driver code */ - BUG_ON(size == 0); + if (WARN_ON(size == 0)) + return; preempt_disable(); if (cpu_has_inclusive_pcaches) { diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S index 1b7160c79646be4e136d0f813b4b55c0bfd63827..b16e95a4e875fd742fbaf3ce9aaa3d63b2ba88ce 100644 --- a/arch/openrisc/kernel/entry.S +++ b/arch/openrisc/kernel/entry.S @@ -221,12 +221,6 @@ EXCEPTION_ENTRY(_data_page_fault_handler) l.addi r3,r1,0 // pt_regs /* r4 set be EXCEPTION_HANDLE */ // effective address of fault - /* - * __PHX__: TODO - * - * all this can be written much simpler. 
look at - * DTLB miss handler in the CONFIG_GUARD_PROTECTED_CORE part - */ #ifdef CONFIG_OPENRISC_NO_SPR_SR_DSX l.lwz r6,PT_PC(r3) // address of an offending insn l.lwz r6,0(r6) // instruction that caused pf @@ -258,7 +252,7 @@ EXCEPTION_ENTRY(_data_page_fault_handler) #else - l.lwz r6,PT_SR(r3) // SR + l.mfspr r6,r0,SPR_SR // SR l.andi r6,r6,SPR_SR_DSX // check for delay slot exception l.sfne r6,r0 // exception happened in delay slot l.bnf 7f diff --git a/arch/openrisc/kernel/head.S b/arch/openrisc/kernel/head.S index 1e87913576e304e47ea7da5518436d3e0fe3f94e..90979acdf165b4d8ed74e02b9b1a049ac1f6c720 100644 --- a/arch/openrisc/kernel/head.S +++ b/arch/openrisc/kernel/head.S @@ -141,8 +141,7 @@ * r4 - EEAR exception EA * r10 - current pointing to current_thread_info struct * r12 - syscall 0, since we didn't come from syscall - * r13 - temp it actually contains new SR, not needed anymore - * r31 - handler address of the handler we'll jump to + * r30 - handler address of the handler we'll jump to * * handler has to save remaining registers to the exception * ksp frame *before* tainting them! @@ -178,6 +177,7 @@ /* r1 is KSP, r30 is __pa(KSP) */ ;\ tophys (r30,r1) ;\ l.sw PT_GPR12(r30),r12 ;\ + /* r4 use for tmp before EA */ ;\ l.mfspr r12,r0,SPR_EPCR_BASE ;\ l.sw PT_PC(r30),r12 ;\ l.mfspr r12,r0,SPR_ESR_BASE ;\ @@ -197,7 +197,10 @@ /* r12 == 1 if we come from syscall */ ;\ CLEAR_GPR(r12) ;\ /* ----- turn on MMU ----- */ ;\ - l.ori r30,r0,(EXCEPTION_SR) ;\ + /* Carry DSX into exception SR */ ;\ + l.mfspr r30,r0,SPR_SR ;\ + l.andi r30,r30,SPR_SR_DSX ;\ + l.ori r30,r30,(EXCEPTION_SR) ;\ l.mtspr r0,r30,SPR_ESR_BASE ;\ /* r30: EA address of handler */ ;\ LOAD_SYMBOL_2_GPR(r30,handler) ;\ diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c index 8d8437169b5e7496b589e71c686c06348035fdbd..0d44e8007ad6e16f7cfcac027dabb89903f99d87 100644 --- a/arch/openrisc/kernel/traps.c +++ b/arch/openrisc/kernel/traps.c @@ -358,7 +358,7 @@ static inline int in_delay_slot(struct pt_regs *regs) return 0; } #else - return regs->sr & SPR_SR_DSX; + return mfspr(SPR_SR) & SPR_SR_DSX; #endif } diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h index af03359e6ac5685d6fa93361b5a0f4f4817a8cae..a82776592c8e5e8a346dadb8f753e18082bcccaf 100644 --- a/arch/parisc/include/asm/spinlock.h +++ b/arch/parisc/include/asm/spinlock.h @@ -20,7 +20,6 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *x, { volatile unsigned int *a; - mb(); a = __ldcw_align(x); while (__ldcw(a) == 0) while (*a == 0) @@ -30,16 +29,15 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *x, local_irq_disable(); } else cpu_relax(); - mb(); } static inline void arch_spin_unlock(arch_spinlock_t *x) { volatile unsigned int *a; - mb(); + a = __ldcw_align(x); - *a = 1; mb(); + *a = 1; } static inline int arch_spin_trylock(arch_spinlock_t *x) @@ -47,10 +45,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *x) volatile unsigned int *a; int ret; - mb(); a = __ldcw_align(x); ret = __ldcw(a) != 0; - mb(); return ret; } diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index 4886a6db42e98f2b1fc4c58916ee68dfc98df008..5f7e57fcaeef0333da7482f8af566a0e4da7fc00 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S @@ -629,12 +629,12 @@ cas_action: stw %r1, 4(%sr2,%r20) #endif /* The load and store could fail */ -1: ldw,ma 0(%r26), %r28 +1: ldw 0(%r26), %r28 sub,<> %r28, %r25, %r0 -2: stw,ma %r24, 0(%r26) +2: stw %r24, 0(%r26) /* Free lock */ sync - stw,ma 
%r20, 0(%sr2,%r20) + stw %r20, 0(%sr2,%r20) #if ENABLE_LWS_DEBUG /* Clear thread register indicator */ stw %r0, 4(%sr2,%r20) @@ -798,30 +798,30 @@ cas2_action: ldo 1(%r0),%r28 /* 8bit CAS */ -13: ldb,ma 0(%r26), %r29 +13: ldb 0(%r26), %r29 sub,= %r29, %r25, %r0 b,n cas2_end -14: stb,ma %r24, 0(%r26) +14: stb %r24, 0(%r26) b cas2_end copy %r0, %r28 nop nop /* 16bit CAS */ -15: ldh,ma 0(%r26), %r29 +15: ldh 0(%r26), %r29 sub,= %r29, %r25, %r0 b,n cas2_end -16: sth,ma %r24, 0(%r26) +16: sth %r24, 0(%r26) b cas2_end copy %r0, %r28 nop nop /* 32bit CAS */ -17: ldw,ma 0(%r26), %r29 +17: ldw 0(%r26), %r29 sub,= %r29, %r25, %r0 b,n cas2_end -18: stw,ma %r24, 0(%r26) +18: stw %r24, 0(%r26) b cas2_end copy %r0, %r28 nop @@ -829,10 +829,10 @@ cas2_action: /* 64bit CAS */ #ifdef CONFIG_64BIT -19: ldd,ma 0(%r26), %r29 +19: ldd 0(%r26), %r29 sub,*= %r29, %r25, %r0 b,n cas2_end -20: std,ma %r24, 0(%r26) +20: std %r24, 0(%r26) copy %r0, %r28 #else /* Compare first word */ @@ -851,7 +851,7 @@ cas2_action: cas2_end: /* Free lock */ sync - stw,ma %r20, 0(%sr2,%r20) + stw %r20, 0(%sr2,%r20) /* Enable interrupts */ ssm PSW_SM_I, %r0 /* Return to userspace, set no error */ diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h index 5a23010af600337b89887cab179cf2e9b1b385b0..1e7a33592e297aae7f6e5001c20d3b005eb6d81c 100644 --- a/arch/powerpc/include/asm/fadump.h +++ b/arch/powerpc/include/asm/fadump.h @@ -195,9 +195,6 @@ struct fadump_crash_info_header { struct cpumask online_mask; }; -/* Crash memory ranges */ -#define INIT_CRASHMEM_RANGES (INIT_MEMBLOCK_REGIONS + 2) - struct fad_crash_memory_ranges { unsigned long long base; unsigned long long size; diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index 11f4bd07cce0ee0fde57d031634a3b6d2a3b1dda..565cead12be2fb3be3535a44d224a8ff9facc9b3 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -223,10 +223,17 @@ do { \ } \ } while (0) +/* + * This is a type: either unsigned long, if the argument fits into + * that type, or otherwise unsigned long long. 
+ */ +#define __long_type(x) \ + __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL)) + #define __get_user_nocheck(x, ptr, size) \ ({ \ long __gu_err; \ - unsigned long __gu_val; \ + __long_type(*(ptr)) __gu_val; \ const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ __chk_user_ptr(ptr); \ if (!is_kernel_addr((unsigned long)__gu_addr)) \ @@ -239,7 +246,7 @@ do { \ #define __get_user_check(x, ptr, size) \ ({ \ long __gu_err = -EFAULT; \ - unsigned long __gu_val = 0; \ + __long_type(*(ptr)) __gu_val = 0; \ const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ might_fault(); \ if (access_ok(VERIFY_READ, __gu_addr, (size))) \ @@ -251,7 +258,7 @@ do { \ #define __get_user_nosleep(x, ptr, size) \ ({ \ long __gu_err; \ - unsigned long __gu_val; \ + __long_type(*(ptr)) __gu_val; \ const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ __chk_user_ptr(ptr); \ __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index c09f0a6f84954260775a792c9b600d8a3210d2dd..f65bb53df43bf0e12ad586c1b049b0dc61f8aed1 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -1452,6 +1452,8 @@ TRAMP_REAL_BEGIN(stf_barrier_fallback) TRAMP_REAL_BEGIN(rfi_flush_fallback) SET_SCRATCH0(r13); GET_PACA(r13); + std r1,PACA_EXRFI+EX_R12(r13) + ld r1,PACAKSAVE(r13) std r9,PACA_EXRFI+EX_R9(r13) std r10,PACA_EXRFI+EX_R10(r13) std r11,PACA_EXRFI+EX_R11(r13) @@ -1486,12 +1488,15 @@ TRAMP_REAL_BEGIN(rfi_flush_fallback) ld r9,PACA_EXRFI+EX_R9(r13) ld r10,PACA_EXRFI+EX_R10(r13) ld r11,PACA_EXRFI+EX_R11(r13) + ld r1,PACA_EXRFI+EX_R12(r13) GET_SCRATCH0(r13); rfid TRAMP_REAL_BEGIN(hrfi_flush_fallback) SET_SCRATCH0(r13); GET_PACA(r13); + std r1,PACA_EXRFI+EX_R12(r13) + ld r1,PACAKSAVE(r13) std r9,PACA_EXRFI+EX_R9(r13) std r10,PACA_EXRFI+EX_R10(r13) std r11,PACA_EXRFI+EX_R11(r13) @@ -1526,6 +1531,7 @@ TRAMP_REAL_BEGIN(hrfi_flush_fallback) ld r9,PACA_EXRFI+EX_R9(r13) ld r10,PACA_EXRFI+EX_R10(r13) ld r11,PACA_EXRFI+EX_R11(r13) + ld r1,PACA_EXRFI+EX_R12(r13) GET_SCRATCH0(r13); hrfid diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index d0020bc1f2095c6d1433dc390d30aa327b99a9e6..5a6470383ca3968af8b3f5d3eb0856238d746435 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -47,8 +47,10 @@ static struct fadump_mem_struct fdm; static const struct fadump_mem_struct *fdm_active; static DEFINE_MUTEX(fadump_mutex); -struct fad_crash_memory_ranges crash_memory_ranges[INIT_CRASHMEM_RANGES]; +struct fad_crash_memory_ranges *crash_memory_ranges; +int crash_memory_ranges_size; int crash_mem_ranges; +int max_crash_mem_ranges; /* Scan the Firmware Assisted dump configuration details. */ int __init early_init_dt_scan_fw_dump(unsigned long node, @@ -843,38 +845,88 @@ static int __init process_fadump(const struct fadump_mem_struct *fdm_active) return 0; } -static inline void fadump_add_crash_memory(unsigned long long base, - unsigned long long end) +static void free_crash_memory_ranges(void) +{ + kfree(crash_memory_ranges); + crash_memory_ranges = NULL; + crash_memory_ranges_size = 0; + max_crash_mem_ranges = 0; +} + +/* + * Allocate or reallocate crash memory ranges array in incremental units + * of PAGE_SIZE. 
+ */ +static int allocate_crash_memory_ranges(void) +{ + struct fad_crash_memory_ranges *new_array; + u64 new_size; + + new_size = crash_memory_ranges_size + PAGE_SIZE; + pr_debug("Allocating %llu bytes of memory for crash memory ranges\n", + new_size); + + new_array = krealloc(crash_memory_ranges, new_size, GFP_KERNEL); + if (new_array == NULL) { + pr_err("Insufficient memory for setting up crash memory ranges\n"); + free_crash_memory_ranges(); + return -ENOMEM; + } + + crash_memory_ranges = new_array; + crash_memory_ranges_size = new_size; + max_crash_mem_ranges = (new_size / + sizeof(struct fad_crash_memory_ranges)); + return 0; +} + +static inline int fadump_add_crash_memory(unsigned long long base, + unsigned long long end) { if (base == end) - return; + return 0; + + if (crash_mem_ranges == max_crash_mem_ranges) { + int ret; + + ret = allocate_crash_memory_ranges(); + if (ret) + return ret; + } pr_debug("crash_memory_range[%d] [%#016llx-%#016llx], %#llx bytes\n", crash_mem_ranges, base, end - 1, (end - base)); crash_memory_ranges[crash_mem_ranges].base = base; crash_memory_ranges[crash_mem_ranges].size = end - base; crash_mem_ranges++; + return 0; } -static void fadump_exclude_reserved_area(unsigned long long start, +static int fadump_exclude_reserved_area(unsigned long long start, unsigned long long end) { unsigned long long ra_start, ra_end; + int ret = 0; ra_start = fw_dump.reserve_dump_area_start; ra_end = ra_start + fw_dump.reserve_dump_area_size; if ((ra_start < end) && (ra_end > start)) { if ((start < ra_start) && (end > ra_end)) { - fadump_add_crash_memory(start, ra_start); - fadump_add_crash_memory(ra_end, end); + ret = fadump_add_crash_memory(start, ra_start); + if (ret) + return ret; + + ret = fadump_add_crash_memory(ra_end, end); } else if (start < ra_start) { - fadump_add_crash_memory(start, ra_start); + ret = fadump_add_crash_memory(start, ra_start); } else if (ra_end < end) { - fadump_add_crash_memory(ra_end, end); + ret = fadump_add_crash_memory(ra_end, end); } } else - fadump_add_crash_memory(start, end); + ret = fadump_add_crash_memory(start, end); + + return ret; } static int fadump_init_elfcore_header(char *bufp) @@ -914,10 +966,11 @@ static int fadump_init_elfcore_header(char *bufp) * Traverse through memblock structure and setup crash memory ranges. These * ranges will be used create PT_LOAD program headers in elfcore header. */ -static void fadump_setup_crash_memory_ranges(void) +static int fadump_setup_crash_memory_ranges(void) { struct memblock_region *reg; unsigned long long start, end; + int ret; pr_debug("Setup crash memory ranges.\n"); crash_mem_ranges = 0; @@ -928,7 +981,9 @@ static void fadump_setup_crash_memory_ranges(void) * specified during fadump registration. We need to create a separate * program header for this chunk with the correct offset. */ - fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size); + ret = fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size); + if (ret) + return ret; for_each_memblock(memory, reg) { start = (unsigned long long)reg->base; @@ -948,8 +1003,12 @@ static void fadump_setup_crash_memory_ranges(void) } /* add this range excluding the reserved dump area. 
*/ - fadump_exclude_reserved_area(start, end); + ret = fadump_exclude_reserved_area(start, end); + if (ret) + return ret; } + + return 0; } /* @@ -1072,6 +1131,7 @@ static int register_fadump(void) { unsigned long addr; void *vaddr; + int ret; /* * If no memory is reserved then we can not register for firmware- @@ -1080,7 +1140,9 @@ static int register_fadump(void) if (!fw_dump.reserve_dump_area_size) return -ENODEV; - fadump_setup_crash_memory_ranges(); + ret = fadump_setup_crash_memory_ranges(); + if (ret) + return ret; addr = be64_to_cpu(fdm.rmr_region.destination_address) + be64_to_cpu(fdm.rmr_region.source_len); /* Initialize fadump crash info header. */ @@ -1158,6 +1220,7 @@ void fadump_cleanup(void) } else if (fw_dump.dump_registered) { /* Un-register Firmware-assisted dump if it was registered. */ fadump_unregister_dump(&fdm); + free_crash_memory_ranges(); } } diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c index 2694d078741d08c9f32a05c7ea6323867ddde63d..9dafd7af39b8f517b68ab254e1ec134fffd993e8 100644 --- a/arch/powerpc/kernel/machine_kexec.c +++ b/arch/powerpc/kernel/machine_kexec.c @@ -186,7 +186,12 @@ void __init reserve_crashkernel(void) (unsigned long)(crashk_res.start >> 20), (unsigned long)(memblock_phys_mem_size() >> 20)); - memblock_reserve(crashk_res.start, crash_size); + if (!memblock_is_region_memory(crashk_res.start, crash_size) || + memblock_reserve(crashk_res.start, crash_size)) { + pr_err("Failed to reserve memory for crashkernel!\n"); + crashk_res.start = crashk_res.end = 0; + return; + } } int overlaps_crashkernel(unsigned long start, unsigned long size) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 377d1420bd0243cc593e010657ac25351d9770b3..58746328b9bd6df7854973b5bea9fff040b2835c 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -4356,6 +4356,8 @@ static int kvmppc_book3s_init_hv(void) pr_err("KVM-HV: Cannot determine method for accessing XICS\n"); return -ENODEV; } + /* presence of intc confirmed - node can be dropped again */ + of_node_put(np); } #endif diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c index 816055927ee47ba05db7e1d3675461de66052636..d735937d975c8dca1b32f021e04ecb5fb47e703c 100644 --- a/arch/powerpc/mm/mmu_context_iommu.c +++ b/arch/powerpc/mm/mmu_context_iommu.c @@ -130,6 +130,7 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries, long i, j, ret = 0, locked_entries = 0; unsigned int pageshift; unsigned long flags; + unsigned long cur_ua; struct page *page = NULL; mutex_lock(&mem_list_mutex); @@ -178,7 +179,8 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries, } for (i = 0; i < entries; ++i) { - if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT), + cur_ua = ua + (i << PAGE_SHIFT); + if (1 != get_user_pages_fast(cur_ua, 1/* pages */, 1/* iswrite */, &page)) { ret = -EFAULT; for (j = 0; j < i; ++j) @@ -197,7 +199,7 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries, if (is_migrate_cma_page(page)) { if (mm_iommu_move_page_from_cma(page)) goto populate; - if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT), + if (1 != get_user_pages_fast(cur_ua, 1/* pages */, 1/* iswrite */, &page)) { ret = -EFAULT; @@ -211,20 +213,21 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries, } populate: pageshift = PAGE_SHIFT; - if (PageCompound(page)) { + if (mem->pageshift > PAGE_SHIFT && PageCompound(page)) 
{ pte_t *pte; struct page *head = compound_head(page); unsigned int compshift = compound_order(head); + unsigned int pteshift; local_irq_save(flags); /* disables as well */ - pte = find_linux_pte(mm->pgd, ua, NULL, &pageshift); - local_irq_restore(flags); + pte = find_linux_pte(mm->pgd, cur_ua, NULL, &pteshift); /* Double check it is still the same pinned page */ if (pte && pte_page(*pte) == head && - pageshift == compshift) - pageshift = max_t(unsigned int, pageshift, + pteshift == compshift + PAGE_SHIFT) + pageshift = max_t(unsigned int, pteshift, PAGE_SHIFT); + local_irq_restore(flags); } mem->pageshift = min(mem->pageshift, pageshift); mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT; diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c index 254634fb3fc75198b037c3a8e68da1baf16ac424..fee1e1f8c9d3a059db0144009cf523bc69c1854e 100644 --- a/arch/powerpc/net/bpf_jit_comp64.c +++ b/arch/powerpc/net/bpf_jit_comp64.c @@ -322,6 +322,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u64 imm64; u8 *func; u32 true_cond; + u32 tmp_idx; /* * addrs[] maps a BPF bytecode address into a real offset from @@ -681,11 +682,7 @@ emit_clear: case BPF_STX | BPF_XADD | BPF_W: /* Get EA into TMP_REG_1 */ PPC_ADDI(b2p[TMP_REG_1], dst_reg, off); - /* error if EA is not word-aligned */ - PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x03); - PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + 12); - PPC_LI(b2p[BPF_REG_0], 0); - PPC_JMP(exit_addr); + tmp_idx = ctx->idx * 4; /* load value from memory into TMP_REG_2 */ PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0); /* add value from src_reg into this */ @@ -693,32 +690,16 @@ emit_clear: /* store result back */ PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]); /* we're done if this succeeded */ - PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4)); - /* otherwise, let's try once more */ - PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0); - PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg); - PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]); - /* exit if the store was not successful */ - PPC_LI(b2p[BPF_REG_0], 0); - PPC_BCC(COND_NE, exit_addr); + PPC_BCC_SHORT(COND_NE, tmp_idx); break; /* *(u64 *)(dst + off) += src */ case BPF_STX | BPF_XADD | BPF_DW: PPC_ADDI(b2p[TMP_REG_1], dst_reg, off); - /* error if EA is not doubleword-aligned */ - PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x07); - PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (3*4)); - PPC_LI(b2p[BPF_REG_0], 0); - PPC_JMP(exit_addr); - PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0); - PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg); - PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]); - PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4)); + tmp_idx = ctx->idx * 4; PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0); PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg); PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]); - PPC_LI(b2p[BPF_REG_0], 0); - PPC_BCC(COND_NE, exit_addr); + PPC_BCC_SHORT(COND_NE, tmp_idx); break; /* diff --git a/arch/powerpc/platforms/85xx/t1042rdb_diu.c b/arch/powerpc/platforms/85xx/t1042rdb_diu.c index 58fa3d319f1c118247732b6d2e7e7583279ea450..dac36ba82fea8555576cd63c4d2e29c343b605da 100644 --- a/arch/powerpc/platforms/85xx/t1042rdb_diu.c +++ b/arch/powerpc/platforms/85xx/t1042rdb_diu.c @@ -9,8 +9,10 @@ * option) any later version. 
*/ +#include #include #include +#include #include #include @@ -150,3 +152,5 @@ static int __init t1042rdb_diu_init(void) } early_initcall(t1042rdb_diu_init); + +MODULE_LICENSE("GPL"); diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c index 63f007f2de7eb295b9bb78821e2dca596a96e56f..4b95bdde22aa7874499a1a9b4e71c691798d72b1 100644 --- a/arch/powerpc/platforms/powernv/npu-dma.c +++ b/arch/powerpc/platforms/powernv/npu-dma.c @@ -427,8 +427,9 @@ static int get_mmio_atsd_reg(struct npu *npu) int i; for (i = 0; i < npu->mmio_atsd_count; i++) { - if (!test_and_set_bit_lock(i, &npu->mmio_atsd_usage)) - return i; + if (!test_bit(i, &npu->mmio_atsd_usage)) + if (!test_and_set_bit_lock(i, &npu->mmio_atsd_usage)) + return i; } return -ENOSPC; diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index 65c79ecf5a4d685c3c2d5035a18b196bb9664f63..c8a743af6bf507d2a0f2d63e34bb20234a275d83 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c @@ -388,7 +388,7 @@ int opal_put_chars(uint32_t vtermno, const char *data, int total_len) /* Closed or other error drop */ if (rc != OPAL_SUCCESS && rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT) { - written = total_len; + written += total_len; break; } if (rc == OPAL_SUCCESS) { diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 677b29ef4532b15c8dfa2aec01b2405ced8454f3..ddef22e00ddd74e84e38ec2c6ec2193e36741112 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -2787,7 +2787,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset, level_shift = entries_shift + 3; level_shift = max_t(unsigned, level_shift, PAGE_SHIFT); - if ((level_shift - 3) * levels + page_shift >= 60) + if ((level_shift - 3) * levels + page_shift >= 55) return -EINVAL; /* Allocate TCE table */ @@ -3286,12 +3286,49 @@ static void pnv_pci_ioda_create_dbgfs(void) #endif /* CONFIG_DEBUG_FS */ } +static void pnv_pci_enable_bridge(struct pci_bus *bus) +{ + struct pci_dev *dev = bus->self; + struct pci_bus *child; + + /* Empty bus ? bail */ + if (list_empty(&bus->devices)) + return; + + /* + * If there's a bridge associated with that bus enable it. This works + * around races in the generic code if the enabling is done during + * parallel probing. This can be removed once those races have been + * fixed. 
+ */ + if (dev) { + int rc = pci_enable_device(dev); + if (rc) + pci_err(dev, "Error enabling bridge (%d)\n", rc); + pci_set_master(dev); + } + + /* Perform the same to child busses */ + list_for_each_entry(child, &bus->children, node) + pnv_pci_enable_bridge(child); +} + +static void pnv_pci_enable_bridges(void) +{ + struct pci_controller *hose; + + list_for_each_entry(hose, &hose_list, list_node) + pnv_pci_enable_bridge(hose->bus); +} + static void pnv_pci_ioda_fixup(void) { pnv_pci_ioda_setup_PEs(); pnv_pci_ioda_setup_iommu_api(); pnv_pci_ioda_create_dbgfs(); + pnv_pci_enable_bridges(); + #ifdef CONFIG_EEH eeh_init(); eeh_addr_cache_build(); diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c index 5e1ef915018208c3511ef0e91c0064c8c9474389..99d1152ae22413a5b8139ad9c9f0d9328a96d336 100644 --- a/arch/powerpc/platforms/pseries/ras.c +++ b/arch/powerpc/platforms/pseries/ras.c @@ -360,7 +360,7 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs) } savep = __va(regs->gpr[3]); - regs->gpr[3] = savep[0]; /* restore original r3 */ + regs->gpr[3] = be64_to_cpu(savep[0]); /* restore original r3 */ /* If it isn't an extended log we can use the per cpu 64bit buffer */ h = (struct rtas_error_log *)&savep[1]; @@ -371,7 +371,7 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs) int len, error_log_length; error_log_length = 8 + rtas_error_extended_log_length(h); - len = max_t(int, error_log_length, RTAS_ERROR_LOG_MAX); + len = min_t(int, error_log_length, RTAS_ERROR_LOG_MAX); memset(global_mce_data_buf, 0, RTAS_ERROR_LOG_MAX); memcpy(global_mce_data_buf, h, len); errhdr = (struct rtas_error_log *)global_mce_data_buf; diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c index eb69a51862431cccc81034f1fb9a1c8decc906d2..280e964e1aa8873db4491f42df93ddc115e85509 100644 --- a/arch/powerpc/sysdev/mpic_msgr.c +++ b/arch/powerpc/sysdev/mpic_msgr.c @@ -196,7 +196,7 @@ static int mpic_msgr_probe(struct platform_device *dev) /* IO map the message register block. 
*/ of_address_to_resource(np, 0, &rsrc); - msgr_block_addr = ioremap(rsrc.start, rsrc.end - rsrc.start); + msgr_block_addr = ioremap(rsrc.start, resource_size(&rsrc)); if (!msgr_block_addr) { dev_err(&dev->dev, "Failed to iomap MPIC message registers"); return -EFAULT; diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c index a4e903ed7e21c0ccc7ceb66944ac6b43662003ea..b429aceff050a7bfd6d7abcf68cd95c08d6238ba 100644 --- a/arch/s390/crypto/paes_s390.c +++ b/arch/s390/crypto/paes_s390.c @@ -212,7 +212,7 @@ static int cbc_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier, walk->dst.virt.addr, walk->src.virt.addr, n); if (k) ret = blkcipher_walk_done(desc, walk, nbytes - k); - if (n < k) { + if (k < n) { if (__cbc_paes_set_key(ctx) != 0) return blkcipher_walk_done(desc, walk, -EIO); memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE); diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h index de11ecc99c7c46cf967a96c573f1a6c6d2cafc46..9c9970a5dfb10798ddd459dbcff7beae2d9ea42c 100644 --- a/arch/s390/include/asm/qdio.h +++ b/arch/s390/include/asm/qdio.h @@ -262,7 +262,6 @@ struct qdio_outbuf_state { void *user; }; -#define QDIO_OUTBUF_STATE_FLAG_NONE 0x00 #define QDIO_OUTBUF_STATE_FLAG_PENDING 0x01 #define CHSC_AC1_INITIATE_INPUTQ 0x80 diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c index 9f5ea9d870690aea40742e4d5b3181bb637623cc..9b0216d571adc04b6bb9876c14d756cb297a4bbc 100644 --- a/arch/s390/kernel/crash_dump.c +++ b/arch/s390/kernel/crash_dump.c @@ -404,11 +404,13 @@ static void *get_vmcoreinfo_old(unsigned long *size) if (copy_oldmem_kernel(nt_name, addr + sizeof(note), sizeof(nt_name) - 1)) return NULL; - if (strcmp(nt_name, "VMCOREINFO") != 0) + if (strcmp(nt_name, VMCOREINFO_NOTE_NAME) != 0) return NULL; vmcoreinfo = kzalloc_panic(note.n_descsz); - if (copy_oldmem_kernel(vmcoreinfo, addr + 24, note.n_descsz)) + if (copy_oldmem_kernel(vmcoreinfo, addr + 24, note.n_descsz)) { + kfree(vmcoreinfo); return NULL; + } *size = note.n_descsz; return vmcoreinfo; } @@ -418,15 +420,20 @@ static void *get_vmcoreinfo_old(unsigned long *size) */ static void *nt_vmcoreinfo(void *ptr) { + const char *name = VMCOREINFO_NOTE_NAME; unsigned long size; void *vmcoreinfo; vmcoreinfo = os_info_old_entry(OS_INFO_VMCOREINFO, &size); - if (!vmcoreinfo) - vmcoreinfo = get_vmcoreinfo_old(&size); + if (vmcoreinfo) + return nt_init_name(ptr, 0, vmcoreinfo, size, name); + + vmcoreinfo = get_vmcoreinfo_old(&size); if (!vmcoreinfo) return ptr; - return nt_init_name(ptr, 0, vmcoreinfo, size, "VMCOREINFO"); + ptr = nt_init_name(ptr, 0, vmcoreinfo, size, name); + kfree(vmcoreinfo); + return ptr; } /* diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c index a441cba8d165c5c034cfa5bb72628eaf885f1498..b0fad29c1427fccf76584e6d434ca83d6d877949 100644 --- a/arch/s390/kernel/sysinfo.c +++ b/arch/s390/kernel/sysinfo.c @@ -59,6 +59,8 @@ int stsi(void *sysinfo, int fc, int sel1, int sel2) } EXPORT_SYMBOL(stsi); +#ifdef CONFIG_PROC_FS + static bool convert_ext_name(unsigned char encoding, char *name, size_t len) { switch (encoding) { @@ -311,6 +313,8 @@ static int __init sysinfo_create_proc(void) } device_initcall(sysinfo_create_proc); +#endif /* CONFIG_PROC_FS */ + /* * Service levels interface. 
*/ diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index 4f1f5fc8139d881078720f42f4e6d907ee3fa342..061906f98dc5cc95cd3c965535d0dd9cb2083a9e 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c @@ -170,7 +170,8 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) return set_validity_icpt(scb_s, 0x0039U); /* copy only the wrapping keys */ - if (read_guest_real(vcpu, crycb_addr + 72, &vsie_page->crycb, 56)) + if (read_guest_real(vcpu, crycb_addr + 72, + vsie_page->crycb.dea_wrapping_key_mask, 56)) return set_validity_icpt(scb_s, 0x0035U); scb_s->ecb3 |= ecb3_flags; diff --git a/arch/s390/lib/mem.S b/arch/s390/lib/mem.S index e1fa974ac5005e7909996d9075fd30c13964cd8a..37e52118d7e9ef23ed28701f67a41514caf40605 100644 --- a/arch/s390/lib/mem.S +++ b/arch/s390/lib/mem.S @@ -17,7 +17,7 @@ ENTRY(memmove) ltgr %r4,%r4 lgr %r1,%r2 - bzr %r14 + jz .Lmemmove_exit aghi %r4,-1 clgr %r2,%r3 jnh .Lmemmove_forward @@ -36,6 +36,7 @@ ENTRY(memmove) .Lmemmove_forward_remainder: larl %r5,.Lmemmove_mvc ex %r4,0(%r5) +.Lmemmove_exit: BR_EX %r14 .Lmemmove_reverse: ic %r0,0(%r4,%r3) @@ -65,7 +66,7 @@ EXPORT_SYMBOL(memmove) */ ENTRY(memset) ltgr %r4,%r4 - bzr %r14 + jz .Lmemset_exit ltgr %r3,%r3 jnz .Lmemset_fill aghi %r4,-1 @@ -80,12 +81,13 @@ ENTRY(memset) .Lmemset_clear_remainder: larl %r3,.Lmemset_xc ex %r4,0(%r3) +.Lmemset_exit: BR_EX %r14 .Lmemset_fill: stc %r3,0(%r2) cghi %r4,1 lgr %r1,%r2 - ber %r14 + je .Lmemset_fill_exit aghi %r4,-2 srlg %r3,%r4,8 ltgr %r3,%r3 @@ -97,6 +99,7 @@ ENTRY(memset) .Lmemset_fill_remainder: larl %r3,.Lmemset_mvc ex %r4,0(%r3) +.Lmemset_fill_exit: BR_EX %r14 .Lmemset_xc: xc 0(1,%r1),0(%r1) @@ -111,7 +114,7 @@ EXPORT_SYMBOL(memset) */ ENTRY(memcpy) ltgr %r4,%r4 - bzr %r14 + jz .Lmemcpy_exit aghi %r4,-1 srlg %r5,%r4,8 ltgr %r5,%r5 @@ -120,6 +123,7 @@ ENTRY(memcpy) .Lmemcpy_remainder: larl %r5,.Lmemcpy_mvc ex %r4,0(%r5) +.Lmemcpy_exit: BR_EX %r14 .Lmemcpy_loop: mvc 0(256,%r1),0(%r3) diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c index 920d408945359351e20e2ae397598390a0ad2f27..290e71e57541cb4c7b1c512aff333797e31684ee 100644 --- a/arch/s390/mm/extmem.c +++ b/arch/s390/mm/extmem.c @@ -80,7 +80,7 @@ struct qin64 { struct dcss_segment { struct list_head list; char dcss_name[8]; - char res_name[15]; + char res_name[16]; unsigned long start_addr; unsigned long end; atomic_t ref_count; @@ -433,7 +433,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long memcpy(&seg->res_name, seg->dcss_name, 8); EBCASC(seg->res_name, 8); seg->res_name[8] = '\0'; - strncat(seg->res_name, " (DCSS)", 7); + strlcat(seg->res_name, " (DCSS)", sizeof(seg->res_name)); seg->res->name = seg->res_name; rc = seg->vm_segtype; if (rc == SEG_TYPE_SC || diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 242b78c0a9ec84ba7a956d197cdd887bf86c9ebd..40f1888bc4ab7d9504d47190ac36a1692aa3671b 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -486,6 +486,8 @@ retry: /* No reason to continue if interrupted by SIGKILL. 
*/ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) { fault = VM_FAULT_SIGNAL; + if (flags & FAULT_FLAG_RETRY_NOWAIT) + goto out_up; goto out; } if (unlikely(fault & VM_FAULT_ERROR)) diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c index 382153ff17e30e5203b217654d6b25fa0ddd78e6..dc3cede7f2ec9df1a886fc3e92821f2b7f9c6195 100644 --- a/arch/s390/mm/page-states.c +++ b/arch/s390/mm/page-states.c @@ -271,7 +271,7 @@ void arch_set_page_states(int make_stable) list_for_each(l, &zone->free_area[order].free_list[t]) { page = list_entry(l, struct page, lru); if (make_stable) - set_page_stable_dat(page, 0); + set_page_stable_dat(page, order); else set_page_unused(page, order); } diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c index 334b6d103cbd191ce877f7a65b696d1384de96e2..29653f713162d3f2d2377ce55aef5f754f98452f 100644 --- a/arch/s390/mm/pgalloc.c +++ b/arch/s390/mm/pgalloc.c @@ -27,7 +27,7 @@ static struct ctl_table page_table_sysctl[] = { .data = &page_table_allocate_pgste, .maxlen = sizeof(int), .mode = S_IRUGO | S_IWUSR, - .proc_handler = proc_dointvec, + .proc_handler = proc_dointvec_minmax, .extra1 = &page_table_allocate_pgste_min, .extra2 = &page_table_allocate_pgste_max, }, diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 11cd151733d43a4a8e9d0c992a4005ca8dc1e509..6b1474fa99ab3f98f68227fa97fc4d0768445ac3 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -518,8 +518,6 @@ static void bpf_jit_epilogue(struct bpf_jit *jit) /* br %r1 */ _EMIT2(0x07f1); } else { - /* larl %r1,.+14 */ - EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14); /* ex 0,S390_lowcore.br_r1_tampoline */ EMIT4_DISP(0x44000000, REG_0, REG_0, offsetof(struct lowcore, br_r1_trampoline)); @@ -1403,6 +1401,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp) goto free_addrs; } if (bpf_jit_prog(&jit, fp)) { + bpf_jit_binary_free(header); fp = orig_fp; goto free_addrs; } diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c index 06a80434cfe63f69c7c7ff2e86caba734161484e..5bd374491f9461467790e1e0522f838827f094df 100644 --- a/arch/s390/numa/numa.c +++ b/arch/s390/numa/numa.c @@ -134,26 +134,14 @@ void __init numa_setup(void) { pr_info("NUMA mode: %s\n", mode->name); nodes_clear(node_possible_map); + /* Initially attach all possible CPUs to node 0. */ + cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask); if (mode->setup) mode->setup(); numa_setup_memory(); memblock_dump_all(); } -/* - * numa_init_early() - Initialization initcall - * - * This runs when only one CPU is online and before the first - * topology update is called for by the scheduler. - */ -static int __init numa_init_early(void) -{ - /* Attach all possible CPUs to node 0 for now. 
*/ - cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask); - return 0; -} -early_initcall(numa_init_early); - /* * numa_init_late() - Initialization initcall * diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 0fe649c0d5423a2ed51fcff4dc7d011204fdf4e9..960c4a362d8cdeb525bf020729cd8007bfa836ec 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -420,6 +420,8 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) hwirq = 0; for_each_pci_msi_entry(msi, pdev) { rc = -EIO; + if (hwirq >= msi_vecs) + break; irq = irq_alloc_desc(0); /* Alloc irq on node 0 */ if (irq < 0) return -ENOMEM; diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild index 80ddc01f57ac347957a2e05348ada69ca47e226f..fcbc0c0aa087db6adfb91a4b40c6a666922ae96f 100644 --- a/arch/sparc/include/asm/Kbuild +++ b/arch/sparc/include/asm/Kbuild @@ -14,6 +14,7 @@ generic-y += local64.h generic-y += mcs_spinlock.h generic-y += mm-arch-hooks.h generic-y += module.h +generic-y += msi.h generic-y += preempt.h generic-y += rwsem.h generic-y += serial.h diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c index 990703b7cf4d7d47d26152e510706bc32528d488..4b7719b2a73c671d35412d27334c62001e32a866 100644 --- a/arch/sparc/kernel/sys_sparc_32.c +++ b/arch/sparc/kernel/sys_sparc_32.c @@ -204,23 +204,27 @@ SYSCALL_DEFINE5(rt_sigaction, int, sig, asmlinkage long sys_getdomainname(char __user *name, int len) { - int nlen, err; - + int nlen, err; + char tmp[__NEW_UTS_LEN + 1]; + if (len < 0) return -EINVAL; - down_read(&uts_sem); - + down_read(&uts_sem); + nlen = strlen(utsname()->domainname) + 1; err = -EINVAL; if (nlen > len) - goto out; + goto out_unlock; + memcpy(tmp, utsname()->domainname, nlen); - err = -EFAULT; - if (!copy_to_user(name, utsname()->domainname, nlen)) - err = 0; + up_read(&uts_sem); -out: + if (copy_to_user(name, tmp, nlen)) + return -EFAULT; + return 0; + +out_unlock: up_read(&uts_sem); return err; } diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index 55416db482add475baf4c2fc312605577442470f..d79c1c74873cfdcf0ac3e26c521a4623508d14dc 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c @@ -527,23 +527,27 @@ extern void check_pending(int signum); SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len) { - int nlen, err; + int nlen, err; + char tmp[__NEW_UTS_LEN + 1]; if (len < 0) return -EINVAL; - down_read(&uts_sem); - + down_read(&uts_sem); + nlen = strlen(utsname()->domainname) + 1; err = -EINVAL; if (nlen > len) - goto out; + goto out_unlock; + memcpy(tmp, utsname()->domainname, nlen); + + up_read(&uts_sem); - err = -EFAULT; - if (!copy_to_user(name, utsname()->domainname, nlen)) - err = 0; + if (copy_to_user(name, tmp, nlen)) + return -EFAULT; + return 0; -out: +out_unlock: up_read(&uts_sem); return err; } diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c index 3b397081047af69a23b6672917e1706786d3134f..83aaf4888999281a3c20f19c014a13d7b7521d65 100644 --- a/arch/sparc/kernel/time_64.c +++ b/arch/sparc/kernel/time_64.c @@ -813,7 +813,7 @@ static void __init get_tick_patch(void) } } -static void init_tick_ops(struct sparc64_tick_ops *ops) +static void __init init_tick_ops(struct sparc64_tick_ops *ops) { unsigned long freq, quotient, tick; diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 74ed661c73df3016c3ad5044fd5c4ffe36b5ee84..7764f936d6ab1fd298e777fff62ae053f384dbc3 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -171,6 +171,7 @@ config X86 
select HAVE_PERF_USER_STACK_DUMP select HAVE_PREEMPT_LAZY select HAVE_RCU_TABLE_FREE + select HAVE_RCU_TABLE_INVALIDATE if HAVE_RCU_TABLE_FREE select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_RELIABLE_STACKTRACE if X86_64 && UNWINDER_FRAME_POINTER && STACK_VALIDATION select HAVE_STACK_VALIDATION if X86_64 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 98018a621f6b0c4d0dcc32fed7e5ac94c7d3cf93..3a250ca2406c0d606f6386cdded977b56ddccffc 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -104,9 +104,13 @@ define cmd_check_data_rel done endef +# We need to run two commands under "if_changed", so merge them into a +# single invocation. +quiet_cmd_check-and-link-vmlinux = LD $@ + cmd_check-and-link-vmlinux = $(cmd_check_data_rel); $(cmd_ld) + $(obj)/vmlinux: $(vmlinux-objs-y) FORCE - $(call if_changed,check_data_rel) - $(call if_changed,ld) + $(call if_changed,check-and-link-vmlinux) OBJCOPYFLAGS_vmlinux.bin := -R .comment -S $(obj)/vmlinux.bin: vmlinux FORCE diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 0ed8dbf6210d59e4728ac2885f9622857850c1ae..75d42cb8a7c9d8fb28655e4d2c92e35360e18e9c 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -88,7 +88,7 @@ END(native_usergs_sysret64) .endm .macro TRACE_IRQS_IRETQ_DEBUG - bt $9, EFLAGS(%rsp) /* interrupts off? */ + btl $9, EFLAGS(%rsp) /* interrupts off? */ jnc 1f TRACE_IRQS_ON_DEBUG 1: @@ -630,7 +630,7 @@ retint_kernel: #ifdef CONFIG_PREEMPT /* Interrupts are off */ /* Check if we need preemption */ - bt $9, EFLAGS(%rsp) /* were interrupts off? */ + btl $9, EFLAGS(%rsp) /* were interrupts off? */ jnc 1f 0: cmpl $0, PER_CPU_VAR(__preempt_count) #ifndef CONFIG_PREEMPT_LAZY diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile index c366c0adeb40da1ae311a2906087c4a73f730af4..b545bf9d232831949a4ddff571aa510e77074905 100644 --- a/arch/x86/entry/vdso/Makefile +++ b/arch/x86/entry/vdso/Makefile @@ -74,9 +74,9 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \ $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \ -fno-omit-frame-pointer -foptimize-sibling-calls \ - -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO + -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO $(RETPOLINE_VDSO_CFLAGS) -$(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS)) $(CFL) +$(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL) # # vDSO code runs in userspace and -pg doesn't help with profiling anyway. 
@@ -147,11 +147,13 @@ KBUILD_CFLAGS_32 := $(filter-out -mcmodel=kernel,$(KBUILD_CFLAGS_32)) KBUILD_CFLAGS_32 := $(filter-out -fno-pic,$(KBUILD_CFLAGS_32)) KBUILD_CFLAGS_32 := $(filter-out -mfentry,$(KBUILD_CFLAGS_32)) KBUILD_CFLAGS_32 := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS_32)) +KBUILD_CFLAGS_32 := $(filter-out $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS_32)) KBUILD_CFLAGS_32 += -m32 -msoft-float -mregparm=0 -fpic KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector) KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls) KBUILD_CFLAGS_32 += -fno-omit-frame-pointer KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING +KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS) $(obj)/vdso32.so.dbg: KBUILD_CFLAGS = $(KBUILD_CFLAGS_32) $(obj)/vdso32.so.dbg: FORCE \ diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c index 786fd875de9287d8745bb7ec42d52fc7a2a4dac0..8c51844694e2f07f8351b27640a94bad7ab59c76 100644 --- a/arch/x86/events/amd/ibs.c +++ b/arch/x86/events/amd/ibs.c @@ -579,7 +579,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs) { struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu); struct perf_event *event = pcpu->event; - struct hw_perf_event *hwc = &event->hw; + struct hw_perf_event *hwc; struct perf_sample_data data; struct perf_raw_record raw; struct pt_regs regs; @@ -602,6 +602,10 @@ fail: return 0; } + if (WARN_ON_ONCE(!event)) + goto fail; + + hwc = &event->hw; msr = hwc->config_base; buf = ibs_data.regs; rdmsrl(msr, *buf); diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 717c9219d00ec3fbdc6ebea65277bfc6025dc8fa..e5097dc85a06cf73d4da6b93aa3f5d34b9bdad6a 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -2462,7 +2462,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs perf_callchain_store(entry, regs->ip); - if (!current->mm) + if (!nmi_uaccess_okay()) return; if (perf_callchain_user32(regs, entry)) diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c index cf372b90557ed4e8a788c8f97b515ac956d2512e..a4170048a30bc32b75f976dc46cd85452b57de88 100644 --- a/arch/x86/events/intel/lbr.c +++ b/arch/x86/events/intel/lbr.c @@ -346,7 +346,7 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx) mask = x86_pmu.lbr_nr - 1; tos = task_ctx->tos; - for (i = 0; i < tos; i++) { + for (i = 0; i < task_ctx->valid_lbrs; i++) { lbr_idx = (tos - i) & mask; wrlbr_from(lbr_idx, task_ctx->lbr_from[i]); wrlbr_to (lbr_idx, task_ctx->lbr_to[i]); @@ -354,6 +354,15 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx) if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO) wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]); } + + for (; i < x86_pmu.lbr_nr; i++) { + lbr_idx = (tos - i) & mask; + wrlbr_from(lbr_idx, 0); + wrlbr_to(lbr_idx, 0); + if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO) + wrmsrl(MSR_LBR_INFO_0 + lbr_idx, 0); + } + wrmsrl(x86_pmu.lbr_tos, tos); task_ctx->lbr_stack_state = LBR_NONE; } @@ -361,7 +370,7 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx) static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx) { unsigned lbr_idx, mask; - u64 tos; + u64 tos, from; int i; if (task_ctx->lbr_callstack_users == 0) { @@ -371,13 +380,17 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx) mask = x86_pmu.lbr_nr - 1; tos = intel_pmu_lbr_tos(); - for (i = 0; i < tos; i++) { + for (i = 0; i < x86_pmu.lbr_nr; i++) { lbr_idx = (tos - i) & 
mask; - task_ctx->lbr_from[i] = rdlbr_from(lbr_idx); + from = rdlbr_from(lbr_idx); + if (!from) + break; + task_ctx->lbr_from[i] = from; task_ctx->lbr_to[i] = rdlbr_to(lbr_idx); if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO) rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]); } + task_ctx->valid_lbrs = i; task_ctx->tos = tos; task_ctx->lbr_stack_state = LBR_VALID; } @@ -531,7 +544,7 @@ static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc) */ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc) { - bool need_info = false; + bool need_info = false, call_stack = false; unsigned long mask = x86_pmu.lbr_nr - 1; int lbr_format = x86_pmu.intel_cap.lbr_format; u64 tos = intel_pmu_lbr_tos(); @@ -542,7 +555,7 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc) if (cpuc->lbr_sel) { need_info = !(cpuc->lbr_sel->config & LBR_NO_INFO); if (cpuc->lbr_sel->config & LBR_CALL_STACK) - num = tos; + call_stack = true; } for (i = 0; i < num; i++) { @@ -555,6 +568,13 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc) from = rdlbr_from(lbr_idx); to = rdlbr_to(lbr_idx); + /* + * Read LBR call stack entries + * until invalid entry (0s) is detected. + */ + if (call_stack && !from) + break; + if (lbr_format == LBR_FORMAT_INFO && need_info) { u64 info; diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index dc4728eccfd86c4a2a97894f53b05b658a6e0155..c6698c63c047b170a17bf508071ba25218565464 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -646,6 +646,7 @@ struct x86_perf_task_context { u64 lbr_to[MAX_LBR_ENTRIES]; u64 lbr_info[MAX_LBR_ENTRIES]; int tos; + int valid_lbrs; int lbr_callstack_users; int lbr_stack_state; }; diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h index e203169931c721b7958c940694458c11a270b982..6390bd8c141b44cd988dd21bc4b5b3efbbf1688b 100644 --- a/arch/x86/include/asm/fixmap.h +++ b/arch/x86/include/asm/fixmap.h @@ -14,6 +14,16 @@ #ifndef _ASM_X86_FIXMAP_H #define _ASM_X86_FIXMAP_H +/* + * Exposed to assembly code for setting up initial page tables. Cannot be + * calculated in assembly code (fixmap entries are an enum), but is sanity + * checked in the actual fixmap C code to make sure that the fixmap is + * covered fully. 
+ */ +#define FIXMAP_PMD_NUM 2 +/* fixmap starts downwards from the 507th entry in level2_fixmap_pgt */ +#define FIXMAP_PMD_TOP 507 + #ifndef __ASSEMBLY__ #include #include diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h index c14f2a74b2be7495f1ee00c92322a58cc43d10a6..15450a675031d3562b4a9da3d3b15816c11003fb 100644 --- a/arch/x86/include/asm/irqflags.h +++ b/arch/x86/include/asm/irqflags.h @@ -33,7 +33,8 @@ extern inline unsigned long native_save_fl(void) return flags; } -static inline void native_restore_fl(unsigned long flags) +extern inline void native_restore_fl(unsigned long flags); +extern inline void native_restore_fl(unsigned long flags) { asm volatile("push %0 ; popf" : /* no output */ diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index 340070415c2c3a93b0305ed7624ceaa62bd4362f..90fef69e4c5a1e327e0aee654d0240f70a562ca9 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -200,6 +200,7 @@ enum mce_notifier_prios { MCE_PRIO_LOWEST = 0, }; +struct notifier_block; extern void mce_register_decode_chain(struct notifier_block *nb); extern void mce_unregister_decode_chain(struct notifier_block *nb); diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h index 9dc19b4a2a870398e57239c9af545924713b0b49..c5d4931d1ef9b97088dd13e5cf3fbd8a3ab12f5f 100644 --- a/arch/x86/include/asm/pgtable-3level.h +++ b/arch/x86/include/asm/pgtable-3level.h @@ -2,6 +2,8 @@ #ifndef _ASM_X86_PGTABLE_3LEVEL_H #define _ASM_X86_PGTABLE_3LEVEL_H +#include + /* * Intel Physical Address Extension (PAE) Mode - three-level page * tables on PPro+ CPUs. @@ -147,10 +149,7 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep) { pte_t res; - /* xchg acts as a barrier before the setting of the high bits */ - res.pte_low = xchg(&ptep->pte_low, 0); - res.pte_high = ptep->pte_high; - ptep->pte_high = 0; + res.pte = (pteval_t)atomic64_xchg((atomic64_t *)ptep, 0); return res; } diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 4ecb728319384759eeb9c96eb94bbec7faf79aba..ef938583147ea4d7775c23427f8b13e77ae1a21a 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -14,6 +14,7 @@ #include #include #include +#include extern p4d_t level4_kernel_pgt[512]; extern p4d_t level4_ident_pgt[512]; @@ -22,7 +23,7 @@ extern pud_t level3_ident_pgt[512]; extern pmd_t level2_kernel_pgt[512]; extern pmd_t level2_fixmap_pgt[512]; extern pmd_t level2_ident_pgt[512]; -extern pte_t level1_fixmap_pgt[512]; +extern pte_t level1_fixmap_pgt[512 * FIXMAP_PMD_NUM]; extern pgd_t init_top_pgt[]; #define swapper_pg_dir init_top_pgt diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 0e856c0628b350dd6d1aca368e213a927156da07..b12c8d70dd33d0044c60f2c324717d62d631e086 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -132,6 +132,8 @@ struct cpuinfo_x86 { /* Index into per_cpu list: */ u16 cpu_index; u32 microcode; + /* Address space bits used by the cache internally */ + u8 x86_cache_bits; } __randomize_layout; struct cpuid_regs { @@ -180,9 +182,9 @@ extern const struct seq_operations cpuinfo_op; extern void cpu_detect(struct cpuinfo_x86 *c); -static inline unsigned long l1tf_pfn_limit(void) +static inline unsigned long long l1tf_pfn_limit(void) { - return BIT(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT) - 1; + return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT); } extern void 
early_cpu_init(void); diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 875ca99b82eef003781464af111214392fdd0f5c..5f00ecb9d2515e693218d0e4a561824fe0279d94 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -175,8 +175,16 @@ struct tlb_state { * are on. This means that it may not match current->active_mm, * which will contain the previous user mm when we're in lazy TLB * mode even if we've already switched back to swapper_pg_dir. + * + * During switch_mm_irqs_off(), loaded_mm will be set to + * LOADED_MM_SWITCHING during the brief interrupts-off window + * when CR3 and loaded_mm would otherwise be inconsistent. This + * is for nmi_uaccess_okay()'s benefit. */ struct mm_struct *loaded_mm; + +#define LOADED_MM_SWITCHING ((struct mm_struct *)1) + u16 loaded_mm_asid; u16 next_asid; /* last user mm's ctx id */ @@ -246,6 +254,38 @@ struct tlb_state { }; DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate); +/* + * Blindly accessing user memory from NMI context can be dangerous + * if we're in the middle of switching the current user task or + * switching the loaded mm. It can also be dangerous if we + * interrupted some kernel code that was temporarily using a + * different mm. + */ +static inline bool nmi_uaccess_okay(void) +{ + struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm); + struct mm_struct *current_mm = current->mm; + + VM_WARN_ON_ONCE(!loaded_mm); + + /* + * The condition we want to check is + * current_mm->pgd == __va(read_cr3_pa()). This may be slow, though, + * if we're running in a VM with shadow paging, and nmi_uaccess_okay() + * is supposed to be reasonably fast. + * + * Instead, we check the almost equivalent but somewhat conservative + * condition below, and we rely on the fact that switch_mm_irqs_off() + * sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3. + */ + if (loaded_mm != current_mm) + return false; + + VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa())); + + return true; +} + /* Initialize cr4 shadow for this CPU. */ static inline void cr4_init_shadow(void) { diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h index 52250681f68c7410b30f42d9e424553aaed4f313..d92ccff4e615d5a868e5dd4e69834db1cf73420e 100644 --- a/arch/x86/include/asm/vgtod.h +++ b/arch/x86/include/asm/vgtod.h @@ -93,7 +93,7 @@ static inline unsigned int __getcpu(void) * * If RDPID is available, use it. */ - alternative_io ("lsl %[p],%[seg]", + alternative_io ("lsl %[seg],%[p]", ".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */ X86_FEATURE_RDPID, [p] "=a" (p), [seg] "r" (__PER_CPU_SEG)); diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index d07addb99b7180d1aefba2c939cd421acabc1592..3e435f88621d2aa38708c1b2107b933aca62eaeb 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -652,6 +652,45 @@ EXPORT_SYMBOL_GPL(l1tf_mitigation); enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation); +/* + * These CPUs all support 44bits physical address space internally in the + * cache but CPUID can report a smaller number of physical address bits. + * + * The L1TF mitigation uses the top most address bit for the inversion of + * non present PTEs. 
When the installed memory reaches into the top most + * address bit due to memory holes, which has been observed on machines + * which report 36bits physical address bits and have 32G RAM installed, + * then the mitigation range check in l1tf_select_mitigation() triggers. + * This is a false positive because the mitigation is still possible due to + * the fact that the cache uses 44bit internally. Use the cache bits + * instead of the reported physical bits and adjust them on the affected + * machines to 44bit if the reported bits are less than 44. + */ +static void override_cache_bits(struct cpuinfo_x86 *c) +{ + if (c->x86 != 6) + return; + + switch (c->x86_model) { + case INTEL_FAM6_NEHALEM: + case INTEL_FAM6_WESTMERE: + case INTEL_FAM6_SANDYBRIDGE: + case INTEL_FAM6_IVYBRIDGE: + case INTEL_FAM6_HASWELL_CORE: + case INTEL_FAM6_HASWELL_ULT: + case INTEL_FAM6_HASWELL_GT3E: + case INTEL_FAM6_BROADWELL_CORE: + case INTEL_FAM6_BROADWELL_GT3E: + case INTEL_FAM6_SKYLAKE_MOBILE: + case INTEL_FAM6_SKYLAKE_DESKTOP: + case INTEL_FAM6_KABYLAKE_MOBILE: + case INTEL_FAM6_KABYLAKE_DESKTOP: + if (c->x86_cache_bits < 44) + c->x86_cache_bits = 44; + break; + } +} + static void __init l1tf_select_mitigation(void) { u64 half_pa; @@ -659,6 +698,8 @@ static void __init l1tf_select_mitigation(void) if (!boot_cpu_has_bug(X86_BUG_L1TF)) return; + override_cache_bits(&boot_cpu_data); + switch (l1tf_mitigation) { case L1TF_MITIGATION_OFF: case L1TF_MITIGATION_FLUSH_NOWARN: @@ -678,14 +719,13 @@ static void __init l1tf_select_mitigation(void) return; #endif - /* - * This is extremely unlikely to happen because almost all - * systems have far more MAX_PA/2 than RAM can be fit into - * DIMM slots. - */ half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT; if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) { pr_warn("System has more than MAX_PA/2 memory. 
L1TF mitigation not effective.\n"); + pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n", + half_pa); + pr_info("However, doing so will make a part of your RAM unusable.\n"); + pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html might help you decide.\n"); return; } diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index dd02ee4fa8cd9f4340d994e941156a279d048b46..7d2a7890a823eb7cabe8c274cb331dd348172058 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -890,6 +890,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c) } } #endif + c->x86_cache_bits = c->x86_phys_bits; } static const __initconst struct x86_cpu_id cpu_no_speculation[] = { diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 278be092b3009e99d4aa6bb7dc81fa4f1622e398..574dcdc092abdcfadfe834424c3e079144ecc51f 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -150,6 +150,9 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c) if (cpu_has(c, X86_FEATURE_HYPERVISOR)) return false; + if (c->x86 != 6) + return false; + for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) { if (c->x86_model == spectre_bad_microcodes[i].model && c->x86_stepping == spectre_bad_microcodes[i].stepping) diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 48179928ff38cf12476ce27cd096383aee1feb93..9d33dbf2489e2b6679131ecb3938aa4c382347f0 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -504,6 +504,7 @@ static enum ucode_state apply_microcode_amd(int cpu) struct microcode_amd *mc_amd; struct ucode_cpu_info *uci; struct ucode_patch *p; + enum ucode_state ret; u32 rev, dummy; BUG_ON(raw_smp_processor_id() != cpu); @@ -521,9 +522,8 @@ static enum ucode_state apply_microcode_amd(int cpu) /* need to apply patch? 
*/ if (rev >= mc_amd->hdr.patch_id) { - c->microcode = rev; - uci->cpu_sig.rev = rev; - return UCODE_OK; + ret = UCODE_OK; + goto out; } if (__apply_microcode_amd(mc_amd)) { @@ -531,13 +531,21 @@ static enum ucode_state apply_microcode_amd(int cpu) cpu, mc_amd->hdr.patch_id); return UCODE_ERROR; } - pr_info("CPU%d: new patch_level=0x%08x\n", cpu, - mc_amd->hdr.patch_id); - uci->cpu_sig.rev = mc_amd->hdr.patch_id; - c->microcode = mc_amd->hdr.patch_id; + rev = mc_amd->hdr.patch_id; + ret = UCODE_UPDATED; + + pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev); - return UCODE_UPDATED; +out: + uci->cpu_sig.rev = rev; + c->microcode = rev; + + /* Update boot_cpu_data's revision too, if we're on the BSP: */ + if (c->cpu_index == boot_cpu_data.cpu_index) + boot_cpu_data.microcode = rev; + + return ret; } static int install_equiv_cpu_table(const u8 *buf) diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 1c2cfa0644aa979c97cc01a42925a44c25f9f852..16936a24795c8457b789f7b428216cfc5bd1fb4e 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -190,8 +190,11 @@ static void save_microcode_patch(void *data, unsigned int size) p = memdup_patch(data, size); if (!p) pr_err("Error allocating buffer %p\n", data); - else + else { list_replace(&iter->plist, &p->plist); + kfree(iter->data); + kfree(iter); + } } } @@ -792,6 +795,7 @@ static enum ucode_state apply_microcode_intel(int cpu) struct ucode_cpu_info *uci = ucode_cpu_info + cpu; struct cpuinfo_x86 *c = &cpu_data(cpu); struct microcode_intel *mc; + enum ucode_state ret; static int prev_rev; u32 rev; @@ -814,9 +818,8 @@ static enum ucode_state apply_microcode_intel(int cpu) */ rev = intel_get_microcode_revision(); if (rev >= mc->hdr.rev) { - uci->cpu_sig.rev = rev; - c->microcode = rev; - return UCODE_OK; + ret = UCODE_OK; + goto out; } /* @@ -845,10 +848,17 @@ static enum ucode_state apply_microcode_intel(int cpu) prev_rev = rev; } + ret = UCODE_UPDATED; + +out: uci->cpu_sig.rev = rev; - c->microcode = rev; + c->microcode = rev; + + /* Update boot_cpu_data's revision too, if we're on the BSP: */ + if (c->cpu_index == boot_cpu_data.cpu_index) + boot_cpu_data.microcode = rev; - return UCODE_UPDATED; + return ret; } static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size, diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index a2d8a3908670993c898cd0d22cbdd6742e51b4da..224de37821e4ed74ae7d1a4a1f17f848b5584832 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -298,7 +299,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr) * We're not going to return, but we might be on an IST stack or * have very little stack space left. Rewind the stack and kill * the task. + * Before we rewind the stack, we have to tell KASAN that we're going to + * reuse the task stack and that existing poisons are invalid. 
*/ + kasan_unpoison_task_stack(current); rewind_stack_do_exit(signr); } NOKPROBE_SYMBOL(oops_end); diff --git a/arch/x86/kernel/eisa.c b/arch/x86/kernel/eisa.c index f260e452e4f8726237618efe4b9526938d31d81d..e8c8c5d78dbdd38b1089f5dc87cf3450644fe487 100644 --- a/arch/x86/kernel/eisa.c +++ b/arch/x86/kernel/eisa.c @@ -7,11 +7,17 @@ #include #include +#include + static __init int eisa_bus_probe(void) { - void __iomem *p = ioremap(0x0FFFD9, 4); + void __iomem *p; + + if (xen_pv_domain() && !xen_initial_domain()) + return 0; - if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24)) + p = ioremap(0x0FFFD9, 4); + if (p && readl(p) == 'E' + ('I' << 8) + ('S' << 16) + ('A' << 24)) EISA_bus = 1; iounmap(p); return 0; diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 7ba5d819ebe3b351ff4124ded8c598adfcb1a5f1..45b5c6c4a55edac132a855e47502eacb62225045 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -31,6 +31,7 @@ #include #include #include +#include /* * Manage page tables very early on. @@ -93,7 +94,8 @@ unsigned long __head __startup_64(unsigned long physaddr, pud[511] += load_delta; pmd = fixup_pointer(level2_fixmap_pgt, physaddr); - pmd[506] += load_delta; + for (i = FIXMAP_PMD_TOP; i > FIXMAP_PMD_TOP - FIXMAP_PMD_NUM; i--) + pmd[i] += load_delta; /* * Set up the identity mapping for the switchover. These diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 0f545b3cf926787bd986d763c843498540ef530d..8d59dfe629a96e8a53183cfb9cb4d867e039c520 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -24,6 +24,7 @@ #include "../entry/calling.h" #include #include +#include #ifdef CONFIG_PARAVIRT #include @@ -438,13 +439,20 @@ NEXT_PAGE(level2_kernel_pgt) KERNEL_IMAGE_SIZE/PMD_SIZE) NEXT_PAGE(level2_fixmap_pgt) - .fill 506,8,0 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */ - .fill 5,8,0 + .fill (512 - 4 - FIXMAP_PMD_NUM),8,0 + pgtno = 0 + .rept (FIXMAP_PMD_NUM) + .quad level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \ + + _PAGE_TABLE_NOENC; + pgtno = pgtno + 1 + .endr + /* 6 MB reserved space + a 2MB hole */ + .fill 4,8,0 NEXT_PAGE(level1_fixmap_pgt) + .rept (FIXMAP_PMD_NUM) .fill 512,8,0 + .endr #undef PMDS diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c index f24cd9f1799a062a8df2c3c25bb164ba28537f89..928b0c6083c9cc5953fd5f47867ff218efaea910 100644 --- a/arch/x86/kernel/kexec-bzimage64.c +++ b/arch/x86/kernel/kexec-bzimage64.c @@ -532,7 +532,7 @@ static int bzImage64_cleanup(void *loader_data) static int bzImage64_verify_sig(const char *kernel, unsigned long kernel_len) { return verify_pefile_signature(kernel, kernel_len, - NULL, + VERIFY_USE_SECONDARY_KEYRING, VERIFYING_KEXEC_PE_SIGNATURE); } #endif diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index 5b609e28ce3f40514be95ac47cddd5a0c4d77ab0..48703d430a2fb46470deef90963947c08e7ef945 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c @@ -143,6 +143,7 @@ static unsigned long kvm_get_tsc_khz(void) src = &hv_clock[cpu].pvti; tsc_khz = pvclock_tsc_khz(src); put_cpu(); + setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ); return tsc_khz; } diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index fa093b77689f82ee547481067ba85026ebdfd29c..cbeecfcc66d68b3ae44eaa075ba7be9e49f1063b 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -370,6 +370,7 @@ 
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) start_thread_common(regs, new_ip, new_sp, __USER_CS, __USER_DS, 0); } +EXPORT_SYMBOL_GPL(start_thread); #ifdef CONFIG_COMPAT void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 5ebb0dbcf4f7884bf12978c5817a0524d9f1313e..30447d210f37e272a08d7af707544ed2622ebf49 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -223,6 +223,11 @@ static void notrace start_secondary(void *unused) #ifdef CONFIG_X86_32 /* switch away from the initial page table */ load_cr3(swapper_pg_dir); + /* + * Initialize the CR4 shadow before doing anything that could + * try to read it. + */ + cr4_init_shadow(); __flush_tlb_all(); #endif load_current_idt(); diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c index 19afdbd7d0a77c8f5511cf2de616d24c5259dbae..5532d1be76879b452ca3c072de807eb4d9509b85 100644 --- a/arch/x86/kernel/tsc_msr.c +++ b/arch/x86/kernel/tsc_msr.c @@ -12,6 +12,7 @@ #include #include #include +#include #define MAX_NUM_FREQS 9 diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 00e2ae033a0f57b3980767c2beb5388043b35f2b..1dfb808abd23f4da506b17a79a0c8713051c1dbe 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -220,6 +220,17 @@ static const u64 shadow_acc_track_saved_bits_mask = PT64_EPT_READABLE_MASK | PT64_EPT_EXECUTABLE_MASK; static const u64 shadow_acc_track_saved_bits_shift = PT64_SECOND_AVAIL_BITS_SHIFT; +/* + * This mask must be set on all non-zero Non-Present or Reserved SPTEs in order + * to guard against L1TF attacks. + */ +static u64 __read_mostly shadow_nonpresent_or_rsvd_mask; + +/* + * The number of high-order 1 bits to use in the mask above. 
+ */ +static const u64 shadow_nonpresent_or_rsvd_mask_len = 5; + static void mmu_spte_set(u64 *sptep, u64 spte); static void mmu_free_roots(struct kvm_vcpu *vcpu); @@ -308,9 +319,13 @@ static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn, { unsigned int gen = kvm_current_mmio_generation(vcpu); u64 mask = generation_mmio_spte_mask(gen); + u64 gpa = gfn << PAGE_SHIFT; access &= ACC_WRITE_MASK | ACC_USER_MASK; - mask |= shadow_mmio_value | access | gfn << PAGE_SHIFT; + mask |= shadow_mmio_value | access; + mask |= gpa | shadow_nonpresent_or_rsvd_mask; + mask |= (gpa & shadow_nonpresent_or_rsvd_mask) + << shadow_nonpresent_or_rsvd_mask_len; trace_mark_mmio_spte(sptep, gfn, access, gen); mmu_spte_set(sptep, mask); @@ -323,8 +338,14 @@ static bool is_mmio_spte(u64 spte) static gfn_t get_mmio_spte_gfn(u64 spte) { - u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask; - return (spte & ~mask) >> PAGE_SHIFT; + u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask | + shadow_nonpresent_or_rsvd_mask; + u64 gpa = spte & ~mask; + + gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len) + & shadow_nonpresent_or_rsvd_mask; + + return gpa >> PAGE_SHIFT; } static unsigned get_mmio_spte_access(u64 spte) @@ -381,7 +402,7 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask, } EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes); -void kvm_mmu_clear_all_pte_masks(void) +static void kvm_mmu_reset_all_pte_masks(void) { shadow_user_mask = 0; shadow_accessed_mask = 0; @@ -391,6 +412,18 @@ void kvm_mmu_clear_all_pte_masks(void) shadow_mmio_mask = 0; shadow_present_mask = 0; shadow_acc_track_mask = 0; + + /* + * If the CPU has 46 or less physical address bits, then set an + * appropriate mask to guard against L1TF attacks. Otherwise, it is + * assumed that the CPU is not vulnerable to L1TF. + */ + if (boot_cpu_data.x86_phys_bits < + 52 - shadow_nonpresent_or_rsvd_mask_len) + shadow_nonpresent_or_rsvd_mask = + rsvd_bits(boot_cpu_data.x86_phys_bits - + shadow_nonpresent_or_rsvd_mask_len, + boot_cpu_data.x86_phys_bits - 1); } static int is_cpuid_PSE36(void) @@ -5473,7 +5506,7 @@ static void mmu_destroy_caches(void) int kvm_mmu_module_init(void) { - kvm_mmu_clear_all_pte_masks(); + kvm_mmu_reset_all_pte_masks(); pte_list_desc_cache = kmem_cache_create("pte_list_desc", sizeof(struct pte_list_desc), diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 282bbcbf3b6a999b449cf1705a27e53a16e4b480..f6bebcec60b4e507cb6a468ed2173c3d629d7b49 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -5067,8 +5067,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) clgi(); - local_irq_enable(); - /* * If this vCPU has touched SPEC_CTRL, restore the guest's value if * it's non-zero. 
Since vmentry is serialising on affected CPUs, there @@ -5077,6 +5075,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) */ x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl); + local_irq_enable(); + asm volatile ( "push %%" _ASM_BP "; \n\t" "mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t" @@ -5199,12 +5199,12 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))) svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); - x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl); - reload_tss(vcpu); local_irq_disable(); + x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl); + vcpu->arch.cr2 = svm->vmcb->save.cr2; vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax; vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index f015ca3997d9216c28c1db88b1e7e7cf0e6de64f..fd46d890296c9e98b472e79d55bbef8ca8c7b0f4 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -200,12 +200,14 @@ static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_ static const struct { const char *option; - enum vmx_l1d_flush_state cmd; + bool for_parse; } vmentry_l1d_param[] = { - {"auto", VMENTER_L1D_FLUSH_AUTO}, - {"never", VMENTER_L1D_FLUSH_NEVER}, - {"cond", VMENTER_L1D_FLUSH_COND}, - {"always", VMENTER_L1D_FLUSH_ALWAYS}, + [VMENTER_L1D_FLUSH_AUTO] = {"auto", true}, + [VMENTER_L1D_FLUSH_NEVER] = {"never", true}, + [VMENTER_L1D_FLUSH_COND] = {"cond", true}, + [VMENTER_L1D_FLUSH_ALWAYS] = {"always", true}, + [VMENTER_L1D_FLUSH_EPT_DISABLED] = {"EPT disabled", false}, + [VMENTER_L1D_FLUSH_NOT_REQUIRED] = {"not required", false}, }; #define L1D_CACHE_ORDER 4 @@ -289,8 +291,9 @@ static int vmentry_l1d_flush_parse(const char *s) if (s) { for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) { - if (sysfs_streq(s, vmentry_l1d_param[i].option)) - return vmentry_l1d_param[i].cmd; + if (vmentry_l1d_param[i].for_parse && + sysfs_streq(s, vmentry_l1d_param[i].option)) + return i; } } return -EINVAL; @@ -300,13 +303,13 @@ static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp) { int l1tf, ret; - if (!boot_cpu_has(X86_BUG_L1TF)) - return 0; - l1tf = vmentry_l1d_flush_parse(s); if (l1tf < 0) return l1tf; + if (!boot_cpu_has(X86_BUG_L1TF)) + return 0; + /* * Has vmx_init() run already? If not then this is the pre init * parameter parsing. In that case just store the value and let @@ -326,6 +329,9 @@ static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp) static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp) { + if (WARN_ON_ONCE(l1tf_vmx_mitigation >= ARRAY_SIZE(vmentry_l1d_param))) + return sprintf(s, "???\n"); + return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option); } @@ -743,17 +749,21 @@ struct vcpu_vmx { /* * loaded_vmcs points to the VMCS currently used in this vcpu. For a * non-nested (L1) guest, it always points to vmcs01. For a nested - * guest (L2), it points to a different VMCS. + * guest (L2), it points to a different VMCS. loaded_cpu_state points + * to the VMCS whose state is loaded into the CPU registers that only + * need to be switched when transitioning to/from the kernel; a NULL + * value indicates that host state is loaded. 
*/ struct loaded_vmcs vmcs01; struct loaded_vmcs *loaded_vmcs; + struct loaded_vmcs *loaded_cpu_state; bool __launched; /* temporary, used in vmx_vcpu_run */ struct msr_autoload { struct vmx_msrs guest; struct vmx_msrs host; } msr_autoload; + struct { - int loaded; u16 fs_sel, gs_sel, ldt_sel; #ifdef CONFIG_X86_64 u16 ds_sel, es_sel; @@ -2330,10 +2340,11 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu) struct vcpu_vmx *vmx = to_vmx(vcpu); int i; - if (vmx->host_state.loaded) + if (vmx->loaded_cpu_state) return; - vmx->host_state.loaded = 1; + vmx->loaded_cpu_state = vmx->loaded_vmcs; + /* * Set host fs and gs selectors. Unfortunately, 22.2.3 does not * allow segment selectors with cpl > 0 or ti == 1. @@ -2384,11 +2395,14 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu) static void __vmx_load_host_state(struct vcpu_vmx *vmx) { - if (!vmx->host_state.loaded) + if (!vmx->loaded_cpu_state) return; + WARN_ON_ONCE(vmx->loaded_cpu_state != vmx->loaded_vmcs); + ++vmx->vcpu.stat.host_state_reload; - vmx->host_state.loaded = 0; + vmx->loaded_cpu_state = NULL; + #ifdef CONFIG_X86_64 if (is_long_mode(&vmx->vcpu)) rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); @@ -6951,8 +6965,8 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu) if (!static_cpu_has(X86_FEATURE_HYPERVISOR)) return kvm_skip_emulated_instruction(vcpu); else - return x86_emulate_instruction(vcpu, gpa, EMULTYPE_SKIP, - NULL, 0) == EMULATE_DONE; + return emulate_instruction(vcpu, EMULTYPE_SKIP) == + EMULATE_DONE; } ret = kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0); @@ -7576,7 +7590,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu) /* CPL=0 must be checked manually. */ if (vmx_get_cpl(vcpu)) { - kvm_queue_exception(vcpu, UD_VECTOR); + kvm_inject_gp(vcpu, 0); return 1; } @@ -7640,7 +7654,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu) static int nested_vmx_check_permission(struct kvm_vcpu *vcpu) { if (vmx_get_cpl(vcpu)) { - kvm_queue_exception(vcpu, UD_VECTOR); + kvm_inject_gp(vcpu, 0); return 0; } @@ -8108,21 +8122,20 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu) /* Emulate the VMPTRST instruction */ static int handle_vmptrst(struct kvm_vcpu *vcpu) { - unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); - u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); - gva_t vmcs_gva; + unsigned long exit_qual = vmcs_readl(EXIT_QUALIFICATION); + u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO); + gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr; struct x86_exception e; + gva_t gva; if (!nested_vmx_check_permission(vcpu)) return 1; - if (get_vmx_mem_address(vcpu, exit_qualification, - vmx_instruction_info, true, &vmcs_gva)) + if (get_vmx_mem_address(vcpu, exit_qual, instr_info, true, &gva)) return 1; /* *_system ok, nested_vmx_check_permission has verified cpl=0 */ - if (kvm_write_guest_virt_system(vcpu, vmcs_gva, - (void *)&to_vmx(vcpu)->nested.current_vmptr, - sizeof(u64), &e)) { + if (kvm_write_guest_virt_system(vcpu, gva, (void *)¤t_vmptr, + sizeof(gpa_t), &e)) { kvm_inject_page_fault(vcpu, &e); return 1; } @@ -9171,9 +9184,6 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu) * information but as all relevant affected CPUs have 32KiB L1D cache size * there is no point in doing so. 
*/ -#define L1D_CACHE_ORDER 4 -static void *vmx_l1d_flush_pages; - static void vmx_l1d_flush(struct kvm_vcpu *vcpu) { int size = PAGE_SIZE << L1D_CACHE_ORDER; @@ -9942,8 +9952,8 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs) return; cpu = get_cpu(); - vmx->loaded_vmcs = vmcs; vmx_vcpu_put(vcpu); + vmx->loaded_vmcs = vmcs; vmx_vcpu_load(vcpu, cpu); vcpu->cpu = cpu; put_cpu(); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 13b30aa2f78e10f7610f55bc3c2eaeae544543ba..407658146ae164260431c16c9f5f16679da2d21b 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -6194,20 +6194,22 @@ static void kvm_set_mmio_spte_mask(void) * Set the reserved bits and the present bit of an paging-structure * entry to generate page fault with PFER.RSV = 1. */ - /* Mask the reserved physical address bits. */ - mask = rsvd_bits(maxphyaddr, 51); + + /* + * Mask the uppermost physical address bit, which would be reserved as + * long as the supported physical address width is less than 52. + */ + mask = 1ull << 51; /* Set the present bit. */ mask |= 1ull; -#ifdef CONFIG_X86_64 /* * If reserved bit is not supported, clear the present bit to disable * mmio page fault. */ - if (maxphyaddr == 52) + if (IS_ENABLED(CONFIG_X86_64) && maxphyaddr == 52) mask &= ~1ull; -#endif kvm_mmu_set_mmio_spte_mask(mask, mask); } diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c index c8c6ad0d58b89c3621d0fcf11f45e00442ebf2a2..3f435d7fca5e62bc999e48c6a4f3e1c0fc86def9 100644 --- a/arch/x86/lib/usercopy.c +++ b/arch/x86/lib/usercopy.c @@ -7,6 +7,8 @@ #include #include +#include + /* * We rely on the nested NMI work to allow atomic faults from the NMI path; the * nested NMI paths are careful to preserve CR2. @@ -19,6 +21,9 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n) if (__range_not_ok(from, n, TASK_SIZE)) return n; + if (!nmi_uaccess_okay()) + return n; + /* * Even though this function is typically called from NMI/IRQ context * disable pagefaults so that its behaviour is consistent even when diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index c2faff548f59a575024a62e4291bf567a8939dc3..794c35c4ca7360021a50c2a04d185eedbc577d52 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -317,8 +317,6 @@ static noinline int vmalloc_fault(unsigned long address) if (!(address >= VMALLOC_START && address < VMALLOC_END)) return -1; - WARN_ON_ONCE(in_nmi()); - /* * Synchronize this task's top level page-table * with the 'reference' page table. diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 37f60dfd7e4efbeb5f01ccceaaacef6b98537bf6..94b8d90830d10c65ab16c1993cfb4d6f054fb628 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -892,7 +892,7 @@ unsigned long max_swapfile_size(void) if (boot_cpu_has_bug(X86_BUG_L1TF)) { /* Limit the swap file size to MAX_PA/2 for L1TF workaround */ - unsigned long l1tf_limit = l1tf_pfn_limit() + 1; + unsigned long long l1tf_limit = l1tf_pfn_limit(); /* * We encode swap offsets also with 3 bits below those for pfn * which makes the usable limit higher. 
@@ -900,7 +900,7 @@ unsigned long max_swapfile_size(void) #if CONFIG_PGTABLE_LEVELS > 2 l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT; #endif - pages = min_t(unsigned long, l1tf_limit, pages); + pages = min_t(unsigned long long, l1tf_limit, pages); } return pages; } diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index 5f4805d69aab2af70a9dcbe39877096cb9807fca..53f1c18b15bd361fa19fb75341a9e338b5301a3f 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c @@ -191,7 +191,7 @@ bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot) /* If it's real memory always allow */ if (pfn_valid(pfn)) return true; - if (pfn > l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN)) + if (pfn >= l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN)) return false; return true; } diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c index 34a2a3bfde9c144623f4bfafbeb32404e4e3a21d..22cbad56acab8a698a0b0523745ea0be020cd0fc 100644 --- a/arch/x86/mm/numa_emulation.c +++ b/arch/x86/mm/numa_emulation.c @@ -61,7 +61,7 @@ static int __init emu_setup_memblk(struct numa_meminfo *ei, eb->nid = nid; if (emu_nid_to_phys[nid] == NUMA_NO_NODE) - emu_nid_to_phys[nid] = nid; + emu_nid_to_phys[nid] = pb->nid; pb->start += size; if (pb->start >= pb->end) { diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 2bdb8e8a9d7c85509d3ed2a70630e7a7c353c0eb..aafd4edfa2ac6416b4332fa171658dcf4428d85b 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -573,6 +573,15 @@ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte) { unsigned long address = __fix_to_virt(idx); +#ifdef CONFIG_X86_64 + /* + * Ensure that the static initial page tables are covering the + * fixmap completely. + */ + BUILD_BUG_ON(__end_of_permanent_fixed_addresses > + (FIXMAP_PMD_NUM * PTRS_PER_PTE)); +#endif + if (idx >= __end_of_fixed_addresses) { BUG(); return; diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c index d6f11accd37a0590a7eb3ff22a8aa4ecedb7670b..60c48f5d6b0eede5ed2456a8196fc7d9e8187afe 100644 --- a/arch/x86/mm/pti.c +++ b/arch/x86/mm/pti.c @@ -162,7 +162,7 @@ static __init p4d_t *pti_user_pagetable_walk_p4d(unsigned long address) if (pgd_none(*pgd)) { unsigned long new_p4d_page = __get_free_page(gfp); - if (!new_p4d_page) + if (WARN_ON_ONCE(!new_p4d_page)) return NULL; set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page))); @@ -181,13 +181,17 @@ static __init p4d_t *pti_user_pagetable_walk_p4d(unsigned long address) static __init pmd_t *pti_user_pagetable_walk_pmd(unsigned long address) { gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); - p4d_t *p4d = pti_user_pagetable_walk_p4d(address); + p4d_t *p4d; pud_t *pud; + p4d = pti_user_pagetable_walk_p4d(address); + if (!p4d) + return NULL; + BUILD_BUG_ON(p4d_large(*p4d) != 0); if (p4d_none(*p4d)) { unsigned long new_pud_page = __get_free_page(gfp); - if (!new_pud_page) + if (WARN_ON_ONCE(!new_pud_page)) return NULL; set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page))); @@ -201,7 +205,7 @@ static __init pmd_t *pti_user_pagetable_walk_pmd(unsigned long address) } if (pud_none(*pud)) { unsigned long new_pmd_page = __get_free_page(gfp); - if (!new_pmd_page) + if (WARN_ON_ONCE(!new_pmd_page)) return NULL; set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page))); @@ -220,12 +224,16 @@ static __init pmd_t *pti_user_pagetable_walk_pmd(unsigned long address) * * Returns a pointer to a PTE on success, or NULL on failure. 
*/ -static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address) +static pte_t *pti_user_pagetable_walk_pte(unsigned long address) { gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); - pmd_t *pmd = pti_user_pagetable_walk_pmd(address); + pmd_t *pmd; pte_t *pte; + pmd = pti_user_pagetable_walk_pmd(address); + if (!pmd) + return NULL; + /* We can't do anything sensible if we hit a large mapping. */ if (pmd_large(*pmd)) { WARN_ON(1); @@ -283,6 +291,10 @@ pti_clone_pmds(unsigned long start, unsigned long end, pmdval_t clear) p4d_t *p4d; pud_t *pud; + /* Overflow check */ + if (addr < start) + break; + pgd = pgd_offset_k(addr); if (WARN_ON(pgd_none(*pgd))) return; @@ -319,6 +331,9 @@ static void __init pti_clone_p4d(unsigned long addr) pgd_t *kernel_pgd; user_p4d = pti_user_pagetable_walk_p4d(addr); + if (!user_p4d) + return; + kernel_pgd = pgd_offset_k(addr); kernel_p4d = p4d_offset(kernel_pgd, addr); *user_p4d = *kernel_p4d; diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 0c936435ea9398727da162fd76c5ee4dc4a91892..83a3f4c935fc5ff00d6d408d8e709b92a86722fb 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -292,6 +292,10 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush); + /* Let nmi_uaccess_okay() know that we're changing CR3. */ + this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING); + barrier(); + if (need_flush) { this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id); this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen); @@ -322,6 +326,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, if (next != &init_mm) this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id); + /* Make sure we write CR3 before loaded_mm. */ + barrier(); + this_cpu_write(cpu_tlbstate.loaded_mm, next); this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid); } diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index b3526a98a5a51151f95d26932fe612a79373af7c..8ed11a5b1a9d8b2618ec43daeb264e1245205dce 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c @@ -425,14 +425,13 @@ static void xen_set_pud(pud_t *ptr, pud_t val) static void xen_set_pte_atomic(pte_t *ptep, pte_t pte) { trace_xen_mmu_set_pte_atomic(ptep, pte); - set_64bit((u64 *)ptep, native_pte_val(pte)); + __xen_set_pte(ptep, pte); } static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { trace_xen_mmu_pte_clear(mm, addr, ptep); - if (!xen_batched_set_pte(ptep, native_make_pte(0))) - native_pte_clear(mm, addr, ptep); + __xen_set_pte(ptep, native_make_pte(0)); } static void xen_pmd_clear(pmd_t *pmdp) @@ -1543,7 +1542,7 @@ static void __init xen_set_pte_init(pte_t *ptep, pte_t pte) pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) & pte_val_ma(pte)); #endif - native_set_pte(ptep, pte); + __xen_set_pte(ptep, pte); } /* Early in boot, while setting up the initial pagetable, assume @@ -1880,7 +1879,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) /* L3_k[511] -> level2_fixmap_pgt */ convert_pfn_mfn(level3_kernel_pgt); - /* L3_k[511][506] -> level1_fixmap_pgt */ + /* L3_k[511][508-FIXMAP_PMD_NUM ... 
507] -> level1_fixmap_pgt */ convert_pfn_mfn(level2_fixmap_pgt); /* We get [511][511] and have Xen's version of level2_kernel_pgt */ @@ -1925,7 +1924,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO); set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO); set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO); - set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO); + + for (i = 0; i < FIXMAP_PMD_NUM; i++) { + set_page_prot(level1_fixmap_pgt + i * PTRS_PER_PTE, + PAGE_KERNEL_RO); + } /* Pin down new L4 */ pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c index 7d00d4ad44d44d62f9a6a089a2c8d4a649e6aa08..95997e6c06960073c75b713cb2be76ec74c0886c 100644 --- a/arch/x86/xen/pmu.c +++ b/arch/x86/xen/pmu.c @@ -478,7 +478,7 @@ static void xen_convert_regs(const struct xen_pmu_regs *xen_regs, irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id) { int err, ret = IRQ_NONE; - struct pt_regs regs; + struct pt_regs regs = {0}; const struct xen_pmu_data *xenpmu_data = get_xenpmu_data(); uint8_t xenpmu_flags = get_xenpmu_flags(); diff --git a/arch/xtensa/include/asm/cacheasm.h b/arch/xtensa/include/asm/cacheasm.h index 2041abb10a2352a4b1f4323f3e1f0d9bdfff36ec..34545ecfdd6b7332df7fb486930f1f35c5532640 100644 --- a/arch/xtensa/include/asm/cacheasm.h +++ b/arch/xtensa/include/asm/cacheasm.h @@ -31,16 +31,32 @@ * */ - .macro __loop_cache_all ar at insn size line_width - movi \ar, 0 + .macro __loop_cache_unroll ar at insn size line_width max_immed + + .if (1 << (\line_width)) > (\max_immed) + .set _reps, 1 + .elseif (2 << (\line_width)) > (\max_immed) + .set _reps, 2 + .else + .set _reps, 4 + .endif + + __loopi \ar, \at, \size, (_reps << (\line_width)) + .set _index, 0 + .rep _reps + \insn \ar, _index << (\line_width) + .set _index, _index + 1 + .endr + __endla \ar, \at, _reps << (\line_width) + + .endm + - __loopi \ar, \at, \size, (4 << (\line_width)) - \insn \ar, 0 << (\line_width) - \insn \ar, 1 << (\line_width) - \insn \ar, 2 << (\line_width) - \insn \ar, 3 << (\line_width) - __endla \ar, \at, 4 << (\line_width) + .macro __loop_cache_all ar at insn size line_width max_immed + + movi \ar, 0 + __loop_cache_unroll \ar, \at, \insn, \size, \line_width, \max_immed .endm @@ -57,14 +73,9 @@ .endm - .macro __loop_cache_page ar at insn line_width + .macro __loop_cache_page ar at insn line_width max_immed - __loopi \ar, \at, PAGE_SIZE, 4 << (\line_width) - \insn \ar, 0 << (\line_width) - \insn \ar, 1 << (\line_width) - \insn \ar, 2 << (\line_width) - \insn \ar, 3 << (\line_width) - __endla \ar, \at, 4 << (\line_width) + __loop_cache_unroll \ar, \at, \insn, PAGE_SIZE, \line_width, \max_immed .endm @@ -72,7 +83,8 @@ .macro ___unlock_dcache_all ar at #if XCHAL_DCACHE_LINE_LOCKABLE && XCHAL_DCACHE_SIZE - __loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH + __loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE \ + XCHAL_DCACHE_LINEWIDTH 240 #endif .endm @@ -81,7 +93,8 @@ .macro ___unlock_icache_all ar at #if XCHAL_ICACHE_LINE_LOCKABLE && XCHAL_ICACHE_SIZE - __loop_cache_all \ar \at iiu XCHAL_ICACHE_SIZE XCHAL_ICACHE_LINEWIDTH + __loop_cache_all \ar \at iiu XCHAL_ICACHE_SIZE \ + XCHAL_ICACHE_LINEWIDTH 240 #endif .endm @@ -90,7 +103,8 @@ .macro ___flush_invalidate_dcache_all ar at #if XCHAL_DCACHE_SIZE - __loop_cache_all \ar \at diwbi XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH + __loop_cache_all \ar \at diwbi XCHAL_DCACHE_SIZE \ + XCHAL_DCACHE_LINEWIDTH 240 #endif .endm @@ -99,7 +113,8 @@ .macro 
___flush_dcache_all ar at #if XCHAL_DCACHE_SIZE - __loop_cache_all \ar \at diwb XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH + __loop_cache_all \ar \at diwb XCHAL_DCACHE_SIZE \ + XCHAL_DCACHE_LINEWIDTH 240 #endif .endm @@ -108,8 +123,8 @@ .macro ___invalidate_dcache_all ar at #if XCHAL_DCACHE_SIZE - __loop_cache_all \ar \at dii __stringify(DCACHE_WAY_SIZE) \ - XCHAL_DCACHE_LINEWIDTH + __loop_cache_all \ar \at dii XCHAL_DCACHE_SIZE \ + XCHAL_DCACHE_LINEWIDTH 1020 #endif .endm @@ -118,8 +133,8 @@ .macro ___invalidate_icache_all ar at #if XCHAL_ICACHE_SIZE - __loop_cache_all \ar \at iii __stringify(ICACHE_WAY_SIZE) \ - XCHAL_ICACHE_LINEWIDTH + __loop_cache_all \ar \at iii XCHAL_ICACHE_SIZE \ + XCHAL_ICACHE_LINEWIDTH 1020 #endif .endm @@ -166,7 +181,7 @@ .macro ___flush_invalidate_dcache_page ar as #if XCHAL_DCACHE_SIZE - __loop_cache_page \ar \as dhwbi XCHAL_DCACHE_LINEWIDTH + __loop_cache_page \ar \as dhwbi XCHAL_DCACHE_LINEWIDTH 1020 #endif .endm @@ -175,7 +190,7 @@ .macro ___flush_dcache_page ar as #if XCHAL_DCACHE_SIZE - __loop_cache_page \ar \as dhwb XCHAL_DCACHE_LINEWIDTH + __loop_cache_page \ar \as dhwb XCHAL_DCACHE_LINEWIDTH 1020 #endif .endm @@ -184,7 +199,7 @@ .macro ___invalidate_dcache_page ar as #if XCHAL_DCACHE_SIZE - __loop_cache_page \ar \as dhi XCHAL_DCACHE_LINEWIDTH + __loop_cache_page \ar \as dhi XCHAL_DCACHE_LINEWIDTH 1020 #endif .endm @@ -193,7 +208,7 @@ .macro ___invalidate_icache_page ar as #if XCHAL_ICACHE_SIZE - __loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH + __loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH 1020 #endif .endm diff --git a/arch/xtensa/platforms/iss/setup.c b/arch/xtensa/platforms/iss/setup.c index f4bbb28026f8be3006890fe236989db791c79e1d..58709e89a8ed1f41ed7acaad204d4d91e973386a 100644 --- a/arch/xtensa/platforms/iss/setup.c +++ b/arch/xtensa/platforms/iss/setup.c @@ -78,23 +78,28 @@ static struct notifier_block iss_panic_block = { void __init platform_setup(char **p_cmdline) { + static void *argv[COMMAND_LINE_SIZE / sizeof(void *)] __initdata; + static char cmdline[COMMAND_LINE_SIZE] __initdata; int argc = simc_argc(); int argv_size = simc_argv_size(); if (argc > 1) { - void **argv = alloc_bootmem(argv_size); - char *cmdline = alloc_bootmem(argv_size); - int i; + if (argv_size > sizeof(argv)) { + pr_err("%s: command line too long: argv_size = %d\n", + __func__, argv_size); + } else { + int i; - cmdline[0] = 0; - simc_argv((void *)argv); + cmdline[0] = 0; + simc_argv((void *)argv); - for (i = 1; i < argc; ++i) { - if (i > 1) - strcat(cmdline, " "); - strcat(cmdline, argv[i]); + for (i = 1; i < argc; ++i) { + if (i > 1) + strcat(cmdline, " "); + strcat(cmdline, argv[i]); + } + *p_cmdline = cmdline; } - *p_cmdline = cmdline; } atomic_notifier_chain_register(&panic_notifier_list, &iss_panic_block); diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c index 5d53e504acae58d02fa3d8c0b5cef4b625e758aa..afbbe5750a1f85b635e7cd9a903314c0729c26d4 100644 --- a/block/bfq-cgroup.c +++ b/block/bfq-cgroup.c @@ -224,9 +224,9 @@ static void bfqg_and_blkg_get(struct bfq_group *bfqg) void bfqg_and_blkg_put(struct bfq_group *bfqg) { - bfqg_put(bfqg); - blkg_put(bfqg_to_blkg(bfqg)); + + bfqg_put(bfqg); } void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq, @@ -887,7 +887,8 @@ static ssize_t bfq_io_set_weight(struct kernfs_open_file *of, if (ret) return ret; - return bfq_io_set_weight_legacy(of_css(of), NULL, weight); + ret = bfq_io_set_weight_legacy(of_css(of), NULL, weight); + return ret ?: nbytes; } static int bfqg_print_stat(struct seq_file 
*sf, void *v) diff --git a/block/bio.c b/block/bio.c index 194d28cdc642b00314567093ab57daa0b5c744c8..2e5d881423b8216e76bc1d06dd9d2ec78549faf5 100644 --- a/block/bio.c +++ b/block/bio.c @@ -156,7 +156,7 @@ out: unsigned int bvec_nr_vecs(unsigned short idx) { - return bvec_slabs[idx].nr_vecs; + return bvec_slabs[--idx].nr_vecs; } void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx) diff --git a/block/blk-core.c b/block/blk-core.c index 81ab56cf7efa329eeb3562203838670c32fc2e3a..f005077ae2911a34658a9194983eb42616204b44 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -672,9 +672,13 @@ void blk_cleanup_queue(struct request_queue *q) * make sure all in-progress dispatch are completed because * blk_freeze_queue() can only complete all requests, and * dispatch may still be in-progress since we dispatch requests - * from more than one contexts + * from more than one contexts. + * + * No need to quiesce queue if it isn't initialized yet since + * blk_freeze_queue() should be enough for cases of passthrough + * request. */ - if (q->mq_ops) + if (q->mq_ops && blk_queue_init_done(q)) blk_mq_quiesce_queue(q); /* for synchronous bio-based driver finish in-flight integrity i/o */ @@ -1038,6 +1042,7 @@ out_exit_flush_rq: q->exit_rq_fn(q, q->fq->flush_rq); out_free_flush_queue: blk_free_flush_queue(q->fq); + q->fq = NULL; return -ENOMEM; } EXPORT_SYMBOL(blk_init_allocated_queue); @@ -3463,9 +3468,11 @@ EXPORT_SYMBOL(blk_finish_plug); */ void blk_pm_runtime_init(struct request_queue *q, struct device *dev) { - /* not support for RQF_PM and ->rpm_status in blk-mq yet */ - if (q->mq_ops) + /* Don't enable runtime PM for blk-mq until it is ready */ + if (q->mq_ops) { + pm_runtime_disable(dev); return; + } q->dev = dev; q->rpm_status = RPM_ACTIVE; diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index eca011fdfa0ede407443c2de9c1e7d45787f4313..ae5d8f10a27c59a957911dd66fa9b68a7fc5c6e6 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -236,7 +236,8 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio) return e->type->ops.mq.bio_merge(hctx, bio); } - if (hctx->flags & BLK_MQ_F_SHOULD_MERGE) { + if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) && + !list_empty_careful(&ctx->rq_list)) { /* default per sw-queue merge */ spin_lock(&ctx->lock); ret = blk_mq_attempt_merge(q, ctx, bio); diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 6714507aa6c75b716d34a53c708952bf5d0ae619..3d2ab65d2dd15a012d3ff1efb71ad5b5aca6ddd5 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -416,8 +416,6 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, if (tdepth <= tags->nr_reserved_tags) return -EINVAL; - tdepth -= tags->nr_reserved_tags; - /* * If we are allowed to grow beyond the original size, allocate * a new set of tags before freeing the old one. @@ -437,7 +435,8 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, if (tdepth > 16 * BLKDEV_MAX_RQ) return -EINVAL; - new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth, 0); + new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth, + tags->nr_reserved_tags); if (!new) return -ENOMEM; ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth); @@ -454,7 +453,8 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, * Don't need (or can't) update reserved tags here, they * remain static and should never need resizing. 
*/ - sbitmap_queue_resize(&tags->bitmap_tags, tdepth); + sbitmap_queue_resize(&tags->bitmap_tags, + tdepth - tags->nr_reserved_tags); } return 0; diff --git a/block/blk-settings.c b/block/blk-settings.c index 8559e9563c5255f114309e95a9d09b71550d0af2..474b0b95fcd16e0069639bfc264637a601703a99 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -128,7 +128,7 @@ void blk_set_stacking_limits(struct queue_limits *lim) /* Inherit limits from component devices */ lim->max_segments = USHRT_MAX; - lim->max_discard_segments = 1; + lim->max_discard_segments = USHRT_MAX; lim->max_hw_sectors = UINT_MAX; lim->max_segment_size = UINT_MAX; lim->max_sectors = UINT_MAX; diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 9f342ef1ad426fa7d60e00fffeb313409cbbb3f4..9c4f1c496c90c9af5d59bb94be5c24fe48c1debb 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -4741,12 +4741,13 @@ USEC_SHOW_FUNCTION(cfq_target_latency_us_show, cfqd->cfq_target_latency); static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \ { \ struct cfq_data *cfqd = e->elevator_data; \ - unsigned int __data; \ + unsigned int __data, __min = (MIN), __max = (MAX); \ + \ cfq_var_store(&__data, (page)); \ - if (__data < (MIN)) \ - __data = (MIN); \ - else if (__data > (MAX)) \ - __data = (MAX); \ + if (__data < __min) \ + __data = __min; \ + else if (__data > __max) \ + __data = __max; \ if (__CONV) \ *(__PTR) = (u64)__data * NSEC_PER_MSEC; \ else \ @@ -4775,12 +4776,13 @@ STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \ { \ struct cfq_data *cfqd = e->elevator_data; \ - unsigned int __data; \ + unsigned int __data, __min = (MIN), __max = (MAX); \ + \ cfq_var_store(&__data, (page)); \ - if (__data < (MIN)) \ - __data = (MIN); \ - else if (__data > (MAX)) \ - __data = (MAX); \ + if (__data < __min) \ + __data = __min; \ + else if (__data > __max) \ + __data = __max; \ *(__PTR) = (u64)__data * NSEC_PER_USEC; \ return count; \ } diff --git a/block/partitions/aix.c b/block/partitions/aix.c index 007f95eea0e1a9b6b79751ce6f109d151daa12f4..903f3ed175d0263344fe33ecbe19dfaeffcb202e 100644 --- a/block/partitions/aix.c +++ b/block/partitions/aix.c @@ -178,7 +178,7 @@ int aix_partition(struct parsed_partitions *state) u32 vgda_sector = 0; u32 vgda_len = 0; int numlvs = 0; - struct pvd *pvd; + struct pvd *pvd = NULL; struct lv_info { unsigned short pps_per_lv; unsigned short pps_found; @@ -232,10 +232,11 @@ int aix_partition(struct parsed_partitions *state) if (lvip[i].pps_per_lv) foundlvs += 1; } + /* pvd loops depend on n[].name and lvip[].pps_per_lv */ + pvd = alloc_pvd(state, vgda_sector + 17); } put_dev_sector(sect); } - pvd = alloc_pvd(state, vgda_sector + 17); if (pvd) { int numpps = be16_to_cpu(pvd->pp_count); int psn_part1 = be32_to_cpu(pvd->psn_part1); @@ -282,10 +283,14 @@ int aix_partition(struct parsed_partitions *state) next_lp_ix += 1; } for (i = 0; i < state->limit; i += 1) - if (lvip[i].pps_found && !lvip[i].lv_is_contiguous) + if (lvip[i].pps_found && !lvip[i].lv_is_contiguous) { + char tmp[sizeof(n[i].name) + 1]; // null char + + snprintf(tmp, sizeof(tmp), "%s", n[i].name); pr_warn("partition %s (%u pp's found) is " "not contiguous\n", - n[i].name, lvip[i].pps_found); + tmp, lvip[i].pps_found); + } kfree(pvd); } kfree(n); diff --git a/block/sed-opal.c b/block/sed-opal.c index 9ed51d0c6b1d171fc2eab785ef854b64f93721fe..4f5e70d4abc3cd282db163c6614b24c31a4c0fe5 100644 --- 
a/block/sed-opal.c +++ b/block/sed-opal.c @@ -877,7 +877,7 @@ static size_t response_get_string(const struct parsed_resp *resp, int n, return 0; } - if (n > resp->num) { + if (n >= resp->num) { pr_debug("Response has %d tokens. Can't access %d\n", resp->num, n); return 0; @@ -899,7 +899,7 @@ static u64 response_get_u64(const struct parsed_resp *resp, int n) return 0; } - if (n > resp->num) { + if (n >= resp->num) { pr_debug("Response has %d tokens. Can't access %d\n", resp->num, n); return 0; diff --git a/certs/system_keyring.c b/certs/system_keyring.c index 6251d1b27f0cbd1414287770c8510774a61ba4bc..81728717523d0513ff5cbff1e497c9e81a7fb7dc 100644 --- a/certs/system_keyring.c +++ b/certs/system_keyring.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -230,7 +231,7 @@ int verify_pkcs7_signature(const void *data, size_t len, if (!trusted_keys) { trusted_keys = builtin_trusted_keys; - } else if (trusted_keys == (void *)1UL) { + } else if (trusted_keys == VERIFY_USE_SECONDARY_KEYRING) { #ifdef CONFIG_SECONDARY_TRUSTED_KEYRING trusted_keys = secondary_trusted_keys; #else diff --git a/crypto/Makefile b/crypto/Makefile index adaf2c63baeb63cfdd98afd35fe4eca53a3de869..56282e2d75ad9c9a28ce63213bbcafc4bc446f45 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -98,7 +98,7 @@ obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149 obj-$(CONFIG_CRYPTO_AES) += aes_generic.o -CFLAGS_aes_generic.o := $(call cc-ifversion, -ge, 0701, -Os) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83356 +CFLAGS_aes_generic.o := $(call cc-option,-fno-code-hoisting) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83356 obj-$(CONFIG_CRYPTO_AES_TI) += aes_ti.o obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c index 4ee7c041bb82b7ea6e1133e03f5675479e0eb8a1..8882e90e868eade5b77fc310fb5238dd322e9f03 100644 --- a/crypto/ablkcipher.c +++ b/crypto/ablkcipher.c @@ -368,6 +368,7 @@ static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg) strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type)); strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "", sizeof(rblkcipher.geniv)); + rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0'; rblkcipher.blocksize = alg->cra_blocksize; rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize; @@ -442,6 +443,7 @@ static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg) strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type)); strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "", sizeof(rblkcipher.geniv)); + rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0'; rblkcipher.blocksize = alg->cra_blocksize; rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize; diff --git a/crypto/api.c b/crypto/api.c index 2b1cf0c1dcea23593580efb3c41253196cc6240b..089e648d2fa95ac63954225f84c19ac1f1a64f6e 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -215,7 +215,7 @@ struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask) mask &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD); alg = crypto_alg_lookup(name, type, mask); - if (!alg) { + if (!alg && !(mask & CRYPTO_NOLOAD)) { request_module("crypto-%s", name); if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask & diff --git a/crypto/asymmetric_keys/pkcs7_key_type.c 
b/crypto/asymmetric_keys/pkcs7_key_type.c index 1063b644efcdb3b2cbde33483207b7fc06e4eb5b..b2aa925a84bc512cabe5446c5c65e769fac1ff96 100644 --- a/crypto/asymmetric_keys/pkcs7_key_type.c +++ b/crypto/asymmetric_keys/pkcs7_key_type.c @@ -62,7 +62,7 @@ static int pkcs7_preparse(struct key_preparsed_payload *prep) return verify_pkcs7_signature(NULL, 0, prep->data, prep->datalen, - (void *)1UL, usage, + VERIFY_USE_SECONDARY_KEYRING, usage, pkcs7_view_content, prep); } diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c index d84c6920ada9fe0b814df033b066edcb1e06e22a..830821f234d2805e9c3ecc8348a06ed0e6f47820 100644 --- a/crypto/blkcipher.c +++ b/crypto/blkcipher.c @@ -511,6 +511,7 @@ static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg) strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type)); strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "", sizeof(rblkcipher.geniv)); + rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0'; rblkcipher.blocksize = alg->cra_blocksize; rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize; diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 58bc28aff3aafebca3f12afcb15136a5e95fa66d..3d624c72c6c2acb96fd175a617312c366568dea7 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -2029,6 +2029,17 @@ static inline void acpi_ec_query_exit(void) } } +static const struct dmi_system_id acpi_ec_no_wakeup[] = { + { + .ident = "Thinkpad X1 Carbon 6th", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_FAMILY, "Thinkpad X1 Carbon 6th"), + }, + }, + { }, +}; + int __init acpi_ec_init(void) { int result; @@ -2039,6 +2050,15 @@ int __init acpi_ec_init(void) if (result) return result; + /* + * Disable EC wakeup on following systems to prevent periodic + * wakeup from EC GPE. + */ + if (dmi_check_system(acpi_ec_no_wakeup)) { + ec_no_wakeup = true; + pr_debug("Disabling EC wakeup on suspend-to-idle\n"); + } + /* Drivers must be started after acpi_ec_query_init() */ dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver); /* diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index d56822f58ab124143baef6fa2a5322db5dcef0f0..8260b90eb64b2e7e020f5e316eef5c0585d7ccb8 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -224,6 +224,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, const guid_t *guid; int rc, i; + if (cmd_rc) + *cmd_rc = -EINVAL; func = cmd; if (cmd == ND_CMD_CALL) { call_pkg = buf; @@ -314,6 +316,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, * If we return an error (like elsewhere) then caller wouldn't * be able to rely upon data returned to make calculation. */ + if (cmd_rc) + *cmd_rc = 0; return 0; } diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index c0984d33c4c856dae04fc4eb7b111e24d287a120..2eddbb1fae6a0be9418002ff147968a24195ffb4 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -1599,7 +1599,8 @@ static int acpi_add_single_object(struct acpi_device **child, * Note this must be done before the get power-/wakeup_dev-flags calls. */ if (type == ACPI_BUS_TYPE_DEVICE) - acpi_bus_get_status(device); + if (acpi_bus_get_status(device) < 0) + acpi_set_device_status(device, 0); acpi_bus_get_power_flags(device); acpi_bus_get_wakeup_device_flags(device); @@ -1677,7 +1678,7 @@ static int acpi_bus_type_and_status(acpi_handle handle, int *type, * acpi_add_single_object updates this once we've an acpi_device * so that acpi_bus_get_status' quirk handling can be used. 
*/ - *sta = 0; + *sta = ACPI_STA_DEFAULT; break; case ACPI_TYPE_PROCESSOR: *type = ACPI_BUS_TYPE_PROCESSOR; diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index 6cb14826867605fe1c3d25aa64eb0f242ad73e66..58e4658f9dd6c05779663d8330df3095ddb0d8fc 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -324,6 +324,34 @@ err_no_vma: return vma ? -ENOMEM : -ESRCH; } +static inline void binder_alloc_set_vma(struct binder_alloc *alloc, + struct vm_area_struct *vma) +{ + if (vma) + alloc->vma_vm_mm = vma->vm_mm; + /* + * If we see alloc->vma is not NULL, buffer data structures set up + * completely. Look at smp_rmb side binder_alloc_get_vma. + * We also want to guarantee new alloc->vma_vm_mm is always visible + * if alloc->vma is set. + */ + smp_wmb(); + alloc->vma = vma; +} + +static inline struct vm_area_struct *binder_alloc_get_vma( + struct binder_alloc *alloc) +{ + struct vm_area_struct *vma = NULL; + + if (alloc->vma) { + /* Look at description in binder_alloc_set_vma */ + smp_rmb(); + vma = alloc->vma; + } + return vma; +} + struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc, size_t data_size, size_t offsets_size, @@ -339,7 +367,7 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc, size_t size, data_offsets_size; int ret; - if (alloc->vma == NULL) { + if (!binder_alloc_get_vma(alloc)) { pr_err("%d: binder_alloc_buf, no vma\n", alloc->pid); return ERR_PTR(-ESRCH); @@ -712,9 +740,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc, buffer->free = 1; binder_insert_free_buffer(alloc, buffer); alloc->free_async_space = alloc->buffer_size / 2; - barrier(); - alloc->vma = vma; - alloc->vma_vm_mm = vma->vm_mm; + binder_alloc_set_vma(alloc, vma); mmgrab(alloc->vma_vm_mm); return 0; @@ -741,10 +767,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc) int buffers, page_count; struct binder_buffer *buffer; - BUG_ON(alloc->vma); - buffers = 0; mutex_lock(&alloc->mutex); + BUG_ON(alloc->vma); + while ((n = rb_first(&alloc->allocated_buffers))) { buffer = rb_entry(n, struct binder_buffer, rb_node); @@ -886,7 +912,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc) */ void binder_alloc_vma_close(struct binder_alloc *alloc) { - WRITE_ONCE(alloc->vma, NULL); + binder_alloc_set_vma(alloc, NULL); } /** @@ -921,7 +947,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item, index = page - alloc->pages; page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE; - vma = alloc->vma; + vma = binder_alloc_get_vma(alloc); if (vma) { if (!mmget_not_zero(alloc->vma_vm_mm)) goto err_mmget; diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 5ae268b8514e228b9b7830ad8346e029e7984f13..cda9a0b5bdaaa2c9783f460c8c8a1594e55cb1ae 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include #include @@ -1135,10 +1136,12 @@ static ssize_t ahci_led_store(struct ata_port *ap, const char *buf, /* get the slot number from the message */ pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8; - if (pmp < EM_MAX_SLOTS) + if (pmp < EM_MAX_SLOTS) { + pmp = array_index_nospec(pmp, EM_MAX_SLOTS); emp = &pp->em_priv[pmp]; - else + } else { return -EINVAL; + } /* mask off the activity bits if we are in sw_activity * mode, user should turn off sw_activity before setting @@ -2093,7 +2096,7 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep) struct ahci_host_priv *hpriv = 
ap->host->private_data; void __iomem *port_mmio = ahci_port_base(ap); struct ata_device *dev = ap->link.device; - u32 devslp, dm, dito, mdat, deto; + u32 devslp, dm, dito, mdat, deto, dito_conf; int rc; unsigned int err_mask; @@ -2117,8 +2120,15 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep) return; } - /* device sleep was already enabled */ - if (devslp & PORT_DEVSLP_ADSE) + dm = (devslp & PORT_DEVSLP_DM_MASK) >> PORT_DEVSLP_DM_OFFSET; + dito = devslp_idle_timeout / (dm + 1); + if (dito > 0x3ff) + dito = 0x3ff; + + dito_conf = (devslp >> PORT_DEVSLP_DITO_OFFSET) & 0x3FF; + + /* device sleep was already enabled and same dito */ + if ((devslp & PORT_DEVSLP_ADSE) && (dito_conf == dito)) return; /* set DITO, MDAT, DETO and enable DevSlp, need to stop engine first */ @@ -2126,11 +2136,6 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep) if (rc) return; - dm = (devslp & PORT_DEVSLP_DM_MASK) >> PORT_DEVSLP_DM_OFFSET; - dito = devslp_idle_timeout / (dm + 1); - if (dito > 0x3ff) - dito = 0x3ff; - /* Use the nominal value 10 ms if the read MDAT is zero, * the nominal value of DETO is 20 ms. */ @@ -2148,6 +2153,8 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep) deto = 20; } + /* Make dito, mdat, deto bits to 0s */ + devslp &= ~GENMASK_ULL(24, 2); devslp |= ((dito << PORT_DEVSLP_DITO_OFFSET) | (mdat << PORT_DEVSLP_MDAT_OFFSET) | (deto << PORT_DEVSLP_DETO_OFFSET) | diff --git a/drivers/ata/pata_ftide010.c b/drivers/ata/pata_ftide010.c index 5d4b72e21161a888c44997b448f602cef99a6709..569a4a662dcd4deb9d9edb0dbca4001f798bcab5 100644 --- a/drivers/ata/pata_ftide010.c +++ b/drivers/ata/pata_ftide010.c @@ -256,14 +256,12 @@ static struct ata_port_operations pata_ftide010_port_ops = { .qc_issue = ftide010_qc_issue, }; -static struct ata_port_info ftide010_port_info[] = { - { - .flags = ATA_FLAG_SLAVE_POSS, - .mwdma_mask = ATA_MWDMA2, - .udma_mask = ATA_UDMA6, - .pio_mask = ATA_PIO4, - .port_ops = &pata_ftide010_port_ops, - }, +static struct ata_port_info ftide010_port_info = { + .flags = ATA_FLAG_SLAVE_POSS, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, + .pio_mask = ATA_PIO4, + .port_ops = &pata_ftide010_port_ops, }; #if IS_ENABLED(CONFIG_SATA_GEMINI) @@ -349,6 +347,7 @@ static int pata_ftide010_gemini_cable_detect(struct ata_port *ap) } static int pata_ftide010_gemini_init(struct ftide010 *ftide, + struct ata_port_info *pi, bool is_ata1) { struct device *dev = ftide->dev; @@ -373,7 +372,13 @@ static int pata_ftide010_gemini_init(struct ftide010 *ftide, /* Flag port as SATA-capable */ if (gemini_sata_bridge_enabled(sg, is_ata1)) - ftide010_port_info[0].flags |= ATA_FLAG_SATA; + pi->flags |= ATA_FLAG_SATA; + + /* This device has broken DMA, only PIO works */ + if (of_machine_is_compatible("itian,sq201")) { + pi->mwdma_mask = 0; + pi->udma_mask = 0; + } /* * We assume that a simple 40-wire cable is used in the PATA mode. 
@@ -435,6 +440,7 @@ static int pata_ftide010_gemini_init(struct ftide010 *ftide, } #else static int pata_ftide010_gemini_init(struct ftide010 *ftide, + struct ata_port_info *pi, bool is_ata1) { return -ENOTSUPP; @@ -446,7 +452,7 @@ static int pata_ftide010_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; - const struct ata_port_info pi = ftide010_port_info[0]; + struct ata_port_info pi = ftide010_port_info; const struct ata_port_info *ppi[] = { &pi, NULL }; struct ftide010 *ftide; struct resource *res; @@ -490,6 +496,7 @@ static int pata_ftide010_probe(struct platform_device *pdev) * are ATA0. This will also set up the cable types. */ ret = pata_ftide010_gemini_init(ftide, + &pi, (res->start == 0x63400000)); if (ret) goto err_dis_clk; diff --git a/drivers/base/core.c b/drivers/base/core.c index b054cb2fd2b9afb659b6a3c3b7439afed627418f..eb066cc827ef40b09ea9a54319ee437b0ff999f4 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -2783,6 +2783,9 @@ void device_shutdown(void) { struct device *dev, *parent; + wait_for_device_probe(); + device_block_probing(); + spin_lock(&devices_kset->list_lock); /* * Walk the devices list backward, shutting down each in turn. diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c index 8e2e4757adcb02c9cd07a0b868a45e0bb3baa3eb..5a42ae4078c27febf8194901d8fa0e30b43bd1aa 100644 --- a/drivers/base/power/clock_ops.c +++ b/drivers/base/power/clock_ops.c @@ -185,7 +185,7 @@ EXPORT_SYMBOL_GPL(of_pm_clk_add_clk); int of_pm_clk_add_clks(struct device *dev) { struct clk **clks; - unsigned int i, count; + int i, count; int ret; if (!dev || !dev->of_node) diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index de8566e5533499b6170c669a08c2aa8688e67057..c72071c300bbb74cdd4c7a7081f5b6f6a2c7b277 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -1244,8 +1244,8 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long _drbd_start_io_acct(device, req); /* process discards always from our submitter thread */ - if ((bio_op(bio) & REQ_OP_WRITE_ZEROES) || - (bio_op(bio) & REQ_OP_DISCARD)) + if (bio_op(bio) == REQ_OP_WRITE_ZEROES || + bio_op(bio) == REQ_OP_DISCARD) goto queue_for_submitter_thread; if (rw == WRITE && req->private_bio && req->i.size diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 60c086a536094d4c19ad31a023d49274d54c3dd5..3d0287e212fe5fb1a2042909fc4a8891a3e21d66 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -3462,6 +3462,9 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int (struct floppy_struct **)&outparam); if (ret) return ret; + memcpy(&inparam.g, outparam, + offsetof(struct floppy_struct, name)); + outparam = &inparam.g; break; case FDMSGON: UDP->flags |= FTD_MSG; diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 6fb64e73bc9678e079a77823b5e4a3e31dba8dca..fe1414df0f3319cb201eb9fb63b14c4773c1c3fa 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -76,6 +76,7 @@ struct link_dead_args { #define NBD_HAS_CONFIG_REF 4 #define NBD_BOUND 5 #define NBD_DESTROY_ON_DISCONNECT 6 +#define NBD_DISCONNECT_ON_CLOSE 7 struct nbd_config { u32 flags; @@ -111,12 +112,16 @@ struct nbd_device { struct task_struct *task_setup; }; +#define NBD_CMD_REQUEUED 1 + struct nbd_cmd { struct nbd_device *nbd; + struct mutex lock; int index; int cookie; - struct completion send_complete; blk_status_t status; + unsigned long flags; + 
u32 cmd_cookie; }; #if IS_ENABLED(CONFIG_DEBUG_FS) @@ -138,12 +143,42 @@ static void nbd_config_put(struct nbd_device *nbd); static void nbd_connect_reply(struct genl_info *info, int index); static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info); static void nbd_dead_link_work(struct work_struct *work); +static void nbd_disconnect_and_put(struct nbd_device *nbd); static inline struct device *nbd_to_dev(struct nbd_device *nbd) { return disk_to_dev(nbd->disk); } +static void nbd_requeue_cmd(struct nbd_cmd *cmd) +{ + struct request *req = blk_mq_rq_from_pdu(cmd); + + if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags)) + blk_mq_requeue_request(req, true); +} + +#define NBD_COOKIE_BITS 32 + +static u64 nbd_cmd_handle(struct nbd_cmd *cmd) +{ + struct request *req = blk_mq_rq_from_pdu(cmd); + u32 tag = blk_mq_unique_tag(req); + u64 cookie = cmd->cmd_cookie; + + return (cookie << NBD_COOKIE_BITS) | tag; +} + +static u32 nbd_handle_to_tag(u64 handle) +{ + return (u32)handle; +} + +static u32 nbd_handle_to_cookie(u64 handle) +{ + return (u32)(handle >> NBD_COOKIE_BITS); +} + static const char *nbdcmd_to_ascii(int cmd) { switch (cmd) { @@ -304,6 +339,9 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req, } config = nbd->config; + if (!mutex_trylock(&cmd->lock)) + return BLK_EH_RESET_TIMER; + if (config->num_connections > 1) { dev_err_ratelimited(nbd_to_dev(nbd), "Connection timed out, retrying\n"); @@ -326,7 +364,8 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req, nbd_mark_nsock_dead(nbd, nsock, 1); mutex_unlock(&nsock->tx_lock); } - blk_mq_requeue_request(req, true); + mutex_unlock(&cmd->lock); + nbd_requeue_cmd(cmd); nbd_config_put(nbd); return BLK_EH_NOT_HANDLED; } @@ -336,6 +375,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req, } set_bit(NBD_TIMEDOUT, &config->runtime_flags); cmd->status = BLK_STS_IOERR; + mutex_unlock(&cmd->lock); sock_shutdown(nbd); nbd_config_put(nbd); @@ -412,9 +452,9 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) struct iov_iter from; unsigned long size = blk_rq_bytes(req); struct bio *bio; + u64 handle; u32 type; u32 nbd_cmd_flags = 0; - u32 tag = blk_mq_unique_tag(req); int sent = nsock->sent, skip = 0; iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request)); @@ -456,6 +496,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) goto send_pages; } iov_iter_advance(&from, sent); + } else { + cmd->cmd_cookie++; } cmd->index = index; cmd->cookie = nsock->cookie; @@ -464,7 +506,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9); request.len = htonl(size); } - memcpy(request.handle, &tag, sizeof(tag)); + handle = nbd_cmd_handle(cmd); + memcpy(request.handle, &handle, sizeof(handle)); dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n", cmd, nbdcmd_to_ascii(type), @@ -482,6 +525,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) nsock->pending = req; nsock->sent = sent; } + set_bit(NBD_CMD_REQUEUED, &cmd->flags); return BLK_STS_RESOURCE; } dev_err_ratelimited(disk_to_dev(nbd->disk), @@ -523,6 +567,7 @@ send_pages: */ nsock->pending = req; nsock->sent = sent; + set_bit(NBD_CMD_REQUEUED, &cmd->flags); return BLK_STS_RESOURCE; } dev_err(disk_to_dev(nbd->disk), @@ -555,10 +600,12 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index) struct nbd_reply reply; struct 
nbd_cmd *cmd; struct request *req = NULL; + u64 handle; u16 hwq; u32 tag; struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)}; struct iov_iter to; + int ret = 0; reply.magic = 0; iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply)); @@ -576,8 +623,8 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index) return ERR_PTR(-EPROTO); } - memcpy(&tag, reply.handle, sizeof(u32)); - + memcpy(&handle, reply.handle, sizeof(handle)); + tag = nbd_handle_to_tag(handle); hwq = blk_mq_unique_tag_to_hwq(tag); if (hwq < nbd->tag_set.nr_hw_queues) req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq], @@ -588,11 +635,25 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index) return ERR_PTR(-ENOENT); } cmd = blk_mq_rq_to_pdu(req); + + mutex_lock(&cmd->lock); + if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) { + dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n", + req, cmd->cmd_cookie, nbd_handle_to_cookie(handle)); + ret = -ENOENT; + goto out; + } + if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) { + dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n", + req); + ret = -ENOENT; + goto out; + } if (ntohl(reply.error)) { dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n", ntohl(reply.error)); cmd->status = BLK_STS_IOERR; - return cmd; + goto out; } dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", cmd); @@ -617,18 +678,18 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index) if (nbd_disconnected(config) || config->num_connections <= 1) { cmd->status = BLK_STS_IOERR; - return cmd; + goto out; } - return ERR_PTR(-EIO); + ret = -EIO; + goto out; } dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n", cmd, bvec.bv_len); } - } else { - /* See the comment in nbd_queue_rq. */ - wait_for_completion(&cmd->send_complete); } - return cmd; +out: + mutex_unlock(&cmd->lock); + return ret ? ERR_PTR(ret) : cmd; } static void recv_work(struct work_struct *work) @@ -791,7 +852,7 @@ again: */ blk_mq_start_request(req); if (unlikely(nsock->pending && nsock->pending != req)) { - blk_mq_requeue_request(req, true); + nbd_requeue_cmd(cmd); ret = 0; goto out; } @@ -804,7 +865,7 @@ again: dev_err_ratelimited(disk_to_dev(nbd->disk), "Request send failed, requeueing\n"); nbd_mark_nsock_dead(nbd, nsock, 1); - blk_mq_requeue_request(req, true); + nbd_requeue_cmd(cmd); ret = 0; } out: @@ -828,7 +889,8 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx, * that the server is misbehaving (or there was an error) before we're * done sending everything over the wire. */ - init_completion(&cmd->send_complete); + mutex_lock(&cmd->lock); + clear_bit(NBD_CMD_REQUEUED, &cmd->flags); /* We can be called directly from the user space process, which means we * could possibly have signals pending so our sendmsg will fail. 
In @@ -840,7 +902,7 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx, ret = BLK_STS_IOERR; else if (!ret) ret = BLK_STS_OK; - complete(&cmd->send_complete); + mutex_unlock(&cmd->lock); return ret; } @@ -1166,6 +1228,9 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, case NBD_SET_SOCK: return nbd_add_socket(nbd, arg, false); case NBD_SET_BLKSIZE: + if (!arg || !is_power_of_2(arg) || arg < 512 || + arg > PAGE_SIZE) + return -EINVAL; nbd_size_set(nbd, arg, div_s64(config->bytesize, arg)); return 0; @@ -1291,6 +1356,12 @@ out: static void nbd_release(struct gendisk *disk, fmode_t mode) { struct nbd_device *nbd = disk->private_data; + struct block_device *bdev = bdget_disk(disk, 0); + + if (test_bit(NBD_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) && + bdev->bd_openers == 0) + nbd_disconnect_and_put(nbd); + nbd_config_put(nbd); nbd_put(nbd); } @@ -1438,6 +1509,8 @@ static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq, { struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq); cmd->nbd = set->driver_data; + cmd->flags = 0; + mutex_init(&cmd->lock); return 0; } @@ -1690,6 +1763,10 @@ again: &config->runtime_flags); put_dev = true; } + if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) { + set_bit(NBD_DISCONNECT_ON_CLOSE, + &config->runtime_flags); + } } if (info->attrs[NBD_ATTR_SOCKETS]) { @@ -1734,6 +1811,16 @@ out: return ret; } +static void nbd_disconnect_and_put(struct nbd_device *nbd) +{ + mutex_lock(&nbd->config_lock); + nbd_disconnect(nbd); + mutex_unlock(&nbd->config_lock); + if (test_and_clear_bit(NBD_HAS_CONFIG_REF, + &nbd->config->runtime_flags)) + nbd_config_put(nbd); +} + static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info) { struct nbd_device *nbd; @@ -1766,12 +1853,7 @@ static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info) nbd_put(nbd); return 0; } - mutex_lock(&nbd->config_lock); - nbd_disconnect(nbd); - mutex_unlock(&nbd->config_lock); - if (test_and_clear_bit(NBD_HAS_CONFIG_REF, - &nbd->config->runtime_flags)) - nbd_config_put(nbd); + nbd_disconnect_and_put(nbd); nbd_config_put(nbd); nbd_put(nbd); return 0; @@ -1782,7 +1864,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info) struct nbd_device *nbd = NULL; struct nbd_config *config; int index; - int ret = -EINVAL; + int ret = 0; bool put_dev = false; if (!netlink_capable(skb, CAP_SYS_ADMIN)) @@ -1822,6 +1904,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info) !nbd->task_recv) { dev_err(nbd_to_dev(nbd), "not configured, cannot reconfigure\n"); + ret = -EINVAL; goto out; } @@ -1846,6 +1929,14 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info) &config->runtime_flags)) refcount_inc(&nbd->refs); } + + if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) { + set_bit(NBD_DISCONNECT_ON_CLOSE, + &config->runtime_flags); + } else { + clear_bit(NBD_DISCONNECT_ON_CLOSE, + &config->runtime_flags); + } } if (info->attrs[NBD_ATTR_SOCKETS]) { diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 531a0915066b313462d6208359ecea4102397215..11ec92e47455a3fafc4a853da977adb31ae2bde6 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -67,7 +67,7 @@ #include #include #include - +#include #include #define DRIVER_NAME "pktcdvd" @@ -2231,6 +2231,8 @@ static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor) { if (dev_minor >= MAX_WRITERS) return NULL; + + dev_minor = array_index_nospec(dev_minor, MAX_WRITERS); return 
pkt_devs[dev_minor]; } diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 9839656465eae31174355d2f4bddc4d8a2f60f67..c5d61209eb052c81e134630d89379901da7ffce4 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -321,6 +321,7 @@ static ssize_t backing_dev_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { char *file_name; + size_t sz; struct file *backing_dev = NULL; struct inode *inode; struct address_space *mapping; @@ -341,7 +342,11 @@ static ssize_t backing_dev_store(struct device *dev, goto out; } - strlcpy(file_name, buf, len); + strlcpy(file_name, buf, PATH_MAX); + /* ignore trailing newline */ + sz = strlen(file_name); + if (sz > 0 && file_name[sz - 1] == '\n') + file_name[sz - 1] = 0x00; backing_dev = filp_open(file_name, O_RDWR|O_LARGEFILE, 0); if (IS_ERR(backing_dev)) { diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index b33c8d6eb8c757f9af96a9b4d34b26fbd2aee4af..500d4d632e4863f7f73b2e84486a4134f636c136 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig @@ -146,6 +146,7 @@ config BT_HCIUART_LL config BT_HCIUART_3WIRE bool "Three-wire UART (H5) protocol support" depends on BT_HCIUART + depends on BT_HCIUART_SERDEV help The HCI Three-wire UART Transport Layer makes it possible to user the Bluetooth HCI over a serial port interface. The HCI diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 819521d5895e1dd3b78105d0b7f52612358ed982..b8dffe937f4ff7df519ed2e106d6216cd910ffc3 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -375,6 +375,7 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x7392, 0xa611), .driver_info = BTUSB_REALTEK }, /* Additional Realtek 8723DE Bluetooth devices */ + { USB_DEVICE(0x0bda, 0xb009), .driver_info = BTUSB_REALTEK }, { USB_DEVICE(0x2ff8, 0xb011), .driver_info = BTUSB_REALTEK }, /* Additional Realtek 8821AE Bluetooth devices */ diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index c823914b3a80a0ce550dff8244df835b23ec66ec..30bbe19b4b855e42ee981888a8232a00beda8cbc 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -539,6 +539,8 @@ static void hci_uart_tty_close(struct tty_struct *tty) } clear_bit(HCI_UART_PROTO_SET, &hu->flags); + percpu_free_rwsem(&hu->proto_lock); + kfree(hu); } diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index bfc566d3f31a40cf5b89d9284b2538644af68dee..8cfa10ab7abcb8cb0da9a9a62d7080b60f81683f 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -2542,7 +2542,7 @@ static int cdrom_ioctl_drive_status(struct cdrom_device_info *cdi, if (!CDROM_CAN(CDC_SELECT_DISC) || (arg == CDSL_CURRENT || arg == CDSL_NONE)) return cdi->ops->drive_status(cdi, CDSL_CURRENT); - if (((int)arg >= cdi->capacity)) + if (arg >= cdi->capacity) return -EINVAL; return cdrom_slot_status(cdi, arg); } diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c index 6f2eaba1cd6a60f17597d6c7426287ccc228f811..932678617dfa7e212699cdd1afde16c5e480df26 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c @@ -184,6 +184,8 @@ struct ssif_addr_info { struct device *dev; struct i2c_client *client; + struct i2c_client *added_client; + struct mutex clients_mutex; struct list_head clients; @@ -1710,15 +1712,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) out: if (rv) { - /* - * Note that if addr_info->client is assigned, we 
- * leave it. The i2c client hangs around even if we - * return a failure here, and the failure here is not - * propagated back to the i2c code. This seems to be - * design intent, strange as it may be. But if we - * don't leave it, ssif_platform_remove will not remove - * the client like it should. - */ + addr_info->client = NULL; dev_err(&client->dev, "Unable to start IPMI SSIF: %d\n", rv); kfree(ssif_info); } @@ -1737,7 +1731,8 @@ static int ssif_adapter_handler(struct device *adev, void *opaque) if (adev->type != &i2c_adapter_type) return 0; - i2c_new_device(to_i2c_adapter(adev), &addr_info->binfo); + addr_info->added_client = i2c_new_device(to_i2c_adapter(adev), + &addr_info->binfo); if (!addr_info->adapter_name) return 1; /* Only try the first I2C adapter by default. */ @@ -2018,8 +2013,8 @@ static int ssif_platform_remove(struct platform_device *dev) return 0; mutex_lock(&ssif_infos_mutex); - if (addr_info->client) - i2c_unregister_device(addr_info->client); + if (addr_info->added_client) + i2c_unregister_device(addr_info->added_client); list_del(&addr_info->link); kfree(addr_info); diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c index dba5259def6038645dbe24d95f0243c0b6282443..a2070ab86c824c402d142395f19389e578e205c3 100644 --- a/drivers/char/tpm/tpm-interface.c +++ b/drivers/char/tpm/tpm-interface.c @@ -369,10 +369,13 @@ err_len: return -EINVAL; } -static int tpm_request_locality(struct tpm_chip *chip) +static int tpm_request_locality(struct tpm_chip *chip, unsigned int flags) { int rc; + if (flags & TPM_TRANSMIT_RAW) + return 0; + if (!chip->ops->request_locality) return 0; @@ -385,10 +388,13 @@ static int tpm_request_locality(struct tpm_chip *chip) return 0; } -static void tpm_relinquish_locality(struct tpm_chip *chip) +static void tpm_relinquish_locality(struct tpm_chip *chip, unsigned int flags) { int rc; + if (flags & TPM_TRANSMIT_RAW) + return; + if (!chip->ops->relinquish_locality) return; @@ -399,6 +405,28 @@ static void tpm_relinquish_locality(struct tpm_chip *chip) chip->locality = -1; } +static int tpm_cmd_ready(struct tpm_chip *chip, unsigned int flags) +{ + if (flags & TPM_TRANSMIT_RAW) + return 0; + + if (!chip->ops->cmd_ready) + return 0; + + return chip->ops->cmd_ready(chip); +} + +static int tpm_go_idle(struct tpm_chip *chip, unsigned int flags) +{ + if (flags & TPM_TRANSMIT_RAW) + return 0; + + if (!chip->ops->go_idle) + return 0; + + return chip->ops->go_idle(chip); +} + static ssize_t tpm_try_transmit(struct tpm_chip *chip, struct tpm_space *space, u8 *buf, size_t bufsiz, @@ -423,7 +451,7 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip, header->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS); header->return_code = cpu_to_be32(TPM2_RC_COMMAND_CODE | TSS2_RESMGR_TPM_RC_LAYER); - return bufsiz; + return sizeof(*header); } if (bufsiz > TPM_BUFSIZE) @@ -449,14 +477,15 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip, /* Store the decision as chip->locality will be changed. 
*/ need_locality = chip->locality == -1; - if (!(flags & TPM_TRANSMIT_RAW) && need_locality) { - rc = tpm_request_locality(chip); + if (need_locality) { + rc = tpm_request_locality(chip, flags); if (rc < 0) goto out_no_locality; } - if (chip->dev.parent) - pm_runtime_get_sync(chip->dev.parent); + rc = tpm_cmd_ready(chip, flags); + if (rc) + goto out; rc = tpm2_prepare_space(chip, space, ordinal, buf); if (rc) @@ -516,13 +545,16 @@ out_recv: } rc = tpm2_commit_space(chip, space, ordinal, buf, &len); + if (rc) + dev_err(&chip->dev, "tpm2_commit_space: error %d\n", rc); out: - if (chip->dev.parent) - pm_runtime_put_sync(chip->dev.parent); + rc = tpm_go_idle(chip, flags); + if (rc) + goto out; if (need_locality) - tpm_relinquish_locality(chip); + tpm_relinquish_locality(chip, flags); out_no_locality: if (chip->ops->clk_enable != NULL) diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index b83b30a3eea5ef4e2de8b60a720d7dd571d45479..4bb9b4aa9b49c11a0e24c6a8d50351575609f015 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -511,9 +511,17 @@ extern const struct file_operations tpm_fops; extern const struct file_operations tpmrm_fops; extern struct idr dev_nums_idr; +/** + * enum tpm_transmit_flags + * + * @TPM_TRANSMIT_UNLOCKED: used to lock sequence of tpm_transmit calls. + * @TPM_TRANSMIT_RAW: prevent recursive calls into setup steps + * (go idle, locality,..). Always use with UNLOCKED + * as it will fail on double locking. + */ enum tpm_transmit_flags { - TPM_TRANSMIT_UNLOCKED = BIT(0), - TPM_TRANSMIT_RAW = BIT(1), + TPM_TRANSMIT_UNLOCKED = BIT(0), + TPM_TRANSMIT_RAW = BIT(1), }; ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space, diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c index d26ea7513226c991eb3eeef3dfdbebadee8fa298..dabb2ae4e779a8cfe6360b3686380748922fe41e 100644 --- a/drivers/char/tpm/tpm2-space.c +++ b/drivers/char/tpm/tpm2-space.c @@ -39,7 +39,8 @@ static void tpm2_flush_sessions(struct tpm_chip *chip, struct tpm_space *space) for (i = 0; i < ARRAY_SIZE(space->session_tbl); i++) { if (space->session_tbl[i]) tpm2_flush_context_cmd(chip, space->session_tbl[i], - TPM_TRANSMIT_UNLOCKED); + TPM_TRANSMIT_UNLOCKED | + TPM_TRANSMIT_RAW); } } @@ -84,7 +85,7 @@ static int tpm2_load_context(struct tpm_chip *chip, u8 *buf, tpm_buf_append(&tbuf, &buf[*offset], body_size); rc = tpm_transmit_cmd(chip, NULL, tbuf.data, PAGE_SIZE, 4, - TPM_TRANSMIT_UNLOCKED, NULL); + TPM_TRANSMIT_UNLOCKED | TPM_TRANSMIT_RAW, NULL); if (rc < 0) { dev_warn(&chip->dev, "%s: failed with a system error %d\n", __func__, rc); @@ -133,7 +134,7 @@ static int tpm2_save_context(struct tpm_chip *chip, u32 handle, u8 *buf, tpm_buf_append_u32(&tbuf, handle); rc = tpm_transmit_cmd(chip, NULL, tbuf.data, PAGE_SIZE, 0, - TPM_TRANSMIT_UNLOCKED, NULL); + TPM_TRANSMIT_UNLOCKED | TPM_TRANSMIT_RAW, NULL); if (rc < 0) { dev_warn(&chip->dev, "%s: failed with a system error %d\n", __func__, rc); @@ -170,7 +171,8 @@ static void tpm2_flush_space(struct tpm_chip *chip) for (i = 0; i < ARRAY_SIZE(space->context_tbl); i++) if (space->context_tbl[i] && ~space->context_tbl[i]) tpm2_flush_context_cmd(chip, space->context_tbl[i], - TPM_TRANSMIT_UNLOCKED); + TPM_TRANSMIT_UNLOCKED | + TPM_TRANSMIT_RAW); tpm2_flush_sessions(chip, space); } @@ -377,7 +379,8 @@ static int tpm2_map_response_header(struct tpm_chip *chip, u32 cc, u8 *rsp, return 0; out_no_slots: - tpm2_flush_context_cmd(chip, phandle, TPM_TRANSMIT_UNLOCKED); + tpm2_flush_context_cmd(chip, phandle, + 
TPM_TRANSMIT_UNLOCKED | TPM_TRANSMIT_RAW); dev_warn(&chip->dev, "%s: out of slots for 0x%08X\n", __func__, phandle); return -ENOMEM; @@ -465,7 +468,8 @@ static int tpm2_save_space(struct tpm_chip *chip) return rc; tpm2_flush_context_cmd(chip, space->context_tbl[i], - TPM_TRANSMIT_UNLOCKED); + TPM_TRANSMIT_UNLOCKED | + TPM_TRANSMIT_RAW); space->context_tbl[i] = ~0; } diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c index bb756ad7897e7ca2b445cfa37a16a3edfd7926a6..5c7ce5aaaf6fbbb4c0696b07876ece25e2070f2d 100644 --- a/drivers/char/tpm/tpm_crb.c +++ b/drivers/char/tpm/tpm_crb.c @@ -137,7 +137,7 @@ static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value, } /** - * crb_go_idle - request tpm crb device to go the idle state + * __crb_go_idle - request tpm crb device to go the idle state * * @dev: crb device * @priv: crb private data @@ -151,7 +151,7 @@ static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value, * * Return: 0 always */ -static int crb_go_idle(struct device *dev, struct crb_priv *priv) +static int __crb_go_idle(struct device *dev, struct crb_priv *priv) { if ((priv->flags & CRB_FL_ACPI_START) || (priv->flags & CRB_FL_CRB_SMC_START)) @@ -166,11 +166,20 @@ static int crb_go_idle(struct device *dev, struct crb_priv *priv) dev_warn(dev, "goIdle timed out\n"); return -ETIME; } + return 0; } +static int crb_go_idle(struct tpm_chip *chip) +{ + struct device *dev = &chip->dev; + struct crb_priv *priv = dev_get_drvdata(dev); + + return __crb_go_idle(dev, priv); +} + /** - * crb_cmd_ready - request tpm crb device to enter ready state + * __crb_cmd_ready - request tpm crb device to enter ready state * * @dev: crb device * @priv: crb private data @@ -183,7 +192,7 @@ static int crb_go_idle(struct device *dev, struct crb_priv *priv) * * Return: 0 on success -ETIME on timeout; */ -static int crb_cmd_ready(struct device *dev, struct crb_priv *priv) +static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv) { if ((priv->flags & CRB_FL_ACPI_START) || (priv->flags & CRB_FL_CRB_SMC_START)) @@ -201,6 +210,14 @@ static int crb_cmd_ready(struct device *dev, struct crb_priv *priv) return 0; } +static int crb_cmd_ready(struct tpm_chip *chip) +{ + struct device *dev = &chip->dev; + struct crb_priv *priv = dev_get_drvdata(dev); + + return __crb_cmd_ready(dev, priv); +} + static int __crb_request_locality(struct device *dev, struct crb_priv *priv, int loc) { @@ -393,6 +410,8 @@ static const struct tpm_class_ops tpm_crb = { .send = crb_send, .cancel = crb_cancel, .req_canceled = crb_req_canceled, + .go_idle = crb_go_idle, + .cmd_ready = crb_cmd_ready, .request_locality = crb_request_locality, .relinquish_locality = crb_relinquish_locality, .req_complete_mask = CRB_DRV_STS_COMPLETE, @@ -508,7 +527,7 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv, * PTT HW bug w/a: wake up the device to access * possibly not retained registers. 
*/ - ret = crb_cmd_ready(dev, priv); + ret = __crb_cmd_ready(dev, priv); if (ret) return ret; @@ -553,7 +572,7 @@ out: if (!ret) priv->cmd_size = cmd_size; - crb_go_idle(dev, priv); + __crb_go_idle(dev, priv); __crb_relinquish_locality(dev, priv, 0); @@ -624,32 +643,7 @@ static int crb_acpi_add(struct acpi_device *device) chip->acpi_dev_handle = device->handle; chip->flags = TPM_CHIP_FLAG_TPM2; - rc = __crb_request_locality(dev, priv, 0); - if (rc) - return rc; - - rc = crb_cmd_ready(dev, priv); - if (rc) - goto out; - - pm_runtime_get_noresume(dev); - pm_runtime_set_active(dev); - pm_runtime_enable(dev); - - rc = tpm_chip_register(chip); - if (rc) { - crb_go_idle(dev, priv); - pm_runtime_put_noidle(dev); - pm_runtime_disable(dev); - goto out; - } - - pm_runtime_put_sync(dev); - -out: - __crb_relinquish_locality(dev, priv, 0); - - return rc; + return tpm_chip_register(chip); } static int crb_acpi_remove(struct acpi_device *device) @@ -659,52 +653,11 @@ static int crb_acpi_remove(struct acpi_device *device) tpm_chip_unregister(chip); - pm_runtime_disable(dev); - return 0; } -static int __maybe_unused crb_pm_runtime_suspend(struct device *dev) -{ - struct tpm_chip *chip = dev_get_drvdata(dev); - struct crb_priv *priv = dev_get_drvdata(&chip->dev); - - return crb_go_idle(dev, priv); -} - -static int __maybe_unused crb_pm_runtime_resume(struct device *dev) -{ - struct tpm_chip *chip = dev_get_drvdata(dev); - struct crb_priv *priv = dev_get_drvdata(&chip->dev); - - return crb_cmd_ready(dev, priv); -} - -static int __maybe_unused crb_pm_suspend(struct device *dev) -{ - int ret; - - ret = tpm_pm_suspend(dev); - if (ret) - return ret; - - return crb_pm_runtime_suspend(dev); -} - -static int __maybe_unused crb_pm_resume(struct device *dev) -{ - int ret; - - ret = crb_pm_runtime_resume(dev); - if (ret) - return ret; - - return tpm_pm_resume(dev); -} - static const struct dev_pm_ops crb_pm = { - SET_SYSTEM_SLEEP_PM_OPS(crb_pm_suspend, crb_pm_resume) - SET_RUNTIME_PM_OPS(crb_pm_runtime_suspend, crb_pm_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(tpm_pm_suspend, tpm_pm_resume) }; static const struct acpi_device_id crb_device_ids[] = { diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c index d5b44cadac5669f46d36c48cad26c998ae931167..c619e76ce827615caa2c47bd6d6abbb368f8a416 100644 --- a/drivers/char/tpm/tpm_i2c_infineon.c +++ b/drivers/char/tpm/tpm_i2c_infineon.c @@ -117,7 +117,7 @@ static int iic_tpm_read(u8 addr, u8 *buffer, size_t len) /* Lock the adapter for the duration of the whole sequence. 
*/ if (!tpm_dev.client->adapter->algo->master_xfer) return -EOPNOTSUPP; - i2c_lock_adapter(tpm_dev.client->adapter); + i2c_lock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT); if (tpm_dev.chip_type == SLB9645) { /* use a combined read for newer chips @@ -192,7 +192,7 @@ static int iic_tpm_read(u8 addr, u8 *buffer, size_t len) } out: - i2c_unlock_adapter(tpm_dev.client->adapter); + i2c_unlock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT); /* take care of 'guard time' */ usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI); @@ -224,7 +224,7 @@ static int iic_tpm_write_generic(u8 addr, u8 *buffer, size_t len, if (!tpm_dev.client->adapter->algo->master_xfer) return -EOPNOTSUPP; - i2c_lock_adapter(tpm_dev.client->adapter); + i2c_lock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT); /* prepend the 'register address' to the buffer */ tpm_dev.buf[0] = addr; @@ -243,7 +243,7 @@ static int iic_tpm_write_generic(u8 addr, u8 *buffer, size_t len, usleep_range(sleep_low, sleep_hi); } - i2c_unlock_adapter(tpm_dev.client->adapter); + i2c_unlock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT); /* take care of 'guard time' */ usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI); diff --git a/drivers/char/tpm/tpm_tis_spi.c b/drivers/char/tpm/tpm_tis_spi.c index 8ab0bd8445f6d2ba818653d642bd32d4a97b8b20..b00388fc41c8d5f13c2203dea6be15ce0b647a72 100644 --- a/drivers/char/tpm/tpm_tis_spi.c +++ b/drivers/char/tpm/tpm_tis_spi.c @@ -188,6 +188,7 @@ static const struct tpm_tis_phy_ops tpm_spi_phy_ops = { static int tpm_tis_spi_probe(struct spi_device *dev) { struct tpm_tis_spi_phy *phy; + int irq; phy = devm_kzalloc(&dev->dev, sizeof(struct tpm_tis_spi_phy), GFP_KERNEL); @@ -200,7 +201,13 @@ static int tpm_tis_spi_probe(struct spi_device *dev) if (!phy->iobuf) return -ENOMEM; - return tpm_tis_core_init(&dev->dev, &phy->priv, -1, &tpm_spi_phy_ops, + /* If the SPI device has an IRQ then use that */ + if (dev->irq > 0) + irq = dev->irq; + else + irq = -1; + + return tpm_tis_core_init(&dev->dev, &phy->priv, irq, &tpm_spi_phy_ops, NULL); } diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c index a5d402de5584171d8fc5b75a990b00f4c9ce8862..20724abd38bd132205713cce2e7960324652d12b 100644 --- a/drivers/clk/clk-fixed-factor.c +++ b/drivers/clk/clk-fixed-factor.c @@ -177,8 +177,15 @@ static struct clk *_of_fixed_factor_clk_setup(struct device_node *node) clk = clk_register_fixed_factor(NULL, clk_name, parent_name, flags, mult, div); - if (IS_ERR(clk)) + if (IS_ERR(clk)) { + /* + * If parent clock is not registered, registration would fail. + * Clear OF_POPULATED flag so that clock registration can be + * attempted again from probe function. 
+ */ + of_node_clear_flag(node, OF_POPULATED); return clk; + } ret = of_clk_add_provider(node, of_clk_src_simple_get, clk); if (ret) { diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 6f4c98ca6e50802234874763d08715341c28f0b2..a3f52f6782115abd7038f8f0af9e1b78faebd5db 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -2557,6 +2557,7 @@ struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id, return clk; } +/* keep in sync with __clk_put */ void __clk_free_clk(struct clk *clk) { clk_prepare_lock(); @@ -2922,6 +2923,7 @@ int __clk_get(struct clk *clk) return 1; } +/* keep in sync with __clk_free_clk */ void __clk_put(struct clk *clk) { struct module *owner; @@ -2943,6 +2945,7 @@ void __clk_put(struct clk *clk) module_put(owner); + kfree_const(clk->con_id); kfree(clk); } diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c index 41c08fc892b97456bf8c9d3d7740337ee051638d..5cc5ff1b4e1f5cd5e1673e7b0fc4293a5af15d54 100644 --- a/drivers/clk/imx/clk-imx6ul.c +++ b/drivers/clk/imx/clk-imx6ul.c @@ -135,6 +135,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node) np = of_find_compatible_node(NULL, NULL, "fsl,imx6ul-anatop"); base = of_iomap(np, 0); + of_node_put(np); WARN_ON(!base); clks[IMX6UL_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", base + 0x00, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c index 6847120b61cdeff87b7de7995c412df5a7d6449c..3acf5f041e3c3cb0a2f17cbf4e8f3a14a4f1b586 100644 --- a/drivers/clk/rockchip/clk-rk3399.c +++ b/drivers/clk/rockchip/clk-rk3399.c @@ -630,7 +630,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = { MUX(0, "clk_i2sout_src", mux_i2sch_p, CLK_SET_RATE_PARENT, RK3399_CLKSEL_CON(31), 0, 2, MFLAGS), COMPOSITE_NODIV(SCLK_I2S_8CH_OUT, "clk_i2sout", mux_i2sout_p, CLK_SET_RATE_PARENT, - RK3399_CLKSEL_CON(30), 8, 2, MFLAGS, + RK3399_CLKSEL_CON(31), 2, 1, MFLAGS, RK3399_CLKGATE_CON(8), 12, GFLAGS), /* uart */ @@ -1522,6 +1522,7 @@ static const char *const rk3399_pmucru_critical_clocks[] __initconst = { "pclk_pmu_src", "fclk_cm0s_src_pmu", "clk_timer_src_pmu", + "pclk_rkpwm_pmu", }; static void __init rk3399_clk_init(struct device_node *np) diff --git a/drivers/clk/tegra/clk-bpmp.c b/drivers/clk/tegra/clk-bpmp.c index 638ace64033b92a54b930c4bdf1be65550241099..6c933b0e29a3bfb384a9fb24c369d0ff1ef7fbd4 100644 --- a/drivers/clk/tegra/clk-bpmp.c +++ b/drivers/clk/tegra/clk-bpmp.c @@ -581,9 +581,15 @@ static struct clk_hw *tegra_bpmp_clk_of_xlate(struct of_phandle_args *clkspec, unsigned int id = clkspec->args[0], i; struct tegra_bpmp *bpmp = data; - for (i = 0; i < bpmp->num_clocks; i++) - if (bpmp->clocks[i]->id == id) - return &bpmp->clocks[i]->hw; + for (i = 0; i < bpmp->num_clocks; i++) { + struct tegra_bpmp_clk *clk = bpmp->clocks[i]; + + if (!clk) + continue; + + if (clk->id == id) + return &clk->hw; + } return NULL; } diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index 43e14bb512c8da4cd2c0f8a73e37a1fe1205a170..6a16d22bc6043de0b01384e9623569244a1628f1 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -555,12 +555,20 @@ EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_stop); void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy) { - struct policy_dbs_info *policy_dbs = policy->governor_data; + struct policy_dbs_info *policy_dbs; + + /* Protect gov->gdbs_data against cpufreq_dbs_governor_exit() */ + 
mutex_lock(&gov_dbs_data_mutex); + policy_dbs = policy->governor_data; + if (!policy_dbs) + goto out; mutex_lock(&policy_dbs->update_mutex); cpufreq_policy_apply_limits(policy); gov_update_sample_delay(policy_dbs, 0); - mutex_unlock(&policy_dbs->update_mutex); + +out: + mutex_unlock(&gov_dbs_data_mutex); } EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_limits); diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index e7966e37a5aaeeb9fe259b5a2c3d460892b14e85..ecc6d755d3c1b9131f8c43e433229e96cf667c6a 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c @@ -350,10 +350,8 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, int ret = 0; if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { - crypto_ablkcipher_set_flags(ablkcipher, - CRYPTO_TFM_RES_BAD_KEY_LEN); dev_err(jrdev, "key size mismatch\n"); - return -EINVAL; + goto badkey; } memcpy(ctx->key, key, keylen); @@ -388,7 +386,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, return ret; badkey: crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN); - return 0; + return -EINVAL; } /* diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c index 7ff4a25440acd9813d4fb19b9ef7355d6765451d..6f3f81bb880b505797bd69bd7288083c1ceef029 100644 --- a/drivers/crypto/caam/caampkc.c +++ b/drivers/crypto/caam/caampkc.c @@ -71,8 +71,8 @@ static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc, dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE); dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE); dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); - dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); - dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE); + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL); + dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL); } static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc, @@ -90,8 +90,8 @@ static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc, dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE); dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE); dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE); - dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); - dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE); + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL); + dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL); } /* RSA Job Completion handler */ @@ -417,13 +417,13 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req, goto unmap_p; } - pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE); + pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, pdb->tmp1_dma)) { dev_err(dev, "Unable to map RSA tmp1 memory\n"); goto unmap_q; } - pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE); + pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, pdb->tmp2_dma)) { dev_err(dev, "Unable to map RSA tmp2 memory\n"); goto unmap_tmp1; @@ -451,7 +451,7 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req, return 0; unmap_tmp1: - dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL); unmap_q: dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); unmap_p: @@ -504,13 +504,13 @@ static int set_rsa_priv_f3_pdb(struct 
akcipher_request *req, goto unmap_dq; } - pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE); + pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, pdb->tmp1_dma)) { dev_err(dev, "Unable to map RSA tmp1 memory\n"); goto unmap_qinv; } - pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE); + pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, pdb->tmp2_dma)) { dev_err(dev, "Unable to map RSA tmp2 memory\n"); goto unmap_tmp1; @@ -538,7 +538,7 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req, return 0; unmap_tmp1: - dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL); unmap_qinv: dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE); unmap_dq: diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index d258953ff488331486334f8fc24a99c1bc5d0ca1..7fa1be1845535cb5b015fd35f8173cf0f080014c 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c @@ -190,7 +190,8 @@ static void caam_jr_dequeue(unsigned long devarg) BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0); /* Unmap just-run descriptor so we can post-process */ - dma_unmap_single(dev, jrp->outring[hw_idx].desc, + dma_unmap_single(dev, + caam_dma_to_cpu(jrp->outring[hw_idx].desc), jrp->entinfo[sw_idx].desc_size, DMA_TO_DEVICE); diff --git a/drivers/crypto/cavium/nitrox/nitrox_dev.h b/drivers/crypto/cavium/nitrox/nitrox_dev.h index 9a476bb6d4c7eace932edb09b040098eccee7aa0..af596455b420f60d68096b8f2d4e9c71745785fc 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_dev.h +++ b/drivers/crypto/cavium/nitrox/nitrox_dev.h @@ -35,6 +35,7 @@ struct nitrox_cmdq { /* requests in backlog queues */ atomic_t backlog_count; + int write_idx; /* command size 32B/64B */ u8 instr_size; u8 qno; @@ -87,7 +88,7 @@ struct nitrox_bh { struct bh_data *slc; }; -/* NITROX-5 driver state */ +/* NITROX-V driver state */ #define NITROX_UCODE_LOADED 0 #define NITROX_READY 1 diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c index 4fdc921ba611b94b9a8d14e3a657a63497a4cce9..9906c00866476a33c80ba740563d60afaaf05697 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_lib.c +++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c @@ -36,6 +36,7 @@ static int cmdq_common_init(struct nitrox_cmdq *cmdq) cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN); cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN); cmdq->qsize = (qsize + PKT_IN_ALIGN); + cmdq->write_idx = 0; spin_lock_init(&cmdq->response_lock); spin_lock_init(&cmdq->cmdq_lock); diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c index 4addc238a6ef361db59cf49359d96ea8fae98f9a..4adf28176a4e439c79f238b1232e7361dfc6207d 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c +++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c @@ -43,6 +43,16 @@ * Invalid flag options in AES-CCM IV. */ +static inline int incr_index(int index, int count, int max) +{ + if ((index + count) >= max) + index = index + count - max; + else + index += count; + + return index; +} + /** * dma_free_sglist - unmap and free the sg lists. 
* @ndev: N5 device @@ -427,30 +437,29 @@ static void post_se_instr(struct nitrox_softreq *sr, struct nitrox_cmdq *cmdq) { struct nitrox_device *ndev = sr->ndev; - union nps_pkt_in_instr_baoff_dbell pkt_in_baoff_dbell; - u64 offset; + int idx; u8 *ent; spin_lock_bh(&cmdq->cmdq_lock); - /* get the next write offset */ - offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(cmdq->qno); - pkt_in_baoff_dbell.value = nitrox_read_csr(ndev, offset); + idx = cmdq->write_idx; /* copy the instruction */ - ent = cmdq->head + pkt_in_baoff_dbell.s.aoff; + ent = cmdq->head + (idx * cmdq->instr_size); memcpy(ent, &sr->instr, cmdq->instr_size); - /* flush the command queue updates */ - dma_wmb(); - sr->tstamp = jiffies; atomic_set(&sr->status, REQ_POSTED); response_list_add(sr, cmdq); + sr->tstamp = jiffies; + /* flush the command queue updates */ + dma_wmb(); /* Ring doorbell with count 1 */ writeq(1, cmdq->dbell_csr_addr); /* orders the doorbell rings */ mmiowb(); + cmdq->write_idx = incr_index(idx, 1, ndev->qlen); + spin_unlock_bh(&cmdq->cmdq_lock); } @@ -460,6 +469,9 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq) struct nitrox_softreq *sr, *tmp; int ret = 0; + if (!atomic_read(&cmdq->backlog_count)) + return 0; + spin_lock_bh(&cmdq->backlog_lock); list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) { @@ -467,7 +479,7 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq) /* submit until space available */ if (unlikely(cmdq_full(cmdq, ndev->qlen))) { - ret = -EBUSY; + ret = -ENOSPC; break; } /* delete from backlog list */ @@ -492,23 +504,20 @@ static int nitrox_enqueue_request(struct nitrox_softreq *sr) { struct nitrox_cmdq *cmdq = sr->cmdq; struct nitrox_device *ndev = sr->ndev; - int ret = -EBUSY; + + /* try to post backlog requests */ + post_backlog_cmds(cmdq); if (unlikely(cmdq_full(cmdq, ndev->qlen))) { if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) - return -EAGAIN; - + return -ENOSPC; + /* add to backlog list */ backlog_list_add(sr, cmdq); - } else { - ret = post_backlog_cmds(cmdq); - if (ret) { - backlog_list_add(sr, cmdq); - return ret; - } - post_se_instr(sr, cmdq); - ret = -EINPROGRESS; + return -EBUSY; } - return ret; + post_se_instr(sr, cmdq); + + return -EINPROGRESS; } /** @@ -625,11 +634,9 @@ int nitrox_process_se_request(struct nitrox_device *ndev, */ sr->instr.fdata[0] = *((u64 *)&req->gph); sr->instr.fdata[1] = 0; - /* flush the soft_req changes before posting the cmd */ - wmb(); ret = nitrox_enqueue_request(sr); - if (ret == -EAGAIN) + if (ret == -ENOSPC) goto send_fail; return ret; diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index 08e7bdcaa6e318c5bae2290bf57ac3c1bae3d95e..085c229eab1de06b1eea7e3ac42255419d4a7b42 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c @@ -1351,7 +1351,7 @@ err_sha_v4_algs: err_sha_v3_algs: for (j = 0; j < k; j++) - crypto_unregister_ahash(&sha_v4_algs[j]); + crypto_unregister_ahash(&sha_v3_algs[j]); err_aes_algs: for (j = 0; j < i; j++) @@ -1367,7 +1367,7 @@ static void sahara_unregister_algs(struct sahara_dev *dev) for (i = 0; i < ARRAY_SIZE(aes_algs); i++) crypto_unregister_alg(&aes_algs[i]); - for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++) + for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++) crypto_unregister_ahash(&sha_v3_algs[i]); if (dev->version > SAHARA_VERSION_3) diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c index 5285ece4f33a36df39bfd18068dd14ce6a1213db..b71895871be3f1f2ec0b15d19d4087a16f40016f 100644 --- a/drivers/crypto/vmx/aes_cbc.c +++ b/drivers/crypto/vmx/aes_cbc.c @@ -107,24 
+107,23 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc, ret = crypto_skcipher_encrypt(req); skcipher_request_zero(req); } else { - preempt_disable(); - pagefault_disable(); - enable_kernel_vsx(); - blkcipher_walk_init(&walk, dst, src, nbytes); ret = blkcipher_walk_virt(desc, &walk); while ((nbytes = walk.nbytes)) { + preempt_disable(); + pagefault_disable(); + enable_kernel_vsx(); aes_p8_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr, nbytes & AES_BLOCK_MASK, &ctx->enc_key, walk.iv, 1); + disable_kernel_vsx(); + pagefault_enable(); + preempt_enable(); + nbytes &= AES_BLOCK_SIZE - 1; ret = blkcipher_walk_done(desc, &walk, nbytes); } - - disable_kernel_vsx(); - pagefault_enable(); - preempt_enable(); } return ret; @@ -147,24 +146,23 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc, ret = crypto_skcipher_decrypt(req); skcipher_request_zero(req); } else { - preempt_disable(); - pagefault_disable(); - enable_kernel_vsx(); - blkcipher_walk_init(&walk, dst, src, nbytes); ret = blkcipher_walk_virt(desc, &walk); while ((nbytes = walk.nbytes)) { + preempt_disable(); + pagefault_disable(); + enable_kernel_vsx(); aes_p8_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr, nbytes & AES_BLOCK_MASK, &ctx->dec_key, walk.iv, 0); + disable_kernel_vsx(); + pagefault_enable(); + preempt_enable(); + nbytes &= AES_BLOCK_SIZE - 1; ret = blkcipher_walk_done(desc, &walk, nbytes); } - - disable_kernel_vsx(); - pagefault_enable(); - preempt_enable(); } return ret; diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c index 02ba5f2aa0e6e8d09b431d9ad0019fa4c1549f94..cd777c75291dfb72f6df5c2967909da694627b78 100644 --- a/drivers/crypto/vmx/aes_ctr.c +++ b/drivers/crypto/vmx/aes_ctr.c @@ -27,21 +27,23 @@ #include #include #include +#include + #include "aesp8-ppc.h" struct p8_aes_ctr_ctx { - struct crypto_blkcipher *fallback; + struct crypto_skcipher *fallback; struct aes_key enc_key; }; static int p8_aes_ctr_init(struct crypto_tfm *tfm) { const char *alg = crypto_tfm_alg_name(tfm); - struct crypto_blkcipher *fallback; + struct crypto_skcipher *fallback; struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); - fallback = - crypto_alloc_blkcipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK); + fallback = crypto_alloc_skcipher(alg, 0, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(fallback)) { printk(KERN_ERR "Failed to allocate transformation for '%s': %ld\n", @@ -49,9 +51,9 @@ static int p8_aes_ctr_init(struct crypto_tfm *tfm) return PTR_ERR(fallback); } - crypto_blkcipher_set_flags( + crypto_skcipher_set_flags( fallback, - crypto_blkcipher_get_flags((struct crypto_blkcipher *)tfm)); + crypto_skcipher_get_flags((struct crypto_skcipher *)tfm)); ctx->fallback = fallback; return 0; @@ -62,7 +64,7 @@ static void p8_aes_ctr_exit(struct crypto_tfm *tfm) struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); if (ctx->fallback) { - crypto_free_blkcipher(ctx->fallback); + crypto_free_skcipher(ctx->fallback); ctx->fallback = NULL; } } @@ -81,7 +83,7 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key, pagefault_enable(); preempt_enable(); - ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen); + ret += crypto_skcipher_setkey(ctx->fallback, key, keylen); return ret; } @@ -115,15 +117,14 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc, struct blkcipher_walk walk; struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm)); - struct blkcipher_desc fallback_desc = { - .tfm = ctx->fallback, - .info = desc->info, - .flags = desc->flags - }; if 
(in_interrupt()) { - ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src, - nbytes); + SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback); + skcipher_request_set_tfm(req, ctx->fallback); + skcipher_request_set_callback(req, desc->flags, NULL, NULL); + skcipher_request_set_crypt(req, src, dst, nbytes, desc->info); + ret = crypto_skcipher_encrypt(req); + skcipher_request_zero(req); } else { blkcipher_walk_init(&walk, dst, src, nbytes); ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c index 8bd9aff0f55fba6639b67147cf97c2fcfce12bfc..e9954a7d46944d36cd2aeffdfd8202b54c793d71 100644 --- a/drivers/crypto/vmx/aes_xts.c +++ b/drivers/crypto/vmx/aes_xts.c @@ -116,32 +116,39 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc, ret = enc? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req); skcipher_request_zero(req); } else { + blkcipher_walk_init(&walk, dst, src, nbytes); + + ret = blkcipher_walk_virt(desc, &walk); + preempt_disable(); pagefault_disable(); enable_kernel_vsx(); - blkcipher_walk_init(&walk, dst, src, nbytes); - - ret = blkcipher_walk_virt(desc, &walk); iv = walk.iv; memset(tweak, 0, AES_BLOCK_SIZE); aes_p8_encrypt(iv, tweak, &ctx->tweak_key); + disable_kernel_vsx(); + pagefault_enable(); + preempt_enable(); + while ((nbytes = walk.nbytes)) { + preempt_disable(); + pagefault_disable(); + enable_kernel_vsx(); if (enc) aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr, nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak); else aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr, nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak); + disable_kernel_vsx(); + pagefault_enable(); + preempt_enable(); nbytes &= AES_BLOCK_SIZE - 1; ret = blkcipher_walk_done(desc, &walk, nbytes); } - - disable_kernel_vsx(); - pagefault_enable(); - preempt_enable(); } return ret; } diff --git a/drivers/dax/device.c b/drivers/dax/device.c index 7b0bf825c4e73c588ff93183cf5315665d69e082..050e299129acf5167774fbd9d6569feabd58f685 100644 --- a/drivers/dax/device.c +++ b/drivers/dax/device.c @@ -188,14 +188,16 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma, /* prevent private mappings from being established */ if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) { - dev_info(dev, "%s: %s: fail, attempted private mapping\n", + dev_info_ratelimited(dev, + "%s: %s: fail, attempted private mapping\n", current->comm, func); return -EINVAL; } mask = dax_region->align - 1; if (vma->vm_start & mask || vma->vm_end & mask) { - dev_info(dev, "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n", + dev_info_ratelimited(dev, + "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n", current->comm, func, vma->vm_start, vma->vm_end, mask); return -EINVAL; @@ -203,13 +205,15 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma, if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV && (vma->vm_flags & VM_DONTCOPY) == 0) { - dev_info(dev, "%s: %s: fail, dax range requires MADV_DONTFORK\n", + dev_info_ratelimited(dev, + "%s: %s: fail, dax range requires MADV_DONTFORK\n", current->comm, func); return -EINVAL; } if (!vma_is_dax(vma)) { - dev_info(dev, "%s: %s: fail, vma is not DAX capable\n", + dev_info_ratelimited(dev, + "%s: %s: fail, vma is not DAX capable\n", current->comm, func); return -EINVAL; } diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index 01d2a750a6217c062daedbac6b44d0e59aafd2e2..219ae3b545dbab035d20ce628c7a025159f9e9bb 100644 --- a/drivers/dma/k3dma.c +++ 
b/drivers/dma/k3dma.c @@ -787,7 +787,7 @@ static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec, struct k3_dma_dev *d = ofdma->of_dma_data; unsigned int request = dma_spec->args[0]; - if (request > d->dma_requests) + if (request >= d->dma_requests) return NULL; return dma_get_slave_channel(&(d->chans[request].vc.chan)); diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c index 3548caa9e9339f17208a62066ad055c842491e3b..75eef589d0ecbe717392df0e0a9ab352e24bdea9 100644 --- a/drivers/dma/mv_xor_v2.c +++ b/drivers/dma/mv_xor_v2.c @@ -898,6 +898,8 @@ static int mv_xor_v2_remove(struct platform_device *pdev) platform_msi_domain_free_irqs(&pdev->dev); + tasklet_kill(&xor_dev->irq_tasklet); + clk_disable_unprepare(xor_dev->clk); return 0; diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 7432c8894e321c415bb3f6d18a3ff73e3b181e04..6afd42cfbf5d5014c069b1e750b741dc1dc9c31d 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2142,13 +2142,14 @@ static int pl330_terminate_all(struct dma_chan *chan) pm_runtime_get_sync(pl330->ddma.dev); spin_lock_irqsave(&pch->lock, flags); + spin_lock(&pl330->lock); _stop(pch->thread); - spin_unlock(&pl330->lock); - pch->thread->req[0].desc = NULL; pch->thread->req[1].desc = NULL; pch->thread->req_running = -1; + spin_unlock(&pl330->lock); + power_down = pch->active; pch->active = false; @@ -2923,7 +2924,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) pd->src_addr_widths = PL330_DMA_BUSWIDTHS; pd->dst_addr_widths = PL330_DMA_BUSWIDTHS; pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); - pd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; + pd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; pd->max_burst = ((pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP) ? 
1 : PL330_MAX_BURST); diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index 480072139b7aa00db3d27d9381c0e6eace04835e..80801c616395e144def7675192625a4dcf5788cf 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c @@ -215,6 +215,7 @@ const char * const edac_mem_types[] = { [MEM_LRDDR3] = "Load-Reduced DDR3 RAM", [MEM_DDR4] = "Unbuffered DDR4 RAM", [MEM_RDDR4] = "Registered DDR4 RAM", + [MEM_LRDDR4] = "Load-Reduced-DDR4-RAM", }; EXPORT_SYMBOL_GPL(edac_mem_types); diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c index e4fcfa84fbd3503246d144bcbcbb36747ed9171c..79c13301bf41662466b30c7cb3888ddeb047e018 100644 --- a/drivers/edac/edac_mc_sysfs.c +++ b/drivers/edac/edac_mc_sysfs.c @@ -1097,14 +1097,14 @@ int __init edac_mc_sysfs_init(void) err = device_add(mci_pdev); if (err < 0) - goto out_dev_free; + goto out_put_device; edac_dbg(0, "device %s created\n", dev_name(mci_pdev)); return 0; - out_dev_free: - kfree(mci_pdev); + out_put_device: + put_device(mci_pdev); out: return err; } diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c index c16c3b931b3d0ef9cd68b01e6d81de4987d5c794..6c7d5f20eacbd8c9652561f7edc1bebb801e6d73 100644 --- a/drivers/edac/i7core_edac.c +++ b/drivers/edac/i7core_edac.c @@ -1177,15 +1177,14 @@ static int i7core_create_sysfs_devices(struct mem_ctl_info *mci) rc = device_add(pvt->addrmatch_dev); if (rc < 0) - return rc; + goto err_put_addrmatch; if (!pvt->is_registered) { pvt->chancounts_dev = kzalloc(sizeof(*pvt->chancounts_dev), GFP_KERNEL); if (!pvt->chancounts_dev) { - put_device(pvt->addrmatch_dev); - device_del(pvt->addrmatch_dev); - return -ENOMEM; + rc = -ENOMEM; + goto err_del_addrmatch; } pvt->chancounts_dev->type = &all_channel_counts_type; @@ -1199,9 +1198,18 @@ static int i7core_create_sysfs_devices(struct mem_ctl_info *mci) rc = device_add(pvt->chancounts_dev); if (rc < 0) - return rc; + goto err_put_chancounts; } return 0; + +err_put_chancounts: + put_device(pvt->chancounts_dev); +err_del_addrmatch: + device_del(pvt->addrmatch_dev); +err_put_addrmatch: + put_device(pvt->addrmatch_dev); + + return rc; } static void i7core_delete_sysfs_devices(struct mem_ctl_info *mci) @@ -1211,11 +1219,11 @@ static void i7core_delete_sysfs_devices(struct mem_ctl_info *mci) edac_dbg(1, "\n"); if (!pvt->is_registered) { - put_device(pvt->chancounts_dev); device_del(pvt->chancounts_dev); + put_device(pvt->chancounts_dev); } - put_device(pvt->addrmatch_dev); device_del(pvt->addrmatch_dev); + put_device(pvt->addrmatch_dev); } /**************************************************************************** diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c index 35e9fb885486e9da79fb63b1460200785735a44d..95e96f04bf6fd04223f0806f14de52d93fd7bebd 100644 --- a/drivers/extcon/extcon.c +++ b/drivers/extcon/extcon.c @@ -433,8 +433,8 @@ int extcon_sync(struct extcon_dev *edev, unsigned int id) return index; spin_lock_irqsave(&edev->lock, flags); - state = !!(edev->state & BIT(index)); + spin_unlock_irqrestore(&edev->lock, flags); /* * Call functions in a raw notifier chain for the specific one @@ -448,6 +448,7 @@ int extcon_sync(struct extcon_dev *edev, unsigned int id) */ raw_notifier_call_chain(&edev->nh_all, state, edev); + spin_lock_irqsave(&edev->lock, flags); /* This could be in interrupt handler */ prop_buf = (char *)get_zeroed_page(GFP_ATOMIC); if (!prop_buf) { diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c index 
80d1a885def5c2dd8bd15591bcc7c62d9b2e3b8c..a7c522eac640feb84d25b436cf54461e797f8d10 100644 --- a/drivers/firmware/efi/arm-init.c +++ b/drivers/firmware/efi/arm-init.c @@ -259,7 +259,6 @@ void __init efi_init(void) reserve_regions(); efi_esrt_init(); - efi_memmap_unmap(); memblock_reserve(params.mmap & PAGE_MASK, PAGE_ALIGN(params.mmap_size + diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c index 86a1ad17a32e2cd4acf3fb64a774b7cfabc271f1..8995a48bd0676229f5d22396d75a2946fed5b721 100644 --- a/drivers/firmware/efi/arm-runtime.c +++ b/drivers/firmware/efi/arm-runtime.c @@ -122,11 +122,13 @@ static int __init arm_enable_runtime_services(void) { u64 mapsize; - if (!efi_enabled(EFI_BOOT)) { + if (!efi_enabled(EFI_BOOT) || !efi_enabled(EFI_MEMMAP)) { pr_info("EFI services will not be available.\n"); return 0; } + efi_memmap_unmap(); + if (efi_runtime_disabled()) { pr_info("EFI runtime services will be disabled.\n"); return 0; diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c index c47e0c6ec00f858c0b9960f605974b4f2e4b1294..f3c28777b8c6fed9bb351ad1d93307b3651a8adb 100644 --- a/drivers/firmware/efi/esrt.c +++ b/drivers/firmware/efi/esrt.c @@ -333,7 +333,8 @@ void __init efi_esrt_init(void) end = esrt_data + size; pr_info("Reserving ESRT space from %pa to %pa.\n", &esrt_data, &end); - efi_mem_reserve(esrt_data, esrt_data_size); + if (md.type == EFI_BOOT_SERVICES_DATA) + efi_mem_reserve(esrt_data, esrt_data_size); pr_debug("esrt-init: loaded.\n"); err_memunmap: diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c index e4b40f2b46274a0871d1cb881732e8358a324112..9c0f7cf920afe561a83b62ac5b5dfb279bae6c1a 100644 --- a/drivers/firmware/google/vpd.c +++ b/drivers/firmware/google/vpd.c @@ -246,6 +246,7 @@ static int vpd_section_destroy(struct vpd_section *sec) sysfs_remove_bin_file(vpd_kobj, &sec->bin_attr); kfree(sec->raw_name); memunmap(sec->baseaddr); + sec->enabled = false; } return 0; @@ -279,8 +280,10 @@ static int vpd_sections_init(phys_addr_t physaddr) ret = vpd_section_init("rw", &rw_vpd, physaddr + sizeof(struct vpd_cbmem) + header.ro_size, header.rw_size); - if (ret) + if (ret) { + vpd_section_destroy(&ro_vpd); return ret; + } } return 0; diff --git a/drivers/gpio/gpio-menz127.c b/drivers/gpio/gpio-menz127.c index e1037582e34d7f69da7b3e96586f288b1f723f0b..b2635326546e7d3ac2e675b01d21ad234e5cd52a 100644 --- a/drivers/gpio/gpio-menz127.c +++ b/drivers/gpio/gpio-menz127.c @@ -56,9 +56,9 @@ static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio, rnd = fls(debounce) - 1; if (rnd && (debounce & BIT(rnd - 1))) - debounce = round_up(debounce, MEN_Z127_DB_MIN_US); + debounce = roundup(debounce, MEN_Z127_DB_MIN_US); else - debounce = round_down(debounce, MEN_Z127_DB_MIN_US); + debounce = rounddown(debounce, MEN_Z127_DB_MIN_US); if (debounce > MEN_Z127_DB_MAX_US) debounce = MEN_Z127_DB_MAX_US; diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c index 4b80e996d976524398e6ad484dd2f552bcdbed65..1022fe8d09c79c1543b2bb7d6f677b148d779e05 100644 --- a/drivers/gpio/gpio-ml-ioh.c +++ b/drivers/gpio/gpio-ml-ioh.c @@ -497,9 +497,10 @@ static int ioh_gpio_probe(struct pci_dev *pdev, return 0; err_gpiochip_add: + chip = chip_save; while (--i >= 0) { - chip--; gpiochip_remove(&chip->gpio); + chip++; } kfree(chip_save); diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c index 6029899789f3dbcfa51e2227d6b94a115bf0a3e5..2943dfc4c470742511c190dedce079a10066ce94 100644 --- a/drivers/gpio/gpio-pxa.c +++ 
b/drivers/gpio/gpio-pxa.c @@ -662,6 +662,8 @@ static int pxa_gpio_probe(struct platform_device *pdev) pchip->irq0 = irq0; pchip->irq1 = irq1; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -EINVAL; gpio_reg_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!gpio_reg_base) diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c index fbaf974277dffd9c90e57f03037a4b3fe8fa6da5..1eb857e2f62f166339bce3ff3c673b7c256845f4 100644 --- a/drivers/gpio/gpio-tegra.c +++ b/drivers/gpio/gpio-tegra.c @@ -728,4 +728,4 @@ static int __init tegra_gpio_init(void) { return platform_driver_register(&tegra_gpio_driver); } -postcore_initcall(tegra_gpio_init); +subsys_initcall(tegra_gpio_init); diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index d6f3d9ee1350e422e3f373427d55d6ce8b6be565..70b3c556f6cf29a377642299e93257f2ab489969 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -25,6 +25,7 @@ struct acpi_gpio_event { struct list_head node; + struct list_head initial_sync_list; acpi_handle handle; unsigned int pin; unsigned int irq; @@ -50,6 +51,9 @@ struct acpi_gpio_chip { struct list_head events; }; +static LIST_HEAD(acpi_gpio_initial_sync_list); +static DEFINE_MUTEX(acpi_gpio_initial_sync_list_lock); + static int acpi_gpiochip_find(struct gpio_chip *gc, void *data) { if (!gc->parent) @@ -142,6 +146,21 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin) return gpiochip_get_desc(chip, offset); } +static void acpi_gpio_add_to_initial_sync_list(struct acpi_gpio_event *event) +{ + mutex_lock(&acpi_gpio_initial_sync_list_lock); + list_add(&event->initial_sync_list, &acpi_gpio_initial_sync_list); + mutex_unlock(&acpi_gpio_initial_sync_list_lock); +} + +static void acpi_gpio_del_from_initial_sync_list(struct acpi_gpio_event *event) +{ + mutex_lock(&acpi_gpio_initial_sync_list_lock); + if (!list_empty(&event->initial_sync_list)) + list_del_init(&event->initial_sync_list); + mutex_unlock(&acpi_gpio_initial_sync_list_lock); +} + static irqreturn_t acpi_gpio_irq_handler(int irq, void *data) { struct acpi_gpio_event *event = data; @@ -193,7 +212,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, irq_handler_t handler = NULL; struct gpio_desc *desc; unsigned long irqflags; - int ret, pin, irq; + int ret, pin, irq, value; if (!acpi_gpio_get_irq_resource(ares, &agpio)) return AE_OK; @@ -228,6 +247,8 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, gpiod_direction_input(desc); + value = gpiod_get_value(desc); + ret = gpiochip_lock_as_irq(chip, pin); if (ret) { dev_err(chip->parent, "Failed to lock GPIO as interrupt\n"); @@ -269,6 +290,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, event->irq = irq; event->pin = pin; event->desc = desc; + INIT_LIST_HEAD(&event->initial_sync_list); ret = request_threaded_irq(event->irq, NULL, handler, irqflags, "ACPI:Event", event); @@ -283,6 +305,18 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, enable_irq_wake(irq); list_add_tail(&event->node, &acpi_gpio->events); + + /* + * Make sure we trigger the initial state of the IRQ when using RISING + * or FALLING. Note we run the handlers on late_init, the AML code + * may refer to OperationRegions from other (builtin) drivers which + * may be probed after us. 
+ */ + if (handler == acpi_gpio_irq_handler && + (((irqflags & IRQF_TRIGGER_RISING) && value == 1) || + ((irqflags & IRQF_TRIGGER_FALLING) && value == 0))) + acpi_gpio_add_to_initial_sync_list(event); + return AE_OK; fail_free_event: @@ -355,6 +389,8 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip) list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) { struct gpio_desc *desc; + acpi_gpio_del_from_initial_sync_list(event); + if (irqd_is_wakeup_set(irq_get_irq_data(event->irq))) disable_irq_wake(event->irq); @@ -1210,3 +1246,21 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id) return con_id == NULL; } + +/* Sync the initial state of handlers after all builtin drivers have probed */ +static int acpi_gpio_initial_sync(void) +{ + struct acpi_gpio_event *event, *ep; + + mutex_lock(&acpi_gpio_initial_sync_list_lock); + list_for_each_entry_safe(event, ep, &acpi_gpio_initial_sync_list, + initial_sync_list) { + acpi_evaluate_object(event->handle, NULL, NULL, NULL); + list_del_init(&event->initial_sync_list); + } + mutex_unlock(&acpi_gpio_initial_sync_list_lock); + + return 0; +} +/* We must use _sync so that this runs after the first deferred_probe run */ +late_initcall_sync(acpi_gpio_initial_sync); diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h index 3d4d0634c9ddd560797e2d85c71f271a83709d3e..7a3d9658d74c51074f4ea54bfb5863e39a7c168c 100644 --- a/drivers/gpio/gpiolib.h +++ b/drivers/gpio/gpiolib.h @@ -88,7 +88,7 @@ struct acpi_gpio_info { }; /* gpio suffixes used for ACPI and device tree lookup */ -static const char * const gpio_suffixes[] = { "gpios", "gpio" }; +static __maybe_unused const char * const gpio_suffixes[] = { "gpios", "gpio" }; #ifdef CONFIG_OF_GPIO struct gpio_desc *of_find_gpio(struct device *dev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index fd435a96481c2f1c019bd4d2303ebbb9b590adad..e8d9479615c955ac2c624e12f41e89e4b081b371 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -715,12 +715,14 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, break; case CHIP_POLARIS10: if (type == CGS_UCODE_ID_SMU) { - if ((adev->pdev->device == 0x67df) && - ((adev->pdev->revision == 0xe0) || - (adev->pdev->revision == 0xe3) || - (adev->pdev->revision == 0xe4) || - (adev->pdev->revision == 0xe5) || - (adev->pdev->revision == 0xe7) || + if (((adev->pdev->device == 0x67df) && + ((adev->pdev->revision == 0xe0) || + (adev->pdev->revision == 0xe3) || + (adev->pdev->revision == 0xe4) || + (adev->pdev->revision == 0xe5) || + (adev->pdev->revision == 0xe7) || + (adev->pdev->revision == 0xef))) || + ((adev->pdev->device == 0x6fdf) && (adev->pdev->revision == 0xef))) { info->is_kicker = true; strcpy(fw_name, "amdgpu/polaris10_k_smc.bin"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 0f16986ec5bc44356c19e9a7988a57e4b35f05bb..4dd68d8213538e548a557cff6229261e75ee1306 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -500,6 +500,7 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, {0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, {0x1002, 0x67CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, + {0x1002, 0x6FDF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, /* Polaris12 */ {0x1002, 0x6980, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_POLARIS12}, {0x1002, 0x6981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 8c2204c7b3847c3ce18042b48d70516155b843e5..7ad8fa891ce6b97bac4390eb0bd377d4b280b1d5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -134,6 +134,11 @@ psp_cmd_submit_buf(struct psp_context *psp, msleep(1); } + if (ucode) { + ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo; + ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi; + } + return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h index 30b5500dc152140ca5ef49f3d6f217e04cdf2fc8..fa7b25e1e5d2990b7e2aa0e1e7ad6374dc3f2d34 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h @@ -172,6 +172,7 @@ enum AMDGPU_UCODE_ID { AMDGPU_UCODE_ID_SMC, AMDGPU_UCODE_ID_UVD, AMDGPU_UCODE_ID_VCE, + AMDGPU_UCODE_ID_VCN, AMDGPU_UCODE_ID_MAXIMUM, }; @@ -204,6 +205,9 @@ struct amdgpu_firmware_info { void *kaddr; /* ucode_size_bytes */ uint32_t ucode_size; + /* starting tmr mc address */ + uint32_t tmr_mc_addr_lo; + uint32_t tmr_mc_addr_hi; }; void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 308a9755eae39b2582bc20e00d8c6b3ce7e91e14..1612d8aa6ad609b03d29713ea620f65883c90677 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -93,9 +93,10 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) version_major, version_minor, family_id); - bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8) - + AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE + bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE + AMDGPU_VCN_SESSION_SIZE * 40; + if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) + bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8); r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo, &adev->vcn.gpu_addr, &adev->vcn.cpu_addr); @@ -191,11 +192,13 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev) unsigned offset; hdr = (const struct common_firmware_header *)adev->vcn.fw->data; - offset = le32_to_cpu(hdr->ucode_array_offset_bytes); - memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset, - le32_to_cpu(hdr->ucode_size_bytes)); - size -= le32_to_cpu(hdr->ucode_size_bytes); - ptr += le32_to_cpu(hdr->ucode_size_bytes); + if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { + offset = le32_to_cpu(hdr->ucode_array_offset_bytes); + memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset, + le32_to_cpu(hdr->ucode_size_bytes)); + size -= le32_to_cpu(hdr->ucode_size_bytes); + ptr += le32_to_cpu(hdr->ucode_size_bytes); + } memset_io(ptr, 0, size); } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index a7e54820a330d70fccf53584722f603fd07ea20c..85bcd236890ec3c2e390e3911834bcf24a5a3f46 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -5479,6 +5479,11 @@ static int gfx_v8_0_set_powergating_state(void *handle, if (amdgpu_sriov_vf(adev)) return 0; + if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG | + AMD_PG_SUPPORT_RLC_SMU_HS | + AMD_PG_SUPPORT_CP | + AMD_PG_SUPPORT_GFX_DMG)) + adev->gfx.rlc.funcs->enter_safe_mode(adev); switch (adev->asic_type) { case CHIP_CARRIZO: case 
CHIP_STONEY: @@ -5527,7 +5532,11 @@ static int gfx_v8_0_set_powergating_state(void *handle, default: break; } - + if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG | + AMD_PG_SUPPORT_RLC_SMU_HS | + AMD_PG_SUPPORT_CP | + AMD_PG_SUPPORT_GFX_DMG)) + adev->gfx.rlc.funcs->exit_safe_mode(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 1a30c54a0889f20e451389e495ba562d5549a1c0..3981915e2311f72735841088e939ee7898211954 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -3113,7 +3113,7 @@ static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev) /* wait for RLC_SAFE_MODE */ for (i = 0; i < adev->usec_timeout; i++) { - if (!REG_GET_FIELD(SOC15_REG_OFFSET(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD)) + if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD)) break; udelay(1); } diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index 3bbf2ccfca89c547f5e37623acb62e12e6d3d512..c76073b422d6cd1ce6a0edb8783e95acc029dab9 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c @@ -1352,8 +1352,6 @@ static int kv_dpm_enable(struct amdgpu_device *adev) return ret; } - kv_update_current_ps(adev, adev->pm.dpm.boot_ps); - if (adev->irq.installed && amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) { ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX); @@ -3054,7 +3052,7 @@ static int kv_dpm_hw_init(void *handle) else adev->pm.dpm_enabled = true; mutex_unlock(&adev->pm.mutex); - + amdgpu_pm_compute_clocks(adev); return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c index f7cf994b1da2871a4437655f8306cf9bcb284468..86db90ff693a9df799725da38307440366da5cbc 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c @@ -78,6 +78,9 @@ psp_v10_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type * case AMDGPU_UCODE_ID_VCE: *type = GFX_FW_TYPE_VCE; break; + case AMDGPU_UCODE_ID_VCN: + *type = GFX_FW_TYPE_VCN; + break; case AMDGPU_UCODE_ID_MAXIMUM: default: return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index 6f1dea157a7753b70570e4a9d3d7064f1c4aeaa0..55613f425931ddfff43dc2370f55b5c97c0b48af 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c @@ -6884,7 +6884,6 @@ static int si_dpm_enable(struct amdgpu_device *adev) si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true); si_thermal_start_thermal_controller(adev); - ni_update_current_ps(adev, boot_ps); return 0; } @@ -7758,7 +7757,7 @@ static int si_dpm_hw_init(void *handle) else adev->pm.dpm_enabled = true; mutex_unlock(&adev->pm.mutex); - + amdgpu_pm_compute_clocks(adev); return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 5183b46563f6cd4311b94cd2bd628f558427aec4..242dfb1433d263cb270a185b11c21486562bf489 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -899,7 +899,7 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = { .emit_frame_size = 4 + /* vce_v3_0_emit_pipeline_sync */ 6, /* amdgpu_vce_ring_emit_fence x1 no user fence */ - .emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */ + .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */ .emit_ib = amdgpu_vce_ring_emit_ib, .emit_fence = 
amdgpu_vce_ring_emit_fence, .test_ring = amdgpu_vce_ring_test_ring, @@ -923,7 +923,7 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = { 6 + /* vce_v3_0_emit_vm_flush */ 4 + /* vce_v3_0_emit_pipeline_sync */ 6 + 6, /* amdgpu_vce_ring_emit_fence x2 vm fence */ - .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */ + .emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */ .emit_ib = vce_v3_0_ring_emit_ib, .emit_vm_flush = vce_v3_0_emit_vm_flush, .emit_pipeline_sync = vce_v3_0_emit_pipeline_sync, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index a098712bdd2fff7291010b3536dfed0ad514eb69..f7b8caccab9fe1ab2d0c53eb8a9acdaaef98efa0 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -91,6 +91,16 @@ static int vcn_v1_0_sw_init(void *handle) if (r) return r; + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + const struct common_firmware_header *hdr; + hdr = (const struct common_firmware_header *)adev->vcn.fw->data; + adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN; + adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE); + DRM_INFO("PSP loading VCN firmware\n"); + } + r = amdgpu_vcn_resume(adev); if (r) return r; @@ -248,26 +258,38 @@ static int vcn_v1_0_resume(void *handle) static void vcn_v1_0_mc_resume(struct amdgpu_device *adev) { uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); - - WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, + uint32_t offset; + + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo)); + WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi)); + WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0); + offset = 0; + } else { + WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, lower_32_bits(adev->vcn.gpu_addr)); - WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, + WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, upper_32_bits(adev->vcn.gpu_addr)); - WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, - AMDGPU_UVD_FIRMWARE_OFFSET >> 3); + offset = size; + WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, + AMDGPU_UVD_FIRMWARE_OFFSET >> 3); + } + WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size); WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, - lower_32_bits(adev->vcn.gpu_addr + size)); + lower_32_bits(adev->vcn.gpu_addr + offset)); WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, - upper_32_bits(adev->vcn.gpu_addr + size)); + upper_32_bits(adev->vcn.gpu_addr + offset)); WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0); WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_HEAP_SIZE); WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, - lower_32_bits(adev->vcn.gpu_addr + size + AMDGPU_VCN_HEAP_SIZE)); + lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE)); WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, - upper_32_bits(adev->vcn.gpu_addr + size + AMDGPU_VCN_HEAP_SIZE)); + upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE)); WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0); WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_STACK_SIZE + (AMDGPU_VCN_SESSION_SIZE * 40)); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 
c74cf22a1ed9d5d7d0c7c7e35e31786f6464161c..3ae88dcff08d1f9dc09b84993dafcec44f702cbf 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -123,6 +123,8 @@ struct kfd_process *kfd_get_process(const struct task_struct *thread) return ERR_PTR(-EINVAL); process = find_process(thread); + if (!process) + return ERR_PTR(-EINVAL); return process; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c index 1dc31aa7278178010be505364a03f4d1ce4df2f0..12856de09f572b4a77be436b25364fca1e3be735 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c @@ -403,6 +403,49 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris12[] = { { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, 
DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND }, { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND }, diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c index 17bca99e8ac825334c982122fec82f22ebac636c..7e2c341dfe5f52d756482b55acf552deaab79470 100644 --- a/drivers/gpu/drm/arm/malidp_hw.c +++ b/drivers/gpu/drm/arm/malidp_hw.c @@ -634,7 +634,8 @@ const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = { .vsync_irq = MALIDP500_DE_IRQ_VSYNC, }, .se_irq_map = { - .irq_mask = MALIDP500_SE_IRQ_CONF_MODE, + .irq_mask = MALIDP500_SE_IRQ_CONF_MODE | + MALIDP500_SE_IRQ_GLOBAL, .vsync_irq = 0, }, .dc_irq_map = { diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c index 94e7e3fa3408cf163fda7f81b40e76ca73ac4c96..16b8b310ae5c7036760b3d8b538b15b5f07bcfa6 100644 --- a/drivers/gpu/drm/arm/malidp_planes.c +++ b/drivers/gpu/drm/arm/malidp_planes.c @@ -23,6 +23,7 @@ /* Layer specific register offsets */ #define MALIDP_LAYER_FORMAT 0x000 +#define LAYER_FORMAT_MASK 0x3f #define MALIDP_LAYER_CONTROL 0x004 #define LAYER_ENABLE (1 << 0) #define 
LAYER_FLOWCFG_MASK 7 @@ -278,7 +279,9 @@ static void malidp_de_plane_update(struct drm_plane *plane, dest_w = plane->state->crtc_w; dest_h = plane->state->crtc_h; - malidp_hw_write(mp->hwdev, ms->format, mp->layer->base); + val = malidp_hw_read(mp->hwdev, mp->layer->base); + val = (val & ~LAYER_FORMAT_MASK) | ms->format; + malidp_hw_write(mp->hwdev, val, mp->layer->base); for (i = 0; i < ms->n_planes; i++) { /* calculate the offset for the layer's plane registers */ diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c index 79ce877bf45f258abb54359a1cb4322b8e27db15..3039936f8f3fc8a589f24cbb84a6e6566a39df6a 100644 --- a/drivers/gpu/drm/armada/armada_crtc.c +++ b/drivers/gpu/drm/armada/armada_crtc.c @@ -483,8 +483,9 @@ static irqreturn_t armada_drm_irq(int irq, void *arg) u32 v, stat = readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR); /* - * This is rediculous - rather than writing bits to clear, we - * have to set the actual status register value. This is racy. + * Reading the ISR appears to clear bits provided CLEAN_SPU_IRQ_ISR + * is set. Writing has some other effect to acknowledge the IRQ - + * without this, we only get a single IRQ. */ writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR); @@ -1104,16 +1105,22 @@ armada_drm_crtc_set_property(struct drm_crtc *crtc, static int armada_drm_crtc_enable_vblank(struct drm_crtc *crtc) { struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); + unsigned long flags; + spin_lock_irqsave(&dcrtc->irq_lock, flags); armada_drm_crtc_enable_irq(dcrtc, VSYNC_IRQ_ENA); + spin_unlock_irqrestore(&dcrtc->irq_lock, flags); return 0; } static void armada_drm_crtc_disable_vblank(struct drm_crtc *crtc) { struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); + unsigned long flags; + spin_lock_irqsave(&dcrtc->irq_lock, flags); armada_drm_crtc_disable_irq(dcrtc, VSYNC_IRQ_ENA); + spin_unlock_irqrestore(&dcrtc->irq_lock, flags); } static const struct drm_crtc_funcs armada_crtc_funcs = { @@ -1221,6 +1228,7 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev, CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1); writel_relaxed(0x2032ff81, dcrtc->base + LCD_SPU_DMA_CTRL1); writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA); + readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR); writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR); ret = devm_request_irq(dev, irq, armada_drm_irq, 0, "armada_drm_crtc", diff --git a/drivers/gpu/drm/armada/armada_hw.h b/drivers/gpu/drm/armada/armada_hw.h index 27319a8335e258cf12cb093c5c598523fbe307f3..345dc4d0851ef43bd73069679c3ec7fef8bfa073 100644 --- a/drivers/gpu/drm/armada/armada_hw.h +++ b/drivers/gpu/drm/armada/armada_hw.h @@ -160,6 +160,7 @@ enum { CFG_ALPHAM_GRA = 0x1 << 16, CFG_ALPHAM_CFG = 0x2 << 16, CFG_ALPHA_MASK = 0xff << 8, +#define CFG_ALPHA(x) ((x) << 8) CFG_PIXCMD_MASK = 0xff, }; diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c index edc44910d79fc7c65bce893d31b2caf9ce1d34e4..2076346b09ee98759eafd674582890d3693924c3 100644 --- a/drivers/gpu/drm/armada/armada_overlay.c +++ b/drivers/gpu/drm/armada/armada_overlay.c @@ -28,6 +28,7 @@ struct armada_ovl_plane_properties { uint16_t contrast; uint16_t saturation; uint32_t colorkey_mode; + uint32_t colorkey_enable; }; struct armada_ovl_plane { @@ -59,11 +60,13 @@ armada_ovl_update_attr(struct armada_ovl_plane_properties *prop, writel_relaxed(0x00002000, dcrtc->base + LCD_SPU_CBSH_HUE); spin_lock_irq(&dcrtc->irq_lock); - armada_updatel(prop->colorkey_mode | CFG_ALPHAM_GRA, - 
CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK, - dcrtc->base + LCD_SPU_DMA_CTRL1); - - armada_updatel(ADV_GRACOLORKEY, 0, dcrtc->base + LCD_SPU_ADV_REG); + armada_updatel(prop->colorkey_mode, + CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK, + dcrtc->base + LCD_SPU_DMA_CTRL1); + if (dcrtc->variant->has_spu_adv_reg) + armada_updatel(prop->colorkey_enable, + ADV_GRACOLORKEY | ADV_VIDCOLORKEY, + dcrtc->base + LCD_SPU_ADV_REG); spin_unlock_irq(&dcrtc->irq_lock); } @@ -339,8 +342,17 @@ static int armada_ovl_plane_set_property(struct drm_plane *plane, dplane->prop.colorkey_vb |= K2B(val); update_attr = true; } else if (property == priv->colorkey_mode_prop) { - dplane->prop.colorkey_mode &= ~CFG_CKMODE_MASK; - dplane->prop.colorkey_mode |= CFG_CKMODE(val); + if (val == CKMODE_DISABLE) { + dplane->prop.colorkey_mode = + CFG_CKMODE(CKMODE_DISABLE) | + CFG_ALPHAM_CFG | CFG_ALPHA(255); + dplane->prop.colorkey_enable = 0; + } else { + dplane->prop.colorkey_mode = + CFG_CKMODE(val) | + CFG_ALPHAM_GRA | CFG_ALPHA(0); + dplane->prop.colorkey_enable = ADV_GRACOLORKEY; + } update_attr = true; } else if (property == priv->brightness_prop) { dplane->prop.brightness = val - 256; @@ -470,7 +482,9 @@ int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs) dplane->prop.colorkey_yr = 0xfefefe00; dplane->prop.colorkey_ug = 0x01010100; dplane->prop.colorkey_vb = 0x01010100; - dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB); + dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB) | + CFG_ALPHAM_GRA | CFG_ALPHA(0); + dplane->prop.colorkey_enable = ADV_GRACOLORKEY; dplane->prop.brightness = 0; dplane->prop.contrast = 0x4000; dplane->prop.saturation = 0x4000; diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index b2431aee788795cfa078dbf4b5515c75dbe1961c..f5091827628ab2ab9d6040b42786b5231cffa13e 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -424,6 +424,18 @@ static void adv7511_hpd_work(struct work_struct *work) else status = connector_status_disconnected; + /* + * The bridge resets its registers on unplug. So when we get a plug + * event and we're already supposed to be powered, cycle the bridge to + * restore its state. 
+ */ + if (status == connector_status_connected && + adv7511->connector.status == connector_status_disconnected && + adv7511->powered) { + regcache_mark_dirty(adv7511->regmap); + adv7511_power_on(adv7511); + } + if (adv7511->connector.status != status) { adv7511->connector.status = status; drm_kms_helper_hotplug_event(adv7511->connector.dev); diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c index 5131bfb94f065ceb20ba61713f990b76f11103cb..0cb69ee94ac16822965632f265202c8ffeea482a 100644 --- a/drivers/gpu/drm/bridge/sil-sii8620.c +++ b/drivers/gpu/drm/bridge/sil-sii8620.c @@ -788,6 +788,7 @@ static void sii8620_burst_rx_all(struct sii8620 *ctx) static void sii8620_fetch_edid(struct sii8620 *ctx) { u8 lm_ddc, ddc_cmd, int3, cbus; + unsigned long timeout; int fetched, i; int edid_len = EDID_LENGTH; u8 *edid; @@ -837,23 +838,31 @@ static void sii8620_fetch_edid(struct sii8620 *ctx) REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_ENH_DDC_READ_NO_ACK ); - do { - int3 = sii8620_readb(ctx, REG_INTR3); + int3 = 0; + timeout = jiffies + msecs_to_jiffies(200); + for (;;) { cbus = sii8620_readb(ctx, REG_CBUS_STATUS); - - if (int3 & BIT_DDC_CMD_DONE) - break; - - if (!(cbus & BIT_CBUS_STATUS_CBUS_CONNECTED)) { + if (~cbus & BIT_CBUS_STATUS_CBUS_CONNECTED) { + kfree(edid); + edid = NULL; + goto end; + } + if (int3 & BIT_DDC_CMD_DONE) { + if (sii8620_readb(ctx, REG_DDC_DOUT_CNT) + >= FETCH_SIZE) + break; + } else { + int3 = sii8620_readb(ctx, REG_INTR3); + } + if (time_is_before_jiffies(timeout)) { + ctx->error = -ETIMEDOUT; + dev_err(ctx->dev, "timeout during EDID read\n"); kfree(edid); edid = NULL; goto end; } - } while (1); - - sii8620_readb(ctx, REG_DDC_STATUS); - while (sii8620_readb(ctx, REG_DDC_DOUT_CNT) < FETCH_SIZE) usleep_range(10, 20); + } sii8620_read_buf(ctx, REG_DDC_DATA, edid + fetched, FETCH_SIZE); if (fetched + FETCH_SIZE == EDID_LENGTH) { @@ -1036,23 +1045,23 @@ static void sii8620_set_format(struct sii8620 *ctx) BIT_M3_P0CTRL_MHL3_P0_PIXEL_MODE_PACKED, ctx->use_packed_pixel ? ~0 : 0); } else { - if (ctx->use_packed_pixel) + if (ctx->use_packed_pixel) { sii8620_write_seq_static(ctx, REG_VID_MODE, BIT_VID_MODE_M1080P, REG_MHL_TOP_CTL, BIT_MHL_TOP_CTL_MHL_PP_SEL | 1, REG_MHLTX_CTL6, 0x60 ); - else + } else { sii8620_write_seq_static(ctx, REG_VID_MODE, 0, REG_MHL_TOP_CTL, 1, REG_MHLTX_CTL6, 0xa0 ); + } } if (ctx->use_packed_pixel) - out_fmt = VAL_TPI_FORMAT(YCBCR422, FULL) | - BIT_TPI_OUTPUT_CSCMODE709; + out_fmt = VAL_TPI_FORMAT(YCBCR422, FULL); else out_fmt = VAL_TPI_FORMAT(RGB, FULL); @@ -1187,7 +1196,7 @@ static void sii8620_start_hdmi(struct sii8620 *ctx) int clk = ctx->pixel_clock * (ctx->use_packed_pixel ? 
2 : 3); int i; - for (i = 0; i < ARRAY_SIZE(clk_spec); ++i) + for (i = 0; i < ARRAY_SIZE(clk_spec) - 1; ++i) if (clk < clk_spec[i].max_clk) break; diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 1f1fd3139c5b8c2b9793b625b008166e81b43973..c29dea89560563485532dd5f48a8fc6c77fd33bb 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -114,6 +114,9 @@ static const struct edid_quirk { /* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */ { "CPT", 0x17df, EDID_QUIRK_FORCE_6BPC }, + /* SDC panel of Lenovo B50-80 reports 8 bpc, but is a 6 bpc panel */ + { "SDC", 0x3652, EDID_QUIRK_FORCE_6BPC }, + /* Belinea 10 15 55 */ { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 }, { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 }, diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index 6be5b53c3b279f42ee6dbcc445173fa539721ce5..f905c214fdd0e65a2cf1911680f9cfd7abad25e1 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c @@ -261,7 +261,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win, unsigned long val; val = readl(ctx->addr + DECON_WINCONx(win)); - val &= ~WINCONx_BPPMODE_MASK; + val &= WINCONx_ENWIN_F; switch (fb->format->format) { case DRM_FORMAT_XRGB1555: @@ -352,8 +352,8 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc, writel(val, ctx->addr + DECON_VIDOSDxB(win)); } - val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) | - VIDOSD_Wx_ALPHA_B_F(0x0); + val = VIDOSD_Wx_ALPHA_R_F(0xff) | VIDOSD_Wx_ALPHA_G_F(0xff) | + VIDOSD_Wx_ALPHA_B_F(0xff); writel(val, ctx->addr + DECON_VIDOSDxC(win)); val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) | diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 0506b2b17ac1c45a1bea639cb37ed90075be3677..48f913d8208c45134e4f75977f49fb3928c5dd41 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -532,21 +532,25 @@ static int gsc_src_set_fmt(struct device *dev, u32 fmt) GSC_IN_CHROMA_ORDER_CRCB); break; case DRM_FORMAT_NV21: + cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV420_2P); + break; case DRM_FORMAT_NV61: - cfg |= (GSC_IN_CHROMA_ORDER_CRCB | - GSC_IN_YUV420_2P); + cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV422_2P); break; case DRM_FORMAT_YUV422: cfg |= GSC_IN_YUV422_3P; break; case DRM_FORMAT_YUV420: + cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV420_3P); + break; case DRM_FORMAT_YVU420: - cfg |= GSC_IN_YUV420_3P; + cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV420_3P); break; case DRM_FORMAT_NV12: + cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV420_2P); + break; case DRM_FORMAT_NV16: - cfg |= (GSC_IN_CHROMA_ORDER_CBCR | - GSC_IN_YUV420_2P); + cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV422_2P); break; default: dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt); @@ -806,18 +810,25 @@ static int gsc_dst_set_fmt(struct device *dev, u32 fmt) GSC_OUT_CHROMA_ORDER_CRCB); break; case DRM_FORMAT_NV21: - case DRM_FORMAT_NV61: cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_2P); break; + case DRM_FORMAT_NV61: + cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV422_2P); + break; case DRM_FORMAT_YUV422: + cfg |= GSC_OUT_YUV422_3P; + break; case DRM_FORMAT_YUV420: + cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV420_3P); + break; case DRM_FORMAT_YVU420: - cfg |= GSC_OUT_YUV420_3P; + cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_3P); break; case 
DRM_FORMAT_NV12: + cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV420_2P); + break; case DRM_FORMAT_NV16: - cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | - GSC_OUT_YUV420_2P); + cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV422_2P); break; default: dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt); diff --git a/drivers/gpu/drm/exynos/regs-gsc.h b/drivers/gpu/drm/exynos/regs-gsc.h index 4704a993cbb7f003a51b901ae9b291a9adc9fe09..16b39734115c93855d82a5de94e16cc5e2570940 100644 --- a/drivers/gpu/drm/exynos/regs-gsc.h +++ b/drivers/gpu/drm/exynos/regs-gsc.h @@ -138,6 +138,7 @@ #define GSC_OUT_YUV420_3P (3 << 4) #define GSC_OUT_YUV422_1P (4 << 4) #define GSC_OUT_YUV422_2P (5 << 4) +#define GSC_OUT_YUV422_3P (6 << 4) #define GSC_OUT_YUV444 (7 << 4) #define GSC_OUT_TILE_TYPE_MASK (1 << 2) #define GSC_OUT_TILE_C_16x8 (0 << 2) diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 9bf4045cd679ab6598494e13440e3b4f23d60c78..73c672fc17c4a548314269de4d82d081be249cd0 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -42,6 +42,8 @@ #include #include +#include + #include "i915_drv.h" #include "gvt.h" @@ -953,7 +955,8 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) { struct vfio_region_info info; struct vfio_info_cap caps = { .buf = NULL, .size = 0 }; - int i, ret; + unsigned int i; + int ret; struct vfio_region_info_cap_sparse_mmap *sparse = NULL; size_t size; int nr_areas = 1; @@ -1030,6 +1033,10 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, if (info.index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions) return -EINVAL; + info.index = + array_index_nospec(info.index, + VFIO_PCI_NUM_REGIONS + + vgpu->vdev.num_regions); i = info.index - VFIO_PCI_NUM_REGIONS; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 90359c7954c8d66532808e82f8116121ac576ca5..f354cfe63f7be34a398c57209dd7fb5a74c0e5fe 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -687,10 +687,10 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains) switch (obj->base.write_domain) { case I915_GEM_DOMAIN_GTT: - if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv)) { + if (!HAS_LLC(dev_priv)) { intel_runtime_pm_get(dev_priv); spin_lock_irq(&dev_priv->uncore.lock); - POSTING_READ_FW(RING_ACTHD(dev_priv->engine[RCS]->mmio_base)); + POSTING_READ_FW(RING_HEAD(dev_priv->engine[RCS]->mmio_base)); spin_unlock_irq(&dev_priv->uncore.lock); intel_runtime_pm_put(dev_priv); } @@ -3608,7 +3608,8 @@ restart: return -EBUSY; } - if (i915_gem_valid_gtt_space(vma, cache_level)) + if (!i915_vma_is_closed(vma) && + i915_gem_valid_gtt_space(vma, cache_level)) continue; ret = i915_vma_unbind(vma); diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index 709efe2357eac9948ba71a76e11c02ace9f6ae4c..05ae8c4a8a1b615e272d4bb20d09b748f6b1e885 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c @@ -782,6 +782,9 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file I915_USERPTR_UNSYNCHRONIZED)) return -EINVAL; + if (!args->user_size) + return -EINVAL; + if (offset_in_page(args->user_ptr | args->user_size)) return -EINVAL; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index be813b2738c1526bcd4a1b7395ef1453e279910f..2e706f1abe64f0da52a0f2406b1b308113a02b7c 100644 
--- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -8462,6 +8462,7 @@ enum skl_power_gate { #define TRANS_MSA_10_BPC (2<<5) #define TRANS_MSA_12_BPC (3<<5) #define TRANS_MSA_16_BPC (4<<5) +#define TRANS_MSA_CEA_RANGE (1<<3) /* LCPLL Control */ #define LCPLL_CTL _MMIO(0x130040) diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 02d1a5eacb00ef5fa89deeaaa41318f81ebac902..76eed1fdac0960f19ccdefe91d6cb7dbe4da7358 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -430,6 +430,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) u64 start, end; int ret; + GEM_BUG_ON(i915_vma_is_closed(vma)); GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)); GEM_BUG_ON(drm_mm_node_allocated(&vma->node)); @@ -590,7 +591,9 @@ static void i915_vma_destroy(struct i915_vma *vma) GEM_BUG_ON(i915_gem_active_isset(&vma->last_read[i])); GEM_BUG_ON(i915_gem_active_isset(&vma->last_fence)); + list_del(&vma->obj_link); list_del(&vma->vm_link); + if (!i915_vma_is_ggtt(vma)) i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm)); @@ -602,7 +605,6 @@ void i915_vma_close(struct i915_vma *vma) GEM_BUG_ON(i915_vma_is_closed(vma)); vma->flags |= I915_VMA_CLOSED; - list_del(&vma->obj_link); rb_erase(&vma->obj_node, &vma->obj->vma_tree); if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma)) diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 5e5fe03b638cbf2ee17206ccd4c6ee985134645e..3a4a581345c431dd3f2ba7235103913c77101751 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -1396,6 +1396,10 @@ void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state) WARN_ON(transcoder_is_dsi(cpu_transcoder)); temp = TRANS_MSA_SYNC_CLK; + + if (crtc_state->limited_color_range) + temp |= TRANS_MSA_CEA_RANGE; + switch (crtc_state->pipe_bpp) { case 18: temp |= TRANS_MSA_6_BPC; diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c index 2fdf302ebdad096e39f1bf2e853313ffe14a0caa..8a541d0e3e80d512e8ab35f339743f99bce4cb9b 100644 --- a/drivers/gpu/drm/i915/intel_lpe_audio.c +++ b/drivers/gpu/drm/i915/intel_lpe_audio.c @@ -128,9 +128,7 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv) kfree(rsc); - pm_runtime_forbid(&platdev->dev); - pm_runtime_set_active(&platdev->dev); - pm_runtime_enable(&platdev->dev); + pm_runtime_no_callbacks(&platdev->dev); return platdev; diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c index beb9baaf2f2e4e573956f1ea842c2963dce95bd7..f71fef10ecc6180a3fc8835bb864dbf24771f705 100644 --- a/drivers/gpu/drm/i915/intel_lspcon.c +++ b/drivers/gpu/drm/i915/intel_lspcon.c @@ -75,7 +75,7 @@ static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon, lspcon_mode_name(mode)); wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode || - current_mode == DRM_LSPCON_MODE_INVALID, 100); + current_mode == DRM_LSPCON_MODE_INVALID, 400); if (current_mode != mode) DRM_DEBUG_KMS("LSPCON mode hasn't settled\n"); diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index 56dd7a9a8e254a3e03e4bebb5fda75b8555abbba..dd5312b02a8d21749fb004d41db74ff4bc511b1a 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c @@ -612,6 +612,9 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) return PTR_ERR(imx_ldb->regmap); } + /* disable LDB by resetting the control 
register to POR default */ + regmap_write(imx_ldb->regmap, IOMUXC_GPR2, 0); + imx_ldb->dev = dev; if (of_id) @@ -652,14 +655,14 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) if (ret || i < 0 || i > 1) return -EINVAL; + if (!of_device_is_available(child)) + continue; + if (dual && i > 0) { dev_warn(dev, "dual-channel mode, ignoring second output\n"); continue; } - if (!of_device_is_available(child)) - continue; - channel = &imx_ldb->channel[i]; channel->ldb = imx_ldb; channel->chno = i; diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 430830d63a33dcd7aad0ecf17ed40b8cd5051162..2c6d19683688623652a5c24f4dbded829b2dd441 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -570,12 +570,16 @@ nouveau_connector_detect(struct drm_connector *connector, bool force) nv_connector->edid = NULL; } - /* Outputs are only polled while runtime active, so acquiring a - * runtime PM ref here is unnecessary (and would deadlock upon - * runtime suspend because it waits for polling to finish). + /* Outputs are only polled while runtime active, so resuming the + * device here is unnecessary (and would deadlock upon runtime suspend + * because it waits for polling to finish). We do however, want to + * prevent the autosuspend timer from elapsing during this operation + * if possible. */ - if (!drm_kms_helper_is_poll_worker()) { - ret = pm_runtime_get_sync(connector->dev->dev); + if (drm_kms_helper_is_poll_worker()) { + pm_runtime_get_noresume(dev->dev); + } else { + ret = pm_runtime_get_sync(dev->dev); if (ret < 0 && ret != -EACCES) return conn_status; } @@ -653,10 +657,8 @@ detect_analog: out: - if (!drm_kms_helper_is_poll_worker()) { - pm_runtime_mark_last_busy(connector->dev->dev); - pm_runtime_put_autosuspend(connector->dev->dev); - } + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put_autosuspend(dev->dev); return conn_status; } @@ -1120,6 +1122,26 @@ nouveau_connector_hotplug(struct nvif_notify *notify) const struct nvif_notify_conn_rep_v0 *rep = notify->data; const char *name = connector->name; struct nouveau_encoder *nv_encoder; + int ret; + + ret = pm_runtime_get(drm->dev->dev); + if (ret == 0) { + /* We can't block here if there's a pending PM request + * running, as we'll deadlock nouveau_display_fini() when it + * calls nvif_put() on our nvif_notify struct. 
So, simply + * defer the hotplug event until the device finishes resuming + */ + NV_DEBUG(drm, "Deferring HPD on %s until runtime resume\n", + name); + schedule_work(&drm->hpd_work); + + pm_runtime_put_noidle(drm->dev->dev); + return NVIF_NOTIFY_KEEP; + } else if (ret != 1 && ret != -EACCES) { + NV_WARN(drm, "HPD on %s dropped due to RPM failure: %d\n", + name, ret); + return NVIF_NOTIFY_DROP; + } if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) { NV_DEBUG(drm, "service %s\n", name); @@ -1137,6 +1159,8 @@ nouveau_connector_hotplug(struct nvif_notify *notify) drm_helper_hpd_irq_event(connector->dev); } + pm_runtime_mark_last_busy(drm->dev->dev); + pm_runtime_put_autosuspend(drm->dev->dev); return NVIF_NOTIFY_KEEP; } diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c index 963a4dba8213eb6080ff3713cbbdbb20ddb4b61e..9109b69cd052958bbc126b4bad4f490720e11f4a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c +++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c @@ -160,7 +160,11 @@ nouveau_debugfs_pstate_set(struct file *file, const char __user *ubuf, args.ustate = value; } + ret = pm_runtime_get_sync(drm->dev); + if (IS_ERR_VALUE(ret) && ret != -EACCES) + return ret; ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_USER, &args, sizeof(args)); + pm_runtime_put_autosuspend(drm->dev); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index caf53503c0f7a5cea1265c2b333212f76e3b9014..2612702c3a3ffd8daed50a1e253b11d96c71dab9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -356,8 +356,6 @@ nouveau_display_hpd_work(struct work_struct *work) pm_runtime_get_sync(drm->dev->dev); drm_helper_hpd_irq_event(drm->dev); - /* enable polling for external displays */ - drm_kms_helper_poll_enable(drm->dev); pm_runtime_mark_last_busy(drm->dev->dev); pm_runtime_put_sync(drm->dev->dev); @@ -380,15 +378,29 @@ nouveau_display_acpi_ntfy(struct notifier_block *nb, unsigned long val, { struct nouveau_drm *drm = container_of(nb, typeof(*drm), acpi_nb); struct acpi_bus_event *info = data; + int ret; if (!strcmp(info->device_class, ACPI_VIDEO_CLASS)) { if (info->type == ACPI_VIDEO_NOTIFY_PROBE) { - /* - * This may be the only indication we receive of a - * connector hotplug on a runtime suspended GPU, - * schedule hpd_work to check. - */ - schedule_work(&drm->hpd_work); + ret = pm_runtime_get(drm->dev->dev); + if (ret == 1 || ret == -EACCES) { + /* If the GPU is already awake, or in a state + * where we can't wake it up, it can handle + * it's own hotplug events. + */ + pm_runtime_put_autosuspend(drm->dev->dev); + } else if (ret == 0) { + /* This may be the only indication we receive + * of a connector hotplug on a runtime + * suspended GPU, schedule hpd_work to check. 
+ */ + NV_DEBUG(drm, "ACPI requested connector reprobe\n"); + schedule_work(&drm->hpd_work); + pm_runtime_put_noidle(drm->dev->dev); + } else { + NV_WARN(drm, "Dropped ACPI reprobe event due to RPM error: %d\n", + ret); + } /* acpi-video should not generate keypresses for this */ return NOTIFY_BAD; @@ -412,6 +424,11 @@ nouveau_display_init(struct drm_device *dev) if (ret) return ret; + /* enable connector detection and polling for connectors without HPD + * support + */ + drm_kms_helper_poll_enable(dev); + /* enable hotplug interrupts */ drm_connector_list_iter_begin(dev, &conn_iter); nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) { @@ -426,7 +443,7 @@ nouveau_display_init(struct drm_device *dev) } void -nouveau_display_fini(struct drm_device *dev, bool suspend) +nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime) { struct nouveau_display *disp = nouveau_display(dev); struct nouveau_drm *drm = nouveau_drm(dev); @@ -451,6 +468,9 @@ nouveau_display_fini(struct drm_device *dev, bool suspend) } drm_connector_list_iter_end(&conn_iter); + if (!runtime) + cancel_work_sync(&drm->hpd_work); + drm_kms_helper_poll_disable(dev); disp->fini(dev); } @@ -640,11 +660,11 @@ nouveau_display_suspend(struct drm_device *dev, bool runtime) } } - nouveau_display_fini(dev, true); + nouveau_display_fini(dev, true, runtime); return 0; } - nouveau_display_fini(dev, true); + nouveau_display_fini(dev, true, runtime); list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { struct nouveau_framebuffer *nouveau_fb; diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h index 34cd144681b9ba71a0e5fcc94a48ba8cdf4be517..5e7dae79c2ee3b763ae3c52965f3d7b8245a5c72 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.h +++ b/drivers/gpu/drm/nouveau/nouveau_display.h @@ -64,7 +64,7 @@ nouveau_display(struct drm_device *dev) int nouveau_display_create(struct drm_device *dev); void nouveau_display_destroy(struct drm_device *dev); int nouveau_display_init(struct drm_device *dev); -void nouveau_display_fini(struct drm_device *dev, bool suspend); +void nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime); int nouveau_display_suspend(struct drm_device *dev, bool runtime); void nouveau_display_resume(struct drm_device *dev, bool runtime); int nouveau_display_vblank_enable(struct drm_device *, unsigned int); diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 362a34cb435db7ad2b8dd5810c9dd25a0d5b744e..7d3c7bb0ebfa9985aeb7482948b41319a6dffaae 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -546,7 +546,7 @@ nouveau_drm_unload(struct drm_device *dev) nouveau_debugfs_fini(drm); if (dev->mode_config.num_crtc) - nouveau_display_fini(dev, false); + nouveau_display_fini(dev, false, false); nouveau_display_destroy(dev); nouveau_bios_takedown(dev); @@ -848,8 +848,10 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv) get_task_comm(tmpname, current); snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid)); - if (!(cli = kzalloc(sizeof(*cli), GFP_KERNEL))) - return ret; + if (!(cli = kzalloc(sizeof(*cli), GFP_KERNEL))) { + ret = -ENOMEM; + goto done; + } ret = nouveau_cli_init(drm, name, cli); if (ret) diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 2170534101cafda1e66ccdf0ee5262a62fdde7bf..60ffb70bb9089c08040c8a2fe918cb471d998fb9 100644 --- 
a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -599,7 +599,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli, struct nouveau_bo *nvbo; uint32_t data; - if (unlikely(r->bo_index > req->nr_buffers)) { + if (unlikely(r->bo_index >= req->nr_buffers)) { NV_PRINTK(err, cli, "reloc bo index invalid\n"); ret = -EINVAL; break; @@ -609,7 +609,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli, if (b->presumed.valid) continue; - if (unlikely(r->reloc_bo_index > req->nr_buffers)) { + if (unlikely(r->reloc_bo_index >= req->nr_buffers)) { NV_PRINTK(err, cli, "reloc container bo index invalid\n"); ret = -EINVAL; break; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c index 189ed80e21ffbd428174e458bb946f2880c3da57..fa1ead337c237cbca4fd53f6a84440ad9785bd59 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c @@ -23,6 +23,10 @@ #ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER #include "priv.h" +#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU) +#include +#endif + static int nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev) { @@ -105,6 +109,15 @@ nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev) unsigned long pgsize_bitmap; int ret; +#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU) + if (dev->archdata.mapping) { + struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); + + arm_iommu_detach_device(dev); + arm_iommu_release_mapping(mapping); + } +#endif + if (!tdev->func->iommu_bit) return; diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c index a188a3959f1ad33384fb266b967853a61bb4f973..6ad827b93ae19a5082ddec0e08dd627b568a2af0 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c @@ -823,7 +823,7 @@ static void s6e8aa0_read_mtp_id(struct s6e8aa0 *ctx) int ret, i; ret = s6e8aa0_dcs_read(ctx, 0xd1, id, ARRAY_SIZE(id)); - if (ret < ARRAY_SIZE(id) || id[0] == 0x00) { + if (ret < 0 || ret < ARRAY_SIZE(id) || id[0] == 0x00) { dev_err(ctx->dev, "read id failed\n"); ctx->error = -EIO; return; diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index ace59651892fb636400088e2ac9136a0917d45dd..8d3c8070ed86238206f208f22cb84a10d90922cf 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c @@ -241,7 +241,6 @@ static int sun4i_drv_add_endpoints(struct device *dev, remote = of_graph_get_remote_port_parent(ep); if (!remote) { DRM_DEBUG_DRIVER("Error retrieving the output node\n"); - of_node_put(remote); continue; } @@ -255,11 +254,13 @@ static int sun4i_drv_add_endpoints(struct device *dev, if (of_graph_parse_endpoint(ep, &endpoint)) { DRM_DEBUG_DRIVER("Couldn't parse endpoint\n"); + of_node_put(remote); continue; } if (!endpoint.id) { DRM_DEBUG_DRIVER("Endpoint is our panel... skipping\n"); + of_node_put(remote); continue; } } diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 0598b4c18c253bb9f10076174a0b6c2774a86e87..75b1c8c03ce95eff8e9f824c1a161a70ffced508 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -470,7 +470,7 @@ int tegra_drm_submit(struct tegra_drm_context *context, * unaligned offset is malformed and cause commands stream * corruption on the buffer address relocation. 
*/ - if (offset & 3 || offset >= obj->gem.size) { + if (offset & 3 || offset > obj->gem.size) { err = -EINVAL; goto fail; } diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h index 2a75ab80527a954990c6ee2305883a97fa2ea41b..2c149b841cf1edcc4e86032a73753ffefad53d25 100644 --- a/drivers/gpu/drm/udl/udl_drv.h +++ b/drivers/gpu/drm/udl/udl_drv.h @@ -110,7 +110,7 @@ udl_fb_user_fb_create(struct drm_device *dev, struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd); -int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr, +int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr, const char *front, char **urb_buf_ptr, u32 byte_offset, u32 device_byte_offset, u32 byte_width, int *ident_ptr, int *sent_ptr); diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c index d5583190f3e44de77560d76c2d5a5a4cf7db62de..491f1892b50e7995de815f44909f9cca6ebd07cd 100644 --- a/drivers/gpu/drm/udl/udl_fb.c +++ b/drivers/gpu/drm/udl/udl_fb.c @@ -90,7 +90,10 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y, int bytes_identical = 0; struct urb *urb; int aligned_x; - int bpp = fb->base.format->cpp[0]; + int log_bpp; + + BUG_ON(!is_power_of_2(fb->base.format->cpp[0])); + log_bpp = __ffs(fb->base.format->cpp[0]); if (!fb->active_16) return 0; @@ -125,12 +128,12 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y, for (i = y; i < y + height ; i++) { const int line_offset = fb->base.pitches[0] * i; - const int byte_offset = line_offset + (x * bpp); - const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp); - if (udl_render_hline(dev, bpp, &urb, + const int byte_offset = line_offset + (x << log_bpp); + const int dev_byte_offset = (fb->base.width * i + x) << log_bpp; + if (udl_render_hline(dev, log_bpp, &urb, (char *) fb->obj->vmapping, &cmd, byte_offset, dev_byte_offset, - width * bpp, + width << log_bpp, &bytes_identical, &bytes_sent)) goto error; } @@ -149,7 +152,7 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y, error: atomic_add(bytes_sent, &udl->bytes_sent); atomic_add(bytes_identical, &udl->bytes_identical); - atomic_add(width*height*bpp, &udl->bytes_rendered); + atomic_add((width * height) << log_bpp, &udl->bytes_rendered); end_cycles = get_cycles(); atomic_add(((unsigned int) ((end_cycles - start_cycles) >> 10)), /* Kcycles */ @@ -221,7 +224,7 @@ static int udl_fb_open(struct fb_info *info, int user) struct fb_deferred_io *fbdefio; - fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL); + fbdefio = kzalloc(sizeof(struct fb_deferred_io), GFP_KERNEL); if (fbdefio) { fbdefio->delay = DL_DEFIO_WRITE_DELAY; @@ -429,9 +432,11 @@ static void udl_fbdev_destroy(struct drm_device *dev, { drm_fb_helper_unregister_fbi(&ufbdev->helper); drm_fb_helper_fini(&ufbdev->helper); - drm_framebuffer_unregister_private(&ufbdev->ufb.base); - drm_framebuffer_cleanup(&ufbdev->ufb.base); - drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base); + if (ufbdev->ufb.obj) { + drm_framebuffer_unregister_private(&ufbdev->ufb.base); + drm_framebuffer_cleanup(&ufbdev->ufb.base); + drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base); + } } int udl_fbdev_init(struct drm_device *dev) diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c index 0328b2c7b210a0331f8721e93c3b25124ad27daf..f8ea3c99b523234273f705a3884a383df0949ae1 100644 --- a/drivers/gpu/drm/udl/udl_main.c +++ b/drivers/gpu/drm/udl/udl_main.c @@ -169,18 +169,13 @@ static void udl_free_urb_list(struct drm_device 
*dev) struct list_head *node; struct urb_node *unode; struct urb *urb; - int ret; unsigned long flags; DRM_DEBUG("Waiting for completes and freeing all render urbs\n"); /* keep waiting and freeing, until we've got 'em all */ while (count--) { - - /* Getting interrupted means a leak, but ok at shutdown*/ - ret = down_interruptible(&udl->urbs.limit_sem); - if (ret) - break; + down(&udl->urbs.limit_sem); spin_lock_irqsave(&udl->urbs.lock, flags); @@ -204,17 +199,22 @@ static void udl_free_urb_list(struct drm_device *dev) static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size) { struct udl_device *udl = dev->dev_private; - int i = 0; struct urb *urb; struct urb_node *unode; char *buf; + size_t wanted_size = count * size; spin_lock_init(&udl->urbs.lock); +retry: udl->urbs.size = size; INIT_LIST_HEAD(&udl->urbs.list); - while (i < count) { + sema_init(&udl->urbs.limit_sem, 0); + udl->urbs.count = 0; + udl->urbs.available = 0; + + while (udl->urbs.count * size < wanted_size) { unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL); if (!unode) break; @@ -230,11 +230,16 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size) } unode->urb = urb; - buf = usb_alloc_coherent(udl->udev, MAX_TRANSFER, GFP_KERNEL, + buf = usb_alloc_coherent(udl->udev, size, GFP_KERNEL, &urb->transfer_dma); if (!buf) { kfree(unode); usb_free_urb(urb); + if (size > PAGE_SIZE) { + size /= 2; + udl_free_urb_list(dev); + goto retry; + } break; } @@ -245,16 +250,14 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size) list_add_tail(&unode->entry, &udl->urbs.list); - i++; + up(&udl->urbs.limit_sem); + udl->urbs.count++; + udl->urbs.available++; } - sema_init(&udl->urbs.limit_sem, i); - udl->urbs.count = i; - udl->urbs.available = i; - - DRM_DEBUG("allocated %d %d byte urbs\n", i, (int) size); + DRM_DEBUG("allocated %d %d byte urbs\n", udl->urbs.count, (int) size); - return i; + return udl->urbs.count; } struct urb *udl_get_urb(struct drm_device *dev) diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c index b992644c17e6b565b414351b1e262d4b61c9f38d..f3331d33547a1708fc7be2ee4eaeda676da3ff13 100644 --- a/drivers/gpu/drm/udl/udl_transfer.c +++ b/drivers/gpu/drm/udl/udl_transfer.c @@ -83,12 +83,12 @@ static inline u16 pixel32_to_be16(const uint32_t pixel) ((pixel >> 8) & 0xf800)); } -static inline u16 get_pixel_val16(const uint8_t *pixel, int bpp) +static inline u16 get_pixel_val16(const uint8_t *pixel, int log_bpp) { - u16 pixel_val16 = 0; - if (bpp == 2) + u16 pixel_val16; + if (log_bpp == 1) pixel_val16 = *(const uint16_t *)pixel; - else if (bpp == 4) + else pixel_val16 = pixel32_to_be16(*(const uint32_t *)pixel); return pixel_val16; } @@ -125,8 +125,9 @@ static void udl_compress_hline16( const u8 *const pixel_end, uint32_t *device_address_ptr, uint8_t **command_buffer_ptr, - const uint8_t *const cmd_buffer_end, int bpp) + const uint8_t *const cmd_buffer_end, int log_bpp) { + const int bpp = 1 << log_bpp; const u8 *pixel = *pixel_start_ptr; uint32_t dev_addr = *device_address_ptr; uint8_t *cmd = *command_buffer_ptr; @@ -153,12 +154,12 @@ static void udl_compress_hline16( raw_pixels_count_byte = cmd++; /* we'll know this later */ raw_pixel_start = pixel; - cmd_pixel_end = pixel + min3(MAX_CMD_PIXELS + 1UL, - (unsigned long)(pixel_end - pixel) / bpp, - (unsigned long)(cmd_buffer_end - 1 - cmd) / 2) * bpp; + cmd_pixel_end = pixel + (min3(MAX_CMD_PIXELS + 1UL, + (unsigned long)(pixel_end - pixel) >> log_bpp, + (unsigned 
long)(cmd_buffer_end - 1 - cmd) / 2) << log_bpp); prefetch_range((void *) pixel, cmd_pixel_end - pixel); - pixel_val16 = get_pixel_val16(pixel, bpp); + pixel_val16 = get_pixel_val16(pixel, log_bpp); while (pixel < cmd_pixel_end) { const u8 *const start = pixel; @@ -170,7 +171,7 @@ static void udl_compress_hline16( pixel += bpp; while (pixel < cmd_pixel_end) { - pixel_val16 = get_pixel_val16(pixel, bpp); + pixel_val16 = get_pixel_val16(pixel, log_bpp); if (pixel_val16 != repeating_pixel_val16) break; pixel += bpp; @@ -179,10 +180,10 @@ static void udl_compress_hline16( if (unlikely(pixel > start + bpp)) { /* go back and fill in raw pixel count */ *raw_pixels_count_byte = (((start - - raw_pixel_start) / bpp) + 1) & 0xFF; + raw_pixel_start) >> log_bpp) + 1) & 0xFF; /* immediately after raw data is repeat byte */ - *cmd++ = (((pixel - start) / bpp) - 1) & 0xFF; + *cmd++ = (((pixel - start) >> log_bpp) - 1) & 0xFF; /* Then start another raw pixel span */ raw_pixel_start = pixel; @@ -192,14 +193,14 @@ static void udl_compress_hline16( if (pixel > raw_pixel_start) { /* finalize last RAW span */ - *raw_pixels_count_byte = ((pixel-raw_pixel_start) / bpp) & 0xFF; + *raw_pixels_count_byte = ((pixel - raw_pixel_start) >> log_bpp) & 0xFF; } else { /* undo unused byte */ cmd--; } - *cmd_pixels_count_byte = ((pixel - cmd_pixel_start) / bpp) & 0xFF; - dev_addr += ((pixel - cmd_pixel_start) / bpp) * 2; + *cmd_pixels_count_byte = ((pixel - cmd_pixel_start) >> log_bpp) & 0xFF; + dev_addr += ((pixel - cmd_pixel_start) >> log_bpp) * 2; } if (cmd_buffer_end <= MIN_RLX_CMD_BYTES + cmd) { @@ -222,19 +223,19 @@ static void udl_compress_hline16( * (that we can only write to, slowly, and can never read), and (optionally) * our shadow copy that tracks what's been sent to that hardware buffer. 
*/ -int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr, +int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr, const char *front, char **urb_buf_ptr, u32 byte_offset, u32 device_byte_offset, u32 byte_width, int *ident_ptr, int *sent_ptr) { const u8 *line_start, *line_end, *next_pixel; - u32 base16 = 0 + (device_byte_offset / bpp) * 2; + u32 base16 = 0 + (device_byte_offset >> log_bpp) * 2; struct urb *urb = *urb_ptr; u8 *cmd = *urb_buf_ptr; u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length; - BUG_ON(!(bpp == 2 || bpp == 4)); + BUG_ON(!(log_bpp == 1 || log_bpp == 2)); line_start = (u8 *) (front + byte_offset); next_pixel = line_start; @@ -244,7 +245,7 @@ int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr, udl_compress_hline16(&next_pixel, line_end, &base16, - (u8 **) &cmd, (u8 *) cmd_end, bpp); + (u8 **) &cmd, (u8 *) cmd_end, log_bpp); if (cmd >= cmd_end) { int len = cmd - (u8 *) urb->transfer_buffer; diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index f7fa80287500ff2bbd366045871644d5015351e6..476198a4224e44cb0714d3b5a29bcd5807507a6a 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -297,6 +297,9 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state) vc4_state->y_scaling[0] = vc4_get_scaling_mode(vc4_state->src_h[0], vc4_state->crtc_h); + vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE && + vc4_state->y_scaling[0] == VC4_SCALING_NONE); + if (num_planes > 1) { vc4_state->is_yuv = true; @@ -312,24 +315,17 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state) vc4_get_scaling_mode(vc4_state->src_h[1], vc4_state->crtc_h); - /* YUV conversion requires that scaling be enabled, - * even on a plane that's otherwise 1:1. Choose TPZ - * for simplicity. + /* YUV conversion requires that horizontal scaling be enabled, + * even on a plane that's otherwise 1:1. Looks like only PPF + * works in that case, so let's pick that one. */ - if (vc4_state->x_scaling[0] == VC4_SCALING_NONE) - vc4_state->x_scaling[0] = VC4_SCALING_TPZ; - if (vc4_state->y_scaling[0] == VC4_SCALING_NONE) - vc4_state->y_scaling[0] = VC4_SCALING_TPZ; + if (vc4_state->is_unity) + vc4_state->x_scaling[0] = VC4_SCALING_PPF; } else { vc4_state->x_scaling[1] = VC4_SCALING_NONE; vc4_state->y_scaling[1] = VC4_SCALING_NONE; } - vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE && - vc4_state->y_scaling[0] == VC4_SCALING_NONE && - vc4_state->x_scaling[1] == VC4_SCALING_NONE && - vc4_state->y_scaling[1] == VC4_SCALING_NONE); - /* No configuring scaling on the cursor plane, since it gets non-vblank-synced updates, and scaling requires requires LBM changes which have to be vblank-synced. @@ -662,7 +658,10 @@ static int vc4_plane_mode_set(struct drm_plane *plane, vc4_dlist_write(vc4_state, SCALER_CSC2_ITR_R_601_5); } - if (!vc4_state->is_unity) { + if (vc4_state->x_scaling[0] != VC4_SCALING_NONE || + vc4_state->x_scaling[1] != VC4_SCALING_NONE || + vc4_state->y_scaling[0] != VC4_SCALING_NONE || + vc4_state->y_scaling[1] != VC4_SCALING_NONE) { /* LBM Base Address. 
*/ if (vc4_state->y_scaling[0] != VC4_SCALING_NONE || vc4_state->y_scaling[1] != VC4_SCALING_NONE) { diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c index db509ab8874e58fd1f798da270fa8035ed4563d3..acd99783bbca1ca8fffc8e8ba5b6105e67cdc09e 100644 --- a/drivers/gpu/host1x/job.c +++ b/drivers/gpu/host1x/job.c @@ -686,7 +686,8 @@ void host1x_job_unpin(struct host1x_job *job) for (i = 0; i < job->num_unpins; i++) { struct host1x_job_unpin_data *unpin = &job->unpins[i]; - if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && host->domain) { + if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && + unpin->size && host->domain) { iommu_unmap(host->domain, job->addr_phys[i], unpin->size); free_iova(&host->iova, diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index 658fa2d3e40c260d051d4299bda4eddb0af5abeb..2c8411b8d050d6d758522a9866e1cd2d3dfebb7d 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c @@ -1401,6 +1401,8 @@ static int ipu_probe(struct platform_device *pdev) return -ENODEV; ipu->id = of_alias_get_id(np, "ipu"); + if (ipu->id < 0) + ipu->id = 0; if (of_device_is_compatible(np, "fsl,imx6qp-ipu") && IS_ENABLED(CONFIG_DRM)) { diff --git a/drivers/gpu/ipu-v3/ipu-csi.c b/drivers/gpu/ipu-v3/ipu-csi.c index 24e12b87a0cbe387b24557a230a1ed1d05e150ca..2bc51d4d3f1e6adf923e01e69f4e3ec92ee186d2 100644 --- a/drivers/gpu/ipu-v3/ipu-csi.c +++ b/drivers/gpu/ipu-v3/ipu-csi.c @@ -316,13 +316,17 @@ static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code) /* * Fill a CSI bus config struct from mbus_config and mbus_framefmt. */ -static void fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg, +static int fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg, struct v4l2_mbus_config *mbus_cfg, struct v4l2_mbus_framefmt *mbus_fmt) { + int ret; + memset(csicfg, 0, sizeof(*csicfg)); - mbus_code_to_bus_cfg(csicfg, mbus_fmt->code); + ret = mbus_code_to_bus_cfg(csicfg, mbus_fmt->code); + if (ret < 0) + return ret; switch (mbus_cfg->type) { case V4L2_MBUS_PARALLEL: @@ -353,6 +357,8 @@ static void fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg, /* will never get here, keep compiler quiet */ break; } + + return 0; } int ipu_csi_init_interface(struct ipu_csi *csi, @@ -362,8 +368,11 @@ int ipu_csi_init_interface(struct ipu_csi *csi, struct ipu_csi_bus_config cfg; unsigned long flags; u32 width, height, data = 0; + int ret; - fill_csi_bus_cfg(&cfg, mbus_cfg, mbus_fmt); + ret = fill_csi_bus_cfg(&cfg, mbus_cfg, mbus_fmt); + if (ret < 0) + return ret; /* set default sensor frame width and height */ width = mbus_fmt->width; @@ -584,11 +593,14 @@ int ipu_csi_set_mipi_datatype(struct ipu_csi *csi, u32 vc, struct ipu_csi_bus_config cfg; unsigned long flags; u32 temp; + int ret; if (vc > 3) return -EINVAL; - mbus_code_to_bus_cfg(&cfg, mbus_fmt->code); + ret = mbus_code_to_bus_cfg(&cfg, mbus_fmt->code); + if (ret < 0) + return ret; spin_lock_irqsave(&csi->lock, flags); diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index eca4c9d97110c2e5a1de2eed8457fd44133515ba..4575ee91d9878a861e60cf9df828727b8609772f 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -182,6 +182,19 @@ config HID_BETOP_FF Currently the following devices are known to be supported: - BETOP 2185 PC & BFM MODE +config HID_BIGBEN_FF + tristate "BigBen Interactive Kids' gamepad support" + depends on USB_HID + depends on NEW_LEDS + depends on LEDS_CLASS + select INPUT_FF_MEMLESS + default !EXPERT + help + Support for the "Kid-friendly Wired Controller" PS3OFMINIPAD 
+ gamepad made by BigBen Interactive, originally sold as a PS3 + accessory. This driver fixes input mapping and adds support for + force feedback effects and LEDs on the device. + config HID_CHERRY tristate "Cherry Cymotion keyboard" depends on HID diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile index 235bd2a7b333a4da9650ea4dc6d094318c90f7b2..54ff911a03cbce2c15596957636943c534226cc9 100644 --- a/drivers/hid/Makefile +++ b/drivers/hid/Makefile @@ -31,6 +31,7 @@ obj-$(CONFIG_HID_ASUS) += hid-asus.o obj-$(CONFIG_HID_AUREAL) += hid-aureal.o obj-$(CONFIG_HID_BELKIN) += hid-belkin.o obj-$(CONFIG_HID_BETOP_FF) += hid-betopff.o +obj-$(CONFIG_HID_BIGBEN_FF) += hid-bigbenff.o obj-$(CONFIG_HID_CHERRY) += hid-cherry.o obj-$(CONFIG_HID_CHICONY) += hid-chicony.o obj-$(CONFIG_HID_CMEDIA) += hid-cmedia.o diff --git a/drivers/hid/hid-bigbenff.c b/drivers/hid/hid-bigbenff.c new file mode 100644 index 0000000000000000000000000000000000000000..3f6abd190df43ef17cb6080505fe7761c38324fe --- /dev/null +++ b/drivers/hid/hid-bigbenff.c @@ -0,0 +1,414 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/* + * LED & force feedback support for BigBen Interactive + * + * 0x146b:0x0902 "Bigben Interactive Bigben Game Pad" + * "Kid-friendly Wired Controller" PS3OFMINIPAD SONY + * sold for use with the PS3 + * + * Copyright (c) 2018 Hanno Zulla + */ + +#include +#include +#include +#include +#include + +#include "hid-ids.h" + + +/* + * The original descriptor for 0x146b:0x0902 + * + * 0x05, 0x01, // Usage Page (Generic Desktop Ctrls) + * 0x09, 0x05, // Usage (Game Pad) + * 0xA1, 0x01, // Collection (Application) + * 0x15, 0x00, // Logical Minimum (0) + * 0x25, 0x01, // Logical Maximum (1) + * 0x35, 0x00, // Physical Minimum (0) + * 0x45, 0x01, // Physical Maximum (1) + * 0x75, 0x01, // Report Size (1) + * 0x95, 0x0D, // Report Count (13) + * 0x05, 0x09, // Usage Page (Button) + * 0x19, 0x01, // Usage Minimum (0x01) + * 0x29, 0x0D, // Usage Maximum (0x0D) + * 0x81, 0x02, // Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position) + * 0x95, 0x03, // Report Count (3) + * 0x81, 0x01, // Input (Const,Array,Abs,No Wrap,Linear,Preferred State,No Null Position) + * 0x05, 0x01, // Usage Page (Generic Desktop Ctrls) + * 0x25, 0x07, // Logical Maximum (7) + * 0x46, 0x3B, 0x01, // Physical Maximum (315) + * 0x75, 0x04, // Report Size (4) + * 0x95, 0x01, // Report Count (1) + * 0x65, 0x14, // Unit (System: English Rotation, Length: Centimeter) + * 0x09, 0x39, // Usage (Hat switch) + * 0x81, 0x42, // Input (Data,Var,Abs,No Wrap,Linear,Preferred State,Null State) + * 0x65, 0x00, // Unit (None) + * 0x95, 0x01, // Report Count (1) + * 0x81, 0x01, // Input (Const,Array,Abs,No Wrap,Linear,Preferred State,No Null Position) + * 0x26, 0xFF, 0x00, // Logical Maximum (255) + * 0x46, 0xFF, 0x00, // Physical Maximum (255) + * 0x09, 0x30, // Usage (X) + * 0x09, 0x31, // Usage (Y) + * 0x09, 0x32, // Usage (Z) + * 0x09, 0x35, // Usage (Rz) + * 0x75, 0x08, // Report Size (8) + * 0x95, 0x04, // Report Count (4) + * 0x81, 0x02, // Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position) + * 0x06, 0x00, 0xFF, // Usage Page (Vendor Defined 0xFF00) + * 0x09, 0x20, // Usage (0x20) + * 0x09, 0x21, // Usage (0x21) + * 0x09, 0x22, // Usage (0x22) + * 0x09, 0x23, // Usage (0x23) + * 0x09, 0x24, // Usage (0x24) + * 0x09, 0x25, // Usage (0x25) + * 0x09, 0x26, // Usage (0x26) + * 0x09, 0x27, // Usage (0x27) + * 0x09, 0x28, // Usage (0x28) + * 0x09, 0x29, // Usage (0x29) + * 0x09, 0x2A, // Usage (0x2A) + * 0x09, 0x2B, // Usage (0x2B) + * 
0x95, 0x0C, // Report Count (12) + * 0x81, 0x02, // Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position) + * 0x0A, 0x21, 0x26, // Usage (0x2621) + * 0x95, 0x08, // Report Count (8) + * 0xB1, 0x02, // Feature (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position,Non-volatile) + * 0x0A, 0x21, 0x26, // Usage (0x2621) + * 0x91, 0x02, // Output (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position,Non-volatile) + * 0x26, 0xFF, 0x03, // Logical Maximum (1023) + * 0x46, 0xFF, 0x03, // Physical Maximum (1023) + * 0x09, 0x2C, // Usage (0x2C) + * 0x09, 0x2D, // Usage (0x2D) + * 0x09, 0x2E, // Usage (0x2E) + * 0x09, 0x2F, // Usage (0x2F) + * 0x75, 0x10, // Report Size (16) + * 0x95, 0x04, // Report Count (4) + * 0x81, 0x02, // Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position) + * 0xC0, // End Collection + */ + +#define PID0902_RDESC_ORIG_SIZE 137 + +/* + * The fixed descriptor for 0x146b:0x0902 + * + * - map buttons according to gamepad.rst + * - assign right stick from Z/Rz to Rx/Ry + * - map previously unused analog trigger data to Z/RZ + * - simplify feature and output descriptor + */ +static __u8 pid0902_rdesc_fixed[] = { + 0x05, 0x01, /* Usage Page (Generic Desktop Ctrls) */ + 0x09, 0x05, /* Usage (Game Pad) */ + 0xA1, 0x01, /* Collection (Application) */ + 0x15, 0x00, /* Logical Minimum (0) */ + 0x25, 0x01, /* Logical Maximum (1) */ + 0x35, 0x00, /* Physical Minimum (0) */ + 0x45, 0x01, /* Physical Maximum (1) */ + 0x75, 0x01, /* Report Size (1) */ + 0x95, 0x0D, /* Report Count (13) */ + 0x05, 0x09, /* Usage Page (Button) */ + 0x09, 0x05, /* Usage (BTN_WEST) */ + 0x09, 0x01, /* Usage (BTN_SOUTH) */ + 0x09, 0x02, /* Usage (BTN_EAST) */ + 0x09, 0x04, /* Usage (BTN_NORTH) */ + 0x09, 0x07, /* Usage (BTN_TL) */ + 0x09, 0x08, /* Usage (BTN_TR) */ + 0x09, 0x09, /* Usage (BTN_TL2) */ + 0x09, 0x0A, /* Usage (BTN_TR2) */ + 0x09, 0x0B, /* Usage (BTN_SELECT) */ + 0x09, 0x0C, /* Usage (BTN_START) */ + 0x09, 0x0E, /* Usage (BTN_THUMBL) */ + 0x09, 0x0F, /* Usage (BTN_THUMBR) */ + 0x09, 0x0D, /* Usage (BTN_MODE) */ + 0x81, 0x02, /* Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position) */ + 0x75, 0x01, /* Report Size (1) */ + 0x95, 0x03, /* Report Count (3) */ + 0x81, 0x01, /* Input (Const,Array,Abs,No Wrap,Linear,Preferred State,No Null Position) */ + 0x05, 0x01, /* Usage Page (Generic Desktop Ctrls) */ + 0x25, 0x07, /* Logical Maximum (7) */ + 0x46, 0x3B, 0x01, /* Physical Maximum (315) */ + 0x75, 0x04, /* Report Size (4) */ + 0x95, 0x01, /* Report Count (1) */ + 0x65, 0x14, /* Unit (System: English Rotation, Length: Centimeter) */ + 0x09, 0x39, /* Usage (Hat switch) */ + 0x81, 0x42, /* Input (Data,Var,Abs,No Wrap,Linear,Preferred State,Null State) */ + 0x65, 0x00, /* Unit (None) */ + 0x95, 0x01, /* Report Count (1) */ + 0x81, 0x01, /* Input (Const,Array,Abs,No Wrap,Linear,Preferred State,No Null Position) */ + 0x26, 0xFF, 0x00, /* Logical Maximum (255) */ + 0x46, 0xFF, 0x00, /* Physical Maximum (255) */ + 0x09, 0x30, /* Usage (X) */ + 0x09, 0x31, /* Usage (Y) */ + 0x09, 0x33, /* Usage (Rx) */ + 0x09, 0x34, /* Usage (Ry) */ + 0x75, 0x08, /* Report Size (8) */ + 0x95, 0x04, /* Report Count (4) */ + 0x81, 0x02, /* Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position) */ + 0x95, 0x0A, /* Report Count (10) */ + 0x81, 0x01, /* Input (Const,Array,Abs,No Wrap,Linear,Preferred State,No Null Position) */ + 0x05, 0x01, /* Usage Page (Generic Desktop Ctrls) */ + 0x26, 0xFF, 0x00, /* Logical Maximum (255) */ + 0x46, 0xFF, 0x00, /* Physical 
Maximum (255) */ + 0x09, 0x32, /* Usage (Z) */ + 0x09, 0x35, /* Usage (Rz) */ + 0x95, 0x02, /* Report Count (2) */ + 0x81, 0x02, /* Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position) */ + 0x95, 0x08, /* Report Count (8) */ + 0x81, 0x01, /* Input (Const,Array,Abs,No Wrap,Linear,Preferred State,No Null Position) */ + 0x06, 0x00, 0xFF, /* Usage Page (Vendor Defined 0xFF00) */ + 0xB1, 0x02, /* Feature (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position,Non-volatile) */ + 0x0A, 0x21, 0x26, /* Usage (0x2621) */ + 0x95, 0x08, /* Report Count (8) */ + 0x91, 0x02, /* Output (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position,Non-volatile) */ + 0x0A, 0x21, 0x26, /* Usage (0x2621) */ + 0x95, 0x08, /* Report Count (8) */ + 0x81, 0x02, /* Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position) */ + 0xC0, /* End Collection */ +}; + +#define NUM_LEDS 4 + +struct bigben_device { + struct hid_device *hid; + struct hid_report *report; + u8 led_state; /* LED1 = 1 .. LED4 = 8 */ + u8 right_motor_on; /* right motor off/on 0/1 */ + u8 left_motor_force; /* left motor force 0-255 */ + struct led_classdev *leds[NUM_LEDS]; + bool work_led; + bool work_ff; + struct work_struct worker; +}; + + +static void bigben_worker(struct work_struct *work) +{ + struct bigben_device *bigben = container_of(work, + struct bigben_device, worker); + struct hid_field *report_field = bigben->report->field[0]; + + if (bigben->work_led) { + bigben->work_led = false; + report_field->value[0] = 0x01; /* 1 = led message */ + report_field->value[1] = 0x08; /* reserved value, always 8 */ + report_field->value[2] = bigben->led_state; + report_field->value[3] = 0x00; /* padding */ + report_field->value[4] = 0x00; /* padding */ + report_field->value[5] = 0x00; /* padding */ + report_field->value[6] = 0x00; /* padding */ + report_field->value[7] = 0x00; /* padding */ + hid_hw_request(bigben->hid, bigben->report, HID_REQ_SET_REPORT); + } + + if (bigben->work_ff) { + bigben->work_ff = false; + report_field->value[0] = 0x02; /* 2 = rumble effect message */ + report_field->value[1] = 0x08; /* reserved value, always 8 */ + report_field->value[2] = bigben->right_motor_on; + report_field->value[3] = bigben->left_motor_force; + report_field->value[4] = 0xff; /* duration 0-254 (255 = nonstop) */ + report_field->value[5] = 0x00; /* padding */ + report_field->value[6] = 0x00; /* padding */ + report_field->value[7] = 0x00; /* padding */ + hid_hw_request(bigben->hid, bigben->report, HID_REQ_SET_REPORT); + } +} + +static int hid_bigben_play_effect(struct input_dev *dev, void *data, + struct ff_effect *effect) +{ + struct bigben_device *bigben = data; + u8 right_motor_on; + u8 left_motor_force; + + if (effect->type != FF_RUMBLE) + return 0; + + right_motor_on = effect->u.rumble.weak_magnitude ? 
1 : 0; + left_motor_force = effect->u.rumble.strong_magnitude / 256; + + if (right_motor_on != bigben->right_motor_on || + left_motor_force != bigben->left_motor_force) { + bigben->right_motor_on = right_motor_on; + bigben->left_motor_force = left_motor_force; + bigben->work_ff = true; + schedule_work(&bigben->worker); + } + + return 0; +} + +static void bigben_set_led(struct led_classdev *led, + enum led_brightness value) +{ + struct device *dev = led->dev->parent; + struct hid_device *hid = to_hid_device(dev); + struct bigben_device *bigben = hid_get_drvdata(hid); + int n; + bool work; + + if (!bigben) { + hid_err(hid, "no device data\n"); + return; + } + + for (n = 0; n < NUM_LEDS; n++) { + if (led == bigben->leds[n]) { + if (value == LED_OFF) { + work = (bigben->led_state & BIT(n)); + bigben->led_state &= ~BIT(n); + } else { + work = !(bigben->led_state & BIT(n)); + bigben->led_state |= BIT(n); + } + + if (work) { + bigben->work_led = true; + schedule_work(&bigben->worker); + } + return; + } + } +} + +static enum led_brightness bigben_get_led(struct led_classdev *led) +{ + struct device *dev = led->dev->parent; + struct hid_device *hid = to_hid_device(dev); + struct bigben_device *bigben = hid_get_drvdata(hid); + int n; + + if (!bigben) { + hid_err(hid, "no device data\n"); + return LED_OFF; + } + + for (n = 0; n < NUM_LEDS; n++) { + if (led == bigben->leds[n]) + return (bigben->led_state & BIT(n)) ? LED_ON : LED_OFF; + } + + return LED_OFF; +} + +static void bigben_remove(struct hid_device *hid) +{ + struct bigben_device *bigben = hid_get_drvdata(hid); + + cancel_work_sync(&bigben->worker); + hid_hw_close(hid); + hid_hw_stop(hid); +} + +static int bigben_probe(struct hid_device *hid, + const struct hid_device_id *id) +{ + struct bigben_device *bigben; + struct hid_input *hidinput; + struct list_head *report_list; + struct led_classdev *led; + char *name; + size_t name_sz; + int n, error; + + bigben = devm_kzalloc(&hid->dev, sizeof(*bigben), GFP_KERNEL); + if (!bigben) + return -ENOMEM; + hid_set_drvdata(hid, bigben); + bigben->hid = hid; + + error = hid_parse(hid); + if (error) { + hid_err(hid, "parse failed\n"); + return error; + } + + error = hid_hw_start(hid, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF); + if (error) { + hid_err(hid, "hw start failed\n"); + return error; + } + + report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; + bigben->report = list_entry(report_list->next, + struct hid_report, list); + + hidinput = list_first_entry(&hid->inputs, struct hid_input, list); + set_bit(FF_RUMBLE, hidinput->input->ffbit); + + INIT_WORK(&bigben->worker, bigben_worker); + + error = input_ff_create_memless(hidinput->input, bigben, + hid_bigben_play_effect); + if (error) + return error; + + name_sz = strlen(dev_name(&hid->dev)) + strlen(":red:bigben#") + 1; + + for (n = 0; n < NUM_LEDS; n++) { + led = devm_kzalloc( + &hid->dev, + sizeof(struct led_classdev) + name_sz, + GFP_KERNEL + ); + if (!led) + return -ENOMEM; + name = (void *)(&led[1]); + snprintf(name, name_sz, + "%s:red:bigben%d", + dev_name(&hid->dev), n + 1 + ); + led->name = name; + led->brightness = (n == 0) ? 
LED_ON : LED_OFF; + led->max_brightness = 1; + led->brightness_get = bigben_get_led; + led->brightness_set = bigben_set_led; + bigben->leds[n] = led; + error = devm_led_classdev_register(&hid->dev, led); + if (error) + return error; + } + + /* initial state: LED1 is on, no rumble effect */ + bigben->led_state = BIT(0); + bigben->right_motor_on = 0; + bigben->left_motor_force = 0; + bigben->work_led = true; + bigben->work_ff = true; + schedule_work(&bigben->worker); + + hid_info(hid, "LED and force feedback support for BigBen gamepad\n"); + + return 0; +} + +static __u8 *bigben_report_fixup(struct hid_device *hid, __u8 *rdesc, + unsigned int *rsize) +{ + if (*rsize == PID0902_RDESC_ORIG_SIZE) { + rdesc = pid0902_rdesc_fixed; + *rsize = sizeof(pid0902_rdesc_fixed); + } else + hid_warn(hid, "unexpected rdesc, please submit for review\n"); + return rdesc; +} + +static const struct hid_device_id bigben_devices[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_BIGBEN, USB_DEVICE_ID_BIGBEN_PS3OFMINIPAD) }, + { } +}; +MODULE_DEVICE_TABLE(hid, bigben_devices); + +static struct hid_driver bigben_driver = { + .name = "bigben", + .id_table = bigben_devices, + .probe = bigben_probe, + .report_fixup = bigben_report_fixup, + .remove = bigben_remove, +}; +module_hid_driver(bigben_driver); + +MODULE_LICENSE("GPL"); diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 9e478f03e8456f18ee2df447602ebf7bc42c2ea6..3615f732aa63bf13728a2f0429ca493334d1f995 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -222,6 +222,9 @@ #define USB_VENDOR_ID_BETOP_2185V2PC 0x8380 #define USB_VENDOR_ID_BETOP_2185V2BFM 0x20bc +#define USB_VENDOR_ID_BIGBEN 0x146b +#define USB_DEVICE_ID_BIGBEN_PS3OFMINIPAD 0x0902 + #define USB_VENDOR_ID_BTC 0x046e #define USB_DEVICE_ID_BTC_EMPREX_REMOTE 0x5578 #define USB_DEVICE_ID_BTC_EMPREX_REMOTE_2 0x5577 @@ -528,6 +531,7 @@ #define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A 0x0a4a #define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A 0x0b4a #define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE 0x134a +#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A 0x094a #define USB_VENDOR_ID_HUION 0x256c #define USB_DEVICE_ID_HUION_TABLET 0x006e diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c index 3d121d8ee980b28458021e591129ae369c813721..5d2d746e35f60aabeb951d3cbd27316ee0fcde03 100644 --- a/drivers/hid/hid-ntrig.c +++ b/drivers/hid/hid-ntrig.c @@ -955,6 +955,8 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id) ret = sysfs_create_group(&hdev->dev.kobj, &ntrig_attribute_group); + if (ret) + hid_err(hdev, "cannot create sysfs group\n"); return 0; err_free: diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index f489a5cfcb48cf5216a532091482ac669786e61d..e10eda031b0103d9ca537072e4fe9b805a7f3756 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -99,6 +99,7 @@ static const struct hid_blacklist { { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A, HID_QUIRK_ALWAYS_POLL }, { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL }, { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL }, + { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A, HID_QUIRK_ALWAYS_POLL }, { USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C007, HID_QUIRK_ALWAYS_POLL }, { 
USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL }, diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index c401b5b63f4c59c696acfcd21c6943b00b75ae54..4c72e68637c2831848df7c3fdaee8201b651f4d0 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -3212,8 +3212,14 @@ void wacom_setup_device_quirks(struct wacom *wacom) if (features->type >= INTUOSHT && features->type <= BAMBOO_PT) features->device_type |= WACOM_DEVICETYPE_PAD; - features->x_max = 4096; - features->y_max = 4096; + if (features->type == INTUOSHT2) { + features->x_max = features->x_max / 10; + features->y_max = features->y_max / 10; + } + else { + features->x_max = 4096; + features->y_max = 4096; + } } else if (features->pktlen == WACOM_PKGLEN_BBTOUCH) { features->device_type |= WACOM_DEVICETYPE_PAD; diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index 05964347008d9c51c1457b65ddd9ef0d1815f584..d96b09fea8358162df586a20f706ba88e2de5491 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c @@ -541,11 +541,8 @@ static void reset_channel_cb(void *arg) channel->onchannel_callback = NULL; } -static int vmbus_close_internal(struct vmbus_channel *channel) +void vmbus_reset_channel_cb(struct vmbus_channel *channel) { - struct vmbus_channel_close_channel *msg; - int ret; - /* * vmbus_on_event(), running in the per-channel tasklet, can race * with vmbus_close_internal() in the case of SMP guest, e.g., when @@ -555,6 +552,29 @@ static int vmbus_close_internal(struct vmbus_channel *channel) */ tasklet_disable(&channel->callback_event); + channel->sc_creation_callback = NULL; + + /* Stop the callback asap */ + if (channel->target_cpu != get_cpu()) { + put_cpu(); + smp_call_function_single(channel->target_cpu, reset_channel_cb, + channel, true); + } else { + reset_channel_cb(channel); + put_cpu(); + } + + /* Re-enable tasklet for use on re-open */ + tasklet_enable(&channel->callback_event); +} + +static int vmbus_close_internal(struct vmbus_channel *channel) +{ + struct vmbus_channel_close_channel *msg; + int ret; + + vmbus_reset_channel_cb(channel); + /* * In case a device driver's probe() fails (e.g., * util_probe() -> vmbus_open() returns -ENOMEM) and the device is @@ -568,16 +588,6 @@ static int vmbus_close_internal(struct vmbus_channel *channel) } channel->state = CHANNEL_OPEN_STATE; - channel->sc_creation_callback = NULL; - /* Stop callback and cancel the timer asap */ - if (channel->target_cpu != get_cpu()) { - put_cpu(); - smp_call_function_single(channel->target_cpu, reset_channel_cb, - channel, true); - } else { - reset_channel_cb(channel); - put_cpu(); - } /* Send a closing message */ @@ -620,8 +630,6 @@ static int vmbus_close_internal(struct vmbus_channel *channel) get_order(channel->ringbuffer_pagecount * PAGE_SIZE)); out: - /* re-enable tasklet for use on re-open */ - tasklet_enable(&channel->callback_event); return ret; } diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 1939c0ca374152d50a68dd0c6122464bef9c2691..1700b4e7758d473282a69bdc95b15f0aa0ed7585 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -881,6 +881,12 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) return; } + /* + * Before setting channel->rescind in vmbus_rescind_cleanup(), we + * should make sure the channel callback is not running any more. + */ + vmbus_reset_channel_cb(channel); + /* * Now wait for offer handling to complete. 
*/ diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c index 8267439dd1eec0ac32ed217edda4ff324f8e57fe..d8101cd28dfa3ff6c27e1db7ee5f0b3ff2950f76 100644 --- a/drivers/hv/hv.c +++ b/drivers/hv/hv.c @@ -196,6 +196,10 @@ int hv_synic_alloc(void) return 0; err: + /* + * Any memory allocations that succeeded will be freed when + * the caller cleans up by calling hv_synic_free() + */ return -ENOMEM; } @@ -208,12 +212,10 @@ void hv_synic_free(void) struct hv_per_cpu_context *hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu); - if (hv_cpu->synic_event_page) - free_page((unsigned long)hv_cpu->synic_event_page); - if (hv_cpu->synic_message_page) - free_page((unsigned long)hv_cpu->synic_message_page); - if (hv_cpu->post_msg_page) - free_page((unsigned long)hv_cpu->post_msg_page); + kfree(hv_cpu->clk_evt); + free_page((unsigned long)hv_cpu->synic_event_page); + free_page((unsigned long)hv_cpu->synic_message_page); + free_page((unsigned long)hv_cpu->post_msg_page); } kfree(hv_context.hv_numa_map); diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 95357b084b025c73a487051448c090398f672659..af4ccd8a88e04edba7fcedf2596aecaf28a1a297 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -1287,11 +1287,12 @@ config SENSORS_PWM_FAN will be called pwm-fan. config SENSORS_RPI_POE_FAN - tristate "Raspberry Pi POE HAT fan" + tristate "Raspberry Pi PoE HAT fan" depends on RASPBERRYPI_FIRMWARE depends on THERMAL || THERMAL=n help - If you say yes here you get support for Raspberry Pi POE HAT fan. + If you say yes here you get support for Raspberry Pi PoE (Power over + Ethernet) HAT fan. This driver can also be built as a module. If so, the module will be called rpi-poe-fan. diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c index 9ef84998c7f382b114cd11d1c4ea04e7c916dda4..37db2eb66ed790624b0da69355d4414e24587269 100644 --- a/drivers/hwmon/adt7475.c +++ b/drivers/hwmon/adt7475.c @@ -303,14 +303,18 @@ static inline u16 volt2reg(int channel, long volt, u8 bypass_attn) return clamp_val(reg, 0, 1023) & (0xff << 2); } -static u16 adt7475_read_word(struct i2c_client *client, int reg) +static int adt7475_read_word(struct i2c_client *client, int reg) { - u16 val; + int val1, val2; - val = i2c_smbus_read_byte_data(client, reg); - val |= (i2c_smbus_read_byte_data(client, reg + 1) << 8); + val1 = i2c_smbus_read_byte_data(client, reg); + if (val1 < 0) + return val1; + val2 = i2c_smbus_read_byte_data(client, reg + 1); + if (val2 < 0) + return val2; - return val; + return val1 | (val2 << 8); } static void adt7475_write_word(struct i2c_client *client, int reg, u16 val) diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c index e9e6aeabbf842ffd941e0476a185884adac85147..71d3445ba869c85654ae3dcaf3a5460e8fadb268 100644 --- a/drivers/hwmon/ina2xx.c +++ b/drivers/hwmon/ina2xx.c @@ -17,7 +17,7 @@ * Bi-directional Current/Power Monitor with I2C Interface * Datasheet: http://www.ti.com/product/ina230 * - * Copyright (C) 2012 Lothar Felten + * Copyright (C) 2012 Lothar Felten * Thanks to Jan Volkering * * This program is free software; you can redistribute it and/or modify @@ -329,6 +329,15 @@ static int ina2xx_set_shunt(struct ina2xx_data *data, long val) return 0; } +static ssize_t ina2xx_show_shunt(struct device *dev, + struct device_attribute *da, + char *buf) +{ + struct ina2xx_data *data = dev_get_drvdata(dev); + + return snprintf(buf, PAGE_SIZE, "%li\n", data->rshunt); +} + static ssize_t ina2xx_store_shunt(struct device *dev, struct device_attribute *da, const char *buf, size_t count) @@ -403,7 
+412,7 @@ static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL, /* shunt resistance */ static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR, - ina2xx_show_value, ina2xx_store_shunt, + ina2xx_show_shunt, ina2xx_store_shunt, INA2XX_CALIBRATION); /* update interval (ina226 only) */ diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index f5f3f8cf57ea66d1a95bd4511d767611fb0fb338..ca9941fa741b7e04e971f70e4e8055e442e92c43 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c @@ -63,6 +63,7 @@ #include #include #include +#include #include "lm75.h" #define USE_ALTERNATE @@ -2642,6 +2643,7 @@ store_pwm_weight_temp_sel(struct device *dev, struct device_attribute *attr, return err; if (val > NUM_TEMP) return -EINVAL; + val = array_index_nospec(val, NUM_TEMP + 1); if (val && (!(data->have_temp & BIT(val - 1)) || !data->temp_src[val - 1])) return -EINVAL; @@ -4107,7 +4109,7 @@ static int nct6775_probe(struct platform_device *pdev) * The temperature is already monitored if the respective bit in * is set. */ - for (i = 0; i < 32; i++) { + for (i = 0; i < 31; i++) { if (!(data->temp_mask & BIT(i + 1))) continue; if (!reg_temp_alternate[i]) diff --git a/drivers/hwmon/rpi-poe-fan.c b/drivers/hwmon/rpi-poe-fan.c index e65cb178412b8d74c12494036ed4aaa273c7ddff..3effaf2eb86db6a712525fddbd08b5f6da0d256f 100644 --- a/drivers/hwmon/rpi-poe-fan.c +++ b/drivers/hwmon/rpi-poe-fan.c @@ -1,20 +1,11 @@ +// SPDX-License-Identifier: GPL-2.0 /* - * rpi-poe-fan.c - Hwmon driver for Raspberry Pi POE HAT fan. + * rpi-poe-fan.c - Hwmon driver for Raspberry Pi PoE HAT fan. * * Copyright (C) 2018 Raspberry Pi (Trading) Ltd. * Based on pwm-fan.c by Kamil Debski * * Author: Serge Schneider - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #include @@ -46,41 +37,41 @@ struct rpi_poe_fan_ctx { struct notifier_block nb; }; -struct m_data_s{ +struct fw_tag_data_s{ u32 reg; u32 val; u32 ret; }; static int write_reg(struct rpi_firmware *fw, u32 reg, u32 *val){ - struct m_data_s m_data = { + struct fw_tag_data_s fw_tag_data = { .reg = reg, .val = *val }; int ret; ret = rpi_firmware_property(fw, RPI_FIRMWARE_SET_POE_HAT_VAL, - &m_data, sizeof(m_data)); + &fw_tag_data, sizeof(fw_tag_data)); if (ret) { return ret; - } else if (m_data.ret) { + } else if (fw_tag_data.ret) { return -EIO; } return 0; } static int read_reg(struct rpi_firmware *fw, u32 reg, u32 *val){ - struct m_data_s m_data = { + struct fw_tag_data_s fw_tag_data = { .reg = reg, }; int ret; ret = rpi_firmware_property(fw, RPI_FIRMWARE_GET_POE_HAT_VAL, - &m_data, sizeof(m_data)); + &fw_tag_data, sizeof(fw_tag_data)); if (ret) { return ret; - } else if (m_data.ret) { + } else if (fw_tag_data.ret) { return -EIO; } - *val = m_data.val; + *val = fw_tag_data.val; return 0; } @@ -268,7 +259,8 @@ static int rpi_poe_fan_of_get_cooling_data(struct device *dev, ret = of_property_count_u32_elems(np, "cooling-levels"); if (ret <= 0) { - dev_err(dev, "Wrong data!\n"); + dev_err(dev, "cooling-levels property missing or invalid: %d\n", + ret); return ret ? 
: -EINVAL; } @@ -397,10 +389,11 @@ static int rpi_poe_fan_suspend(struct device *dev) { struct rpi_poe_fan_ctx *ctx = dev_get_drvdata(dev); u32 value = 0; + int ret = 0; if (ctx->pwm_value != value) ret = write_reg(ctx->fw, POE_CUR_PWM, &value); - return 0; + return ret; } static int rpi_poe_fan_resume(struct device *dev) @@ -420,7 +413,7 @@ static SIMPLE_DEV_PM_OPS(rpi_poe_fan_pm, rpi_poe_fan_suspend, rpi_poe_fan_resume); static const struct of_device_id of_rpi_poe_fan_match[] = { - { .compatible = "rpi-poe-fan", }, + { .compatible = "raspberrypi,rpi-poe-fan", }, {}, }; MODULE_DEVICE_TABLE(of, of_rpi_poe_fan_match); @@ -439,5 +432,5 @@ module_platform_driver(rpi_poe_fan_driver); MODULE_AUTHOR("Serge Schneider "); MODULE_ALIAS("platform:rpi-poe-fan"); -MODULE_DESCRIPTION("Raspberry Pi POE HAT fan driver"); +MODULE_DESCRIPTION("Raspberry Pi PoE HAT fan driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c index cf364a514c121baa71198f40963f2e3b2b873afa..5a1a14bcae72addb816c289caeb366b88e13cd5c 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x.c +++ b/drivers/hwtracing/coresight/coresight-etm4x.c @@ -1034,7 +1034,8 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id) } pm_runtime_put(&adev->dev); - dev_info(dev, "%s initialized\n", (char *)id->data); + dev_info(dev, "CPU%d: ETM v%d.%d initialized\n", + drvdata->cpu, drvdata->arch >> 4, drvdata->arch & 0xf); if (boot_enable) { coresight_enable(drvdata->csdev); @@ -1052,23 +1053,19 @@ err_arch_supported: return ret; } +#define ETM4x_AMBA_ID(pid) \ + { \ + .id = pid, \ + .mask = 0x000fffff, \ + } + static const struct amba_id etm4_ids[] = { - { /* ETM 4.0 - Cortex-A53 */ - .id = 0x000bb95d, - .mask = 0x000fffff, - .data = "ETM 4.0", - }, - { /* ETM 4.0 - Cortex-A57 */ - .id = 0x000bb95e, - .mask = 0x000fffff, - .data = "ETM 4.0", - }, - { /* ETM 4.0 - A72, Maia, HiSilicon */ - .id = 0x000bb95a, - .mask = 0x000fffff, - .data = "ETM 4.0", - }, - { 0, 0}, + ETM4x_AMBA_ID(0x000bb95d), /* Cortex-A53 */ + ETM4x_AMBA_ID(0x000bb95e), /* Cortex-A57 */ + ETM4x_AMBA_ID(0x000bb95a), /* Cortex-A72 */ + ETM4x_AMBA_ID(0x000bb959), /* Cortex-A73 */ + ETM4x_AMBA_ID(0x000bb9da), /* Cortex-A35 */ + {}, }; static struct amba_driver etm4x_driver = { diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c index 735dca089389ac7904bf533317d8ab8c8a4ed060..15dd01f8c197657bec42a61b909ba84a22df3deb 100644 --- a/drivers/hwtracing/coresight/coresight-tpiu.c +++ b/drivers/hwtracing/coresight/coresight-tpiu.c @@ -47,8 +47,9 @@ /** register definition **/ /* FFSR - 0x300 */ -#define FFSR_FT_STOPPED BIT(1) +#define FFSR_FT_STOPPED_BIT 1 /* FFCR - 0x304 */ +#define FFCR_FON_MAN_BIT 6 #define FFCR_FON_MAN BIT(6) #define FFCR_STOP_FI BIT(12) @@ -93,9 +94,9 @@ static void tpiu_disable_hw(struct tpiu_drvdata *drvdata) /* Generate manual flush */ writel_relaxed(FFCR_STOP_FI | FFCR_FON_MAN, drvdata->base + TPIU_FFCR); /* Wait for flush to complete */ - coresight_timeout(drvdata->base, TPIU_FFCR, FFCR_FON_MAN, 0); + coresight_timeout(drvdata->base, TPIU_FFCR, FFCR_FON_MAN_BIT, 0); /* Wait for formatter to stop */ - coresight_timeout(drvdata->base, TPIU_FFSR, FFSR_FT_STOPPED, 1); + coresight_timeout(drvdata->base, TPIU_FFSR, FFSR_FT_STOPPED_BIT, 1); CS_LOCK(drvdata->base); } diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c index 
b8091bef21dc79085c450c783b5784d229c3b333..e571e4010dff0c214c4aca8f8463a59116e4ca2a 100644 --- a/drivers/hwtracing/coresight/coresight.c +++ b/drivers/hwtracing/coresight/coresight.c @@ -115,7 +115,7 @@ static int coresight_find_link_inport(struct coresight_device *csdev, dev_err(&csdev->dev, "couldn't find inport, parent: %s, child: %s\n", dev_name(&parent->dev), dev_name(&csdev->dev)); - return 0; + return -ENODEV; } static int coresight_find_link_outport(struct coresight_device *csdev, @@ -133,7 +133,7 @@ static int coresight_find_link_outport(struct coresight_device *csdev, dev_err(&csdev->dev, "couldn't find outport, parent: %s, child: %s\n", dev_name(&csdev->dev), dev_name(&child->dev)); - return 0; + return -ENODEV; } static int coresight_enable_sink(struct coresight_device *csdev, u32 mode) @@ -186,6 +186,9 @@ static int coresight_enable_link(struct coresight_device *csdev, else refport = 0; + if (refport < 0) + return refport; + if (atomic_inc_return(&csdev->refcnt[refport]) == 1) { if (link_ops(csdev)->enable) { ret = link_ops(csdev)->enable(csdev, inport, outport); diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c index c1793313bb0873fea10e263062f6e580d34c7670..757801d2760439ac116acc52a0995af618331125 100644 --- a/drivers/hwtracing/intel_th/core.c +++ b/drivers/hwtracing/intel_th/core.c @@ -147,7 +147,8 @@ static int intel_th_remove(struct device *dev) th->thdev[i] = NULL; } - th->num_thdevs = lowest; + if (lowest >= 0) + th->num_thdevs = lowest; } if (thdrv->attr_group) diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c index 284f8670dbeb748bd8d0c6f240bbf41e62ce95f7..a074735456bc7571eab7e153168cd913ce9e4651 100644 --- a/drivers/i2c/busses/i2c-aspeed.c +++ b/drivers/i2c/busses/i2c-aspeed.c @@ -110,22 +110,22 @@ #define ASPEED_I2CD_DEV_ADDR_MASK GENMASK(6, 0) enum aspeed_i2c_master_state { + ASPEED_I2C_MASTER_INACTIVE, ASPEED_I2C_MASTER_START, ASPEED_I2C_MASTER_TX_FIRST, ASPEED_I2C_MASTER_TX, ASPEED_I2C_MASTER_RX_FIRST, ASPEED_I2C_MASTER_RX, ASPEED_I2C_MASTER_STOP, - ASPEED_I2C_MASTER_INACTIVE, }; enum aspeed_i2c_slave_state { + ASPEED_I2C_SLAVE_STOP, ASPEED_I2C_SLAVE_START, ASPEED_I2C_SLAVE_READ_REQUESTED, ASPEED_I2C_SLAVE_READ_PROCESSED, ASPEED_I2C_SLAVE_WRITE_REQUESTED, ASPEED_I2C_SLAVE_WRITE_RECEIVED, - ASPEED_I2C_SLAVE_STOP, }; struct aspeed_i2c_bus { @@ -859,7 +859,7 @@ static int aspeed_i2c_probe_bus(struct platform_device *pdev) if (!match) bus->get_clk_reg_val = aspeed_i2c_24xx_get_clk_reg_val; else - bus->get_clk_reg_val = match->data; + bus->get_clk_reg_val = (u32 (*)(u32))match->data; /* Initialize the I2C adapter */ spin_lock_init(&bus->lock); diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c index b8c43535f16cf29b8f8b517fe2e7ecaba1c80156..5cf670f57be71c3f81b7ebc57f7a0aec325e5ce6 100644 --- a/drivers/i2c/busses/i2c-davinci.c +++ b/drivers/i2c/busses/i2c-davinci.c @@ -234,12 +234,16 @@ static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev) /* * It's not always possible to have 1 to 2 ratio when d=7, so fall back * to minimal possible clkh in this case. 
+ * + * Note: + * CLKH is not allowed to be 0, in this case I2C clock is not generated + * at all */ - if (clk >= clkl + d) { + if (clk > clkl + d) { clkh = clk - clkl - d; clkl -= d; } else { - clkh = 0; + clkh = 1; clkl = clk - (d << 1); } diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index ba8df2fde1b2769fe65bdf49f338625577b6ced9..06debfa903b9c582f25fdc13fe7a0a359c055630 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -138,6 +138,7 @@ #define SBREG_BAR 0x10 #define SBREG_SMBCTRL 0xc6000c +#define SBREG_SMBCTRL_DNV 0xcf000c /* Host status bits for SMBPCISTS */ #define SMBPCISTS_INTS BIT(3) @@ -1395,7 +1396,11 @@ static void i801_add_tco(struct i801_priv *priv) spin_unlock(&p2sb_spinlock); res = &tco_res[ICH_RES_MEM_OFF]; - res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL; + if (pci_dev->device == PCI_DEVICE_ID_INTEL_DNV_SMBUS) + res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL_DNV; + else + res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL; + res->end = res->start + 3; res->flags = IORESOURCE_MEM; @@ -1411,6 +1416,13 @@ static void i801_add_tco(struct i801_priv *priv) } #ifdef CONFIG_ACPI +static bool i801_acpi_is_smbus_ioport(const struct i801_priv *priv, + acpi_physical_address address) +{ + return address >= priv->smba && + address <= pci_resource_end(priv->pci_dev, SMBBAR); +} + static acpi_status i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits, u64 *value, void *handler_context, void *region_context) @@ -1426,7 +1438,7 @@ i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits, */ mutex_lock(&priv->acpi_lock); - if (!priv->acpi_reserved) { + if (!priv->acpi_reserved && i801_acpi_is_smbus_ioport(priv, address)) { priv->acpi_reserved = true; dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n"); diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index 75c6b98585ba2e545ac0b3239e237796b68881b7..b73dd837fb533ae06491074b2a17a2a37d0db66f 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -665,9 +665,6 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx, struct imx_i2c_dma *dma = i2c_imx->dma; struct device *dev = &i2c_imx->adapter.dev; - temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); - temp |= I2CR_DMAEN; - imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); dma->chan_using = dma->chan_rx; dma->dma_transfer_dir = DMA_DEV_TO_MEM; @@ -780,6 +777,7 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo int i, result; unsigned int temp; int block_data = msgs->flags & I2C_M_RECV_LEN; + int use_dma = i2c_imx->dma && msgs->len >= DMA_THRESHOLD && !block_data; dev_dbg(&i2c_imx->adapter.dev, "<%s> write slave address: addr=0x%x\n", @@ -806,12 +804,14 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo */ if ((msgs->len - 1) || block_data) temp &= ~I2CR_TXAK; + if (use_dma) + temp |= I2CR_DMAEN; imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); /* dummy read */ dev_dbg(&i2c_imx->adapter.dev, "<%s> read data\n", __func__); - if (i2c_imx->dma && msgs->len >= DMA_THRESHOLD && !block_data) + if (use_dma) return i2c_imx_dma_read(i2c_imx, msgs, is_lastmsg); /* read data */ diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c index ae6ed254e01db5c10eb79e7bed4718da4d94536a..732d6c456a6f1cdb9e9687125930bd6722216ef0 100644 --- a/drivers/i2c/busses/i2c-xiic.c +++ b/drivers/i2c/busses/i2c-xiic.c 
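Aside (not part of the patch): the i2c-imx hunk just above folds the DMA decision into a single use_dma flag so the I2CR DMAEN bit is only set for transfers that will actually be handed to the DMA engine. Below is a minimal stand-alone sketch of that pattern; struct xfer, DMA_THRESHOLD_BYTES and the I2CR_DMAEN value are illustrative stand-ins, not the driver's own definitions.

    #include <stdbool.h>
    #include <stddef.h>

    #define DMA_THRESHOLD_BYTES 16          /* illustrative threshold */
    #define I2CR_DMAEN          (1u << 1)   /* illustrative bit value */

    struct xfer {
        size_t len;
        bool   block_data;   /* length comes back from the slave (I2C_M_RECV_LEN style) */
        bool   have_dma;     /* a DMA channel was successfully requested */
    };

    static bool xfer_uses_dma(const struct xfer *x)
    {
        /* one decision, reused by every caller */
        return x->have_dma && x->len >= DMA_THRESHOLD_BYTES && !x->block_data;
    }

    static unsigned int build_control_word(const struct xfer *x, unsigned int cr)
    {
        if (xfer_uses_dma(x))
            cr |= I2CR_DMAEN;    /* enable DMA only for transfers that will use it */
        else
            cr &= ~I2CR_DMAEN;   /* never leave DMAEN set for a PIO transfer */
        return cr;
    }

Making the predicate a single helper keeps the register programming and the later "DMA or PIO" branch from drifting apart, which is the failure mode the hunk removes.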
@@ -538,6 +538,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c) { u8 rx_watermark; struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg; + unsigned long flags; /* Clear and enable Rx full interrupt. */ xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK | XIIC_INTR_TX_ERROR_MASK); @@ -553,6 +554,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c) rx_watermark = IIC_RX_FIFO_DEPTH; xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rx_watermark - 1); + local_irq_save(flags); if (!(msg->flags & I2C_M_NOSTART)) /* write the address */ xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, @@ -563,6 +565,8 @@ static void xiic_start_recv(struct xiic_i2c *i2c) xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, msg->len | ((i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0)); + local_irq_restore(flags); + if (i2c->nmsgs == 1) /* very last, enable bus not busy as well */ xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK); diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c index a9126b3cda61bc95f6a9d1282821ab7552484534..847d9bf6744c21bde84d1c04aec88ad64a059a68 100644 --- a/drivers/i2c/i2c-core-acpi.c +++ b/drivers/i2c/i2c-core-acpi.c @@ -475,11 +475,16 @@ static int acpi_gsb_i2c_write_bytes(struct i2c_client *client, msgs[0].buf = buffer; ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); - if (ret < 0) - dev_err(&client->adapter->dev, "i2c write failed\n"); kfree(buffer); - return ret; + + if (ret < 0) { + dev_err(&client->adapter->dev, "i2c write failed: %d\n", ret); + return ret; + } + + /* 1 transfer must have completed successfully */ + return (ret == 1) ? 0 : -EIO; } static acpi_status diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index 6f2fe63e8f5aa80ade4daa76bdfe0b48516b2256..7b961c9c62eff5175bfdb193f215f1a78be8b83c 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -638,7 +638,7 @@ static int i2c_check_addr_busy(struct i2c_adapter *adapter, int addr) static void i2c_adapter_lock_bus(struct i2c_adapter *adapter, unsigned int flags) { - rt_mutex_lock(&adapter->bus_lock); + rt_mutex_lock_nested(&adapter->bus_lock, i2c_adapter_depth(adapter)); } /** diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c index 9669ca4937b891063d4cd63e552344d818f5eefa..7ba31f6bf1488e5a3ffef565d0d7c85091c91f0c 100644 --- a/drivers/i2c/i2c-mux.c +++ b/drivers/i2c/i2c-mux.c @@ -144,7 +144,7 @@ static void i2c_mux_lock_bus(struct i2c_adapter *adapter, unsigned int flags) struct i2c_mux_priv *priv = adapter->algo_data; struct i2c_adapter *parent = priv->muxc->parent; - rt_mutex_lock(&parent->mux_lock); + rt_mutex_lock_nested(&parent->mux_lock, i2c_adapter_depth(adapter)); if (!(flags & I2C_LOCK_ROOT_ADAPTER)) return; i2c_lock_bus(parent, flags); @@ -181,7 +181,7 @@ static void i2c_parent_lock_bus(struct i2c_adapter *adapter, struct i2c_mux_priv *priv = adapter->algo_data; struct i2c_adapter *parent = priv->muxc->parent; - rt_mutex_lock(&parent->mux_lock); + rt_mutex_lock_nested(&parent->mux_lock, i2c_adapter_depth(adapter)); i2c_lock_bus(parent, flags); } diff --git a/drivers/iio/accel/adxl345_core.c b/drivers/iio/accel/adxl345_core.c index 9ccb5828db986def7c1531b5196457995b909190..3dda7afe8a11632bbdec2d12d17eb131198aaa0e 100644 --- a/drivers/iio/accel/adxl345_core.c +++ b/drivers/iio/accel/adxl345_core.c @@ -21,6 +21,8 @@ #define ADXL345_REG_DATAX0 0x32 #define ADXL345_REG_DATAY0 0x34 #define ADXL345_REG_DATAZ0 0x36 +#define ADXL345_REG_DATA_AXIS(index) \ + (ADXL345_REG_DATAX0 + (index) * sizeof(__le16)) #define ADXL345_POWER_CTL_MEASURE BIT(3) #define ADXL345_POWER_CTL_STANDBY 0x00 @@ 
-47,19 +49,19 @@ struct adxl345_data { u8 data_range; }; -#define ADXL345_CHANNEL(reg, axis) { \ +#define ADXL345_CHANNEL(index, axis) { \ .type = IIO_ACCEL, \ .modified = 1, \ .channel2 = IIO_MOD_##axis, \ - .address = reg, \ + .address = index, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ } static const struct iio_chan_spec adxl345_channels[] = { - ADXL345_CHANNEL(ADXL345_REG_DATAX0, X), - ADXL345_CHANNEL(ADXL345_REG_DATAY0, Y), - ADXL345_CHANNEL(ADXL345_REG_DATAZ0, Z), + ADXL345_CHANNEL(0, X), + ADXL345_CHANNEL(1, Y), + ADXL345_CHANNEL(2, Z), }; static int adxl345_read_raw(struct iio_dev *indio_dev, @@ -67,7 +69,7 @@ static int adxl345_read_raw(struct iio_dev *indio_dev, int *val, int *val2, long mask) { struct adxl345_data *data = iio_priv(indio_dev); - __le16 regval; + __le16 accel; int ret; switch (mask) { @@ -77,12 +79,13 @@ static int adxl345_read_raw(struct iio_dev *indio_dev, * ADXL345_REG_DATA(X0/Y0/Z0) contain the least significant byte * and ADXL345_REG_DATA(X0/Y0/Z0) + 1 the most significant byte */ - ret = regmap_bulk_read(data->regmap, chan->address, ®val, - sizeof(regval)); + ret = regmap_bulk_read(data->regmap, + ADXL345_REG_DATA_AXIS(chan->address), + &accel, sizeof(accel)); if (ret < 0) return ret; - *val = sign_extend32(le16_to_cpu(regval), 12); + *val = sign_extend32(le16_to_cpu(accel), 12); return IIO_VAL_INT; case IIO_CHAN_INFO_SCALE: *val = 0; diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c index 565f7d8d3304a3eba3b13d69ad568cc7f916b9bd..f2761b3855411d6db48ef93291c7d4ac256ac93f 100644 --- a/drivers/iio/accel/sca3000.c +++ b/drivers/iio/accel/sca3000.c @@ -797,6 +797,7 @@ static int sca3000_write_raw(struct iio_dev *indio_dev, mutex_lock(&st->lock); ret = sca3000_write_3db_freq(st, val); mutex_unlock(&st->lock); + return ret; default: return -EINVAL; } diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c index 59f99b3a180d7332c4bc41f45d14bdbc12ada06c..d5b9f831eba77e0ce32db8f812cee197df7b44a7 100644 --- a/drivers/iio/adc/ina2xx-adc.c +++ b/drivers/iio/adc/ina2xx-adc.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include @@ -701,6 +702,7 @@ static int ina2xx_buffer_enable(struct iio_dev *indio_dev) { struct ina2xx_chip_info *chip = iio_priv(indio_dev); unsigned int sampling_us = SAMPLING_PERIOD(chip); + struct task_struct *task; dev_dbg(&indio_dev->dev, "Enabling buffer w/ scan_mask %02x, freq = %d, avg =%u\n", (unsigned int)(*indio_dev->active_scan_mask), @@ -710,11 +712,17 @@ static int ina2xx_buffer_enable(struct iio_dev *indio_dev) dev_dbg(&indio_dev->dev, "Async readout mode: %d\n", chip->allow_async_readout); - chip->task = kthread_run(ina2xx_capture_thread, (void *)indio_dev, - "%s:%d-%uus", indio_dev->name, indio_dev->id, - sampling_us); + task = kthread_create(ina2xx_capture_thread, (void *)indio_dev, + "%s:%d-%uus", indio_dev->name, indio_dev->id, + sampling_us); + if (IS_ERR(task)) + return PTR_ERR(task); + + get_task_struct(task); + wake_up_process(task); + chip->task = task; - return PTR_ERR_OR_ZERO(chip->task); + return 0; } static int ina2xx_buffer_disable(struct iio_dev *indio_dev) @@ -723,6 +731,7 @@ static int ina2xx_buffer_disable(struct iio_dev *indio_dev) if (chip->task) { kthread_stop(chip->task); + put_task_struct(chip->task); chip->task = NULL; } diff --git a/drivers/iio/counter/104-quad-8.c b/drivers/iio/counter/104-quad-8.c index ba3d9030cd514f6ac433fb8ca7703eb97c71c0ac..181585ae6e171cc40bbe5dc3356335e990cd034c 100644 
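Aside (not part of the patch): the ina2xx-adc hunk above replaces kthread_run() with kthread_create() plus get_task_struct() before wake_up_process(), and drops the reference with put_task_struct() after kthread_stop(). A self-contained sketch of that lifetime pattern follows; struct my_dev and the function names are made up for illustration, only the kthread/task APIs are real.

    #include <linux/kthread.h>
    #include <linux/sched.h>
    #include <linux/sched/task.h>
    #include <linux/delay.h>
    #include <linux/err.h>

    struct my_dev {
        struct task_struct *task;
    };

    static int my_capture_fn(void *data)
    {
        while (!kthread_should_stop()) {
            /* periodic sampling work would go here */
            msleep_interruptible(100);
        }
        return 0;
    }

    static int my_dev_start(struct my_dev *dev)
    {
        struct task_struct *task;

        task = kthread_create(my_capture_fn, dev, "my_dev-capture");
        if (IS_ERR(task))
            return PTR_ERR(task);

        get_task_struct(task);      /* pin the task_struct for the later stop path */
        wake_up_process(task);
        dev->task = task;
        return 0;
    }

    static void my_dev_stop(struct my_dev *dev)
    {
        if (!dev->task)
            return;
        kthread_stop(dev->task);    /* waits for my_capture_fn() to return */
        put_task_struct(dev->task); /* drop the reference taken in my_dev_start() */
        dev->task = NULL;
    }

With the extra reference held, the task_struct stays valid until put_task_struct(), so kthread_stop() is safe even if the thread has already exited on its own before the disable path runs.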
--- a/drivers/iio/counter/104-quad-8.c +++ b/drivers/iio/counter/104-quad-8.c @@ -138,7 +138,7 @@ static int quad8_write_raw(struct iio_dev *indio_dev, outb(val >> (8 * i), base_offset); /* Reset Borrow, Carry, Compare, and Sign flags */ - outb(0x02, base_offset + 1); + outb(0x04, base_offset + 1); /* Reset Error flag */ outb(0x06, base_offset + 1); diff --git a/drivers/iio/frequency/ad9523.c b/drivers/iio/frequency/ad9523.c index 99eba524f6ddbe063d2cf2ac359491287177d10b..1642b55f70da3ab28fa604c3e373bbab429ac593 100644 --- a/drivers/iio/frequency/ad9523.c +++ b/drivers/iio/frequency/ad9523.c @@ -508,7 +508,7 @@ static ssize_t ad9523_store(struct device *dev, return ret; if (!state) - return 0; + return len; mutex_lock(&indio_dev->mlock); switch ((u32)this_attr->address) { @@ -642,7 +642,7 @@ static int ad9523_read_raw(struct iio_dev *indio_dev, code = (AD9523_CLK_DIST_DIV_PHASE_REV(ret) * 3141592) / AD9523_CLK_DIST_DIV_REV(ret); *val = code / 1000000; - *val2 = (code % 1000000) * 10; + *val2 = code % 1000000; return IIO_VAL_INT_PLUS_MICRO; default: return -EINVAL; diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c index 8f26428804a236fa1a38e612c0cd6aa3245b7eff..5f625ffa2a88d8b12d10491d85e920e519c8a97c 100644 --- a/drivers/iio/pressure/bmp280-core.c +++ b/drivers/iio/pressure/bmp280-core.c @@ -362,10 +362,9 @@ static int bmp280_read_humid(struct bmp280_data *data, int *val, int *val2) } comp_humidity = bmp280_compensate_humidity(data, adc_humidity); - *val = comp_humidity; - *val2 = 1024; + *val = comp_humidity * 1000 / 1024; - return IIO_VAL_FRACTIONAL; + return IIO_VAL_INT; } static int bmp280_read_raw(struct iio_dev *indio_dev, diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 79843a3ca9dcd53d8cc4362bb72b9124ed33bd65..7c5eca312aa884d8a3a11a3651a361d6af12f3ea 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -730,6 +730,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) dgid = (union ib_gid *) &addr->sib_addr; pkey = ntohs(addr->sib_pkey); + mutex_lock(&lock); list_for_each_entry(cur_dev, &dev_list, list) { for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) { if (!rdma_cap_af_ib(cur_dev->device, p)) @@ -756,18 +757,19 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) cma_dev = cur_dev; sgid = gid; id_priv->id.port_num = p; + goto found; } } } } - - if (!cma_dev) - return -ENODEV; + mutex_unlock(&lock); + return -ENODEV; found: cma_attach_to_dev(id_priv, cma_dev); - addr = (struct sockaddr_ib *) cma_src_addr(id_priv); - memcpy(&addr->sib_addr, &sgid, sizeof sgid); + mutex_unlock(&lock); + addr = (struct sockaddr_ib *)cma_src_addr(id_priv); + memcpy(&addr->sib_addr, &sgid, sizeof(sgid)); cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr); return 0; } @@ -1459,9 +1461,16 @@ static bool cma_match_net_dev(const struct rdma_cm_id *id, (addr->src_addr.ss_family == AF_IB || cma_protocol_roce_dev_port(id->device, port_num)); - return !addr->dev_addr.bound_dev_if || - (net_eq(dev_net(net_dev), addr->dev_addr.net) && - addr->dev_addr.bound_dev_if == net_dev->ifindex); + /* + * Net namespaces must match, and if the listner is listening + * on a specific netdevice than netdevice must match as well. 
+ */ + if (net_eq(dev_net(net_dev), addr->dev_addr.net) && + (!!addr->dev_addr.bound_dev_if == + (addr->dev_addr.bound_dev_if == net_dev->ifindex))) + return true; + else + return false; } static struct rdma_id_private *cma_find_listener( diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c index 6ca607e8e293cab4405f6da09af5df8c0b7710ff..9939dcfb1b6a5b434879c3a168c5273aec142f22 100644 --- a/drivers/infiniband/core/rw.c +++ b/drivers/infiniband/core/rw.c @@ -87,7 +87,7 @@ static int rdma_rw_init_one_mr(struct ib_qp *qp, u8 port_num, } ret = ib_map_mr_sg(reg->mr, sg, nents, &offset, PAGE_SIZE); - if (ret < nents) { + if (ret < 0 || ret < nents) { ib_mr_pool_put(qp, &qp->rdma_mrs, reg->mr); return -EINVAL; } diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 0f70ff91276ef658db38d2cb856402c4336b4a6f..aff6ef3ad52c2f3648dbf5356ef9b7a5b94915fa 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -424,6 +424,7 @@ static int ib_uverbs_comp_event_close(struct inode *inode, struct file *filp) list_del(&entry->obj_list); kfree(entry); } + file->ev_queue.is_closed = 1; spin_unlock_irq(&file->ev_queue.lock); uverbs_close_fd(filp); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c index 9536de8c5fb8b3d977b78a43af180b6d4b035b8d..124c8915b9eeee88f599b129e2760f2c767281e6 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c @@ -156,7 +156,7 @@ int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res, struct bnxt_qplib_sgid_tbl *sgid_tbl, int index, struct bnxt_qplib_gid *gid) { - if (index > sgid_tbl->max) { + if (index >= sgid_tbl->max) { dev_err(&res->pdev->dev, "QPLIB: Index %d exceeded SGID table max (%d)", index, sgid_tbl->max); @@ -361,7 +361,7 @@ int bnxt_qplib_get_pkey(struct bnxt_qplib_res *res, *pkey = 0xFFFF; return 0; } - if (index > pkey_tbl->max) { + if (index >= pkey_tbl->max) { dev_err(&res->pdev->dev, "QPLIB: Index %d exceeded PKEY table max (%d)", index, pkey_tbl->max); diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index a8a8f65a1e51bd5e3868951fd801beca9e0e5d1a..24952af51a546bc325adb095d713d435a1d422f7 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -1395,6 +1395,12 @@ static void flush_qp(struct c4iw_qp *qhp) schp = to_c4iw_cq(qhp->ibqp.send_cq); if (qhp->ibqp.uobject) { + + /* for user qps, qhp->wq.flushed is protected by qhp->mutex */ + if (qhp->wq.flushed) + return; + + qhp->wq.flushed = 1; t4_set_wq_in_error(&qhp->wq); t4_set_cq_in_error(&rchp->cq); spin_lock_irqsave(&rchp->comp_handler_lock, flag); diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c index 4b32a4e8d5da53522e521b97a7649cfee66e3ad2..95ac319c8e69bc4ae94c7d4c47715c8b029ed5f7 100644 --- a/drivers/infiniband/hw/hfi1/affinity.c +++ b/drivers/infiniband/hw/hfi1/affinity.c @@ -146,7 +146,7 @@ int node_affinity_init(void) while ((dev = pci_get_device(ids->vendor, ids->device, dev))) { node = pcibus_to_node(dev->bus); if (node < 0) - node = numa_node_id(); + goto out; hfi1_per_node_cntr[node]++; } @@ -154,6 +154,18 @@ int node_affinity_init(void) } return 0; + +out: + /* + * Invalid PCI NUMA node information found, note it, and populate + * our database 1:1. + */ + pr_err("HFI: Invalid PCI NUMA node. 
Performance may be affected\n"); + pr_err("HFI: System BIOS may need to be upgraded\n"); + for (node = 0; node < node_affinity.num_possible_nodes; node++) + hfi1_per_node_cntr[node] = 1; + + return 0; } void node_affinity_destroy(void) @@ -227,8 +239,14 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd) const struct cpumask *local_mask; int curr_cpu, possible, i; - if (node < 0) - node = numa_node_id(); + /* + * If the BIOS does not have the NUMA node information set, select + * NUMA 0 so we get consistent performance. + */ + if (node < 0) { + dd_dev_err(dd, "Invalid PCI NUMA node. Performance may be affected\n"); + node = 0; + } dd->node = node; local_mask = cpumask_of_node(dd->node); diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c index a95ac62465592080ea60a2db31416e2b7b335288..19a8e6052820fec0344f3b81700017a49f21ab01 100644 --- a/drivers/infiniband/hw/hfi1/pio.c +++ b/drivers/infiniband/hw/hfi1/pio.c @@ -86,6 +86,7 @@ void pio_send_control(struct hfi1_devdata *dd, int op) unsigned long flags; int write = 1; /* write sendctrl back */ int flush = 0; /* re-read sendctrl to make sure it is flushed */ + int i; spin_lock_irqsave(&dd->sendctrl_lock, flags); @@ -95,9 +96,13 @@ void pio_send_control(struct hfi1_devdata *dd, int op) reg |= SEND_CTRL_SEND_ENABLE_SMASK; /* Fall through */ case PSC_DATA_VL_ENABLE: + mask = 0; + for (i = 0; i < ARRAY_SIZE(dd->vld); i++) + if (!dd->vld[i].mtu) + mask |= BIT_ULL(i); /* Disallow sending on VLs not enabled */ - mask = (((~0ull) << num_vls) & SEND_CTRL_UNSUPPORTED_VL_MASK) << - SEND_CTRL_UNSUPPORTED_VL_SHIFT; + mask = (mask & SEND_CTRL_UNSUPPORTED_VL_MASK) << + SEND_CTRL_UNSUPPORTED_VL_SHIFT; reg = (reg & ~SEND_CTRL_UNSUPPORTED_VL_SMASK) | mask; break; case PSC_GLOBAL_DISABLE: diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c index c0c0e0445cbfbd72938fe07520f574515992298b..8c954a0ae3b69d9e0834f6906b2ba68f0acdeba0 100644 --- a/drivers/infiniband/hw/hfi1/user_sdma.c +++ b/drivers/infiniband/hw/hfi1/user_sdma.c @@ -828,7 +828,7 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts) if (ACCESS_ONCE(iovec->offset) == iovec->iov.iov_len) { if (++req->iov_idx == req->data_iovs) { ret = -EFAULT; - goto free_txreq; + goto free_tx; } iovec = &req->iovs[req->iov_idx]; WARN_ON(iovec->offset); diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c index e232f3c608b41795a2da43b5439687756c9ec24d..63d404a6752a19db474303f31205e2b2e087d38e 100644 --- a/drivers/infiniband/hw/hfi1/verbs.c +++ b/drivers/infiniband/hw/hfi1/verbs.c @@ -1573,6 +1573,7 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr) struct hfi1_pportdata *ppd; struct hfi1_devdata *dd; u8 sc5; + u8 sl; if (hfi1_check_mcast(rdma_ah_get_dlid(ah_attr)) && !(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) @@ -1581,8 +1582,13 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr) /* test the mapping for validity */ ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr)); ppd = ppd_from_ibp(ibp); - sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)]; dd = dd_from_ppd(ppd); + + sl = rdma_ah_get_sl(ah_attr); + if (sl >= ARRAY_SIZE(ibp->sl_to_sc)) + return -EINVAL; + + sc5 = ibp->sl_to_sc[sl]; if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf) return -EINVAL; return 0; diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c index 
a64500fa114531e769b7816d1e5304559642d45b..3cef53c651331ffc2325b5868106c3b91b7b1358 100644 --- a/drivers/infiniband/hw/hns/hns_roce_pd.c +++ b/drivers/infiniband/hw/hns/hns_roce_pd.c @@ -35,7 +35,7 @@ static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn) { - return hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, pdn); + return hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, pdn) ? -ENOMEM : 0; } static void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn) diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index f5dd21c2d275eb6eb0db9cfe335f230cc9e33874..3a37d26889df169a61cd785cb455fb47d3863019 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -114,7 +114,10 @@ static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt, { struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; - return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, base); + return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, + base) ? + -ENOMEM : + 0; } enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state) diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index b7961f21b555b5a348c520aa184edfe00dacd116..39398dd074d662778066b4deb7d10aac22c174dd 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -1408,6 +1408,7 @@ static void i40iw_set_hugetlb_values(u64 addr, struct i40iw_mr *iwmr) struct vm_area_struct *vma; struct hstate *h; + down_read(¤t->mm->mmap_sem); vma = find_vma(current->mm, addr); if (vma && is_vm_hugetlb_page(vma)) { h = hstate_vma(vma); @@ -1416,6 +1417,7 @@ static void i40iw_set_hugetlb_values(u64 addr, struct i40iw_mr *iwmr) iwmr->page_msk = huge_page_mask(h); } } + up_read(¤t->mm->mmap_sem); } /** diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c index 1587cedee13e2df7a1515700d6eaabdce862bccc..761d3ce6a63a0a2cc31e8ed5787dc35c13881d58 100644 --- a/drivers/infiniband/hw/mlx4/mr.c +++ b/drivers/infiniband/hw/mlx4/mr.c @@ -247,8 +247,11 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags, } if (flags & IB_MR_REREG_ACCESS) { - if (ib_access_writable(mr_access_flags) && !mmr->umem->writable) - return -EPERM; + if (ib_access_writable(mr_access_flags) && + !mmr->umem->writable) { + err = -EPERM; + goto release_mpt_entry; + } err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry, convert_access(mr_access_flags)); diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 9354fec8efe78321c6d909d54d6d843933f003d3..e10c3d915e389156fb26debfbd33cbec7c6cd86b 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -4014,9 +4014,9 @@ static void to_rdma_ah_attr(struct mlx4_ib_dev *ibdev, u8 port_num = path->sched_queue & 0x40 ? 
2 : 1; memset(ah_attr, 0, sizeof(*ah_attr)); - ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, port_num); if (port_num == 0 || port_num > dev->caps.num_ports) return; + ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, port_num); if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) rdma_ah_set_sl(ah_attr, ((path->sched_queue >> 3) & 0x7) | diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c index 3c7522d025f2b559f638fd2bde05200702488dde..93d67d97c279441f939705bb83298c3612563662 100644 --- a/drivers/infiniband/hw/mlx5/srq.c +++ b/drivers/infiniband/hw/mlx5/srq.c @@ -266,18 +266,24 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, desc_size = sizeof(struct mlx5_wqe_srq_next_seg) + srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg); - if (desc_size == 0 || srq->msrq.max_gs > desc_size) - return ERR_PTR(-EINVAL); + if (desc_size == 0 || srq->msrq.max_gs > desc_size) { + err = -EINVAL; + goto err_srq; + } desc_size = roundup_pow_of_two(desc_size); desc_size = max_t(size_t, 32, desc_size); - if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg)) - return ERR_PTR(-EINVAL); + if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg)) { + err = -EINVAL; + goto err_srq; + } srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) / sizeof(struct mlx5_wqe_data_seg); srq->msrq.wqe_shift = ilog2(desc_size); buf_size = srq->msrq.max * desc_size; - if (buf_size < desc_size) - return ERR_PTR(-EINVAL); + if (buf_size < desc_size) { + err = -EINVAL; + goto err_srq; + } in.type = init_attr->srq_type; if (pd->uobject) diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c index 9eb12c2e3c74cf69499124e21fcb1b4a8be4a40f..83cfe44f070ec47d85f43a02e7ce4a44158bab35 100644 --- a/drivers/infiniband/sw/rxe/rxe_comp.c +++ b/drivers/infiniband/sw/rxe/rxe_comp.c @@ -276,6 +276,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp, case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE: if (wqe->wr.opcode != IB_WR_RDMA_READ && wqe->wr.opcode != IB_WR_RDMA_READ_WITH_INV) { + wqe->status = IB_WC_FATAL_ERR; return COMPST_ERROR; } reset_retry_counters(qp); diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c index fb8c83e055e14c653eb1f05f42a3265005d8d737..83412df726a5184cd049dea96d28724c13d3f6ae 100644 --- a/drivers/infiniband/sw/rxe/rxe_recv.c +++ b/drivers/infiniband/sw/rxe/rxe_recv.c @@ -225,9 +225,14 @@ static int hdr_check(struct rxe_pkt_info *pkt) goto err1; } + if (unlikely(qpn == 0)) { + pr_warn_once("QP 0 not supported"); + goto err1; + } + if (qpn != IB_MULTICAST_QPN) { - index = (qpn == 0) ? port->qp_smi_index : - ((qpn == 1) ? port->qp_gsi_index : qpn); + index = (qpn == 1) ? 
port->qp_gsi_index : qpn; + qp = rxe_pool_get_index(&rxe->qp_pool, index); if (unlikely(!qp)) { pr_warn_ratelimited("no qp matches qpn 0x%x\n", qpn); diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c index 54cc9cb1e3b765b848782ad9779225a1dde7e1b6..de853bcc2384d92ede39f6f8d30c5c3749445d6d 100644 --- a/drivers/infiniband/sw/rxe/rxe_req.c +++ b/drivers/infiniband/sw/rxe/rxe_req.c @@ -645,6 +645,9 @@ next_wqe: } else { goto exit; } + if ((wqe->wr.send_flags & IB_SEND_SIGNALED) || + qp->sq_sig_type == IB_SIGNAL_ALL_WR) + rxe_run_task(&qp->comp.task, 1); qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index); goto next_wqe; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 7a5ed5a5391e12b1727a70faac5f0895e01b5b3a..9939f32d015487fc1ed8617d18e6f97f305751ba 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -1018,12 +1018,14 @@ static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even skb_queue_head_init(&skqueue); + netif_tx_lock_bh(p->dev); spin_lock_irq(&priv->lock); set_bit(IPOIB_FLAG_OPER_UP, &p->flags); if (p->neigh) while ((skb = __skb_dequeue(&p->neigh->queue))) __skb_queue_tail(&skqueue, skb); spin_unlock_irq(&priv->lock); + netif_tx_unlock_bh(p->dev); while ((skb = __skb_dequeue(&skqueue))) { skb->dev = p->dev; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 6bc9a768f72193ef0a9b3ad00ed4e9f4da8f23d1..e6ff16b27acdb6c65a234c8536ffcdb87b690cd5 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -1752,7 +1752,8 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port) goto out_free_pd; } - if (ipoib_neigh_hash_init(priv) < 0) { + ret = ipoib_neigh_hash_init(priv); + if (ret) { pr_warn("%s failed to init neigh hash\n", dev->name); goto out_dev_uninit; } diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 299a97b7e17fff2982e8762e4620afb904683870..ade98c234dcb39e1c860bb8b7a115964ac61d540 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -2669,7 +2669,7 @@ static int srp_reset_device(struct scsi_cmnd *scmnd) { struct srp_target_port *target = host_to_target(scmnd->device->host); struct srp_rdma_ch *ch; - int i; + int i, j; u8 status; shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); @@ -2683,8 +2683,8 @@ static int srp_reset_device(struct scsi_cmnd *scmnd) for (i = 0; i < target->ch_count; i++) { ch = &target->ch[i]; - for (i = 0; i < target->req_ring_size; ++i) { - struct srp_request *req = &ch->req_ring[i]; + for (j = 0; j < target->req_ring_size; ++j) { + struct srp_request *req = &ch->req_ring[j]; srp_finish_req(ch, req, scmnd->device, DID_RESET << 16); } diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index 97c2225829eaa545a7e8a5b72762ba413d2e618a..60105ba77889eb14d1106e13f7aa7351e5be693b 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -1713,8 +1713,7 @@ static bool srpt_close_ch(struct srpt_rdma_ch *ch) int ret; if (!srpt_set_ch_state(ch, CH_DRAINING)) { - pr_debug("%s-%d: already closed\n", ch->sess_name, - ch->qp->qp_num); + pr_debug("%s: already closed\n", ch->sess_name); return false; } diff --git a/drivers/input/input.c b/drivers/input/input.c index 
762bfb9487dc961cf1c7d12a18a0d10dd3386b4c..50d425fe6706faf84596851a730a0e62b860d1cb 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -480,11 +480,19 @@ EXPORT_SYMBOL(input_inject_event); */ void input_alloc_absinfo(struct input_dev *dev) { - if (!dev->absinfo) - dev->absinfo = kcalloc(ABS_CNT, sizeof(*dev->absinfo), - GFP_KERNEL); + if (dev->absinfo) + return; - WARN(!dev->absinfo, "%s(): kcalloc() failed?\n", __func__); + dev->absinfo = kcalloc(ABS_CNT, sizeof(*dev->absinfo), GFP_KERNEL); + if (!dev->absinfo) { + dev_err(dev->dev.parent ?: &dev->dev, + "%s: unable to allocate memory\n", __func__); + /* + * We will handle this allocation failure in + * input_register_device() when we refuse to register input + * device with ABS bits but without absinfo. + */ + } } EXPORT_SYMBOL(input_alloc_absinfo); diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c index d91f3b1c53755f44dc9e6a469359d0173dbbf264..92d7396490225732c2aa54fa7951fe771187c82d 100644 --- a/drivers/input/misc/xen-kbdfront.c +++ b/drivers/input/misc/xen-kbdfront.c @@ -229,7 +229,7 @@ static int xenkbd_probe(struct xenbus_device *dev, } } - touch = xenbus_read_unsigned(dev->nodename, + touch = xenbus_read_unsigned(dev->otherend, XENKBD_FIELD_FEAT_MTOUCH, 0); if (touch) { ret = xenbus_write(XBT_NIL, dev->nodename, @@ -304,13 +304,13 @@ static int xenkbd_probe(struct xenbus_device *dev, if (!mtouch) goto error_nomem; - num_cont = xenbus_read_unsigned(info->xbdev->nodename, + num_cont = xenbus_read_unsigned(info->xbdev->otherend, XENKBD_FIELD_MT_NUM_CONTACTS, 1); - width = xenbus_read_unsigned(info->xbdev->nodename, + width = xenbus_read_unsigned(info->xbdev->otherend, XENKBD_FIELD_MT_WIDTH, XENFB_WIDTH); - height = xenbus_read_unsigned(info->xbdev->nodename, + height = xenbus_read_unsigned(info->xbdev->otherend, XENKBD_FIELD_MT_HEIGHT, XENFB_HEIGHT); diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index a250f433eb968b236ae2ddf99cf45f69d7f17fef..84c69e962230bc8f3fb739eb294d2e433147582f 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -1180,6 +1180,8 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = { static const char * const middle_button_pnp_ids[] = { "LEN2131", /* ThinkPad P52 w/ NFC */ "LEN2132", /* ThinkPad P52 */ + "LEN2133", /* ThinkPad P72 w/ NFC */ + "LEN2134", /* ThinkPad P72 */ NULL }; diff --git a/drivers/input/rmi4/rmi_2d_sensor.c b/drivers/input/rmi4/rmi_2d_sensor.c index 8bb866c7b9855c5025d31b7be3f722d469f73da9..8eeffa066022dadb9f718f77aab1609700f05543 100644 --- a/drivers/input/rmi4/rmi_2d_sensor.c +++ b/drivers/input/rmi4/rmi_2d_sensor.c @@ -32,15 +32,15 @@ void rmi_2d_sensor_abs_process(struct rmi_2d_sensor *sensor, if (obj->type == RMI_2D_OBJECT_NONE) return; - if (axis_align->swap_axes) - swap(obj->x, obj->y); - if (axis_align->flip_x) obj->x = sensor->max_x - obj->x; if (axis_align->flip_y) obj->y = sensor->max_y - obj->y; + if (axis_align->swap_axes) + swap(obj->x, obj->y); + /* * Here checking if X offset or y offset are specified is * redundant. We just add the offsets or clip the values. 
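Aside (not part of the patch): the rmi_2d_sensor hunks around this point apply flip_x/flip_y before swap_axes so the flips stay tied to the sensor's native axes. A rough sketch of why the ordering matters; the structure and function below are illustrative, not taken from the driver.

    #include <stdbool.h>

    struct axis_align {
        bool flip_x, flip_y, swap_axes;
        int  max_x, max_y;          /* native sensor maxima */
    };

    static void transform_point(const struct axis_align *a, int *x, int *y)
    {
        int tmp;

        if (a->flip_x)
            *x = a->max_x - *x;     /* mirror along the native X axis */
        if (a->flip_y)
            *y = a->max_y - *y;     /* mirror along the native Y axis */

        if (a->swap_axes) {         /* rotate into the reported frame last */
            tmp = *x;
            *x = *y;
            *y = tmp;
        }
    }

Swapping first would make flip_x act on what was originally the Y axis (and vice versa), which is the behaviour the hunks above correct.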
@@ -120,15 +120,15 @@ void rmi_2d_sensor_rel_report(struct rmi_2d_sensor *sensor, int x, int y) x = min(RMI_2D_REL_POS_MAX, max(RMI_2D_REL_POS_MIN, (int)x)); y = min(RMI_2D_REL_POS_MAX, max(RMI_2D_REL_POS_MIN, (int)y)); - if (axis_align->swap_axes) - swap(x, y); - if (axis_align->flip_x) x = min(RMI_2D_REL_POS_MAX, -x); if (axis_align->flip_y) y = min(RMI_2D_REL_POS_MAX, -y); + if (axis_align->swap_axes) + swap(x, y); + if (x || y) { input_report_rel(sensor->input, REL_X, x); input_report_rel(sensor->input, REL_Y, y); @@ -141,17 +141,10 @@ static void rmi_2d_sensor_set_input_params(struct rmi_2d_sensor *sensor) struct input_dev *input = sensor->input; int res_x; int res_y; + int max_x, max_y; int input_flags = 0; if (sensor->report_abs) { - if (sensor->axis_align.swap_axes) { - swap(sensor->max_x, sensor->max_y); - swap(sensor->axis_align.clip_x_low, - sensor->axis_align.clip_y_low); - swap(sensor->axis_align.clip_x_high, - sensor->axis_align.clip_y_high); - } - sensor->min_x = sensor->axis_align.clip_x_low; if (sensor->axis_align.clip_x_high) sensor->max_x = min(sensor->max_x, @@ -163,14 +156,19 @@ static void rmi_2d_sensor_set_input_params(struct rmi_2d_sensor *sensor) sensor->axis_align.clip_y_high); set_bit(EV_ABS, input->evbit); - input_set_abs_params(input, ABS_MT_POSITION_X, 0, sensor->max_x, - 0, 0); - input_set_abs_params(input, ABS_MT_POSITION_Y, 0, sensor->max_y, - 0, 0); + + max_x = sensor->max_x; + max_y = sensor->max_y; + if (sensor->axis_align.swap_axes) + swap(max_x, max_y); + input_set_abs_params(input, ABS_MT_POSITION_X, 0, max_x, 0, 0); + input_set_abs_params(input, ABS_MT_POSITION_Y, 0, max_y, 0, 0); if (sensor->x_mm && sensor->y_mm) { res_x = (sensor->max_x - sensor->min_x) / sensor->x_mm; res_y = (sensor->max_y - sensor->min_y) / sensor->y_mm; + if (sensor->axis_align.swap_axes) + swap(res_x, res_y); input_abs_set_res(input, ABS_X, res_x); input_abs_set_res(input, ABS_Y, res_y); diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c index fc149ea64be795f92616a920dd9ee492291bab74..59aaac43db91f9784edafe7e9a5b2770fed0d98c 100644 --- a/drivers/input/touchscreen/atmel_mxt_ts.c +++ b/drivers/input/touchscreen/atmel_mxt_ts.c @@ -1647,10 +1647,11 @@ static int mxt_parse_object_table(struct mxt_data *data, break; case MXT_TOUCH_MULTI_T9: data->multitouch = MXT_TOUCH_MULTI_T9; + /* Only handle messages from first T9 instance */ data->T9_reportid_min = min_id; - data->T9_reportid_max = max_id; - data->num_touchids = object->num_report_ids - * mxt_obj_instances(object); + data->T9_reportid_max = min_id + + object->num_report_ids - 1; + data->num_touchids = object->num_report_ids; break; case MXT_SPT_MESSAGECOUNT_T44: data->T44_address = object->start_address; diff --git a/drivers/input/touchscreen/rohm_bu21023.c b/drivers/input/touchscreen/rohm_bu21023.c index eeaf6ff035974c836668a0aaec55fb4bd0f05965..fc54e044fecef7be51d394a0ce92620cfe14a529 100644 --- a/drivers/input/touchscreen/rohm_bu21023.c +++ b/drivers/input/touchscreen/rohm_bu21023.c @@ -304,7 +304,7 @@ static int rohm_i2c_burst_read(struct i2c_client *client, u8 start, void *buf, msg[1].len = len; msg[1].buf = buf; - i2c_lock_adapter(adap); + i2c_lock_bus(adap, I2C_LOCK_SEGMENT); for (i = 0; i < 2; i++) { if (__i2c_transfer(adap, &msg[i], 1) < 0) { @@ -313,7 +313,7 @@ static int rohm_i2c_burst_read(struct i2c_client *client, u8 start, void *buf, } } - i2c_unlock_adapter(adap); + i2c_unlock_bus(adap, I2C_LOCK_SEGMENT); return ret; } diff --git a/drivers/iommu/amd_iommu.c 
b/drivers/iommu/amd_iommu.c index b96b8c11a5861f1cd3af7fd741e3796086ed04f0..dab344e7d4e3b32621b50a952d537e160dac66b0 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -2391,9 +2391,9 @@ static void __unmap_single(struct dma_ops_domain *dma_dom, } if (amd_iommu_unmap_flush) { - dma_ops_free_iova(dma_dom, dma_addr, pages); domain_flush_tlb(&dma_dom->domain); domain_flush_complete(&dma_dom->domain); + dma_ops_free_iova(dma_dom, dma_addr, pages); } else { pages = __roundup_pow_of_two(pages); queue_iova(&dma_dom->iovad, dma_addr >> PAGE_SHIFT, pages, 0); diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 8f7a3c00b6cf3dc5be9868e19b6541a0a938c678..26e99c03390fafa20536e7f9ad8e4dce06e35287 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -1272,6 +1272,7 @@ static irqreturn_t arm_smmu_priq_thread(int irq, void *dev) /* Sync our overflow flag, as we believe we're up to speed */ q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons); + writel(q->cons, q->cons_reg); return IRQ_HANDLED; } diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 3bdb799d3b4b1f5ee1de1e2505d3d0024273c658..2c436376f13eb0570474a9fd3028132162e9bd00 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -2100,12 +2100,16 @@ static int arm_smmu_device_probe(struct platform_device *pdev) if (err) return err; - if (smmu->version == ARM_SMMU_V2 && - smmu->num_context_banks != smmu->num_context_irqs) { - dev_err(dev, - "found only %d context interrupt(s) but %d required\n", - smmu->num_context_irqs, smmu->num_context_banks); - return -ENODEV; + if (smmu->version == ARM_SMMU_V2) { + if (smmu->num_context_banks > smmu->num_context_irqs) { + dev_err(dev, + "found only %d context irq(s) but %d required\n", + smmu->num_context_irqs, smmu->num_context_banks); + return -ENODEV; + } + + /* Ignore superfluous interrupts */ + smmu->num_context_irqs = smmu->num_context_banks; } for (i = 0; i < smmu->num_global_irqs; ++i) { diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index e3dbb6101b4a78fae47a093ddb2c365e4cb3d7af..c0d1c4db5794482c384b4bb6a5f5bd0dde1dab50 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c @@ -1336,8 +1336,8 @@ void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, qi_submit_sync(&desc, iommu); } -void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep, - u64 addr, unsigned mask) +void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid, + u16 qdep, u64 addr, unsigned mask) { struct qi_desc desc; @@ -1352,7 +1352,7 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep, qdep = 0; desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) | - QI_DIOTLB_TYPE; + QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid); qi_submit_sync(&desc, iommu); } diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index e8414bcf83904422be5aa819d41d9dd834d6d290..aaf3fed9747712500a3b7fe564beec81aaf422c5 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -422,6 +422,7 @@ struct device_domain_info { struct list_head global; /* link to global list */ u8 bus; /* PCI bus number */ u8 devfn; /* PCI devfn number */ + u16 pfsid; /* SRIOV physical function source ID */ u8 pasid_supported:3; u8 pasid_enabled:1; u8 pri_supported:1; @@ -1502,6 +1503,20 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info) return; pdev = to_pci_dev(info->dev); + /* For IOMMU that supports device IOTLB throttling (DIT), we assign + 
* PFSID to the invalidation desc of a VF such that IOMMU HW can gauge + * queue depth at PF level. If DIT is not set, PFSID will be treated as + * reserved, which should be set to 0. + */ + if (!ecap_dit(info->iommu->ecap)) + info->pfsid = 0; + else { + struct pci_dev *pf_pdev; + + /* pdev will be returned if device is not a vf */ + pf_pdev = pci_physfn(pdev); + info->pfsid = PCI_DEVID(pf_pdev->bus->number, pf_pdev->devfn); + } #ifdef CONFIG_INTEL_IOMMU_SVM /* The PCIe spec, in its wisdom, declares that the behaviour of @@ -1567,7 +1582,8 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain, sid = info->bus << 8 | info->devfn; qdep = info->ats_qdep; - qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask); + qi_flush_dev_iotlb(info->iommu, sid, info->pfsid, + qdep, addr, mask); } spin_unlock_irqrestore(&device_domain_lock, flags); } diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index 6961fc393f0b25828f5981fad35ea744c7768b41..29b7a6755fcdaabfee66f9444b8193d4b6400cf6 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -192,6 +192,7 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp, { struct io_pgtable_cfg *cfg = &data->iop.cfg; struct device *dev = cfg->iommu_dev; + phys_addr_t phys; dma_addr_t dma; size_t size = ARM_V7S_TABLE_SIZE(lvl); void *table = NULL; @@ -200,6 +201,10 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp, table = (void *)__get_dma_pages(__GFP_ZERO, get_order(size)); else if (lvl == 2) table = kmem_cache_zalloc(data->l2_tables, gfp | GFP_DMA); + phys = virt_to_phys(table); + if (phys != (arm_v7s_iopte)phys) + /* Doesn't fit in PTE */ + goto out_free; if (table && !(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) { dma = dma_map_single(dev, table, size, DMA_TO_DEVICE); if (dma_mapping_error(dev, dma)) @@ -209,7 +214,7 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp, * address directly, so if the DMA layer suggests otherwise by * translating or truncating them, that bodes very badly... */ - if (dma != virt_to_phys(table)) + if (dma != phys) goto out_unmap; } kmemleak_ignore(table); diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 195d6e93ac7185aa53ee075521faf0de56ec53bf..5d0ba5f644c48ca89910102b01c8362fd0b796cf 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -54,7 +54,7 @@ struct ipmmu_vmsa_domain { struct io_pgtable_ops *iop; unsigned int context_id; - spinlock_t lock; /* Protects mappings */ + struct mutex mutex; /* Protects mappings */ }; struct ipmmu_vmsa_iommu_priv { @@ -523,7 +523,7 @@ static struct iommu_domain *__ipmmu_domain_alloc(unsigned type) if (!domain) return NULL; - spin_lock_init(&domain->lock); + mutex_init(&domain->mutex); return &domain->io_domain; } @@ -548,7 +548,6 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain, struct iommu_fwspec *fwspec = dev->iommu_fwspec; struct ipmmu_vmsa_device *mmu = priv->mmu; struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); - unsigned long flags; unsigned int i; int ret = 0; @@ -557,7 +556,7 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain, return -ENXIO; } - spin_lock_irqsave(&domain->lock, flags); + mutex_lock(&domain->mutex); if (!domain->mmu) { /* The domain hasn't been used yet, initialize it. 
*/ @@ -574,7 +573,7 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain, } else dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id); - spin_unlock_irqrestore(&domain->lock, flags); + mutex_unlock(&domain->mutex); if (ret < 0) return ret; diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index 04f4d51ffacb1e50ae3f87f04fd3b23306f7b55b..92c8c83ce38c3624359c1906d5a649641c02d81a 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -395,20 +395,15 @@ static int msm_iommu_add_device(struct device *dev) struct msm_iommu_dev *iommu; struct iommu_group *group; unsigned long flags; - int ret = 0; spin_lock_irqsave(&msm_iommu_lock, flags); - iommu = find_iommu_for_dev(dev); + spin_unlock_irqrestore(&msm_iommu_lock, flags); + if (iommu) iommu_device_link(&iommu->iommu, dev); else - ret = -ENODEV; - - spin_unlock_irqrestore(&msm_iommu_lock, flags); - - if (ret) - return ret; + return -ENODEV; group = iommu_group_get_for_dev(dev); if (IS_ERR(group)) @@ -425,13 +420,12 @@ static void msm_iommu_remove_device(struct device *dev) unsigned long flags; spin_lock_irqsave(&msm_iommu_lock, flags); - iommu = find_iommu_for_dev(dev); + spin_unlock_irqrestore(&msm_iommu_lock, flags); + if (iommu) iommu_device_unlink(&iommu->iommu, dev); - spin_unlock_irqrestore(&msm_iommu_lock, flags); - iommu_group_remove_device(dev); } diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index bd67e1b2c64eadf25f5b10d120cf1214b120b54f..57960cb5e04552f01b9d23cfe5e380c8e5004eb2 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -529,7 +529,7 @@ static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, pte_ready: iopte = iopte_offset(iopgd, da); - *pt_dma = virt_to_phys(iopte); + *pt_dma = iopgd_page_paddr(iopgd); dev_vdbg(obj->dev, "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n", __func__, da, iopgd, *iopgd, iopte, *iopte); @@ -717,7 +717,7 @@ static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da) } bytes *= nent; memset(iopte, 0, nent * sizeof(*iopte)); - pt_dma = virt_to_phys(iopte); + pt_dma = iopgd_page_paddr(iopgd); flush_iopte_range(obj->dev, pt_dma, pt_offset, nent); /* diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c index 55cfb986225be79386b3d6487b953ff63ca4ab59..0b9a8b709abf892a8af5f3cf82cbde5a344f0182 100644 --- a/drivers/irqchip/irq-bcm7038-l1.c +++ b/drivers/irqchip/irq-bcm7038-l1.c @@ -217,6 +217,7 @@ static int bcm7038_l1_set_affinity(struct irq_data *d, return 0; } +#ifdef CONFIG_SMP static void bcm7038_l1_cpu_offline(struct irq_data *d) { struct cpumask *mask = irq_data_get_affinity_mask(d); @@ -241,6 +242,7 @@ static void bcm7038_l1_cpu_offline(struct irq_data *d) } irq_set_affinity_locked(d, &new_affinity, false); } +#endif static int __init bcm7038_l1_init_one(struct device_node *dn, unsigned int idx, @@ -293,7 +295,9 @@ static struct irq_chip bcm7038_l1_irq_chip = { .irq_mask = bcm7038_l1_mask, .irq_unmask = bcm7038_l1_unmask, .irq_set_affinity = bcm7038_l1_set_affinity, +#ifdef CONFIG_SMP .irq_cpu_offline = bcm7038_l1_cpu_offline, +#endif }; static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq, diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c index 3f0ddc0d7393fdd477aa82578b43d37034e08e77..3fb65778e03d205df6ccd04a728d9241208ea1c0 100644 --- a/drivers/lightnvm/pblk-core.c +++ b/drivers/lightnvm/pblk-core.c @@ -190,7 +190,6 @@ void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off, WARN_ON(off + nr_pages != 
bio->bi_vcnt); - bio_advance(bio, off * PBLK_EXPOSED_PAGE_SIZE); for (i = off; i < nr_pages + off; i++) { bv = bio->bi_io_vec[i]; mempool_free(bv.bv_page, pblk->page_bio_pool); diff --git a/drivers/lightnvm/pblk-write.c b/drivers/lightnvm/pblk-write.c index 3ad9e56d2473412ae3ead5b2946066643ebd541c..d89ac573f8d838eca3401f671f558c25df19260b 100644 --- a/drivers/lightnvm/pblk-write.c +++ b/drivers/lightnvm/pblk-write.c @@ -33,6 +33,10 @@ static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd, bio_endio(original_bio); } + if (c_ctx->nr_padded) + pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid, + c_ctx->nr_padded); + #ifdef CONFIG_NVM_DEBUG atomic_long_add(c_ctx->nr_valid, &pblk->sync_writes); #endif @@ -521,7 +525,8 @@ static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd) struct bio *bio = rqd->bio; if (c_ctx->nr_padded) - pblk_bio_free_pages(pblk, bio, rqd->nr_ppas, c_ctx->nr_padded); + pblk_bio_free_pages(pblk, bio, c_ctx->nr_valid, + c_ctx->nr_padded); } static int pblk_submit_write(struct pblk *pblk) diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c index c4c2b3b85ebc05f4ceea9c484f674ed40d427b42..f6e040fcad9a4936447fcecd1a49525e6a117468 100644 --- a/drivers/macintosh/via-pmu.c +++ b/drivers/macintosh/via-pmu.c @@ -532,8 +532,9 @@ init_pmu(void) int timeout; struct adb_request req; - out_8(&via[B], via[B] | TREQ); /* negate TREQ */ - out_8(&via[DIRB], (via[DIRB] | TREQ) & ~TACK); /* TACK in, TREQ out */ + /* Negate TREQ. Set TACK to input and TREQ to output. */ + out_8(&via[B], in_8(&via[B]) | TREQ); + out_8(&via[DIRB], (in_8(&via[DIRB]) | TREQ) & ~TACK); pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask); timeout = 100000; @@ -1455,8 +1456,8 @@ pmu_sr_intr(void) struct adb_request *req; int bite = 0; - if (via[B] & TREQ) { - printk(KERN_ERR "PMU: spurious SR intr (%x)\n", via[B]); + if (in_8(&via[B]) & TREQ) { + printk(KERN_ERR "PMU: spurious SR intr (%x)\n", in_8(&via[B])); out_8(&via[IFR], SR_INT); return NULL; } diff --git a/drivers/mailbox/mailbox-xgene-slimpro.c b/drivers/mailbox/mailbox-xgene-slimpro.c index a7040163dd43983d26071faec7ac9f815db9b799..b8b2b3533f466badbf0645efda9ea975c460c998 100644 --- a/drivers/mailbox/mailbox-xgene-slimpro.c +++ b/drivers/mailbox/mailbox-xgene-slimpro.c @@ -195,9 +195,9 @@ static int slimpro_mbox_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ctx); regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - mb_base = devm_ioremap(&pdev->dev, regs->start, resource_size(regs)); - if (!mb_base) - return -ENOMEM; + mb_base = devm_ioremap_resource(&pdev->dev, regs); + if (IS_ERR(mb_base)) + return PTR_ERR(mb_base); /* Setup mailbox links */ for (i = 0; i < MBOX_CNT; i++) { diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 930b00f6a3a2a4b780d7c6ea166b7b36181caf22..5adb0c850b6c0800967c03f85810b74992d99c32 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -456,8 +456,10 @@ static int bch_writeback_thread(void *arg) * data on cache. BCACHE_DEV_DETACHING flag is set in * bch_cached_dev_detach(). 
*/ - if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) + if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) { + up_write(&dc->writeback_lock); break; + } } up_write(&dc->writeback_lock); diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index 4a4e9c75fc4cddca59c30df2f18ebaa6f0baaa61..0a5a45f3ec5fcd4dbd471e0a83a3a0694b25af85 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c @@ -362,7 +362,7 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd) disk_super->version = cpu_to_le32(cmd->version); memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name)); memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version)); - disk_super->policy_hint_size = 0; + disk_super->policy_hint_size = cpu_to_le32(0); __copy_sm_root(cmd, disk_super); @@ -700,6 +700,7 @@ static int __commit_transaction(struct dm_cache_metadata *cmd, disk_super->policy_version[0] = cpu_to_le32(cmd->policy_version[0]); disk_super->policy_version[1] = cpu_to_le32(cmd->policy_version[1]); disk_super->policy_version[2] = cpu_to_le32(cmd->policy_version[2]); + disk_super->policy_hint_size = cpu_to_le32(cmd->policy_hint_size); disk_super->read_hits = cpu_to_le32(cmd->stats.read_hits); disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses); @@ -1321,6 +1322,7 @@ static int __load_mapping_v1(struct dm_cache_metadata *cmd, dm_oblock_t oblock; unsigned flags; + bool dirty = true; dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le); memcpy(&mapping, mapping_value_le, sizeof(mapping)); @@ -1331,8 +1333,10 @@ static int __load_mapping_v1(struct dm_cache_metadata *cmd, dm_array_cursor_get_value(hint_cursor, (void **) &hint_value_le); memcpy(&hint, hint_value_le, sizeof(hint)); } + if (cmd->clean_when_opened) + dirty = flags & M_DIRTY; - r = fn(context, oblock, to_cblock(cb), flags & M_DIRTY, + r = fn(context, oblock, to_cblock(cb), dirty, le32_to_cpu(hint), hints_valid); if (r) { DMERR("policy couldn't load cache block %llu", @@ -1360,7 +1364,7 @@ static int __load_mapping_v2(struct dm_cache_metadata *cmd, dm_oblock_t oblock; unsigned flags; - bool dirty; + bool dirty = true; dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le); memcpy(&mapping, mapping_value_le, sizeof(mapping)); @@ -1371,8 +1375,9 @@ static int __load_mapping_v2(struct dm_cache_metadata *cmd, dm_array_cursor_get_value(hint_cursor, (void **) &hint_value_le); memcpy(&hint, hint_value_le, sizeof(hint)); } + if (cmd->clean_when_opened) + dirty = dm_bitset_cursor_get_value(dirty_cursor); - dirty = dm_bitset_cursor_get_value(dirty_cursor); r = fn(context, oblock, to_cblock(cb), dirty, le32_to_cpu(hint), hints_valid); if (r) { diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 71c3507df9a0ee530c2927f1a8ac4a571d3e28fd..a4b7c26980966065836b6446f3825080b9036373 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -2330,7 +2330,7 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as, {0, 2, "Invalid number of cache feature arguments"}, }; - int r; + int r, mode_ctr = 0; unsigned argc; const char *arg; struct cache_features *cf = &ca->features; @@ -2344,14 +2344,20 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as, while (argc--) { arg = dm_shift_arg(as); - if (!strcasecmp(arg, "writeback")) + if (!strcasecmp(arg, "writeback")) { cf->io_mode = CM_IO_WRITEBACK; + mode_ctr++; + } - else if (!strcasecmp(arg, "writethrough")) + else if 
(!strcasecmp(arg, "writethrough")) { cf->io_mode = CM_IO_WRITETHROUGH; + mode_ctr++; + } - else if (!strcasecmp(arg, "passthrough")) + else if (!strcasecmp(arg, "passthrough")) { cf->io_mode = CM_IO_PASSTHROUGH; + mode_ctr++; + } else if (!strcasecmp(arg, "metadata2")) cf->metadata_version = 2; @@ -2362,6 +2368,11 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as, } } + if (mode_ctr > 1) { + *error = "Duplicate cache io_mode features requested"; + return -EINVAL; + } + return 0; } diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index f575110454b6a2dd7df499e07fe15141184a7d06..c60d29d09687d73368e0490d096736c8d1456c5a 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -3072,11 +3072,11 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits) */ limits->max_segment_size = PAGE_SIZE; - if (cc->sector_size != (1 << SECTOR_SHIFT)) { - limits->logical_block_size = cc->sector_size; - limits->physical_block_size = cc->sector_size; - blk_limits_io_min(limits, cc->sector_size); - } + limits->logical_block_size = + max_t(unsigned short, limits->logical_block_size, cc->sector_size); + limits->physical_block_size = + max_t(unsigned, limits->physical_block_size, cc->sector_size); + limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size); } static struct target_type crypt_target = { diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index cbc56372ff97b7c82337d51bfb9ca8640b0c697d..898286ed47a1004f00e7ff23f9cb29133b0f9aa3 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -177,7 +177,7 @@ struct dm_integrity_c { __u8 sectors_per_block; unsigned char mode; - bool suspending; + int suspending; int failed; @@ -2209,7 +2209,7 @@ static void dm_integrity_postsuspend(struct dm_target *ti) del_timer_sync(&ic->autocommit_timer); - ic->suspending = true; + WRITE_ONCE(ic->suspending, 1); queue_work(ic->commit_wq, &ic->commit_work); drain_workqueue(ic->commit_wq); @@ -2219,7 +2219,7 @@ static void dm_integrity_postsuspend(struct dm_target *ti) dm_integrity_flush_buffers(ic); } - ic->suspending = false; + WRITE_ONCE(ic->suspending, 0); BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress)); diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c index cf2c67e35eafe75353e016d48efefd9aadd36291..d4b326914f0687b49bdfc83f3c30ceb60411a493 100644 --- a/drivers/md/dm-kcopyd.c +++ b/drivers/md/dm-kcopyd.c @@ -484,6 +484,8 @@ static int run_complete_job(struct kcopyd_job *job) if (atomic_dec_and_test(&kc->nr_jobs)) wake_up(&kc->destroyq); + cond_resched(); + return 0; } diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 72ae5dc50532ec799b77458e7fba31d2480c23c0..6cf9ad4e4e16446815d86a94f1b946e835bbd317 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -2514,6 +2514,8 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) case PM_WRITE: if (old_mode != new_mode) notify_of_pool_mode_change(pool, "write"); + if (old_mode == PM_OUT_OF_DATA_SPACE) + cancel_delayed_work_sync(&pool->no_space_timeout); pool->out_of_data_space = false; pool->pf.error_if_no_space = pt->requested_pf.error_if_no_space; dm_pool_metadata_read_write(pool->pmd); diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index 72ce0bccc86553a5ecbf6ffb59d3883f01e3dc0e..717aaffc227dfa942821bebf4133670dbc684d95 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c @@ -304,15 +304,6 @@ static void recover_bitmaps(struct md_thread *thread) while (cinfo->recovery_map) { slot = 
fls64((u64)cinfo->recovery_map) - 1; - /* Clear suspend_area associated with the bitmap */ - spin_lock_irq(&cinfo->suspend_lock); - list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list) - if (slot == s->slot) { - list_del(&s->list); - kfree(s); - } - spin_unlock_irq(&cinfo->suspend_lock); - snprintf(str, 64, "bitmap%04d", slot); bm_lockres = lockres_init(mddev, str, NULL, 1); if (!bm_lockres) { @@ -331,6 +322,16 @@ static void recover_bitmaps(struct md_thread *thread) pr_err("md-cluster: Could not copy data from bitmap %d\n", slot); goto clear_bit; } + + /* Clear suspend_area associated with the bitmap */ + spin_lock_irq(&cinfo->suspend_lock); + list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list) + if (slot == s->slot) { + list_del(&s->list); + kfree(s); + } + spin_unlock_irq(&cinfo->suspend_lock); + if (hi > 0) { if (lo < mddev->recovery_cp) mddev->recovery_cp = lo; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index b20c23f970f494f18d6a4b5b29de8275aef77bc8..262a0f0f8fd5cd9aaa52c75bb4655c7f96b639bd 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -3754,6 +3754,13 @@ static int raid10_run(struct mddev *mddev) disk->rdev->saved_raid_disk < 0) conf->fullsync = 1; } + + if (disk->replacement && + !test_bit(In_sync, &disk->replacement->flags) && + disk->replacement->saved_raid_disk < 0) { + conf->fullsync = 1; + } + disk->recovery_disabled = mddev->recovery_disabled - 1; } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 6d8112678ff9354ef93c34c024921c8229a78396..84fadd93f3a06b57d818ff7100d50fc23451c3a4 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -4518,6 +4518,12 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) s->failed++; if (rdev && !test_bit(Faulty, &rdev->flags)) do_recovery = 1; + else if (!rdev) { + rdev = rcu_dereference( + conf->disks[i].replacement); + if (rdev && !test_bit(Faulty, &rdev->flags)) + do_recovery = 1; + } } if (test_bit(R5_InJournal, &dev->flags)) diff --git a/drivers/media/dvb-frontends/helene.c b/drivers/media/dvb-frontends/helene.c index 2ab8d83e55768310ed826f3e4e36d07042070633..fcfe658a43288d418c5f56948f19ddd7088fe5da 100644 --- a/drivers/media/dvb-frontends/helene.c +++ b/drivers/media/dvb-frontends/helene.c @@ -897,7 +897,10 @@ static int helene_x_pon(struct helene_priv *priv) helene_write_regs(priv, 0x99, cdata, sizeof(cdata)); /* 0x81 - 0x94 */ - data[0] = 0x18; /* xtal 24 MHz */ + if (priv->xtal == SONY_HELENE_XTAL_16000) + data[0] = 0x10; /* xtal 16 MHz */ + else + data[0] = 0x18; /* xtal 24 MHz */ data[1] = (uint8_t)(0x80 | (0x04 & 0x1F)); /* 4 x 25 = 100uA */ data[2] = (uint8_t)(0x80 | (0x26 & 0x7F)); /* 38 x 0.25 = 9.5pF */ data[3] = 0x80; /* REFOUT signal output 500mVpp */ diff --git a/drivers/media/i2c/ov5645.c b/drivers/media/i2c/ov5645.c index a31fe18c71d6d7c7fd9278fdde6113c233e08856..2d96c184975936be0de8e2b3e9b7a0e8f9641104 100644 --- a/drivers/media/i2c/ov5645.c +++ b/drivers/media/i2c/ov5645.c @@ -510,8 +510,8 @@ static const struct reg_value ov5645_setting_full[] = { }; static const s64 link_freq[] = { - 222880000, - 334320000 + 224000000, + 336000000 }; static const struct ov5645_mode_info ov5645_mode_info_data[] = { @@ -520,7 +520,7 @@ static const struct ov5645_mode_info ov5645_mode_info_data[] = { .height = 960, .data = ov5645_setting_sxga, .data_size = ARRAY_SIZE(ov5645_setting_sxga), - .pixel_clock = 111440000, + .pixel_clock = 112000000, .link_freq = 0 /* an index in link_freq[] */ }, { @@ -528,7 +528,7 @@ static const struct ov5645_mode_info 
ov5645_mode_info_data[] = { .height = 1080, .data = ov5645_setting_1080p, .data_size = ARRAY_SIZE(ov5645_setting_1080p), - .pixel_clock = 167160000, + .pixel_clock = 168000000, .link_freq = 1 /* an index in link_freq[] */ }, { @@ -536,7 +536,7 @@ static const struct ov5645_mode_info ov5645_mode_info_data[] = { .height = 1944, .data = ov5645_setting_full, .data_size = ARRAY_SIZE(ov5645_setting_full), - .pixel_clock = 167160000, + .pixel_clock = 168000000, .link_freq = 1 /* an index in link_freq[] */ }, }; @@ -1157,7 +1157,8 @@ static int ov5645_probe(struct i2c_client *client, return ret; } - if (xclk_freq != 23880000) { + /* external clock must be 24MHz, allow 1% tolerance */ + if (xclk_freq < 23760000 || xclk_freq > 24240000) { dev_err(dev, "external clock frequency %u is not supported\n", xclk_freq); return -EINVAL; diff --git a/drivers/media/i2c/ov5647.c b/drivers/media/i2c/ov5647.c index b7bacaa24303a829ef758e2c3c17a0d9d54a6582..5b5845f6b9ec274044f56f3cdcaab466c2a93e7a 100644 --- a/drivers/media/i2c/ov5647.c +++ b/drivers/media/i2c/ov5647.c @@ -348,7 +348,7 @@ static int ov5647_sensor_power(struct v4l2_subdev *sd, int on) dev_dbg(&client->dev, "OV5647 power on\n"); if (ov5647->pwdn) { - gpiod_set_value(ov5647->pwdn, 0); + gpiod_set_value_cansleep(ov5647->pwdn, 0); msleep(PWDN_ACTIVE_DELAY_MS); } @@ -390,7 +390,7 @@ static int ov5647_sensor_power(struct v4l2_subdev *sd, int on) clk_disable_unprepare(ov5647->xclk); - gpiod_set_value(ov5647->pwdn, 1); + gpiod_set_value_cansleep(ov5647->pwdn, 1); } /* Update the power count. */ @@ -624,13 +624,13 @@ static int ov5647_probe(struct i2c_client *client, goto mutex_remove; if (sensor->pwdn) { - gpiod_set_value(sensor->pwdn, 0); + gpiod_set_value_cansleep(sensor->pwdn, 0); msleep(PWDN_ACTIVE_DELAY_MS); } ret = ov5647_detect(sd); - gpiod_set_value(sensor->pwdn, 1); + gpiod_set_value_cansleep(sensor->pwdn, 1); if (ret < 0) goto error; diff --git a/drivers/media/i2c/soc_camera/ov772x.c b/drivers/media/i2c/soc_camera/ov772x.c index 806383500313e029bcf9bbb14cdbac0f55fcefff..14377af7c888898b2c19e969c2475fe558ebdac3 100644 --- a/drivers/media/i2c/soc_camera/ov772x.c +++ b/drivers/media/i2c/soc_camera/ov772x.c @@ -834,7 +834,7 @@ static int ov772x_set_params(struct ov772x_priv *priv, * set COM8 */ if (priv->band_filter) { - ret = ov772x_mask_set(client, COM8, BNDF_ON_OFF, 1); + ret = ov772x_mask_set(client, COM8, BNDF_ON_OFF, BNDF_ON_OFF); if (!ret) ret = ov772x_mask_set(client, BDBASE, 0xff, 256 - priv->band_filter); diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c index 698fa764999c7a0dab55e13ef1cf363c19f45a67..59b0c1fce9be70e7fdc00dd8dfee8d45ef2a2b05 100644 --- a/drivers/media/i2c/tvp5150.c +++ b/drivers/media/i2c/tvp5150.c @@ -871,7 +871,7 @@ static int tvp5150_fill_fmt(struct v4l2_subdev *sd, f = &format->format; f->width = decoder->rect.width; - f->height = decoder->rect.height; + f->height = decoder->rect.height / 2; f->code = MEDIA_BUS_FMT_UYVY8_2X8; f->field = V4L2_FIELD_ALTERNATE; diff --git a/drivers/media/pci/tw686x/tw686x-video.c b/drivers/media/pci/tw686x/tw686x-video.c index 0ea8dd44026c34e6772b35c391aa03d15aefeffd..3a06c000f97bbf629c8539002cddb7d869baa850 100644 --- a/drivers/media/pci/tw686x/tw686x-video.c +++ b/drivers/media/pci/tw686x/tw686x-video.c @@ -1190,6 +1190,14 @@ int tw686x_video_init(struct tw686x_dev *dev) return err; } + /* Initialize vc->dev and vc->ch for the error path */ + for (ch = 0; ch < max_channels(dev); ch++) { + struct tw686x_video_channel *vc = &dev->video_channels[ch]; + + vc->dev = dev; 
+ vc->ch = ch; + } + for (ch = 0; ch < max_channels(dev); ch++) { struct tw686x_video_channel *vc = &dev->video_channels[ch]; struct video_device *vdev; @@ -1198,9 +1206,6 @@ int tw686x_video_init(struct tw686x_dev *dev) spin_lock_init(&vc->qlock); INIT_LIST_HEAD(&vc->vidq_queued); - vc->dev = dev; - vc->ch = ch; - /* default settings */ err = tw686x_set_standard(vc, V4L2_STD_NTSC); if (err) diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c index 56fe4e5b396e1a068ba601deeabe02af053e678c..4a65861433d6862cf0c7e8775e5d71658ecee3fb 100644 --- a/drivers/media/platform/davinci/vpif_display.c +++ b/drivers/media/platform/davinci/vpif_display.c @@ -1114,6 +1114,14 @@ vpif_init_free_channel_objects: return err; } +static void free_vpif_objs(void) +{ + int i; + + for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++) + kfree(vpif_obj.dev[i]); +} + static int vpif_async_bound(struct v4l2_async_notifier *notifier, struct v4l2_subdev *subdev, struct v4l2_async_subdev *asd) @@ -1250,11 +1258,6 @@ static __init int vpif_probe(struct platform_device *pdev) return -EINVAL; } - if (!pdev->dev.platform_data) { - dev_warn(&pdev->dev, "Missing platform data. Giving up.\n"); - return -EINVAL; - } - vpif_dev = &pdev->dev; err = initialize_vpif(); @@ -1266,7 +1269,7 @@ static __init int vpif_probe(struct platform_device *pdev) err = v4l2_device_register(vpif_dev, &vpif_obj.v4l2_dev); if (err) { v4l2_err(vpif_dev->driver, "Error registering v4l2 device\n"); - return err; + goto vpif_free; } while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, res_idx))) { @@ -1309,7 +1312,10 @@ static __init int vpif_probe(struct platform_device *pdev) if (vpif_obj.sd[i]) vpif_obj.sd[i]->grp_id = 1 << i; } - vpif_probe_complete(); + err = vpif_probe_complete(); + if (err) { + goto probe_subdev_out; + } } else { vpif_obj.notifier.subdevs = vpif_obj.config->asd; vpif_obj.notifier.num_subdevs = vpif_obj.config->asd_sizes[0]; @@ -1330,6 +1336,8 @@ probe_subdev_out: kfree(vpif_obj.sd); vpif_unregister: v4l2_device_unregister(&vpif_obj.v4l2_dev); +vpif_free: + free_vpif_objs(); return err; } @@ -1351,8 +1359,8 @@ static int vpif_remove(struct platform_device *device) ch = vpif_obj.dev[i]; /* Unregister video device */ video_unregister_device(&ch->video_dev); - kfree(vpif_obj.dev[i]); } + free_vpif_objs(); return 0; } diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.c b/drivers/media/platform/exynos4-is/fimc-isp-video.c index 55ba696b8cf40e13339e42234bfd352e8df90520..a920164f53f1f0ab9de8a9947af72ac6cf653c95 100644 --- a/drivers/media/platform/exynos4-is/fimc-isp-video.c +++ b/drivers/media/platform/exynos4-is/fimc-isp-video.c @@ -384,12 +384,17 @@ static void __isp_video_try_fmt(struct fimc_isp *isp, struct v4l2_pix_format_mplane *pixm, const struct fimc_fmt **fmt) { - *fmt = fimc_isp_find_format(&pixm->pixelformat, NULL, 2); + const struct fimc_fmt *__fmt; + + __fmt = fimc_isp_find_format(&pixm->pixelformat, NULL, 2); + + if (fmt) + *fmt = __fmt; pixm->colorspace = V4L2_COLORSPACE_SRGB; pixm->field = V4L2_FIELD_NONE; - pixm->num_planes = (*fmt)->memplanes; - pixm->pixelformat = (*fmt)->fourcc; + pixm->num_planes = __fmt->memplanes; + pixm->pixelformat = __fmt->fourcc; /* * TODO: double check with the docmentation these width/height * constraints are correct. 
diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c index fb43025df57379834a99e1be3d6124c827cd91a9..254d696dffd89058449083df4ed8652696dde24d 100644 --- a/drivers/media/platform/fsl-viu.c +++ b/drivers/media/platform/fsl-viu.c @@ -1417,7 +1417,7 @@ static int viu_of_probe(struct platform_device *op) sizeof(struct viu_reg), DRV_NAME)) { dev_err(&op->dev, "Error while requesting mem region\n"); ret = -EBUSY; - goto err; + goto err_irq; } /* remap registers */ @@ -1425,7 +1425,7 @@ static int viu_of_probe(struct platform_device *op) if (!viu_regs) { dev_err(&op->dev, "Can't map register set\n"); ret = -ENOMEM; - goto err; + goto err_irq; } /* Prepare our private structure */ @@ -1433,7 +1433,7 @@ static int viu_of_probe(struct platform_device *op) if (!viu_dev) { dev_err(&op->dev, "Can't allocate private structure\n"); ret = -ENOMEM; - goto err; + goto err_irq; } viu_dev->vr = viu_regs; @@ -1449,16 +1449,21 @@ static int viu_of_probe(struct platform_device *op) ret = v4l2_device_register(viu_dev->dev, &viu_dev->v4l2_dev); if (ret < 0) { dev_err(&op->dev, "v4l2_device_register() failed: %d\n", ret); - goto err; + goto err_irq; } ad = i2c_get_adapter(0); + if (!ad) { + ret = -EFAULT; + dev_err(&op->dev, "couldn't get i2c adapter\n"); + goto err_v4l2; + } v4l2_ctrl_handler_init(&viu_dev->hdl, 5); if (viu_dev->hdl.error) { ret = viu_dev->hdl.error; dev_err(&op->dev, "couldn't register control\n"); - goto err_vdev; + goto err_i2c; } /* This control handler will inherit the control(s) from the sub-device(s). */ @@ -1475,7 +1480,7 @@ static int viu_of_probe(struct platform_device *op) vdev = video_device_alloc(); if (vdev == NULL) { ret = -ENOMEM; - goto err_vdev; + goto err_hdl; } *vdev = viu_template; @@ -1496,7 +1501,7 @@ static int viu_of_probe(struct platform_device *op) ret = video_register_device(viu_dev->vdev, VFL_TYPE_GRABBER, -1); if (ret < 0) { video_device_release(viu_dev->vdev); - goto err_vdev; + goto err_unlock; } /* enable VIU clock */ @@ -1504,12 +1509,12 @@ static int viu_of_probe(struct platform_device *op) if (IS_ERR(clk)) { dev_err(&op->dev, "failed to lookup the clock!\n"); ret = PTR_ERR(clk); - goto err_clk; + goto err_vdev; } ret = clk_prepare_enable(clk); if (ret) { dev_err(&op->dev, "failed to enable the clock!\n"); - goto err_clk; + goto err_vdev; } viu_dev->clk = clk; @@ -1520,7 +1525,7 @@ static int viu_of_probe(struct platform_device *op) if (request_irq(viu_dev->irq, viu_intr, 0, "viu", (void *)viu_dev)) { dev_err(&op->dev, "Request VIU IRQ failed.\n"); ret = -ENODEV; - goto err_irq; + goto err_clk; } mutex_unlock(&viu_dev->lock); @@ -1528,16 +1533,19 @@ static int viu_of_probe(struct platform_device *op) dev_info(&op->dev, "Freescale VIU Video Capture Board\n"); return ret; -err_irq: - clk_disable_unprepare(viu_dev->clk); err_clk: - video_unregister_device(viu_dev->vdev); + clk_disable_unprepare(viu_dev->clk); err_vdev: - v4l2_ctrl_handler_free(&viu_dev->hdl); + video_unregister_device(viu_dev->vdev); +err_unlock: mutex_unlock(&viu_dev->lock); +err_hdl: + v4l2_ctrl_handler_free(&viu_dev->hdl); +err_i2c: i2c_put_adapter(ad); +err_v4l2: v4l2_device_unregister(&viu_dev->v4l2_dev); -err: +err_irq: irq_dispose_mapping(viu_irq); return ret; } diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c index 9f023bc6e1b7e4dafd861a4a0344bb4868dec6b1..6e6e978263b0218f8f6b8b63f45d380cd9b81ad5 100644 --- a/drivers/media/platform/omap3isp/isp.c +++ b/drivers/media/platform/omap3isp/isp.c @@ -305,7 +305,7 @@ static struct 
clk *isp_xclk_src_get(struct of_phandle_args *clkspec, void *data) static int isp_xclk_init(struct isp_device *isp) { struct device_node *np = isp->dev->of_node; - struct clk_init_data init; + struct clk_init_data init = { 0 }; unsigned int i; for (i = 0; i < ARRAY_SIZE(isp->xclks); ++i) diff --git a/drivers/media/platform/qcom/camss-8x16/camss-csid.c b/drivers/media/platform/qcom/camss-8x16/camss-csid.c index 64df82817de3c4ce7a9314302273320fe5d1bb7b..4882ee25bd754d5a839fd0f8d41beffa2841412a 100644 --- a/drivers/media/platform/qcom/camss-8x16/camss-csid.c +++ b/drivers/media/platform/qcom/camss-8x16/camss-csid.c @@ -392,9 +392,6 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable) !media_entity_remote_pad(&csid->pads[MSM_CSID_PAD_SINK])) return -ENOLINK; - dt = csid_get_fmt_entry(csid->fmt[MSM_CSID_PAD_SRC].code)-> - data_type; - if (tg->enabled) { /* Config Test Generator */ struct v4l2_mbus_framefmt *f = @@ -416,6 +413,9 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable) writel_relaxed(val, csid->base + CAMSS_CSID_TG_DT_n_CGG_0(0)); + dt = csid_get_fmt_entry( + csid->fmt[MSM_CSID_PAD_SRC].code)->data_type; + /* 5:0 data type */ val = dt; writel_relaxed(val, csid->base + @@ -425,6 +425,9 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable) val = tg->payload_mode; writel_relaxed(val, csid->base + CAMSS_CSID_TG_DT_n_CGG_2(0)); + + df = csid_get_fmt_entry( + csid->fmt[MSM_CSID_PAD_SRC].code)->decode_format; } else { struct csid_phy_config *phy = &csid->phy; @@ -439,13 +442,16 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable) writel_relaxed(val, csid->base + CAMSS_CSID_CORE_CTRL_1); + + dt = csid_get_fmt_entry( + csid->fmt[MSM_CSID_PAD_SINK].code)->data_type; + df = csid_get_fmt_entry( + csid->fmt[MSM_CSID_PAD_SINK].code)->decode_format; } /* Config LUT */ dt_shift = (cid % 4) * 8; - df = csid_get_fmt_entry(csid->fmt[MSM_CSID_PAD_SINK].code)-> - decode_format; val = readl_relaxed(csid->base + CAMSS_CSID_CID_LUT_VC_n(vc)); val &= ~(0xff << dt_shift); diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c index 0f0324a14d515552939cc697b03ab0f0575ec30a..85d26713cedb911b3fb6d14505f7f2234832137e 100644 --- a/drivers/media/platform/s3c-camif/camif-capture.c +++ b/drivers/media/platform/s3c-camif/camif-capture.c @@ -117,6 +117,8 @@ static int sensor_set_power(struct camif_dev *camif, int on) if (camif->sensor.power_count == !on) err = v4l2_subdev_call(sensor->sd, core, s_power, on); + if (err == -ENOIOCTLCMD) + err = 0; if (!err) sensor->power_count += on ? 1 : -1; diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c index 8e9531f7f83f54fbb7b1df28f017f61058dcc680..9942932ecbf9c04a716fd1c7706d8496a707d7c8 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c @@ -254,24 +254,24 @@ static void s5p_mfc_handle_frame_all_extracted(struct s5p_mfc_ctx *ctx) static void s5p_mfc_handle_frame_copy_time(struct s5p_mfc_ctx *ctx) { struct s5p_mfc_dev *dev = ctx->dev; - struct s5p_mfc_buf *dst_buf, *src_buf; - size_t dec_y_addr; + struct s5p_mfc_buf *dst_buf, *src_buf; + u32 dec_y_addr; unsigned int frame_type; /* Make sure we actually have a new frame before continuing. 
*/ frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev); if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED) return; - dec_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dec_y_adr, dev); + dec_y_addr = (u32)s5p_mfc_hw_call(dev->mfc_ops, get_dec_y_adr, dev); /* Copy timestamp / timecode from decoded src to dst and set appropriate flags. */ src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); list_for_each_entry(dst_buf, &ctx->dst_queue, list) { - if (vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0) - == dec_y_addr) { - dst_buf->b->timecode = - src_buf->b->timecode; + u32 addr = (u32)vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0); + + if (addr == dec_y_addr) { + dst_buf->b->timecode = src_buf->b->timecode; dst_buf->b->vb2_buf.timestamp = src_buf->b->vb2_buf.timestamp; dst_buf->b->flags &= @@ -307,10 +307,10 @@ static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err) { struct s5p_mfc_dev *dev = ctx->dev; struct s5p_mfc_buf *dst_buf; - size_t dspl_y_addr; + u32 dspl_y_addr; unsigned int frame_type; - dspl_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev); + dspl_y_addr = (u32)s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev); if (IS_MFCV6_PLUS(dev)) frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_disp_frame_type, ctx); @@ -329,9 +329,10 @@ static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err) /* The MFC returns address of the buffer, now we have to * check which videobuf does it correspond to */ list_for_each_entry(dst_buf, &ctx->dst_queue, list) { + u32 addr = (u32)vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0); + /* Check if this is the buffer we're looking for */ - if (vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0) - == dspl_y_addr) { + if (addr == dspl_y_addr) { list_del(&dst_buf->list); ctx->dst_queue_cnt--; dst_buf->b->sequence = ctx->sequence; diff --git a/drivers/media/spi/Kconfig b/drivers/media/spi/Kconfig index b07ac86fc53c782cf0980fc86f07d09bb5df0e40..f76dea36ab8a32e5bf4795f1e7b80eee2dccc5d1 100644 --- a/drivers/media/spi/Kconfig +++ b/drivers/media/spi/Kconfig @@ -19,6 +19,7 @@ menu "Media SPI Adapters" config CXD2880_SPI_DRV tristate "Sony CXD2880 SPI support" depends on DVB_CORE && SPI + select DVB_CXD2880 if MEDIA_SUBDRV_AUTOSELECT default m if !MEDIA_SUBDRV_AUTOSELECT help Choose if you would like to have SPI interface support for Sony CXD2880. 
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c index b421329b21fae5a6b41af48b0e59377d70bc88d5..3d09e1c879217aba7a4853034b8f749be004daaf 100644 --- a/drivers/media/usb/dvb-usb/dw2102.c +++ b/drivers/media/usb/dvb-usb/dw2102.c @@ -2103,14 +2103,12 @@ static struct dvb_usb_device_properties s6x0_properties = { } }; -static struct dvb_usb_device_properties *p1100; static const struct dvb_usb_device_description d1100 = { "Prof 1100 USB ", {&dw2102_table[PROF_1100], NULL}, {NULL}, }; -static struct dvb_usb_device_properties *s660; static const struct dvb_usb_device_description d660 = { "TeVii S660 USB", {&dw2102_table[TEVII_S660], NULL}, @@ -2129,14 +2127,12 @@ static const struct dvb_usb_device_description d480_2 = { {NULL}, }; -static struct dvb_usb_device_properties *p7500; static const struct dvb_usb_device_description d7500 = { "Prof 7500 USB DVB-S2", {&dw2102_table[PROF_7500], NULL}, {NULL}, }; -static struct dvb_usb_device_properties *s421; static const struct dvb_usb_device_description d421 = { "TeVii S421 PCI", {&dw2102_table[TEVII_S421], NULL}, @@ -2336,6 +2332,11 @@ static int dw2102_probe(struct usb_interface *intf, const struct usb_device_id *id) { int retval = -ENOMEM; + struct dvb_usb_device_properties *p1100; + struct dvb_usb_device_properties *s660; + struct dvb_usb_device_properties *p7500; + struct dvb_usb_device_properties *s421; + p1100 = kmemdup(&s6x0_properties, sizeof(struct dvb_usb_device_properties), GFP_KERNEL); if (!p1100) @@ -2404,8 +2405,16 @@ static int dw2102_probe(struct usb_interface *intf, 0 == dvb_usb_device_init(intf, &t220_properties, THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, &tt_s2_4600_properties, - THIS_MODULE, NULL, adapter_nr)) + THIS_MODULE, NULL, adapter_nr)) { + + /* clean up copied properties */ + kfree(s421); + kfree(p7500); + kfree(s660); + kfree(p1100); + return 0; + } retval = -ENODEV; kfree(s421); diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c index 097ac321b7e1e35f1d89bd18456ab1d501a20773..349f578273b6ea480077d80d42eb893528afe3bc 100644 --- a/drivers/media/usb/tm6000/tm6000-dvb.c +++ b/drivers/media/usb/tm6000/tm6000-dvb.c @@ -267,6 +267,11 @@ static int register_dvb(struct tm6000_core *dev) ret = dvb_register_adapter(&dvb->adapter, "Trident TVMaster 6000 DVB-T", THIS_MODULE, &dev->udev->dev, adapter_nr); + if (ret < 0) { + pr_err("tm6000: couldn't register the adapter!\n"); + goto err; + } + dvb->adapter.priv = dev; if (dvb->frontend) { diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c index 6d22b22cb35b64e4e1ee4993c8010372c307443b..064d88299adc06ea23b57a596f5fe3745e30cee9 100644 --- a/drivers/media/usb/uvc/uvc_driver.c +++ b/drivers/media/usb/uvc/uvc_driver.c @@ -1865,13 +1865,6 @@ static void uvc_unregister_video(struct uvc_device *dev) { struct uvc_streaming *stream; - /* Unregistering all video devices might result in uvc_delete() being - * called from inside the loop if there's no open file handle. To avoid - * that, increment the refcount before iterating over the streams and - * decrement it when done. 
- */ - kref_get(&dev->ref); - list_for_each_entry(stream, &dev->streams, list) { if (!video_is_registered(&stream->vdev)) continue; @@ -1880,8 +1873,6 @@ static void uvc_unregister_video(struct uvc_device *dev) uvc_debugfs_cleanup_stream(stream); } - - kref_put(&dev->ref, uvc_delete); } static int uvc_register_video(struct uvc_device *dev, @@ -2129,6 +2120,7 @@ static int uvc_probe(struct usb_interface *intf, error: uvc_unregister_video(dev); + kref_put(&dev->ref, uvc_delete); return -ENODEV; } @@ -2146,6 +2138,7 @@ static void uvc_disconnect(struct usb_interface *intf) return; uvc_unregister_video(dev); + kref_put(&dev->ref, uvc_delete); } static int uvc_suspend(struct usb_interface *intf, pm_message_t message) diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c index fb86d6af398d305a123be15e1b4c574888d1354c..a6d8002918831eebb0e60549e0c68c0fa52e84a6 100644 --- a/drivers/media/usb/uvc/uvc_video.c +++ b/drivers/media/usb/uvc/uvc_video.c @@ -163,14 +163,27 @@ static void uvc_fixup_video_ctrl(struct uvc_streaming *stream, } } +static size_t uvc_video_ctrl_size(struct uvc_streaming *stream) +{ + /* + * Return the size of the video probe and commit controls, which depends + * on the protocol version. + */ + if (stream->dev->uvc_version < 0x0110) + return 26; + else if (stream->dev->uvc_version < 0x0150) + return 34; + else + return 48; +} + static int uvc_get_video_ctrl(struct uvc_streaming *stream, struct uvc_streaming_control *ctrl, int probe, __u8 query) { + __u16 size = uvc_video_ctrl_size(stream); __u8 *data; - __u16 size; int ret; - size = stream->dev->uvc_version >= 0x0110 ? 34 : 26; if ((stream->dev->quirks & UVC_QUIRK_PROBE_DEF) && query == UVC_GET_DEF) return -EIO; @@ -225,7 +238,7 @@ static int uvc_get_video_ctrl(struct uvc_streaming *stream, ctrl->dwMaxVideoFrameSize = get_unaligned_le32(&data[18]); ctrl->dwMaxPayloadTransferSize = get_unaligned_le32(&data[22]); - if (size == 34) { + if (size >= 34) { ctrl->dwClockFrequency = get_unaligned_le32(&data[26]); ctrl->bmFramingInfo = data[30]; ctrl->bPreferedVersion = data[31]; @@ -254,11 +267,10 @@ out: static int uvc_set_video_ctrl(struct uvc_streaming *stream, struct uvc_streaming_control *ctrl, int probe) { + __u16 size = uvc_video_ctrl_size(stream); __u8 *data; - __u16 size; int ret; - size = stream->dev->uvc_version >= 0x0110 ? 34 : 26; data = kzalloc(size, GFP_KERNEL); if (data == NULL) return -ENOMEM; @@ -275,7 +287,7 @@ static int uvc_set_video_ctrl(struct uvc_streaming *stream, put_unaligned_le32(ctrl->dwMaxVideoFrameSize, &data[18]); put_unaligned_le32(ctrl->dwMaxPayloadTransferSize, &data[22]); - if (size == 34) { + if (size >= 34) { put_unaligned_le32(ctrl->dwClockFrequency, &data[26]); data[30] = ctrl->bmFramingInfo; data[31] = ctrl->bPreferedVersion; diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c index 968c2eb08b5aba52e18e66221a09b1f351b73337..568dd4affb337a7eb65cc4399db8f0084b4e25ee 100644 --- a/drivers/media/v4l2-core/v4l2-event.c +++ b/drivers/media/v4l2-core/v4l2-event.c @@ -115,14 +115,6 @@ static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *e if (sev == NULL) return; - /* - * If the event has been added to the fh->subscribed list, but its - * add op has not completed yet elems will be 0, treat this as - * not being subscribed. - */ - if (!sev->elems) - return; - /* Increase event sequence number on fh. 
*/ fh->sequence++; @@ -208,6 +200,7 @@ int v4l2_event_subscribe(struct v4l2_fh *fh, struct v4l2_subscribed_event *sev, *found_ev; unsigned long flags; unsigned i; + int ret = 0; if (sub->type == V4L2_EVENT_ALL) return -EINVAL; @@ -226,31 +219,36 @@ int v4l2_event_subscribe(struct v4l2_fh *fh, sev->flags = sub->flags; sev->fh = fh; sev->ops = ops; + sev->elems = elems; + + mutex_lock(&fh->subscribe_lock); spin_lock_irqsave(&fh->vdev->fh_lock, flags); found_ev = v4l2_event_subscribed(fh, sub->type, sub->id); - if (!found_ev) - list_add(&sev->list, &fh->subscribed); spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); if (found_ev) { + /* Already listening */ kvfree(sev); - return 0; /* Already listening */ + goto out_unlock; } if (sev->ops && sev->ops->add) { - int ret = sev->ops->add(sev, elems); + ret = sev->ops->add(sev, elems); if (ret) { - sev->ops = NULL; - v4l2_event_unsubscribe(fh, sub); - return ret; + kvfree(sev); + goto out_unlock; } } - /* Mark as ready for use */ - sev->elems = elems; + spin_lock_irqsave(&fh->vdev->fh_lock, flags); + list_add(&sev->list, &fh->subscribed); + spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); - return 0; +out_unlock: + mutex_unlock(&fh->subscribe_lock); + + return ret; } EXPORT_SYMBOL_GPL(v4l2_event_subscribe); @@ -289,6 +287,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh, return 0; } + mutex_lock(&fh->subscribe_lock); + spin_lock_irqsave(&fh->vdev->fh_lock, flags); sev = v4l2_event_subscribed(fh, sub->type, sub->id); @@ -306,6 +306,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh, if (sev && sev->ops && sev->ops->del) sev->ops->del(sev); + mutex_unlock(&fh->subscribe_lock); + kvfree(sev); return 0; diff --git a/drivers/media/v4l2-core/v4l2-fh.c b/drivers/media/v4l2-core/v4l2-fh.c index 3895999bf8805c3c208a4f042cb59b786a6ec44c..c91a7bd3ecfc7d14853b56a8de273d35ac0ff870 100644 --- a/drivers/media/v4l2-core/v4l2-fh.c +++ b/drivers/media/v4l2-core/v4l2-fh.c @@ -45,6 +45,7 @@ void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev) INIT_LIST_HEAD(&fh->available); INIT_LIST_HEAD(&fh->subscribed); fh->sequence = -1; + mutex_init(&fh->subscribe_lock); } EXPORT_SYMBOL_GPL(v4l2_fh_init); @@ -90,6 +91,7 @@ void v4l2_fh_exit(struct v4l2_fh *fh) return; v4l_disable_media_source(fh->vdev); v4l2_event_unsubscribe_all(fh); + mutex_destroy(&fh->subscribe_lock); fh->vdev = NULL; } EXPORT_SYMBOL_GPL(v4l2_fh_exit); diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c index 2dbf632c10de350d72dc61f1a0b63343deee84ba..43522a09b11d50e10cad6eda3d39cd21edfbc1c5 100644 --- a/drivers/media/v4l2-core/videobuf2-core.c +++ b/drivers/media/v4l2-core/videobuf2-core.c @@ -1373,6 +1373,11 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb) struct vb2_buffer *vb; int ret; + if (q->error) { + dprintk(1, "fatal error occurred on queue\n"); + return -EIO; + } + vb = q->bufs[index]; switch (vb->state) { diff --git a/drivers/mfd/88pm860x-i2c.c b/drivers/mfd/88pm860x-i2c.c index 84e313107233ee8b1e86f0610be093eaa5e1637d..7b9052ea74134a2694f8485f48cb589e9624b793 100644 --- a/drivers/mfd/88pm860x-i2c.c +++ b/drivers/mfd/88pm860x-i2c.c @@ -146,14 +146,14 @@ int pm860x_page_reg_write(struct i2c_client *i2c, int reg, unsigned char zero; int ret; - i2c_lock_adapter(i2c->adapter); + i2c_lock_bus(i2c->adapter, I2C_LOCK_SEGMENT); read_device(i2c, 0xFA, 0, &zero); read_device(i2c, 0xFB, 0, &zero); read_device(i2c, 0xFF, 0, &zero); ret = write_device(i2c, reg, 1, &data); read_device(i2c, 0xFE, 0, &zero); read_device(i2c, 0xFC, 
0, &zero); - i2c_unlock_adapter(i2c->adapter); + i2c_unlock_bus(i2c->adapter, I2C_LOCK_SEGMENT); return ret; } EXPORT_SYMBOL(pm860x_page_reg_write); @@ -164,14 +164,14 @@ int pm860x_page_bulk_read(struct i2c_client *i2c, int reg, unsigned char zero = 0; int ret; - i2c_lock_adapter(i2c->adapter); + i2c_lock_bus(i2c->adapter, I2C_LOCK_SEGMENT); read_device(i2c, 0xfa, 0, &zero); read_device(i2c, 0xfb, 0, &zero); read_device(i2c, 0xff, 0, &zero); ret = read_device(i2c, reg, count, buf); read_device(i2c, 0xFE, 0, &zero); read_device(i2c, 0xFC, 0, &zero); - i2c_unlock_adapter(i2c->adapter); + i2c_unlock_bus(i2c->adapter, I2C_LOCK_SEGMENT); return ret; } EXPORT_SYMBOL(pm860x_page_bulk_read); diff --git a/drivers/mfd/hi655x-pmic.c b/drivers/mfd/hi655x-pmic.c index c37ccbfd52f2a14be65c0008b96697f55029698c..96c07fa1802adcce7d48f971410096b4b2a4f479 100644 --- a/drivers/mfd/hi655x-pmic.c +++ b/drivers/mfd/hi655x-pmic.c @@ -49,7 +49,7 @@ static struct regmap_config hi655x_regmap_config = { .reg_bits = 32, .reg_stride = HI655X_STRIDE, .val_bits = 8, - .max_register = HI655X_BUS_ADDR(0xFFF), + .max_register = HI655X_BUS_ADDR(0x400) - HI655X_STRIDE, }; static struct resource pwrkey_resources[] = { diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c index 40534352e57487191978492991dfe1154d608b02..3270b8dbc94982ebb447d9c3ace5d1000c28a670 100644 --- a/drivers/mfd/sm501.c +++ b/drivers/mfd/sm501.c @@ -714,6 +714,7 @@ sm501_create_subdev(struct sm501_devdata *sm, char *name, smdev->pdev.name = name; smdev->pdev.id = sm->pdev_id; smdev->pdev.dev.parent = sm->dev; + smdev->pdev.dev.coherent_dma_mask = 0xffffffff; if (res_count) { smdev->pdev.resource = (struct resource *)(smdev+1); diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c index 0f3fab47fe48052682fc686ee62381dd1e9c719b..7dc1cbcd2fb8951150c633290b02392c4f041543 100644 --- a/drivers/mfd/ti_am335x_tscadc.c +++ b/drivers/mfd/ti_am335x_tscadc.c @@ -210,14 +210,13 @@ static int ti_tscadc_probe(struct platform_device *pdev) * The TSC_ADC_SS controller design assumes the OCP clock is * at least 6x faster than the ADC clock. */ - clk = clk_get(&pdev->dev, "adc_tsc_fck"); + clk = devm_clk_get(&pdev->dev, "adc_tsc_fck"); if (IS_ERR(clk)) { dev_err(&pdev->dev, "failed to get TSC fck\n"); err = PTR_ERR(clk); goto err_disable_clk; } clock_rate = clk_get_rate(clk); - clk_put(clk); tscadc->clk_div = clock_rate / ADC_CLK; /* TSCADC_CLKDIV needs to be configured to the value minus 1 */ diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c index c1ba0d42cbc865467334c25a0b62a3f1f8932a21..e0f29b8a872db807fc7e899d25ecfdc87be156d1 100644 --- a/drivers/misc/cxl/main.c +++ b/drivers/misc/cxl/main.c @@ -287,7 +287,7 @@ int cxl_adapter_context_get(struct cxl *adapter) int rc; rc = atomic_inc_unless_negative(&adapter->contexts_num); - return rc >= 0 ? 0 : -EBUSY; + return rc ? 
0 : -EBUSY; } void cxl_adapter_context_put(struct cxl *adapter) diff --git a/drivers/misc/hmc6352.c b/drivers/misc/hmc6352.c index eeb7eef62174c4dfb5c8854efbef2d798f161f1e..38f90e17992717d7cd0f12ad904490abbaa2d1a5 100644 --- a/drivers/misc/hmc6352.c +++ b/drivers/misc/hmc6352.c @@ -27,6 +27,7 @@ #include #include #include +#include static DEFINE_MUTEX(compass_mutex); @@ -50,6 +51,7 @@ static int compass_store(struct device *dev, const char *buf, size_t count, return ret; if (val >= strlen(map)) return -EINVAL; + val = array_index_nospec(val, strlen(map)); mutex_lock(&compass_mutex); ret = compass_command(c, map[val]); mutex_unlock(&compass_mutex); diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c index 0208c4b027c52b54dad37bddb13fe1288347b477..fa0236a5e59a39c751f6b3eee150d37e97836684 100644 --- a/drivers/misc/mei/bus-fixup.c +++ b/drivers/misc/mei/bus-fixup.c @@ -267,7 +267,7 @@ static int mei_nfc_if_version(struct mei_cl *cl, ret = 0; bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length, 0); - if (bytes_recv < if_version_length) { + if (bytes_recv < 0 || bytes_recv < if_version_length) { dev_err(bus->dev, "Could not read IF version\n"); ret = -EIO; goto err; diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index 1ac10cb64d6ed3e7d5ad20c1d5ebb1ec323fa05d..37b13bc5c16fe4b7d7ab87d6d4cc6d39b3b9ca33 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c @@ -465,17 +465,15 @@ int mei_cldev_enable(struct mei_cl_device *cldev) cl = cldev->cl; + mutex_lock(&bus->device_lock); if (cl->state == MEI_FILE_UNINITIALIZED) { - mutex_lock(&bus->device_lock); ret = mei_cl_link(cl); - mutex_unlock(&bus->device_lock); if (ret) - return ret; + goto out; /* update pointers */ cl->cldev = cldev; } - mutex_lock(&bus->device_lock); if (mei_cl_is_connected(cl)) { ret = 0; goto out; @@ -841,12 +839,13 @@ static void mei_cl_bus_dev_release(struct device *dev) mei_me_cl_put(cldev->me_cl); mei_dev_bus_put(cldev->bus); + mei_cl_unlink(cldev->cl); kfree(cldev->cl); kfree(cldev); } static const struct device_type mei_cl_device_type = { - .release = mei_cl_bus_dev_release, + .release = mei_cl_bus_dev_release, }; /** diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index fe6595fe94f1f4229bd1851cef7d5060952cb294..995ff1b7e7b5b7baf423716ab63d39af903e3e0b 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c @@ -1140,15 +1140,18 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) props_res = (struct hbm_props_response *)mei_msg; - if (props_res->status) { + if (props_res->status == MEI_HBMS_CLIENT_NOT_FOUND) { + dev_dbg(dev->dev, "hbm: properties response: %d CLIENT_NOT_FOUND\n", + props_res->me_addr); + } else if (props_res->status) { dev_err(dev->dev, "hbm: properties response: wrong status = %d %s\n", props_res->status, mei_hbm_status_str(props_res->status)); return -EPROTO; + } else { + mei_hbm_me_cl_add(dev, props_res); } - mei_hbm_me_cl_add(dev, props_res); - /* request property for the next client */ if (mei_hbm_prop_req(dev, props_res->me_addr + 1)) return -EIO; diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c index 22efc039f3022237d1bc65b625cabb6ddb9fa3b1..8d1d40dbf7448fb51f2c8b70e5a88e8a404583f8 100644 --- a/drivers/misc/mei/main.c +++ b/drivers/misc/mei/main.c @@ -291,7 +291,6 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf, goto out; } - *offset = 0; cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file); if (!cb) { rets = -ENOMEM; diff --git a/drivers/misc/mic/scif/scif_api.c 
b/drivers/misc/mic/scif/scif_api.c index ddc9e4b08b5cd809f4ce6e43fe1ec313dfb75e0d..56efa9d18a9afec2ddaea8bb76fc7ec34c14ce78 100644 --- a/drivers/misc/mic/scif/scif_api.c +++ b/drivers/misc/mic/scif/scif_api.c @@ -370,11 +370,10 @@ int scif_bind(scif_epd_t epd, u16 pn) goto scif_bind_exit; } } else { - pn = scif_get_new_port(); - if (!pn) { - ret = -ENOSPC; + ret = scif_get_new_port(); + if (ret < 0) goto scif_bind_exit; - } + pn = ret; } ep->state = SCIFEP_BOUND; @@ -648,13 +647,12 @@ int __scif_connect(scif_epd_t epd, struct scif_port_id *dst, bool non_block) err = -EISCONN; break; case SCIFEP_UNBOUND: - ep->port.port = scif_get_new_port(); - if (!ep->port.port) { - err = -ENOSPC; - } else { - ep->port.node = scif_info.nodeid; - ep->conn_async_state = ASYNC_CONN_IDLE; - } + err = scif_get_new_port(); + if (err < 0) + break; + ep->port.port = err; + ep->port.node = scif_info.nodeid; + ep->conn_async_state = ASYNC_CONN_IDLE; /* Fall through */ case SCIFEP_BOUND: /* diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c index 4dd0d868ff88818f7bcb8db51cac461cb6541684..8daefb81ba294ca4a5dd3455b5c9a82ec1ee5f2a 100644 --- a/drivers/misc/sram.c +++ b/drivers/misc/sram.c @@ -391,23 +391,23 @@ static int sram_probe(struct platform_device *pdev) if (IS_ERR(sram->pool)) return PTR_ERR(sram->pool); - ret = sram_reserve_regions(sram, res); - if (ret) - return ret; - sram->clk = devm_clk_get(sram->dev, NULL); if (IS_ERR(sram->clk)) sram->clk = NULL; else clk_prepare_enable(sram->clk); + ret = sram_reserve_regions(sram, res); + if (ret) + goto err_disable_clk; + platform_set_drvdata(pdev, sram); init_func = of_device_get_match_data(&pdev->dev); if (init_func) { ret = init_func(); if (ret) - goto err_disable_clk; + goto err_free_partitions; } dev_dbg(sram->dev, "SRAM pool: %zu KiB @ 0x%p\n", @@ -415,10 +415,11 @@ static int sram_probe(struct platform_device *pdev) return 0; +err_free_partitions: + sram_free_partitions(sram); err_disable_clk: if (sram->clk) clk_disable_unprepare(sram->clk); - sram_free_partitions(sram); return ret; } diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c index b77aacafc3fcd96bd52af1bb55a60123b22bdcf6..dda3ed72d05bc6e1284e8e860c6ac03657025bdb 100644 --- a/drivers/misc/ti-st/st_kim.c +++ b/drivers/misc/ti-st/st_kim.c @@ -756,14 +756,14 @@ static int kim_probe(struct platform_device *pdev) err = gpio_request(kim_gdata->nshutdown, "kim"); if (unlikely(err)) { pr_err(" gpio %d request failed ", kim_gdata->nshutdown); - return err; + goto err_sysfs_group; } /* Configure nShutdown GPIO as output=0 */ err = gpio_direction_output(kim_gdata->nshutdown, 0); if (unlikely(err)) { pr_err(" unable to configure gpio %d", kim_gdata->nshutdown); - return err; + goto err_sysfs_group; } /* get reference of pdev for request_firmware */ diff --git a/drivers/misc/tsl2550.c b/drivers/misc/tsl2550.c index adf46072cb37d4a41a3ec95a9fd7d8a52890e677..3fce3b6a36246b28f9c81e339ba83bc67f0a08d2 100644 --- a/drivers/misc/tsl2550.c +++ b/drivers/misc/tsl2550.c @@ -177,7 +177,7 @@ static int tsl2550_calculate_lux(u8 ch0, u8 ch1) } else lux = 0; else - return -EAGAIN; + return 0; /* LUX range check */ return lux > TSL2550_MAX_LUX ? 
TSL2550_MAX_LUX : lux; diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c index 56c6f79a5c5af83a862cf76352f172f5e02ee0b8..f74166aa9a0dc09037fec9797838c8f78edf156d 100644 --- a/drivers/misc/vmw_balloon.c +++ b/drivers/misc/vmw_balloon.c @@ -45,6 +45,7 @@ #include #include #include +#include #include MODULE_AUTHOR("VMware, Inc."); @@ -341,7 +342,13 @@ static bool vmballoon_send_start(struct vmballoon *b, unsigned long req_caps) success = false; } - if (b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) + /* + * 2MB pages are only supported with batching. If batching is for some + * reason disabled, do not use 2MB pages, since otherwise the legacy + * mechanism is used with 2MB pages, causing a failure. + */ + if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) && + (b->capabilities & VMW_BALLOON_BATCHED_CMDS)) b->supported_page_sizes = 2; else b->supported_page_sizes = 1; @@ -450,7 +457,7 @@ static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn, pfn32 = (u32)pfn; if (pfn32 != pfn) - return -1; + return -EINVAL; STATS_INC(b->stats.lock[false]); @@ -460,7 +467,7 @@ static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn, pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status); STATS_INC(b->stats.lock_fail[false]); - return 1; + return -EIO; } static int vmballoon_send_batched_lock(struct vmballoon *b, @@ -597,11 +604,12 @@ static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages, locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status, target); - if (locked > 0) { + if (locked) { STATS_INC(b->stats.refused_alloc[false]); - if (hv_status == VMW_BALLOON_ERROR_RESET || - hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) { + if (locked == -EIO && + (hv_status == VMW_BALLOON_ERROR_RESET || + hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED)) { vmballoon_free_page(page, false); return -EIO; } @@ -617,7 +625,7 @@ static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages, } else { vmballoon_free_page(page, false); } - return -EIO; + return locked; } /* track allocated page */ @@ -1029,29 +1037,30 @@ static void vmballoon_vmci_cleanup(struct vmballoon *b) */ static int vmballoon_vmci_init(struct vmballoon *b) { - int error = 0; + unsigned long error, dummy; - if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) != 0) { - error = vmci_doorbell_create(&b->vmci_doorbell, - VMCI_FLAG_DELAYED_CB, - VMCI_PRIVILEGE_FLAG_RESTRICTED, - vmballoon_doorbell, b); - - if (error == VMCI_SUCCESS) { - VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET, - b->vmci_doorbell.context, - b->vmci_doorbell.resource, error); - STATS_INC(b->stats.doorbell_set); - } - } + if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0) + return 0; - if (error != 0) { - vmballoon_vmci_cleanup(b); + error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB, + VMCI_PRIVILEGE_FLAG_RESTRICTED, + vmballoon_doorbell, b); - return -EIO; - } + if (error != VMCI_SUCCESS) + goto fail; + + error = VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET, b->vmci_doorbell.context, + b->vmci_doorbell.resource, dummy); + + STATS_INC(b->stats.doorbell_set); + + if (error != VMW_BALLOON_SUCCESS) + goto fail; return 0; +fail: + vmballoon_vmci_cleanup(b); + return -EIO; } /* @@ -1289,7 +1298,14 @@ static int __init vmballoon_init(void) return 0; } -module_init(vmballoon_init); + +/* + * Using late_initcall() instead of module_init() allows the balloon to use the + * VMCI doorbell even when the balloon is built into the kernel. 
Otherwise the + * VMCI is probed only after the balloon is initialized. If the balloon is used + * as a module, late_initcall() is equivalent to module_init(). + */ +late_initcall(vmballoon_init); static void __exit vmballoon_exit(void) { diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c index 8af5c2672f71cec186af5daf4dc50008fe771c7a..b4570d5c1fe7de4be6b0eeca109ea9797a3b1000 100644 --- a/drivers/misc/vmw_vmci/vmci_queue_pair.c +++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c @@ -755,7 +755,7 @@ static int qp_host_get_user_memory(u64 produce_uva, retval = get_user_pages_fast((uintptr_t) produce_uva, produce_q->kernel_if->num_pages, 1, produce_q->kernel_if->u.h.header_page); - if (retval < produce_q->kernel_if->num_pages) { + if (retval < (int)produce_q->kernel_if->num_pages) { pr_debug("get_user_pages_fast(produce) failed (retval=%d)", retval); qp_release_pages(produce_q->kernel_if->u.h.header_page, @@ -767,7 +767,7 @@ static int qp_host_get_user_memory(u64 produce_uva, retval = get_user_pages_fast((uintptr_t) consume_uva, consume_q->kernel_if->num_pages, 1, consume_q->kernel_if->u.h.header_page); - if (retval < consume_q->kernel_if->num_pages) { + if (retval < (int)consume_q->kernel_if->num_pages) { pr_debug("get_user_pages_fast(consume) failed (retval=%d)", retval); qp_release_pages(consume_q->kernel_if->u.h.header_page, diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 3b5e6d11069bb58b80f3da5f257d617af9664571..9e03fada16dcbb7baa93ab445d2deeeaf6aff5c2 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -2194,6 +2194,7 @@ static int omap_hsmmc_remove(struct platform_device *pdev) dma_release_channel(host->tx_chan); dma_release_channel(host->rx_chan); + dev_pm_clear_wake_irq(host->dev); pm_runtime_dont_use_autosuspend(host->dev); pm_runtime_put_sync(host->dev); pm_runtime_disable(host->dev); diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c index 8bae88a150fd45b3284b594d2d090d4f78fe1fc7..713658be66614264640aa27db20b3fbf25a5e51c 100644 --- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c @@ -44,7 +44,7 @@ /* DM_CM_RST */ #define RST_DTRANRST1 BIT(9) #define RST_DTRANRST0 BIT(8) -#define RST_RESERVED_BITS GENMASK_ULL(32, 0) +#define RST_RESERVED_BITS GENMASK_ULL(31, 0) /* DM_CM_INFO1 and DM_CM_INFO1_MASK */ #define INFO1_CLEAR 0 diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index 4ffa6b173a21ef4a09a62108830869de16095ee2..8332f56e6c0da1bf2be953176b219b8ca73ea142 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include "sdhci-pltfm.h" #include "sdhci-esdhc.h" @@ -427,6 +428,11 @@ static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask) static int esdhc_of_enable_dma(struct sdhci_host *host) { u32 value; + struct device *dev = mmc_dev(host->mmc); + + if (of_device_is_compatible(dev->of_node, "fsl,ls1043a-esdhc") || + of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc")) + dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40)); value = sdhci_readl(host, ESDHC_DMA_SYSCTL); value |= ESDHC_DMA_SNOOP; diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c index 0cd6fa80db662a3ed2d8b10014047d50af4f2484..ce3f344d2b66ec70cf15c129c1cc89f592e5edfa 100644 --- a/drivers/mmc/host/sdhci-tegra.c +++ 
b/drivers/mmc/host/sdhci-tegra.c @@ -334,7 +334,8 @@ static const struct sdhci_pltfm_data sdhci_tegra30_pdata = { SDHCI_QUIRK_NO_HISPD_BIT | SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC | SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, - .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | + SDHCI_QUIRK2_BROKEN_HS200, .ops = &tegra_sdhci_ops, }; diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index d35deb79965d2203f010c09223c723e4074b4b5b..f063fe569339bbb4b87e549f427c6817ce36ccd0 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -3631,14 +3631,21 @@ int sdhci_setup_host(struct sdhci_host *host) mmc_gpio_get_cd(host->mmc) < 0) mmc->caps |= MMC_CAP_NEEDS_POLL; - /* If vqmmc regulator and no 1.8V signalling, then there's no UHS */ if (!IS_ERR(mmc->supply.vqmmc)) { ret = regulator_enable(mmc->supply.vqmmc); + + /* If vqmmc provides no 1.8V signalling, then there's no UHS */ if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000, 1950000)) host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_DDR50); + + /* In eMMC case vqmmc might be a fixed 1.8V regulator */ + if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000, + 3600000)) + host->flags &= ~SDHCI_SIGNALING_330; + if (ret) { pr_warn("%s: Failed to enable vqmmc regulator: %d\n", mmc_hostname(mmc), ret); diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c index 5dc8bd042cc54b2d07407f123d0ee62bce738594..504e34f295180fd97bec354a01b47c5a484665f9 100644 --- a/drivers/mtd/devices/mtd_dataflash.c +++ b/drivers/mtd/devices/mtd_dataflash.c @@ -737,8 +737,8 @@ static struct flash_info dataflash_data[] = { { "AT45DB642x", 0x1f2800, 8192, 1056, 11, SUP_POW2PS}, { "at45db642d", 0x1f2800, 8192, 1024, 10, SUP_POW2PS | IS_POW2PS}, - { "AT45DB641E", 0x1f28000100, 32768, 264, 9, SUP_EXTID | SUP_POW2PS}, - { "at45db641e", 0x1f28000100, 32768, 256, 8, SUP_EXTID | SUP_POW2PS | IS_POW2PS}, + { "AT45DB641E", 0x1f28000100ULL, 32768, 264, 9, SUP_EXTID | SUP_POW2PS}, + { "at45db641e", 0x1f28000100ULL, 32768, 256, 8, SUP_EXTID | SUP_POW2PS | IS_POW2PS}, }; static struct flash_info *jedec_lookup(struct spi_device *spi, diff --git a/drivers/mtd/maps/solutionengine.c b/drivers/mtd/maps/solutionengine.c index bb580bc164459a3dee5b27ef34125af8f31ba2d5..c07f21b20463252e63a0ac0f313f75a7c4c05eaa 100644 --- a/drivers/mtd/maps/solutionengine.c +++ b/drivers/mtd/maps/solutionengine.c @@ -59,9 +59,9 @@ static int __init init_soleng_maps(void) return -ENXIO; } } - printk(KERN_NOTICE "Solution Engine: Flash at 0x%08lx, EPROM at 0x%08lx\n", - soleng_flash_map.phys & 0x1fffffff, - soleng_eprom_map.phys & 0x1fffffff); + printk(KERN_NOTICE "Solution Engine: Flash at 0x%pap, EPROM at 0x%pap\n", + &soleng_flash_map.phys, + &soleng_eprom_map.phys); flash_mtd->owner = THIS_MODULE; eprom_mtd = do_map_probe("map_rom", &soleng_eprom_map); diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c index b25f444c5914b09980f54c50f88edd3743fe3097..fa4d12217652b542fdd5683a30b6c9cbbbfd9522 100644 --- a/drivers/mtd/mtdchar.c +++ b/drivers/mtd/mtdchar.c @@ -160,8 +160,12 @@ static ssize_t mtdchar_read(struct file *file, char __user *buf, size_t count, pr_debug("MTD_read\n"); - if (*ppos + count > mtd->size) - count = mtd->size - *ppos; + if (*ppos + count > mtd->size) { + if (*ppos < mtd->size) + count = mtd->size - *ppos; + else + count = 0; + } if (!count) return 0; @@ -246,7 +250,7 @@ static ssize_t mtdchar_write(struct file *file, const char __user *buf, size_t c 
pr_debug("MTD_write\n"); - if (*ppos == mtd->size) + if (*ppos >= mtd->size) return -ENOSPC; if (*ppos + count > mtd->size) diff --git a/drivers/mtd/nand/atmel/nand-controller.c b/drivers/mtd/nand/atmel/nand-controller.c index 68c9d98a3347c6ad4592979da0693de7cd27ab8e..148744418e82b768c558cf3c7395d77cd9ddba63 100644 --- a/drivers/mtd/nand/atmel/nand-controller.c +++ b/drivers/mtd/nand/atmel/nand-controller.c @@ -129,6 +129,11 @@ #define DEFAULT_TIMEOUT_MS 1000 #define MIN_DMA_LEN 128 +static bool atmel_nand_avoid_dma __read_mostly; + +MODULE_PARM_DESC(avoiddma, "Avoid using DMA"); +module_param_named(avoiddma, atmel_nand_avoid_dma, bool, 0400); + enum atmel_nand_rb_type { ATMEL_NAND_NO_RB, ATMEL_NAND_NATIVE_RB, @@ -1975,7 +1980,7 @@ static int atmel_nand_controller_init(struct atmel_nand_controller *nc, return ret; } - if (nc->caps->has_dma) { + if (nc->caps->has_dma && !atmel_nand_avoid_dma) { dma_cap_mask_t mask; dma_cap_zero(mask); diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 23a6986d512b4c75ffd22f5ca51b58a8609e5f75..a8f74d9bba4ff82c6d3430e3656f90dc7d5c4b47 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -1615,8 +1615,10 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) cond_resched(); e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); - if (!e) + if (!e) { + err = -ENOMEM; goto out_free; + } e->pnum = aeb->pnum; e->ec = aeb->ec; @@ -1635,8 +1637,10 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) cond_resched(); e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); - if (!e) + if (!e) { + err = -ENOMEM; goto out_free; + } e->pnum = aeb->pnum; e->ec = aeb->ec; diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c index 9375cef2242053c1f1ade3323d04d7dd4bea77af..3d27616d9c85540304a8d78c4a2f050c0866b9a2 100644 --- a/drivers/net/appletalk/ipddp.c +++ b/drivers/net/appletalk/ipddp.c @@ -283,8 +283,12 @@ static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) case SIOCFINDIPDDPRT: spin_lock_bh(&ipddp_route_lock); rp = __ipddp_find_route(&rcp); - if (rp) - memcpy(&rcp2, rp, sizeof(rcp2)); + if (rp) { + memset(&rcp2, 0, sizeof(rcp2)); + rcp2.ip = rp->ip; + rcp2.at = rp->at; + rcp2.flags = rp->flags; + } spin_unlock_bh(&ipddp_route_lock); if (rp) { diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index ca3fa82316c2a9940865c4b7b056f76fd268db55..d3ce904e929ec81756fdf27ca53921446bbfbe9f 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -1637,8 +1637,6 @@ static int m_can_plat_probe(struct platform_device *pdev) priv->can.clock.freq = clk_get_rate(cclk); priv->mram_base = mram_addr; - m_can_of_parse_mram(priv, mram_config_vals); - platform_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); @@ -1649,6 +1647,8 @@ static int m_can_plat_probe(struct platform_device *pdev) goto failed_free_dev; } + m_can_of_parse_mram(priv, mram_config_vals); + devm_can_led_init(dev); dev_info(&pdev->dev, "%s device registered (irq=%d, version=%d)\n", @@ -1698,8 +1698,6 @@ static __maybe_unused int m_can_resume(struct device *dev) pinctrl_pm_select_default_state(dev); - m_can_init_ram(priv); - priv->can.state = CAN_STATE_ERROR_ACTIVE; if (netif_running(ndev)) { @@ -1709,6 +1707,7 @@ static __maybe_unused int m_can_resume(struct device *dev) if (ret) return ret; + m_can_init_ram(priv); m_can_start(ndev); netif_device_attach(ndev); netif_start_queue(ndev); diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c 
index c7427bdd3a4bff957aaef3fdb1d2f8ed0ead41cb..2949a381a94dceb2674f150ad5feaf580a201d25 100644 --- a/drivers/net/can/mscan/mpc5xxx_can.c +++ b/drivers/net/can/mscan/mpc5xxx_can.c @@ -86,6 +86,11 @@ static u32 mpc52xx_can_get_clock(struct platform_device *ofdev, return 0; } cdm = of_iomap(np_cdm, 0); + if (!cdm) { + of_node_put(np_cdm); + dev_err(&ofdev->dev, "can't map clock node!\n"); + return 0; + } if (in_8(&cdm->ipb_clk_sel) & 0x1) freq *= 2; diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig index 5b7658bcf0209546a4577d708123231c55f97960..5c3ef9fc8207e3de01b86e4c538633ecc8a9f273 100644 --- a/drivers/net/ethernet/3com/Kconfig +++ b/drivers/net/ethernet/3com/Kconfig @@ -32,7 +32,7 @@ config EL3 config 3C515 tristate "3c515 ISA \"Fast EtherLink\"" - depends on ISA && ISA_DMA_API + depends on ISA && ISA_DMA_API && !PPC32 ---help--- If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet network card, say Y here. diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig index d5c15e8bb3de706b12d343ee1a50477b23ab3d3f..a8e8f4e9c1bb6009e78150a4665814381c391158 100644 --- a/drivers/net/ethernet/amd/Kconfig +++ b/drivers/net/ethernet/amd/Kconfig @@ -44,7 +44,7 @@ config AMD8111_ETH config LANCE tristate "AMD LANCE and PCnet (AT1500 and NE2100) support" - depends on ISA && ISA_DMA_API && !ARM + depends on ISA && ISA_DMA_API && !ARM && !PPC32 ---help--- If you have a network (Ethernet) card of this type, say Y here. Some LinkSys cards are of this type. @@ -138,7 +138,7 @@ config PCMCIA_NMCLAN config NI65 tristate "NI6510 support" - depends on ISA && ISA_DMA_API && !ARM + depends on ISA && ISA_DMA_API && !ARM && !PPC32 ---help--- If you have a network (Ethernet) card of this type, say Y here. 
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c index 45d92304068eb5ddb4d48a7fa57996d340732429..a5eaf174d914c3d7057a51c577d1d9fc8ac8ec85 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c @@ -289,7 +289,7 @@ static int xgbe_alloc_pages(struct xgbe_prv_data *pdata, struct page *pages = NULL; dma_addr_t pages_dma; gfp_t gfp; - int order, ret; + int order; again: order = alloc_order; @@ -316,10 +316,9 @@ again: /* Map the pages */ pages_dma = dma_map_page(pdata->dev, pages, 0, PAGE_SIZE << order, DMA_FROM_DEVICE); - ret = dma_mapping_error(pdata->dev, pages_dma); - if (ret) { + if (dma_mapping_error(pdata->dev, pages_dma)) { put_page(pages); - return ret; + return -ENOMEM; } pa->pages = pages; diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 8c9986f3fc0186701bd9ae81f27cbb1519e9f25f..3615c2a06fdadf425e45466b58159d3136c88b2d 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -1685,6 +1685,7 @@ static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter) skb = build_skb(page_address(page) + adapter->rx_page_offset, adapter->rx_frag_size); if (likely(skb)) { + skb_reserve(skb, NET_SKB_PAD); adapter->rx_page_offset += adapter->rx_frag_size; if (adapter->rx_page_offset >= PAGE_SIZE) adapter->rx_page = NULL; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 352beff796ae5b090d8e3fa831078cc7182d3a2c..828e2e56b75e7617c03ba328b6a23d1d20299c84 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1529,6 +1529,7 @@ struct bnx2x { struct link_vars link_vars; u32 link_cnt; struct bnx2x_link_report_data last_reported_link; + bool force_link_down; struct mdio_if_info mdio; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 6465414dad7478ee9b0cda023512a10517cb9049..8498a357d3895f2fbcbaf374444ae84dbd63e906 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -1260,6 +1260,11 @@ void __bnx2x_link_report(struct bnx2x *bp) { struct bnx2x_link_report_data cur_data; + if (bp->force_link_down) { + bp->link_vars.link_up = 0; + return; + } + /* reread mf_cfg */ if (IS_PF(bp) && !CHIP_IS_E1(bp)) bnx2x_read_mf_cfg(bp); @@ -2817,6 +2822,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) bp->pending_max = 0; } + bp->force_link_down = false; if (bp->port.pmf) { rc = bnx2x_initial_phy_init(bp, load_mode); if (rc) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 1e33abde4a3e8c833386df176896ad9684c9dc10..3fd1085a093fa95a6a1bcac895d72a1e5d5063fb 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -3387,14 +3387,18 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info) DP(BNX2X_MSG_ETHTOOL, "rss re-configured, UDP 4-tupple %s\n", udp_rss_requested ? 
"enabled" : "disabled"); - return bnx2x_rss(bp, &bp->rss_conf_obj, false, true); + if (bp->state == BNX2X_STATE_OPEN) + return bnx2x_rss(bp, &bp->rss_conf_obj, false, + true); } else if ((info->flow_type == UDP_V6_FLOW) && (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) { bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested; DP(BNX2X_MSG_ETHTOOL, "rss re-configured, UDP 4-tupple %s\n", udp_rss_requested ? "enabled" : "disabled"); - return bnx2x_rss(bp, &bp->rss_conf_obj, false, true); + if (bp->state == BNX2X_STATE_OPEN) + return bnx2x_rss(bp, &bp->rss_conf_obj, false, + true); } return 0; @@ -3508,7 +3512,10 @@ static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir, bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id; } - return bnx2x_config_rss_eth(bp, false); + if (bp->state == BNX2X_STATE_OPEN) + return bnx2x_config_rss_eth(bp, false); + + return 0; } /** diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index e855a271db48e8e6bc58191708c22c6fef0ff3ba..bd3e3f080ebf835d9df96ea07284b561e0c37180 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -10279,6 +10279,12 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work) bp->sp_rtnl_state = 0; smp_mb(); + /* Immediately indicate link as down */ + bp->link_vars.link_up = 0; + bp->force_link_down = true; + netif_carrier_off(bp->dev); + BNX2X_ERR("Indicating link is down due to Tx-timeout\n"); + bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); bnx2x_nic_load(bp, LOAD_NORMAL); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 94931318587c141c81658c6088c2ce76f9654896..937db801928922358cb8607f2f8ad97fb81b2506 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -6348,7 +6348,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) rc = bnxt_request_irq(bp); if (rc) { netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc); - goto open_err; + goto open_err_irq; } } @@ -6386,6 +6386,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) open_err: bnxt_disable_napi(bp); + +open_err_irq: bnxt_del_napi(bp); open_err_free_mem: @@ -7866,11 +7868,11 @@ int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared) int rx, tx, cp; _bnxt_get_max_rings(bp, &rx, &tx, &cp); + *max_rx = rx; + *max_tx = tx; if (!rx || !tx || !cp) return -ENOMEM; - *max_rx = rx; - *max_tx = tx; return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared); } @@ -7884,8 +7886,11 @@ static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx, /* Not enough rings, try disabling agg rings. 
*/ bp->flags &= ~BNXT_FLAG_AGG_RINGS; rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); - if (rc) + if (rc) { + /* set BNXT_FLAG_AGG_RINGS back for consistency */ + bp->flags |= BNXT_FLAG_AGG_RINGS; return rc; + } bp->flags |= BNXT_FLAG_NO_AGG_RINGS; bp->dev->hw_features &= ~NETIF_F_LRO; bp->dev->features &= ~NETIF_F_LRO; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index 4c49d0b9774839e073d35914f4932f110a1ec32b..9d499c5c8f8aaa2989190c6203170f6d456b17c6 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -185,6 +185,9 @@ struct bcmgenet_mib_counters { #define UMAC_MAC1 0x010 #define UMAC_MAX_FRAME_LEN 0x014 +#define UMAC_MODE 0x44 +#define MODE_LINK_STATUS (1 << 5) + #define UMAC_EEE_CTRL 0x064 #define EN_LPI_RX_PAUSE (1 << 0) #define EN_LPI_TX_PFC (1 << 1) diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 18f5723be2c91b1e533a21ef14e342fb9502961c..6ad0ca7ed3e919d0c75f3dd41508f18889523c49 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -115,8 +115,14 @@ void bcmgenet_mii_setup(struct net_device *dev) static int bcmgenet_fixed_phy_link_update(struct net_device *dev, struct fixed_phy_status *status) { - if (dev && dev->phydev && status) - status->link = dev->phydev->link; + struct bcmgenet_priv *priv; + u32 reg; + + if (dev && dev->phydev && status) { + priv = netdev_priv(dev); + reg = bcmgenet_umac_readl(priv, UMAC_MODE); + status->link = !!(reg & MODE_LINK_STATUS); + } return 0; } diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 6df2cad61647a6d7067df5c30371fbd6eda17923..dfef4ec167c186bd9a1f0b5d917b504240361e14 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -1884,14 +1884,17 @@ static void macb_reset_hw(struct macb *bp) { struct macb_queue *queue; unsigned int q; + u32 ctrl = macb_readl(bp, NCR); /* Disable RX and TX (XXX: Should we halt the transmission * more gracefully?) */ - macb_writel(bp, NCR, 0); + ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE)); /* Clear the stats registers (XXX: Update stats first?) 
*/ - macb_writel(bp, NCR, MACB_BIT(CLRSTAT)); + ctrl |= MACB_BIT(CLRSTAT); + + macb_writel(bp, NCR, ctrl); /* Clear all status flags */ macb_writel(bp, TSR, -1); @@ -2070,7 +2073,7 @@ static void macb_init_hw(struct macb *bp) } /* Enable TX and RX */ - macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE)); + macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE)); } /* The hash address register is 64 bits long and takes up two diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c index e8b290473ee25fe3c6f5b9683c86b87254d3767b..2e089b5ff8f327aeb2dbbac6f0076518d75d3de5 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c @@ -493,6 +493,9 @@ static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct) for (q_no = srn; q_no < ern; q_no++) { reg_val = octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no)); + /* clear IPTR */ + reg_val &= ~CN23XX_PKT_OUTPUT_CTL_IPTR; + /* set DPTR */ reg_val |= CN23XX_PKT_OUTPUT_CTL_DPTR; diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c index 9338a00083788059736edefd472710c00b30f9af..1f8b7f65125401ef250f9cf57c266fbb55da3f38 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c @@ -165,6 +165,9 @@ static void cn23xx_vf_setup_global_output_regs(struct octeon_device *oct) reg_val = octeon_read_csr(oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no)); + /* clear IPTR */ + reg_val &= ~CN23XX_PKT_OUTPUT_CTL_IPTR; + /* set DPTR */ reg_val |= CN23XX_PKT_OUTPUT_CTL_DPTR; diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c index 2887bcaf6af56890fbcf5e9f915151d5c08dc455..45c51277e0cf094b53136f466f99d74eca4542e5 100644 --- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c @@ -643,13 +643,21 @@ static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr) static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu) { struct octeon_mgmt *p = netdev_priv(netdev); - int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM; + int max_packet = new_mtu + ETH_HLEN + ETH_FCS_LEN; netdev->mtu = new_mtu; - cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, size_without_fcs); + /* HW lifts the limit if the frame is VLAN tagged + * (+4 bytes per each tag, up to two tags) + */ + cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, max_packet); + /* Set the hardware to truncate packets larger than the MTU. The jabber + * register must be set to a multiple of 8 bytes, so round up. JABBER is + * an unconditional limit, so we need to account for two possible VLAN + * tags. + */ cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER, - (size_without_fcs + 7) & 0xfff8); + (max_packet + 7 + VLAN_HLEN * 2) & 0xfff8); return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 44a0d04dd8a033e05507d40569a0e9b2642ee9a9..74a42f12064b6aced2f5813ef96ed92a10ce0c2a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -253,7 +253,7 @@ static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable) "Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n", enable ? 
"set" : "unset", pi->port_id, i, -err); else - txq->dcb_prio = value; + txq->dcb_prio = enable ? value : 0; } } diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig index 5ab912937aff2e8eb34887deb2aa36c8f45bebde..ec0b545197e2dfd7c0443917c0ec0f33861c77bd 100644 --- a/drivers/net/ethernet/cirrus/Kconfig +++ b/drivers/net/ethernet/cirrus/Kconfig @@ -19,6 +19,7 @@ if NET_VENDOR_CIRRUS config CS89x0 tristate "CS89x0 support" depends on ISA || EISA || ARM + depends on !PPC32 ---help--- Support for CS89x0 chipset based Ethernet cards. If you have a network (Ethernet) card of this type, say Y and read the file diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.c b/drivers/net/ethernet/cisco/enic/enic_clsf.c index 8dc21c9f97168e1a0a12bcf2c0d6b9ea224fa42f..df613a87ccfffb1e4fe19e6107be316a8e67f82a 100644 --- a/drivers/net/ethernet/cisco/enic/enic_clsf.c +++ b/drivers/net/ethernet/cisco/enic/enic_clsf.c @@ -79,7 +79,6 @@ void enic_rfs_flw_tbl_init(struct enic *enic) enic->rfs_h.max = enic->config.num_arfs; enic->rfs_h.free = enic->rfs_h.max; enic->rfs_h.toclean = 0; - enic_rfs_timer_start(enic); } void enic_rfs_flw_tbl_free(struct enic *enic) @@ -88,7 +87,6 @@ void enic_rfs_flw_tbl_free(struct enic *enic) enic_rfs_timer_stop(enic); spin_lock_bh(&enic->rfs_h.lock); - enic->rfs_h.free = 0; for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) { struct hlist_head *hhead; struct hlist_node *tmp; @@ -99,6 +97,7 @@ void enic_rfs_flw_tbl_free(struct enic *enic) enic_delfltr(enic, n->fltr_id); hlist_del(&n->node); kfree(n); + enic->rfs_h.free++; } } spin_unlock_bh(&enic->rfs_h.lock); diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index a03a32a4ffca4928bf4132aa31a33c28f9d48c98..2bfaf3e118b1ea7ef28bdab16084dcf470d651ba 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -1930,7 +1930,7 @@ static int enic_open(struct net_device *netdev) vnic_intr_unmask(&enic->intr[i]); enic_notify_timer_start(enic); - enic_rfs_flw_tbl_init(enic); + enic_rfs_timer_start(enic); return 0; @@ -2007,28 +2007,42 @@ static int enic_stop(struct net_device *netdev) return 0; } +static int _enic_change_mtu(struct net_device *netdev, int new_mtu) +{ + bool running = netif_running(netdev); + int err = 0; + + ASSERT_RTNL(); + if (running) { + err = enic_stop(netdev); + if (err) + return err; + } + + netdev->mtu = new_mtu; + + if (running) { + err = enic_open(netdev); + if (err) + return err; + } + + return 0; +} + static int enic_change_mtu(struct net_device *netdev, int new_mtu) { struct enic *enic = netdev_priv(netdev); - int running = netif_running(netdev); if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) return -EOPNOTSUPP; - if (running) - enic_stop(netdev); - - netdev->mtu = new_mtu; - if (netdev->mtu > enic->port_mtu) netdev_warn(netdev, - "interface MTU (%d) set higher than port MTU (%d)\n", - netdev->mtu, enic->port_mtu); + "interface MTU (%d) set higher than port MTU (%d)\n", + netdev->mtu, enic->port_mtu); - if (running) - enic_open(netdev); - - return 0; + return _enic_change_mtu(netdev, new_mtu); } static void enic_change_mtu_work(struct work_struct *work) @@ -2036,47 +2050,9 @@ static void enic_change_mtu_work(struct work_struct *work) struct enic *enic = container_of(work, struct enic, change_mtu_work); struct net_device *netdev = enic->netdev; int new_mtu = vnic_dev_mtu(enic->vdev); - int err; - unsigned int i; - - new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, 
new_mtu)); rtnl_lock(); - - /* Stop RQ */ - del_timer_sync(&enic->notify_timer); - - for (i = 0; i < enic->rq_count; i++) - napi_disable(&enic->napi[i]); - - vnic_intr_mask(&enic->intr[0]); - enic_synchronize_irqs(enic); - err = vnic_rq_disable(&enic->rq[0]); - if (err) { - rtnl_unlock(); - netdev_err(netdev, "Unable to disable RQ.\n"); - return; - } - vnic_rq_clean(&enic->rq[0], enic_free_rq_buf); - vnic_cq_clean(&enic->cq[0]); - vnic_intr_clean(&enic->intr[0]); - - /* Fill RQ with new_mtu-sized buffers */ - netdev->mtu = new_mtu; - vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf); - /* Need at least one buffer on ring to get going */ - if (vnic_rq_desc_used(&enic->rq[0]) == 0) { - rtnl_unlock(); - netdev_err(netdev, "Unable to alloc receive buffers.\n"); - return; - } - - /* Start RQ */ - vnic_rq_enable(&enic->rq[0]); - napi_enable(&enic->napi[0]); - vnic_intr_unmask(&enic->intr[0]); - enic_notify_timer_start(enic); - + (void)_enic_change_mtu(netdev, new_mtu); rtnl_unlock(); netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu); @@ -2854,6 +2830,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) enic->notify_timer.function = enic_notify_timer; enic->notify_timer.data = (unsigned long)enic; + enic_rfs_flw_tbl_init(enic); enic_set_rx_coal_setting(enic); INIT_WORK(&enic->reset, enic_reset); INIT_WORK(&enic->tx_hang_reset, enic_tx_hang_reset); @@ -2866,7 +2843,6 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) */ enic->port_mtu = enic->config.mtu; - (void)enic_change_mtu(netdev, enic->port_mtu); err = enic_set_mac_addr(netdev, enic->mac_addr); if (err) { @@ -2953,6 +2929,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* MTU range: 68 - 9000 */ netdev->min_mtu = ENIC_MIN_MTU; netdev->max_mtu = ENIC_MAX_MTU; + netdev->mtu = enic->port_mtu; err = register_netdev(netdev); if (err) { diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 02dd5246dfae9a99b20f2bb4b2b13185d3239a3c..1589a568bfe0ab67c2745ea95c28b6749d6895f5 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -4500,7 +4500,7 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, port_res->max_vfs += le16_to_cpu(pcie->num_vfs); } } - return status; + goto err; } pcie = be_get_pcie_desc(resp->func_param, desc_count, diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 519a021c0a25b41a8102819c21785b1af9d77dab..a202c50d6fc7659d87e6d0763442caed78f96254 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -125,6 +125,9 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms"); /* Default alignment for start of data in an Rx FD */ #define DPAA_FD_DATA_ALIGNMENT 16 +/* The DPAA requires 256 bytes reserved and mapped for the SGT */ +#define DPAA_SGT_SIZE 256 + /* Values for the L3R field of the FM Parse Results */ /* L3 Type field: First IP Present IPv4 */ @@ -1622,8 +1625,8 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv, if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) { nr_frags = skb_shinfo(skb)->nr_frags; - dma_unmap_single(dev, addr, qm_fd_get_offset(fd) + - sizeof(struct qm_sg_entry) * (1 + nr_frags), + dma_unmap_single(dev, addr, + qm_fd_get_offset(fd) + DPAA_SGT_SIZE, dma_dir); /* The sgt buffer has been allocated with netdev_alloc_frag(), @@ -1907,8 +1910,7 @@ static int 
skb_to_sg_fd(struct dpaa_priv *priv, void *sgt_buf; /* get a page frag to store the SGTable */ - sz = SKB_DATA_ALIGN(priv->tx_headroom + - sizeof(struct qm_sg_entry) * (1 + nr_frags)); + sz = SKB_DATA_ALIGN(priv->tx_headroom + DPAA_SGT_SIZE); sgt_buf = netdev_alloc_frag(sz); if (unlikely(!sgt_buf)) { netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n", @@ -1976,9 +1978,8 @@ static int skb_to_sg_fd(struct dpaa_priv *priv, skbh = (struct sk_buff **)buffer_start; *skbh = skb; - addr = dma_map_single(dev, buffer_start, priv->tx_headroom + - sizeof(struct qm_sg_entry) * (1 + nr_frags), - dma_dir); + addr = dma_map_single(dev, buffer_start, + priv->tx_headroom + DPAA_SGT_SIZE, dma_dir); if (unlikely(dma_mapping_error(dev, addr))) { dev_err(dev, "DMA mapping failed"); err = -EINVAL; diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c index 1789b206be58396f3d1eca908d3f36289375ac9b..495190764155abdea8df91bacd686d3d902ca81a 100644 --- a/drivers/net/ethernet/freescale/fman/fman_port.c +++ b/drivers/net/ethernet/freescale/fman/fman_port.c @@ -324,6 +324,10 @@ struct fman_port_qmi_regs { #define HWP_HXS_PHE_REPORT 0x00000800 #define HWP_HXS_PCAC_PSTAT 0x00000100 #define HWP_HXS_PCAC_PSTOP 0x00000001 +#define HWP_HXS_TCP_OFFSET 0xA +#define HWP_HXS_UDP_OFFSET 0xB +#define HWP_HXS_SH_PAD_REM 0x80000000 + struct fman_port_hwp_regs { struct { u32 ssa; /* Soft Sequence Attachment */ @@ -728,6 +732,10 @@ static void init_hwp(struct fman_port *port) iowrite32be(0xffffffff, ®s->pmda[i].lcv); } + /* Short packet padding removal from checksum calculation */ + iowrite32be(HWP_HXS_SH_PAD_REM, ®s->pmda[HWP_HXS_TCP_OFFSET].ssa); + iowrite32be(HWP_HXS_SH_PAD_REM, ®s->pmda[HWP_HXS_UDP_OFFSET].ssa); + start_port_hwp(port); } diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h index fa5b30f547f6620a6e761860be6afa341dc185c8..cad52bd331f7b295853b4d2765ad5b1ebd37b0d9 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.h +++ b/drivers/net/ethernet/hisilicon/hns/hnae.h @@ -220,10 +220,10 @@ struct hnae_desc_cb { /* priv data for the desc, e.g. 
skb when use with ip stack*/ void *priv; - u16 page_offset; - u16 reuse_flag; + u32 page_offset; + u32 length; /* length of the buffer */ - u16 length; /* length of the buffer */ + u16 reuse_flag; /* desc type, used by the ring user to mark the type of the priv data */ u16 type; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index e77192683dbad9a1fca22816e411abf4c3c25d8c..25a9732afc842935fae6770806248c279018ca8d 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -530,7 +530,7 @@ static void hns_nic_reuse_page(struct sk_buff *skb, int i, } skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len, - size - pull_len, truesize - pull_len); + size - pull_len, truesize); /* avoid re-using remote pages,flag default unreuse */ if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id())) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index c133491ad9fa0ab3e5bbd3a5356ff64e8e856627..654aad6e748bdb7d23b8ec3dae875c95d73548fd 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -3105,7 +3105,7 @@ static bool hclge_is_all_function_id_zero(struct hclge_desc *desc) #define HCLGE_FUNC_NUMBER_PER_DESC 6 int i, j; - for (i = 0; i < HCLGE_DESC_NUMBER; i++) + for (i = 1; i < HCLGE_DESC_NUMBER; i++) for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++) if (desc[i].data[j]) return false; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c index f32d719c4f77a2bf4dfd00896250aa27cc29f78c..8f90dd1be6b593e808b466abbd948e8899773ba3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -187,6 +187,8 @@ int hclge_mac_start_phy(struct hclge_dev *hdev) if (!phydev) return 0; + phydev->supported &= ~SUPPORTED_FIBRE; + ret = phy_connect_direct(netdev, phydev, hclge_mac_adjust_link, PHY_INTERFACE_MODE_SGMII); diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c index c8c7ad2eff77ecb5f2bd4ee5012a41b3ac1afcc8..9b5a68b6543287a93107ae2a923ee3bf99a38a82 100644 --- a/drivers/net/ethernet/hp/hp100.c +++ b/drivers/net/ethernet/hp/hp100.c @@ -2634,7 +2634,7 @@ static int hp100_login_to_vg_hub(struct net_device *dev, u_short force_relogin) /* Wait for link to drop */ time = jiffies + (HZ / 10); do { - if (~(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST)) + if (!(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST)) break; if (!in_interrupt()) schedule_timeout_interruptible(1); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index eb53bd93065e0da12ddd744feffdd34f853c952e..a696b5b2d40e6bb72bb4cd7663d8f84ea49588ff 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -981,6 +981,7 @@ static int nic_dev_init(struct pci_dev *pdev) hinic_hwdev_cb_register(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS, nic_dev, link_status_event_handler); + SET_NETDEV_DEV(netdev, &pdev->dev); err = register_netdev(netdev); if (err) { dev_err(&pdev->dev, "Failed to register netdev\n"); diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 98493be7b4afedf1ad367ba6b21155ddcb25478b..046af22a37cbc13938ac4d1de2c33a63babc5368 100644 --- 
a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1463,8 +1463,8 @@ static int do_reset(struct ibmvnic_adapter *adapter, rc = ibmvnic_login(netdev); if (rc) { - adapter->state = VNIC_PROBED; - return 0; + adapter->state = reset_state; + return rc; } rc = reset_tx_pools(adapter); diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index 3b3983a1ffbba1d6795ead832c5aceb801023969..10df2d60c1814b91b1a282a2a44a7ddc39a8300c 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c @@ -644,14 +644,14 @@ static int e1000_set_ringparam(struct net_device *netdev, adapter->tx_ring = tx_old; e1000_free_all_rx_resources(adapter); e1000_free_all_tx_resources(adapter); - kfree(tx_old); - kfree(rx_old); adapter->rx_ring = rxdr; adapter->tx_ring = txdr; err = e1000_up(adapter); if (err) goto err_setup; } + kfree(tx_old); + kfree(rx_old); clear_bit(__E1000_RESETTING, &adapter->flags); return 0; @@ -664,7 +664,8 @@ err_setup_rx: err_alloc_rx: kfree(txdr); err_alloc_tx: - e1000_up(adapter); + if (netif_running(adapter->netdev)) + e1000_up(adapter); err_setup: clear_bit(__E1000_RESETTING, &adapter->flags); return err; diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h index afb7ebe20b2438e9500f5dff2b1126ccde9c4670..824fd44e25f0d694a20bed7733dfebfaff9d0195 100644 --- a/drivers/net/ethernet/intel/e1000e/defines.h +++ b/drivers/net/ethernet/intel/e1000e/defines.h @@ -400,6 +400,10 @@ #define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */ #define E1000_ICR_RXO 0x00000040 /* Receiver Overrun */ #define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ +#define E1000_ICR_MDAC 0x00000200 /* MDIO Access Complete */ +#define E1000_ICR_SRPD 0x00010000 /* Small Receive Packet Detected */ +#define E1000_ICR_ACK 0x00020000 /* Receive ACK Frame Detected */ +#define E1000_ICR_MNG 0x00040000 /* Manageability Event Detected */ #define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */ /* If this bit asserted, the driver should claim the interrupt */ #define E1000_ICR_INT_ASSERTED 0x80000000 @@ -407,7 +411,7 @@ #define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */ #define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */ #define E1000_ICR_TXQ1 0x00800000 /* Tx Queue 1 Interrupt */ -#define E1000_ICR_OTHER 0x01000000 /* Other Interrupts */ +#define E1000_ICR_OTHER 0x01000000 /* Other Interrupt */ /* PBA ECC Register */ #define E1000_PBA_ECC_COUNTER_MASK 0xFFF00000 /* ECC counter mask */ @@ -431,12 +435,27 @@ E1000_IMS_RXSEQ | \ E1000_IMS_LSC) +/* These are all of the events related to the OTHER interrupt. + */ +#define IMS_OTHER_MASK ( \ + E1000_IMS_LSC | \ + E1000_IMS_RXO | \ + E1000_IMS_MDAC | \ + E1000_IMS_SRPD | \ + E1000_IMS_ACK | \ + E1000_IMS_MNG) + /* Interrupt Mask Set */ #define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ #define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ #define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ #define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. 
threshold */ +#define E1000_IMS_RXO E1000_ICR_RXO /* Receiver Overrun */ #define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */ +#define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO Access Complete */ +#define E1000_IMS_SRPD E1000_ICR_SRPD /* Small Receive Packet */ +#define E1000_IMS_ACK E1000_ICR_ACK /* Receive ACK Frame Detected */ +#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability Event */ #define E1000_IMS_ECCER E1000_ICR_ECCER /* Uncorrectable ECC Error */ #define E1000_IMS_RXQ0 E1000_ICR_RXQ0 /* Rx Queue 0 Interrupt */ #define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */ diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index ff308b05d68cc91efa7b195db9952d368553557f..00eedf202e62da10eec8ecf310fa1f689aaa0e57 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1367,9 +1367,6 @@ out: * Checks to see of the link status of the hardware has changed. If a * change in link status has been detected, then we read the PHY registers * to get the current speed/duplex if link exists. - * - * Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link - * up). **/ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) { @@ -1385,7 +1382,8 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) * Change or Rx Sequence Error interrupt. */ if (!mac->get_link_status) - return 1; + return 0; + mac->get_link_status = false; /* First we want to see if the MII Status Register reports * link. If so, then we want to get the current speed/duplex @@ -1393,12 +1391,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) */ ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); if (ret_val) - return ret_val; + goto out; if (hw->mac.type == e1000_pchlan) { ret_val = e1000_k1_gig_workaround_hv(hw, link); if (ret_val) - return ret_val; + goto out; } /* When connected at 10Mbps half-duplex, some parts are excessively @@ -1431,7 +1429,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) ret_val = hw->phy.ops.acquire(hw); if (ret_val) - return ret_val; + goto out; if (hw->mac.type == e1000_pch2lan) emi_addr = I82579_RX_CONFIG; @@ -1453,7 +1451,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) hw->phy.ops.release(hw); if (ret_val) - return ret_val; + goto out; if (hw->mac.type >= e1000_pch_spt) { u16 data; @@ -1462,14 +1460,14 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) if (speed == SPEED_1000) { ret_val = hw->phy.ops.acquire(hw); if (ret_val) - return ret_val; + goto out; ret_val = e1e_rphy_locked(hw, PHY_REG(776, 20), &data); if (ret_val) { hw->phy.ops.release(hw); - return ret_val; + goto out; } ptr_gap = (data & (0x3FF << 2)) >> 2; @@ -1483,18 +1481,18 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) } hw->phy.ops.release(hw); if (ret_val) - return ret_val; + goto out; } else { ret_val = hw->phy.ops.acquire(hw); if (ret_val) - return ret_val; + goto out; ret_val = e1e_wphy_locked(hw, PHY_REG(776, 20), 0xC023); hw->phy.ops.release(hw); if (ret_val) - return ret_val; + goto out; } } @@ -1521,7 +1519,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) { ret_val = e1000_k1_workaround_lpt_lp(hw, link); if (ret_val) - return ret_val; + goto out; } if (hw->mac.type >= e1000_pch_lpt) { /* Set platform power management values for @@ -1529,7 +1527,7 @@ static s32 
e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) */ ret_val = e1000_platform_pm_pch_lpt(hw, link); if (ret_val) - return ret_val; + goto out; } /* Clear link partner's EEE ability */ @@ -1552,9 +1550,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) } if (!link) - return 0; /* No link detected */ - - mac->get_link_status = false; + goto out; switch (hw->mac.type) { case e1000_pch2lan: @@ -1616,12 +1612,14 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) * different link partner. */ ret_val = e1000e_config_fc_after_link_up(hw); - if (ret_val) { + if (ret_val) e_dbg("Error configuring flow control\n"); - return ret_val; - } - return 1; + return ret_val; + +out: + mac->get_link_status = true; + return ret_val; } static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c index db735644b31214e8c35216d35642531c713abbb1..48cc945fc8b0f96e62d5066ba63b136718ce73d7 100644 --- a/drivers/net/ethernet/intel/e1000e/mac.c +++ b/drivers/net/ethernet/intel/e1000e/mac.c @@ -410,9 +410,6 @@ void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw) * Checks to see of the link status of the hardware has changed. If a * change in link status has been detected, then we read the PHY registers * to get the current speed/duplex if link exists. - * - * Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link - * up). **/ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) { @@ -426,20 +423,16 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) * Change or Rx Sequence Error interrupt. */ if (!mac->get_link_status) - return 1; + return 0; + mac->get_link_status = false; /* First we want to see if the MII Status Register reports * link. If so, then we want to get the current speed/duplex * of the PHY. */ ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); - if (ret_val) - return ret_val; - - if (!link) - return 0; /* No link detected */ - - mac->get_link_status = false; + if (ret_val || !link) + goto out; /* Check if there was DownShift, must be checked * immediately after link-up @@ -464,12 +457,14 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) * different link partner. 
*/ ret_val = e1000e_config_fc_after_link_up(hw); - if (ret_val) { + if (ret_val) e_dbg("Error configuring flow control\n"); - return ret_val; - } - return 1; + return ret_val; + +out: + mac->get_link_status = true; + return ret_val; } /** diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 6265ce8915b66132f4e8aee12388491b517b8689..a25dc581a9030a8391bee1a630a36d6ba0057eba 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -1910,30 +1910,20 @@ static irqreturn_t e1000_msix_other(int __always_unused irq, void *data) struct net_device *netdev = data; struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - u32 icr; - bool enable = true; - - icr = er32(ICR); - if (icr & E1000_ICR_RXO) { - ew32(ICR, E1000_ICR_RXO); - enable = false; - /* napi poll will re-enable Other, make sure it runs */ - if (napi_schedule_prep(&adapter->napi)) { - adapter->total_rx_bytes = 0; - adapter->total_rx_packets = 0; - __napi_schedule(&adapter->napi); - } - } + u32 icr = er32(ICR); + + if (icr & adapter->eiac_mask) + ew32(ICS, (icr & adapter->eiac_mask)); + if (icr & E1000_ICR_LSC) { - ew32(ICR, E1000_ICR_LSC); hw->mac.get_link_status = true; /* guard against interrupt when we're going down */ if (!test_bit(__E1000_DOWN, &adapter->state)) mod_timer(&adapter->watchdog_timer, jiffies + 1); } - if (enable && !test_bit(__E1000_DOWN, &adapter->state)) - ew32(IMS, E1000_IMS_OTHER); + if (!test_bit(__E1000_DOWN, &adapter->state)) + ew32(IMS, E1000_IMS_OTHER | IMS_OTHER_MASK); return IRQ_HANDLED; } @@ -2036,7 +2026,6 @@ static void e1000_configure_msix(struct e1000_adapter *adapter) hw->hw_addr + E1000_EITR_82574(vector)); else writel(1, hw->hw_addr + E1000_EITR_82574(vector)); - adapter->eiac_mask |= E1000_IMS_OTHER; /* Cause Tx interrupts on every write back */ ivar |= BIT(31); @@ -2261,7 +2250,8 @@ static void e1000_irq_enable(struct e1000_adapter *adapter) if (adapter->msix_entries) { ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574); - ew32(IMS, adapter->eiac_mask | E1000_IMS_LSC); + ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | + IMS_OTHER_MASK); } else if (hw->mac.type >= e1000_pch_lpt) { ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER); } else { @@ -2703,8 +2693,7 @@ static int e1000e_poll(struct napi_struct *napi, int weight) napi_complete_done(napi, work_done); if (!test_bit(__E1000_DOWN, &adapter->state)) { if (adapter->msix_entries) - ew32(IMS, adapter->rx_ring->ims_val | - E1000_IMS_OTHER); + ew32(IMS, adapter->rx_ring->ims_val); else e1000_irq_enable(adapter); } @@ -5100,7 +5089,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter) case e1000_media_type_copper: if (hw->mac.get_link_status) { ret_val = hw->mac.ops.check_for_link(hw); - link_active = ret_val > 0; + link_active = !hw->mac.get_link_status; } else { link_active = true; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 64429a14c630f09a2aa4d370d10239046c373d08..815284fe93241ef29cb941465009f07323d9c8b4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -1895,7 +1895,12 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, if (enable_addr != 0) rar_high |= IXGBE_RAH_AV; + /* Record lower 32 bits of MAC address and then make + * sure that write is flushed to hardware before writing + * the upper 16 bits and setting the valid bit. 
+ */ IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); + IXGBE_WRITE_FLUSH(hw); IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); return 0; @@ -1927,8 +1932,13 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index) rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); - IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0); + /* Clear the address valid bit and upper 16 bits of the address + * before clearing the lower bits. This way we aren't updating + * a live filter. + */ IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); + IXGBE_WRITE_FLUSH(hw); + IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0); /* clear VMDq pool/queue selection for this RAR */ hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL); diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 3deaa341331370bf7e88e8e3583d7b4f0fd2b104..074a5b79d691318a3bd2bde35dcb3a5ee93df0ab 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -3195,7 +3195,6 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu) on_each_cpu(mvneta_percpu_enable, pp, true); mvneta_start_dev(pp); - mvneta_port_up(pp); netdev_update_features(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index cf94fdf25155f9882eb3d5d3395644a22259b982..c7654209668bdebbbb969af753ca54298fabdde0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -449,6 +449,7 @@ const char *mlx5_command_str(int command) MLX5_COMMAND_STR_CASE(SET_HCA_CAP); MLX5_COMMAND_STR_CASE(QUERY_ISSI); MLX5_COMMAND_STR_CASE(SET_ISSI); + MLX5_COMMAND_STR_CASE(SET_DRIVER_VERSION); MLX5_COMMAND_STR_CASE(CREATE_MKEY); MLX5_COMMAND_STR_CASE(QUERY_MKEY); MLX5_COMMAND_STR_CASE(DESTROY_MKEY); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index 17b723218b0c0d891a44379e0f69ad804d43c154..9f9c9ff107354eeb4651a40991c96f842d433c50 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c @@ -132,11 +132,11 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv) delayed_event_start(priv); dev_ctx->context = intf->add(dev); - set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state); - if (intf->attach) - set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state); - if (dev_ctx->context) { + set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state); + if (intf->attach) + set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state); + spin_lock_irq(&priv->ctx_lock); list_add_tail(&dev_ctx->list, &priv->ctx_list); @@ -211,12 +211,17 @@ static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv if (intf->attach) { if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state)) goto out; - intf->attach(dev, dev_ctx->context); + if (intf->attach(dev, dev_ctx->context)) + goto out; + set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state); } else { if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state)) goto out; dev_ctx->context = intf->add(dev); + if (!dev_ctx->context) + goto out; + set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index f697084937c381113632317f263d0af6abef3eee..de72b66df3e587d88aec9353c344afd66a8d08ee 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1525,17 +1525,15 @@ static void 
esw_disable_vport(struct mlx5_eswitch *esw, int vport_num) } /* Public E-Switch API */ -#define ESW_ALLOWED(esw) ((esw) && MLX5_VPORT_MANAGER((esw)->dev)) +#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev)) + int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) { int err; int i, enabled_events; - if (!ESW_ALLOWED(esw)) - return 0; - - if (!MLX5_ESWITCH_MANAGER(esw->dev) || + if (!ESW_ALLOWED(esw) || !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) { esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n"); return -EOPNOTSUPP; @@ -1728,7 +1726,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, u64 node_guid; int err = 0; - if (!ESW_ALLOWED(esw)) + if (!MLX5_CAP_GEN(esw->dev, vport_group_manager)) return -EPERM; if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac)) return -EINVAL; @@ -1805,7 +1803,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, { struct mlx5_vport *evport; - if (!ESW_ALLOWED(esw)) + if (!MLX5_CAP_GEN(esw->dev, vport_group_manager)) return -EPERM; if (!LEGAL_VPORT(esw, vport)) return -EINVAL; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index c699055c0ffdecde6ddaac99a10b2f6dbe1af4ce..4b52b722135d98ac20e04ad76056e3382f9d9f65 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -557,6 +557,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) if (err) goto miss_rule_err; + kvfree(flow_group_in); return 0; miss_rule_err: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index db86e1506c8b67fa8849940c4adc958fb783ac92..61f284966a8c99e506f3c0d0f76acafa199a05ab 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -333,9 +333,17 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev) add_timer(&health->timer); } -void mlx5_stop_health_poll(struct mlx5_core_dev *dev) +void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health) { struct mlx5_core_health *health = &dev->priv.health; + unsigned long flags; + + if (disable_health) { + spin_lock_irqsave(&health->wq_lock, flags); + set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags); + set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags); + spin_unlock_irqrestore(&health->wq_lock, flags); + } del_timer_sync(&health->timer); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 4ddd632d10f9958c34238329f14faed02a021243..e99f1382a4f0ca334421f74da18659010923d707 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -857,8 +857,10 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv) priv->numa_node = dev_to_node(&dev->pdev->dev); priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root); - if (!priv->dbg_root) + if (!priv->dbg_root) { + dev_err(&pdev->dev, "Cannot create debugfs dir, aborting\n"); return -ENOMEM; + } err = mlx5_pci_enable_device(dev); if (err) { @@ -907,7 +909,7 @@ static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv) pci_clear_master(dev->pdev); release_bar(dev->pdev); mlx5_pci_disable_device(dev); - debugfs_remove(priv->dbg_root); + debugfs_remove_recursive(priv->dbg_root); } static int 
mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv) @@ -1227,7 +1229,7 @@ err_cleanup_once: mlx5_cleanup_once(dev); err_stop_poll: - mlx5_stop_health_poll(dev); + mlx5_stop_health_poll(dev, boot); if (mlx5_cmd_teardown_hca(dev)) { dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n"); goto out_err; @@ -1286,7 +1288,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, mlx5_free_irq_vectors(dev); if (cleanup) mlx5_cleanup_once(dev); - mlx5_stop_health_poll(dev); + mlx5_stop_health_poll(dev, cleanup); err = mlx5_cmd_teardown_hca(dev); if (err) { dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n"); @@ -1548,7 +1550,7 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev) * with the HCA, so the health polll is no longer needed. */ mlx5_drain_health_wq(dev); - mlx5_stop_health_poll(dev); + mlx5_stop_health_poll(dev, false); ret = mlx5_cmd_force_teardown_hca(dev); if (ret) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index 71153c0f16054a2e987972b067c0070a54863e88..aa9a88e84e3bf5c1056ee178b87a659020d5e041 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -549,8 +549,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev, return -EINVAL; if (!MLX5_CAP_GEN(mdev, vport_group_manager)) return -EACCES; - if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify)) - return -EOPNOTSUPP; in = kvzalloc(inlen, GFP_KERNEL); if (!in) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 8c4ce0a0cc825e93999f5130ede9f136052da35c..06eeea6b2f93111be564f22adfe1e0403eab9249 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -395,6 +395,8 @@ int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event, void mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan); void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif); +void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp, + struct net_device *dev); /* spectrum_kvdl.c */ int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 516e6324460676ce84cf09dc00b74309695e00c5..3ed4fb346f2357fce463bc812f5c22dc21015b39 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -5131,6 +5131,17 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif) mlxsw_sp_vr_put(vr); } +void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp, + struct net_device *dev) +{ + struct mlxsw_sp_rif *rif; + + rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); + if (!rif) + return; + mlxsw_sp_rif_destroy(rif); +} + static void mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params, struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 7924f241e3ad068ed68ef295e967384ac559a08d..32c25772f75586dbb4884b6afc2c84d08931702d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -140,6 +140,24 @@ bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp, return 
!!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); } +static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev, + void *data) +{ + struct mlxsw_sp *mlxsw_sp = data; + + mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev); + return 0; +} + +static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp, + struct net_device *dev) +{ + mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev); + netdev_walk_all_upper_dev_rcu(dev, + mlxsw_sp_bridge_device_upper_rif_destroy, + mlxsw_sp); +} + static struct mlxsw_sp_bridge_device * mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge, struct net_device *br_dev) @@ -176,6 +194,8 @@ static void mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge, struct mlxsw_sp_bridge_device *bridge_device) { + mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp, + bridge_device->dev); list_del(&bridge_device->list); if (bridge_device->vlan_enabled) bridge->vlan_enabled_exists = false; diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index 91fe0361710657ecbd6fba6dcc54b00894019480..72496060e3328e2be01b05be649f13e0f835b219 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -79,7 +79,7 @@ nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port) return NFP_REPR_TYPE_VF; } - return NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC; + return __NFP_REPR_TYPE_MAX; } static struct net_device * @@ -90,6 +90,8 @@ nfp_flower_repr_get(struct nfp_app *app, u32 port_id) u8 port = 0; repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port); + if (repr_type > NFP_REPR_TYPE_MAX) + return NULL; reprs = rcu_dereference(app->reprs[repr_type]); if (!reprs) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 8d53a593fb27435052d5bcbbecac6a3b86aec873..56751990bceefc9e9fa27a1d6cd82980c443fd1c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -227,29 +227,16 @@ done: spin_unlock_bh(&nn->reconfig_lock); } -/** - * nfp_net_reconfig() - Reconfigure the firmware - * @nn: NFP Net device to reconfigure - * @update: The value for the update field in the BAR config - * - * Write the update word to the BAR and ping the reconfig queue. The - * poll until the firmware has acknowledged the update by zeroing the - * update word. 
- * - * Return: Negative errno on error, 0 on success - */ -int nfp_net_reconfig(struct nfp_net *nn, u32 update) +static void nfp_net_reconfig_sync_enter(struct nfp_net *nn) { bool cancelled_timer = false; u32 pre_posted_requests; - int ret; spin_lock_bh(&nn->reconfig_lock); nn->reconfig_sync_present = true; if (nn->reconfig_timer_active) { - del_timer(&nn->reconfig_timer); nn->reconfig_timer_active = false; cancelled_timer = true; } @@ -258,14 +245,43 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update) spin_unlock_bh(&nn->reconfig_lock); - if (cancelled_timer) + if (cancelled_timer) { + del_timer_sync(&nn->reconfig_timer); nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires); + } /* Run the posted reconfigs which were issued before we started */ if (pre_posted_requests) { nfp_net_reconfig_start(nn, pre_posted_requests); nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT); } +} + +static void nfp_net_reconfig_wait_posted(struct nfp_net *nn) +{ + nfp_net_reconfig_sync_enter(nn); + + spin_lock_bh(&nn->reconfig_lock); + nn->reconfig_sync_present = false; + spin_unlock_bh(&nn->reconfig_lock); +} + +/** + * nfp_net_reconfig() - Reconfigure the firmware + * @nn: NFP Net device to reconfigure + * @update: The value for the update field in the BAR config + * + * Write the update word to the BAR and ping the reconfig queue. The + * poll until the firmware has acknowledged the update by zeroing the + * update word. + * + * Return: Negative errno on error, 0 on success + */ +int nfp_net_reconfig(struct nfp_net *nn, u32 update) +{ + int ret; + + nfp_net_reconfig_sync_enter(nn); nfp_net_reconfig_start(nn, update); ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT); @@ -1071,7 +1087,7 @@ static bool nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring) * @dp: NFP Net data path struct * @tx_ring: TX ring structure * - * Assumes that the device is stopped + * Assumes that the device is stopped, must be idempotent. */ static void nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring) @@ -1273,13 +1289,18 @@ static void nfp_net_rx_give_one(const struct nfp_net_dp *dp, * nfp_net_rx_ring_reset() - Reflect in SW state of freelist after disable * @rx_ring: RX ring structure * - * Warning: Do *not* call if ring buffers were never put on the FW freelist - * (i.e. device was not enabled)! + * Assumes that the device is stopped, must be idempotent. */ static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring) { unsigned int wr_idx, last_idx; + /* wr_p == rd_p means ring was never fed FL bufs. RX rings are always + * kept at cnt - 1 FL bufs. + */ + if (rx_ring->wr_p == 0 && rx_ring->rd_p == 0) + return; + /* Move the empty entry to the end of the list */ wr_idx = D_IDX(rx_ring, rx_ring->wr_p); last_idx = rx_ring->cnt - 1; @@ -2489,6 +2510,8 @@ static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx) /** * nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP * @nn: NFP Net device to reconfigure + * + * Warning: must be fully idempotent. 
*/ static void nfp_net_clear_config_and_disable(struct nfp_net *nn) { @@ -3560,6 +3583,7 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev, */ void nfp_net_free(struct nfp_net *nn) { + WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted); if (nn->xdp_prog) bpf_prog_put(nn->xdp_prog); @@ -3829,4 +3853,5 @@ void nfp_net_clean(struct nfp_net *nn) return; unregister_netdev(nn->dp.netdev); + nfp_net_reconfig_wait_posted(nn); } diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c index cd34097b79f1be9d313d8f28b9701bb5bd6a3100..37a6d7822a3860647c416efeff47c7a7837a3a85 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c @@ -232,7 +232,7 @@ struct nfp_nffw_info *nfp_nffw_info_open(struct nfp_cpp *cpp) err = nfp_cpp_read(cpp, nfp_resource_cpp_id(state->res), nfp_resource_address(state->res), fwinf, sizeof(*fwinf)); - if (err < sizeof(*fwinf)) + if (err < (int)sizeof(*fwinf)) goto err_release; if (!nffw_res_flg_init_get(fwinf)) diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index b306961b02fdf40aef89db7d5ed1e535a98a65a1..d62dccb85539404f8173e5713d805a096859b09a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -255,9 +255,8 @@ qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn, *type = DCBX_PROTOCOL_ROCE_V2; } else { *type = DCBX_MAX_PROTOCOL_TYPE; - DP_ERR(p_hwfn, - "No action required, App TLV id = 0x%x app_prio_bitmap = 0x%x\n", - id, app_prio_bitmap); + DP_ERR(p_hwfn, "No action required, App TLV entry = 0x%x\n", + app_prio_bitmap); return false; } @@ -1472,8 +1471,8 @@ static u8 qed_dcbnl_getcap(struct qed_dev *cdev, int capid, u8 *cap) *cap = 0x80; break; case DCB_CAP_ATTR_DCBX: - *cap = (DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_CEE | - DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_STATIC); + *cap = (DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_VER_IEEE | + DCB_CAP_DCBX_STATIC); break; default: *cap = false; @@ -1541,8 +1540,6 @@ static u8 qed_dcbnl_getdcbx(struct qed_dev *cdev) if (!dcbx_info) return 0; - if (dcbx_info->operational.enabled) - mode |= DCB_CAP_DCBX_LLD_MANAGED; if (dcbx_info->operational.ieee) mode |= DCB_CAP_DCBX_VER_IEEE; if (dcbx_info->operational.cee) diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index c5452b445c37c142de2eb3cc083ec1a2516a366d..83c1c4fa102b7bce5ce9822653f0f7688446a50f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -663,7 +663,7 @@ qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn, p_ramrod->common.update_approx_mcast_flg = 1; for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { - u32 *p_bins = (u32 *)p_params->bins; + u32 *p_bins = p_params->bins; p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]); } @@ -1474,8 +1474,8 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_data) { - unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS]; struct vport_update_ramrod_data *p_ramrod = NULL; + u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS]; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; u8 abs_vport_id = 0; @@ -1511,26 +1511,25 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn, /* explicitly clear out the entire vector */ memset(&p_ramrod->approx_mcast.bins, 0, sizeof(p_ramrod->approx_mcast.bins)); - 
memset(bins, 0, sizeof(unsigned long) * - ETH_MULTICAST_MAC_BINS_IN_REGS); + memset(bins, 0, sizeof(bins)); /* filter ADD op is explicit set op and it removes * any existing filters for the vport */ if (p_filter_cmd->opcode == QED_FILTER_ADD) { for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) { - u32 bit; + u32 bit, nbits; bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]); - __set_bit(bit, bins); + nbits = sizeof(u32) * BITS_PER_BYTE; + bins[bit / nbits] |= 1 << (bit % nbits); } /* Convert to correct endianity */ for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { struct vport_update_ramrod_mcast *p_ramrod_bins; - u32 *p_bins = (u32 *)bins; p_ramrod_bins = &p_ramrod->approx_mcast; - p_ramrod_bins->bins[i] = cpu_to_le32(p_bins[i]); + p_ramrod_bins->bins[i] = cpu_to_le32(bins[i]); } } diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index cc1f248551c9d4a13c5235ba00d83ebcd24b61b1..91d383f3a661ff5898d9268166fb33e8ebf64e05 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -214,7 +214,7 @@ struct qed_sp_vport_update_params { u8 anti_spoofing_en; u8 update_accept_any_vlan_flg; u8 accept_any_vlan; - unsigned long bins[8]; + u32 bins[8]; struct qed_rss_params *rss_params; struct qed_filter_accept_flags accept_flags; struct qed_sge_tpa_params *sge_tpa_params; diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index c06ad4f0758eb755ad9d82ffccecc3def154fc6b..5f52f14761a30472c8a483474256fdb70d6c3861 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -201,8 +201,9 @@ void qed_ll2b_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data) skb = build_skb(buffer->data, 0); if (!skb) { - rc = -ENOMEM; - goto out_post; + DP_INFO(cdev, "Failed to build SKB\n"); + kfree(buffer->data); + goto out_post1; } data->u.placement_offset += NET_SKB_PAD; @@ -224,8 +225,14 @@ void qed_ll2b_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data) cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb, data->opaque_data_0, data->opaque_data_1); + } else { + DP_VERBOSE(p_hwfn, (NETIF_MSG_RX_STATUS | NETIF_MSG_PKTDATA | + QED_MSG_LL2 | QED_MSG_STORAGE), + "Dropping the packet\n"); + kfree(buffer->data); } +out_post1: /* Update Buffer information and update FW producer */ buffer->data = new_data; buffer->phys_addr = new_phys_addr; diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 2c958921dfb36f876594b6b0f7f22d67628731fb..954f7ce4cf2850a4130742bbd1e67e07ae1deea9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -565,8 +565,16 @@ static irqreturn_t qed_single_int(int irq, void *dev_instance) /* Fastpath interrupts */ for (j = 0; j < 64; j++) { if ((0x2ULL << j) & status) { - hwfn->simd_proto_handler[j].func( - hwfn->simd_proto_handler[j].token); + struct qed_simd_fp_handler *p_handler = + &hwfn->simd_proto_handler[j]; + + if (p_handler->func) + p_handler->func(p_handler->token); + else + DP_NOTICE(hwfn, + "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n", + j, status); + status &= ~(0x2ULL << j); rc = IRQ_HANDLED; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 376485d99357dc0ca18dde626300da1327e35721..7938abe9a30106450245ff94f8a28a155dcac54f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ 
b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -47,7 +47,7 @@ #include "qed_reg_addr.h" #include "qed_sriov.h" -#define CHIP_MCP_RESP_ITER_US 10 +#define QED_MCP_RESP_ITER_US 10 #define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */ #define QED_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */ @@ -182,18 +182,57 @@ int qed_mcp_free(struct qed_hwfn *p_hwfn) return 0; } +/* Maximum of 1 sec to wait for the SHMEM ready indication */ +#define QED_MCP_SHMEM_RDY_MAX_RETRIES 20 +#define QED_MCP_SHMEM_RDY_ITER_MS 50 + static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_mcp_info *p_info = p_hwfn->mcp_info; + u8 cnt = QED_MCP_SHMEM_RDY_MAX_RETRIES; + u8 msec = QED_MCP_SHMEM_RDY_ITER_MS; u32 drv_mb_offsize, mfw_mb_offsize; u32 mcp_pf_id = MCP_PF_ID(p_hwfn); p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR); - if (!p_info->public_base) - return 0; + if (!p_info->public_base) { + DP_NOTICE(p_hwfn, + "The address of the MCP scratch-pad is not configured\n"); + return -EINVAL; + } p_info->public_base |= GRCBASE_MCP; + /* Get the MFW MB address and number of supported messages */ + mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, + SECTION_OFFSIZE_ADDR(p_info->public_base, + PUBLIC_MFW_MB)); + p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id); + p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, + p_info->mfw_mb_addr + + offsetof(struct public_mfw_mb, + sup_msgs)); + + /* The driver can notify that there was an MCP reset, and might read the + * SHMEM values before the MFW has completed initializing them. + * To avoid this, the "sup_msgs" field in the MFW mailbox is used as a + * data ready indication. + */ + while (!p_info->mfw_mb_length && --cnt) { + msleep(msec); + p_info->mfw_mb_length = + (u16)qed_rd(p_hwfn, p_ptt, + p_info->mfw_mb_addr + + offsetof(struct public_mfw_mb, sup_msgs)); + } + + if (!cnt) { + DP_NOTICE(p_hwfn, + "Failed to get the SHMEM ready notification after %d msec\n", + QED_MCP_SHMEM_RDY_MAX_RETRIES * msec); + return -EBUSY; + } + /* Calculate the driver and MFW mailbox address */ drv_mb_offsize = qed_rd(p_hwfn, p_ptt, SECTION_OFFSIZE_ADDR(p_info->public_base, @@ -203,13 +242,6 @@ static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n", drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id); - /* Set the MFW MB address */ - mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, - SECTION_OFFSIZE_ADDR(p_info->public_base, - PUBLIC_MFW_MB)); - p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id); - p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr); - /* Get the current driver mailbox sequence before sending * the first command */ @@ -284,9 +316,15 @@ static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn, int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { - u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0; + u32 org_mcp_reset_seq, seq, delay = QED_MCP_RESP_ITER_US, cnt = 0; int rc = 0; + if (p_hwfn->mcp_info->b_block_cmd) { + DP_NOTICE(p_hwfn, + "The MFW is not responsive. 
Avoid sending MCP_RESET mailbox command.\n"); + return -EBUSY; + } + /* Ensure that only a single thread is accessing the mailbox */ spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); @@ -412,14 +450,41 @@ static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, (p_mb_params->cmd | seq_num), p_mb_params->param); } +static void qed_mcp_cmd_set_blocking(struct qed_hwfn *p_hwfn, bool block_cmd) +{ + p_hwfn->mcp_info->b_block_cmd = block_cmd; + + DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n", + block_cmd ? "Block" : "Unblock"); +} + +static void qed_mcp_print_cpu_info(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2; + u32 delay = QED_MCP_RESP_ITER_US; + + cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); + cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE); + cpu_pc_0 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER); + udelay(delay); + cpu_pc_1 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER); + udelay(delay); + cpu_pc_2 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER); + + DP_NOTICE(p_hwfn, + "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n", + cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2); +} + static int _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_mcp_mb_params *p_mb_params, - u32 max_retries, u32 delay) + u32 max_retries, u32 usecs) { + u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000); struct qed_mcp_cmd_elem *p_cmd_elem; - u32 cnt = 0; u16 seq_num; int rc = 0; @@ -442,7 +507,11 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, goto err; spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); - udelay(delay); + + if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) + msleep(msecs); + else + udelay(usecs); } while (++cnt < max_retries); if (cnt >= max_retries) { @@ -471,7 +540,11 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, * The spinlock stays locked until the list element is removed. */ - udelay(delay); + if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) + msleep(msecs); + else + udelay(usecs); + spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); if (p_cmd_elem->b_is_completed) @@ -490,11 +563,15 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, DP_NOTICE(p_hwfn, "The MFW failed to respond to command 0x%08x [param 0x%08x].\n", p_mb_params->cmd, p_mb_params->param); + qed_mcp_print_cpu_info(p_hwfn, p_ptt); spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem); spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); + if (!QED_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK)) + qed_mcp_cmd_set_blocking(p_hwfn, true); + return -EAGAIN; } @@ -506,7 +583,7 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n", p_mb_params->mcp_resp, p_mb_params->mcp_param, - (cnt * delay) / 1000, (cnt * delay) % 1000); + (cnt * usecs) / 1000, (cnt * usecs) % 1000); /* Clear the sequence number from the MFW response */ p_mb_params->mcp_resp &= FW_MSG_CODE_MASK; @@ -524,7 +601,7 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, { size_t union_data_size = sizeof(union drv_union_data); u32 max_retries = QED_DRV_MB_MAX_RETRIES; - u32 delay = CHIP_MCP_RESP_ITER_US; + u32 usecs = QED_MCP_RESP_ITER_US; /* MCP not initialized */ if (!qed_mcp_is_init(p_hwfn)) { @@ -532,6 +609,13 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, return -EBUSY; } + if (p_hwfn->mcp_info->b_block_cmd) { + DP_NOTICE(p_hwfn, + "The MFW is not responsive. 
Avoid sending mailbox command 0x%08x [param 0x%08x].\n", + p_mb_params->cmd, p_mb_params->param); + return -EBUSY; + } + if (p_mb_params->data_src_size > union_data_size || p_mb_params->data_dst_size > union_data_size) { DP_ERR(p_hwfn, @@ -541,8 +625,13 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, return -EINVAL; } + if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) { + max_retries = DIV_ROUND_UP(max_retries, 1000); + usecs *= 1000; + } + return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries, - delay); + usecs); } int qed_mcp_cmd(struct qed_hwfn *p_hwfn, @@ -731,6 +820,7 @@ __qed_mcp_load_req(struct qed_hwfn *p_hwfn, mb_params.data_src_size = sizeof(load_req); mb_params.p_data_dst = &load_rsp; mb_params.data_dst_size = sizeof(load_rsp); + mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK; DP_VERBOSE(p_hwfn, QED_MSG_SP, "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n", @@ -952,7 +1042,8 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn, int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { - u32 wol_param, mcp_resp, mcp_param; + struct qed_mcp_mb_params mb_params; + u32 wol_param; switch (p_hwfn->cdev->wol_config) { case QED_OV_WOL_DISABLED: @@ -970,8 +1061,12 @@ int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP; } - return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param, - &mcp_resp, &mcp_param); + memset(&mb_params, 0, sizeof(mb_params)); + mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ; + mb_params.param = wol_param; + mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK; + + return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); } int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) @@ -1182,6 +1277,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, break; default: p_link->speed = 0; + p_link->link_up = 0; } if (p_link->link_up && p_link->speed) @@ -1279,9 +1375,15 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up) phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0; phy_cfg.adv_speed = params->speed.advertised_speeds; phy_cfg.loopback_mode = params->loopback_mode; - if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) { - if (params->eee.enable) - phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED; + + /* There are MFWs that share this capability regardless of whether + * this is feasible or not. And given that at the very least adv_caps + * would be set internally by qed, we want to make sure LFA would + * still work. 
+ */ + if ((p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_EEE) && params->eee.enable) { + phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED; if (params->eee.tx_lpi_enable) phy_cfg.eee_cfg |= EEE_CFG_TX_LPI; if (params->eee.adv_caps & QED_EEE_1G_ADV) @@ -1959,31 +2061,65 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, return rc; } +/* A maximal 100 msec waiting time for the MCP to halt */ +#define QED_MCP_HALT_SLEEP_MS 10 +#define QED_MCP_HALT_MAX_RETRIES 10 + int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { - u32 resp = 0, param = 0; + u32 resp = 0, param = 0, cpu_state, cnt = 0; int rc; rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp, ¶m); - if (rc) + if (rc) { DP_ERR(p_hwfn, "MCP response failure, aborting\n"); + return rc; + } - return rc; + do { + msleep(QED_MCP_HALT_SLEEP_MS); + cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE); + if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) + break; + } while (++cnt < QED_MCP_HALT_MAX_RETRIES); + + if (cnt == QED_MCP_HALT_MAX_RETRIES) { + DP_NOTICE(p_hwfn, + "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n", + qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state); + return -EBUSY; + } + + qed_mcp_cmd_set_blocking(p_hwfn, true); + + return 0; } +#define QED_MCP_RESUME_SLEEP_MS 10 + int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { - u32 value, cpu_mode; + u32 cpu_mode, cpu_state; qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff); - value = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); - value &= ~MCP_REG_CPU_MODE_SOFT_HALT; - qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value); cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); + cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT; + qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode); + msleep(QED_MCP_RESUME_SLEEP_MS); + cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE); - return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -EAGAIN : 0; + if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) { + DP_NOTICE(p_hwfn, + "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n", + cpu_mode, cpu_state); + return -EBUSY; + } + + qed_mcp_cmd_set_blocking(p_hwfn, false); + + return 0; } int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn, diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index c7ec2395d1cebd795c9cd33de937da4de6099513..f1fe5e3427ea54f55831563e2a89e4aeea7ad2aa 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -540,11 +540,14 @@ struct qed_mcp_info { */ spinlock_t cmd_lock; + /* Flag to indicate whether sending a MFW mailbox command is blocked */ + bool b_block_cmd; + /* Spinlock used for syncing SW link-changes and link-changes * originating from attention context. 
*/ spinlock_t link_lock; - bool block_mb_sending; + u32 public_base; u32 drv_mb_addr; u32 mfw_mb_addr; @@ -565,14 +568,20 @@ struct qed_mcp_info { }; struct qed_mcp_mb_params { - u32 cmd; - u32 param; - void *p_data_src; - u8 data_src_size; - void *p_data_dst; - u8 data_dst_size; - u32 mcp_resp; - u32 mcp_param; + u32 cmd; + u32 param; + void *p_data_src; + void *p_data_dst; + u8 data_src_size; + u8 data_dst_size; + u32 mcp_resp; + u32 mcp_param; + u32 flags; +#define QED_MB_FLAG_CAN_SLEEP (0x1 << 0) +#define QED_MB_FLAG_AVOID_BLOCK (0x1 << 1) +#define QED_MB_FLAGS_IS_SET(params, flag) \ + ({ typeof(params) __params = (params); \ + (__params && (__params->flags & QED_MB_FLAG_ ## flag)); }) }; /** diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index 0cdb4337b3a057befc98b3c69c0d65085d81b918..d1201bb2d4bb7d6ef28ca19d74f2b32fd2271a42 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -554,8 +554,10 @@ 0 #define MCP_REG_CPU_STATE \ 0xe05004UL +#define MCP_REG_CPU_STATE_SOFT_HALTED (0x1UL << 10) #define MCP_REG_CPU_EVENT_MASK \ 0xe05008UL +#define MCP_REG_CPU_PROGRAM_COUNTER 0xe0501cUL #define PGLUE_B_REG_PF_BAR0_SIZE \ 0x2aae60UL #define PGLUE_B_REG_PF_BAR1_SIZE \ diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index d08fe350ab6cd3f925ce2de8768c893b82796c93..c6411158afd7b4f837c49311515ff4d6c29325fc 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -2826,7 +2826,7 @@ qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn, p_data->update_approx_mcast_flg = 1; memcpy(p_data->bins, p_mcast_tlv->bins, - sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS); + sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS); *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 91b5e9f02a622cdfe4f1cff142d47c7a1b0b61ef..6eb85db69f9a9ada7c128706d60b434cd097d569 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -1126,7 +1126,7 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn, resp_size += sizeof(struct pfvf_def_resp_tlv); memcpy(p_mcast_tlv->bins, p_params->bins, - sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS); + sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS); } update_rx = p_params->accept_flags.update_rx_mode_config; @@ -1272,7 +1272,7 @@ void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn, u32 bit; bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]); - __set_bit(bit, sp_params.bins); + sp_params.bins[bit / 32] |= 1 << (bit % 32); } } diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h index 97d44dfb38ca2a3f9e9ca6e0585afcbe00378ca7..1e93c712fa346279cc2ec2d6038115ee91607c67 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.h +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h @@ -392,7 +392,12 @@ struct vfpf_vport_update_mcast_bin_tlv { struct channel_tlv tl; u8 padding[4]; - u64 bins[8]; + /* There are only 256 approx bins, and in HSI they're divided into + * 32-bit values. As old VFs used to set-bit to the values on its side, + * the upper half of the array is never expected to contain any data. 
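Note: the qed hunks above drop the "unsigned long bins[8]" plus __set_bit() combination in favour of an array of 32-bit words written with an explicit shift, so the bin layout handed to the firmware is identical on 32-bit and 64-bit hosts and matches what older VFs already send. A minimal standalone sketch of that packing (the constants mirror the driver's values, the helper itself is illustrative only, not part of the patch):

    #include <stdint.h>

    #define ETH_MULTICAST_MAC_BINS          256
    #define ETH_MULTICAST_MAC_BINS_IN_REGS  (ETH_MULTICAST_MAC_BINS / 32)

    /* Set approximate-multicast bin 'bin' in an array of 32-bit words,
     * the representation the firmware interface expects regardless of
     * the host word size.
     */
    static void set_mcast_bin(uint32_t bins[ETH_MULTICAST_MAC_BINS_IN_REGS],
                              unsigned int bin)
    {
            bins[bin / 32] |= 1u << (bin % 32);
    }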
+ */ + u64 bins[4]; + u64 obsolete_bins[4]; }; struct vfpf_vport_update_accept_param_tlv { diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index 287d89dd086ff76981df302b847d882d887408c6..9c94240bb05ad7e88e97a0351ff6f57cb6b67882 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c @@ -1128,6 +1128,8 @@ static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp, struct qlcnic_adapter *adapter = dev_get_drvdata(dev); ret = kstrtoul(buf, 16, &data); + if (ret) + return ret; switch (data) { case QLC_83XX_FLASH_SECTOR_ERASE_CMD: diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 9feec70094435e19ee039f18ea430291d9f7d1a0..0e3b2890b92589771e11ff32660bde3c07ab6f20 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -2386,26 +2386,20 @@ static int qlge_update_hw_vlan_features(struct net_device *ndev, return status; } -static netdev_features_t qlge_fix_features(struct net_device *ndev, - netdev_features_t features) -{ - int err; - - /* Update the behavior of vlan accel in the adapter */ - err = qlge_update_hw_vlan_features(ndev, features); - if (err) - return err; - - return features; -} - static int qlge_set_features(struct net_device *ndev, netdev_features_t features) { netdev_features_t changed = ndev->features ^ features; + int err; + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) { + /* Update the behavior of vlan accel in the adapter */ + err = qlge_update_hw_vlan_features(ndev, features); + if (err) + return err; - if (changed & NETIF_F_HW_VLAN_CTAG_RX) qlge_vlan_mode(ndev, features); + } return 0; } @@ -4719,7 +4713,6 @@ static const struct net_device_ops qlge_netdev_ops = { .ndo_set_mac_address = qlge_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_tx_timeout = qlge_tx_timeout, - .ndo_fix_features = qlge_fix_features, .ndo_set_features = qlge_set_features, .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid, diff --git a/drivers/net/ethernet/qualcomm/qca_7k.c b/drivers/net/ethernet/qualcomm/qca_7k.c index ffe7a16bdfc840d859292f6939938b75fba15a41..6c8543fb90c0a3ac780edd36aa701b01e7c9d94a 100644 --- a/drivers/net/ethernet/qualcomm/qca_7k.c +++ b/drivers/net/ethernet/qualcomm/qca_7k.c @@ -45,34 +45,33 @@ qcaspi_read_register(struct qcaspi *qca, u16 reg, u16 *result) { __be16 rx_data; __be16 tx_data; - struct spi_transfer *transfer; - struct spi_message *msg; + struct spi_transfer transfer[2]; + struct spi_message msg; int ret; + memset(transfer, 0, sizeof(transfer)); + + spi_message_init(&msg); + tx_data = cpu_to_be16(QCA7K_SPI_READ | QCA7K_SPI_INTERNAL | reg); + *result = 0; + + transfer[0].tx_buf = &tx_data; + transfer[0].len = QCASPI_CMD_LEN; + transfer[1].rx_buf = &rx_data; + transfer[1].len = QCASPI_CMD_LEN; + + spi_message_add_tail(&transfer[0], &msg); if (qca->legacy_mode) { - msg = &qca->spi_msg1; - transfer = &qca->spi_xfer1; - transfer->tx_buf = &tx_data; - transfer->rx_buf = NULL; - transfer->len = QCASPI_CMD_LEN; - spi_sync(qca->spi_dev, msg); - } else { - msg = &qca->spi_msg2; - transfer = &qca->spi_xfer2[0]; - transfer->tx_buf = &tx_data; - transfer->rx_buf = NULL; - transfer->len = QCASPI_CMD_LEN; - transfer = &qca->spi_xfer2[1]; + spi_sync(qca->spi_dev, &msg); + spi_message_init(&msg); } - transfer->tx_buf = NULL; - transfer->rx_buf = &rx_data; - transfer->len = 
QCASPI_CMD_LEN; - ret = spi_sync(qca->spi_dev, msg); + spi_message_add_tail(&transfer[1], &msg); + ret = spi_sync(qca->spi_dev, &msg); if (!ret) - ret = msg->status; + ret = msg.status; if (ret) qcaspi_spi_error(qca); @@ -86,35 +85,32 @@ int qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value) { __be16 tx_data[2]; - struct spi_transfer *transfer; - struct spi_message *msg; + struct spi_transfer transfer[2]; + struct spi_message msg; int ret; + memset(&transfer, 0, sizeof(transfer)); + + spi_message_init(&msg); + tx_data[0] = cpu_to_be16(QCA7K_SPI_WRITE | QCA7K_SPI_INTERNAL | reg); tx_data[1] = cpu_to_be16(value); + transfer[0].tx_buf = &tx_data[0]; + transfer[0].len = QCASPI_CMD_LEN; + transfer[1].tx_buf = &tx_data[1]; + transfer[1].len = QCASPI_CMD_LEN; + + spi_message_add_tail(&transfer[0], &msg); if (qca->legacy_mode) { - msg = &qca->spi_msg1; - transfer = &qca->spi_xfer1; - transfer->tx_buf = &tx_data[0]; - transfer->rx_buf = NULL; - transfer->len = QCASPI_CMD_LEN; - spi_sync(qca->spi_dev, msg); - } else { - msg = &qca->spi_msg2; - transfer = &qca->spi_xfer2[0]; - transfer->tx_buf = &tx_data[0]; - transfer->rx_buf = NULL; - transfer->len = QCASPI_CMD_LEN; - transfer = &qca->spi_xfer2[1]; + spi_sync(qca->spi_dev, &msg); + spi_message_init(&msg); } - transfer->tx_buf = &tx_data[1]; - transfer->rx_buf = NULL; - transfer->len = QCASPI_CMD_LEN; - ret = spi_sync(qca->spi_dev, msg); + spi_message_add_tail(&transfer[1], &msg); + ret = spi_sync(qca->spi_dev, &msg); if (!ret) - ret = msg->status; + ret = msg.status; if (ret) qcaspi_spi_error(qca); diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index 9c236298fe2125cf26afef756cfd5f3f7250098b..275fc6f154a71e52db57993d4e5e5c2d598abab9 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -99,22 +99,24 @@ static u32 qcaspi_write_burst(struct qcaspi *qca, u8 *src, u32 len) { __be16 cmd; - struct spi_message *msg = &qca->spi_msg2; - struct spi_transfer *transfer = &qca->spi_xfer2[0]; + struct spi_message msg; + struct spi_transfer transfer[2]; int ret; + memset(&transfer, 0, sizeof(transfer)); + spi_message_init(&msg); + cmd = cpu_to_be16(QCA7K_SPI_WRITE | QCA7K_SPI_EXTERNAL); - transfer->tx_buf = &cmd; - transfer->rx_buf = NULL; - transfer->len = QCASPI_CMD_LEN; - transfer = &qca->spi_xfer2[1]; - transfer->tx_buf = src; - transfer->rx_buf = NULL; - transfer->len = len; + transfer[0].tx_buf = &cmd; + transfer[0].len = QCASPI_CMD_LEN; + transfer[1].tx_buf = src; + transfer[1].len = len; - ret = spi_sync(qca->spi_dev, msg); + spi_message_add_tail(&transfer[0], &msg); + spi_message_add_tail(&transfer[1], &msg); + ret = spi_sync(qca->spi_dev, &msg); - if (ret || (msg->actual_length != QCASPI_CMD_LEN + len)) { + if (ret || (msg.actual_length != QCASPI_CMD_LEN + len)) { qcaspi_spi_error(qca); return 0; } @@ -125,17 +127,20 @@ qcaspi_write_burst(struct qcaspi *qca, u8 *src, u32 len) static u32 qcaspi_write_legacy(struct qcaspi *qca, u8 *src, u32 len) { - struct spi_message *msg = &qca->spi_msg1; - struct spi_transfer *transfer = &qca->spi_xfer1; + struct spi_message msg; + struct spi_transfer transfer; int ret; - transfer->tx_buf = src; - transfer->rx_buf = NULL; - transfer->len = len; + memset(&transfer, 0, sizeof(transfer)); + spi_message_init(&msg); + + transfer.tx_buf = src; + transfer.len = len; - ret = spi_sync(qca->spi_dev, msg); + spi_message_add_tail(&transfer, &msg); + ret = spi_sync(qca->spi_dev, &msg); - if (ret || (msg->actual_length != len)) 
{ + if (ret || (msg.actual_length != len)) { qcaspi_spi_error(qca); return 0; } @@ -146,23 +151,25 @@ qcaspi_write_legacy(struct qcaspi *qca, u8 *src, u32 len) static u32 qcaspi_read_burst(struct qcaspi *qca, u8 *dst, u32 len) { - struct spi_message *msg = &qca->spi_msg2; + struct spi_message msg; __be16 cmd; - struct spi_transfer *transfer = &qca->spi_xfer2[0]; + struct spi_transfer transfer[2]; int ret; + memset(&transfer, 0, sizeof(transfer)); + spi_message_init(&msg); + cmd = cpu_to_be16(QCA7K_SPI_READ | QCA7K_SPI_EXTERNAL); - transfer->tx_buf = &cmd; - transfer->rx_buf = NULL; - transfer->len = QCASPI_CMD_LEN; - transfer = &qca->spi_xfer2[1]; - transfer->tx_buf = NULL; - transfer->rx_buf = dst; - transfer->len = len; + transfer[0].tx_buf = &cmd; + transfer[0].len = QCASPI_CMD_LEN; + transfer[1].rx_buf = dst; + transfer[1].len = len; - ret = spi_sync(qca->spi_dev, msg); + spi_message_add_tail(&transfer[0], &msg); + spi_message_add_tail(&transfer[1], &msg); + ret = spi_sync(qca->spi_dev, &msg); - if (ret || (msg->actual_length != QCASPI_CMD_LEN + len)) { + if (ret || (msg.actual_length != QCASPI_CMD_LEN + len)) { qcaspi_spi_error(qca); return 0; } @@ -173,17 +180,20 @@ qcaspi_read_burst(struct qcaspi *qca, u8 *dst, u32 len) static u32 qcaspi_read_legacy(struct qcaspi *qca, u8 *dst, u32 len) { - struct spi_message *msg = &qca->spi_msg1; - struct spi_transfer *transfer = &qca->spi_xfer1; + struct spi_message msg; + struct spi_transfer transfer; int ret; - transfer->tx_buf = NULL; - transfer->rx_buf = dst; - transfer->len = len; + memset(&transfer, 0, sizeof(transfer)); + spi_message_init(&msg); - ret = spi_sync(qca->spi_dev, msg); + transfer.rx_buf = dst; + transfer.len = len; - if (ret || (msg->actual_length != len)) { + spi_message_add_tail(&transfer, &msg); + ret = spi_sync(qca->spi_dev, &msg); + + if (ret || (msg.actual_length != len)) { qcaspi_spi_error(qca); return 0; } @@ -195,19 +205,23 @@ static int qcaspi_tx_cmd(struct qcaspi *qca, u16 cmd) { __be16 tx_data; - struct spi_message *msg = &qca->spi_msg1; - struct spi_transfer *transfer = &qca->spi_xfer1; + struct spi_message msg; + struct spi_transfer transfer; int ret; + memset(&transfer, 0, sizeof(transfer)); + + spi_message_init(&msg); + tx_data = cpu_to_be16(cmd); - transfer->len = sizeof(tx_data); - transfer->tx_buf = &tx_data; - transfer->rx_buf = NULL; + transfer.len = sizeof(cmd); + transfer.tx_buf = &tx_data; + spi_message_add_tail(&transfer, &msg); - ret = spi_sync(qca->spi_dev, msg); + ret = spi_sync(qca->spi_dev, &msg); if (!ret) - ret = msg->status; + ret = msg.status; if (ret) qcaspi_spi_error(qca); @@ -658,7 +672,7 @@ qcaspi_netdev_open(struct net_device *dev) return ret; } - netif_start_queue(qca->net_dev); + /* SPI thread takes care of TX queue */ return 0; } @@ -761,6 +775,9 @@ qcaspi_netdev_tx_timeout(struct net_device *dev) qca->net_dev->stats.tx_errors++; /* Trigger tx queue flush and QCA7000 reset */ qca->sync = QCASPI_SYNC_UNKNOWN; + + if (qca->spi_thread) + wake_up_process(qca->spi_thread); } static int @@ -833,16 +850,6 @@ qcaspi_netdev_setup(struct net_device *dev) qca = netdev_priv(dev); memset(qca, 0, sizeof(struct qcaspi)); - memset(&qca->spi_xfer1, 0, sizeof(struct spi_transfer)); - memset(&qca->spi_xfer2, 0, sizeof(struct spi_transfer) * 2); - - spi_message_init(&qca->spi_msg1); - spi_message_add_tail(&qca->spi_xfer1, &qca->spi_msg1); - - spi_message_init(&qca->spi_msg2); - spi_message_add_tail(&qca->spi_xfer2[0], &qca->spi_msg2); - spi_message_add_tail(&qca->spi_xfer2[1], &qca->spi_msg2); - 
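Note: the qca_7k/qca_spi hunks above remove the long-lived spi_msg1/spi_msg2 and spi_xfer fields from struct qcaspi and instead build the SPI message on the stack for every transaction, which keeps concurrent callers from clobbering each other's transfer setup. A hedged sketch of that pattern for a hypothetical 16-bit register read (this function and its name are illustrative, not code from the patch):

    #include <linux/spi/spi.h>

    static int example_spi_read16(struct spi_device *spi, u16 cmd, u16 *result)
    {
            struct spi_transfer xfer[2] = {};
            struct spi_message msg;
            __be16 tx = cpu_to_be16(cmd);
            __be16 rx = 0;
            int ret;

            spi_message_init(&msg);

            /* One transfer carries the command, the second clocks in the reply */
            xfer[0].tx_buf = &tx;
            xfer[0].len = sizeof(tx);
            xfer[1].rx_buf = &rx;
            xfer[1].len = sizeof(rx);
            spi_message_add_tail(&xfer[0], &msg);
            spi_message_add_tail(&xfer[1], &msg);

            ret = spi_sync(spi, &msg);
            if (!ret)
                    *result = be16_to_cpu(rx);

            return ret;
    }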
memset(&qca->txr, 0, sizeof(qca->txr)); qca->txr.count = TX_RING_MAX_LEN; } @@ -879,22 +886,22 @@ qca_spi_probe(struct spi_device *spi) if ((qcaspi_clkspeed < QCASPI_CLK_SPEED_MIN) || (qcaspi_clkspeed > QCASPI_CLK_SPEED_MAX)) { - dev_info(&spi->dev, "Invalid clkspeed: %d\n", - qcaspi_clkspeed); + dev_err(&spi->dev, "Invalid clkspeed: %d\n", + qcaspi_clkspeed); return -EINVAL; } if ((qcaspi_burst_len < QCASPI_BURST_LEN_MIN) || (qcaspi_burst_len > QCASPI_BURST_LEN_MAX)) { - dev_info(&spi->dev, "Invalid burst len: %d\n", - qcaspi_burst_len); + dev_err(&spi->dev, "Invalid burst len: %d\n", + qcaspi_burst_len); return -EINVAL; } if ((qcaspi_pluggable < QCASPI_PLUGGABLE_MIN) || (qcaspi_pluggable > QCASPI_PLUGGABLE_MAX)) { - dev_info(&spi->dev, "Invalid pluggable: %d\n", - qcaspi_pluggable); + dev_err(&spi->dev, "Invalid pluggable: %d\n", + qcaspi_pluggable); return -EINVAL; } @@ -956,8 +963,8 @@ qca_spi_probe(struct spi_device *spi) } if (register_netdev(qcaspi_devs)) { - dev_info(&spi->dev, "Unable to register net device %s\n", - qcaspi_devs->name); + dev_err(&spi->dev, "Unable to register net device %s\n", + qcaspi_devs->name); free_netdev(qcaspi_devs); return -EFAULT; } diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h index fc4beb1b32d1a070a101b3c48cc092bd14561150..fc0e98726b3613ddd3774169fa13aa6099a5c6e5 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.h +++ b/drivers/net/ethernet/qualcomm/qca_spi.h @@ -83,11 +83,6 @@ struct qcaspi { struct tx_ring txr; struct qcaspi_stats stats; - struct spi_message spi_msg1; - struct spi_message spi_msg2; - struct spi_transfer spi_xfer1; - struct spi_transfer spi_xfer2[2]; - u8 *rx_buffer; u32 buffer_size; u8 sync; diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index b98fcc9e93e5ace2df1ea69f06136f074cd33b77..3669005b9294a15d177cd5ba0af9f3fae211e48a 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -329,6 +329,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 }, { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 }, { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 }, + { PCI_DEVICE(PCI_VENDOR_ID_NCUBE, 0x8168), 0, 0, RTL_CFG_1 }, { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 }, { PCI_VENDOR_ID_DLINK, 0x4300, PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 }, diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index fdf30bfa403bf416572fa45ac3dde48f1e92cded..e87a779bfcfe5a38ba91ce13f4224e27ce19c022 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -959,6 +959,13 @@ static void ravb_adjust_link(struct net_device *ndev) struct ravb_private *priv = netdev_priv(ndev); struct phy_device *phydev = ndev->phydev; bool new_state = false; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + + /* Disable TX and RX right over here, if E-MAC change is ignored */ + if (priv->no_avb_link) + ravb_rcv_snd_disable(ndev); if (phydev->link) { if (phydev->duplex != priv->duplex) { @@ -976,18 +983,21 @@ static void ravb_adjust_link(struct net_device *ndev) ravb_modify(ndev, ECMR, ECMR_TXF, 0); new_state = true; priv->link = phydev->link; - if (priv->no_avb_link) - ravb_rcv_snd_enable(ndev); } } else if (priv->link) { new_state = true; priv->link = 0; priv->speed = 0; priv->duplex = -1; - if (priv->no_avb_link) - 
ravb_rcv_snd_disable(ndev); } + /* Enable TX and RX right over here, if E-MAC change is ignored */ + if (priv->no_avb_link && phydev->link) + ravb_rcv_snd_enable(ndev); + + mmiowb(); + spin_unlock_irqrestore(&priv->lock, flags); + if (new_state && netif_msg_link(priv)) phy_print_status(phydev); } @@ -1094,52 +1104,18 @@ static int ravb_get_link_ksettings(struct net_device *ndev, static int ravb_set_link_ksettings(struct net_device *ndev, const struct ethtool_link_ksettings *cmd) { - struct ravb_private *priv = netdev_priv(ndev); - unsigned long flags; - int error; - if (!ndev->phydev) return -ENODEV; - spin_lock_irqsave(&priv->lock, flags); - - /* Disable TX and RX */ - ravb_rcv_snd_disable(ndev); - - error = phy_ethtool_ksettings_set(ndev->phydev, cmd); - if (error) - goto error_exit; - - if (cmd->base.duplex == DUPLEX_FULL) - priv->duplex = 1; - else - priv->duplex = 0; - - ravb_set_duplex(ndev); - -error_exit: - mdelay(1); - - /* Enable TX and RX */ - ravb_rcv_snd_enable(ndev); - - mmiowb(); - spin_unlock_irqrestore(&priv->lock, flags); - - return error; + return phy_ethtool_ksettings_set(ndev->phydev, cmd); } static int ravb_nway_reset(struct net_device *ndev) { - struct ravb_private *priv = netdev_priv(ndev); int error = -ENODEV; - unsigned long flags; - if (ndev->phydev) { - spin_lock_irqsave(&priv->lock, flags); + if (ndev->phydev) error = phy_start_aneg(ndev->phydev); - spin_unlock_irqrestore(&priv->lock, flags); - } return error; } diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 38080e95a82dccc6286a0cd40829be35ac9612ee..abfb9faadbc40b10a36dfba58d2801e603961bcb 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1821,8 +1821,15 @@ static void sh_eth_adjust_link(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); struct phy_device *phydev = ndev->phydev; + unsigned long flags; int new_state = 0; + spin_lock_irqsave(&mdp->lock, flags); + + /* Disable TX and RX right over here, if E-MAC change is ignored */ + if (mdp->cd->no_psr || mdp->no_ether_link) + sh_eth_rcv_snd_disable(ndev); + if (phydev->link) { if (phydev->duplex != mdp->duplex) { new_state = 1; @@ -1841,18 +1848,21 @@ static void sh_eth_adjust_link(struct net_device *ndev) sh_eth_modify(ndev, ECMR, ECMR_TXF, 0); new_state = 1; mdp->link = phydev->link; - if (mdp->cd->no_psr || mdp->no_ether_link) - sh_eth_rcv_snd_enable(ndev); } } else if (mdp->link) { new_state = 1; mdp->link = 0; mdp->speed = 0; mdp->duplex = -1; - if (mdp->cd->no_psr || mdp->no_ether_link) - sh_eth_rcv_snd_disable(ndev); } + /* Enable TX and RX right over here, if E-MAC change is ignored */ + if ((mdp->cd->no_psr || mdp->no_ether_link) && phydev->link) + sh_eth_rcv_snd_enable(ndev); + + mmiowb(); + spin_unlock_irqrestore(&mdp->lock, flags); + if (new_state && netif_msg_link(mdp)) phy_print_status(phydev); } @@ -1933,39 +1943,10 @@ static int sh_eth_get_link_ksettings(struct net_device *ndev, static int sh_eth_set_link_ksettings(struct net_device *ndev, const struct ethtool_link_ksettings *cmd) { - struct sh_eth_private *mdp = netdev_priv(ndev); - unsigned long flags; - int ret; - if (!ndev->phydev) return -ENODEV; - spin_lock_irqsave(&mdp->lock, flags); - - /* disable tx and rx */ - sh_eth_rcv_snd_disable(ndev); - - ret = phy_ethtool_ksettings_set(ndev->phydev, cmd); - if (ret) - goto error_exit; - - if (cmd->base.duplex == DUPLEX_FULL) - mdp->duplex = 1; - else - mdp->duplex = 0; - - if (mdp->cd->set_duplex) - mdp->cd->set_duplex(ndev); 
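Note: the ravb and sh_eth hunks above move the RX/TX quiescing into the adjust_link callback, under the driver's spinlock, and drop the duplicated disable/enable logic from the ethtool set_link_ksettings and nway_reset paths. Roughly, the callback ends up shaped like the sketch below; struct foo_private, the no_link_signal flag and the foo_rcv_snd_* helpers are assumed placeholders, not the drivers' own identifiers:

    static void foo_adjust_link(struct net_device *ndev)
    {
            struct foo_private *priv = netdev_priv(ndev);
            struct phy_device *phydev = ndev->phydev;
            unsigned long flags;

            spin_lock_irqsave(&priv->lock, flags);

            /* Quiesce the MAC while the E-MAC link input is ignored */
            if (priv->no_link_signal)
                    foo_rcv_snd_disable(ndev);

            if (phydev->link) {
                    /* program duplex/speed changes into the MAC here */
                    priv->link = phydev->link;
            } else if (priv->link) {
                    priv->link = 0;
                    priv->speed = 0;
                    priv->duplex = -1;
            }

            /* Re-enable RX/TX only once the PHY actually reports link up */
            if (priv->no_link_signal && phydev->link)
                    foo_rcv_snd_enable(ndev);

            spin_unlock_irqrestore(&priv->lock, flags);
    }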
- -error_exit: - mdelay(1); - - /* enable tx and rx */ - sh_eth_rcv_snd_enable(ndev); - - spin_unlock_irqrestore(&mdp->lock, flags); - - return ret; + return phy_ethtool_ksettings_set(ndev->phydev, cmd); } /* If it is ever necessary to increase SH_ETH_REG_DUMP_MAX_REGS, the @@ -2156,18 +2137,10 @@ static void sh_eth_get_regs(struct net_device *ndev, struct ethtool_regs *regs, static int sh_eth_nway_reset(struct net_device *ndev) { - struct sh_eth_private *mdp = netdev_priv(ndev); - unsigned long flags; - int ret; - if (!ndev->phydev) return -ENODEV; - spin_lock_irqsave(&mdp->lock, flags); - ret = phy_start_aneg(ndev->phydev); - spin_unlock_irqrestore(&mdp->lock, flags); - - return ret; + return phy_start_aneg(ndev->phydev); } static u32 sh_eth_get_msglevel(struct net_device *ndev) diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index 97035766c291b0a9df2734d1f97d88ac72d5f688..5790cd61436d0b2ca73eeb1d1eeab1b759083784 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -111,7 +111,7 @@ config DWMAC_ROCKCHIP config DWMAC_SOCFPGA tristate "SOCFPGA dwmac support" default ARCH_SOCFPGA - depends on OF && (ARCH_SOCFPGA || COMPILE_TEST) + depends on OF && (ARCH_SOCFPGA || ARCH_STRATIX10 || COMPILE_TEST) select MFD_SYSCON help Support for ethernet controller on Altera SOCFPGA diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index 6e359572b9f0ea53ed46b553fb1cb51273415f57..5b3b06a0a3bf53e1eac9572ae8d14add0c3835e7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -55,6 +55,7 @@ struct socfpga_dwmac { struct device *dev; struct regmap *sys_mgr_base_addr; struct reset_control *stmmac_rst; + struct reset_control *stmmac_ocp_rst; void __iomem *splitter_base; bool f2h_ptp_ref_clk; struct tse_pcs pcs; @@ -262,8 +263,8 @@ static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac) val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII; /* Assert reset to the enet controller before changing the phy mode */ - if (dwmac->stmmac_rst) - reset_control_assert(dwmac->stmmac_rst); + reset_control_assert(dwmac->stmmac_ocp_rst); + reset_control_assert(dwmac->stmmac_rst); regmap_read(sys_mgr_base_addr, reg_offset, &ctrl); ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift); @@ -288,8 +289,8 @@ static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac) /* Deassert reset for the phy configuration to be sampled by * the enet controller, and operation to start in requested mode */ - if (dwmac->stmmac_rst) - reset_control_deassert(dwmac->stmmac_rst); + reset_control_deassert(dwmac->stmmac_ocp_rst); + reset_control_deassert(dwmac->stmmac_rst); if (phymode == PHY_INTERFACE_MODE_SGMII) { if (tse_pcs_init(dwmac->pcs.tse_pcs_base, &dwmac->pcs) != 0) { dev_err(dwmac->dev, "Unable to initialize TSE PCS"); @@ -324,6 +325,15 @@ static int socfpga_dwmac_probe(struct platform_device *pdev) goto err_remove_config_dt; } + dwmac->stmmac_ocp_rst = devm_reset_control_get_optional(dev, "stmmaceth-ocp"); + if (IS_ERR(dwmac->stmmac_ocp_rst)) { + ret = PTR_ERR(dwmac->stmmac_ocp_rst); + dev_err(dev, "error getting reset control of ocp %d\n", ret); + goto err_remove_config_dt; + } + + reset_control_deassert(dwmac->stmmac_ocp_rst); + ret = socfpga_dwmac_parse_data(dwmac, dev); if (ret) { dev_err(dev, "Unable to parse OF data\n"); diff --git a/drivers/net/ethernet/ti/davinci_emac.c 
b/drivers/net/ethernet/ti/davinci_emac.c index 4bb561856af53c0165e1d052a7ebfa2bd279ce68..47a096134043c68037827789ced6f872eb1437d9 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -1387,6 +1387,10 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd) static int match_first_device(struct device *dev, void *data) { + if (dev->parent && dev->parent->of_node) + return of_device_is_compatible(dev->parent->of_node, + "ti,davinci_mdio"); + return !strncmp(dev_name(dev), "davinci_mdio", 12); } diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c index 16c3bfbe19928dfb56b2719490d0b401fae2ee5e..757a3b37ae8a8af8077001d548b65fe862d03c2d 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c @@ -218,6 +218,7 @@ issue: ret = of_mdiobus_register(bus, np1); if (ret) { mdiobus_free(bus); + lp->mii_bus = NULL; return ret; } return 0; diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index 78a6414c5fd994445c7e530065bd6cfa7e5ad213..7d94a78425576c5337fe0445826f887430eb8ac3 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c @@ -89,10 +89,6 @@ static const char banner[] __initconst = KERN_INFO \ "AX.25: bpqether driver version 004\n"; -static char bcast_addr[6]={0xFF,0xFF,0xFF,0xFF,0xFF,0xFF}; - -static char bpq_eth_addr[6]; - static int bpq_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); static int bpq_device_event(struct notifier_block *, unsigned long, void *); @@ -515,8 +511,8 @@ static int bpq_new_device(struct net_device *edev) bpq->ethdev = edev; bpq->axdev = ndev; - memcpy(bpq->dest_addr, bcast_addr, sizeof(bpq_eth_addr)); - memcpy(bpq->acpt_addr, bcast_addr, sizeof(bpq_eth_addr)); + eth_broadcast_addr(bpq->dest_addr); + eth_broadcast_addr(bpq->acpt_addr); err = register_netdevice(ndev); if (err) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 6a77ef38c5495cc62b4c547c95f42a22051cdf2a..aba16d81e9bbac683b3b742be3c3edd88ffbffbe 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -1895,11 +1896,15 @@ static int netvsc_register_vf(struct net_device *vf_netdev) { struct net_device *ndev; struct net_device_context *net_device_ctx; + struct device *pdev = vf_netdev->dev.parent; struct netvsc_device *netvsc_dev; if (vf_netdev->addr_len != ETH_ALEN) return NOTIFY_DONE; + if (!pdev || !dev_is_pci(pdev) || dev_is_pf(pdev)) + return NOTIFY_DONE; + /* * We will use the MAC address to locate the synthetic interface to * associate with the VF interface. If we don't find a matching @@ -2039,6 +2044,16 @@ static int netvsc_probe(struct hv_device *dev, memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); + /* We must get rtnl lock before scheduling nvdev->subchan_work, + * otherwise netvsc_subchan_work() can get rtnl lock first and wait + * all subchannels to show up, but that may not happen because + * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer() + * -> ... -> device_add() -> ... -> __device_attach() can't get + * the device lock, so all the subchannels can't be processed -- + * finally netvsc_subchan_work() hangs for ever. 
+ */ + rtnl_lock(); + if (nvdev->num_chn > 1) schedule_work(&nvdev->subchan_work); @@ -2057,7 +2072,6 @@ static int netvsc_probe(struct hv_device *dev, else net->max_mtu = ETH_DATA_LEN; - rtnl_lock(); ret = register_netdevice(net); if (ret != 0) { pr_err("Unable to register netdev.\n"); diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index cb03a6ea076a66c7ca856e912ed42a97741e05da..17025d46bdac8412e32b96fc86aaa28afc6d75f0 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -1299,6 +1299,7 @@ out: /* setting up multiple channels failed */ net_device->max_chn = 1; net_device->num_chn = 1; + return net_device; err_dev_remv: rndis_filter_device_remove(dev, net_device); diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index 548d9d026a85e2ff0e329f0f61454f555ab1a522..5c48bdb6f6787acbb74819ad7601ad302f644623 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -940,7 +940,7 @@ at86rf230_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) static int at86rf230_ed(struct ieee802154_hw *hw, u8 *level) { - BUG_ON(!level); + WARN_ON(!level); *level = 0xbe; return 0; } @@ -1121,8 +1121,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw, if (changed & IEEE802154_AFILT_SADDR_CHANGED) { u16 addr = le16_to_cpu(filt->short_addr); - dev_vdbg(&lp->spi->dev, - "at86rf230_set_hw_addr_filt called for saddr\n"); + dev_vdbg(&lp->spi->dev, "%s called for saddr\n", __func__); __at86rf230_write(lp, RG_SHORT_ADDR_0, addr); __at86rf230_write(lp, RG_SHORT_ADDR_1, addr >> 8); } @@ -1130,8 +1129,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw, if (changed & IEEE802154_AFILT_PANID_CHANGED) { u16 pan = le16_to_cpu(filt->pan_id); - dev_vdbg(&lp->spi->dev, - "at86rf230_set_hw_addr_filt called for pan id\n"); + dev_vdbg(&lp->spi->dev, "%s called for pan id\n", __func__); __at86rf230_write(lp, RG_PAN_ID_0, pan); __at86rf230_write(lp, RG_PAN_ID_1, pan >> 8); } @@ -1140,15 +1138,13 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw, u8 i, addr[8]; memcpy(addr, &filt->ieee_addr, 8); - dev_vdbg(&lp->spi->dev, - "at86rf230_set_hw_addr_filt called for IEEE addr\n"); + dev_vdbg(&lp->spi->dev, "%s called for IEEE addr\n", __func__); for (i = 0; i < 8; i++) __at86rf230_write(lp, RG_IEEE_ADDR_0 + i, addr[i]); } if (changed & IEEE802154_AFILT_PANC_CHANGED) { - dev_vdbg(&lp->spi->dev, - "at86rf230_set_hw_addr_filt called for panc change\n"); + dev_vdbg(&lp->spi->dev, "%s called for panc change\n", __func__); if (filt->pan_coord) at86rf230_write_subreg(lp, SR_AACK_I_AM_COORD, 1); else @@ -1252,7 +1248,6 @@ at86rf230_set_cca_mode(struct ieee802154_hw *hw, return at86rf230_write_subreg(lp, SR_CCA_MODE, val); } - static int at86rf230_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm) { diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c index 0d673f7682ee065223b64462bc2c4df0a03826a0..176395e4b7bb0ca628bdd22b4f13a23e425bfae2 100644 --- a/drivers/net/ieee802154/fakelb.c +++ b/drivers/net/ieee802154/fakelb.c @@ -49,7 +49,7 @@ struct fakelb_phy { static int fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level) { - BUG_ON(!level); + WARN_ON(!level); *level = 0xbe; return 0; diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index e7f7a1a002ee05bace0a05aab920eb3537a1d40b..58133c9f701b8cc65d0fcffa566511f691c5a5c3 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -73,10 +73,23 @@ static int 
ipvlan_set_port_mode(struct ipvl_port *port, u16 nval) { struct ipvl_dev *ipvlan; struct net_device *mdev = port->dev; - int err = 0; + unsigned int flags; + int err; ASSERT_RTNL(); if (port->mode != nval) { + list_for_each_entry(ipvlan, &port->ipvlans, pnode) { + flags = ipvlan->dev->flags; + if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S) { + err = dev_change_flags(ipvlan->dev, + flags | IFF_NOARP); + } else { + err = dev_change_flags(ipvlan->dev, + flags & ~IFF_NOARP); + } + if (unlikely(err)) + goto fail; + } if (nval == IPVLAN_MODE_L3S) { /* New mode is L3S */ err = ipvlan_register_nf_hook(read_pnet(&port->pnet)); @@ -84,21 +97,28 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval) mdev->l3mdev_ops = &ipvl_l3mdev_ops; mdev->priv_flags |= IFF_L3MDEV_MASTER; } else - return err; + goto fail; } else if (port->mode == IPVLAN_MODE_L3S) { /* Old mode was L3S */ mdev->priv_flags &= ~IFF_L3MDEV_MASTER; ipvlan_unregister_nf_hook(read_pnet(&port->pnet)); mdev->l3mdev_ops = NULL; } - list_for_each_entry(ipvlan, &port->ipvlans, pnode) { - if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S) - ipvlan->dev->flags |= IFF_NOARP; - else - ipvlan->dev->flags &= ~IFF_NOARP; - } port->mode = nval; } + return 0; + +fail: + /* Undo the flags changes that have been done so far. */ + list_for_each_entry_continue_reverse(ipvlan, &port->ipvlans, pnode) { + flags = ipvlan->dev->flags; + if (port->mode == IPVLAN_MODE_L3 || + port->mode == IPVLAN_MODE_L3S) + dev_change_flags(ipvlan->dev, flags | IFF_NOARP); + else + dev_change_flags(ipvlan->dev, flags & ~IFF_NOARP); + } + return err; } diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c index 0c5b68e7da51aa8d0c7c73e7c6b3cc632b1e3510..9b31670548433276a431458b69e515aa62debe7f 100644 --- a/drivers/net/phy/mdio-mux-bcm-iproc.c +++ b/drivers/net/phy/mdio-mux-bcm-iproc.c @@ -22,7 +22,7 @@ #include #include -#define MDIO_PARAM_OFFSET 0x00 +#define MDIO_PARAM_OFFSET 0x23c #define MDIO_PARAM_MIIM_CYCLE 29 #define MDIO_PARAM_INTERNAL_SEL 25 #define MDIO_PARAM_BUS_ID 22 @@ -30,20 +30,22 @@ #define MDIO_PARAM_PHY_ID 16 #define MDIO_PARAM_PHY_DATA 0 -#define MDIO_READ_OFFSET 0x04 +#define MDIO_READ_OFFSET 0x240 #define MDIO_READ_DATA_MASK 0xffff -#define MDIO_ADDR_OFFSET 0x08 +#define MDIO_ADDR_OFFSET 0x244 -#define MDIO_CTRL_OFFSET 0x0C +#define MDIO_CTRL_OFFSET 0x248 #define MDIO_CTRL_WRITE_OP 0x1 #define MDIO_CTRL_READ_OP 0x2 -#define MDIO_STAT_OFFSET 0x10 +#define MDIO_STAT_OFFSET 0x24c #define MDIO_STAT_DONE 1 #define BUS_MAX_ADDR 32 #define EXT_BUS_START_ADDR 16 +#define MDIO_REG_ADDR_SPACE_SIZE 0x250 + struct iproc_mdiomux_desc { void *mux_handle; void __iomem *base; @@ -169,6 +171,14 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev) md->dev = &pdev->dev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res->start & 0xfff) { + /* For backward compatibility in case the + * base address is specified with an offset. 
+ */ + dev_info(&pdev->dev, "fix base address in dt-blob\n"); + res->start &= ~0xfff; + res->end = res->start + MDIO_REG_ADDR_SPACE_SIZE - 1; + } md->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(md->base)) { dev_err(&pdev->dev, "failed to ioremap register\n"); diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c index 2e5150b0b8d52c5dd784a3df1818962d64972898..7a14e8170e8263fe84f362f4e04e313e2f3d312b 100644 --- a/drivers/net/phy/xilinx_gmii2rgmii.c +++ b/drivers/net/phy/xilinx_gmii2rgmii.c @@ -40,8 +40,11 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev) { struct gmii2rgmii *priv = phydev->priv; u16 val = 0; + int err; - priv->phy_drv->read_status(phydev); + err = priv->phy_drv->read_status(phydev); + if (err < 0) + return err; val = mdiobus_read(phydev->mdio.bus, priv->addr, XILINX_GMII2RGMII_REG); val &= ~XILINX_GMII2RGMII_SPEED_MASK; @@ -81,6 +84,11 @@ static int xgmiitorgmii_probe(struct mdio_device *mdiodev) return -EPROBE_DEFER; } + if (!priv->phy_dev->drv) { + dev_info(dev, "Attached phy not ready\n"); + return -EPROBE_DEFER; + } + priv->addr = mdiodev->addr; priv->phy_drv = priv->phy_dev->drv; memcpy(&priv->conv_phy_drv, priv->phy_dev->drv, diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index 71e2aef6b7a1b5cdb052fed4aeec2a843a1a9a4d..951892da3352e3b5f303d3f8c8659cc6031323cc 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -429,6 +429,9 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev, if (!skb) goto out; + if (skb_mac_header_len(skb) < ETH_HLEN) + goto drop; + if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) goto drop; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index cb17ffadfc30ef6f9230bfc4149265b921912c0f..e0baea2dfd3cbe9d0b7d992ba97c67c5cfdd1601 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -534,14 +534,6 @@ static void tun_queue_purge(struct tun_file *tfile) skb_queue_purge(&tfile->sk.sk_error_queue); } -static void tun_cleanup_tx_array(struct tun_file *tfile) -{ - if (tfile->tx_array.ring.queue) { - skb_array_cleanup(&tfile->tx_array); - memset(&tfile->tx_array, 0, sizeof(tfile->tx_array)); - } -} - static void __tun_detach(struct tun_file *tfile, bool clean) { struct tun_file *ntfile; @@ -583,7 +575,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean) tun->dev->reg_state == NETREG_REGISTERED) unregister_netdevice(tun->dev); } - tun_cleanup_tx_array(tfile); + skb_array_cleanup(&tfile->tx_array); sock_put(&tfile->sk); } } @@ -623,13 +615,11 @@ static void tun_detach_all(struct net_device *dev) /* Drop read queue */ tun_queue_purge(tfile); sock_put(&tfile->sk); - tun_cleanup_tx_array(tfile); } list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) { tun_enable_queue(tfile); tun_queue_purge(tfile); sock_put(&tfile->sk); - tun_cleanup_tx_array(tfile); } BUG_ON(tun->numdisabled != 0); @@ -675,7 +665,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte } if (!tfile->detached && - skb_array_init(&tfile->tx_array, dev->tx_queue_len, GFP_KERNEL)) { + skb_array_resize(&tfile->tx_array, dev->tx_queue_len, GFP_KERNEL)) { err = -ENOMEM; goto out; } @@ -2624,6 +2614,11 @@ static int tun_chr_open(struct inode *inode, struct file * file) &tun_proto, 0); if (!tfile) return -ENOMEM; + if (skb_array_init(&tfile->tx_array, 0, GFP_KERNEL)) { + sk_free(&tfile->sk); + return -ENOMEM; + } + RCU_INIT_POINTER(tfile->tun, NULL); tfile->flags = 0; tfile->ifindex = 0; @@ -2644,8 +2639,6 @@ static int 
tun_chr_open(struct inode *inode, struct file * file) sock_set_flag(&tfile->sk, SOCK_ZEROCOPY); - memset(&tfile->tx_array, 0, sizeof(tfile->tx_array)); - return 0; } diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 6d3811c869fdddeadd3110f320b2ee5cb2c18f96..0c02822c4c2299e1995efb4ddac0cb1b2e3a1cf7 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1205,13 +1205,13 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */ {QMI_FIXED_INTF(0x1199, 0x9063, 8)}, /* Sierra Wireless EM7305 */ {QMI_FIXED_INTF(0x1199, 0x9063, 10)}, /* Sierra Wireless EM7305 */ - {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */ - {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */ - {QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */ - {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */ - {QMI_FIXED_INTF(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */ - {QMI_FIXED_INTF(0x1199, 0x907b, 10)}, /* Sierra Wireless EM74xx */ - {QMI_FIXED_INTF(0x1199, 0x9091, 8)}, /* Sierra Wireless EM7565 */ + {QMI_QUIRK_SET_DTR(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */ + {QMI_QUIRK_SET_DTR(0x1199, 0x9071, 10)},/* Sierra Wireless MC74xx */ + {QMI_QUIRK_SET_DTR(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */ + {QMI_QUIRK_SET_DTR(0x1199, 0x9079, 10)},/* Sierra Wireless EM74xx */ + {QMI_QUIRK_SET_DTR(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */ + {QMI_QUIRK_SET_DTR(0x1199, 0x907b, 10)},/* Sierra Wireless EM74xx */ + {QMI_QUIRK_SET_DTR(0x1199, 0x9091, 8)}, /* Sierra Wireless EM7565 */ {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ @@ -1245,11 +1245,11 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */ {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */ - {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e */ + {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */ {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */ {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */ - {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */ + {QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */ diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index 5f565bd574da3bc7ce741e3b280a9ff5dece4352..48ba80a8ca5ce8e566931979edcff4bcfe47bc2e 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c @@ -681,7 +681,7 @@ static void rtl8150_set_multicast(struct net_device *netdev) (netdev->flags & IFF_ALLMULTI)) { rx_creg &= 0xfffe; rx_creg |= 0x0002; - dev_info(&netdev->dev, "%s: allmulti set\n", netdev->name); + dev_dbg(&netdev->dev, "%s: allmulti set\n", netdev->name); } else { /* ~RX_MULTICAST, ~RX_PROMISCUOUS */ rx_creg &= 0x00fc; diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index 
7a6a1fe793090b8e28f5ef075f5ebc2ad385b5eb..05553d2524469f97e4a02bb48f43f6820ad2b3e5 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c @@ -82,6 +82,9 @@ static bool turbo_mode = true; module_param(turbo_mode, bool, 0644); MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction"); +static int smsc75xx_link_ok_nopm(struct usbnet *dev); +static int smsc75xx_phy_gig_workaround(struct usbnet *dev); + static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index, u32 *data, int in_pm) { @@ -852,6 +855,9 @@ static int smsc75xx_phy_initialize(struct usbnet *dev) return -EIO; } + /* phy workaround for gig link */ + smsc75xx_phy_gig_workaround(dev); + smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE, ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); @@ -987,6 +993,62 @@ static int smsc75xx_wait_ready(struct usbnet *dev, int in_pm) return -EIO; } +static int smsc75xx_phy_gig_workaround(struct usbnet *dev) +{ + struct mii_if_info *mii = &dev->mii; + int ret = 0, timeout = 0; + u32 buf, link_up = 0; + + /* Set the phy in Gig loopback */ + smsc75xx_mdio_write(dev->net, mii->phy_id, MII_BMCR, 0x4040); + + /* Wait for the link up */ + do { + link_up = smsc75xx_link_ok_nopm(dev); + usleep_range(10000, 20000); + timeout++; + } while ((!link_up) && (timeout < 1000)); + + if (timeout >= 1000) { + netdev_warn(dev->net, "Timeout waiting for PHY link up\n"); + return -EIO; + } + + /* phy reset */ + ret = smsc75xx_read_reg(dev, PMT_CTL, &buf); + if (ret < 0) { + netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n", ret); + return ret; + } + + buf |= PMT_CTL_PHY_RST; + + ret = smsc75xx_write_reg(dev, PMT_CTL, buf); + if (ret < 0) { + netdev_warn(dev->net, "Failed to write PMT_CTL: %d\n", ret); + return ret; + } + + timeout = 0; + do { + usleep_range(10000, 20000); + ret = smsc75xx_read_reg(dev, PMT_CTL, &buf); + if (ret < 0) { + netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n", + ret); + return ret; + } + timeout++; + } while ((buf & PMT_CTL_PHY_RST) && (timeout < 100)); + + if (timeout >= 100) { + netdev_warn(dev->net, "timeout waiting for PHY Reset\n"); + return -EIO; + } + + return 0; +} + static int smsc75xx_reset(struct usbnet *dev) { struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index 33df76405b869a8504415fc8529222616bac9586..18b648648adb2e5676fadf3fc66721e7f84ac972 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -192,7 +192,7 @@ static int uhdlc_init(struct ucc_hdlc_private *priv) priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param), ALIGNMENT_OF_UCC_HDLC_PRAM); - if (priv->ucc_pram_offset < 0) { + if (IS_ERR_VALUE(priv->ucc_pram_offset)) { dev_err(priv->dev, "Can not allocate MURAM for hdlc parameter.\n"); ret = -ENOMEM; goto free_tx_bd; @@ -228,14 +228,14 @@ static int uhdlc_init(struct ucc_hdlc_private *priv) /* Alloc riptr, tiptr */ riptr = qe_muram_alloc(32, 32); - if (riptr < 0) { + if (IS_ERR_VALUE(riptr)) { dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n"); ret = -ENOMEM; goto free_tx_skbuff; } tiptr = qe_muram_alloc(32, 32); - if (tiptr < 0) { + if (IS_ERR_VALUE(tiptr)) { dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n"); ret = -ENOMEM; goto free_riptr; diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c index 
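The fsl_ucc_hdlc hunks above switch the qe_muram_alloc() error checks from "< 0" to IS_ERR_VALUE() because the allocator returns an unsigned offset, so a signed comparison can never see the negative errno it encodes on failure. A minimal user-space sketch of the difference; the IS_ERR_VALUE() and MAX_ERRNO definitions below are approximations of include/linux/err.h, not the kernel headers themselves:

#include <stdio.h>

/* approximations of the kernel's definitions, for a self-contained demo */
#define MAX_ERRNO 4095
#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

int main(void)
{
    /* a qe_muram_alloc()-style unsigned offset that encodes -ENOMEM (-12) */
    unsigned long offset = (unsigned long)-12;

    if (offset < 0)                 /* always false: offset is unsigned */
        puts("never printed");
    if (IS_ERR_VALUE(offset))       /* detects the error encoding directly */
        puts("allocation failed");
    return 0;
}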
4698450c77d1ef8cac62c163e3e8f8c06b99dc13..bb43d176eb4e38ec875e37ac2f4985159bcdab04 100644 --- a/drivers/net/wan/lmc/lmc_main.c +++ b/drivers/net/wan/lmc/lmc_main.c @@ -1371,7 +1371,7 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/ case 0x001: printk(KERN_WARNING "%s: Master Abort (naughty)\n", dev->name); break; - case 0x010: + case 0x002: printk(KERN_WARNING "%s: Target Abort (not so naughty)\n", dev->name); break; default: diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 0aeeb233af780469fcc907ba5ee0bb53086d606a..21642bab485a12d0a6cda34161b9e2821d456b5d 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -215,11 +215,12 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar) spin_lock_bh(&htt->rx_ring.lock); ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level - htt->rx_ring.fill_cnt)); - spin_unlock_bh(&htt->rx_ring.lock); if (ret) ath10k_htt_rx_ring_free(htt); + spin_unlock_bh(&htt->rx_ring.lock); + return ret; } @@ -231,7 +232,9 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt) skb_queue_purge(&htt->rx_in_ord_compl_q); skb_queue_purge(&htt->tx_fetch_ind_q); + spin_lock_bh(&htt->rx_ring.lock); ath10k_htt_rx_ring_free(htt); + spin_unlock_bh(&htt->rx_ring.lock); dma_free_coherent(htt->ar->dev, (htt->rx_ring.size * diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index df11bb44998888c52632c72b856f06444be7a85b..cdcfb175ad9b8cf300af30a70bdaf3c234740b72 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -3074,6 +3074,13 @@ static int ath10k_update_channel_list(struct ath10k *ar) passive = channel->flags & IEEE80211_CHAN_NO_IR; ch->passive = passive; + /* the firmware is ignoring the "radar" flag of the + * channel and is scanning actively using Probe Requests + * on "Radar detection"/DFS channels which are not + * marked as "available" + */ + ch->passive |= ch->chan_radar; + ch->freq = channel->center_freq; ch->band_center_freq1 = channel->center_freq; ch->min_power = 0; @@ -4008,6 +4015,7 @@ void ath10k_mac_tx_push_pending(struct ath10k *ar) rcu_read_unlock(); spin_unlock_bh(&ar->txqs_lock); } +EXPORT_SYMBOL(ath10k_mac_tx_push_pending); /************/ /* Scanning */ @@ -5923,8 +5931,19 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk) ath10k_mac_max_vht_nss(vht_mcs_mask))); if (changed & IEEE80211_RC_BW_CHANGED) { - ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n", - sta->addr, bw); + enum wmi_phy_mode mode; + + mode = chan_to_phymode(&def); + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d phymode %d\n", + sta->addr, bw, mode); + + err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, + WMI_PEER_PHYMODE, mode); + if (err) { + ath10k_warn(ar, "failed to update STA %pM peer phymode %d: %d\n", + sta->addr, mode, err); + goto exit; + } err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, WMI_PEER_CHAN_WIDTH, bw); @@ -5965,6 +5984,7 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk) sta->addr); } +exit: mutex_unlock(&ar->conf_mutex); } diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c index 03a69e5b11165753717517f098480306af881f26..da9dbf3ddaa5e5ec2c5c072641bc8d5cb963f7f4 100644 --- a/drivers/net/wireless/ath/ath10k/sdio.c +++ b/drivers/net/wireless/ath/ath10k/sdio.c @@ -30,6 +30,7 @@ #include "debug.h" #include "hif.h" #include "htc.h" +#include "mac.h" 
#include "targaddrs.h" #include "trace.h" #include "sdio.h" @@ -396,6 +397,7 @@ static int ath10k_sdio_mbox_rx_process_packet(struct ath10k *ar, int ret; payload_len = le16_to_cpu(htc_hdr->len); + skb->len = payload_len + sizeof(struct ath10k_htc_hdr); if (trailer_present) { trailer = skb->data + sizeof(*htc_hdr) + @@ -434,12 +436,14 @@ static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar, enum ath10k_htc_ep_id id; int ret, i, *n_lookahead_local; u32 *lookaheads_local; + int lookahead_idx = 0; for (i = 0; i < ar_sdio->n_rx_pkts; i++) { lookaheads_local = lookaheads; n_lookahead_local = n_lookahead; - id = ((struct ath10k_htc_hdr *)&lookaheads[i])->eid; + id = ((struct ath10k_htc_hdr *) + &lookaheads[lookahead_idx++])->eid; if (id >= ATH10K_HTC_EP_COUNT) { ath10k_warn(ar, "invalid endpoint in look-ahead: %d\n", @@ -462,6 +466,7 @@ static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar, /* Only read lookahead's from RX trailers * for the last packet in a bundle. */ + lookahead_idx--; lookaheads_local = NULL; n_lookahead_local = NULL; } @@ -1342,6 +1347,8 @@ static void ath10k_sdio_irq_handler(struct sdio_func *func) break; } while (time_before(jiffies, timeout) && !done); + ath10k_mac_tx_push_pending(ar); + sdio_claim_host(ar_sdio->func); if (ret && ret != -ECANCELED) diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index 7616c1c4bbd32a5f4cdf343fe8ece9a90bf2db22..baec856af90ffccd7bf9f21a6e3bae2aaffba552 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -1451,6 +1451,11 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar) cfg->keep_alive_pattern_size = __cpu_to_le32(0); cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1); cfg->max_tdls_concurrent_buffer_sta = __cpu_to_le32(1); + cfg->wmi_send_separate = __cpu_to_le32(0); + cfg->num_ocb_vdevs = __cpu_to_le32(0); + cfg->num_ocb_channels = __cpu_to_le32(0); + cfg->num_ocb_schedules = __cpu_to_le32(0); + cfg->host_capab = __cpu_to_le32(0); ath10k_wmi_put_host_mem_chunks(ar, chunks); diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h index 22cf011e839afc190c55c65a387351787369e37d..e75bba0bbf6746ed50f76b4c4096275d402495cc 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h @@ -1228,6 +1228,11 @@ struct wmi_tlv_resource_config { __le32 keep_alive_pattern_size; __le32 max_tdls_concurrent_sleep_sta; __le32 max_tdls_concurrent_buffer_sta; + __le32 wmi_send_separate; + __le32 num_ocb_vdevs; + __le32 num_ocb_channels; + __le32 num_ocb_schedules; + __le32 host_capab; } __packed; struct wmi_tlv_init_cmd { diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index bab876cf25fee02279a4a636b2d9aa430c2efab1..d0e05aa437e36549515016d72a35f2cf2dd14629 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -6002,6 +6002,7 @@ enum wmi_peer_param { WMI_PEER_NSS = 0x5, WMI_PEER_USE_4ADDR = 0x6, WMI_PEER_DEBUG = 0xa, + WMI_PEER_PHYMODE = 0xd, WMI_PEER_DUMMY_VAR = 0xff, /* dummy parameter for STA PS workaround */ }; diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 8c5c2dd8fa7f13c4d12ebb98fa354deff212e124..a7f506eb7b366475eac78e0393504b29991063ad 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -2915,16 +2915,19 @@ void ath9k_hw_apply_txpower(struct ath_hw 
*ah, struct ath9k_channel *chan, struct ath_regulatory *reg = ath9k_hw_regulatory(ah); struct ieee80211_channel *channel; int chan_pwr, new_pwr; + u16 ctl = NO_CTL; if (!chan) return; + if (!test) + ctl = ath9k_regd_get_ctl(reg, chan); + channel = chan->chan; chan_pwr = min_t(int, channel->max_power * 2, MAX_RATE_POWER); new_pwr = min_t(int, chan_pwr, reg->power_limit); - ah->eep_ops->set_txpower(ah, chan, - ath9k_regd_get_ctl(reg, chan), + ah->eep_ops->set_txpower(ah, chan, ctl, get_antenna_gain(ah, chan), new_pwr, test); } diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index d8b041f48ca8e17ccb59fe4b099c9814d8e76a7c..fa64c1cc94aedb9ca80d1fbf1e9f78230e530ed7 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -86,7 +86,8 @@ static void ath_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_sta *sta = info->status.status_driver_data[0]; - if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) { + if (info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS | + IEEE80211_TX_STATUS_EOSP)) { ieee80211_tx_status(hw, skb); return; } diff --git a/drivers/net/wireless/broadcom/b43/leds.c b/drivers/net/wireless/broadcom/b43/leds.c index cb987c2ecc6bf5295684c11da9604c03ef4d2e32..87131f6632929963506d0258d1bc20c97c09e5b8 100644 --- a/drivers/net/wireless/broadcom/b43/leds.c +++ b/drivers/net/wireless/broadcom/b43/leds.c @@ -131,7 +131,7 @@ static int b43_register_led(struct b43_wldev *dev, struct b43_led *led, led->wl = dev->wl; led->index = led_index; led->activelow = activelow; - strncpy(led->name, name, sizeof(led->name)); + strlcpy(led->name, name, sizeof(led->name)); atomic_set(&led->state, 0); led->led_dev.name = led->name; diff --git a/drivers/net/wireless/broadcom/b43legacy/leds.c b/drivers/net/wireless/broadcom/b43legacy/leds.c index fd4565389c77df59056d239e2bc778164a622496..bc922118b6ac641aa1caa800489f77472542fe60 100644 --- a/drivers/net/wireless/broadcom/b43legacy/leds.c +++ b/drivers/net/wireless/broadcom/b43legacy/leds.c @@ -101,7 +101,7 @@ static int b43legacy_register_led(struct b43legacy_wldev *dev, led->dev = dev; led->index = led_index; led->activelow = activelow; - strncpy(led->name, name, sizeof(led->name)); + strlcpy(led->name, name, sizeof(led->name)); led->led_dev.name = led->name; led->led_dev.default_trigger = default_trigger; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 952d883477563c577e7e7e7d471be734ae14ad3d..cb0f7a043c39fdb70344a2f3d3b788506adc1d29 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -4265,6 +4265,13 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus) brcmf_dbg(TRACE, "Enter\n"); if (bus) { + /* Stop watchdog task */ + if (bus->watchdog_tsk) { + send_sig(SIGTERM, bus->watchdog_tsk, 1); + kthread_stop(bus->watchdog_tsk); + bus->watchdog_tsk = NULL; + } + /* De-register interrupt handler */ brcmf_sdiod_intr_unregister(bus->sdiodev); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c index b9672da24a9d3d8f7b58cbe19cc282281cd271ef..b24bc57ca91b8a61bcbf0e26c12e3c3805e820d4 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c @@ -213,7 +213,7 @@ static const s16 
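The b43 and b43legacy LED hunks above replace strncpy() with strlcpy() because strncpy() leaves the destination without a terminating NUL whenever the source is at least as long as the buffer, and led->name is later consumed as a C string. A small user-space illustration; copy_str() below is a local stand-in for strlcpy(), which is not part of standard C:

#include <stdio.h>
#include <string.h>

/* strlcpy-like helper: always NUL-terminates when size > 0 */
static size_t copy_str(char *dst, const char *src, size_t size)
{
    size_t len = strlen(src);

    if (size) {
        size_t n = len >= size ? size - 1 : len;

        memcpy(dst, src, n);
        dst[n] = '\0';
    }
    return len;
}

int main(void)
{
    char name[8];

    strncpy(name, "b43-phy0::tx", sizeof(name));
    printf("terminated after strncpy: %s\n",
           memchr(name, '\0', sizeof(name)) ? "yes" : "no");   /* "no" */

    copy_str(name, "b43-phy0::tx", sizeof(name));
    printf("after copy_str: %s\n", name);                      /* "b43-phy" */
    return 0;
}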
log_table[] = { 30498, 31267, 32024, - 32768 + 32767 }; #define LOG_TABLE_SIZE 32 /* log_table size */ diff --git a/drivers/net/wireless/marvell/libertas/dev.h b/drivers/net/wireless/marvell/libertas/dev.h index dd1ee1f0af48968046c4ca605e90ae53754fba89..469134930026520bd6253ecee125af5cccee19b3 100644 --- a/drivers/net/wireless/marvell/libertas/dev.h +++ b/drivers/net/wireless/marvell/libertas/dev.h @@ -104,6 +104,7 @@ struct lbs_private { u8 fw_ready; u8 surpriseremoved; u8 setup_fw_on_resume; + u8 power_up_on_resume; int (*hw_host_to_card) (struct lbs_private *priv, u8 type, u8 *payload, u16 nb); void (*reset_card) (struct lbs_private *priv); int (*power_save) (struct lbs_private *priv); diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c index 2300e796c6ab9e8106e132b3988fa240dc4783a4..43743c26c071f538f1942696aa97d20b0cbf091d 100644 --- a/drivers/net/wireless/marvell/libertas/if_sdio.c +++ b/drivers/net/wireless/marvell/libertas/if_sdio.c @@ -1290,15 +1290,23 @@ static void if_sdio_remove(struct sdio_func *func) static int if_sdio_suspend(struct device *dev) { struct sdio_func *func = dev_to_sdio_func(dev); - int ret; struct if_sdio_card *card = sdio_get_drvdata(func); + struct lbs_private *priv = card->priv; + int ret; mmc_pm_flag_t flags = sdio_get_host_pm_caps(func); + priv->power_up_on_resume = false; /* If we're powered off anyway, just let the mmc layer remove the * card. */ - if (!lbs_iface_active(card->priv)) - return -ENOSYS; + if (!lbs_iface_active(priv)) { + if (priv->fw_ready) { + priv->power_up_on_resume = true; + if_sdio_power_off(card); + } + + return 0; + } dev_info(dev, "%s: suspend: PM flags = 0x%x\n", sdio_func_id(func), flags); @@ -1306,9 +1314,14 @@ static int if_sdio_suspend(struct device *dev) /* If we aren't being asked to wake on anything, we should bail out * and let the SD stack power down the card. 
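The phy_qmath change above caps the final log_table[] entry at 32767: the table holds s16 values, and 32768 is not representable in a signed 16-bit integer, so the old initializer would be truncated by the implicit conversion. A trivial check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int32_t old_entry = 32768;

    printf("32768 fits in s16: %s\n", old_entry <= INT16_MAX ? "yes" : "no");
    printf("largest s16 value: %d\n", INT16_MAX);   /* 32767, the new entry */
    return 0;
}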
*/ - if (card->priv->wol_criteria == EHS_REMOVE_WAKEUP) { + if (priv->wol_criteria == EHS_REMOVE_WAKEUP) { dev_info(dev, "Suspend without wake params -- powering down card\n"); - return -ENOSYS; + if (priv->fw_ready) { + priv->power_up_on_resume = true; + if_sdio_power_off(card); + } + + return 0; } if (!(flags & MMC_PM_KEEP_POWER)) { @@ -1321,7 +1334,7 @@ static int if_sdio_suspend(struct device *dev) if (ret) return ret; - ret = lbs_suspend(card->priv); + ret = lbs_suspend(priv); if (ret) return ret; @@ -1336,6 +1349,11 @@ static int if_sdio_resume(struct device *dev) dev_info(dev, "%s: resume: we're back\n", sdio_func_id(func)); + if (card->priv->power_up_on_resume) { + if_sdio_power_on(card); + wait_event(card->pwron_waitq, card->priv->fw_ready); + } + ret = lbs_resume(card->priv); return ret; diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index 9935bd09db1fb309090222be94c85ec98917b5cc..d4947e3a909ec6758a7554647f18125d3c6cebbf 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c @@ -2928,6 +2928,8 @@ static void rndis_wlan_auth_indication(struct usbnet *usbdev, while (buflen >= sizeof(*auth_req)) { auth_req = (void *)buf; + if (buflen < le32_to_cpu(auth_req->length)) + return; type = "unknown"; flags = le32_to_cpu(auth_req->flags); pairwise_error = false; diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c index 761cf8573a805e272121fa05bf129f1ee600a10a..f48c3f62966d43f0f74c83f311958f89da62f3dc 100644 --- a/drivers/net/wireless/ti/wlcore/cmd.c +++ b/drivers/net/wireless/ti/wlcore/cmd.c @@ -35,6 +35,7 @@ #include "wl12xx_80211.h" #include "cmd.h" #include "event.h" +#include "ps.h" #include "tx.h" #include "hw_ops.h" @@ -191,6 +192,10 @@ int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl, timeout_time = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT); + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + return ret; + do { if (time_after(jiffies, timeout_time)) { wl1271_debug(DEBUG_CMD, "timeout waiting for event %d", @@ -222,6 +227,7 @@ int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl, } while (!event); out: + wl1271_ps_elp_sleep(wl); kfree(events_vector); return ret; } diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c index 0f15696195f884f59c82e431757593362a0489ca..078a4940bc5c3bcd9a496b8afec6536c61fee5c8 100644 --- a/drivers/net/wireless/ti/wlcore/rx.c +++ b/drivers/net/wireless/ti/wlcore/rx.c @@ -59,7 +59,7 @@ static u32 wlcore_rx_get_align_buf_size(struct wl1271 *wl, u32 pkt_len) static void wl1271_rx_status(struct wl1271 *wl, struct wl1271_rx_descriptor *desc, struct ieee80211_rx_status *status, - u8 beacon) + u8 beacon, u8 probe_rsp) { memset(status, 0, sizeof(struct ieee80211_rx_status)); @@ -106,6 +106,9 @@ static void wl1271_rx_status(struct wl1271 *wl, } } + if (beacon || probe_rsp) + status->boottime_ns = ktime_get_boot_ns(); + if (beacon) wlcore_set_pending_regdomain_ch(wl, (u16)desc->channel, status->band); @@ -191,7 +194,8 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length, if (ieee80211_is_data_present(hdr->frame_control)) is_data = 1; - wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon); + wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon, + ieee80211_is_probe_resp(hdr->frame_control)); wlcore_hw_set_rx_csum(wl, desc, skb); seq_num = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4; diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 
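The rndis_wlan hunk above refuses an auth_req whose self-declared length exceeds the bytes still left in the event buffer, which keeps the parser from stepping past the end of the payload. The same pattern for a generic length-prefixed record walk, as a user-space sketch (struct record is illustrative, not the RNDIS layout):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct record {
    uint32_t length;    /* total record size, header included */
    uint32_t flags;
};

static void walk(const uint8_t *buf, size_t buflen)
{
    while (buflen >= sizeof(struct record)) {
        struct record rec;

        memcpy(&rec, buf, sizeof(rec));
        /* refuse records that claim more bytes than actually remain */
        if (rec.length < sizeof(rec) || rec.length > buflen)
            return;
        printf("record: len=%u flags=%#x\n", rec.length, (unsigned)rec.flags);
        buf += rec.length;
        buflen -= rec.length;
    }
}

int main(void)
{
    uint8_t buf[16];
    struct record ok  = { .length = 8,  .flags = 1 };
    struct record bad = { .length = 64, .flags = 2 };  /* claims more than remains */

    memcpy(buf, &ok, sizeof(ok));
    memcpy(buf + sizeof(ok), &bad, sizeof(bad));
    walk(buf, sizeof(buf));     /* prints the first record, rejects the second */
    return 0;
}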
d5e790dd589a27bf1d4094d152c791cab6fb7248..1a40fc3517a8f0b6d443c5e16c94b18c700a4847 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -87,8 +87,7 @@ struct netfront_cb { /* IRQ name is queue name with "-tx" or "-rx" appended */ #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3) -static DECLARE_WAIT_QUEUE_HEAD(module_load_q); -static DECLARE_WAIT_QUEUE_HEAD(module_unload_q); +static DECLARE_WAIT_QUEUE_HEAD(module_wq); struct netfront_stats { u64 packets; @@ -908,7 +907,11 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue, BUG_ON(pull_to <= skb_headlen(skb)); __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); } - BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS); + if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) { + queue->rx.rsp_cons = ++cons; + kfree_skb(nskb); + return ~0U; + } skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, skb_frag_page(nfrag), @@ -1045,6 +1048,8 @@ err: skb->len += rx->status; i = xennet_fill_frags(queue, skb, &tmpq); + if (unlikely(i == ~0U)) + goto err; if (rx->flags & XEN_NETRXF_csum_blank) skb->ip_summed = CHECKSUM_PARTIAL; @@ -1331,11 +1336,11 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev) netif_carrier_off(netdev); xenbus_switch_state(dev, XenbusStateInitialising); - wait_event(module_load_q, - xenbus_read_driver_state(dev->otherend) != - XenbusStateClosed && - xenbus_read_driver_state(dev->otherend) != - XenbusStateUnknown); + wait_event(module_wq, + xenbus_read_driver_state(dev->otherend) != + XenbusStateClosed && + xenbus_read_driver_state(dev->otherend) != + XenbusStateUnknown); return netdev; exit: @@ -1603,6 +1608,7 @@ static int xennet_init_queue(struct netfront_queue *queue) { unsigned short i; int err = 0; + char *devid; spin_lock_init(&queue->tx_lock); spin_lock_init(&queue->rx_lock); @@ -1610,8 +1616,9 @@ static int xennet_init_queue(struct netfront_queue *queue) setup_timer(&queue->rx_refill_timer, rx_refill_timeout, (unsigned long)queue); - snprintf(queue->name, sizeof(queue->name), "%s-q%u", - queue->info->netdev->name, queue->id); + devid = strrchr(queue->info->xbdev->nodename, '/') + 1; + snprintf(queue->name, sizeof(queue->name), "vif%s-q%u", + devid, queue->id); /* Initialise tx_skbs as a free chain containing every entry. 
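In the xen-netfront change above, xennet_fill_frags() now signals "too many fragments" to its caller with the ~0U sentinel instead of tripping a BUG_ON(), and the caller drops the packet. A compact user-space sketch of the same bail-out-on-full-array pattern (MAX_FRAGS and struct pkt are illustrative):

#include <stdio.h>

#define MAX_FRAGS 4

struct pkt {
    unsigned int nr_frags;
    const void *frag[MAX_FRAGS];
};

/* Returns the new fragment count, or ~0U when the fixed-size array is full */
static unsigned int add_frag(struct pkt *p, const void *data)
{
    if (p->nr_frags >= MAX_FRAGS)
        return ~0U;
    p->frag[p->nr_frags++] = data;
    return p->nr_frags;
}

int main(void)
{
    struct pkt p = { 0 };
    static char chunks[6];

    for (int i = 0; i < 6; i++) {
        if (add_frag(&p, &chunks[i]) == ~0U) {
            puts("dropping packet: too many fragments");
            break;
        }
    }
    return 0;
}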
*/ queue->tx_skb_freelist = 0; @@ -2007,15 +2014,14 @@ static void netback_changed(struct xenbus_device *dev, dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state)); + wake_up_all(&module_wq); + switch (backend_state) { case XenbusStateInitialising: case XenbusStateInitialised: case XenbusStateReconfiguring: case XenbusStateReconfigured: - break; - case XenbusStateUnknown: - wake_up_all(&module_unload_q); break; case XenbusStateInitWait: @@ -2031,12 +2037,10 @@ static void netback_changed(struct xenbus_device *dev, break; case XenbusStateClosed: - wake_up_all(&module_unload_q); if (dev->state == XenbusStateClosed) break; /* Missed the backend's CLOSING state -- fallthrough */ case XenbusStateClosing: - wake_up_all(&module_unload_q); xenbus_frontend_closed(dev); break; } @@ -2144,14 +2148,14 @@ static int xennet_remove(struct xenbus_device *dev) if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) { xenbus_switch_state(dev, XenbusStateClosing); - wait_event(module_unload_q, + wait_event(module_wq, xenbus_read_driver_state(dev->otherend) == XenbusStateClosing || xenbus_read_driver_state(dev->otherend) == XenbusStateUnknown); xenbus_switch_state(dev, XenbusStateClosed); - wait_event(module_unload_q, + wait_event(module_wq, xenbus_read_driver_state(dev->otherend) == XenbusStateClosed || xenbus_read_driver_state(dev->otherend) == diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c index d5553c47014fade81a4f461903b3cb6c4372ccf5..5d823e965883b0f5f23db5ab39afc9f96a128267 100644 --- a/drivers/nfc/pn533/usb.c +++ b/drivers/nfc/pn533/usb.c @@ -74,7 +74,7 @@ static void pn533_recv_response(struct urb *urb) struct sk_buff *skb = NULL; if (!urb->status) { - skb = alloc_skb(urb->actual_length, GFP_KERNEL); + skb = alloc_skb(urb->actual_length, GFP_ATOMIC); if (!skb) { nfc_err(&phy->udev->dev, "failed to alloc memory\n"); } else { @@ -186,7 +186,7 @@ static int pn533_usb_send_frame(struct pn533 *dev, if (dev->protocol_type == PN533_PROTO_REQ_RESP) { /* request for response for sent packet directly */ - rc = pn533_submit_urb_for_response(phy, GFP_ATOMIC); + rc = pn533_submit_urb_for_response(phy, GFP_KERNEL); if (rc) goto error; } else if (dev->protocol_type == PN533_PROTO_REQ_ACK_RESP) { diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c index 2fffd42767c7b5ddb398b32c3d5a1ec7e8e1dcc8..fb5ab5812a22f255cfe8766ee3a82cea7dd375bb 100644 --- a/drivers/nvdimm/bus.c +++ b/drivers/nvdimm/bus.c @@ -808,9 +808,9 @@ u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd, * overshoots the remainder by 4 bytes, assume it was * including 'status'. */ - if (out_field[1] - 8 == remainder) + if (out_field[1] - 4 == remainder) return remainder; - return out_field[1] - 4; + return out_field[1] - 8; } else if (cmd == ND_CMD_CALL) { struct nd_cmd_pkg *pkg = (struct nd_cmd_pkg *) in_field; diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 38c128f230e7cdf1221e90c4f878f695347a077e..3a63d58d2ca9f86f6d6f39a6b0516ce1fa44116a 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1016,7 +1016,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns, status = nvme_submit_user_cmd(ns ? 
ns->queue : ctrl->admin_q, &c, (void __user *)(uintptr_t)cmd.addr, cmd.data_len, - (void __user *)(uintptr_t)cmd.metadata, cmd.metadata, + (void __user *)(uintptr_t)cmd.metadata, cmd.metadata_len, 0, &cmd.result, timeout); if (status >= 0) { if (put_user(cmd.result, &ucmd->result)) diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index a67d037165104db83dea64ab0bf99f05ee64e8db..afb99876fa9e1ad5be351d602e1fc1358bc81340 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -306,6 +306,14 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db, old_value = *dbbuf_db; *dbbuf_db = value; + /* + * Ensure that the doorbell is updated before reading the event + * index from memory. The controller needs to provide similar + * ordering to ensure the envent index is updated before reading + * the doorbell. + */ + mb(); + if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value)) return false; } diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 48a831d58e7aeab2f628bb43cc9c84339f6bb5a2..9fffe41ead5009c497d65ce16bcf67da31bd41ea 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -1728,6 +1728,8 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown) nvme_stop_queues(&ctrl->ctrl); blk_mq_tagset_busy_iter(&ctrl->tag_set, nvme_cancel_request, &ctrl->ctrl); + if (shutdown) + nvme_start_queues(&ctrl->ctrl); nvme_rdma_destroy_io_queues(ctrl, shutdown); } diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 240b0d628222026966db95be698c37031fb0c2c3..5fa7856f6b344c1d5dbef76b6d72a9ca1aa88513 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -598,6 +598,14 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl) } ctrl->csts = NVME_CSTS_RDY; + + /* + * Controllers that are not yet enabled should not really enforce the + * keep alive timeout, but we still want to track a timeout and cleanup + * in case a host died before it enabled the controller. Hence, simply + * reset the keep alive timer when the controller is enabled. 
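The nvme-pci hunk above adds mb() so the updated shadow doorbell value is visible before the event index is re-read; with a stale index the driver can wrongly conclude that no MMIO doorbell write is needed while the device is still waiting for one. A user-space sketch of the pairing, with a C11 fence standing in for the kernel's mb() and need_event() following the usual event-index comparison assumed by nvme_dbbuf_need_event() — a sketch under those assumptions, not the driver code:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint16_t shadow_db;    /* written by the driver, read by the "device" */
static _Atomic uint16_t event_idx;    /* written by the "device" */

/* true if ei lies in (old, new], i.e. the device asked to be rung */
static bool need_event(uint16_t ei, uint16_t new, uint16_t old)
{
    return (uint16_t)(new - ei - 1) < (uint16_t)(new - old);
}

static bool update_and_check(uint16_t new)
{
    uint16_t old = atomic_load_explicit(&shadow_db, memory_order_relaxed);

    atomic_store_explicit(&shadow_db, new, memory_order_relaxed);
    /* make the doorbell update visible before re-reading the event index */
    atomic_thread_fence(memory_order_seq_cst);
    return need_event(atomic_load_explicit(&event_idx, memory_order_relaxed),
                      new, old);
}

int main(void)
{
    atomic_store(&event_idx, 1);
    printf("ring doorbell: %d\n", update_and_check(3));   /* 1: idx 1 is in (0, 3] */
    printf("ring doorbell: %d\n", update_and_check(5));   /* 0: idx 1 not in (3, 5] */
    return 0;
}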
+ */ + mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ); } static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl) diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c index c0080f6ab2f5b209eb8aeae003df6659f9124f11..0b0a4825b3eb1274a776eafc502de4787ffa8b11 100644 --- a/drivers/nvme/target/fcloop.c +++ b/drivers/nvme/target/fcloop.c @@ -300,7 +300,7 @@ fcloop_tgt_lsrqst_done_work(struct work_struct *work) struct fcloop_tport *tport = tls_req->tport; struct nvmefc_ls_req *lsreq = tls_req->lsreq; - if (tport->remoteport) + if (!tport || tport->remoteport) lsreq->done(lsreq, tls_req->status); } @@ -318,6 +318,7 @@ fcloop_ls_req(struct nvme_fc_local_port *localport, if (!rport->targetport) { tls_req->status = -ECONNREFUSED; + tls_req->tport = NULL; schedule_work(&tls_req->work); return ret; } diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index 2afafd5d8915088de1533881247fcb67b4a8aa4e..635886e4835cbb4b0a7836292c9bdd111ee0eb9c 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -865,6 +865,10 @@ struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *cell_id) return cell; } + /* NULL cell_id only allowed for device tree; invalid otherwise */ + if (!cell_id) + return ERR_PTR(-EINVAL); + return nvmem_cell_get_from_list(cell_id); } EXPORT_SYMBOL_GPL(nvmem_cell_get); diff --git a/drivers/parport/parport_sunbpp.c b/drivers/parport/parport_sunbpp.c index 01cf1c1a841a4de23af9e84f7c40cea7c552ca16..8de329546b8220783664f291ffd55b62144cdec5 100644 --- a/drivers/parport/parport_sunbpp.c +++ b/drivers/parport/parport_sunbpp.c @@ -286,12 +286,16 @@ static int bpp_probe(struct platform_device *op) ops = kmemdup(&parport_sunbpp_ops, sizeof(struct parport_operations), GFP_KERNEL); - if (!ops) + if (!ops) { + err = -ENOMEM; goto out_unmap; + } dprintk(("register_port\n")); - if (!(p = parport_register_port((unsigned long)base, irq, dma, ops))) + if (!(p = parport_register_port((unsigned long)base, irq, dma, ops))) { + err = -ENOMEM; goto out_free_ops; + } p->size = size; p->dev = &op->dev; diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c index 9bfc22b5da4b718ffe7ed5e1d9805e0e9317fee7..5f3048e75becb598b14013382ed102fa59a6f5b4 100644 --- a/drivers/pci/host/pci-aardvark.c +++ b/drivers/pci/host/pci-aardvark.c @@ -954,6 +954,7 @@ static int advk_pcie_probe(struct platform_device *pdev) bus = bridge->bus; + pci_bus_size_bridges(bus); pci_bus_assign_resources(bus); list_for_each_entry(child, &bus->children, node) diff --git a/drivers/pci/host/pci-ftpci100.c b/drivers/pci/host/pci-ftpci100.c index 4ea7d2ebcc5cd4ed4e3702947152c72054a06efe..4e6b219315142b67e05200fd8f5f250f112b9586 100644 --- a/drivers/pci/host/pci-ftpci100.c +++ b/drivers/pci/host/pci-ftpci100.c @@ -353,11 +353,13 @@ static int faraday_pci_setup_cascaded_irq(struct faraday_pci *p) irq = of_irq_get(intc, 0); if (irq <= 0) { dev_err(p->dev, "failed to get parent IRQ\n"); + of_node_put(intc); return irq ?: -EINVAL; } p->irqdomain = irq_domain_add_linear(intc, PCI_NUM_INTX, &faraday_pci_irqdomain_ops, p); + of_node_put(intc); if (!p->irqdomain) { dev_err(p->dev, "failed to create Gemini PCI IRQ domain\n"); return -EINVAL; diff --git a/drivers/pci/host/pci-host-common.c b/drivers/pci/host/pci-host-common.c index 44a47d4f0b8f294fef5c2b2482b0a16856849f01..148896f73c06f8eca9ab6cb99c9b3b98f569dab6 100644 --- a/drivers/pci/host/pci-host-common.c +++ b/drivers/pci/host/pci-host-common.c @@ -45,7 +45,7 @@ static int gen_pci_parse_request_of_pci_ranges(struct device *dev, 
switch (resource_type(res)) { case IORESOURCE_IO: - err = pci_remap_iospace(res, iobase); + err = devm_pci_remap_iospace(dev, res, iobase); if (err) { dev_warn(dev, "error %d: failed to map resource %pR\n", err, res); diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c index 8d88f19dc171145a74e8a87345796406f1c0ec37..12c1c1851ee6308b043a1a4db37a18ec3304ec2d 100644 --- a/drivers/pci/host/pci-mvebu.c +++ b/drivers/pci/host/pci-mvebu.c @@ -1220,7 +1220,7 @@ static int mvebu_pcie_probe(struct platform_device *pdev) pcie->realio.start = PCIBIOS_MIN_IO; pcie->realio.end = min_t(resource_size_t, IO_SPACE_LIMIT, - resource_size(&pcie->io)); + resource_size(&pcie->io) - 1); } else pcie->realio = pcie->io; diff --git a/drivers/pci/host/pci-versatile.c b/drivers/pci/host/pci-versatile.c index d417acab0ecf7baa81fca755a97238cc1232131f..aff4cfb555fbbba74c122cc4efdb611546388ea3 100644 --- a/drivers/pci/host/pci-versatile.c +++ b/drivers/pci/host/pci-versatile.c @@ -89,7 +89,7 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev, switch (resource_type(res)) { case IORESOURCE_IO: - err = pci_remap_iospace(res, iobase); + err = devm_pci_remap_iospace(dev, res, iobase); if (err) { dev_warn(dev, "error %d: failed to map resource %pR\n", err, res); diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c index 8f44a7d14bffa046ce25700807b2bbf27cefc1d6..41edce16a07cdcebd9df7f6ef466d9f28babb545 100644 --- a/drivers/pci/host/pcie-rcar.c +++ b/drivers/pci/host/pcie-rcar.c @@ -1105,7 +1105,7 @@ static int rcar_pcie_parse_request_of_pci_ranges(struct rcar_pcie *pci) struct resource *res = win->res; if (resource_type(res) == IORESOURCE_IO) { - err = pci_remap_iospace(res, iobase); + err = devm_pci_remap_iospace(dev, res, iobase); if (err) { dev_warn(dev, "error %d: failed to map resource %pR\n", err, res); diff --git a/drivers/pci/host/pcie-xilinx-nwl.c b/drivers/pci/host/pcie-xilinx-nwl.c index 65dea98b2643fb18469799a9e784ce8571416912..dd527ea558d71d51c1caff9593f43694f15a308c 100644 --- a/drivers/pci/host/pcie-xilinx-nwl.c +++ b/drivers/pci/host/pcie-xilinx-nwl.c @@ -561,7 +561,7 @@ static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie) PCI_NUM_INTX, &legacy_domain_ops, pcie); - + of_node_put(legacy_intc_node); if (!pcie->legacy_irq_domain) { dev_err(dev, "failed to create IRQ domain\n"); return -ENOMEM; diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c index 94e13cb8608f179ba8e25d30dfd9436517bd55db..29f024f0ed7f2c2e8c0ab134a9f7949f09e8a9f8 100644 --- a/drivers/pci/host/pcie-xilinx.c +++ b/drivers/pci/host/pcie-xilinx.c @@ -511,6 +511,7 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port) port->leg_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, &intx_domain_ops, port); + of_node_put(pcie_intc_node); if (!port->leg_domain) { dev_err(dev, "Failed to get a INTx IRQ domain\n"); return -ENODEV; diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c index 7b0e97be9063d971dbd327836ab1c9cd2377287d..591f2e05ab1c3c1f7dc47b5d38c0aa390fdc72de 100644 --- a/drivers/pci/hotplug/pci_hotplug_core.c +++ b/drivers/pci/hotplug/pci_hotplug_core.c @@ -452,8 +452,17 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus, list_add(&slot->slot_list, &pci_hotplug_slot_list); result = fs_add_slot(pci_slot); + if (result) + goto err_list_del; + kobject_uevent(&pci_slot->kobj, KOBJ_ADD); dbg("Added slot %s to the list\n", name); + goto out; + +err_list_del: + 
list_del(&slot->slot_list); + pci_slot->hotplug = NULL; + pci_destroy_slot(pci_slot); out: mutex_unlock(&pci_hp_mutex); return result; diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index e7d6cfaf386581a7d89541c89298d235bde752d6..9fc4357c3db940312c7caf86e3e5cab8866316a7 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h @@ -132,6 +132,7 @@ int pciehp_unconfigure_device(struct slot *p_slot); void pciehp_queue_pushbutton_work(struct work_struct *work); struct controller *pcie_init(struct pcie_device *dev); int pcie_init_notification(struct controller *ctrl); +void pcie_shutdown_notification(struct controller *ctrl); int pciehp_enable_slot(struct slot *p_slot); int pciehp_disable_slot(struct slot *p_slot); void pcie_reenable_notification(struct controller *ctrl); diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index 1288289cc85d38fa09190586f7743ac1271e5a83..c38e392d63e4b2037b1c1cbb879b3d1239ec8a80 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c @@ -76,6 +76,12 @@ static int reset_slot(struct hotplug_slot *slot, int probe); */ static void release_slot(struct hotplug_slot *hotplug_slot) { + struct slot *slot = hotplug_slot->private; + + /* queued work needs hotplug_slot name */ + cancel_delayed_work(&slot->work); + drain_workqueue(slot->wq); + kfree(hotplug_slot->ops); kfree(hotplug_slot->info); kfree(hotplug_slot); @@ -278,6 +284,7 @@ static void pciehp_remove(struct pcie_device *dev) { struct controller *ctrl = get_service_data(dev); + pcie_shutdown_notification(ctrl); cleanup_slot(ctrl); pciehp_release_ctrl(ctrl); } diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 46c2ee2caf281680aa280f1a386f4d059d110888..2fa830727362939ef0446940d308ad5500244c35 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -562,8 +562,6 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id) { struct controller *ctrl = (struct controller *)dev_id; struct pci_dev *pdev = ctrl_dev(ctrl); - struct pci_bus *subordinate = pdev->subordinate; - struct pci_dev *dev; struct slot *slot = ctrl->slot; u16 status, events; u8 present; @@ -611,14 +609,9 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id) wake_up(&ctrl->queue); } - if (subordinate) { - list_for_each_entry(dev, &subordinate->devices, bus_list) { - if (dev->ignore_hotplug) { - ctrl_dbg(ctrl, "ignoring hotplug event %#06x (%s requested no hotplug)\n", - events, pci_name(dev)); - return IRQ_HANDLED; - } - } + if (pdev->ignore_hotplug) { + ctrl_dbg(ctrl, "ignoring hotplug event %#06x\n", events); + return IRQ_HANDLED; } /* Check Attention Button Pressed */ @@ -789,7 +782,7 @@ int pcie_init_notification(struct controller *ctrl) return 0; } -static void pcie_shutdown_notification(struct controller *ctrl) +void pcie_shutdown_notification(struct controller *ctrl) { if (ctrl->notification_enabled) { pcie_disable_notification(ctrl); @@ -824,7 +817,7 @@ abort: static void pcie_cleanup_slot(struct controller *ctrl) { struct slot *slot = ctrl->slot; - cancel_delayed_work(&slot->work); + destroy_workqueue(slot->wq); kfree(slot); } @@ -912,7 +905,6 @@ abort: void pciehp_release_ctrl(struct controller *ctrl) { - pcie_shutdown_notification(ctrl); pcie_cleanup_slot(ctrl); kfree(ctrl); } diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 62a0677b32f100989b40e96985534ae3063121ac..22924629e64a81357b2366b413a40d3966c86619 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ 
-3446,6 +3446,44 @@ void pci_unmap_iospace(struct resource *res) } EXPORT_SYMBOL(pci_unmap_iospace); +static void devm_pci_unmap_iospace(struct device *dev, void *ptr) +{ + struct resource **res = ptr; + + pci_unmap_iospace(*res); +} + +/** + * devm_pci_remap_iospace - Managed pci_remap_iospace() + * @dev: Generic device to remap IO address for + * @res: Resource describing the I/O space + * @phys_addr: physical address of range to be mapped + * + * Managed pci_remap_iospace(). Map is automatically unmapped on driver + * detach. + */ +int devm_pci_remap_iospace(struct device *dev, const struct resource *res, + phys_addr_t phys_addr) +{ + const struct resource **ptr; + int error; + + ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + error = pci_remap_iospace(res, phys_addr); + if (error) { + devres_free(ptr); + } else { + *ptr = res; + devres_add(dev, ptr); + } + + return error; +} +EXPORT_SYMBOL(devm_pci_remap_iospace); + /** * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace() * @dev: Generic device to remap IO address for diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 4bccaf688aad7c7d5ed73b89b1b4f118c0a6690b..e23bfd9845b12f3808e88838bbb1bdec5c4dcc81 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -1560,6 +1560,10 @@ static void pci_configure_mps(struct pci_dev *dev) if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge)) return; + /* MPS and MRRS fields are of type 'RsvdP' for VFs, short-circuit out */ + if (dev->is_virtfn) + return; + mps = pcie_get_mps(dev); p_mps = pcie_get_mps(bridge); diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index ec2911c4ee425e7cde8ee990aa78848cfa96f8e1..35c9b2f4b293aae36cb0fee4cdd49982784c13f8 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -4388,11 +4388,6 @@ static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags) * * 0x9d10-0x9d1b PCI Express Root port #{1-12} * - * The 300 series chipset suffers from the same bug so include those root - * ports here as well. - * - * 0xa32c-0xa343 PCI Express Root port #{0-24} - * * [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html * [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html * [3] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html @@ -4410,7 +4405,6 @@ static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev) case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a: /* Sunrise Point */ case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee: /* Union Point */ case 0x9d10 ... 0x9d1b: /* 7th & 8th Gen Mobile */ - case 0xa32c ... 
0xa343: /* 300 series */ return true; } diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c index 1910c6c72fcd2f918a2ff544cc490577d8ee32c3..7fd1548a29059fe9c843a9b309acc22355cb84a7 100644 --- a/drivers/pci/switch/switchtec.c +++ b/drivers/pci/switch/switchtec.c @@ -24,6 +24,8 @@ #include #include +#include + MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver"); MODULE_VERSION("0.1"); MODULE_LICENSE("GPL"); @@ -1176,6 +1178,8 @@ static int ioctl_port_to_pff(struct switchtec_dev *stdev, default: if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id)) return -EINVAL; + p.port = array_index_nospec(p.port, + ARRAY_SIZE(pcfg->dsp_pff_inst_id) + 1); p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]); break; } diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c index eb23311bc70c042319909206fa4f015c7da4c905..8b79c2f7931f1711b4ed85b7f8a90123788c484a 100644 --- a/drivers/perf/xgene_pmu.c +++ b/drivers/perf/xgene_pmu.c @@ -1463,7 +1463,7 @@ static char *xgene_pmu_dev_name(struct device *dev, u32 type, int id) case PMU_TYPE_IOB: return devm_kasprintf(dev, GFP_KERNEL, "iob%d", id); case PMU_TYPE_IOB_SLOW: - return devm_kasprintf(dev, GFP_KERNEL, "iob-slow%d", id); + return devm_kasprintf(dev, GFP_KERNEL, "iob_slow%d", id); case PMU_TYPE_MCB: return devm_kasprintf(dev, GFP_KERNEL, "mcb%d", id); case PMU_TYPE_MC: diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-mux.c b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c index 35c17653c694767c8e13d1a8d02d7d91718d23b3..87618a4e90e451f2834214a337ce81e12de560fc 100644 --- a/drivers/pinctrl/bcm/pinctrl-nsp-mux.c +++ b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c @@ -460,8 +460,8 @@ static int nsp_pinmux_enable(struct pinctrl_dev *pctrl_dev, const struct nsp_pin_function *func; const struct nsp_pin_group *grp; - if (grp_select > pinctrl->num_groups || - func_select > pinctrl->num_functions) + if (grp_select >= pinctrl->num_groups || + func_select >= pinctrl->num_functions) return -EINVAL; func = &pinctrl->functions[func_select]; @@ -577,6 +577,8 @@ static int nsp_pinmux_probe(struct platform_device *pdev) return PTR_ERR(pinctrl->base0); res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!res) + return -EINVAL; pinctrl->base1 = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res)); if (!pinctrl->base1) { diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c index 6e472691d8eed6f0d3a71d20fe8e6e7e939d5b7d..17f2c5a505b25ee53a084c03d1d2c9afa5e82a44 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx.c +++ b/drivers/pinctrl/freescale/pinctrl-imx.c @@ -389,7 +389,7 @@ static void imx_pinconf_group_dbg_show(struct pinctrl_dev *pctldev, const char *name; int i, ret; - if (group > pctldev->num_groups) + if (group >= pctldev->num_groups) return; seq_printf(s, "\n"); diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c index a4e9f430d45269c45367cacc1010087971c5596b..e2cca91fd2669c2953d13743279c21a852ddefd6 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c +++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c @@ -433,7 +433,7 @@ static void imx1_pinconf_group_dbg_show(struct pinctrl_dev *pctldev, const char *name; int i, ret; - if (group > info->ngroups) + if (group >= info->ngroups) return; seq_puts(s, "\n"); diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index 433af328d9817a028f4bccce07492fbc6ac27a68..b78f42abff2f8a029c954e58e5ca518f3168ddd0 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ 
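The pinctrl hunks above (nsp-mux, imx, imx1) tighten selector validation from ">" to ">=": with N groups the valid indices are 0..N-1, so allowing sel == N reads one element past the array. A minimal user-space illustration:

#include <stdio.h>

#define NUM_GROUPS 4

static const char *const groups[NUM_GROUPS] = { "gpio", "i2c", "spi", "uart" };

static const char *group_name(unsigned int sel)
{
    /* "sel > NUM_GROUPS" would let sel == NUM_GROUPS slip through */
    if (sel >= NUM_GROUPS)
        return NULL;
    return groups[sel];
}

int main(void)
{
    printf("sel 3 -> %s\n", group_name(3));
    printf("sel 4 -> %s\n", group_name(4) ? group_name(4) : "(rejected)");
    return 0;
}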
b/drivers/pinctrl/pinctrl-amd.c @@ -530,7 +530,8 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id) /* Each status bit covers four pins */ for (i = 0; i < 4; i++) { regval = readl(regs + i); - if (!(regval & PIN_IRQ_PENDING)) + if (!(regval & PIN_IRQ_PENDING) || + !(regval & BIT(INTERRUPT_MASK_OFF))) continue; irq = irq_find_mapping(gc->irqdomain, irqnr + i); generic_handle_irq(irq); diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c index d84761822243ec4fb8716e0ecfed2859564ae291..103aaab41357033c1cc9a1e87bddda0cc376f4a7 100644 --- a/drivers/pinctrl/pinctrl-ingenic.c +++ b/drivers/pinctrl/pinctrl-ingenic.c @@ -536,7 +536,7 @@ static int ingenic_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev, ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, input); } else { ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, false); - ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DIR, input); + ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DIR, !input); ingenic_config_pin(jzpc, pin, JZ4740_GPIO_FUNC, false); } diff --git a/drivers/pinctrl/pinctrl-rza1.c b/drivers/pinctrl/pinctrl-rza1.c index 04d058706b808fe8807192a6f3219c845cfa9b27..a865dc56a491b9b295f3d9cf6c409c4455b05e11 100644 --- a/drivers/pinctrl/pinctrl-rza1.c +++ b/drivers/pinctrl/pinctrl-rza1.c @@ -878,6 +878,7 @@ static int rza1_dt_node_to_map(struct pinctrl_dev *pctldev, const char *grpname; const char **fngrps; int ret, npins; + int gsel, fsel; npins = rza1_dt_node_pin_count(np); if (npins < 0) { @@ -927,18 +928,19 @@ static int rza1_dt_node_to_map(struct pinctrl_dev *pctldev, fngrps[0] = grpname; mutex_lock(&rza1_pctl->mutex); - ret = pinctrl_generic_add_group(pctldev, grpname, grpins, npins, - NULL); - if (ret) { + gsel = pinctrl_generic_add_group(pctldev, grpname, grpins, npins, + NULL); + if (gsel < 0) { mutex_unlock(&rza1_pctl->mutex); - return ret; + return gsel; } - ret = pinmux_generic_add_function(pctldev, grpname, fngrps, 1, - mux_confs); - if (ret) + fsel = pinmux_generic_add_function(pctldev, grpname, fngrps, 1, + mux_confs); + if (fsel < 0) { + ret = fsel; goto remove_group; - mutex_unlock(&rza1_pctl->mutex); + } dev_info(rza1_pctl->dev, "Parsed function and group %s with %d pins\n", grpname, npins); @@ -955,15 +957,15 @@ static int rza1_dt_node_to_map(struct pinctrl_dev *pctldev, (*map)->data.mux.group = np->name; (*map)->data.mux.function = np->name; *num_maps = 1; + mutex_unlock(&rza1_pctl->mutex); return 0; remove_function: - mutex_lock(&rza1_pctl->mutex); - pinmux_generic_remove_last_function(pctldev); + pinmux_generic_remove_function(pctldev, fsel); remove_group: - pinctrl_generic_remove_last_group(pctldev); + pinctrl_generic_remove_group(pctldev, gsel); mutex_unlock(&rza1_pctl->mutex); dev_info(rza1_pctl->dev, "Unable to parse function and group %s\n", diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index ff491da64dab8420298c9e00d22eddac908b310c..31632c087504785abfd2b1316e84d9f0702a7c70 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c @@ -238,22 +238,30 @@ static int msm_config_group_get(struct pinctrl_dev *pctldev, /* Convert register value to pinconf value */ switch (param) { case PIN_CONFIG_BIAS_DISABLE: - arg = arg == MSM_NO_PULL; + if (arg != MSM_NO_PULL) + return -EINVAL; + arg = 1; break; case PIN_CONFIG_BIAS_PULL_DOWN: - arg = arg == MSM_PULL_DOWN; + if (arg != MSM_PULL_DOWN) + return -EINVAL; + arg = 1; break; case PIN_CONFIG_BIAS_BUS_HOLD: if (pctrl->soc->pull_no_keeper) return -ENOTSUPP; - arg = 
arg == MSM_KEEPER; + if (arg != MSM_KEEPER) + return -EINVAL; + arg = 1; break; case PIN_CONFIG_BIAS_PULL_UP: if (pctrl->soc->pull_no_keeper) arg = arg == MSM_PULL_UP_NO_KEEPER; else arg = arg == MSM_PULL_UP; + if (!arg) + return -EINVAL; break; case PIN_CONFIG_DRIVE_STRENGTH: arg = msm_regval_to_drive(arg); diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c index c2c0bab04257dfdd3d5d118d043f7928b94be3dc..22aaf4375fac09d48153577f2437e45e70c33877 100644 --- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c +++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c @@ -390,31 +390,47 @@ static int pmic_gpio_config_get(struct pinctrl_dev *pctldev, switch (param) { case PIN_CONFIG_DRIVE_PUSH_PULL: - arg = pad->buffer_type == PMIC_GPIO_OUT_BUF_CMOS; + if (pad->buffer_type != PMIC_GPIO_OUT_BUF_CMOS) + return -EINVAL; + arg = 1; break; case PIN_CONFIG_DRIVE_OPEN_DRAIN: - arg = pad->buffer_type == PMIC_GPIO_OUT_BUF_OPEN_DRAIN_NMOS; + if (pad->buffer_type != PMIC_GPIO_OUT_BUF_OPEN_DRAIN_NMOS) + return -EINVAL; + arg = 1; break; case PIN_CONFIG_DRIVE_OPEN_SOURCE: - arg = pad->buffer_type == PMIC_GPIO_OUT_BUF_OPEN_DRAIN_PMOS; + if (pad->buffer_type != PMIC_GPIO_OUT_BUF_OPEN_DRAIN_PMOS) + return -EINVAL; + arg = 1; break; case PIN_CONFIG_BIAS_PULL_DOWN: - arg = pad->pullup == PMIC_GPIO_PULL_DOWN; + if (pad->pullup != PMIC_GPIO_PULL_DOWN) + return -EINVAL; + arg = 1; break; case PIN_CONFIG_BIAS_DISABLE: - arg = pad->pullup = PMIC_GPIO_PULL_DISABLE; + if (pad->pullup != PMIC_GPIO_PULL_DISABLE) + return -EINVAL; + arg = 1; break; case PIN_CONFIG_BIAS_PULL_UP: - arg = pad->pullup == PMIC_GPIO_PULL_UP_30; + if (pad->pullup != PMIC_GPIO_PULL_UP_30) + return -EINVAL; + arg = 1; break; case PIN_CONFIG_BIAS_HIGH_IMPEDANCE: - arg = !pad->is_enabled; + if (pad->is_enabled) + return -EINVAL; + arg = 1; break; case PIN_CONFIG_POWER_SOURCE: arg = pad->power_source; break; case PIN_CONFIG_INPUT_ENABLE: - arg = pad->input_enabled; + if (!pad->input_enabled) + return -EINVAL; + arg = 1; break; case PIN_CONFIG_OUTPUT: arg = pad->out_value; diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c index 4eb8e1a472b23821280fae17f91df35a8ce8737e..e335b18da20fc8ccc3264f20b5cdb883d4149580 100644 --- a/drivers/platform/x86/alienware-wmi.c +++ b/drivers/platform/x86/alienware-wmi.c @@ -519,6 +519,7 @@ static acpi_status alienware_wmax_command(struct wmax_basic_args *in_args, if (obj && obj->type == ACPI_TYPE_INTEGER) *out_data = (u32) obj->integer.value; } + kfree(output.pointer); return status; } diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index 5269a01d9bdd9fa069541dfec40200be3601f714..a6a33327f5e7f79f776b9fb61bfdcb70ac6a5f83 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c @@ -487,6 +487,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = { { KE_KEY, 0xC4, { KEY_KBDILLUMUP } }, { KE_KEY, 0xC5, { KEY_KBDILLUMDOWN } }, { KE_IGNORE, 0xC6, }, /* Ambient Light Sensor notification */ + { KE_KEY, 0xFA, { KEY_PROG2 } }, /* Lid flip action */ { KE_END, 0}, }; diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index fe98d4ac0df37040c3da8ad470bf7a33c5477fbe..e1e7e587b45b57e65deaf32397f3b2af5defa760 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -1097,10 +1097,10 @@ static const struct dmi_system_id no_hw_rfkill_list[] = { }, }, { - .ident = "Lenovo Legion Y520-15IKBN", + .ident = "Lenovo Legion 
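The pinctrl-msm and pinctrl-spmi-gpio hunks above converge on one pinconf-get shape: when the queried bias or drive parameter is not the one currently programmed, return -EINVAL (so callers can treat the parameter as "not set") rather than reporting arg = 0, and report arg = 1 when it is active. A condensed user-space sketch of that shape, with made-up enums standing in for the hardware state:

#include <errno.h>
#include <stdio.h>

enum pull { PULL_NONE, PULL_DOWN, PULL_UP };
enum param { BIAS_DISABLE, BIAS_PULL_DOWN, BIAS_PULL_UP };

static int config_get(enum pull programmed, enum param want, unsigned int *arg)
{
    switch (want) {
    case BIAS_DISABLE:
        if (programmed != PULL_NONE)
            return -EINVAL;       /* not active: let the caller skip it */
        break;
    case BIAS_PULL_DOWN:
        if (programmed != PULL_DOWN)
            return -EINVAL;
        break;
    case BIAS_PULL_UP:
        if (programmed != PULL_UP)
            return -EINVAL;
        break;
    }
    *arg = 1;                     /* boolean configs report 1 when active */
    return 0;
}

int main(void)
{
    unsigned int arg;

    printf("pull-down active: %d\n", config_get(PULL_DOWN, BIAS_PULL_DOWN, &arg) == 0);
    printf("pull-up active:   %d\n", config_get(PULL_DOWN, BIAS_PULL_UP, &arg) == 0);
    return 0;
}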
Y520-15IKB", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Y520-15IKBN"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Y520-15IKB"), }, }, { diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c index b5b890127479f8f586880762828bbd447a383ddf..b7dfe06261f1e6441644aebae16f8d2d6163b842 100644 --- a/drivers/platform/x86/intel_punit_ipc.c +++ b/drivers/platform/x86/intel_punit_ipc.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index bb1dcd7fbdeb81902f8b9a395569d1101c342fae..8221e000c8c23616d5206694f87e1f8d2c4a8665 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -34,6 +34,7 @@ #define TOSHIBA_ACPI_VERSION "0.24" #define PROC_INTERFACE_VERSION 1 +#include #include #include #include @@ -1682,7 +1683,7 @@ static const struct file_operations keys_proc_fops = { .write = keys_proc_write, }; -static int version_proc_show(struct seq_file *m, void *v) +static int __maybe_unused version_proc_show(struct seq_file *m, void *v) { seq_printf(m, "driver: %s\n", TOSHIBA_ACPI_VERSION); seq_printf(m, "proc_interface: %d\n", PROC_INTERFACE_VERSION); diff --git a/drivers/power/reset/vexpress-poweroff.c b/drivers/power/reset/vexpress-poweroff.c index 102f95a094603ff4c0b54245c6d79c50e764cab0..e9e749f87517d871cf2d4113568a44fb9c0b5d09 100644 --- a/drivers/power/reset/vexpress-poweroff.c +++ b/drivers/power/reset/vexpress-poweroff.c @@ -35,6 +35,7 @@ static void vexpress_reset_do(struct device *dev, const char *what) } static struct device *vexpress_power_off_device; +static atomic_t vexpress_restart_nb_refcnt = ATOMIC_INIT(0); static void vexpress_power_off(void) { @@ -99,10 +100,13 @@ static int _vexpress_register_restart_handler(struct device *dev) int err; vexpress_restart_device = dev; - err = register_restart_handler(&vexpress_restart_nb); - if (err) { - dev_err(dev, "cannot register restart handler (err=%d)\n", err); - return err; + if (atomic_inc_return(&vexpress_restart_nb_refcnt) == 1) { + err = register_restart_handler(&vexpress_restart_nb); + if (err) { + dev_err(dev, "cannot register restart handler (err=%d)\n", err); + atomic_dec(&vexpress_restart_nb_refcnt); + return err; + } } device_create_file(dev, &dev_attr_active); diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c index 9dc7590e07cbe97795406b9810544f95faa2ae2a..4d016fbc3527ccfb447fdc91543d98757f540ea1 100644 --- a/drivers/power/supply/axp288_charger.c +++ b/drivers/power/supply/axp288_charger.c @@ -771,7 +771,7 @@ static int charger_init_hw_regs(struct axp288_chrg_info *info) } /* Determine charge current limit */ - cc = (ret & CHRG_CCCV_CC_MASK) >> CHRG_CCCV_CC_BIT_POS; + cc = (val & CHRG_CCCV_CC_MASK) >> CHRG_CCCV_CC_BIT_POS; cc = (cc * CHRG_CCCV_CC_LSB_RES) + CHRG_CCCV_CC_OFFSET; info->cc = cc; diff --git a/drivers/power/supply/generic-adc-battery.c b/drivers/power/supply/generic-adc-battery.c index 37e523374fe00ebe44de3f20ae12a1b8674641b5..371b5ec700870f0b3bc23f62ed939fc717644185 100644 --- a/drivers/power/supply/generic-adc-battery.c +++ b/drivers/power/supply/generic-adc-battery.c @@ -243,10 +243,10 @@ static int gab_probe(struct platform_device *pdev) struct power_supply_desc *psy_desc; struct power_supply_config psy_cfg = {}; struct gab_platform_data *pdata = pdev->dev.platform_data; - enum power_supply_property *properties; int ret = 0; int chan; - int index = 0; + 
int index = ARRAY_SIZE(gab_props); + bool any = false; adc_bat = devm_kzalloc(&pdev->dev, sizeof(*adc_bat), GFP_KERNEL); if (!adc_bat) { @@ -280,8 +280,6 @@ static int gab_probe(struct platform_device *pdev) } memcpy(psy_desc->properties, gab_props, sizeof(gab_props)); - properties = (enum power_supply_property *) - ((char *)psy_desc->properties + sizeof(gab_props)); /* * getting channel from iio and copying the battery properties @@ -295,15 +293,22 @@ static int gab_probe(struct platform_device *pdev) adc_bat->channel[chan] = NULL; } else { /* copying properties for supported channels only */ - memcpy(properties + sizeof(*(psy_desc->properties)) * index, - &gab_dyn_props[chan], - sizeof(gab_dyn_props[chan])); - index++; + int index2; + + for (index2 = 0; index2 < index; index2++) { + if (psy_desc->properties[index2] == + gab_dyn_props[chan]) + break; /* already known */ + } + if (index2 == index) /* really new */ + psy_desc->properties[index++] = + gab_dyn_props[chan]; + any = true; } } /* none of the channels are supported so let's bail out */ - if (index == 0) { + if (!any) { ret = -ENODEV; goto second_mem_fail; } @@ -314,7 +319,7 @@ static int gab_probe(struct platform_device *pdev) * as come channels may be not be supported by the device.So * we need to take care of that. */ - psy_desc->num_properties = ARRAY_SIZE(gab_props) + index; + psy_desc->num_properties = index; adc_bat->psy = power_supply_register(&pdev->dev, psy_desc, &psy_cfg); if (IS_ERR(adc_bat->psy)) { diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c index 02c6340ae36fe93900662d7971b6af7df4df48ed..3226faebe0a084f779f2b127f1d082231e6ab5ec 100644 --- a/drivers/power/supply/power_supply_core.c +++ b/drivers/power/supply/power_supply_core.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -139,8 +140,13 @@ static void power_supply_deferred_register_work(struct work_struct *work) struct power_supply *psy = container_of(work, struct power_supply, deferred_register_work.work); - if (psy->dev.parent) - mutex_lock(&psy->dev.parent->mutex); + if (psy->dev.parent) { + while (!mutex_trylock(&psy->dev.parent->mutex)) { + if (psy->removing) + return; + msleep(10); + } + } power_supply_changed(psy); @@ -1071,6 +1077,7 @@ EXPORT_SYMBOL_GPL(devm_power_supply_register_no_ws); void power_supply_unregister(struct power_supply *psy) { WARN_ON(atomic_dec_return(&psy->use_cnt)); + psy->removing = true; cancel_work_sync(&psy->changed_work); cancel_delayed_work_sync(&psy->deferred_register_work); sysfs_remove_link(&psy->dev.kobj, "powers"); diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c index d589331d1884b29f938c47f9fb3308ffcb212026..3540d00425d03be01fdcdb77c6f028c975a92617 100644 --- a/drivers/pwm/pwm-meson.c +++ b/drivers/pwm/pwm-meson.c @@ -432,7 +432,6 @@ static int meson_pwm_init_channels(struct meson_pwm *meson, struct meson_pwm_channel *channels) { struct device *dev = meson->chip.dev; - struct device_node *np = dev->of_node; struct clk_init_data init; unsigned int i; char name[255]; @@ -441,7 +440,7 @@ static int meson_pwm_init_channels(struct meson_pwm *meson, for (i = 0; i < meson->chip.npwm; i++) { struct meson_pwm_channel *channel = &channels[i]; - snprintf(name, sizeof(name), "%pOF#mux%u", np, i); + snprintf(name, sizeof(name), "%s#mux%u", dev_name(dev), i); init.name = name; init.ops = &clk_mux_ops; diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c index 
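The generic-adc-battery rework above builds psy_desc->properties by appending each channel's dynamic property only if it is not already in the array, so num_properties ends up counting unique entries instead of a fixed offset plus a channel count. The dedup step as a standalone user-space sketch:

#include <stdio.h>

/* Append v to props[0..n-1] unless it is already present; return new count */
static int add_unique(int *props, int n, int v)
{
    for (int i = 0; i < n; i++)
        if (props[i] == v)
            return n;       /* already known */
    props[n] = v;
    return n + 1;
}

int main(void)
{
    int props[8] = { 10, 11, 12 };          /* fixed, always-present properties */
    int n = 3;
    int per_channel[] = { 20, 21, 20, 22 }; /* duplicates across channels */

    for (unsigned int i = 0; i < sizeof(per_channel) / sizeof(per_channel[0]); i++)
        n = add_unique(props, n, per_channel[i]);

    printf("num_properties = %d\n", n);     /* 6, not 3 + 4 */
    return 0;
}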
4c22cb39504094eb32db6ad94e7718eadb38917b..f7b8a86fa5c5e9570a616ccbcdb61e7d427b8ae3 100644 --- a/drivers/pwm/pwm-tiehrpwm.c +++ b/drivers/pwm/pwm-tiehrpwm.c @@ -33,10 +33,6 @@ #define TBCTL 0x00 #define TBPRD 0x0A -#define TBCTL_RUN_MASK (BIT(15) | BIT(14)) -#define TBCTL_STOP_NEXT 0 -#define TBCTL_STOP_ON_CYCLE BIT(14) -#define TBCTL_FREE_RUN (BIT(15) | BIT(14)) #define TBCTL_PRDLD_MASK BIT(3) #define TBCTL_PRDLD_SHDW 0 #define TBCTL_PRDLD_IMDT BIT(3) @@ -360,7 +356,7 @@ static int ehrpwm_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) /* Channels polarity can be configured from action qualifier module */ configure_polarity(pc, pwm->hwpwm); - /* Enable TBCLK before enabling PWM device */ + /* Enable TBCLK */ ret = clk_enable(pc->tbclk); if (ret) { dev_err(chip->dev, "Failed to enable TBCLK for %s: %d\n", @@ -368,9 +364,6 @@ static int ehrpwm_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) return ret; } - /* Enable time counter for free_run */ - ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_RUN_MASK, TBCTL_FREE_RUN); - return 0; } @@ -388,6 +381,8 @@ static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) aqcsfrc_mask = AQCSFRC_CSFA_MASK; } + /* Update shadow register first before modifying active register */ + ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val); /* * Changes to immediate action on Action Qualifier. This puts * Action Qualifier control on PWM output from next TBCLK @@ -400,9 +395,6 @@ static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) /* Disabling TBCLK on PWM disable */ clk_disable(pc->tbclk); - /* Stop Time base counter */ - ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_RUN_MASK, TBCTL_STOP_NEXT); - /* Disable clock on PWM disable */ pm_runtime_put_sync(chip->dev); } diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index b64b7916507f28adb03b600387356ad9c8fcd26e..b2cb4f497ef6707bb02dd4a306424a2d9d9bbdb9 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -4115,13 +4115,13 @@ regulator_register(const struct regulator_desc *regulator_desc, !rdev->desc->fixed_uV) rdev->is_switch = true; + dev_set_drvdata(&rdev->dev, rdev); ret = device_register(&rdev->dev); if (ret != 0) { put_device(&rdev->dev); goto unset_supplies; } - dev_set_drvdata(&rdev->dev, rdev); rdev_init_debugfs(rdev); /* try to resolve regulators supply since a new one was registered */ diff --git a/drivers/reset/reset-imx7.c b/drivers/reset/reset-imx7.c index 4db177bc89bc4b18bbeb55e8766a7045a7a221a9..fdeac194642946b713a2a3e00fa2ca371b746e4c 100644 --- a/drivers/reset/reset-imx7.c +++ b/drivers/reset/reset-imx7.c @@ -80,7 +80,7 @@ static int imx7_reset_set(struct reset_controller_dev *rcdev, { struct imx7_src *imx7src = to_imx7_src(rcdev); const struct imx7_src_signal *signal = &imx7_src_signals[id]; - unsigned int value = 0; + unsigned int value = assert ? 
signal->bit : 0; switch (id) { case IMX7_RESET_PCIEPHY: diff --git a/drivers/rtc/rtc-bq4802.c b/drivers/rtc/rtc-bq4802.c index bd170cb3361ce15e511a3a81f2ff1c06a5ed3bfe..5747a54cbd423e34c13e5f920ee7f8bce1cc2c50 100644 --- a/drivers/rtc/rtc-bq4802.c +++ b/drivers/rtc/rtc-bq4802.c @@ -164,6 +164,10 @@ static int bq4802_probe(struct platform_device *pdev) } else if (p->r->flags & IORESOURCE_MEM) { p->regs = devm_ioremap(&pdev->dev, p->r->start, resource_size(p->r)); + if (!p->regs){ + err = -ENOMEM; + goto out; + } p->read = bq4802_read_mem; p->write = bq4802_write_mem; } else { diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c index 13f7cd11c07eb52948121225242b57f8ab2c8e2c..ac6e6a6a194c71de44bde323135e0cca8780921b 100644 --- a/drivers/rtc/rtc-omap.c +++ b/drivers/rtc/rtc-omap.c @@ -817,13 +817,6 @@ static int omap_rtc_probe(struct platform_device *pdev) goto err; } - if (rtc->is_pmic_controller) { - if (!pm_power_off) { - omap_rtc_power_off_rtc = rtc; - pm_power_off = omap_rtc_power_off; - } - } - /* Support ext_wakeup pinconf */ rtc_pinctrl_desc.name = dev_name(&pdev->dev); @@ -833,6 +826,13 @@ static int omap_rtc_probe(struct platform_device *pdev) return PTR_ERR(rtc->pctldev); } + if (rtc->is_pmic_controller) { + if (!pm_power_off) { + omap_rtc_power_off_rtc = rtc; + pm_power_off = omap_rtc_power_off; + } + } + return 0; err: diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index d072f84a8535f160889eca5891370747e67b03c8..92c4f5180ad07914e8c2722988b8f8e2f32fff17 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -3190,6 +3190,7 @@ static int dasd_alloc_queue(struct dasd_block *block) block->tag_set.nr_hw_queues = DASD_NR_HW_QUEUES; block->tag_set.queue_depth = DASD_MAX_LCU_DEV * DASD_REQ_PER_DEV; block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; + block->tag_set.numa_node = NUMA_NO_NODE; rc = blk_mq_alloc_tag_set(&block->tag_set); if (rc) diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 5ede251c52ca980e51f6e22fdbfc3bef08c5c9c6..4c7c8455da961422c8a92f9c8862fb8a59c16132 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -1778,6 +1778,9 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device) struct dasd_eckd_private *private = device->private; int i; + if (!private) + return; + dasd_alias_disconnect_device_from_lcu(device); private->ned = NULL; private->sneq = NULL; @@ -2032,8 +2035,11 @@ static int dasd_eckd_basic_to_ready(struct dasd_device *device) static int dasd_eckd_online_to_ready(struct dasd_device *device) { - cancel_work_sync(&device->reload_device); - cancel_work_sync(&device->kick_validate); + if (cancel_work_sync(&device->reload_device)) + dasd_put_device(device); + if (cancel_work_sync(&device->kick_validate)) + dasd_put_device(device); + return 0; }; diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index eb51893c74a4ba4053fe8d15e064fbf42bed9845..5c944ee76ec1494c0515c5dee9aa80bc5da096df 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c @@ -454,6 +454,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) bdev->tag_set.nr_hw_queues = nr_requests; bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests; bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; + bdev->tag_set.numa_node = NUMA_NO_NODE; ret = blk_mq_alloc_tag_set(&bdev->tag_set); if (ret) diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 
8941e7caaf4dc9c3ae180569329457f5342d1ae8..c7afdbded26b6bfefd82dd8db3c643c5c931f857 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -641,21 +641,20 @@ static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q, unsigned long phys_aob = 0; if (!q->use_cq) - goto out; + return 0; if (!q->aobs[bufnr]) { struct qaob *aob = qdio_allocate_aob(); q->aobs[bufnr] = aob; } if (q->aobs[bufnr]) { - q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE; q->sbal_state[bufnr].aob = q->aobs[bufnr]; q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user; phys_aob = virt_to_phys(q->aobs[bufnr]); WARN_ON_ONCE(phys_aob & 0xFF); } -out: + q->sbal_state[bufnr].flags = 0; return phys_aob; } diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c index 82f05c4b8c526f73a52aed819eacb33cdfd18e3a..ae7a49ade414bd8e33356f47e954cfc27a2488a3 100644 --- a/drivers/s390/cio/vfio_ccw_drv.c +++ b/drivers/s390/cio/vfio_ccw_drv.c @@ -176,6 +176,7 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process) { struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev); unsigned long flags; + int rc = -EAGAIN; spin_lock_irqsave(sch->lock, flags); if (!device_is_registered(&sch->dev)) @@ -186,6 +187,7 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process) if (cio_update_schib(sch)) { vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER); + rc = 0; goto out_unlock; } @@ -194,11 +196,12 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process) private->state = private->mdev ? VFIO_CCW_STATE_IDLE : VFIO_CCW_STATE_STANDBY; } + rc = 0; out_unlock: spin_unlock_irqrestore(sch->lock, flags); - return 0; + return rc; } static struct css_device_id vfio_ccw_sch_ids[] = { diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 939b5b5e97efe81ad52a04ab04b678d0fc424ee5..0a6afd4b283d484daf8f4d3d96f2cca427970085 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -3507,13 +3507,14 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, qdio_flags = QDIO_FLAG_SYNC_OUTPUT; if (atomic_read(&queue->set_pci_flags_count)) qdio_flags |= QDIO_FLAG_PCI_OUT; + atomic_add(count, &queue->used_buffers); + rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags, queue->queue_no, index, count); if (queue->card->options.performance_stats) queue->card->perf_stats.outbound_do_qdio_time += qeth_get_micros() - queue->card->perf_stats.outbound_do_qdio_start_time; - atomic_add(count, &queue->used_buffers); if (rc) { queue->card->stats.tx_errors += count; /* ignore temporary SIGA errors without busy condition */ diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c index d1ee9e30c68bec45fba048ae30c4ae325fc5f2d0..21e91fbb28605399116a1144cfced5ea27f57fd4 100644 --- a/drivers/s390/net/qeth_core_sys.c +++ b/drivers/s390/net/qeth_core_sys.c @@ -423,6 +423,7 @@ static ssize_t qeth_dev_layer2_store(struct device *dev, if (card->discipline) { card->discipline->remove(card->gdev); qeth_core_free_discipline(card); + card->options.layer2 = -1; } rc = qeth_core_load_discipline(card, newdis); diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index a1388842e17e561e901726ccb61bc44163332c24..dd342207095af001581bce325583e0c991339d45 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c @@ -2042,6 +2042,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) if (twa_initialize_device_extension(tw_dev)) { 
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension"); + retval = -ENOMEM; goto out_free_device_extension; } @@ -2064,6 +2065,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) tw_dev->base_addr = ioremap(mem_addr, mem_len); if (!tw_dev->base_addr) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap"); + retval = -ENOMEM; goto out_release_mem_region; } @@ -2071,8 +2073,10 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) TW_DISABLE_INTERRUPTS(tw_dev); /* Initialize the card */ - if (twa_reset_sequence(tw_dev, 0)) + if (twa_reset_sequence(tw_dev, 0)) { + retval = -ENOMEM; goto out_iounmap; + } /* Set host specific parameters */ if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) || diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c index b150e131b2e76a172aa13bbf84949ec0cc269cd4..aa317d6909e8f0ea71196ad0cedd5337e10f87e4 100644 --- a/drivers/scsi/3w-sas.c +++ b/drivers/scsi/3w-sas.c @@ -1597,6 +1597,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) if (twl_initialize_device_extension(tw_dev)) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Failed to initialize device extension"); + retval = -ENOMEM; goto out_free_device_extension; } @@ -1611,6 +1612,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) tw_dev->base_addr = pci_iomap(pdev, 1, 0); if (!tw_dev->base_addr) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to ioremap"); + retval = -ENOMEM; goto out_release_mem_region; } @@ -1620,6 +1622,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) /* Initialize the card */ if (twl_reset_sequence(tw_dev, 0)) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1d, "Controller reset failed during probe"); + retval = -ENOMEM; goto out_iounmap; } diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c index f6179e3d695397e368cdd57e913d8863e829fa31..961ea6f7def87263e314d389a6b33b3b34a883e1 100644 --- a/drivers/scsi/3w-xxxx.c +++ b/drivers/scsi/3w-xxxx.c @@ -2280,6 +2280,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) if (tw_initialize_device_extension(tw_dev)) { printk(KERN_WARNING "3w-xxxx: Failed to initialize device extension."); + retval = -ENOMEM; goto out_free_device_extension; } @@ -2294,6 +2295,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) tw_dev->base_addr = pci_resource_start(pdev, 0); if (!tw_dev->base_addr) { printk(KERN_WARNING "3w-xxxx: Failed to get io address."); + retval = -ENOMEM; goto out_release_mem_region; } diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c index 6c838865ac5a7b32fd853aa36a07611e9809adc3..4a4746cc6745f2cc95b2ce0e570a7df05fc77484 100644 --- a/drivers/scsi/aic94xx/aic94xx_init.c +++ b/drivers/scsi/aic94xx/aic94xx_init.c @@ -1030,8 +1030,10 @@ static int __init aic94xx_init(void) aic94xx_transport_template = sas_domain_attach_transport(&aic94xx_transport_functions); - if (!aic94xx_transport_template) + if (!aic94xx_transport_template) { + err = -ENOMEM; goto out_destroy_caches; + } err = pci_register_driver(&aic94xx_pci_driver); if (err) diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c index 42921dbba9272f793a2a4a483e727dfa78809e4b..4ca10501647b33573af8ef917810e7c199b97e4d 100644 --- a/drivers/scsi/bnx2i/bnx2i_hwi.c +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c @@ -2742,6 +2742,8 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep) 
BNX2X_DOORBELL_PCI_BAR); reg_off = (1 << BNX2X_DB_SHIFT) * (cid_num & 0x1FFFF); ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4); + if (!ep->qp.ctx_base) + return -ENOMEM; goto arm_cq; } diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index 0ac24fd699a144e7f62ed013cd1659d98e70bd46..9ec11316bfe683e4ee298bd4e7be6712eb595cc4 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -754,9 +754,9 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport, case ELS_LOGO: if (fip->mode == FIP_MODE_VN2VN) { if (fip->state != FIP_ST_VNMP_UP) - return -EINVAL; + goto drop; if (ntoh24(fh->fh_d_id) == FC_FID_FLOGI) - return -EINVAL; + goto drop; } else { if (fip->state != FIP_ST_ENABLED) return 0; @@ -799,9 +799,9 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport, fip->send(fip, skb); return -EINPROGRESS; drop: - kfree_skb(skb); LIBFCOE_FIP_DBG(fip, "drop els_send op %u d_id %x\n", op, ntoh24(fh->fh_d_id)); + kfree_skb(skb); return -EINVAL; } EXPORT_SYMBOL(fcoe_ctlr_els_send); diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index 7d156b161482988367ac323f8182d177421d1163..53eb277313739e982348ed2055342db7bca61c94 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -93,7 +93,7 @@ static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT; static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2; static int fast_fail = 1; static int client_reserve = 1; -static char partition_name[97] = "UNKNOWN"; +static char partition_name[96] = "UNKNOWN"; static unsigned int partition_number = -1; static LIST_HEAD(ibmvscsi_head); @@ -262,7 +262,7 @@ static void gather_partition_info(void) ppartition_name = of_get_property(of_root, "ibm,partition-name", NULL); if (ppartition_name) - strncpy(partition_name, ppartition_name, + strlcpy(partition_name, ppartition_name, sizeof(partition_name)); p_number_ptr = of_get_property(of_root, "ibm,partition-no", NULL); if (p_number_ptr) diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 8660f923ace02120eb63c2ac11724448d4cbd060..bb9c1c01664390a31ddb7fd225825467bf95f134 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -294,9 +294,11 @@ static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event) * discovery, reverify or log them in. Otherwise, log them out. * Skip ports which were never discovered. These are the dNS port * and ports which were created by PLOGI. + * + * We don't need to use the _rcu variant here as the rport list + * is protected by the disc mutex which is already held on entry. 
*/ - rcu_read_lock(); - list_for_each_entry_rcu(rdata, &disc->rports, peers) { + list_for_each_entry(rdata, &disc->rports, peers) { if (!kref_get_unless_zero(&rdata->kref)) continue; if (rdata->disc_id) { @@ -307,7 +309,6 @@ static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event) } kref_put(&rdata->kref, fc_rport_destroy); } - rcu_read_unlock(); mutex_unlock(&disc->disc_mutex); disc->disc_callback(lport, event); mutex_lock(&disc->disc_mutex); diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 31d31aad3de1d3fd0f2ff58d2141cabddec474bf..89b1f1af2fd456c38e45e0a905af9a76385c1d89 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -2164,6 +2164,7 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp) FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n", fc_rport_state(rdata)); + rdata->flags &= ~FC_RP_STARTED; fc_rport_enter_delete(rdata, RPORT_EV_STOP); mutex_unlock(&rdata->rp_mutex); kref_put(&rdata->kref, fc_rport_destroy); diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index bddbe2da528340b930e93c49134b9ab7b5c15067..cf8a15e54d83ff7de8e01e6791a61e65749495ec 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -284,11 +284,11 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode) */ if (opcode != ISCSI_OP_SCSI_DATA_OUT) { iscsi_conn_printk(KERN_INFO, conn, - "task [op %x/%x itt " + "task [op %x itt " "0x%x/0x%x] " "rejected.\n", - task->hdr->opcode, opcode, - task->itt, task->hdr_itt); + opcode, task->itt, + task->hdr_itt); return -EACCES; } /* @@ -297,10 +297,10 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode) */ if (conn->session->fast_abort) { iscsi_conn_printk(KERN_INFO, conn, - "task [op %x/%x itt " + "task [op %x itt " "0x%x/0x%x] fast abort.\n", - task->hdr->opcode, opcode, - task->itt, task->hdr_itt); + opcode, task->itt, + task->hdr_itt); return -EACCES; } break; diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 8eb3f96fe06896ca091e634cf55b8e76819d0dec..bc61cc8bc6f0227dfb37af5bb23a279c96e19e8f 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -676,7 +676,7 @@ struct lpfc_hba { #define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */ #define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */ #define LS_MDS_LINK_DOWN 0x8 /* MDS Diagnostics Link Down */ -#define LS_MDS_LOOPBACK 0x16 /* MDS Diagnostics Link Up (Loopback) */ +#define LS_MDS_LOOPBACK 0x10 /* MDS Diagnostics Link Up (Loopback) */ uint32_t hba_flag; /* hba generic flags */ #define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */ diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 985378e4bb6ff02ed3327bf719628508e1fee434..d55c365be2388dce37f72f30f5cc005a0153c909 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -6597,6 +6597,9 @@ megasas_resume(struct pci_dev *pdev) goto fail_init_mfi; } + if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) + goto fail_init_mfi; + tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, (unsigned long)instance); diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c index d3940c5d079dc728ada053fc6ca6023fdd5cc02b..63dd9bc21ff2db8d01339a5196c1763b514191dc 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_transport.c +++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c @@ -1936,12 
+1936,12 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, pr_info(MPT3SAS_FMT "%s: host reset in progress!\n", __func__, ioc->name); rc = -EFAULT; - goto out; + goto job_done; } rc = mutex_lock_interruptible(&ioc->transport_cmds.mutex); if (rc) - goto out; + goto job_done; if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) { pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n", ioc->name, @@ -2066,6 +2066,7 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, out: ioc->transport_cmds.status = MPT3_CMD_NOT_USED; mutex_unlock(&ioc->transport_cmds.mutex); +job_done: bsg_job_done(job, rc, reslen); } diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index 382edb79a0de99f4bda0551cf999ec3a20a3ad56..56bcdd412d268ad6c6ea6d11b960422eef08344d 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -3240,6 +3240,11 @@ static int __qedf_probe(struct pci_dev *pdev, int mode) init_completion(&qedf->flogi_compl); + status = qed_ops->common->update_drv_state(qedf->cdev, true); + if (status) + QEDF_ERR(&(qedf->dbg_ctx), + "Failed to send drv state to MFW.\n"); + memset(&link_params, 0, sizeof(struct qed_link_params)); link_params.link_up = true; status = qed_ops->common->set_link(qedf->cdev, &link_params); @@ -3288,6 +3293,7 @@ static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id) static void __qedf_remove(struct pci_dev *pdev, int mode) { struct qedf_ctx *qedf; + int rc; if (!pdev) { QEDF_ERR(NULL, "pdev is NULL.\n"); @@ -3382,6 +3388,12 @@ static void __qedf_remove(struct pci_dev *pdev, int mode) qed_ops->common->set_power_state(qedf->cdev, PCI_D0); pci_set_drvdata(pdev, NULL); } + + rc = qed_ops->common->update_drv_state(qedf->cdev, false); + if (rc) + QEDF_ERR(&(qedf->dbg_ctx), + "Failed to send drv state to MFW.\n"); + qed_ops->common->slowpath_stop(qedf->cdev); qed_ops->common->remove(qedf->cdev); diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index 1573749fe615cb5941db790e3668759c40fec2ae..e7daadc089fcbea1212507bad7313aac78fdadbe 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c @@ -2087,6 +2087,7 @@ kset_free: static void __qedi_remove(struct pci_dev *pdev, int mode) { struct qedi_ctx *qedi = pci_get_drvdata(pdev); + int rval; if (qedi->tmf_thread) { flush_workqueue(qedi->tmf_thread); @@ -2116,6 +2117,10 @@ static void __qedi_remove(struct pci_dev *pdev, int mode) if (mode == QEDI_MODE_NORMAL) qedi_free_iscsi_pf_param(qedi); + rval = qedi_ops->common->update_drv_state(qedi->cdev, false); + if (rval) + QEDI_ERR(&qedi->dbg_ctx, "Failed to send drv state to MFW\n"); + if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) { qedi_ops->common->slowpath_stop(qedi->cdev); qedi_ops->common->remove(qedi->cdev); @@ -2390,6 +2395,12 @@ static int __qedi_probe(struct pci_dev *pdev, int mode) if (qedi_setup_boot_info(qedi)) QEDI_ERR(&qedi->dbg_ctx, "No iSCSI boot target configured\n"); + + rc = qedi_ops->common->update_drv_state(qedi->cdev, true); + if (rc) + QEDI_ERR(&qedi->dbg_ctx, + "Failed to send drv state to MFW\n"); + } return 0; diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 40406c162d0d7f88bcd1d479c2666888b8eda3c4..8ce12ffcbb7a8ec0bbe2e76654c8ff35fb43b3bd 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -721,8 +721,24 @@ static ssize_t sdev_store_delete(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - if (device_remove_file_self(dev, attr)) - 
scsi_remove_device(to_scsi_device(dev)); + struct kernfs_node *kn; + + kn = sysfs_break_active_protection(&dev->kobj, &attr->attr); + WARN_ON_ONCE(!kn); + /* + * Concurrent writes into the "delete" sysfs attribute may trigger + * concurrent calls to device_remove_file() and scsi_remove_device(). + * device_remove_file() handles concurrent removal calls by + * serializing these and by ignoring the second and later removal + * attempts. Concurrent calls of scsi_remove_device() are + * serialized. The second and later calls of scsi_remove_device() are + * ignored because the first call of that function changes the device + * state into SDEV_DEL. + */ + device_remove_file(dev, attr); + scsi_remove_device(to_scsi_device(dev)); + if (kn) + sysfs_unbreak_active_protection(kn); return count; }; static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete); diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c index 777e5f1e52d10968d5f23e0e316db05b8209511d..0cd947f78b5bfdfa013ce4909afdc8f894b6146b 100644 --- a/drivers/scsi/vmw_pvscsi.c +++ b/drivers/scsi/vmw_pvscsi.c @@ -561,9 +561,14 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter, (btstat == BTSTAT_SUCCESS || btstat == BTSTAT_LINKED_COMMAND_COMPLETED || btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) { - cmd->result = (DID_OK << 16) | sdstat; - if (sdstat == SAM_STAT_CHECK_CONDITION && cmd->sense_buffer) - cmd->result |= (DRIVER_SENSE << 24); + if (sdstat == SAM_STAT_COMMAND_TERMINATED) { + cmd->result = (DID_RESET << 16); + } else { + cmd->result = (DID_OK << 16) | sdstat; + if (sdstat == SAM_STAT_CHECK_CONDITION && + cmd->sense_buffer) + cmd->result |= (DRIVER_SENSE << 24); + } } else switch (btstat) { case BTSTAT_SUCCESS: diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c index 36f59a1be7e9a60be61c2b1ba8f7468dfbd8c6c9..61389bdc7926690100fc0a38fc59e8b6a73853ab 100644 --- a/drivers/scsi/xen-scsifront.c +++ b/drivers/scsi/xen-scsifront.c @@ -654,10 +654,17 @@ static int scsifront_dev_reset_handler(struct scsi_cmnd *sc) static int scsifront_sdev_configure(struct scsi_device *sdev) { struct vscsifrnt_info *info = shost_priv(sdev->host); + int err; - if (info && current == info->curr) - xenbus_printf(XBT_NIL, info->dev->nodename, + if (info && current == info->curr) { + err = xenbus_printf(XBT_NIL, info->dev->nodename, info->dev_state_path, "%d", XenbusStateConnected); + if (err) { + xenbus_dev_error(info->dev, err, + "%s: writing dev_state_path", __func__); + return err; + } + } return 0; } @@ -665,10 +672,15 @@ static int scsifront_sdev_configure(struct scsi_device *sdev) static void scsifront_sdev_destroy(struct scsi_device *sdev) { struct vscsifrnt_info *info = shost_priv(sdev->host); + int err; - if (info && current == info->curr) - xenbus_printf(XBT_NIL, info->dev->nodename, + if (info && current == info->curr) { + err = xenbus_printf(XBT_NIL, info->dev->nodename, info->dev_state_path, "%d", XenbusStateClosed); + if (err) + xenbus_dev_error(info->dev, err, + "%s: writing dev_state_path", __func__); + } } static struct scsi_host_template scsifront_sht = { @@ -1003,9 +1015,12 @@ static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op) if (scsi_add_device(info->host, chn, tgt, lun)) { dev_err(&dev->dev, "scsi_add_device\n"); - xenbus_printf(XBT_NIL, dev->nodename, + err = xenbus_printf(XBT_NIL, dev->nodename, info->dev_state_path, "%d", XenbusStateClosed); + if (err) + xenbus_dev_error(dev, err, + "%s: writing dev_state_path", __func__); } break; case 
VSCSIFRONT_OP_DEL_LUN: @@ -1019,10 +1034,14 @@ static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op) } break; case VSCSIFRONT_OP_READD_LUN: - if (device_state == XenbusStateConnected) - xenbus_printf(XBT_NIL, dev->nodename, + if (device_state == XenbusStateConnected) { + err = xenbus_printf(XBT_NIL, dev->nodename, info->dev_state_path, "%d", XenbusStateConnected); + if (err) + xenbus_dev_error(dev, err, + "%s: writing dev_state_path", __func__); + } break; default: break; diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c index 1613ccf0c0591921bd044151fb9da9778f700023..c54d229f8da49c82f1fc9e104c2003bbe019104f 100644 --- a/drivers/soc/imx/gpc.c +++ b/drivers/soc/imx/gpc.c @@ -27,9 +27,16 @@ #define GPC_PGC_SW2ISO_SHIFT 0x8 #define GPC_PGC_SW_SHIFT 0x0 +#define GPC_PGC_PCI_PDN 0x200 +#define GPC_PGC_PCI_SR 0x20c + #define GPC_PGC_GPU_PDN 0x260 #define GPC_PGC_GPU_PUPSCR 0x264 #define GPC_PGC_GPU_PDNSCR 0x268 +#define GPC_PGC_GPU_SR 0x26c + +#define GPC_PGC_DISP_PDN 0x240 +#define GPC_PGC_DISP_SR 0x24c #define GPU_VPU_PUP_REQ BIT(1) #define GPU_VPU_PDN_REQ BIT(0) @@ -303,10 +310,24 @@ static const struct of_device_id imx_gpc_dt_ids[] = { { } }; +static const struct regmap_range yes_ranges[] = { + regmap_reg_range(GPC_CNTR, GPC_CNTR), + regmap_reg_range(GPC_PGC_PCI_PDN, GPC_PGC_PCI_SR), + regmap_reg_range(GPC_PGC_GPU_PDN, GPC_PGC_GPU_SR), + regmap_reg_range(GPC_PGC_DISP_PDN, GPC_PGC_DISP_SR), +}; + +static const struct regmap_access_table access_table = { + .yes_ranges = yes_ranges, + .n_yes_ranges = ARRAY_SIZE(yes_ranges), +}; + static const struct regmap_config imx_gpc_regmap_config = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, + .rd_table = &access_table, + .wr_table = &access_table, .max_register = 0x2ac, }; diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c index f4e3bd40c72e60c0448c98456f7b53f6be7936bd..6ef18cf8f24387e324cf455ae98c30f2b27c95d3 100644 --- a/drivers/soc/imx/gpcv2.c +++ b/drivers/soc/imx/gpcv2.c @@ -39,10 +39,15 @@ #define GPC_M4_PU_PDN_FLG 0x1bc - -#define PGC_MIPI 4 -#define PGC_PCIE 5 -#define PGC_USB_HSIC 8 +/* + * The PGC offset values in Reference Manual + * (Rev. 1, 01/2018 and the older ones) GPC chapter's + * GPC_PGC memory map are incorrect, below offset + * values are from design RTL. 
+ */ +#define PGC_MIPI 16 +#define PGC_PCIE 17 +#define PGC_USB_HSIC 20 #define GPC_PGC_CTRL(n) (0x800 + (n) * 0x40) #define GPC_PGC_SR(n) (GPC_PGC_CTRL(n) + 0xc) diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c index 4a001634023e09b8e83b8e6b82b5af557e2c0853..02bd1eba045b8b6bc915ff44b076481971c7d1d6 100644 --- a/drivers/spi/spi-cadence.c +++ b/drivers/spi/spi-cadence.c @@ -319,7 +319,7 @@ static void cdns_spi_fill_tx_fifo(struct cdns_spi *xspi) */ if (cdns_spi_read(xspi, CDNS_SPI_ISR) & CDNS_SPI_IXR_TXFULL) - usleep_range(10, 20); + udelay(10); if (xspi->txbuf) cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++); diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c index 6ddb6ef1fda4ff0900b97d7b87942bbe9935475d..c5bbe08771a4d06894e14b284a589f2149d58f02 100644 --- a/drivers/spi/spi-davinci.c +++ b/drivers/spi/spi-davinci.c @@ -217,7 +217,7 @@ static void davinci_spi_chipselect(struct spi_device *spi, int value) pdata = &dspi->pdata; /* program delay transfers if tx_delay is non zero */ - if (spicfg->wdelay) + if (spicfg && spicfg->wdelay) spidat1 |= SPIDAT1_WDEL; /* diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index d89127f4a46dfd567e2c7cfb290833d038c930ed..ca013dd4ff6bb19d410ae522fe1f91b27d5f7ace 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -1006,30 +1006,30 @@ static int dspi_probe(struct platform_device *pdev) goto out_master_put; } + dspi->clk = devm_clk_get(&pdev->dev, "dspi"); + if (IS_ERR(dspi->clk)) { + ret = PTR_ERR(dspi->clk); + dev_err(&pdev->dev, "unable to get clock\n"); + goto out_master_put; + } + ret = clk_prepare_enable(dspi->clk); + if (ret) + goto out_master_put; + dspi_init(dspi); dspi->irq = platform_get_irq(pdev, 0); if (dspi->irq < 0) { dev_err(&pdev->dev, "can't get platform irq\n"); ret = dspi->irq; - goto out_master_put; + goto out_clk_put; } ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt, 0, pdev->name, dspi); if (ret < 0) { dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n"); - goto out_master_put; - } - - dspi->clk = devm_clk_get(&pdev->dev, "dspi"); - if (IS_ERR(dspi->clk)) { - ret = PTR_ERR(dspi->clk); - dev_err(&pdev->dev, "unable to get clock\n"); - goto out_master_put; + goto out_clk_put; } - ret = clk_prepare_enable(dspi->clk); - if (ret) - goto out_master_put; if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) { ret = dspi_request_dma(dspi, res->start); diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index 4cb515a3104c1759b451c1f2d313953c009fb3ce..3a2e46e49405b5ea2bd2e17f982d4298578b72ae 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -1480,6 +1480,10 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = { { PCI_VDEVICE(INTEL, 0x31c2), LPSS_BXT_SSP }, { PCI_VDEVICE(INTEL, 0x31c4), LPSS_BXT_SSP }, { PCI_VDEVICE(INTEL, 0x31c6), LPSS_BXT_SSP }, + /* ICL-LP */ + { PCI_VDEVICE(INTEL, 0x34aa), LPSS_CNL_SSP }, + { PCI_VDEVICE(INTEL, 0x34ab), LPSS_CNL_SSP }, + { PCI_VDEVICE(INTEL, 0x34fb), LPSS_CNL_SSP }, /* APL */ { PCI_VDEVICE(INTEL, 0x5ac2), LPSS_BXT_SSP }, { PCI_VDEVICE(INTEL, 0x5ac4), LPSS_BXT_SSP }, diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c index 2a10b3f94ff72a4ea3d6924a082ef6d2db174528..20981e08ee975d197c63845fcce11e0c76e53e8d 100644 --- a/drivers/spi/spi-rspi.c +++ b/drivers/spi/spi-rspi.c @@ -598,11 +598,13 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx, ret = wait_event_interruptible_timeout(rspi->wait, rspi->dma_callbacked, HZ); - if (ret > 0 
&& rspi->dma_callbacked) + if (ret > 0 && rspi->dma_callbacked) { ret = 0; - else if (!ret) { - dev_err(&rspi->master->dev, "DMA timeout\n"); - ret = -ETIMEDOUT; + } else { + if (!ret) { + dev_err(&rspi->master->dev, "DMA timeout\n"); + ret = -ETIMEDOUT; + } if (tx) dmaengine_terminate_all(rspi->master->dma_tx); if (rx) @@ -1352,12 +1354,36 @@ static const struct platform_device_id spi_driver_ids[] = { MODULE_DEVICE_TABLE(platform, spi_driver_ids); +#ifdef CONFIG_PM_SLEEP +static int rspi_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct rspi_data *rspi = platform_get_drvdata(pdev); + + return spi_master_suspend(rspi->master); +} + +static int rspi_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct rspi_data *rspi = platform_get_drvdata(pdev); + + return spi_master_resume(rspi->master); +} + +static SIMPLE_DEV_PM_OPS(rspi_pm_ops, rspi_suspend, rspi_resume); +#define DEV_PM_OPS &rspi_pm_ops +#else +#define DEV_PM_OPS NULL +#endif /* CONFIG_PM_SLEEP */ + static struct platform_driver rspi_driver = { .probe = rspi_probe, .remove = rspi_remove, .id_table = spi_driver_ids, .driver = { .name = "renesas_spi", + .pm = DEV_PM_OPS, .of_match_table = of_match_ptr(rspi_of_match), }, }; diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index 0fea18ab970e30424d0799f0b265d24f6d26dfc2..db2a529accae8471352ee54b70f19ef4a0fc2483 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c @@ -384,7 +384,8 @@ static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p, static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p) { - sh_msiof_write(p, STR, sh_msiof_read(p, STR)); + sh_msiof_write(p, STR, + sh_msiof_read(p, STR) & ~(STR_TDREQ | STR_RDREQ)); } static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p, @@ -1361,12 +1362,37 @@ static const struct platform_device_id spi_driver_ids[] = { }; MODULE_DEVICE_TABLE(platform, spi_driver_ids); +#ifdef CONFIG_PM_SLEEP +static int sh_msiof_spi_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev); + + return spi_master_suspend(p->master); +} + +static int sh_msiof_spi_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev); + + return spi_master_resume(p->master); +} + +static SIMPLE_DEV_PM_OPS(sh_msiof_spi_pm_ops, sh_msiof_spi_suspend, + sh_msiof_spi_resume); +#define DEV_PM_OPS &sh_msiof_spi_pm_ops +#else +#define DEV_PM_OPS NULL +#endif /* CONFIG_PM_SLEEP */ + static struct platform_driver sh_msiof_spi_drv = { .probe = sh_msiof_spi_probe, .remove = sh_msiof_spi_remove, .id_table = spi_driver_ids, .driver = { .name = "spi_sh_msiof", + .pm = DEV_PM_OPS, .of_match_table = of_match_ptr(sh_msiof_match), }, }; diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c index 3e12d5f87ee4412b086649c8f2312fb3cc9e30a8..9831c1106945e835d6f8891569a8adc9bbcdc42a 100644 --- a/drivers/spi/spi-tegra20-slink.c +++ b/drivers/spi/spi-tegra20-slink.c @@ -1063,6 +1063,24 @@ static int tegra_slink_probe(struct platform_device *pdev) goto exit_free_master; } + /* disabled clock may cause interrupt storm upon request */ + tspi->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(tspi->clk)) { + ret = PTR_ERR(tspi->clk); + dev_err(&pdev->dev, "Can not get clock %d\n", ret); + goto exit_free_master; + } + ret = clk_prepare(tspi->clk); + if (ret 
< 0) { + dev_err(&pdev->dev, "Clock prepare failed %d\n", ret); + goto exit_free_master; + } + ret = clk_enable(tspi->clk); + if (ret < 0) { + dev_err(&pdev->dev, "Clock enable failed %d\n", ret); + goto exit_free_master; + } + spi_irq = platform_get_irq(pdev, 0); tspi->irq = spi_irq; ret = request_threaded_irq(tspi->irq, tegra_slink_isr, @@ -1071,14 +1089,7 @@ static int tegra_slink_probe(struct platform_device *pdev) if (ret < 0) { dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n", tspi->irq); - goto exit_free_master; - } - - tspi->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(tspi->clk)) { - dev_err(&pdev->dev, "can not get clock\n"); - ret = PTR_ERR(tspi->clk); - goto exit_free_irq; + goto exit_clk_disable; } tspi->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi"); @@ -1138,6 +1149,8 @@ exit_rx_dma_free: tegra_slink_deinit_dma_param(tspi, true); exit_free_irq: free_irq(spi_irq, tspi); +exit_clk_disable: + clk_disable(tspi->clk); exit_free_master: spi_master_put(master); return ret; @@ -1150,6 +1163,8 @@ static int tegra_slink_remove(struct platform_device *pdev) free_irq(tspi->irq, tspi); + clk_disable(tspi->clk); + if (tspi->tx_dma_chan) tegra_slink_deinit_dma_param(tspi, false); diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index eece63e5dd5842cef0a6ead20831a9aeeba86755..af71c91812165dcbaed8391d42084b9a4e9ef253 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -2110,8 +2110,17 @@ int spi_register_controller(struct spi_controller *ctlr) */ if (ctlr->num_chipselect == 0) return -EINVAL; - /* allocate dynamic bus number using Linux idr */ - if ((ctlr->bus_num < 0) && ctlr->dev.of_node) { + if (ctlr->bus_num >= 0) { + /* devices with a fixed bus num must check-in with the num */ + mutex_lock(&board_lock); + id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num, + ctlr->bus_num + 1, GFP_KERNEL); + mutex_unlock(&board_lock); + if (WARN(id < 0, "couldn't get idr")) + return id == -ENOSPC ? 
-EBUSY : id; + ctlr->bus_num = id; + } else if (ctlr->dev.of_node) { + /* allocate dynamic bus number using Linux idr */ id = of_alias_get_id(ctlr->dev.of_node, "spi"); if (id >= 0) { ctlr->bus_num = id; diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c index 893b2836089c3d1d02333a8b5a066c6bfa951866..4151bb44a41035eed00f2a2bcc60b3ff030846c5 100644 --- a/drivers/staging/android/ashmem.c +++ b/drivers/staging/android/ashmem.c @@ -374,6 +374,12 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) goto out; } + /* requested mapping size larger than object size */ + if (vma->vm_end - vma->vm_start > PAGE_ALIGN(asma->size)) { + ret = -EINVAL; + goto out; + } + /* requested protection bits must match our allowed protection mask */ if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask, 0)) & calc_vm_prot_bits(PROT_MASK, 0))) { diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c index 2cac160993bbd82f12246951e7db109b5fc043ac..158f3e83efb66d75daf0785f298da811900a9252 100644 --- a/drivers/staging/comedi/drivers/ni_mio_common.c +++ b/drivers/staging/comedi/drivers/ni_mio_common.c @@ -5453,11 +5453,11 @@ static int ni_E_init(struct comedi_device *dev, /* Digital I/O (PFI) subdevice */ s = &dev->subdevices[NI_PFI_DIO_SUBDEV]; s->type = COMEDI_SUBD_DIO; - s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL; s->maxdata = 1; if (devpriv->is_m_series) { s->n_chan = 16; s->insn_bits = ni_pfi_insn_bits; + s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL; ni_writew(dev, s->state, NI_M_PFI_DO_REG); for (i = 0; i < NUM_PFI_OUTPUT_SELECT_REGS; ++i) { @@ -5466,6 +5466,7 @@ static int ni_E_init(struct comedi_device *dev, } } else { s->n_chan = 10; + s->subdev_flags = SDF_INTERNAL; } s->insn_config = ni_pfi_insn_config; diff --git a/drivers/staging/irda/net/af_irda.c b/drivers/staging/irda/net/af_irda.c index 23fa7c8b09a5861e2acc042775830cc2dac4e0f4..cebe9878ca03629d054bb91e0fe309ca9a370615 100644 --- a/drivers/staging/irda/net/af_irda.c +++ b/drivers/staging/irda/net/af_irda.c @@ -775,6 +775,13 @@ static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) return -EINVAL; lock_sock(sk); + + /* Ensure that the socket is not already bound */ + if (self->ias_obj) { + err = -EINVAL; + goto out; + } + #ifdef CONFIG_IRDA_ULTRA /* Special care for Ultra sockets */ if ((sk->sk_type == SOCK_DGRAM) && @@ -2012,7 +2019,11 @@ static int irda_setsockopt(struct socket *sock, int level, int optname, err = -EINVAL; goto out; } - irias_insert_object(ias_obj); + + /* Only insert newly allocated objects */ + if (free_ias) + irias_insert_object(ias_obj); + kfree(ias_opt); break; case IRLMP_IAS_DEL: diff --git a/drivers/staging/media/imx/imx-ic-prpencvf.c b/drivers/staging/media/imx/imx-ic-prpencvf.c index 0790b3d9e25560366a48633ddce37beeea21e762..111afd34aa3c328e385d09fcdb0f4c19dbc1e6cb 100644 --- a/drivers/staging/media/imx/imx-ic-prpencvf.c +++ b/drivers/staging/media/imx/imx-ic-prpencvf.c @@ -210,6 +210,7 @@ static void prp_vb2_buf_done(struct prp_priv *priv, struct ipuv3_channel *ch) done = priv->active_vb2_buf[priv->ipu_buf_num]; if (done) { + done->vbuf.field = vdev->fmt.fmt.pix.field; vb = &done->vbuf.vb2_buf; vb->timestamp = ktime_get_ns(); vb2_buffer_done(vb, priv->nfb4eof ? 
diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c index 6d856118c223285702913468b29200e26c448a7c..83ecb5b2fb9e18c66426f56d18f8379f3cbdab9c 100644 --- a/drivers/staging/media/imx/imx-media-csi.c +++ b/drivers/staging/media/imx/imx-media-csi.c @@ -171,6 +171,7 @@ static void csi_vb2_buf_done(struct csi_priv *priv) done = priv->active_vb2_buf[priv->ipu_buf_num]; if (done) { + done->vbuf.field = vdev->fmt.fmt.pix.field; vb = &done->vbuf.vb2_buf; vb->timestamp = ktime_get_ns(); vb2_buffer_done(vb, priv->nfb4eof ? diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c index 9e2f0421a01eadd561e0fd1b247e95dc3f40ee65..0bf6643cca0722f1be7aa2d03d553faeaa6383db 100644 --- a/drivers/staging/media/omap4iss/iss_video.c +++ b/drivers/staging/media/omap4iss/iss_video.c @@ -11,7 +11,6 @@ * (at your option) any later version. */ -#include #include #include #include @@ -24,6 +23,8 @@ #include #include +#include + #include "iss_video.h" #include "iss.h" diff --git a/drivers/staging/rts5208/sd.c b/drivers/staging/rts5208/sd.c index 4033a2cf7ac9b3b7028f84f7ea54f753a75536a4..d98d5fe25a17d755e6fe9ba19ba7474ff7233fcb 100644 --- a/drivers/staging/rts5208/sd.c +++ b/drivers/staging/rts5208/sd.c @@ -5002,7 +5002,7 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip) goto sd_execute_write_cmd_failed; } - rtsx_write_register(chip, SD_BYTE_CNT_L, 0xFF, 0x00); + retval = rtsx_write_register(chip, SD_BYTE_CNT_L, 0xFF, 0x00); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); goto sd_execute_write_cmd_failed; diff --git a/drivers/staging/typec/tcpm.c b/drivers/staging/typec/tcpm.c index 8af62e74d54c6319678f315a48e302a761dbb71c..f237e31926f4cfc13be67b2f0432cc429e750e7c 100644 --- a/drivers/staging/typec/tcpm.c +++ b/drivers/staging/typec/tcpm.c @@ -2479,7 +2479,8 @@ static void run_state_machine(struct tcpm_port *port) tcpm_port_is_sink(port) && time_is_after_jiffies(port->delayed_runtime)) { tcpm_set_state(port, SNK_DISCOVERY, - port->delayed_runtime - jiffies); + jiffies_to_msecs(port->delayed_runtime - + jiffies)); break; } tcpm_set_state(port, unattached_state(port), 0); diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c index 94654c0c7bba572f551e2e018cc57396b457b371..36165a60fb0593797b7bc99a15d9ea60bb04cadb 100644 --- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c +++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c @@ -22,7 +22,7 @@ /* hardware definition */ static const struct snd_pcm_hardware snd_bcm2835_playback_hw = { .info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | - SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID), + SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_BATCH), .formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000, .rate_min = 8000, @@ -93,6 +93,8 @@ void bcm2835_playback_fifo(struct bcm2835_alsa_stream *alsa_stream) alsa_stream->pos %= alsa_stream->buffer_size; } + alsa_stream->interpolate_start = ktime_get_ns(); + if (alsa_stream->substream) { if (new_period) snd_pcm_period_elapsed(alsa_stream->substream); @@ -323,6 +325,7 @@ static int snd_bcm2835_pcm_prepare(struct snd_pcm_substream *substream) alsa_stream->buffer_size = snd_pcm_lib_buffer_bytes(substream); alsa_stream->period_size = snd_pcm_lib_period_bytes(substream); alsa_stream->pos = 0; + alsa_stream->interpolate_start = 
ktime_get_ns(); audio_debug("buffer_size=%d, period_size=%d pos=%d frame_bits=%d\n", alsa_stream->buffer_size, alsa_stream->period_size, @@ -415,13 +418,19 @@ snd_bcm2835_pcm_pointer(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct bcm2835_alsa_stream *alsa_stream = runtime->private_data; - + u64 now = ktime_get_ns(); audio_debug("pcm_pointer... (%d) hwptr=%d appl=%d pos=%d\n", 0, frames_to_bytes(runtime, runtime->status->hw_ptr), frames_to_bytes(runtime, runtime->control->appl_ptr), alsa_stream->pos); + /* Give userspace better delay reporting by interpolating between GPU + * notifications, assuming audio speed is close enough to the clock + * used for ktime */ + if (alsa_stream->interpolate_start && alsa_stream->interpolate_start < now) + runtime->delay = -(int)div_u64((now - alsa_stream->interpolate_start) * runtime->rate, 1000000000); + return snd_pcm_indirect_playback_pointer(substream, &alsa_stream->pcm_indirect, alsa_stream->pos); diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c index 4be864dbd41c9f4eb63f03361a0dab1e13c54a67..9eb3b625a1b18b46fa940c96a9c5893ca44cade0 100644 --- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c +++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c @@ -442,16 +442,16 @@ int bcm2835_audio_open(struct bcm2835_alsa_stream *alsa_stream) my_workqueue_init(alsa_stream); ret = bcm2835_audio_open_connection(alsa_stream); - if (ret) { - ret = -1; - goto exit; - } + if (ret) + goto free_wq; + instance = alsa_stream->instance; LOG_DBG(" instance (%p)\n", instance); if (mutex_lock_interruptible(&instance->vchi_mutex)) { LOG_DBG("Interrupted whilst waiting for lock on (%d)\n", instance->num_connections); - return -EINTR; + ret = -EINTR; + goto free_wq; } vchi_service_use(instance->vchi_handle[0]); @@ -474,7 +474,11 @@ int bcm2835_audio_open(struct bcm2835_alsa_stream *alsa_stream) unlock: vchi_service_release(instance->vchi_handle[0]); mutex_unlock(&instance->vchi_mutex); -exit: + +free_wq: + if (ret) + destroy_workqueue(alsa_stream->my_wq); + return ret; } diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h index 379604d3554e8a37e8fef3cafc388baf936281c6..e64862e1781bead630edbfa347ff4579cc807c82 100644 --- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h +++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h @@ -137,6 +137,7 @@ struct bcm2835_alsa_stream { unsigned int pos; unsigned int buffer_size; unsigned int period_size; + u64 interpolate_start; atomic_t retrieved; struct bcm2835_audio_instance *instance; diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c index c2e53522afe7ec1e43467b58f002450471a03412..949005bb124682c9d81817d10068e39265e52ffd 100644 --- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c +++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c @@ -625,6 +625,7 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count) static void stop_streaming(struct vb2_queue *vq) { int ret; + unsigned long timeout; struct bm2835_mmal_dev *dev = vb2_get_drv_priv(vq); struct vchiq_mmal_port *port = dev->capture.port; @@ -650,6 +651,12 @@ static void stop_streaming(struct vb2_queue *vq) &dev->capture.frame_count, sizeof(dev->capture.frame_count)); + /* wait for last frame to complete */ + timeout = 
wait_for_completion_timeout(&dev->capture.frame_cmplt, HZ); + if (timeout == 0) + v4l2_err(&dev->v4l2_dev, + "timed out waiting for frame completion\n"); + v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev, "disabling connection\n"); diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c index f8fd99bbb5e5e105a216968c7693c415cbb922a6..2b6ac7045635f080bd58c9363e2798e21e1ce771 100644 --- a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c +++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c @@ -716,6 +716,7 @@ static int send_synchronous_mmal_msg(struct vchiq_mmal_instance *instance, { struct mmal_msg_context *msg_context; int ret; + unsigned long timeout; /* payload size must not cause message to exceed max size */ if (payload_len > @@ -754,11 +755,11 @@ static int send_synchronous_mmal_msg(struct vchiq_mmal_instance *instance, return ret; } - ret = wait_for_completion_timeout(&msg_context->u.sync.cmplt, 3 * HZ); - if (ret <= 0) { - pr_err("error %d waiting for sync completion\n", ret); - if (ret == 0) - ret = -ETIME; + timeout = wait_for_completion_timeout(&msg_context->u.sync.cmplt, + 3 * HZ); + if (timeout == 0) { + pr_err("timed out waiting for sync completion\n"); + ret = -ETIME; /* todo: what happens if the message arrives after aborting */ release_msg_context(msg_context); return ret; diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c index be08849175ea3622321136f3be66df8705e40772..59e21d2277de90511cadf4fd7b7ebe523f5f30d2 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include #include @@ -59,6 +60,8 @@ #define BELL0 0x00 #define BELL2 0x08 +#define VCHIQ_DMA_POOL_SIZE PAGE_SIZE + typedef struct vchiq_2835_state_struct { int inited; VCHIQ_ARM_STATE_T arm_state; @@ -68,6 +71,7 @@ struct vchiq_pagelist_info { PAGELIST_T *pagelist; size_t pagelist_buffer_size; dma_addr_t dma_addr; + bool is_from_pool; enum dma_data_direction dma_dir; unsigned int num_pages; unsigned int pages_need_release; @@ -78,6 +82,7 @@ struct vchiq_pagelist_info { static void __iomem *g_regs; static unsigned int g_cache_line_size = sizeof(CACHE_LINE_SIZE); +static struct dma_pool *g_dma_pool; static unsigned int g_fragments_size; static char *g_fragments_base; static char *g_free_fragments; @@ -193,6 +198,14 @@ int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state) } g_dev = dev; + g_dma_pool = dmam_pool_create("vchiq_scatter_pool", dev, + VCHIQ_DMA_POOL_SIZE, g_cache_line_size, + 0); + if (!g_dma_pool) { + dev_err(dev, "failed to create dma pool"); + return -ENOMEM; + } + vchiq_log_info(vchiq_arm_log_level, "vchiq_init - done (slots %pK, phys %pad)", vchiq_slot_zero, &slot_phys); @@ -377,9 +390,14 @@ cleanup_pagelistinfo(struct vchiq_pagelist_info *pagelistinfo) for (i = 0; i < pagelistinfo->num_pages; i++) put_page(pagelistinfo->pages[i]); } - - dma_free_coherent(g_dev, pagelistinfo->pagelist_buffer_size, - pagelistinfo->pagelist, pagelistinfo->dma_addr); + if (pagelistinfo->is_from_pool) { + dma_pool_free(g_dma_pool, pagelistinfo->pagelist, + pagelistinfo->dma_addr); + } else { + dma_free_coherent(g_dev, pagelistinfo->pagelist_buffer_size, + pagelistinfo->pagelist, + pagelistinfo->dma_addr); + } } /* There is a potential problem with 
partial cache lines (pages?) @@ -400,6 +418,7 @@ create_pagelist(char __user *buf, size_t count, unsigned short type, u32 *addrs; unsigned int num_pages, offset, i, k; int actual_pages; + bool is_from_pool; size_t pagelist_size; struct scatterlist *scatterlist, *sg; int dma_buffers; @@ -417,10 +436,16 @@ create_pagelist(char __user *buf, size_t count, unsigned short type, /* Allocate enough storage to hold the page pointers and the page ** list */ - pagelist = dma_zalloc_coherent(g_dev, - pagelist_size, - &dma_addr, - GFP_KERNEL); + if (pagelist_size > VCHIQ_DMA_POOL_SIZE) { + pagelist = dma_zalloc_coherent(g_dev, + pagelist_size, + &dma_addr, + GFP_KERNEL); + is_from_pool = false; + } else { + pagelist = dma_pool_zalloc(g_dma_pool, GFP_KERNEL, &dma_addr); + is_from_pool = true; + } vchiq_log_trace(vchiq_arm_log_level, "create_pagelist - %pK", pagelist); @@ -441,6 +466,7 @@ create_pagelist(char __user *buf, size_t count, unsigned short type, pagelistinfo->pagelist = pagelist; pagelistinfo->pagelist_buffer_size = pagelist_size; pagelistinfo->dma_addr = dma_addr; + pagelistinfo->is_from_pool = is_from_pool; pagelistinfo->dma_dir = (type == PAGELIST_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE; pagelistinfo->num_pages = num_pages; diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c index 514986b57c2d60ce19c1074f4d19d65dd550be2e..25eb3891e34b8435fe15b80c5b7eb5e2a7b7c6d6 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_target.c +++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c @@ -652,6 +652,7 @@ static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk) struct iscsi_param *param; u32 mrdsl, mbl; u32 max_npdu, max_iso_npdu; + u32 max_iso_payload; if (conn->login->leading_connection) { param = iscsi_find_param_from_key(MAXBURSTLENGTH, @@ -670,8 +671,10 @@ static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk) mrdsl = conn_ops->MaxRecvDataSegmentLength; max_npdu = mbl / mrdsl; - max_iso_npdu = CXGBIT_MAX_ISO_PAYLOAD / - (ISCSI_HDR_LEN + mrdsl + + max_iso_payload = rounddown(CXGBIT_MAX_ISO_PAYLOAD, csk->emss); + + max_iso_npdu = max_iso_payload / + (ISCSI_HDR_LEN + mrdsl + cxgbit_digest_len[csk->submode]); csk->max_iso_npdu = min(max_npdu, max_iso_npdu); @@ -741,6 +744,9 @@ static int cxgbit_set_params(struct iscsi_conn *conn) if (conn_ops->MaxRecvDataSegmentLength > cdev->mdsl) conn_ops->MaxRecvDataSegmentLength = cdev->mdsl; + if (cxgbit_set_digest(csk)) + return -1; + if (conn->login->leading_connection) { param = iscsi_find_param_from_key(ERRORRECOVERYLEVEL, conn->param_list); @@ -764,7 +770,7 @@ static int cxgbit_set_params(struct iscsi_conn *conn) if (is_t5(cdev->lldi.adapter_type)) goto enable_ddp; else - goto enable_digest; + return 0; } if (test_bit(CDEV_ISO_ENABLE, &cdev->flags)) { @@ -781,10 +787,6 @@ enable_ddp: } } -enable_digest: - if (cxgbit_set_digest(csk)) - return -1; - return 0; } diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c index 9518ffd8b8bac81cc1b725046857d5f229e58b15..4e680d753941f71ea299d87b6c0cd18fc307b50f 100644 --- a/drivers/target/iscsi/iscsi_target_auth.c +++ b/drivers/target/iscsi/iscsi_target_auth.c @@ -26,27 +26,6 @@ #include "iscsi_target_nego.h" #include "iscsi_target_auth.h" -static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len) -{ - int j = DIV_ROUND_UP(len, 2), rc; - - rc = hex2bin(dst, src, j); - if (rc < 0) - pr_debug("CHAP string contains non hex digit symbols\n"); - - dst[j] = '\0'; - return j; -} - -static void 
chap_binaryhex_to_asciihex(char *dst, char *src, int src_len) -{ - int i; - - for (i = 0; i < src_len; i++) { - sprintf(&dst[i*2], "%02x", (int) src[i] & 0xff); - } -} - static int chap_gen_challenge( struct iscsi_conn *conn, int caller, @@ -62,7 +41,7 @@ static int chap_gen_challenge( ret = get_random_bytes_wait(chap->challenge, CHAP_CHALLENGE_LENGTH); if (unlikely(ret)) return ret; - chap_binaryhex_to_asciihex(challenge_asciihex, chap->challenge, + bin2hex(challenge_asciihex, chap->challenge, CHAP_CHALLENGE_LENGTH); /* * Set CHAP_C, and copy the generated challenge into c_str. @@ -248,9 +227,16 @@ static int chap_server_compute_md5( pr_err("Could not find CHAP_R.\n"); goto out; } + if (strlen(chap_r) != MD5_SIGNATURE_SIZE * 2) { + pr_err("Malformed CHAP_R\n"); + goto out; + } + if (hex2bin(client_digest, chap_r, MD5_SIGNATURE_SIZE) < 0) { + pr_err("Malformed CHAP_R\n"); + goto out; + } pr_debug("[server] Got CHAP_R=%s\n", chap_r); - chap_string_to_hex(client_digest, chap_r, strlen(chap_r)); tfm = crypto_alloc_shash("md5", 0, 0); if (IS_ERR(tfm)) { @@ -294,7 +280,7 @@ static int chap_server_compute_md5( goto out; } - chap_binaryhex_to_asciihex(response, server_digest, MD5_SIGNATURE_SIZE); + bin2hex(response, server_digest, MD5_SIGNATURE_SIZE); pr_debug("[server] MD5 Server Digest: %s\n", response); if (memcmp(server_digest, client_digest, MD5_SIGNATURE_SIZE) != 0) { @@ -349,9 +335,7 @@ static int chap_server_compute_md5( pr_err("Could not find CHAP_C.\n"); goto out; } - pr_debug("[server] Got CHAP_C=%s\n", challenge); - challenge_len = chap_string_to_hex(challenge_binhex, challenge, - strlen(challenge)); + challenge_len = DIV_ROUND_UP(strlen(challenge), 2); if (!challenge_len) { pr_err("Unable to convert incoming challenge\n"); goto out; @@ -360,6 +344,11 @@ static int chap_server_compute_md5( pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n"); goto out; } + if (hex2bin(challenge_binhex, challenge, challenge_len) < 0) { + pr_err("Malformed CHAP_C\n"); + goto out; + } + pr_debug("[server] Got CHAP_C=%s\n", challenge); /* * During mutual authentication, the CHAP_C generated by the * initiator must not match the original CHAP_C generated by @@ -413,7 +402,7 @@ static int chap_server_compute_md5( /* * Convert response from binary hex to ascii hext. 
*/ - chap_binaryhex_to_asciihex(response, digest, MD5_SIGNATURE_SIZE); + bin2hex(response, digest, MD5_SIGNATURE_SIZE); *nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s", response); *nr_out_len += 1; diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index dc13afbd4c88dec2390ca8e65b3332d2f78acd73..98e27da34f3cb541e9575ec6b7ac5d021d258679 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -345,8 +345,7 @@ static int iscsi_login_zero_tsih_s1( pr_err("idr_alloc() for sess_idr failed\n"); iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, ISCSI_LOGIN_STATUS_NO_RESOURCES); - kfree(sess); - return -ENOMEM; + goto free_sess; } sess->creation_time = get_jiffies_64(); @@ -362,20 +361,28 @@ static int iscsi_login_zero_tsih_s1( ISCSI_LOGIN_STATUS_NO_RESOURCES); pr_err("Unable to allocate memory for" " struct iscsi_sess_ops.\n"); - kfree(sess); - return -ENOMEM; + goto remove_idr; } sess->se_sess = transport_init_session(TARGET_PROT_NORMAL); if (IS_ERR(sess->se_sess)) { iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, ISCSI_LOGIN_STATUS_NO_RESOURCES); - kfree(sess->sess_ops); - kfree(sess); - return -ENOMEM; + goto free_ops; } return 0; + +free_ops: + kfree(sess->sess_ops); +remove_idr: + spin_lock_bh(&sess_idr_lock); + idr_remove(&sess_idr, sess->session_index); + spin_unlock_bh(&sess_idr_lock); +free_sess: + kfree(sess); + conn->sess = NULL; + return -ENOMEM; } static int iscsi_login_zero_tsih_s2( @@ -1162,13 +1169,13 @@ void iscsi_target_login_sess_out(struct iscsi_conn *conn, ISCSI_LOGIN_STATUS_INIT_ERR); if (!zero_tsih || !conn->sess) goto old_sess_out; - if (conn->sess->se_sess) - transport_free_session(conn->sess->se_sess); - if (conn->sess->session_index != 0) { - spin_lock_bh(&sess_idr_lock); - idr_remove(&sess_idr, conn->sess->session_index); - spin_unlock_bh(&sess_idr_lock); - } + + transport_free_session(conn->sess->se_sess); + + spin_lock_bh(&sess_idr_lock); + idr_remove(&sess_idr, conn->sess->session_index); + spin_unlock_bh(&sess_idr_lock); + kfree(conn->sess->sess_ops); kfree(conn->sess); conn->sess = NULL; diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c index 594d07a1e995ec87d467f4286a1d8668f2e32e3d..16e7516052a4486601241026d0e411e9731a716c 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.c +++ b/drivers/target/iscsi/iscsi_target_tpg.c @@ -633,8 +633,7 @@ int iscsit_ta_authentication(struct iscsi_portal_group *tpg, u32 authentication) none = strstr(buf1, NONE); if (none) goto out; - strncat(buf1, ",", strlen(",")); - strncat(buf1, NONE, strlen(NONE)); + strlcat(buf1, "," NONE, sizeof(buf1)); if (iscsi_update_param_value(param, buf1) < 0) return -EINVAL; } diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index e8dd6da164b28550f42d4909afed480291cc29ab..84742125f77303ce7bf6539d7659a88ff52bdf81 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -904,14 +904,20 @@ struct se_device *target_find_device(int id, bool do_depend) EXPORT_SYMBOL(target_find_device); struct devices_idr_iter { + struct config_item *prev_item; int (*fn)(struct se_device *dev, void *data); void *data; }; static int target_devices_idr_iter(int id, void *p, void *data) + __must_hold(&device_mutex) { struct devices_idr_iter *iter = data; struct se_device *dev = p; + int ret; + + config_item_put(iter->prev_item); + iter->prev_item = NULL; /* * We add the device early to 
the idr, so it can be used @@ -922,7 +928,15 @@ static int target_devices_idr_iter(int id, void *p, void *data) if (!(dev->dev_flags & DF_CONFIGURED)) return 0; - return iter->fn(dev, iter->data); + iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item); + if (!iter->prev_item) + return 0; + mutex_unlock(&device_mutex); + + ret = iter->fn(dev, iter->data); + + mutex_lock(&device_mutex); + return ret; } /** @@ -936,15 +950,13 @@ static int target_devices_idr_iter(int id, void *p, void *data) int target_for_each_device(int (*fn)(struct se_device *dev, void *data), void *data) { - struct devices_idr_iter iter; + struct devices_idr_iter iter = { .fn = fn, .data = data }; int ret; - iter.fn = fn; - iter.data = data; - mutex_lock(&device_mutex); ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter); mutex_unlock(&device_mutex); + config_item_put(iter.prev_item); return ret; } diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index b00829995131e388c655e1c16733b17e4b792771..f652e58e2988cd738092f446e1882ba6f4b0d5aa 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -317,6 +317,7 @@ void __transport_register_session( { const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo; unsigned char buf[PR_REG_ISID_LEN]; + unsigned long flags; se_sess->se_tpg = se_tpg; se_sess->fabric_sess_ptr = fabric_sess_ptr; @@ -353,7 +354,7 @@ void __transport_register_session( se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]); } - spin_lock_irq(&se_nacl->nacl_sess_lock); + spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); /* * The se_nacl->nacl_sess pointer will be set to the * last active I_T Nexus for each struct se_node_acl. @@ -362,7 +363,7 @@ void __transport_register_session( list_add_tail(&se_sess->sess_acl_list, &se_nacl->acl_sess_list); - spin_unlock_irq(&se_nacl->nacl_sess_lock); + spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); } list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c index d04ec3b9e5ff21f68aa4a25d4a9d3e4caef0f4ec..8a70b57d129cf7f8d29ecd909fe211066d656b6e 100644 --- a/drivers/thermal/of-thermal.c +++ b/drivers/thermal/of-thermal.c @@ -278,10 +278,13 @@ static int of_thermal_set_mode(struct thermal_zone_device *tz, mutex_lock(&tz->lock); - if (mode == THERMAL_DEVICE_ENABLED) + if (mode == THERMAL_DEVICE_ENABLED) { tz->polling_delay = data->polling_delay; - else + tz->passive_delay = data->passive_delay; + } else { tz->polling_delay = 0; + tz->passive_delay = 0; + } mutex_unlock(&tz->lock); diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c index 899e8fe5e00f512a75fe82c25b42f341eb9a338e..9e26c530d2ddb798385a2c731915e3ad336c0cd3 100644 --- a/drivers/tty/pty.c +++ b/drivers/tty/pty.c @@ -625,7 +625,7 @@ int ptm_open_peer(struct file *master, struct tty_struct *tty, int flags) if (tty->driver != ptm_driver) return -EIO; - fd = get_unused_fd_flags(0); + fd = get_unused_fd_flags(flags); if (fd < 0) { retval = fd; goto err; diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c index 20d79a6007d50bcb9457890df30c1d341899485d..070733ca94d5e7fd5eeac3b11462be64ecef95c9 100644 --- a/drivers/tty/rocket.c +++ b/drivers/tty/rocket.c @@ -1894,7 +1894,7 @@ static __init int register_PCI(int i, struct pci_dev *dev) ByteIO_t UPCIRingInd = 0; if (!dev || !pci_match_id(rocket_pci_ids, dev) || - pci_enable_device(dev)) + pci_enable_device(dev) || i >= NUM_BOARDS) return 0; rcktpt_io_addr[i] = 
pci_resource_start(dev, 0); diff --git a/drivers/tty/serial/8250/serial_cs.c b/drivers/tty/serial/8250/serial_cs.c index 933c2688dd7ea90c4a38d7f55938bfc454e9a67c..8106353ce7aa00853bcf4b051b80c1d02310fbbb 100644 --- a/drivers/tty/serial/8250/serial_cs.c +++ b/drivers/tty/serial/8250/serial_cs.c @@ -637,8 +637,10 @@ static int serial_config(struct pcmcia_device *link) (link->has_func_id) && (link->socket->pcmcia_pfc == 0) && ((link->func_id == CISTPL_FUNCID_MULTI) || - (link->func_id == CISTPL_FUNCID_SERIAL))) - pcmcia_loop_config(link, serial_check_for_multi, info); + (link->func_id == CISTPL_FUNCID_SERIAL))) { + if (pcmcia_loop_config(link, serial_check_for_multi, info)) + goto failed; + } /* * Apply any multi-port quirk. diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c index 9ac142cfc1f1b99a89cf21582472e05993664ed2..8b2b694334ec0568b1e08b07427c57388d50c0d5 100644 --- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c +++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c @@ -1068,8 +1068,8 @@ static int poll_wait_key(char *obuf, struct uart_cpm_port *pinfo) /* Get the address of the host memory buffer. */ bdp = pinfo->rx_cur; - while (bdp->cbd_sc & BD_SC_EMPTY) - ; + if (bdp->cbd_sc & BD_SC_EMPTY) + return NO_POLL_CHAR; /* If the buffer address is in the CPM DPRAM, don't * convert it. @@ -1104,7 +1104,11 @@ static int cpm_get_poll_char(struct uart_port *port) poll_chars = 0; } if (poll_chars <= 0) { - poll_chars = poll_wait_key(poll_buf, pinfo); + int ret = poll_wait_key(poll_buf, pinfo); + + if (ret == NO_POLL_CHAR) + return ret; + poll_chars = ret; pollp = poll_buf; } poll_chars--; diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c index ac667b47f19970c451712280c2a6d7fbbe7a5ff2..7b0a3a1688e8370d08257b735a0cd63c1d083926 100644 --- a/drivers/tty/serial/earlycon.c +++ b/drivers/tty/serial/earlycon.c @@ -254,7 +254,6 @@ int __init of_setup_earlycon(const struct earlycon_id *match, return -ENXIO; } port->mapbase = addr; - port->uartclk = BASE_BAUD * 16; val = of_get_flat_dt_prop(node, "reg-offset", NULL); if (val) @@ -289,6 +288,10 @@ int __init of_setup_earlycon(const struct earlycon_id *match, if (val) early_console_dev.baud = be32_to_cpu(*val); + val = of_get_flat_dt_prop(node, "clock-frequency", NULL); + if (val) + port->uartclk = be32_to_cpu(*val); + if (options) { early_console_dev.baud = simple_strtoul(options, NULL, 0); strlcpy(early_console_dev.options, options, diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index 7a3db9378fa388818eddbdeb12082ac703686b9e..fd64ac2c1a748b80cef780d9076c2616787a85c2 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -983,7 +983,8 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport) struct circ_buf *ring = &sport->rx_ring; int ret, nent; int bits, baud; - struct tty_struct *tty = tty_port_tty_get(&sport->port.state->port); + struct tty_port *port = &sport->port.state->port; + struct tty_struct *tty = port->tty; struct ktermios *termios = &tty->termios; baud = tty_get_baud_rate(tty); diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 8deaf2ad8b34ac4bafeb795a5f5a8e4dc5faf345..4e827e5a52a364162055e4856305f6fec9b7fd19 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -2213,6 +2213,14 @@ static int serial_imx_probe(struct platform_device *pdev) ret); return ret; } + + ret = devm_request_irq(&pdev->dev, rtsirq, imx_rtsint, 0, + dev_name(&pdev->dev), sport); + if (ret) { + 
dev_err(&pdev->dev, "failed to request rts irq: %d\n", + ret); + return ret; + } } else { ret = devm_request_irq(&pdev->dev, rxirq, imx_int, 0, dev_name(&pdev->dev), sport); diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index c8cb0b398cb19b26bc83bcfb7fb86585691114cb..6db8844ef3ecdbb9bbbdd287d263fe176838e342 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -195,6 +195,7 @@ static int uart_port_startup(struct tty_struct *tty, struct uart_state *state, { struct uart_port *uport = uart_port_check(state); unsigned long page; + unsigned long flags = 0; int retval = 0; if (uport->type == PORT_UNKNOWN) @@ -209,15 +210,18 @@ static int uart_port_startup(struct tty_struct *tty, struct uart_state *state, * Initialise and allocate the transmit and temporary * buffer. */ - if (!state->xmit.buf) { - /* This is protected by the per port mutex */ - page = get_zeroed_page(GFP_KERNEL); - if (!page) - return -ENOMEM; + page = get_zeroed_page(GFP_KERNEL); + if (!page) + return -ENOMEM; + uart_port_lock(state, flags); + if (!state->xmit.buf) { state->xmit.buf = (unsigned char *) page; uart_circ_clear(&state->xmit); + } else { + free_page(page); } + uart_port_unlock(uport, flags); retval = uport->ops->startup(uport); if (retval == 0) { @@ -276,6 +280,7 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state) { struct uart_port *uport = uart_port_check(state); struct tty_port *port = &state->port; + unsigned long flags = 0; /* * Set the TTY IO error marker @@ -308,10 +313,12 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state) /* * Free the transmit buffer page. */ + uart_port_lock(state, flags); if (state->xmit.buf) { free_page((unsigned long)state->xmit.buf); state->xmit.buf = NULL; } + uart_port_unlock(uport, flags); } /** diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 8bc8fe2b75f7a34cad43ac693a1faf7c4de8720d..37dba940d8980c36ada5e64ce70119f9a59e20af 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -2060,6 +2060,8 @@ static void sci_shutdown(struct uart_port *port) } #endif + if (s->rx_trigger > 1 && s->rx_fifo_timeout > 0) + del_timer_sync(&s->rx_fifo_timer); sci_free_irq(s); sci_free_dma(port); } diff --git a/drivers/tty/tty_baudrate.c b/drivers/tty/tty_baudrate.c index 5c33fd25676d59bc6453d7f4a5f4a46b2aadabf1..ebc797fc1afd30d5d96b2b4fd1f73db48f93aaaf 100644 --- a/drivers/tty/tty_baudrate.c +++ b/drivers/tty/tty_baudrate.c @@ -156,18 +156,25 @@ void tty_termios_encode_baud_rate(struct ktermios *termios, termios->c_ospeed = obaud; #ifdef BOTHER + if ((termios->c_cflag >> IBSHIFT) & CBAUD) + ibinput = 1; /* An input speed was specified */ + /* If the user asked for a precise weird speed give a precise weird answer. 
If they asked for a Bfoo speed they may have problems digesting non-exact replies so fuzz a bit */ - if ((termios->c_cflag & CBAUD) == BOTHER) + if ((termios->c_cflag & CBAUD) == BOTHER) { oclose = 0; + if (!ibinput) + iclose = 0; + } if (((termios->c_cflag >> IBSHIFT) & CBAUD) == BOTHER) iclose = 0; - if ((termios->c_cflag >> IBSHIFT) & CBAUD) - ibinput = 1; /* An input speed was specified */ #endif termios->c_cflag &= ~CBAUD; +#ifdef IBSHIFT + termios->c_cflag &= ~(CBAUD << IBSHIFT); +#endif /* * Our goal is to find a close match to the standard baud rate diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c index 2d2b420598b236a9aa8a135ea16e74cf5763350f..7b34b0ddbf0e0281ec8ca723fb6469b5ac43d30f 100644 --- a/drivers/tty/vt/vt_ioctl.c +++ b/drivers/tty/vt/vt_ioctl.c @@ -32,6 +32,8 @@ #include #include +#include + #include #include #include @@ -700,6 +702,8 @@ int vt_ioctl(struct tty_struct *tty, if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES) ret = -ENXIO; else { + vsa.console = array_index_nospec(vsa.console, + MAX_NR_CONSOLES + 1); vsa.console--; console_lock(); ret = vc_allocate(vsa.console); diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c index ff04b7f8549f06f5730afb1cf6334785afab9d05..41784798c789b9dd1c1bffbd80920e346ea283a3 100644 --- a/drivers/uio/uio.c +++ b/drivers/uio/uio.c @@ -841,8 +841,6 @@ int __uio_register_device(struct module *owner, if (ret) goto err_uio_dev_add_attributes; - info->uio_dev = idev; - if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) { /* * Note that we deliberately don't use devm_request_irq @@ -858,6 +856,7 @@ int __uio_register_device(struct module *owner, goto err_request_irq; } + info->uio_dev = idev; return 0; err_request_irq: diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index f2f31fc16f2909720bd1fdf2b53d77a405098a9e..feaa0d8f830acaf9fc16b7f6733673b6bd61c4d6 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -792,20 +792,9 @@ static int acm_tty_write(struct tty_struct *tty, } if (acm->susp_count) { - if (acm->putbuffer) { - /* now to preserve order */ - usb_anchor_urb(acm->putbuffer->urb, &acm->delayed); - acm->putbuffer = NULL; - } usb_anchor_urb(wb->urb, &acm->delayed); spin_unlock_irqrestore(&acm->write_lock, flags); return count; - } else { - if (acm->putbuffer) { - /* at this point there is no good way to handle errors */ - acm_start_wb(acm, acm->putbuffer); - acm->putbuffer = NULL; - } } stat = acm_start_wb(acm, wb); @@ -816,66 +805,6 @@ static int acm_tty_write(struct tty_struct *tty, return count; } -static void acm_tty_flush_chars(struct tty_struct *tty) -{ - struct acm *acm = tty->driver_data; - struct acm_wb *cur; - int err; - unsigned long flags; - - spin_lock_irqsave(&acm->write_lock, flags); - - cur = acm->putbuffer; - if (!cur) /* nothing to do */ - goto out; - - acm->putbuffer = NULL; - err = usb_autopm_get_interface_async(acm->control); - if (err < 0) { - cur->use = 0; - acm->putbuffer = cur; - goto out; - } - - if (acm->susp_count) - usb_anchor_urb(cur->urb, &acm->delayed); - else - acm_start_wb(acm, cur); -out: - spin_unlock_irqrestore(&acm->write_lock, flags); - return; -} - -static int acm_tty_put_char(struct tty_struct *tty, unsigned char ch) -{ - struct acm *acm = tty->driver_data; - struct acm_wb *cur; - int wbn; - unsigned long flags; - -overflow: - cur = acm->putbuffer; - if (!cur) { - spin_lock_irqsave(&acm->write_lock, flags); - wbn = acm_wb_alloc(acm); - if (wbn >= 0) { - cur = &acm->wb[wbn]; - acm->putbuffer = cur; - } - 
spin_unlock_irqrestore(&acm->write_lock, flags); - if (!cur) - return 0; - } - - if (cur->len == acm->writesize) { - acm_tty_flush_chars(tty); - goto overflow; - } - - cur->buf[cur->len++] = ch; - return 1; -} - static int acm_tty_write_room(struct tty_struct *tty) { struct acm *acm = tty->driver_data; @@ -2000,8 +1929,6 @@ static const struct tty_operations acm_ops = { .cleanup = acm_tty_cleanup, .hangup = acm_tty_hangup, .write = acm_tty_write, - .put_char = acm_tty_put_char, - .flush_chars = acm_tty_flush_chars, .write_room = acm_tty_write_room, .ioctl = acm_tty_ioctl, .throttle = acm_tty_throttle, diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h index eacc116e83da2ccf38b2640aa5b5b02b5b3621c8..ca06b20d7af9cc9567da1f243ad99935f8bc99e7 100644 --- a/drivers/usb/class/cdc-acm.h +++ b/drivers/usb/class/cdc-acm.h @@ -96,7 +96,6 @@ struct acm { unsigned long read_urbs_free; struct urb *read_urbs[ACM_NR]; struct acm_rb read_buffers[ACM_NR]; - struct acm_wb *putbuffer; /* for acm_tty_put_char() */ int rx_buflimit; spinlock_t read_lock; u8 *notification_buffer; /* to reassemble fragmented notifications */ diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index ab245352f102a6bd6821a75be6840474f82e095e..76cb9b3649b4952d48b3c01a346134b7cd835e90 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -1451,10 +1451,13 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb struct async *as = NULL; struct usb_ctrlrequest *dr = NULL; unsigned int u, totlen, isofrmlen; - int i, ret, is_in, num_sgs = 0, ifnum = -1; + int i, ret, num_sgs = 0, ifnum = -1; int number_of_packets = 0; unsigned int stream_id = 0; void *buf; + bool is_in; + bool allow_short = false; + bool allow_zero = false; unsigned long mask = USBDEVFS_URB_SHORT_NOT_OK | USBDEVFS_URB_BULK_CONTINUATION | USBDEVFS_URB_NO_FSBR | @@ -1488,6 +1491,8 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb u = 0; switch (uurb->type) { case USBDEVFS_URB_TYPE_CONTROL: + if (is_in) + allow_short = true; if (!usb_endpoint_xfer_control(&ep->desc)) return -EINVAL; /* min 8 byte setup packet */ @@ -1528,6 +1533,10 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb break; case USBDEVFS_URB_TYPE_BULK: + if (!is_in) + allow_zero = true; + else + allow_short = true; switch (usb_endpoint_type(&ep->desc)) { case USB_ENDPOINT_XFER_CONTROL: case USB_ENDPOINT_XFER_ISOC: @@ -1548,6 +1557,10 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb if (!usb_endpoint_xfer_int(&ep->desc)) return -EINVAL; interrupt_urb: + if (!is_in) + allow_zero = true; + else + allow_short = true; break; case USBDEVFS_URB_TYPE_ISO: @@ -1692,16 +1705,21 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb u = (is_in ? 
URB_DIR_IN : URB_DIR_OUT); if (uurb->flags & USBDEVFS_URB_ISO_ASAP) u |= URB_ISO_ASAP; - if (uurb->flags & USBDEVFS_URB_SHORT_NOT_OK && is_in) + if (allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK) u |= URB_SHORT_NOT_OK; if (uurb->flags & USBDEVFS_URB_NO_FSBR) u |= URB_NO_FSBR; - if (uurb->flags & USBDEVFS_URB_ZERO_PACKET) + if (allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET) u |= URB_ZERO_PACKET; if (uurb->flags & USBDEVFS_URB_NO_INTERRUPT) u |= URB_NO_INTERRUPT; as->urb->transfer_flags = u; + if (!allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK) + dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_SHORT_NOT_OK.\n"); + if (!allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET) + dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_ZERO_PACKET.\n"); + as->urb->transfer_buffer_length = uurb->buffer_length; as->urb->setup_packet = (unsigned char *)dr; dr = NULL; diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c index eb87a259d55c3abcc4d54e929cba861a0b429fae..2f3dbf1c3c2d684915d7915d640aa0c9f1f78a2f 100644 --- a/drivers/usb/core/driver.c +++ b/drivers/usb/core/driver.c @@ -512,7 +512,6 @@ int usb_driver_claim_interface(struct usb_driver *driver, struct device *dev; struct usb_device *udev; int retval = 0; - int lpm_disable_error = -ENODEV; if (!iface) return -ENODEV; @@ -533,16 +532,6 @@ int usb_driver_claim_interface(struct usb_driver *driver, iface->condition = USB_INTERFACE_BOUND; - /* See the comment about disabling LPM in usb_probe_interface(). */ - if (driver->disable_hub_initiated_lpm) { - lpm_disable_error = usb_unlocked_disable_lpm(udev); - if (lpm_disable_error) { - dev_err(&iface->dev, "%s Failed to disable LPM for driver %s\n.", - __func__, driver->name); - return -ENOMEM; - } - } - /* Claimed interfaces are initially inactive (suspended) and * runtime-PM-enabled, but only if the driver has autosuspend * support. Otherwise they are marked active, to prevent the @@ -561,9 +550,20 @@ int usb_driver_claim_interface(struct usb_driver *driver, if (device_is_registered(dev)) retval = device_bind_driver(dev); - /* Attempt to re-enable USB3 LPM, if the disable was successful. */ - if (!lpm_disable_error) - usb_unlocked_enable_lpm(udev); + if (retval) { + dev->driver = NULL; + usb_set_intfdata(iface, NULL); + iface->needs_remote_wakeup = 0; + iface->condition = USB_INTERFACE_UNBOUND; + + /* + * Unbound interfaces are always runtime-PM-disabled + * and runtime-PM-suspended + */ + if (driver->supports_autosuspend) + pm_runtime_disable(dev); + pm_runtime_set_suspended(dev); + } return retval; } diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c index ea829ad798c0773c7ea93ec0dbeda5ff1772f6ba..5340d433cdf0ebc1bdc0d47fe312763a539bc06a 100644 --- a/drivers/usb/core/hcd-pci.c +++ b/drivers/usb/core/hcd-pci.c @@ -528,8 +528,6 @@ static int resume_common(struct device *dev, int event) event == PM_EVENT_RESTORE); if (retval) { dev_err(dev, "PCI post-resume error %d!\n", retval); - if (hcd->shared_hcd) - usb_hc_died(hcd->shared_hcd); usb_hc_died(hcd); } } diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index d97e126438150ea9e02294fe1292935eb3d56276..d39d58cf70d0485673d351899ba0605cba22bb61 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -1280,6 +1280,11 @@ void usb_enable_interface(struct usb_device *dev, * is submitted that needs that bandwidth. Some other operating systems * allocate bandwidth early, when a configuration is chosen. 
* + * xHCI reserves bandwidth and configures the alternate setting in + * usb_hcd_alloc_bandwidth(). If it fails the original interface altsetting + * may be disabled. Drivers cannot rely on any particular alternate + * setting being in effect after a failure. + * * This call is synchronous, and may not be used in an interrupt context. * Also, drivers must not change altsettings while urbs are scheduled for * endpoints in that interface; all such urbs must first be completed @@ -1315,6 +1320,12 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate) alternate); return -EINVAL; } + /* + * usb3 hosts configure the interface in usb_hcd_alloc_bandwidth, + * including freeing dropped endpoint ring buffers. + * Make sure the interface endpoints are flushed before that + */ + usb_disable_interface(dev, iface, false); /* Make sure we have enough bandwidth for this alternate interface. * Remove the current alt setting and add the new alt setting. diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 99f67764765fdc23bf78051c4c72d914eb64f3b6..37a5e07b3488de2173574dbd757ff1520a2e9332 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -37,6 +37,10 @@ static const struct usb_device_id usb_quirk_list[] = { /* CBM - Flash disk */ { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME }, + /* WORLDE Controller KS49 or Prodipe MIDI 49C USB controller */ + { USB_DEVICE(0x0218, 0x0201), .driver_info = + USB_QUIRK_CONFIG_INTF_STRINGS }, + /* WORLDE easy key (easykey.25) MIDI controller */ { USB_DEVICE(0x0218, 0x0401), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, @@ -259,6 +263,9 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x2040, 0x7200), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, + /* DJI CineSSD */ + { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM }, + /* INTEL VALUE SSD */ { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c index 17681d5638ac3ff418dbec8eb8254c4b4e6e7727..f8b50eaf6d1ee1aa2eeeb87837787f1ab1cb5adc 100644 --- a/drivers/usb/core/usb.c +++ b/drivers/usb/core/usb.c @@ -228,6 +228,8 @@ struct usb_host_interface *usb_find_alt_setting( struct usb_interface_cache *intf_cache = NULL; int i; + if (!config) + return NULL; for (i = 0; i < config->desc.bNumInterfaces; i++) { if (config->intf_cache[i]->altsetting[0].desc.bInterfaceNumber == iface_num) { diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h index ec965ac5f1f5dfb4ae87f1f8ee830542e436c455..3c0d386dc62fcda61b77e70357aedd464bb640fa 100644 --- a/drivers/usb/dwc2/core.h +++ b/drivers/usb/dwc2/core.h @@ -872,6 +872,7 @@ struct dwc2_hregs_backup { * @frame_list_sz: Frame list size * @desc_gen_cache: Kmem cache for generic descriptors * @desc_hsisoc_cache: Kmem cache for hs isochronous descriptors + * @unaligned_cache: Kmem cache for DMA mode to handle non-aligned buf * * These are for peripheral mode: * @@ -1004,6 +1005,8 @@ struct dwc2_hsotg { u32 frame_list_sz; struct kmem_cache *desc_gen_cache; struct kmem_cache *desc_hsisoc_cache; + struct kmem_cache *unaligned_cache; +#define DWC2_KMEM_UNALIGNED_BUF_SIZE 1024 #ifdef DEBUG u32 frrem_samples; diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 6ef001a83fe28bf0320d322dcc4867304c070055..e164439b215429492ce414bd8d088109397ea639 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -848,6 +848,7 @@ static int dwc2_gadget_fill_isoc_desc(struct 
dwc2_hsotg_ep *hs_ep, u32 index; u32 maxsize = 0; u32 mask = 0; + u8 pid = 0; maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask); if (len > maxsize) { @@ -893,7 +894,11 @@ static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep, ((len << DEV_DMA_NBYTES_SHIFT) & mask)); if (hs_ep->dir_in) { - desc->status |= ((hs_ep->mc << DEV_DMA_ISOC_PID_SHIFT) & + if (len) + pid = DIV_ROUND_UP(len, hs_ep->ep.maxpacket); + else + pid = 1; + desc->status |= ((pid << DEV_DMA_ISOC_PID_SHIFT) & DEV_DMA_ISOC_PID_MASK) | ((len % hs_ep->ep.maxpacket) ? DEV_DMA_SHORT : 0) | @@ -932,6 +937,7 @@ static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep) u32 ctrl; if (list_empty(&hs_ep->queue)) { + hs_ep->target_frame = TARGET_FRAME_INITIAL; dev_dbg(hsotg->dev, "%s: No requests in queue\n", __func__); return; } @@ -4716,9 +4722,11 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq) } ret = usb_add_gadget_udc(dev, &hsotg->gadget); - if (ret) + if (ret) { + dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep, + hsotg->ctrl_req); return ret; - + } dwc2_hsotg_dump(hsotg); return 0; @@ -4731,6 +4739,7 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq) int dwc2_hsotg_remove(struct dwc2_hsotg *hsotg) { usb_del_gadget_udc(&hsotg->gadget); + dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep, hsotg->ctrl_req); return 0; } diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index 46d3b0fc00c5c6dadc91d61d3bcfeb3571697449..fa20ec43a187673e7461d408d55b725bcfc611eb 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -1544,11 +1544,20 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg, } if (hsotg->params.host_dma) { - dwc2_writel((u32)chan->xfer_dma, - hsotg->regs + HCDMA(chan->hc_num)); + dma_addr_t dma_addr; + + if (chan->align_buf) { + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "align_buf\n"); + dma_addr = chan->align_buf; + } else { + dma_addr = chan->xfer_dma; + } + dwc2_writel((u32)dma_addr, hsotg->regs + HCDMA(chan->hc_num)); + if (dbg_hc(chan)) dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n", - (unsigned long)chan->xfer_dma, chan->hc_num); + (unsigned long)dma_addr, chan->hc_num); } /* Start the split */ @@ -2604,6 +2613,35 @@ static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg, } } +static int dwc2_alloc_split_dma_aligned_buf(struct dwc2_hsotg *hsotg, + struct dwc2_qh *qh, + struct dwc2_host_chan *chan) +{ + if (!hsotg->unaligned_cache || + chan->max_packet > DWC2_KMEM_UNALIGNED_BUF_SIZE) + return -ENOMEM; + + if (!qh->dw_align_buf) { + qh->dw_align_buf = kmem_cache_alloc(hsotg->unaligned_cache, + GFP_ATOMIC | GFP_DMA); + if (!qh->dw_align_buf) + return -ENOMEM; + } + + qh->dw_align_buf_dma = dma_map_single(hsotg->dev, qh->dw_align_buf, + DWC2_KMEM_UNALIGNED_BUF_SIZE, + DMA_FROM_DEVICE); + + if (dma_mapping_error(hsotg->dev, qh->dw_align_buf_dma)) { + dev_err(hsotg->dev, "can't map align_buf\n"); + chan->align_buf = 0; + return -EINVAL; + } + + chan->align_buf = qh->dw_align_buf_dma; + return 0; +} + #define DWC2_USB_DMA_ALIGN 4 static void dwc2_free_dma_aligned_buffer(struct urb *urb) @@ -2783,6 +2821,32 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) /* Set the transfer attributes */ dwc2_hc_init_xfer(hsotg, chan, qtd); + /* For non-dword aligned buffers */ + if (hsotg->params.host_dma && qh->do_split && + chan->ep_is_in && (chan->xfer_dma & 0x3)) { + dev_vdbg(hsotg->dev, "Non-aligned buffer\n"); + if (dwc2_alloc_split_dma_aligned_buf(hsotg, qh, chan)) { + dev_err(hsotg->dev, + "Failed to allocate memory to 
handle non-aligned buffer\n"); + /* Add channel back to free list */ + chan->align_buf = 0; + chan->multi_count = 0; + list_add_tail(&chan->hc_list_entry, + &hsotg->free_hc_list); + qtd->in_process = 0; + qh->channel = NULL; + return -ENOMEM; + } + } else { + /* + * We assume that DMA is always aligned in non-split + * case or split out case. Warn if not. + */ + WARN_ON_ONCE(hsotg->params.host_dma && + (chan->xfer_dma & 0x3)); + chan->align_buf = 0; + } + if (chan->ep_type == USB_ENDPOINT_XFER_INT || chan->ep_type == USB_ENDPOINT_XFER_ISOC) /* @@ -5277,6 +5341,19 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg) } } + if (hsotg->params.host_dma) { + /* + * Create kmem caches to handle non-aligned buffer + * in Buffer DMA mode. + */ + hsotg->unaligned_cache = kmem_cache_create("dwc2-unaligned-dma", + DWC2_KMEM_UNALIGNED_BUF_SIZE, 4, + SLAB_CACHE_DMA, NULL); + if (!hsotg->unaligned_cache) + dev_err(hsotg->dev, + "unable to create dwc2 unaligned cache\n"); + } + hsotg->otg_port = 1; hsotg->frame_list = NULL; hsotg->frame_list_dma = 0; @@ -5311,8 +5388,9 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg) return 0; error4: - kmem_cache_destroy(hsotg->desc_gen_cache); + kmem_cache_destroy(hsotg->unaligned_cache); kmem_cache_destroy(hsotg->desc_hsisoc_cache); + kmem_cache_destroy(hsotg->desc_gen_cache); error3: dwc2_hcd_release(hsotg); error2: @@ -5353,8 +5431,9 @@ void dwc2_hcd_remove(struct dwc2_hsotg *hsotg) usb_remove_hcd(hcd); hsotg->priv = NULL; - kmem_cache_destroy(hsotg->desc_gen_cache); + kmem_cache_destroy(hsotg->unaligned_cache); kmem_cache_destroy(hsotg->desc_hsisoc_cache); + kmem_cache_destroy(hsotg->desc_gen_cache); dwc2_hcd_release(hsotg); usb_put_hcd(hcd); diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h index 11c3c145b79363d673b4db83be09076ac38095ee..461bdc67df6fbb244e6c1cd492fa3be5db9aff96 100644 --- a/drivers/usb/dwc2/hcd.h +++ b/drivers/usb/dwc2/hcd.h @@ -75,6 +75,8 @@ struct dwc2_qh; * (micro)frame * @xfer_buf: Pointer to current transfer buffer position * @xfer_dma: DMA address of xfer_buf + * @align_buf: In Buffer DMA mode this will be used if xfer_buf is not + * DWORD aligned * @xfer_len: Total number of bytes to transfer * @xfer_count: Number of bytes transferred so far * @start_pkt_count: Packet count at start of transfer @@ -132,6 +134,7 @@ struct dwc2_host_chan { u8 *xfer_buf; dma_addr_t xfer_dma; + dma_addr_t align_buf; u32 xfer_len; u32 xfer_count; u16 start_pkt_count; @@ -302,6 +305,9 @@ struct dwc2_hs_transfer_time { * is tightly packed. * @ls_duration_us: Duration on the low speed bus schedule. 
* @ntd: Actual number of transfer descriptors in a list + * @dw_align_buf: Used instead of original buffer if its physical address + * is not dword-aligned + * @dw_align_buf_dma: DMA address for dw_align_buf * @qtd_list: List of QTDs for this QH * @channel: Host channel currently processing transfers for this QH * @qh_list_entry: Entry for QH in either the periodic or non-periodic @@ -345,6 +351,8 @@ struct dwc2_qh { struct dwc2_hs_transfer_time hs_transfers[DWC2_HS_SCHEDULE_UFRAMES]; u32 ls_start_schedule_slice; u16 ntd; + u8 *dw_align_buf; + dma_addr_t dw_align_buf_dma; struct list_head qtd_list; struct dwc2_host_chan *channel; struct list_head qh_list_entry; diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c index 28a8210710b121616353d881b2e27c73da7e03da..17905ba1139c5698a9a72b0ef4cf2c8a67f55291 100644 --- a/drivers/usb/dwc2/hcd_intr.c +++ b/drivers/usb/dwc2/hcd_intr.c @@ -923,14 +923,21 @@ static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg, frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index]; len = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd, DWC2_HC_XFER_COMPLETE, NULL); - if (!len) { + if (!len && !qtd->isoc_split_offset) { qtd->complete_split = 0; - qtd->isoc_split_offset = 0; return 0; } frame_desc->actual_length += len; + if (chan->align_buf) { + dev_vdbg(hsotg->dev, "non-aligned buffer\n"); + dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma, + DWC2_KMEM_UNALIGNED_BUF_SIZE, DMA_FROM_DEVICE); + memcpy(qtd->urb->buf + (chan->xfer_dma - qtd->urb->dma), + chan->qh->dw_align_buf, len); + } + qtd->isoc_split_offset += len; hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum)); diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c index 7f51a77bc5ccd3335277a474f27c6e143b58df0f..56e61220efc66a5571159ca2f46932576ab451dd 100644 --- a/drivers/usb/dwc2/hcd_queue.c +++ b/drivers/usb/dwc2/hcd_queue.c @@ -1632,6 +1632,9 @@ void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) if (qh->desc_list) dwc2_hcd_qh_free_ddma(hsotg, qh); + else if (hsotg->unaligned_cache && qh->dw_align_buf) + kmem_cache_free(hsotg->unaligned_cache, qh->dw_align_buf); + kfree(qh); } diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index dca78bb20e5d541587699931ee861928050f7b93..8b323a360e037b9d9d5c1f43c12ec6b11b39b0b3 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -511,6 +511,22 @@ static void dwc3_cache_hwparams(struct dwc3 *dwc) parms->hwparams8 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS8); } +static int dwc3_core_ulpi_init(struct dwc3 *dwc) +{ + int intf; + int ret = 0; + + intf = DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3); + + if (intf == DWC3_GHWPARAMS3_HSPHY_IFC_ULPI || + (intf == DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI && + dwc->hsphy_interface && + !strncmp(dwc->hsphy_interface, "ulpi", 4))) + ret = dwc3_ulpi_init(dwc); + + return ret; +} + /** * dwc3_phy_setup - Configure USB PHY Interface of DWC3 Core * @dwc: Pointer to our controller context structure @@ -522,7 +538,6 @@ static void dwc3_cache_hwparams(struct dwc3 *dwc) static int dwc3_phy_setup(struct dwc3 *dwc) { u32 reg; - int ret; reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)); @@ -593,9 +608,6 @@ static int dwc3_phy_setup(struct dwc3 *dwc) } /* FALLTHROUGH */ case DWC3_GHWPARAMS3_HSPHY_IFC_ULPI: - ret = dwc3_ulpi_init(dwc); - if (ret) - return ret; /* FALLTHROUGH */ default: break; @@ -752,6 +764,7 @@ static void dwc3_core_setup_global_control(struct dwc3 *dwc) } static int dwc3_core_get_phy(struct dwc3 *dwc); +static int 
dwc3_core_ulpi_init(struct dwc3 *dwc); /** * dwc3_core_init - Low-level initialization of DWC3 Core @@ -783,17 +796,27 @@ static int dwc3_core_init(struct dwc3 *dwc) dwc->maximum_speed = USB_SPEED_HIGH; } - ret = dwc3_core_get_phy(dwc); + ret = dwc3_phy_setup(dwc); if (ret) goto err0; - ret = dwc3_core_soft_reset(dwc); - if (ret) - goto err0; + if (!dwc->ulpi_ready) { + ret = dwc3_core_ulpi_init(dwc); + if (ret) + goto err0; + dwc->ulpi_ready = true; + } - ret = dwc3_phy_setup(dwc); + if (!dwc->phys_ready) { + ret = dwc3_core_get_phy(dwc); + if (ret) + goto err0a; + dwc->phys_ready = true; + } + + ret = dwc3_core_soft_reset(dwc); if (ret) - goto err0; + goto err0a; dwc3_core_setup_global_control(dwc); dwc3_core_num_eps(dwc); @@ -866,6 +889,9 @@ err1: phy_exit(dwc->usb2_generic_phy); phy_exit(dwc->usb3_generic_phy); +err0a: + dwc3_ulpi_exit(dwc); + err0: return ret; } @@ -1256,7 +1282,6 @@ err4: err3: dwc3_free_event_buffers(dwc); - dwc3_ulpi_exit(dwc); err2: pm_runtime_allow(&pdev->dev); diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index b782ba58a7fc643d07f5fd09b2ca6b088b86add8..abd1142c9e4d95135fb5958ed46da621cd78c825 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -805,7 +805,9 @@ struct dwc3_scratchpad_array { * @usb3_phy: pointer to USB3 PHY * @usb2_generic_phy: pointer to USB2 PHY * @usb3_generic_phy: pointer to USB3 PHY + * @phys_ready: flag to indicate that PHYs are ready * @ulpi: pointer to ulpi interface + * @ulpi_ready: flag to indicate that ULPI is initialized * @isoch_delay: wValue from Set Isochronous Delay request; * @u2sel: parameter from Set SEL request. * @u2pel: parameter from Set SEL request. @@ -903,7 +905,10 @@ struct dwc3 { struct phy *usb2_generic_phy; struct phy *usb3_generic_phy; + bool phys_ready; + struct ulpi *ulpi; + bool ulpi_ready; void __iomem *regs; size_t regs_size; diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c index fbfc09ebd2eceec1a33201819667d7746be20410..acf41ba3638dd7ba7950c6a2cdd34dd7db5ee471 100644 --- a/drivers/usb/dwc3/dwc3-of-simple.c +++ b/drivers/usb/dwc3/dwc3-of-simple.c @@ -132,8 +132,9 @@ static int dwc3_of_simple_remove(struct platform_device *pdev) of_platform_depopulate(dev); - pm_runtime_put_sync(dev); pm_runtime_disable(dev); + pm_runtime_put_noidle(dev); + pm_runtime_set_suspended(dev); return 0; } diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index bc5e91d4fac87c80d184fccd24267467666d0dd3..09c0454833ad20cdde3e940b51eac9a1448d8815 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -41,6 +41,7 @@ #define PCI_DEVICE_ID_INTEL_GLK 0x31aa #define PCI_DEVICE_ID_INTEL_CNPLP 0x9dee #define PCI_DEVICE_ID_INTEL_CNPH 0xa36e +#define PCI_DEVICE_ID_INTEL_ICLLP 0x34ee #define PCI_INTEL_BXT_DSM_GUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511" #define PCI_INTEL_BXT_FUNC_PMU_PWR 4 @@ -273,6 +274,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_GLK), }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CNPLP), }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CNPH), }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICLLP), }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), }, { } /* Terminating Entry */ }; diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h index 4a3227543255e33a805b282f4bc126d84fc7acdd..5cc9fd7306560d07b6876e3a80571d14dda93c08 100644 --- a/drivers/usb/dwc3/gadget.h +++ b/drivers/usb/dwc3/gadget.h @@ 
-33,7 +33,7 @@ struct dwc3; #define DWC3_DEPCFG_XFER_IN_PROGRESS_EN BIT(9) #define DWC3_DEPCFG_XFER_NOT_READY_EN BIT(10) #define DWC3_DEPCFG_FIFO_ERROR_EN BIT(11) -#define DWC3_DEPCFG_STREAM_EVENT_EN BIT(12) +#define DWC3_DEPCFG_STREAM_EVENT_EN BIT(13) #define DWC3_DEPCFG_BINTERVAL_M1(n) (((n) & 0xff) << 16) #define DWC3_DEPCFG_STREAM_CAPABLE BIT(24) #define DWC3_DEPCFG_EP_NUMBER(n) (((n) & 0x1f) << 25) diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 940de04ed72a9e224f01e0952410850d1ed82bd8..b805962f5154393b803c054c8c30074b8d0a6ca7 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -1720,6 +1720,8 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) */ if (w_value && !f->get_alt) break; + + spin_lock(&cdev->lock); value = f->set_alt(f, w_index, w_value); if (value == USB_GADGET_DELAYED_STATUS) { DBG(cdev, @@ -1729,6 +1731,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) DBG(cdev, "delayed_status count %d\n", cdev->delayed_status); } + spin_unlock(&cdev->lock); break; case USB_REQ_GET_INTERFACE: if (ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE)) diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index d9f2040c10f23bf28bb7d585e9a25b436b5c96b1..42ec6f2db6a9334ad11eecc9a30e0494cfe93812 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -219,6 +219,7 @@ struct ffs_io_data { struct mm_struct *mm; struct work_struct work; + struct work_struct cancellation_work; struct usb_ep *ep; struct usb_request *req; @@ -1073,22 +1074,31 @@ ffs_epfile_open(struct inode *inode, struct file *file) return 0; } +static void ffs_aio_cancel_worker(struct work_struct *work) +{ + struct ffs_io_data *io_data = container_of(work, struct ffs_io_data, + cancellation_work); + + ENTER(); + + usb_ep_dequeue(io_data->ep, io_data->req); +} + static int ffs_aio_cancel(struct kiocb *kiocb) { struct ffs_io_data *io_data = kiocb->private; - struct ffs_epfile *epfile = kiocb->ki_filp->private_data; + struct ffs_data *ffs = io_data->ffs; int value; ENTER(); - spin_lock_irq(&epfile->ffs->eps_lock); - - if (likely(io_data && io_data->ep && io_data->req)) - value = usb_ep_dequeue(io_data->ep, io_data->req); - else + if (likely(io_data && io_data->ep && io_data->req)) { + INIT_WORK(&io_data->cancellation_work, ffs_aio_cancel_worker); + queue_work(ffs->io_completion_wq, &io_data->cancellation_work); + value = -EINPROGRESS; + } else { value = -EINVAL; - - spin_unlock_irq(&epfile->ffs->eps_lock); + } return value; } diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c index 97cb2dfd6369751a5d03f5cbcce78b6e41f54154..d063f0401f8475a5bf5dff5f7d247e6cffb77ae4 100644 --- a/drivers/usb/gadget/function/f_uac2.c +++ b/drivers/usb/gadget/function/f_uac2.c @@ -442,14 +442,14 @@ static struct usb_descriptor_header *hs_audio_desc[] = { }; struct cntrl_cur_lay3 { - __u32 dCUR; + __le32 dCUR; }; struct cntrl_range_lay3 { - __u16 wNumSubRanges; - __u32 dMIN; - __u32 dMAX; - __u32 dRES; + __le16 wNumSubRanges; + __le32 dMIN; + __le32 dMAX; + __le32 dRES; } __packed; static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts, @@ -563,13 +563,13 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn) agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc); if (!agdev->out_ep) { dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); - return ret; + return -ENODEV; } agdev->in_ep = 
usb_ep_autoconfig(gadget, &fs_epin_desc); if (!agdev->in_ep) { dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); - return ret; + return -ENODEV; } agdev->in_ep_maxpsize = max_t(u16, @@ -707,9 +707,9 @@ in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr) memset(&c, 0, sizeof(struct cntrl_cur_lay3)); if (entity_id == USB_IN_CLK_ID) - c.dCUR = p_srate; + c.dCUR = cpu_to_le32(p_srate); else if (entity_id == USB_OUT_CLK_ID) - c.dCUR = c_srate; + c.dCUR = cpu_to_le32(c_srate); value = min_t(unsigned, w_length, sizeof c); memcpy(req->buf, &c, value); @@ -746,15 +746,15 @@ in_rq_range(struct usb_function *fn, const struct usb_ctrlrequest *cr) if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) { if (entity_id == USB_IN_CLK_ID) - r.dMIN = p_srate; + r.dMIN = cpu_to_le32(p_srate); else if (entity_id == USB_OUT_CLK_ID) - r.dMIN = c_srate; + r.dMIN = cpu_to_le32(c_srate); else return -EOPNOTSUPP; r.dMAX = r.dMIN; r.dRES = 0; - r.wNumSubRanges = 1; + r.wNumSubRanges = cpu_to_le16(1); value = min_t(unsigned, w_length, sizeof r); memcpy(req->buf, &r, value); diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c index 3971bbab88bd7ccf0282cd4a2ade6d636cfa1c8b..d3a639297e060d936af2ddacef048f8d7f755406 100644 --- a/drivers/usb/gadget/function/u_audio.c +++ b/drivers/usb/gadget/function/u_audio.c @@ -41,9 +41,6 @@ struct uac_req { struct uac_rtd_params { struct snd_uac_chip *uac; /* parent chip */ bool ep_enabled; /* if the ep is enabled */ - /* Size of the ring buffer */ - size_t dma_bytes; - unsigned char *dma_area; struct snd_pcm_substream *ss; @@ -52,8 +49,6 @@ struct uac_rtd_params { void *rbuf; - size_t period_size; - unsigned max_psize; /* MaxPacketSize of endpoint */ struct uac_req *ureq; @@ -93,12 +88,12 @@ static const struct snd_pcm_hardware uac_pcm_hardware = { static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req) { unsigned pending; - unsigned long flags; + unsigned long flags, flags2; unsigned int hw_ptr; - bool update_alsa = false; int status = req->status; struct uac_req *ur = req->context; struct snd_pcm_substream *substream; + struct snd_pcm_runtime *runtime; struct uac_rtd_params *prm = ur->pp; struct snd_uac_chip *uac = prm->uac; @@ -120,6 +115,14 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req) if (!substream) goto exit; + snd_pcm_stream_lock_irqsave(substream, flags2); + + runtime = substream->runtime; + if (!runtime || !snd_pcm_running(substream)) { + snd_pcm_stream_unlock_irqrestore(substream, flags2); + goto exit; + } + spin_lock_irqsave(&prm->lock, flags); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { @@ -146,43 +149,46 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req) req->actual = req->length; } - pending = prm->hw_ptr % prm->period_size; - pending += req->actual; - if (pending >= prm->period_size) - update_alsa = true; - hw_ptr = prm->hw_ptr; - prm->hw_ptr = (prm->hw_ptr + req->actual) % prm->dma_bytes; spin_unlock_irqrestore(&prm->lock, flags); /* Pack USB load in ALSA ring buffer */ - pending = prm->dma_bytes - hw_ptr; + pending = runtime->dma_bytes - hw_ptr; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { if (unlikely(pending < req->actual)) { - memcpy(req->buf, prm->dma_area + hw_ptr, pending); - memcpy(req->buf + pending, prm->dma_area, + memcpy(req->buf, runtime->dma_area + hw_ptr, pending); + memcpy(req->buf + pending, runtime->dma_area, req->actual - pending); } else { - memcpy(req->buf, prm->dma_area + hw_ptr, 
req->actual); + memcpy(req->buf, runtime->dma_area + hw_ptr, + req->actual); } } else { if (unlikely(pending < req->actual)) { - memcpy(prm->dma_area + hw_ptr, req->buf, pending); - memcpy(prm->dma_area, req->buf + pending, + memcpy(runtime->dma_area + hw_ptr, req->buf, pending); + memcpy(runtime->dma_area, req->buf + pending, req->actual - pending); } else { - memcpy(prm->dma_area + hw_ptr, req->buf, req->actual); + memcpy(runtime->dma_area + hw_ptr, req->buf, + req->actual); } } + spin_lock_irqsave(&prm->lock, flags); + /* update hw_ptr after data is copied to memory */ + prm->hw_ptr = (hw_ptr + req->actual) % runtime->dma_bytes; + hw_ptr = prm->hw_ptr; + spin_unlock_irqrestore(&prm->lock, flags); + snd_pcm_stream_unlock_irqrestore(substream, flags2); + + if ((hw_ptr % snd_pcm_lib_period_bytes(substream)) < req->actual) + snd_pcm_period_elapsed(substream); + exit: if (usb_ep_queue(ep, req, GFP_ATOMIC)) dev_err(uac->card->dev, "%d Error!\n", __LINE__); - - if (update_alsa) - snd_pcm_period_elapsed(substream); } static int uac_pcm_trigger(struct snd_pcm_substream *substream, int cmd) @@ -245,40 +251,12 @@ static snd_pcm_uframes_t uac_pcm_pointer(struct snd_pcm_substream *substream) static int uac_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { - struct snd_uac_chip *uac = snd_pcm_substream_chip(substream); - struct uac_rtd_params *prm; - int err; - - if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) - prm = &uac->p_prm; - else - prm = &uac->c_prm; - - err = snd_pcm_lib_malloc_pages(substream, + return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params)); - if (err >= 0) { - prm->dma_bytes = substream->runtime->dma_bytes; - prm->dma_area = substream->runtime->dma_area; - prm->period_size = params_period_bytes(hw_params); - } - - return err; } static int uac_pcm_hw_free(struct snd_pcm_substream *substream) { - struct snd_uac_chip *uac = snd_pcm_substream_chip(substream); - struct uac_rtd_params *prm; - - if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) - prm = &uac->p_prm; - else - prm = &uac->c_prm; - - prm->dma_area = NULL; - prm->dma_bytes = 0; - prm->period_size = 0; - return snd_pcm_lib_free_pages(substream); } @@ -604,15 +582,15 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name, if (err < 0) goto snd_fail; - strcpy(pcm->name, pcm_name); + strlcpy(pcm->name, pcm_name, sizeof(pcm->name)); pcm->private_data = uac; uac->pcm = pcm; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &uac_pcm_ops); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &uac_pcm_ops); - strcpy(card->driver, card_name); - strcpy(card->shortname, card_name); + strlcpy(card->driver, card_name, sizeof(card->driver)); + strlcpy(card->shortname, card_name, sizeof(card->shortname)); sprintf(card->longname, "%s %i", card_name, card->dev->id); snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS, diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c index f608c1f85e611f2a36fd6389006e3f00837a7035..9cbb061582a774676eb6cf4d42b8d84eb19b10f3 100644 --- a/drivers/usb/gadget/udc/net2280.c +++ b/drivers/usb/gadget/udc/net2280.c @@ -1549,11 +1549,14 @@ static int net2280_pullup(struct usb_gadget *_gadget, int is_on) writel(tmp | BIT(USB_DETECT_ENABLE), &dev->usb->usbctl); } else { writel(tmp & ~BIT(USB_DETECT_ENABLE), &dev->usb->usbctl); - stop_activity(dev, dev->driver); + stop_activity(dev, NULL); } spin_unlock_irqrestore(&dev->lock, flags); + if (!is_on && dev->driver) + dev->driver->disconnect(&dev->gadget); + 
return 0; } @@ -2470,8 +2473,11 @@ static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver) nuke(&dev->ep[i]); /* report disconnect; the driver is already quiesced */ - if (driver) + if (driver) { + spin_unlock(&dev->lock); driver->disconnect(&dev->gadget); + spin_lock(&dev->lock); + } usb_reinit(dev); } @@ -3345,6 +3351,8 @@ next_endpoints: BIT(PCI_RETRY_ABORT_INTERRUPT)) static void handle_stat1_irqs(struct net2280 *dev, u32 stat) +__releases(dev->lock) +__acquires(dev->lock) { struct net2280_ep *ep; u32 tmp, num, mask, scratch; @@ -3385,12 +3393,14 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat) if (disconnect || reset) { stop_activity(dev, dev->driver); ep0_start(dev); + spin_unlock(&dev->lock); if (reset) usb_gadget_udc_reset (&dev->gadget, dev->driver); else (dev->driver->disconnect) (&dev->gadget); + spin_lock(&dev->lock); return; } } @@ -3409,6 +3419,7 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat) tmp = BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT); if (stat & tmp) { writel(tmp, &dev->regs->irqstat1); + spin_unlock(&dev->lock); if (stat & BIT(SUSPEND_REQUEST_INTERRUPT)) { if (dev->driver->suspend) dev->driver->suspend(&dev->gadget); @@ -3419,6 +3430,7 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat) dev->driver->resume(&dev->gadget); /* at high speed, note erratum 0133 */ } + spin_lock(&dev->lock); stat &= ~tmp; } diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c index 118ad70f1af0f109714a83cfd4b48327382c3268..84b227ede08232eca9fd1b3602150c06a5fdff4c 100644 --- a/drivers/usb/gadget/udc/r8a66597-udc.c +++ b/drivers/usb/gadget/udc/r8a66597-udc.c @@ -835,11 +835,11 @@ static void init_controller(struct r8a66597 *r8a66597) r8a66597_bset(r8a66597, XCKE, SYSCFG0); - msleep(3); + mdelay(3); r8a66597_bset(r8a66597, PLLC, SYSCFG0); - msleep(1); + mdelay(1); r8a66597_bset(r8a66597, SCKE, SYSCFG0); @@ -1193,7 +1193,7 @@ __acquires(r8a66597->lock) r8a66597->ep0_req->length = 2; /* AV: what happens if we get called again before that gets through? 
*/ spin_unlock(&r8a66597->lock); - r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_KERNEL); + r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_ATOMIC); spin_lock(&r8a66597->lock); } diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c index c12a1a6554bad90bd36d805e3805453f433c5121..36a706f475d251d89bbbcac46258aec724393e10 100644 --- a/drivers/usb/gadget/udc/renesas_usb3.c +++ b/drivers/usb/gadget/udc/renesas_usb3.c @@ -788,12 +788,15 @@ static void usb3_irq_epc_int_1_speed(struct renesas_usb3 *usb3) switch (speed) { case USB_STA_SPEED_SS: usb3->gadget.speed = USB_SPEED_SUPER; + usb3->gadget.ep0->maxpacket = USB3_EP0_SS_MAX_PACKET_SIZE; break; case USB_STA_SPEED_HS: usb3->gadget.speed = USB_SPEED_HIGH; + usb3->gadget.ep0->maxpacket = USB3_EP0_HSFS_MAX_PACKET_SIZE; break; case USB_STA_SPEED_FS: usb3->gadget.speed = USB_SPEED_FULL; + usb3->gadget.ep0->maxpacket = USB3_EP0_HSFS_MAX_PACKET_SIZE; break; default: usb3->gadget.speed = USB_SPEED_UNKNOWN; @@ -2458,7 +2461,7 @@ static int renesas_usb3_init_ep(struct renesas_usb3 *usb3, struct device *dev, /* for control pipe */ usb3->gadget.ep0 = &usb3_ep->ep; usb_ep_set_maxpacket_limit(&usb3_ep->ep, - USB3_EP0_HSFS_MAX_PACKET_SIZE); + USB3_EP0_SS_MAX_PACKET_SIZE); usb3_ep->ep.caps.type_control = true; usb3_ep->ep.caps.dir_in = true; usb3_ep->ep.caps.dir_out = true; diff --git a/drivers/usb/host/dwc_otg/dwc_otg_fiq_fsm.c b/drivers/usb/host/dwc_otg/dwc_otg_fiq_fsm.c index ca362db1602aa935ae48ab89fc740859f72d83cd..c44650f80adc93ea608533cb218c1a5f842f34c3 100644 --- a/drivers/usb/host/dwc_otg/dwc_otg_fiq_fsm.c +++ b/drivers/usb/host/dwc_otg/dwc_otg_fiq_fsm.c @@ -250,7 +250,7 @@ static int notrace fiq_increment_dma_buf(struct fiq_state *st, int num_channels, BUG(); hcdma.d32 = (dma_addr_t) &blob->channel[n].index[i].buf[0]; - FIQ_WRITE(st->dwc_regs_base + HC_DMA + (HC_OFFSET * n), hcdma.d32); + FIQ_WRITE(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HC_DMA, hcdma.d32); st->channel[n].dma_info.index = i; return 0; } @@ -302,7 +302,7 @@ static int notrace fiq_iso_out_advance(struct fiq_state *st, int num_channels, i /* New DMA address - address of bounce buffer referred to in index */ hcdma.d32 = (uint32_t) &blob->channel[n].index[i].buf[0]; - //hcdma.d32 = FIQ_READ(st->dwc_regs_base + HC_DMA + (HC_OFFSET * n)); + //hcdma.d32 = FIQ_READ(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HC_DMA); //hcdma.d32 += st->channel[n].dma_info.slot_len[i]; fiq_print(FIQDBG_INT, st, "LAST: %01d ", last); fiq_print(FIQDBG_INT, st, "LEN: %03d", st->channel[n].dma_info.slot_len[i]); @@ -317,7 +317,7 @@ static int notrace fiq_iso_out_advance(struct fiq_state *st, int num_channels, i st->channel[n].dma_info.index++; FIQ_WRITE(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCSPLT, hcsplt.d32); FIQ_WRITE(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCTSIZ, hctsiz.d32); - FIQ_WRITE(st->dwc_regs_base + HC_DMA + (HC_OFFSET * n), hcdma.d32); + FIQ_WRITE(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HC_DMA, hcdma.d32); return last; } @@ -564,7 +564,7 @@ static int notrace noinline fiq_fsm_update_hs_isoc(struct fiq_state *state, int /* grab the next DMA address offset from the array */ hcdma.d32 = st->hcdma_copy.d32 + st->hs_isoc_info.iso_desc[st->hs_isoc_info.index].offset; - FIQ_WRITE(state->dwc_regs_base + HC_DMA + (HC_OFFSET * n), hcdma.d32); + FIQ_WRITE(state->dwc_regs_base + HC_START + (HC_OFFSET * n) + HC_DMA, hcdma.d32); /* We need to set multi_count. 
This is a bit tricky - has to be set per-transaction as * the core needs to be told to send the correct number. Caution: for IN transfers, diff --git a/drivers/usb/host/dwc_otg/dwc_otg_fiq_fsm.h b/drivers/usb/host/dwc_otg/dwc_otg_fiq_fsm.h index 68bf29e4582cd2dc179f61e2eca74b3cec824bd7..295194d1874ef1d783f9740b757023401722b37a 100644 --- a/drivers/usb/host/dwc_otg/dwc_otg_fiq_fsm.h +++ b/drivers/usb/host/dwc_otg/dwc_otg_fiq_fsm.h @@ -94,7 +94,7 @@ do { \ #define HC_START 0x500 #define HC_OFFSET 0x020 -#define HC_DMA 0x514 +#define HC_DMA 0x14 #define HCCHAR 0x00 #define HCSPLT 0x04 diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c index c38855aed62c7df6ef3c7e134eae2c9b0e787da2..65c0086e25ae6d8ac1a302308e954ef15d88c41d 100644 --- a/drivers/usb/host/u132-hcd.c +++ b/drivers/usb/host/u132-hcd.c @@ -2559,7 +2559,7 @@ static int u132_get_frame(struct usb_hcd *hcd) } else { int frame = 0; dev_err(&u132->platform_dev->dev, "TODO: u132_get_frame\n"); - msleep(100); + mdelay(100); return frame; } } diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c index 74436f8ca5382f736dbf352c21b7c2f83a8438ba..32ddafe7af87d08276a03118e46b6e7b8f5494e6 100644 --- a/drivers/usb/host/xhci-tegra.c +++ b/drivers/usb/host/xhci-tegra.c @@ -482,7 +482,7 @@ static void tegra_xusb_mbox_handle(struct tegra_xusb *tegra, unsigned long mask; unsigned int port; bool idle, enable; - int err; + int err = 0; memset(&rsp, 0, sizeof(rsp)); diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index fe84b36627ecd1b060e3a456a1f786ce43f61125..64ddba3f79a9586e4f8bed3ec6a6ed36edcce474 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -47,6 +47,21 @@ static unsigned int quirks; module_param(quirks, uint, S_IRUGO); MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default"); +static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring) +{ + struct xhci_segment *seg = ring->first_seg; + + if (!td || !td->start_seg) + return false; + do { + if (seg == td->start_seg) + return true; + seg = seg->next; + } while (seg && seg != ring->first_seg); + + return false; +} + /* TODO: copied from ehci-hcd.c - can this be refactored? */ /* * xhci_handshake - spin reading hc until handshake completes or fails @@ -1024,8 +1039,13 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) command = readl(&xhci->op_regs->command); command |= CMD_CRS; writel(command, &xhci->op_regs->command); + /* + * Some controllers take up to 55+ ms to complete the controller + * restore so setting the timeout to 100ms. Xhci specification + * doesn't mention any timeout value. + */ if (xhci_handshake(&xhci->op_regs->status, - STS_RESTORE, 0, 10 * 1000)) { + STS_RESTORE, 0, 100 * 1000)) { xhci_warn(xhci, "WARN: xHC restore state timeout\n"); spin_unlock_irq(&xhci->lock); return -ETIMEDOUT; @@ -1506,6 +1526,21 @@ static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) goto done; } + /* + * check ring is not re-allocated since URB was enqueued. 
If it is, then + * make sure none of the ring related pointers in this URB private data + * are touched, such as td_list, otherwise we overwrite freed data + */ + if (!td_on_ring(&urb_priv->td[0], ep_ring)) { + xhci_err(xhci, "Canceled URB td not found on endpoint ring"); + for (i = urb_priv->num_tds_done; i < urb_priv->num_tds; i++) { + td = &urb_priv->td[i]; + if (!list_empty(&td->cancelled_td_list)) + list_del_init(&td->cancelled_td_list); + } + goto err_giveback; + } + if (xhci->xhc_state & XHCI_STATE_HALTED) { xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, "HC halted, freeing TD manually."); diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c index 8a13b2fcf3e135d0403f1e6d40641906e45277f9..03152f98108c784ef6cb2d5951bb39b8f2921c88 100644 --- a/drivers/usb/misc/uss720.c +++ b/drivers/usb/misc/uss720.c @@ -382,7 +382,7 @@ static unsigned char parport_uss720_frob_control(struct parport *pp, unsigned ch mask &= 0x0f; val &= 0x0f; d = (priv->reg[1] & (~mask)) ^ val; - if (set_1284_register(pp, 2, d, GFP_KERNEL)) + if (set_1284_register(pp, 2, d, GFP_ATOMIC)) return 0; priv->reg[1] = d; return d & 0xf; @@ -392,7 +392,7 @@ static unsigned char parport_uss720_read_status(struct parport *pp) { unsigned char ret; - if (get_1284_register(pp, 1, &ret, GFP_KERNEL)) + if (get_1284_register(pp, 1, &ret, GFP_ATOMIC)) return 0; return ret & 0xf8; } diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c index 47763311a42e15a20eae8eee6b2b0427541ba773..0673f286afbd4abb3a360540c3c95dae6237a078 100644 --- a/drivers/usb/misc/yurex.c +++ b/drivers/usb/misc/yurex.c @@ -425,13 +425,13 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer, { struct usb_yurex *dev; int i, set = 0, retval = 0; - char buffer[16]; + char buffer[16 + 1]; char *data = buffer; unsigned long long c, c2 = 0; signed long timeout = 0; DEFINE_WAIT(wait); - count = min(sizeof(buffer), count); + count = min(sizeof(buffer) - 1, count); dev = file->private_data; /* verify that we actually have some data to write */ @@ -450,6 +450,7 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer, retval = -EFAULT; goto error; } + buffer[count] = 0; memset(dev->cntl_buffer, CMD_PADDING, YUREX_BUF_SIZE); switch (buffer[0]) { diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c index f6b526606ad1092494f3d8eaac6946bd77069ad2..dbb482b7e0ba61181b6eb2be614e312b55240893 100644 --- a/drivers/usb/musb/musb_dsps.c +++ b/drivers/usb/musb/musb_dsps.c @@ -684,16 +684,6 @@ dsps_dma_controller_create(struct musb *musb, void __iomem *base) return controller; } -static void dsps_dma_controller_destroy(struct dma_controller *c) -{ - struct musb *musb = c->musb; - struct dsps_glue *glue = dev_get_drvdata(musb->controller->parent); - void __iomem *usbss_base = glue->usbss_base; - - musb_writel(usbss_base, USBSS_IRQ_CLEARR, USBSS_IRQ_PD_COMP); - cppi41_dma_controller_destroy(c); -} - #ifdef CONFIG_PM_SLEEP static void dsps_dma_controller_suspend(struct dsps_glue *glue) { @@ -723,7 +713,7 @@ static struct musb_platform_ops dsps_ops = { #ifdef CONFIG_USB_TI_CPPI41_DMA .dma_init = dsps_dma_controller_create, - .dma_exit = dsps_dma_controller_destroy, + .dma_exit = cppi41_dma_controller_destroy, #endif .enable = dsps_musb_enable, .disable = dsps_musb_disable, diff --git a/drivers/usb/phy/phy-fsl-usb.c b/drivers/usb/phy/phy-fsl-usb.c index cf8f40ae6e017943c9ae97e50fca17d50304d51e..9b4354a00ca71d7377bb846390c6f346e6593e48 100644 --- a/drivers/usb/phy/phy-fsl-usb.c +++ 
b/drivers/usb/phy/phy-fsl-usb.c @@ -874,6 +874,7 @@ int usb_otg_start(struct platform_device *pdev) if (pdata->init && pdata->init(pdev) != 0) return -EINVAL; +#ifdef CONFIG_PPC32 if (pdata->big_endian_mmio) { _fsl_readl = _fsl_readl_be; _fsl_writel = _fsl_writel_be; @@ -881,6 +882,7 @@ int usb_otg_start(struct platform_device *pdev) _fsl_readl = _fsl_readl_le; _fsl_writel = _fsl_writel_le; } +#endif /* request irq */ p_otg->irq = platform_get_irq(pdev, 0); @@ -971,7 +973,7 @@ int usb_otg_start(struct platform_device *pdev) /* * state file in sysfs */ -static int show_fsl_usb2_otg_state(struct device *dev, +static ssize_t show_fsl_usb2_otg_state(struct device *dev, struct device_attribute *attr, char *buf) { struct otg_fsm *fsm = &fsl_otg_dev->fsm; diff --git a/drivers/usb/serial/io_ti.h b/drivers/usb/serial/io_ti.h index 1bd67b24f9160a2d9b674151708c3fb5e13b2821..bc9ff5ebd67c1b5a1ab1ea4c4e619558c47f8c32 100644 --- a/drivers/usb/serial/io_ti.h +++ b/drivers/usb/serial/io_ti.h @@ -178,7 +178,7 @@ struct ump_interrupt { } __attribute__((packed)); -#define TIUMP_GET_PORT_FROM_CODE(c) (((c) >> 4) - 3) +#define TIUMP_GET_PORT_FROM_CODE(c) (((c) >> 6) & 0x01) #define TIUMP_GET_FUNC_FROM_CODE(c) ((c) & 0x0f) #define TIUMP_INTERRUPT_CODE_LSR 0x03 #define TIUMP_INTERRUPT_CODE_MSR 0x04 diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c index 3024b9b253605160a5f5e7038159bcc47ec4d1ea..75181d3afd958a81d2de0dfde24092481c089637 100644 --- a/drivers/usb/serial/kobil_sct.c +++ b/drivers/usb/serial/kobil_sct.c @@ -397,12 +397,20 @@ static int kobil_tiocmget(struct tty_struct *tty) transfer_buffer_length, KOBIL_TIMEOUT); - dev_dbg(&port->dev, "%s - Send get_status_line_state URB returns: %i. Statusline: %02x\n", - __func__, result, transfer_buffer[0]); + dev_dbg(&port->dev, "Send get_status_line_state URB returns: %i\n", + result); + if (result < 1) { + if (result >= 0) + result = -EIO; + goto out_free; + } + + dev_dbg(&port->dev, "Statusline: %02x\n", transfer_buffer[0]); result = 0; if ((transfer_buffer[0] & SUSBCR_GSL_DSR) != 0) result = TIOCM_DSR; +out_free: kfree(transfer_buffer); return result; } diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index 8fc3854e5e6967dc32f1a04e5ba95a304f10b683..57e9f6617084f4ae7aa62fafd9d8bcff6925549e 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c @@ -1123,7 +1123,7 @@ static void ti_break(struct tty_struct *tty, int break_state) static int ti_get_port_from_code(unsigned char code) { - return (code >> 4) - 3; + return (code >> 6) & 0x01; } static int ti_get_func_from_code(unsigned char code) diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c index 8cd2926fb1fe6f9a77fc8943beb5ac2eb3ddd40d..344ec8631481ee41dce9e7242f9063580bae696d 100644 --- a/drivers/usb/storage/scsiglue.c +++ b/drivers/usb/storage/scsiglue.c @@ -392,6 +392,15 @@ static int queuecommand_lck(struct scsi_cmnd *srb, return 0; } + if ((us->fflags & US_FL_NO_ATA_1X) && + (srb->cmnd[0] == ATA_12 || srb->cmnd[0] == ATA_16)) { + memcpy(srb->sense_buffer, usb_stor_sense_invalidCDB, + sizeof(usb_stor_sense_invalidCDB)); + srb->result = SAM_STAT_CHECK_CONDITION; + done(srb); + return 0; + } + /* enqueue the command and wake up the control thread */ srb->scsi_done = done; us->srb = srb; diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index 33a6d624c8438b50e29c23bbf86af1cdf20d91d8..24de9c00d8e2498234fbc10887442950a1559977 100644 --- a/drivers/usb/storage/uas.c 
+++ b/drivers/usb/storage/uas.c @@ -842,6 +842,27 @@ static int uas_slave_configure(struct scsi_device *sdev) sdev->skip_ms_page_8 = 1; sdev->wce_default_on = 1; } + + /* + * Some disks return the total number of blocks in response + * to READ CAPACITY rather than the highest block number. + * If this device makes that mistake, tell the sd driver. + */ + if (devinfo->flags & US_FL_FIX_CAPACITY) + sdev->fix_capacity = 1; + + /* + * Some devices don't like MODE SENSE with page=0x3f, + * which is the command used for checking if a device + * is write-protected. Now that we tell the sd driver + * to do a 192-byte transfer with this command the + * majority of devices work fine, but a few still can't + * handle it. The sd driver will simply assume those + * devices are write-enabled. + */ + if (devinfo->flags & US_FL_NO_WP_DETECT) + sdev->skip_ms_page_3f = 1; + scsi_change_queue_depth(sdev, devinfo->qdepth - 2); return 0; } diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index d100290628bd26beb11991ebbcd8e6c84d6d719a..5e9b35a91431987e1c14dd57ebb7a38a669daa64 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -2307,6 +2307,13 @@ UNUSUAL_DEV( 0x2735, 0x100b, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_GO_SLOW ), +/* Reported-by: Tim Anderson */ +UNUSUAL_DEV( 0x2ca3, 0x0031, 0x0000, 0x9999, + "DJI", + "CineSSD", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_ATA_1X), + /* * Reported by Frederic Marchal * Mio Moov 330 diff --git a/drivers/usb/wusbcore/security.c b/drivers/usb/wusbcore/security.c index 170f2c38de9b5695e37008c9dc975787f66021cb..5274aa7339b8fffc281e653134ccfdf57f99e790 100644 --- a/drivers/usb/wusbcore/security.c +++ b/drivers/usb/wusbcore/security.c @@ -230,7 +230,7 @@ int wusb_dev_sec_add(struct wusbhc *wusbhc, result = usb_get_descriptor(usb_dev, USB_DT_SECURITY, 0, secd, sizeof(*secd)); - if (result < sizeof(*secd)) { + if (result < (int)sizeof(*secd)) { dev_err(dev, "Can't read security descriptor or " "not enough data: %d\n", result); goto out; diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c index 9a53912bdfe9f73d4b7de6a1d3b74dd8a94dc8b6..5d3ba747ae17a8882a959ff7be59309eecf11c36 100644 --- a/drivers/uwb/hwa-rc.c +++ b/drivers/uwb/hwa-rc.c @@ -873,6 +873,7 @@ error_get_version: error_rc_add: usb_put_intf(iface); usb_put_dev(hwarc->usb_dev); + kfree(hwarc); error_alloc: uwb_rc_put(uwb_rc); error_rc_alloc: diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 3cf74f54c7a1e2645f12c71c06e2c4da96a47044..7ee3167bc083e6ea1c36bd0795240d84d520eb19 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -960,7 +960,7 @@ static void vhost_iotlb_notify_vq(struct vhost_dev *d, list_for_each_entry_safe(node, n, &d->pending_list, node) { struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb; if (msg->iova <= vq_msg->iova && - msg->iova + msg->size - 1 > vq_msg->iova && + msg->iova + msg->size - 1 >= vq_msg->iova && vq_msg->type == VHOST_IOTLB_MISS) { vhost_poll_queue(&node->vq->poll); list_del(&node->node); diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index be8c43d62698a426504642a2b51ffd71b2ac90e8..610e97125116597f7d62f22e72dec3ae8c9a32e3 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -1752,12 +1752,12 @@ static int do_register_framebuffer(struct fb_info *fb_info) return 0; } -static int do_unregister_framebuffer(struct fb_info *fb_info) +static int unbind_console(struct fb_info *fb_info) { struct 
fb_event event; - int i, ret = 0; + int ret; + int i = fb_info->node; - i = fb_info->node; if (i < 0 || i >= FB_MAX || registered_fb[i] != fb_info) return -EINVAL; @@ -1772,17 +1772,29 @@ static int do_unregister_framebuffer(struct fb_info *fb_info) unlock_fb_info(fb_info); console_unlock(); + return ret; +} + +static int __unlink_framebuffer(struct fb_info *fb_info); + +static int do_unregister_framebuffer(struct fb_info *fb_info) +{ + struct fb_event event; + int ret; + + ret = unbind_console(fb_info); + if (ret) return -EINVAL; pm_vt_switch_unregister(fb_info->dev); - unlink_framebuffer(fb_info); + __unlink_framebuffer(fb_info); if (fb_info->pixmap.addr && (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT)) kfree(fb_info->pixmap.addr); fb_destroy_modelist(&fb_info->modelist); - registered_fb[i] = NULL; + registered_fb[fb_info->node] = NULL; num_registered_fb--; fb_cleanup_device(fb_info); event.info = fb_info; @@ -1795,7 +1807,7 @@ static int do_unregister_framebuffer(struct fb_info *fb_info) return 0; } -int unlink_framebuffer(struct fb_info *fb_info) +static int __unlink_framebuffer(struct fb_info *fb_info) { int i; @@ -1807,6 +1819,20 @@ int unlink_framebuffer(struct fb_info *fb_info) device_destroy(fb_class, MKDEV(FB_MAJOR, i)); fb_info->dev = NULL; } + + return 0; +} + +int unlink_framebuffer(struct fb_info *fb_info) +{ + int ret; + + ret = __unlink_framebuffer(fb_info); + if (ret) + return ret; + + unbind_console(fb_info); + return 0; } EXPORT_SYMBOL(unlink_framebuffer); diff --git a/drivers/video/fbdev/core/modedb.c b/drivers/video/fbdev/core/modedb.c index 2510fa728d77160326781219ec554ae50c99be4a..de119f11b78f964301ef9dafa4c3f1043b6e53e2 100644 --- a/drivers/video/fbdev/core/modedb.c +++ b/drivers/video/fbdev/core/modedb.c @@ -644,7 +644,7 @@ static int fb_try_mode(struct fb_var_screeninfo *var, struct fb_info *info, * * Valid mode specifiers for @mode_option: * - * x[M][R][-][@][i][m] or + * x[M][R][-][@][i][p][m] or * [-][@] * * with , , and decimal numbers and @@ -653,10 +653,10 @@ static int fb_try_mode(struct fb_var_screeninfo *var, struct fb_info *info, * If 'M' is present after yres (and before refresh/bpp if present), * the function will compute the timings using VESA(tm) Coordinated * Video Timings (CVT). If 'R' is present after 'M', will compute with - * reduced blanking (for flatpanels). If 'i' is present, compute - * interlaced mode. If 'm' is present, add margins equal to 1.8% - * of xres rounded down to 8 pixels, and 1.8% of yres. The char - * 'i' and 'm' must be after 'M' and 'R'. Example: + * reduced blanking (for flatpanels). If 'i' or 'p' are present, compute + * interlaced or progressive mode. If 'm' is present, add margins equal + * to 1.8% of xres rounded down to 8 pixels, and 1.8% of yres. The chars + * 'i', 'p' and 'm' must be after 'M' and 'R'. Example: * * 1024x768MR-8@60m - Reduced blank with margins at 60Hz. 
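As a rough illustration of the specifier grammar above (a hypothetical sketch, not part of this patch: the info/var names and the chosen mode are placeholders from a generic fbdev driver), the new 'p' flag lets a mode string such as "1280x720-16@60p" request an explicitly progressive 1280x720 mode at 16 bpp and 60 Hz, the counterpart of "1280x720-16@60i" for interlaced; such a string is typically supplied via the video= boot option or a driver's mode_option parameter and handed to fb_find_mode():

	int ret;

	ret = fb_find_mode(&info->var, info, "1280x720-16@60p",
			   NULL, 0, NULL, 16);
	if (!ret)	/* no usable mode could be derived */
		return -EINVAL;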
* @@ -697,7 +697,8 @@ int fb_find_mode(struct fb_var_screeninfo *var, unsigned int namelen = strlen(name); int res_specified = 0, bpp_specified = 0, refresh_specified = 0; unsigned int xres = 0, yres = 0, bpp = default_bpp, refresh = 0; - int yres_specified = 0, cvt = 0, rb = 0, interlace = 0; + int yres_specified = 0, cvt = 0, rb = 0; + int interlace_specified = 0, interlace = 0; int margins = 0; u32 best, diff, tdiff; @@ -748,9 +749,17 @@ int fb_find_mode(struct fb_var_screeninfo *var, if (!cvt) margins = 1; break; + case 'p': + if (!cvt) { + interlace = 0; + interlace_specified = 1; + } + break; case 'i': - if (!cvt) + if (!cvt) { interlace = 1; + interlace_specified = 1; + } break; default: goto done; @@ -819,11 +828,21 @@ done: if ((name_matches(db[i], name, namelen) || (res_specified && res_matches(db[i], xres, yres))) && !fb_try_mode(var, info, &db[i], bpp)) { - if (refresh_specified && db[i].refresh == refresh) - return 1; + const int db_interlace = (db[i].vmode & + FB_VMODE_INTERLACED ? 1 : 0); + int score = abs(db[i].refresh - refresh); + + if (interlace_specified) + score += abs(db_interlace - interlace); + + if (!interlace_specified || + db_interlace == interlace) + if (refresh_specified && + db[i].refresh == refresh) + return 1; - if (abs(db[i].refresh - refresh) < diff) { - diff = abs(db[i].refresh - refresh); + if (score < diff) { + diff = score; best = i; } } diff --git a/drivers/video/fbdev/goldfishfb.c b/drivers/video/fbdev/goldfishfb.c index 7f6c9e6cfc6c99d8d9912db5d2f78242923f51c5..14a93cb213100a5062c60ce94ef9a976fff1d3e4 100644 --- a/drivers/video/fbdev/goldfishfb.c +++ b/drivers/video/fbdev/goldfishfb.c @@ -301,6 +301,7 @@ static int goldfish_fb_remove(struct platform_device *pdev) dma_free_coherent(&pdev->dev, framesize, (void *)fb->fb.screen_base, fb->fb.fix.smem_start); iounmap(fb->reg_base); + kfree(fb); return 0; } diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c index 3479a47a308208f3c414efde067f9cf8cfecd17c..0eee8a29d28d038345b2526c0fba7cff2720b521 100644 --- a/drivers/video/fbdev/omap/omapfb_main.c +++ b/drivers/video/fbdev/omap/omapfb_main.c @@ -958,7 +958,7 @@ int omapfb_register_client(struct omapfb_notifier_block *omapfb_nb, { int r; - if ((unsigned)omapfb_nb->plane_idx > OMAPFB_PLANE_NUM) + if ((unsigned)omapfb_nb->plane_idx >= OMAPFB_PLANE_NUM) return -EINVAL; if (!notifier_inited) { diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c index c3d49e13643cd21504d7ffa62f1231d6a9a8ef38..29f00eccdd015474858023a7af300c73d0d9f8ee 100644 --- a/drivers/video/fbdev/pxafb.c +++ b/drivers/video/fbdev/pxafb.c @@ -2130,8 +2130,8 @@ static int of_get_pxafb_display(struct device *dev, struct device_node *disp, return -EINVAL; ret = -ENOMEM; - info->modes = kmalloc_array(timings->num_timings, - sizeof(info->modes[0]), GFP_KERNEL); + info->modes = kcalloc(timings->num_timings, sizeof(info->modes[0]), + GFP_KERNEL); if (!info->modes) goto out; info->num_modes = timings->num_timings; diff --git a/drivers/video/fbdev/via/viafbdev.c b/drivers/video/fbdev/via/viafbdev.c index badee04ef496cefd02b8f3f2faebb21fafb839b5..71b5dca95bdbddbc85d3eb2fa3371478873586b1 100644 --- a/drivers/video/fbdev/via/viafbdev.c +++ b/drivers/video/fbdev/via/viafbdev.c @@ -19,6 +19,7 @@ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
*/ +#include #include #include #include @@ -1468,7 +1469,7 @@ static const struct file_operations viafb_vt1636_proc_fops = { #endif /* CONFIG_FB_VIA_DIRECT_PROCFS */ -static int viafb_sup_odev_proc_show(struct seq_file *m, void *v) +static int __maybe_unused viafb_sup_odev_proc_show(struct seq_file *m, void *v) { via_odev_to_seq(m, supported_odev_map[ viaparinfo->shared->chip_info.gfx_chip_name]); diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c index 2780886e8ba3d393ba4847366983c2ab56a7ed3e..de062fb201bc2a4316910419d0495fbb2d0f9601 100644 --- a/drivers/virtio/virtio_pci_legacy.c +++ b/drivers/virtio/virtio_pci_legacy.c @@ -122,6 +122,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, struct virtqueue *vq; u16 num; int err; + u64 q_pfn; /* Select the queue we're interested in */ iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); @@ -141,9 +142,17 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, if (!vq) return ERR_PTR(-ENOMEM); + q_pfn = virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT; + if (q_pfn >> 32) { + dev_err(&vp_dev->pci_dev->dev, + "platform bug: legacy virtio-mmio must not be used with RAM above 0x%llxGB\n", + 0x1ULL << (32 + PAGE_SHIFT - 30)); + err = -E2BIG; + goto out_del_vq; + } + /* activate the queue */ - iowrite32(virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT, - vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); + iowrite32(q_pfn, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); vq->priv = (void __force *)vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY; @@ -160,6 +169,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, out_deactivate: iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); +out_del_vq: vring_del_virtqueue(vq); return ERR_PTR(err); } diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index c425d03d37d2700de084b920416fef0465d918f8..587d12829925b4279ea4a28f24ece5f340f44569 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c @@ -292,8 +292,15 @@ static void sysrq_handler(struct xenbus_watch *watch, const char *path, return; } - if (sysrq_key != '\0') - xenbus_printf(xbt, "control", "sysrq", "%c", '\0'); + if (sysrq_key != '\0') { + err = xenbus_printf(xbt, "control", "sysrq", "%c", '\0'); + if (err) { + pr_err("%s: Error %d writing sysrq in control/sysrq\n", + __func__, err); + xenbus_transaction_end(xbt, 1); + return; + } + } err = xenbus_transaction_end(xbt, 0); if (err == -EAGAIN) @@ -345,7 +352,12 @@ static int setup_shutdown_watcher(void) continue; snprintf(node, FEATURE_PATH_SIZE, "feature-%s", shutdown_handlers[idx].command); - xenbus_printf(XBT_NIL, "control", node, "%u", 1); + err = xenbus_printf(XBT_NIL, "control", node, "%u", 1); + if (err) { + pr_err("%s: Error %d writing %s\n", __func__, + err, node); + return err; + } } return 0; diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c index b437fccd4e624e3b7a8972245e9e992fe77c2993..294f35ce9e46bc063a7e01061462cf3e298b4364 100644 --- a/drivers/xen/xen-balloon.c +++ b/drivers/xen/xen-balloon.c @@ -81,7 +81,7 @@ static void watch_target(struct xenbus_watch *watch, static_max = new_target; else static_max >>= PAGE_SHIFT - 10; - target_diff = xen_pv_domain() ? 0 + target_diff = (xen_pv_domain() || xen_initial_domain()) ? 
0 : static_max - balloon_stats.target_pages; } diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c index 7bc88fd43cfc84d05873893ef4ddec8307e76c2a..e2f3e8b0fba9ff160a7c82a37e64cf5fe0b3c8f0 100644 --- a/drivers/xen/xen-scsiback.c +++ b/drivers/xen/xen-scsiback.c @@ -1012,6 +1012,7 @@ static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state, { struct v2p_entry *entry; unsigned long flags; + int err; if (try) { spin_lock_irqsave(&info->v2p_lock, flags); @@ -1027,8 +1028,11 @@ static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state, scsiback_del_translation_entry(info, vir); } } else if (!try) { - xenbus_printf(XBT_NIL, info->dev->nodename, state, + err = xenbus_printf(XBT_NIL, info->dev->nodename, state, "%d", XenbusStateClosed); + if (err) + xenbus_dev_error(info->dev, err, + "%s: writing %s", __func__, state); } } @@ -1067,8 +1071,11 @@ static void scsiback_do_1lun_hotplug(struct vscsibk_info *info, int op, snprintf(str, sizeof(str), "vscsi-devs/%s/p-dev", ent); val = xenbus_read(XBT_NIL, dev->nodename, str, NULL); if (IS_ERR(val)) { - xenbus_printf(XBT_NIL, dev->nodename, state, + err = xenbus_printf(XBT_NIL, dev->nodename, state, "%d", XenbusStateClosed); + if (err) + xenbus_dev_error(info->dev, err, + "%s: writing %s", __func__, state); return; } strlcpy(phy, val, VSCSI_NAMELEN); @@ -1079,8 +1086,11 @@ static void scsiback_do_1lun_hotplug(struct vscsibk_info *info, int op, err = xenbus_scanf(XBT_NIL, dev->nodename, str, "%u:%u:%u:%u", &vir.hst, &vir.chn, &vir.tgt, &vir.lun); if (XENBUS_EXIST_ERR(err)) { - xenbus_printf(XBT_NIL, dev->nodename, state, + err = xenbus_printf(XBT_NIL, dev->nodename, state, "%d", XenbusStateClosed); + if (err) + xenbus_dev_error(info->dev, err, + "%s: writing %s", __func__, state); return; } diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c index f329eee6dc93e3f75033aa4c52e459aceeeb8df0..352abc39e891a1468d3576cc199fa13b089f32f7 100644 --- a/fs/9p/xattr.c +++ b/fs/9p/xattr.c @@ -105,7 +105,7 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name, { struct kvec kvec = {.iov_base = (void *)value, .iov_len = value_len}; struct iov_iter from; - int retval; + int retval, err; iov_iter_kvec(&from, WRITE | ITER_KVEC, &kvec, 1, value_len); @@ -126,7 +126,9 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name, retval); else p9_client_write(fid, 0, &from, &retval); - p9_client_clunk(fid); + err = p9_client_clunk(fid); + if (!retval && err) + retval = err; return retval; } diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h index bf71f1aaecbe95de81c04a4e064a4737bc5a860c..b120fbd4148389d54a4a8cc82df5c5ccfb1216fc 100644 --- a/fs/autofs4/autofs_i.h +++ b/fs/autofs4/autofs_i.h @@ -27,6 +27,7 @@ #include #include #include +#include /* This is the range of ioctl() numbers we claim as ours */ #define AUTOFS_IOC_FIRST AUTOFS_IOC_READY @@ -125,7 +126,8 @@ struct autofs_sb_info { static inline struct autofs_sb_info *autofs4_sbi(struct super_block *sb) { - return (struct autofs_sb_info *)(sb->s_fs_info); + return sb->s_magic != AUTOFS_SUPER_MAGIC ? 
+ NULL : (struct autofs_sb_info *)(sb->s_fs_info); } static inline struct autofs_info *autofs4_dentry_ino(struct dentry *dentry) diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c index 09e7d68dff02db1dd054c0a6e0fd97aa6bef8aaf..3c7e727612fa3911415e0fa22e72fcedff81881d 100644 --- a/fs/autofs4/inode.c +++ b/fs/autofs4/inode.c @@ -14,7 +14,6 @@ #include #include #include -#include #include "autofs_i.h" #include diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index c0e3f91e28e91ed128d9b2285999a9bfd5dee05c..469666df91da296f8ca0e18688a2fcc4308a5459 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -1725,7 +1725,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t, const struct user_regset *regset = &view->regsets[i]; do_thread_regset_writeback(t->task, regset); if (regset->core_note_type && regset->get && - (!regset->active || regset->active(t->task, regset))) { + (!regset->active || regset->active(t->task, regset) > 0)) { int ret; size_t size = regset->n * regset->size; void *data = kmalloc(size, GFP_KERNEL); diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index 7c655f9a7a504e5d758323e6be536cfb39753b2d..dd80a1bdf9e2725c49a6920426022f2fddbc0edc 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -588,6 +588,12 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info, btrfs_rm_dev_replace_unblocked(fs_info); + /* + * Increment dev_stats_ccnt so that btrfs_run_dev_stats() will + * update on-disk dev stats value during commit transaction + */ + atomic_inc(&tgt_device->dev_stats_ccnt); + /* * this is again a consistent state where no dev_replace procedure * is running, the target device is part of the filesystem, the diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index b475d1ebbbbf02117a45fc26f5652e907538e7d6..5cf1bbe9754cfed6eb6b5a9e246859b8d30d0204 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1098,8 +1098,9 @@ static int btree_writepages(struct address_space *mapping, fs_info = BTRFS_I(mapping->host)->root->fs_info; /* this is a bit racy, but that's ok */ - ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes, - BTRFS_DIRTY_METADATA_THRESH); + ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes, + BTRFS_DIRTY_METADATA_THRESH, + fs_info->dirty_metadata_batch); if (ret < 0) return 0; } @@ -4030,8 +4031,9 @@ static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info, if (flush_delayed) btrfs_balance_delayed_items(fs_info); - ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes, - BTRFS_DIRTY_METADATA_THRESH); + ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes, + BTRFS_DIRTY_METADATA_THRESH, + fs_info->dirty_metadata_batch); if (ret > 0) { balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping); } diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 53487102081d6c8c2adfd46c2e9f83926166cb30..f96f72659693a185530200d30afd7d687c436ed1 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -4407,7 +4407,7 @@ commit_trans: data_sinfo->flags, bytes, 1); spin_unlock(&data_sinfo->lock); - return ret; + return 0; } int btrfs_check_data_free_space(struct inode *inode, @@ -10757,7 +10757,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) /* Don't want to race with allocators so take the groups_sem */ down_write(&space_info->groups_sem); spin_lock(&block_group->lock); - if (block_group->reserved || + if (block_group->reserved || block_group->pinned || btrfs_block_group_used(&block_group->item) || block_group->ro || 
list_is_singular(&block_group->list)) { diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 28a58f40f3a4746b2586f8155840ed7c692bea2e..e8bfafa25a71684a191f55ad34733dcc76072778 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -6152,32 +6152,6 @@ err: return ret; } -int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc) -{ - struct btrfs_root *root = BTRFS_I(inode)->root; - struct btrfs_trans_handle *trans; - int ret = 0; - bool nolock = false; - - if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags)) - return 0; - - if (btrfs_fs_closing(root->fs_info) && - btrfs_is_free_space_inode(BTRFS_I(inode))) - nolock = true; - - if (wbc->sync_mode == WB_SYNC_ALL) { - if (nolock) - trans = btrfs_join_transaction_nolock(root); - else - trans = btrfs_join_transaction(root); - if (IS_ERR(trans)) - return PTR_ERR(trans); - ret = btrfs_commit_transaction(trans); - } - return ret; -} - /* * This is somewhat expensive, updating the tree every time the * inode changes. But, it is most likely to find the inode in cache. diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 7303ba108112215da25b906bf8145bd7a9bea087..a507c0d253545f5df4c487e27755a0fe3eb8cc27 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -3158,6 +3158,25 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen, same_lock_start = min_t(u64, loff, dst_loff); same_lock_len = max_t(u64, loff, dst_loff) + len - same_lock_start; + } else { + /* + * If the source and destination inodes are different, the + * source's range end offset matches the source's i_size, that + * i_size is not a multiple of the sector size, and the + * destination range does not go past the destination's i_size, + * we must round down the length to the nearest sector size + * multiple. If we don't do this adjustment we end replacing + * with zeroes the bytes in the range that starts at the + * deduplication range's end offset and ends at the next sector + * size multiple. 
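As a concrete (hypothetical) illustration of the adjustment described above, assume a 4096-byte sectorsize: with a 10000-byte source file, loff = 0 and olen = 10000 (so the source range ends exactly at i_size), and a destination file larger than 10000 bytes, the check below shrinks len and olen to round_down(10000, 4096) = 8192, so the trailing 1808 bytes are simply left out of the dedup request instead of having the matching bytes in the destination overwritten with zeroes.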
+ */ + if (loff + olen == i_size_read(src) && + dst_loff + len < i_size_read(dst)) { + const u64 sz = BTRFS_I(src)->root->fs_info->sectorsize; + + len = round_down(i_size_read(src), sz) - loff; + olen = len; + } } /* don't make the dst file partly checksummed */ diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 9841faef08ea72d0b50aa3ad3c881bd27bd7fc76..b80b03e0c5d3f3f2b70b4cb7bef64450c889a38a 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -1334,18 +1334,19 @@ static void __del_reloc_root(struct btrfs_root *root) struct mapping_node *node = NULL; struct reloc_control *rc = fs_info->reloc_ctl; - spin_lock(&rc->reloc_root_tree.lock); - rb_node = tree_search(&rc->reloc_root_tree.rb_root, - root->node->start); - if (rb_node) { - node = rb_entry(rb_node, struct mapping_node, rb_node); - rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root); + if (rc) { + spin_lock(&rc->reloc_root_tree.lock); + rb_node = tree_search(&rc->reloc_root_tree.rb_root, + root->node->start); + if (rb_node) { + node = rb_entry(rb_node, struct mapping_node, rb_node); + rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root); + } + spin_unlock(&rc->reloc_root_tree.lock); + if (!node) + return; + BUG_ON((struct btrfs_root *)node->data != root); } - spin_unlock(&rc->reloc_root_tree.lock); - - if (!node) - return; - BUG_ON((struct btrfs_root *)node->data != root); spin_lock(&fs_info->trans_lock); list_del_init(&root->root_list); diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 936d58ca2b4914a7215920832d88fa5d151f00ec..61192c536e6ced67b82b18ccdcf2820e34d11c1b 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -1166,11 +1166,6 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check) return ret; } - if (sctx->is_dev_replace && !is_metadata && !have_csum) { - sblocks_for_recheck = NULL; - goto nodatasum_case; - } - /* * read all mirrors one after the other. This includes to * re-read the extent or metadata block that failed (that was @@ -1283,13 +1278,19 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check) goto out; } - if (!is_metadata && !have_csum) { + /* + * NOTE: Even for nodatasum case, it's still possible that it's a + * compressed data extent, thus scrub_fixup_nodatasum(), which write + * inode page cache onto disk, could cause serious data corruption. + * + * So here we could only read from disk, and hope our recovery could + * reach disk before the newer write. 
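Because the "if (0 && ...)" guard that follows can never be true, the scrub_fixup_nodatasum branch below becomes unreachable; errors on nodatasum data therefore fall through to the generic read-all-mirrors recheck and repair path, which is the safer behaviour the comment above argues for.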
+ */ + if (0 && !is_metadata && !have_csum) { struct scrub_fixup_nodatasum *fixup_nodatasum; WARN_ON(sctx->is_dev_replace); -nodatasum_case: - /* * !is_metadata and !have_csum, this means that the data * might not be COWed, that it might be modified diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 8e3ce81d3f44c5da51b5efe31d61ea17f7fa6f1b..fe960d5e89137d9b092d9603a88679867e5e424e 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -2271,7 +2271,6 @@ static const struct super_operations btrfs_super_ops = { .sync_fs = btrfs_sync_fs, .show_options = btrfs_show_options, .show_devname = btrfs_show_devname, - .write_inode = btrfs_write_inode, .alloc_inode = btrfs_alloc_inode, .destroy_inode = btrfs_destroy_inode, .statfs = btrfs_statfs, diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 08afafb6ecf763eb37bc579eeec11d9801fd63e2..a39b1f0b06066e34bd1594db8f3d63c44bbb1380 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -6492,10 +6492,14 @@ static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key, write_lock(&map_tree->map_tree.lock); ret = add_extent_mapping(&map_tree->map_tree, em, 0); write_unlock(&map_tree->map_tree.lock); - BUG_ON(ret); /* Tree corruption */ + if (ret < 0) { + btrfs_err(fs_info, + "failed to add chunk map, start=%llu len=%llu: %d", + em->start, em->len, ret); + } free_extent_map(em); - return 0; + return ret; } static void fill_device_from_item(struct extent_buffer *leaf, diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c index 3978b324cbca814e86a06017e155f0002c96d1c0..5f2f67d220fa984991a39d35e6c000e679b8a365 100644 --- a/fs/cachefiles/namei.c +++ b/fs/cachefiles/namei.c @@ -195,7 +195,6 @@ wait_for_old_object: pr_err("\n"); pr_err("Error: Unexpected object collision\n"); cachefiles_printk_object(object, xobject); - BUG(); } atomic_inc(&xobject->usage); write_unlock(&cache->active_lock); diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c index 18d7aa61ef0f4e4bd6acb7ecfb782e497bb25c8d..199eb396a1bbb753e9aa39618b6a5cae25002006 100644 --- a/fs/cachefiles/rdwr.c +++ b/fs/cachefiles/rdwr.c @@ -27,6 +27,7 @@ static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode, struct cachefiles_one_read *monitor = container_of(wait, struct cachefiles_one_read, monitor); struct cachefiles_object *object; + struct fscache_retrieval *op = monitor->op; struct wait_bit_key *key = _key; struct page *page = wait->private; @@ -51,16 +52,22 @@ static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode, list_del(&wait->entry); /* move onto the action list and queue for FS-Cache thread pool */ - ASSERT(monitor->op); + ASSERT(op); - object = container_of(monitor->op->op.object, - struct cachefiles_object, fscache); + /* We need to temporarily bump the usage count as we don't own a ref + * here otherwise cachefiles_read_copier() may free the op between the + * monitor being enqueued on the op->to_do list and the op getting + * enqueued on the work queue. 
+ */ + fscache_get_retrieval(op); + object = container_of(op->op.object, struct cachefiles_object, fscache); spin_lock(&object->work_lock); - list_add_tail(&monitor->op_link, &monitor->op->to_do); + list_add_tail(&monitor->op_link, &op->to_do); spin_unlock(&object->work_lock); - fscache_enqueue_retrieval(monitor->op); + fscache_enqueue_retrieval(op); + fscache_put_retrieval(op); return 0; } diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index f2550a076edc4e65da6e36354ef9ba2ba517d184..d5124ed35154e38063e04e5033a4be459f96b713 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -1087,6 +1087,7 @@ static struct dentry *splice_dentry(struct dentry *dn, struct inode *in) if (IS_ERR(realdn)) { pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n", PTR_ERR(realdn), dn, in, ceph_vinop(in)); + dput(dn); dn = realdn; /* note realdn contains the error */ goto out; } else if (realdn) { diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c index cbb9534b89b40bc3f3cc3be31254485b976d2bcb..2565cee702e484609808e774a7f81b27a9e87243 100644 --- a/fs/cifs/cifs_debug.c +++ b/fs/cifs/cifs_debug.c @@ -123,25 +123,41 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v) seq_printf(m, "CIFS Version %s\n", CIFS_VERSION); seq_printf(m, "Features:"); #ifdef CONFIG_CIFS_DFS_UPCALL - seq_printf(m, " dfs"); + seq_printf(m, " DFS"); #endif #ifdef CONFIG_CIFS_FSCACHE - seq_printf(m, " fscache"); + seq_printf(m, ",FSCACHE"); +#endif +#ifdef CONFIG_CIFS_SMB_DIRECT + seq_printf(m, ",SMB_DIRECT"); +#endif +#ifdef CONFIG_CIFS_STATS2 + seq_printf(m, ",STATS2"); +#elif defined(CONFIG_CIFS_STATS) + seq_printf(m, ",STATS"); +#endif +#ifdef CONFIG_CIFS_DEBUG2 + seq_printf(m, ",DEBUG2"); +#elif defined(CONFIG_CIFS_DEBUG) + seq_printf(m, ",DEBUG"); +#endif +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY + seq_printf(m, ",ALLOW_INSECURE_LEGACY"); #endif #ifdef CONFIG_CIFS_WEAK_PW_HASH - seq_printf(m, " lanman"); + seq_printf(m, ",WEAK_PW_HASH"); #endif #ifdef CONFIG_CIFS_POSIX - seq_printf(m, " posix"); + seq_printf(m, ",CIFS_POSIX"); #endif #ifdef CONFIG_CIFS_UPCALL - seq_printf(m, " spnego"); + seq_printf(m, ",UPCALL(SPNEGO)"); #endif #ifdef CONFIG_CIFS_XATTR - seq_printf(m, " xattr"); + seq_printf(m, ",XATTR"); #endif #ifdef CONFIG_CIFS_ACL - seq_printf(m, " acl"); + seq_printf(m, ",ACL"); #endif seq_putc(m, '\n'); seq_printf(m, "Active VFS Requests: %d\n", GlobalTotalActiveXid); @@ -273,6 +289,10 @@ static ssize_t cifs_stats_proc_write(struct file *file, atomic_set(&totBufAllocCount, 0); atomic_set(&totSmBufAllocCount, 0); #endif /* CONFIG_CIFS_STATS2 */ + spin_lock(&GlobalMid_Lock); + GlobalMaxActiveXid = 0; + GlobalCurrentXid = 0; + spin_unlock(&GlobalMid_Lock); spin_lock(&cifs_tcp_ses_lock); list_for_each(tmp1, &cifs_tcp_ses_list) { server = list_entry(tmp1, struct TCP_Server_Info, @@ -285,6 +305,10 @@ static ssize_t cifs_stats_proc_write(struct file *file, struct cifs_tcon, tcon_list); atomic_set(&tcon->num_smbs_sent, 0); + spin_lock(&tcon->stat_lock); + tcon->bytes_read = 0; + tcon->bytes_written = 0; + spin_unlock(&tcon->stat_lock); if (server->ops->clear_stats) server->ops->clear_stats(tcon); } diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 490c5fc9e69c6f075c6009b7d5fea3a1314f7a28..44a7b2dea6882ad3769cd9e5e22aef355bb186b5 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -197,14 +197,16 @@ cifs_statfs(struct dentry *dentry, struct kstatfs *buf) xid = get_xid(); - /* - * PATH_MAX may be too long - it would presumably be total path, - * but note that some servers (includinng Samba 3) have a 
shorter - * maximum path. - * - * Instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO. - */ - buf->f_namelen = PATH_MAX; + if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0) + buf->f_namelen = + le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength); + else + buf->f_namelen = PATH_MAX; + + buf->f_fsid.val[0] = tcon->vol_serial_number; + /* are using part of create time for more randomness, see man statfs */ + buf->f_fsid.val[1] = (int)le64_to_cpu(tcon->vol_create_time); + buf->f_files = 0; /* undefined */ buf->f_ffree = 0; /* unlimited */ diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 0c7b7e2a0919a9a8c219543f5febe79c9dd6195b..2cd0b3053439fa689bd5c2fc5e712a5273264ca2 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -467,6 +467,8 @@ cifs_sfu_type(struct cifs_fattr *fattr, const char *path, oparms.cifs_sb = cifs_sb; oparms.desired_access = GENERIC_READ; oparms.create_options = CREATE_NOT_DIR; + if (backup_cred(cifs_sb)) + oparms.create_options |= CREATE_OPEN_BACKUP_INTENT; oparms.disposition = FILE_OPEN; oparms.path = path; oparms.fid = &fid; @@ -1122,6 +1124,8 @@ cifs_set_file_info(struct inode *inode, struct iattr *attrs, unsigned int xid, if (!server->ops->set_file_info) return -ENOSYS; + info_buf.Pad = 0; + if (attrs->ia_valid & ATTR_ATIME) { set_time = true; info_buf.LastAccessTime = diff --git a/fs/cifs/link.c b/fs/cifs/link.c index 889a840172eb8840daa73268e2ec5523122c4307..9451a7f6893db9682994fdddbe831612d5f55ada 100644 --- a/fs/cifs/link.c +++ b/fs/cifs/link.c @@ -396,7 +396,7 @@ smb3_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon, struct cifs_io_parms io_parms; int buf_type = CIFS_NO_BUFFER; __le16 *utf16_path; - __u8 oplock = SMB2_OPLOCK_LEVEL_II; + __u8 oplock = SMB2_OPLOCK_LEVEL_NONE; struct smb2_file_all_info *pfile_info = NULL; oparms.tcon = tcon; @@ -458,7 +458,7 @@ smb3_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon, struct cifs_io_parms io_parms; int create_options = CREATE_NOT_DIR; __le16 *utf16_path; - __u8 oplock = SMB2_OPLOCK_LEVEL_EXCLUSIVE; + __u8 oplock = SMB2_OPLOCK_LEVEL_NONE; struct kvec iov[2]; if (backup_cred(cifs_sb)) diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index 791aecb7c1accf83c5b773ba459d896139140908..3ce6331a11013aa1c8c576c28d02aaa13e4be0b1 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c @@ -376,8 +376,15 @@ static char *nxt_dir_entry(char *old_entry, char *end_of_smb, int level) new_entry = old_entry + sizeof(FIND_FILE_STANDARD_INFO) + pfData->FileNameLength; - } else - new_entry = old_entry + le32_to_cpu(pDirInfo->NextEntryOffset); + } else { + u32 next_offset = le32_to_cpu(pDirInfo->NextEntryOffset); + + if (old_entry + next_offset < old_entry) { + cifs_dbg(VFS, "invalid offset %u\n", next_offset); + return NULL; + } + new_entry = old_entry + next_offset; + } cifs_dbg(FYI, "new entry %p old entry %p\n", new_entry, old_entry); /* validate that new_entry is not past end of SMB */ if (new_entry >= end_of_smb) { diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index 8b0502cd39afb6b27f89b24fdb63e8122f162aee..aa23c00367ec4644118090a73d5f9eb995229555 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -398,6 +398,12 @@ int build_ntlmssp_auth_blob(unsigned char **pbuffer, goto setup_ntlmv2_ret; } *pbuffer = kmalloc(size_of_ntlmssp_blob(ses), GFP_KERNEL); + if (!*pbuffer) { + rc = -ENOMEM; + cifs_dbg(VFS, "Error %d during NTLMSSP allocation\n", rc); + *buflen = 0; + goto setup_ntlmv2_ret; + } sec_blob = (AUTHENTICATE_MESSAGE *)*pbuffer; memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8); 
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c index 1238cd3552f9cc8e8f8fc560117af37cd224aa13..0267d8cbc9966cd7e2327ee98f014344ce5d7c0c 100644 --- a/fs/cifs/smb2inode.c +++ b/fs/cifs/smb2inode.c @@ -267,7 +267,7 @@ smb2_set_file_info(struct inode *inode, const char *full_path, int rc; if ((buf->CreationTime == 0) && (buf->LastAccessTime == 0) && - (buf->LastWriteTime == 0) && (buf->ChangeTime) && + (buf->LastWriteTime == 0) && (buf->ChangeTime == 0) && (buf->Attributes == 0)) return 0; /* would be a no op, no sense sending this */ diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c index 7b08a1446a7fbd08f2f245fa2dbb75261eb3f5b2..efdfdb47a7dd35697cad7baf2eb82c731de6e847 100644 --- a/fs/cifs/smb2misc.c +++ b/fs/cifs/smb2misc.c @@ -211,6 +211,13 @@ smb2_check_message(char *buf, unsigned int length, struct TCP_Server_Info *srvr) if (clc_len == 4 + len + 1) return 0; + /* + * Some windows servers (win2016) will pad also the final + * PDU in a compound to 8 bytes. + */ + if (((clc_len + 7) & ~7) == len) + return 0; + /* * MacOS server pads after SMB2.1 write response with 3 bytes * of junk. Other servers match RFC1001 len to actual diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 83267ac3a3f0274fdc936e42bf786c4f5aa581b4..759cbbf7b1af1514ddff280a367ef0c8c5091ae7 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -332,6 +332,8 @@ smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon) FS_ATTRIBUTE_INFORMATION); SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid, FS_DEVICE_INFORMATION); + SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid, + FS_VOLUME_INFORMATION); SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid, FS_SECTOR_SIZE_INFORMATION); /* SMB3 specific */ SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); @@ -383,7 +385,10 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon, oparms.tcon = tcon; oparms.desired_access = FILE_READ_ATTRIBUTES; oparms.disposition = FILE_OPEN; - oparms.create_options = 0; + if (backup_cred(cifs_sb)) + oparms.create_options = CREATE_OPEN_BACKUP_INTENT; + else + oparms.create_options = 0; oparms.fid = &fid; oparms.reconnect = false; @@ -532,7 +537,10 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon, oparms.tcon = tcon; oparms.desired_access = FILE_READ_EA; oparms.disposition = FILE_OPEN; - oparms.create_options = 0; + if (backup_cred(cifs_sb)) + oparms.create_options = CREATE_OPEN_BACKUP_INTENT; + else + oparms.create_options = 0; oparms.fid = &fid; oparms.reconnect = false; @@ -611,7 +619,10 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon, oparms.tcon = tcon; oparms.desired_access = FILE_WRITE_EA; oparms.disposition = FILE_OPEN; - oparms.create_options = 0; + if (backup_cred(cifs_sb)) + oparms.create_options = CREATE_OPEN_BACKUP_INTENT; + else + oparms.create_options = 0; oparms.fid = &fid; oparms.reconnect = false; @@ -1129,6 +1140,13 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon, } +/* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */ +#define GMT_TOKEN_SIZE 50 + +/* + * Input buffer contains (empty) struct smb_snapshot array with size filled in + * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2 + */ static int smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon, struct cifsFileInfo *cfile, void __user *ioc_buf) @@ -1158,14 +1176,27 @@ smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon, kfree(retbuf); return rc; } - if 
(snapshot_in.snapshot_array_size < sizeof(struct smb_snapshot_array)) { - rc = -ERANGE; - kfree(retbuf); - return rc; - } - if (ret_data_len > snapshot_in.snapshot_array_size) - ret_data_len = snapshot_in.snapshot_array_size; + /* + * Check for min size, ie not large enough to fit even one GMT + * token (snapshot). On the first ioctl some users may pass in + * smaller size (or zero) to simply get the size of the array + * so the user space caller can allocate sufficient memory + * and retry the ioctl again with larger array size sufficient + * to hold all of the snapshot GMT tokens on the second try. + */ + if (snapshot_in.snapshot_array_size < GMT_TOKEN_SIZE) + ret_data_len = sizeof(struct smb_snapshot_array); + + /* + * We return struct SRV_SNAPSHOT_ARRAY, followed by + * the snapshot array (of 50 byte GMT tokens) each + * representing an available previous version of the data + */ + if (ret_data_len > (snapshot_in.snapshot_array_size + + sizeof(struct smb_snapshot_array))) + ret_data_len = snapshot_in.snapshot_array_size + + sizeof(struct smb_snapshot_array); if (copy_to_user(ioc_buf, retbuf, ret_data_len)) rc = -EFAULT; @@ -1193,7 +1224,10 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon, oparms.tcon = tcon; oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA; oparms.disposition = FILE_OPEN; - oparms.create_options = 0; + if (backup_cred(cifs_sb)) + oparms.create_options = CREATE_OPEN_BACKUP_INTENT; + else + oparms.create_options = 0; oparms.fid = fid; oparms.reconnect = false; @@ -1469,7 +1503,10 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon, oparms.tcon = tcon; oparms.desired_access = FILE_READ_ATTRIBUTES; oparms.disposition = FILE_OPEN; - oparms.create_options = 0; + if (backup_cred(cifs_sb)) + oparms.create_options = CREATE_OPEN_BACKUP_INTENT; + else + oparms.create_options = 0; oparms.fid = &fid; oparms.reconnect = false; @@ -3178,7 +3215,7 @@ struct smb_version_values smb21_values = { struct smb_version_values smb3any_values = { .version_string = SMB3ANY_VERSION_STRING, .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */ - .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION, + .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING, .large_lock_type = 0, .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK, .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK, @@ -3198,7 +3235,7 @@ struct smb_version_values smb3any_values = { struct smb_version_values smbdefault_values = { .version_string = SMBDEFAULT_VERSION_STRING, .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */ - .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION, + .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING, .large_lock_type = 0, .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK, .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK, @@ -3218,7 +3255,7 @@ struct smb_version_values smbdefault_values = { struct smb_version_values smb30_values = { .version_string = SMB30_VERSION_STRING, .protocol_id = SMB30_PROT_ID, - .req_capabilities = 
SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION, + .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING, .large_lock_type = 0, .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK, .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK, @@ -3238,7 +3275,7 @@ struct smb_version_values smb30_values = { struct smb_version_values smb302_values = { .version_string = SMB302_VERSION_STRING, .protocol_id = SMB302_PROT_ID, - .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION, + .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING, .large_lock_type = 0, .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK, .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK, @@ -3259,7 +3296,7 @@ struct smb_version_values smb302_values = { struct smb_version_values smb311_values = { .version_string = SMB311_VERSION_STRING, .protocol_id = SMB311_PROT_ID, - .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION, + .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING, .large_lock_type = 0, .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK, .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK, diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 71b81980787fce27a478e080faf276e80f9d65a1..69309538ffb807f42a51e1ef6df5617402a79e3b 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -393,7 +393,7 @@ small_smb2_init(__le16 smb2_command, struct cifs_tcon *tcon, pdu->hdr.smb2_buf_length = cpu_to_be32(total_len); if (tcon != NULL) { -#ifdef CONFIG_CIFS_STATS2 +#ifdef CONFIG_CIFS_STATS uint16_t com_code = le16_to_cpu(smb2_command); cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]); #endif @@ -1816,6 +1816,9 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path, if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) || *oplock == SMB2_OPLOCK_LEVEL_NONE) req->RequestedOplockLevel = *oplock; + else if (!(server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING) && + (oparms->create_options & CREATE_NOT_FILE)) + req->RequestedOplockLevel = *oplock; /* no srv lease support */ else { rc = add_lease_context(server, iov, &n_iov, oplock); if (rc) { @@ -2936,33 +2939,38 @@ num_entries(char *bufstart, char *end_of_buf, char **lastentry, size_t size) int len; unsigned int entrycount = 0; unsigned int next_offset = 0; - FILE_DIRECTORY_INFO *entryptr; + char *entryptr; + FILE_DIRECTORY_INFO *dir_info; if (bufstart == NULL) return 0; - entryptr = (FILE_DIRECTORY_INFO *)bufstart; + entryptr = bufstart; while (1) { - entryptr = (FILE_DIRECTORY_INFO *) - ((char *)entryptr + next_offset); - - if ((char *)entryptr + size > end_of_buf) { + if (entryptr + next_offset < entryptr || + entryptr + next_offset > end_of_buf || + entryptr + next_offset + size > end_of_buf) { cifs_dbg(VFS, "malformed search entry would overflow\n"); break; } - len = le32_to_cpu(entryptr->FileNameLength); - if ((char *)entryptr 
+ len + size > end_of_buf) { + entryptr = entryptr + next_offset; + dir_info = (FILE_DIRECTORY_INFO *)entryptr; + + len = le32_to_cpu(dir_info->FileNameLength); + if (entryptr + len < entryptr || + entryptr + len > end_of_buf || + entryptr + len + size > end_of_buf) { cifs_dbg(VFS, "directory entry name would overflow frame end of buf %p\n", end_of_buf); break; } - *lastentry = (char *)entryptr; + *lastentry = entryptr; entrycount++; - next_offset = le32_to_cpu(entryptr->NextEntryOffset); + next_offset = le32_to_cpu(dir_info->NextEntryOffset); if (!next_offset) break; } @@ -3455,6 +3463,9 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon, } else if (level == FS_SECTOR_SIZE_INFORMATION) { max_len = sizeof(struct smb3_fs_ss_info); min_len = sizeof(struct smb3_fs_ss_info); + } else if (level == FS_VOLUME_INFORMATION) { + max_len = sizeof(struct smb3_fs_vol_info) + MAX_VOL_LABEL_LEN; + min_len = sizeof(struct smb3_fs_vol_info); } else { cifs_dbg(FYI, "Invalid qfsinfo level %d\n", level); return -EINVAL; @@ -3495,6 +3506,11 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon, tcon->ss_flags = le32_to_cpu(ss_info->Flags); tcon->perf_sector_size = le32_to_cpu(ss_info->PhysicalBytesPerSectorForPerf); + } else if (level == FS_VOLUME_INFORMATION) { + struct smb3_fs_vol_info *vol_info = (struct smb3_fs_vol_info *) + (offset + (char *)rsp); + tcon->vol_serial_number = vol_info->VolumeSerialNumber; + tcon->vol_create_time = vol_info->VolumeCreationTime; } qfsattr_exit: diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h index c2ec934be96891a645537140df279b0fb3e2fcbe..e5245405972566b77b3d35f97315f2f16a2748d3 100644 --- a/fs/cifs/smb2pdu.h +++ b/fs/cifs/smb2pdu.h @@ -1108,6 +1108,17 @@ struct smb3_fs_ss_info { __le32 ByteOffsetForPartitionAlignment; } __packed; +/* volume info struct - see MS-FSCC 2.5.9 */ +#define MAX_VOL_LABEL_LEN 32 +struct smb3_fs_vol_info { + __le64 VolumeCreationTime; + __u32 VolumeSerialNumber; + __le32 VolumeLabelLength; /* includes trailing null */ + __u8 SupportsObjects; /* True if eg like NTFS, supports objects */ + __u8 Reserved; + __u8 VolumeLabel[0]; /* variable len */ +} __packed; + /* partial list of QUERY INFO levels */ #define FILE_DIRECTORY_INFORMATION 1 #define FILE_FULL_DIRECTORY_INFORMATION 2 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index 56fb26127fef2544c52838674b2e8b84ae873807..d2a1a79fa324b29ca61af0558c8ab4a900f4b206 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c @@ -1777,6 +1777,16 @@ void configfs_unregister_group(struct config_group *group) struct dentry *dentry = group->cg_item.ci_dentry; struct dentry *parent = group->cg_item.ci_parent->ci_dentry; + mutex_lock(&subsys->su_mutex); + if (!group->cg_item.ci_parent->ci_group) { + /* + * The parent has already been unlinked and detached + * due to a rmdir. 
+ */ + goto unlink_group; + } + mutex_unlock(&subsys->su_mutex); + inode_lock_nested(d_inode(parent), I_MUTEX_PARENT); spin_lock(&configfs_dirent_lock); configfs_detach_prep(dentry, NULL); @@ -1791,6 +1801,7 @@ void configfs_unregister_group(struct config_group *group) dput(dentry); mutex_lock(&subsys->su_mutex); +unlink_group: unlink_group(group); mutex_unlock(&subsys->su_mutex); } diff --git a/fs/dcache.c b/fs/dcache.c index 999334aa656a3f3154257fbcfbe8fa2ebb5e9d9a..26c798d79add434d7b8e408b78a47dd78a044175 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -292,7 +292,8 @@ void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry spin_unlock(&dentry->d_lock); name->name = p->name; } else { - memcpy(name->inline_name, dentry->d_iname, DNAME_INLINE_LEN); + memcpy(name->inline_name, dentry->d_iname, + dentry->d_name.len + 1); spin_unlock(&dentry->d_lock); name->name = name->inline_name; } diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c index d5babc9f222bf38fdfd782fac8fd825b1bae4bf5..dc676714454aac9ffaafb18244be753241c352ec 100644 --- a/fs/ext4/dir.c +++ b/fs/ext4/dir.c @@ -75,7 +75,7 @@ int __ext4_check_dir_entry(const char *function, unsigned int line, else if (unlikely(rlen < EXT4_DIR_REC_LEN(de->name_len))) error_msg = "rec_len is too small for name_len"; else if (unlikely(((char *) de - buf) + rlen > size)) - error_msg = "directory entry across range"; + error_msg = "directory entry overrun"; else if (unlikely(le32_to_cpu(de->inode) > le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count))) error_msg = "inode out of bounds"; @@ -84,18 +84,16 @@ int __ext4_check_dir_entry(const char *function, unsigned int line, if (filp) ext4_error_file(filp, function, line, bh->b_blocknr, - "bad entry in directory: %s - offset=%u(%u), " - "inode=%u, rec_len=%d, name_len=%d", - error_msg, (unsigned) (offset % size), - offset, le32_to_cpu(de->inode), - rlen, de->name_len); + "bad entry in directory: %s - offset=%u, " + "inode=%u, rec_len=%d, name_len=%d, size=%d", + error_msg, offset, le32_to_cpu(de->inode), + rlen, de->name_len, size); else ext4_error_inode(dir, function, line, bh->b_blocknr, - "bad entry in directory: %s - offset=%u(%u), " - "inode=%u, rec_len=%d, name_len=%d", - error_msg, (unsigned) (offset % size), - offset, le32_to_cpu(de->inode), - rlen, de->name_len); + "bad entry in directory: %s - offset=%u, " + "inode=%u, rec_len=%d, name_len=%d, size=%d", + error_msg, offset, le32_to_cpu(de->inode), + rlen, de->name_len, size); return 1; } diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 0abb30d19fa18b8fb463615ee785caf9f9d0a8db..c96778c39885b9cc2fd8634dfc756975fef07457 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -714,6 +714,9 @@ struct fsxattr { /* Max physical block we can address w/o extents */ #define EXT4_MAX_BLOCK_FILE_PHYS 0xFFFFFFFF +/* Max logical block we can support */ +#define EXT4_MAX_LOGICAL_BLOCK 0xFFFFFFFF + /* * Structure of an inode on the disk */ diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index b549cfd2d7d3f818429385ac78e5d75f447418e3..4e1d62ba0703f96ddb5811bb993cf56e794c53fc 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -1759,6 +1759,7 @@ bool empty_inline_dir(struct inode *dir, int *has_inline_data) { int err, inline_size; struct ext4_iloc iloc; + size_t inline_len; void *inline_pos; unsigned int offset; struct ext4_dir_entry_2 *de; @@ -1786,8 +1787,9 @@ bool empty_inline_dir(struct inode *dir, int *has_inline_data) goto out; } + inline_len = ext4_get_inline_size(dir); offset = EXT4_INLINE_DOTDOT_SIZE; - while (offset < dir->i_size) { 
+ while (offset < inline_len) { de = ext4_get_inline_entry(dir, &iloc, offset, &inline_pos, &inline_size); if (ext4_check_dir_entry(dir, NULL, de, diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index f9baa59de0e2083b7463d5a49bcd224ea26b8ca3..9808df52ceca6472a5e2e2e4d2269b9bd13a6109 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -3407,11 +3407,16 @@ static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length, { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); unsigned int blkbits = inode->i_blkbits; - unsigned long first_block = offset >> blkbits; - unsigned long last_block = (offset + length - 1) >> blkbits; + unsigned long first_block, last_block; struct ext4_map_blocks map; int ret; + if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK) + return -EINVAL; + first_block = offset >> blkbits; + last_block = min_t(loff_t, (offset + length - 1) >> blkbits, + EXT4_MAX_LOGICAL_BLOCK); + if (WARN_ON_ONCE(ext4_has_inline_data(inode))) return -ERANGE; diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 048c586d9a8b58f96944321674129e2fca11909e..1792999eec91088bebed1c52c8f933f5c830b096 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -2152,7 +2153,8 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac) * This should tell if fe_len is exactly power of 2 */ if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0) - ac->ac_2order = i - 1; + ac->ac_2order = array_index_nospec(i - 1, + sb->s_blocksize_bits + 2); } /* if stream allocation is enabled, use global goal */ diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c index 27b9a76a0dfabeff3ee9ec65a15d8d5e711d851a..38e6a846aac16a463249dddba9d0cd7eb36ccff6 100644 --- a/fs/ext4/mmp.c +++ b/fs/ext4/mmp.c @@ -49,7 +49,6 @@ static int write_mmp_block(struct super_block *sb, struct buffer_head *bh) */ sb_start_write(sb); ext4_mmp_csum_set(sb, mmp); - mark_buffer_dirty(bh); lock_buffer(bh); bh->b_end_io = end_buffer_write_sync; get_bh(bh); @@ -186,11 +185,8 @@ static int kmmpd(void *data) goto exit_thread; } - if (sb_rdonly(sb)) { - ext4_warning(sb, "kmmpd being stopped since filesystem " - "has been remounted as readonly."); - goto exit_thread; - } + if (sb_rdonly(sb)) + break; diff = jiffies - last_update_time; if (diff < mmp_update_interval * HZ) diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 6747861f9b705c9935c7d96ca8f6a56c2792f747..eb0d8ee39827a83dca3c1fec3d9def1947f1a7ee 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -1397,6 +1397,7 @@ static struct buffer_head * ext4_find_entry (struct inode *dir, goto cleanup_and_exit; dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, " "falling back\n")); + ret = NULL; } nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb); if (!nblocks) { @@ -3513,6 +3514,12 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, int credits; u8 old_file_type; + if (new.inode && new.inode->i_nlink == 0) { + EXT4_ERROR_INODE(new.inode, + "target of rename is already freed"); + return -EFSCORRUPTED; + } + if ((ext4_test_inode_flag(new_dir, EXT4_INODE_PROJINHERIT)) && (!projid_eq(EXT4_I(new_dir)->i_projid, EXT4_I(old_dentry->d_inode)->i_projid))) diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index 823c0b82dfeb006ea546caeaa38e7435a1baca3b..e344e606c054c23f9bd54ada9b8dc4823a809411 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c @@ -19,6 +19,7 @@ int ext4_resize_begin(struct super_block *sb) { + struct ext4_sb_info *sbi = EXT4_SB(sb); int ret = 0; if (!capable(CAP_SYS_RESOURCE)) @@ -29,7 +30,7 
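The ext4_mb_regular_allocator() hunk above is an instance of the usual Spectre-v1 hardening pattern: after the architectural bounds check, the index is clamped with array_index_nospec() so a mispredicted branch cannot be used to read out of bounds under speculation. A minimal, hypothetical sketch of that pattern (the table and helper are illustrative, not from this patch):

	#include <linux/errno.h>
	#include <linux/nospec.h>

	static int read_entry(const int *table, size_t nr, size_t idx)
	{
		/* Architectural bounds check. */
		if (idx >= nr)
			return -EINVAL;
		/* Clamp the index for the speculative path as well. */
		idx = array_index_nospec(idx, nr);
		return table[idx];
	}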
@@ int ext4_resize_begin(struct super_block *sb) * because the user tools have no way of handling this. Probably a * bad time to do it anyways. */ - if (EXT4_SB(sb)->s_sbh->b_blocknr != + if (EXT4_B2C(sbi, sbi->s_sbh->b_blocknr) != le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) { ext4_warning(sb, "won't resize using backup superblock at %llu", (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr); @@ -1956,6 +1957,26 @@ retry: } } + /* + * Make sure the last group has enough space so that it's + * guaranteed to have enough space for all metadata blocks + * that it might need to hold. (We might not need to store + * the inode table blocks in the last block group, but there + * will be cases where this might be needed.) + */ + if ((ext4_group_first_block_no(sb, n_group) + + ext4_group_overhead_blocks(sb, n_group) + 2 + + sbi->s_itb_per_group + sbi->s_cluster_ratio) >= n_blocks_count) { + n_blocks_count = ext4_group_first_block_no(sb, n_group); + n_group--; + n_blocks_count_retry = 0; + if (resize_inode) { + iput(resize_inode); + resize_inode = NULL; + } + goto retry; + } + /* extend the last group */ if (n_group == o_group) add = n_blocks_count - o_blocks_count; diff --git a/fs/ext4/super.c b/fs/ext4/super.c index f30d2bf40471eb489e537a33d4da09d0d0975bfc..9dbd27f7b778245f8a580cddeb73ee77ace764aa 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -2085,6 +2085,8 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb, SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb); if (test_opt(sb, DATA_ERR_ABORT)) SEQ_OPTS_PUTS("data_err=abort"); + if (DUMMY_ENCRYPTION_ENABLED(sbi)) + SEQ_OPTS_PUTS("test_dummy_encryption"); ext4_show_quota_options(seq, sb); return 0; @@ -4311,11 +4313,13 @@ no_journal: block = ext4_count_free_clusters(sb); ext4_free_blocks_count_set(sbi->s_es, EXT4_C2B(sbi, block)); + ext4_superblock_csum_set(sb); err = percpu_counter_init(&sbi->s_freeclusters_counter, block, GFP_KERNEL); if (!err) { unsigned long freei = ext4_count_free_inodes(sb); sbi->s_es->s_free_inodes_count = cpu_to_le32(freei); + ext4_superblock_csum_set(sb); err = percpu_counter_init(&sbi->s_freeinodes_counter, freei, GFP_KERNEL); } @@ -5163,6 +5167,8 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) if (sbi->s_journal) ext4_mark_recovery_complete(sb, es); + if (sbi->s_mmp_tsk) + kthread_stop(sbi->s_mmp_tsk); } else { /* Make sure we can mount this feature set readwrite */ if (ext4_has_feature_readonly(sb) || diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c index e21afd52e7d7906bd1ff473b090a1d938307c492..bdfc2a2de8f252603f695104ff775c296b92ba10 100644 --- a/fs/ext4/sysfs.c +++ b/fs/ext4/sysfs.c @@ -278,8 +278,12 @@ static ssize_t ext4_attr_show(struct kobject *kobj, case attr_pointer_ui: if (!ptr) return 0; - return snprintf(buf, PAGE_SIZE, "%u\n", - *((unsigned int *) ptr)); + if (a->attr_ptr == ptr_ext4_super_block_offset) + return snprintf(buf, PAGE_SIZE, "%u\n", + le32_to_cpup(ptr)); + else + return snprintf(buf, PAGE_SIZE, "%u\n", + *((unsigned int *) ptr)); case attr_pointer_atomic: if (!ptr) return 0; @@ -312,7 +316,10 @@ static ssize_t ext4_attr_store(struct kobject *kobj, ret = kstrtoul(skip_spaces(buf), 0, &t); if (ret) return ret; - *((unsigned int *) ptr) = t; + if (a->attr_ptr == ptr_ext4_super_block_offset) + *((__le32 *) ptr) = cpu_to_le32(t); + else + *((unsigned int *) ptr) = t; return len; case attr_inode_readahead: return inode_readahead_blks_store(a, sbi, buf, len); diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index 
c7c8c16ccd936b699f5cb8154e4ffe7cfb890d00..9bc50eef61279dd30ebb16b750909f3e126fb3f6 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -189,6 +189,8 @@ ext4_xattr_check_entries(struct ext4_xattr_entry *entry, void *end, struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e); if ((void *)next >= end) return -EFSCORRUPTED; + if (strnlen(e->e_name, e->e_name_len) != e->e_name_len) + return -EFSCORRUPTED; e = next; } diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 85142e5df88b0d81b5d46ff4eb83c76df1310e2a..e10bd73f0723ec5e95c254eb8dc10912861ef2a7 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -2190,6 +2190,10 @@ static int f2fs_set_data_page_dirty(struct page *page) if (!PageUptodate(page)) SetPageUptodate(page); + /* don't remain PG_checked flag which was set during GC */ + if (is_cold_data(page)) + clear_cold_data(page); + if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) { if (!IS_ATOMIC_WRITTEN_PAGE(page)) { register_inmem_page(inode, page); diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 3b34004a71c157660c0533ef0a26fe7b827dd963..54f8520ad7a2f8e6ae17135e5280fbe42edc2757 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -1766,8 +1766,13 @@ static inline struct page *f2fs_grab_cache_page(struct address_space *mapping, pgoff_t index, bool for_write) { #ifdef CONFIG_F2FS_FAULT_INJECTION - struct page *page = find_lock_page(mapping, index); + struct page *page; + if (!for_write) + page = find_get_page_flags(mapping, index, + FGP_LOCK | FGP_ACCESSED); + else + page = find_lock_page(mapping, index); if (page) return page; diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 87e654c53c317a5bc745f463ab818d26fb91cb36..6f589730782d5cc8b1d193e7a073f54be295ffb7 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -1803,7 +1803,7 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg) struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct super_block *sb = sbi->sb; __u32 in; - int ret; + int ret = 0; if (!capable(CAP_SYS_ADMIN)) return -EPERM; diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index f2f897cd23c9677a375b0cb4650af49ea4bdfe3c..f22884418e92d15ff42b4de85f1d21cf886d741e 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -958,7 +958,13 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi, goto next; sum = page_address(sum_page); - f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer))); + if (type != GET_SUM_TYPE((&sum->footer))) { + f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent segment (%u) " + "type [%d, %d] in SSA and SIT", + segno, type, GET_SUM_TYPE((&sum->footer))); + set_sbi_flag(sbi, SBI_NEED_FSCK); + goto next; + } /* * this is to avoid deadlock: diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c index 8322e4e7bb3fc432aabc8067b7ab459274c6d326..888a9dc13677f9e8b4723413ad5a877f1f181daf 100644 --- a/fs/f2fs/inline.c +++ b/fs/f2fs/inline.c @@ -128,6 +128,16 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page) if (err) return err; + if (unlikely(dn->data_blkaddr != NEW_ADDR)) { + f2fs_put_dnode(dn); + set_sbi_flag(fio.sbi, SBI_NEED_FSCK); + f2fs_msg(fio.sbi->sb, KERN_WARNING, + "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, " + "run fsck to fix.", + __func__, dn->inode->i_ino, dn->data_blkaddr); + return -EINVAL; + } + f2fs_bug_on(F2FS_P_SB(page), PageWriteback(page)); read_inline_data(page, dn->inode_page); @@ -365,6 +375,17 @@ static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage, if (err) goto out; + if (unlikely(dn.data_blkaddr != NEW_ADDR)) { + f2fs_put_dnode(&dn); + set_sbi_flag(F2FS_P_SB(page), SBI_NEED_FSCK); 
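The ext4_xattr_check_entries() change above rejects xattr entries whose name contains an embedded NUL before e_name_len bytes. As a hedged sketch, the check generalises to any on-disk length field that later code will treat as a C-string length (the helper name is hypothetical):

	#include <linux/string.h>
	#include <linux/types.h>

	/* True only if the first len bytes of name contain no NUL. */
	static bool name_len_is_sane(const char *name, size_t len)
	{
		return strnlen(name, len) == len;
	}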
+ f2fs_msg(F2FS_P_SB(page)->sb, KERN_WARNING, + "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, " + "run fsck to fix.", + __func__, dir->i_ino, dn.data_blkaddr); + err = -EINVAL; + goto out; + } + f2fs_wait_on_page_writeback(page, DATA, true); zero_user_segment(page, MAX_INLINE_DATA(dir), PAGE_SIZE); @@ -481,6 +502,7 @@ static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage, return 0; recover: lock_page(ipage); + f2fs_wait_on_page_writeback(ipage, NODE, true); memcpy(inline_dentry, backup_dentry, MAX_INLINE_DATA(dir)); f2fs_i_depth_write(dir, 0); f2fs_i_size_write(dir, MAX_INLINE_DATA(dir)); diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index f623da26159f668f97767dbcd631bbe9af9bb445..712505ec5de4851625b996afd465d5eedea40621 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -1610,7 +1610,9 @@ next_step: !is_cold_node(page))) continue; lock_node: - if (!trylock_page(page)) + if (wbc->sync_mode == WB_SYNC_ALL) + lock_page(page); + else if (!trylock_page(page)) continue; if (unlikely(page->mapping != NODE_MAPPING(sbi))) { diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 7c05bd4222b2921ff6ee749a585be810f28a5927..3c7bbbae0afa48650af0d32a5d52d0140c517dbf 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -3240,7 +3240,7 @@ static int build_curseg(struct f2fs_sb_info *sbi) return restore_curseg_summaries(sbi); } -static void build_sit_entries(struct f2fs_sb_info *sbi) +static int build_sit_entries(struct f2fs_sb_info *sbi) { struct sit_info *sit_i = SIT_I(sbi); struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); @@ -3250,6 +3250,8 @@ static void build_sit_entries(struct f2fs_sb_info *sbi) int sit_blk_cnt = SIT_BLK_CNT(sbi); unsigned int i, start, end; unsigned int readed, start_blk = 0; + int err = 0; + block_t total_node_blocks = 0; do { readed = ra_meta_pages(sbi, start_blk, BIO_MAX_PAGES, @@ -3268,8 +3270,12 @@ static void build_sit_entries(struct f2fs_sb_info *sbi) sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)]; f2fs_put_page(page, 1); - check_block_count(sbi, start, &sit); + err = check_block_count(sbi, start, &sit); + if (err) + return err; seg_info_from_raw_sit(se, &sit); + if (IS_NODESEG(se->type)) + total_node_blocks += se->valid_blocks; /* build discard map only one time */ if (f2fs_discard_en(sbi)) { @@ -3302,9 +3308,15 @@ static void build_sit_entries(struct f2fs_sb_info *sbi) sit = sit_in_journal(journal, i); old_valid_blocks = se->valid_blocks; + if (IS_NODESEG(se->type)) + total_node_blocks -= old_valid_blocks; - check_block_count(sbi, start, &sit); + err = check_block_count(sbi, start, &sit); + if (err) + break; seg_info_from_raw_sit(se, &sit); + if (IS_NODESEG(se->type)) + total_node_blocks += se->valid_blocks; if (f2fs_discard_en(sbi)) { if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) { @@ -3323,6 +3335,16 @@ static void build_sit_entries(struct f2fs_sb_info *sbi) se->valid_blocks - old_valid_blocks; } up_read(&curseg->journal_rwsem); + + if (!err && total_node_blocks != valid_node_count(sbi)) { + f2fs_msg(sbi->sb, KERN_ERR, + "SIT is corrupted node# %u vs %u", + total_node_blocks, valid_node_count(sbi)); + set_sbi_flag(sbi, SBI_NEED_FSCK); + err = -EINVAL; + } + + return err; } static void init_free_segmap(struct f2fs_sb_info *sbi) @@ -3492,7 +3514,9 @@ int build_segment_manager(struct f2fs_sb_info *sbi) return err; /* reinit free segmap based on SIT */ - build_sit_entries(sbi); + err = build_sit_entries(sbi); + if (err) + return err; init_free_segmap(sbi); err = build_dirty_segmap(sbi); diff --git a/fs/f2fs/segment.h 
b/fs/f2fs/segment.h index e0a6cc23ace3a66ddcd5975c0e3f5ee14ace3946..4dfb5080098ff49af15154fce91e0cc686df5e76 100644 --- a/fs/f2fs/segment.h +++ b/fs/f2fs/segment.h @@ -414,6 +414,8 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi, if (test_and_clear_bit(segno, free_i->free_segmap)) { free_i->free_segments++; + if (IS_CURSEC(sbi, secno)) + goto skip_free; next = find_next_bit(free_i->free_segmap, start_segno + sbi->segs_per_sec, start_segno); if (next >= start_segno + sbi->segs_per_sec) { @@ -421,6 +423,7 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi, free_i->free_sections++; } } +skip_free: spin_unlock(&free_i->segmap_lock); } @@ -625,7 +628,7 @@ static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr) /* * Summary block is always treated as an invalid block */ -static inline void check_block_count(struct f2fs_sb_info *sbi, +static inline int check_block_count(struct f2fs_sb_info *sbi, int segno, struct f2fs_sit_entry *raw_sit) { #ifdef CONFIG_F2FS_CHECK_FS @@ -647,11 +650,25 @@ static inline void check_block_count(struct f2fs_sb_info *sbi, cur_pos = next_pos; is_valid = !is_valid; } while (cur_pos < sbi->blocks_per_seg); - BUG_ON(GET_SIT_VBLOCKS(raw_sit) != valid_blocks); + + if (unlikely(GET_SIT_VBLOCKS(raw_sit) != valid_blocks)) { + f2fs_msg(sbi->sb, KERN_ERR, + "Mismatch valid blocks %d vs. %d", + GET_SIT_VBLOCKS(raw_sit), valid_blocks); + set_sbi_flag(sbi, SBI_NEED_FSCK); + return -EINVAL; + } #endif /* check segment usage, and check boundary of a given segment number */ - f2fs_bug_on(sbi, GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg - || segno > TOTAL_SEGS(sbi) - 1); + if (unlikely(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg + || segno > TOTAL_SEGS(sbi) - 1)) { + f2fs_msg(sbi->sb, KERN_ERR, + "Wrong valid blocks %d or segno %u", + GET_SIT_VBLOCKS(raw_sit), segno); + set_sbi_flag(sbi, SBI_NEED_FSCK); + return -EINVAL; + } + return 0; } static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi, diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 400c00058bad9553d32477366a0904a42af09709..eae35909fa51a00b5bb053b7bcd9c1de21a7026b 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -1883,12 +1883,17 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi) struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); unsigned int ovp_segments, reserved_segments; unsigned int main_segs, blocks_per_seg; + unsigned int sit_segs, nat_segs; + unsigned int sit_bitmap_size, nat_bitmap_size; + unsigned int log_blocks_per_seg; int i; total = le32_to_cpu(raw_super->segment_count); fsmeta = le32_to_cpu(raw_super->segment_count_ckpt); - fsmeta += le32_to_cpu(raw_super->segment_count_sit); - fsmeta += le32_to_cpu(raw_super->segment_count_nat); + sit_segs = le32_to_cpu(raw_super->segment_count_sit); + fsmeta += sit_segs; + nat_segs = le32_to_cpu(raw_super->segment_count_nat); + fsmeta += nat_segs; fsmeta += le32_to_cpu(ckpt->rsvd_segment_count); fsmeta += le32_to_cpu(raw_super->segment_count_ssa); @@ -1919,6 +1924,18 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi) return 1; } + sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize); + nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize); + log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg); + + if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 || + nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) { + f2fs_msg(sbi->sb, KERN_ERR, + "Wrong bitmap size: sit: %u, nat:%u", + sit_bitmap_size, nat_bitmap_size); + return 1; + } + if 
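check_block_count() above is converted from BUG()-style assertions into reporting and returning an error, so corrupted on-disk metadata fails the mount instead of crashing the kernel. A condensed, hypothetical sketch of that hardening shape, using the helpers that appear in this patch:

	static int check_counts(struct f2fs_sb_info *sbi, unsigned int stored,
				unsigned int computed)
	{
		if (unlikely(stored != computed)) {
			f2fs_msg(sbi->sb, KERN_ERR,
				 "Mismatch valid blocks %u vs. %u",
				 stored, computed);
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			return -EINVAL;	/* fail the mount, don't crash */
		}
		return 0;
	}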
(unlikely(f2fs_cp_error(sbi))) { f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck"); return 1; diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c index e2c258f717cd11214eb9fe9dd7f4bb202ccd6bc4..93af9d7dfcdca1fa68539aa8256f473760233fe6 100644 --- a/fs/f2fs/sysfs.c +++ b/fs/f2fs/sysfs.c @@ -9,6 +9,7 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ +#include #include #include #include @@ -381,7 +382,8 @@ static struct kobject f2fs_feat = { .kset = &f2fs_kset, }; -static int segment_info_seq_show(struct seq_file *seq, void *offset) +static int __maybe_unused segment_info_seq_show(struct seq_file *seq, + void *offset) { struct super_block *sb = seq->private; struct f2fs_sb_info *sbi = F2FS_SB(sb); @@ -408,7 +410,8 @@ static int segment_info_seq_show(struct seq_file *seq, void *offset) return 0; } -static int segment_bits_seq_show(struct seq_file *seq, void *offset) +static int __maybe_unused segment_bits_seq_show(struct seq_file *seq, + void *offset) { struct super_block *sb = seq->private; struct f2fs_sb_info *sbi = F2FS_SB(sb); @@ -432,7 +435,8 @@ static int segment_bits_seq_show(struct seq_file *seq, void *offset) return 0; } -static int iostat_info_seq_show(struct seq_file *seq, void *offset) +static int __maybe_unused iostat_info_seq_show(struct seq_file *seq, + void *offset) { struct super_block *sb = seq->private; struct f2fs_sb_info *sbi = F2FS_SB(sb); diff --git a/fs/fat/cache.c b/fs/fat/cache.c index e9bed49df6b71047b6658063e377fb59ef4c339e..78d501c1fb658f153a9683a95c560a3483c70cd7 100644 --- a/fs/fat/cache.c +++ b/fs/fat/cache.c @@ -225,7 +225,8 @@ static inline void cache_init(struct fat_cache_id *cid, int fclus, int dclus) int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus) { struct super_block *sb = inode->i_sb; - const int limit = sb->s_maxbytes >> MSDOS_SB(sb)->cluster_bits; + struct msdos_sb_info *sbi = MSDOS_SB(sb); + const int limit = sb->s_maxbytes >> sbi->cluster_bits; struct fat_entry fatent; struct fat_cache_id cid; int nr; @@ -234,6 +235,12 @@ int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus) *fclus = 0; *dclus = MSDOS_I(inode)->i_start; + if (!fat_valid_entry(sbi, *dclus)) { + fat_fs_error_ratelimit(sb, + "%s: invalid start cluster (i_pos %lld, start %08x)", + __func__, MSDOS_I(inode)->i_pos, *dclus); + return -EIO; + } if (cluster == 0) return 0; @@ -250,9 +257,8 @@ int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus) /* prevent the infinite loop of cluster chain */ if (*fclus > limit) { fat_fs_error_ratelimit(sb, - "%s: detected the cluster chain loop" - " (i_pos %lld)", __func__, - MSDOS_I(inode)->i_pos); + "%s: detected the cluster chain loop (i_pos %lld)", + __func__, MSDOS_I(inode)->i_pos); nr = -EIO; goto out; } @@ -262,9 +268,8 @@ int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus) goto out; else if (nr == FAT_ENT_FREE) { fat_fs_error_ratelimit(sb, - "%s: invalid cluster chain (i_pos %lld)", - __func__, - MSDOS_I(inode)->i_pos); + "%s: invalid cluster chain (i_pos %lld)", + __func__, MSDOS_I(inode)->i_pos); nr = -EIO; goto out; } else if (nr == FAT_ENT_EOF) { diff --git a/fs/fat/fat.h b/fs/fat/fat.h index 8fc1093da47d38ebf354e2b96c23d6fb5f086822..a0a00f3734bc18b7998b561a2d5a364bf7f1212f 100644 --- a/fs/fat/fat.h +++ b/fs/fat/fat.h @@ -348,6 +348,11 @@ static inline void fatent_brelse(struct fat_entry *fatent) fatent->fat_inode = NULL; } +static inline bool fat_valid_entry(struct 
msdos_sb_info *sbi, int entry) +{ + return FAT_START_ENT <= entry && entry < sbi->max_cluster; +} + extern void fat_ent_access_init(struct super_block *sb); extern int fat_ent_read(struct inode *inode, struct fat_entry *fatent, int entry); diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c index 48b2336692f9f70a3d8c230ee3a9169af5e634be..a40f36b1b292eb1a45f7cf15ba2881c598d127d3 100644 --- a/fs/fat/fatent.c +++ b/fs/fat/fatent.c @@ -23,7 +23,7 @@ static void fat12_ent_blocknr(struct super_block *sb, int entry, { struct msdos_sb_info *sbi = MSDOS_SB(sb); int bytes = entry + (entry >> 1); - WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry); + WARN_ON(!fat_valid_entry(sbi, entry)); *offset = bytes & (sb->s_blocksize - 1); *blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits); } @@ -33,7 +33,7 @@ static void fat_ent_blocknr(struct super_block *sb, int entry, { struct msdos_sb_info *sbi = MSDOS_SB(sb); int bytes = (entry << sbi->fatent_shift); - WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry); + WARN_ON(!fat_valid_entry(sbi, entry)); *offset = bytes & (sb->s_blocksize - 1); *blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits); } @@ -353,7 +353,7 @@ int fat_ent_read(struct inode *inode, struct fat_entry *fatent, int entry) int err, offset; sector_t blocknr; - if (entry < FAT_START_ENT || sbi->max_cluster <= entry) { + if (!fat_valid_entry(sbi, entry)) { fatent_brelse(fatent); fat_fs_error(sb, "invalid access to FAT (entry 0x%08x)", entry); return -EIO; diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c index de67745e1cd7d3fdda98965c9a38d417deb4af66..77946d6f617d0147a846b960f91406f7b06aa66a 100644 --- a/fs/fscache/operation.c +++ b/fs/fscache/operation.c @@ -66,7 +66,8 @@ void fscache_enqueue_operation(struct fscache_operation *op) ASSERT(op->processor != NULL); ASSERT(fscache_object_is_available(op->object)); ASSERTCMP(atomic_read(&op->usage), >, 0); - ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS); + ASSERTIFCMP(op->state != FSCACHE_OP_ST_IN_PROGRESS, + op->state, ==, FSCACHE_OP_ST_CANCELLED); fscache_stat(&fscache_n_op_enqueue); switch (op->flags & FSCACHE_OP_TYPE) { @@ -481,7 +482,8 @@ void fscache_put_operation(struct fscache_operation *op) struct fscache_cache *cache; _enter("{OBJ%x OP%x,%d}", - op->object->debug_id, op->debug_id, atomic_read(&op->usage)); + op->object ? 
op->object->debug_id : 0, + op->debug_id, atomic_read(&op->usage)); ASSERTCMP(atomic_read(&op->usage), >, 0); diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 261fd13a75c671407464a72ca5d811ac985a1918..ee8105af40010d5ab7f01ea223557130bb4cb66a 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -131,6 +131,16 @@ static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background) return !fc->initialized || (for_background && fc->blocked); } +static void fuse_drop_waiting(struct fuse_conn *fc) +{ + if (fc->connected) { + atomic_dec(&fc->num_waiting); + } else if (atomic_dec_and_test(&fc->num_waiting)) { + /* wake up aborters */ + wake_up_all(&fc->blocked_waitq); + } +} + static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages, bool for_background) { @@ -171,7 +181,7 @@ static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages, return req; out: - atomic_dec(&fc->num_waiting); + fuse_drop_waiting(fc); return ERR_PTR(err); } @@ -278,7 +288,7 @@ void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req) if (test_bit(FR_WAITING, &req->flags)) { __clear_bit(FR_WAITING, &req->flags); - atomic_dec(&fc->num_waiting); + fuse_drop_waiting(fc); } if (req->stolen_file) @@ -364,7 +374,7 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req) struct fuse_iqueue *fiq = &fc->iq; if (test_and_set_bit(FR_FINISHED, &req->flags)) - return; + goto put_request; spin_lock(&fiq->waitq.lock); list_del_init(&req->intr_entry); @@ -393,6 +403,7 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req) wake_up(&req->waitq); if (req->end) req->end(fc, req); +put_request: fuse_put_request(fc, req); } @@ -1941,11 +1952,14 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe, if (!fud) return -EPERM; + pipe_lock(pipe); + bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL); - if (!bufs) + if (!bufs) { + pipe_unlock(pipe); return -ENOMEM; + } - pipe_lock(pipe); nbuf = 0; rem = 0; for (idx = 0; idx < pipe->nrbufs && rem < len; idx++) @@ -2100,6 +2114,7 @@ void fuse_abort_conn(struct fuse_conn *fc) set_bit(FR_ABORTED, &req->flags); if (!test_bit(FR_LOCKED, &req->flags)) { set_bit(FR_PRIVATE, &req->flags); + __fuse_get_request(req); list_move(&req->list, &to_end1); } spin_unlock(&req->waitq.lock); @@ -2126,7 +2141,6 @@ void fuse_abort_conn(struct fuse_conn *fc) while (!list_empty(&to_end1)) { req = list_first_entry(&to_end1, struct fuse_req, list); - __fuse_get_request(req); list_del_init(&req->list); request_end(fc, req); } @@ -2137,6 +2151,11 @@ void fuse_abort_conn(struct fuse_conn *fc) } EXPORT_SYMBOL_GPL(fuse_abort_conn); +void fuse_wait_aborted(struct fuse_conn *fc) +{ + wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0); +} + int fuse_dev_release(struct inode *inode, struct file *file) { struct fuse_dev *fud = fuse_get_dev(file); @@ -2144,9 +2163,15 @@ int fuse_dev_release(struct inode *inode, struct file *file) if (fud) { struct fuse_conn *fc = fud->fc; struct fuse_pqueue *fpq = &fud->pq; + LIST_HEAD(to_end); + spin_lock(&fpq->lock); WARN_ON(!list_empty(&fpq->io)); - end_requests(fc, &fpq->processing); + list_splice_init(&fpq->processing, &to_end); + spin_unlock(&fpq->lock); + + end_requests(fc, &to_end); + /* Are we the last open device? 
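fuse_drop_waiting() above keeps the num_waiting accounting and lets the final in-flight request wake anyone blocked waiting for an aborted connection to drain. A condensed, hypothetical sketch of that wake-on-last-reference pattern:

	#include <linux/atomic.h>
	#include <linux/wait.h>

	static void drop_waiting(atomic_t *num_waiting,
				 wait_queue_head_t *blocked_waitq,
				 bool connected)
	{
		if (connected)
			atomic_dec(num_waiting);
		else if (atomic_dec_and_test(num_waiting))
			wake_up_all(blocked_waitq);	/* wake the aborter */
	}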
*/ if (atomic_dec_and_test(&fc->dev_count)) { WARN_ON(fc->iq.fasync != NULL); diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 6dfe526c832f96dfd620ae6e3cf611a84621cdc6..76d354eee035c1dfcb8467be1aa9739eb401e7ef 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -355,11 +355,12 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry, struct inode *inode; struct dentry *newent; bool outarg_valid = true; + bool locked; - fuse_lock_inode(dir); + locked = fuse_lock_inode(dir); err = fuse_lookup_name(dir->i_sb, get_node_id(dir), &entry->d_name, &outarg, &inode); - fuse_unlock_inode(dir); + fuse_unlock_inode(dir, locked); if (err == -ENOENT) { outarg_valid = false; err = 0; @@ -1332,6 +1333,7 @@ static int fuse_readdir(struct file *file, struct dir_context *ctx) struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_req *req; u64 attr_version = 0; + bool locked; if (is_bad_inode(inode)) return -EIO; @@ -1359,9 +1361,9 @@ static int fuse_readdir(struct file *file, struct dir_context *ctx) fuse_read_fill(req, file, ctx->pos, PAGE_SIZE, FUSE_READDIR); } - fuse_lock_inode(inode); + locked = fuse_lock_inode(inode); fuse_request_send(fc, req); - fuse_unlock_inode(inode); + fuse_unlock_inode(inode, locked); nbytes = req->out.args[0].size; err = req->out.h.error; fuse_put_request(fc, req); diff --git a/fs/fuse/file.c b/fs/fuse/file.c index cb7dff5c45d767991698058d8d52027572ebea41..fb4738ef162f370898c0ab3d274b3e687c11ad81 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -866,6 +866,7 @@ static int fuse_readpages_fill(void *_data, struct page *page) } if (WARN_ON(req->num_pages >= req->max_pages)) { + unlock_page(page); fuse_put_request(fc, req); return -EIO; } diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index d5773ca67ad2bbc36155a30fafa596dd7e44502a..e105640153ce2f1fac8c7924f0f8848688b0f9c4 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -852,6 +852,7 @@ void fuse_request_send_background_locked(struct fuse_conn *fc, /* Abort all requests */ void fuse_abort_conn(struct fuse_conn *fc); +void fuse_wait_aborted(struct fuse_conn *fc); /** * Invalidate inode attributes @@ -964,8 +965,8 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr, void fuse_set_initialized(struct fuse_conn *fc); -void fuse_unlock_inode(struct inode *inode); -void fuse_lock_inode(struct inode *inode); +void fuse_unlock_inode(struct inode *inode, bool locked); +bool fuse_lock_inode(struct inode *inode); int fuse_setxattr(struct inode *inode, const char *name, const void *value, size_t size, int flags); diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index a13ecefa9cd1f1b9705ec6b124fd060fa1fd4ed4..ffb61787d77af84b041ea61a267c67779504dcfc 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -357,15 +357,21 @@ int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid, return 0; } -void fuse_lock_inode(struct inode *inode) +bool fuse_lock_inode(struct inode *inode) { - if (!get_fuse_conn(inode)->parallel_dirops) + bool locked = false; + + if (!get_fuse_conn(inode)->parallel_dirops) { mutex_lock(&get_fuse_inode(inode)->mutex); + locked = true; + } + + return locked; } -void fuse_unlock_inode(struct inode *inode) +void fuse_unlock_inode(struct inode *inode, bool locked) { - if (!get_fuse_conn(inode)->parallel_dirops) + if (locked) mutex_unlock(&get_fuse_inode(inode)->mutex); } @@ -391,9 +397,6 @@ static void fuse_put_super(struct super_block *sb) { struct fuse_conn *fc = get_fuse_conn_super(sb); - fuse_send_destroy(fc); - - fuse_abort_conn(fc); mutex_lock(&fuse_mutex); list_del(&fc->entry); 
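fuse_lock_inode()/fuse_unlock_inode() above switch to passing back whether the lock was actually taken, so the unlock side no longer re-reads a connection flag that may have changed in between. The same pattern in isolation (names are hypothetical):

	#include <linux/mutex.h>
	#include <linux/types.h>

	static bool maybe_lock(struct mutex *lock, bool need_lock)
	{
		if (need_lock)
			mutex_lock(lock);
		return need_lock;	/* caller hands this to maybe_unlock() */
	}

	static void maybe_unlock(struct mutex *lock, bool locked)
	{
		if (locked)
			mutex_unlock(lock);
	}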
fuse_ctl_remove_conn(fc); @@ -1190,16 +1193,25 @@ static struct dentry *fuse_mount(struct file_system_type *fs_type, return mount_nodev(fs_type, flags, raw_data, fuse_fill_super); } -static void fuse_kill_sb_anon(struct super_block *sb) +static void fuse_sb_destroy(struct super_block *sb) { struct fuse_conn *fc = get_fuse_conn_super(sb); if (fc) { + fuse_send_destroy(fc); + + fuse_abort_conn(fc); + fuse_wait_aborted(fc); + down_write(&fc->killsb); fc->sb = NULL; up_write(&fc->killsb); } +} +static void fuse_kill_sb_anon(struct super_block *sb) +{ + fuse_sb_destroy(sb); kill_anon_super(sb); } @@ -1222,14 +1234,7 @@ static struct dentry *fuse_mount_blk(struct file_system_type *fs_type, static void fuse_kill_sb_blk(struct super_block *sb) { - struct fuse_conn *fc = get_fuse_conn_super(sb); - - if (fc) { - down_write(&fc->killsb); - fc->sb = NULL; - up_write(&fc->killsb); - } - + fuse_sb_destroy(sb); kill_block_super(sb); } diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index 3dd0cceefa435bb623de0b1ee8dd33d83a6bdba8..bc8787718febb655a006f52fb5557fec902a1fd5 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c @@ -1680,7 +1680,7 @@ int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset, end_of_file = (i_size_read(&ip->i_inode) + sdp->sd_sb.sb_bsize - 1) >> shift; lblock = offset >> shift; lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift; - if (lblock_stop > end_of_file) + if (lblock_stop > end_of_file && ip != GFS2_I(sdp->sd_rindex)) return 1; size = (lblock_stop - lblock) << shift; diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index 95b2a57ded3300a5c2d8b4d351bdfebc04aa151c..5fe033131f0361d04b12d7ef40fb726de1959eed 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c @@ -1665,7 +1665,8 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext, while(1) { bi = rbm_bi(rbm); - if (test_bit(GBF_FULL, &bi->bi_flags) && + if ((ip == NULL || !gfs2_rs_active(&ip->i_res)) && + test_bit(GBF_FULL, &bi->bi_flags) && (state == GFS2_BLKST_FREE)) goto next_bitmap; diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c index ad04a5741016994374289cf8cc8ba6f97d6c0480..9a8772465a907a5e320a44c8b6ee7ddf61e87410 100644 --- a/fs/hfs/brec.c +++ b/fs/hfs/brec.c @@ -75,9 +75,10 @@ int hfs_brec_insert(struct hfs_find_data *fd, void *entry, int entry_len) if (!fd->bnode) { if (!tree->root) hfs_btree_inc_height(tree); - fd->bnode = hfs_bnode_find(tree, tree->leaf_head); - if (IS_ERR(fd->bnode)) - return PTR_ERR(fd->bnode); + node = hfs_bnode_find(tree, tree->leaf_head); + if (IS_ERR(node)) + return PTR_ERR(node); + fd->bnode = node; fd->record = -1; } new_node = NULL; diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c index e8120a282435d1eac2e4f08ce1451b126c3e5b6d..1a44c4621e74b5ffe4bc44d36b55868ded4315b9 100644 --- a/fs/hfsplus/dir.c +++ b/fs/hfsplus/dir.c @@ -78,13 +78,13 @@ again: cpu_to_be32(HFSP_HARDLINK_TYPE) && entry.file.user_info.fdCreator == cpu_to_be32(HFSP_HFSPLUS_CREATOR) && + HFSPLUS_SB(sb)->hidden_dir && (entry.file.create_date == HFSPLUS_I(HFSPLUS_SB(sb)->hidden_dir)-> create_date || entry.file.create_date == HFSPLUS_I(d_inode(sb->s_root))-> - create_date) && - HFSPLUS_SB(sb)->hidden_dir) { + create_date)) { struct qstr str; char name[32]; diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c index 3cba08c931eebe7a70403fe892a76aa8e355f0fd..410f59372f19d47ddad53a885f8d6b0637f66f5c 100644 --- a/fs/hfsplus/super.c +++ b/fs/hfsplus/super.c @@ -524,8 +524,10 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) goto out_put_root; if (!hfs_brec_read(&fd, &entry, 
sizeof(entry))) { hfs_find_exit(&fd); - if (entry.type != cpu_to_be16(HFSPLUS_FOLDER)) + if (entry.type != cpu_to_be16(HFSPLUS_FOLDER)) { + err = -EINVAL; goto out_put_root; + } inode = hfsplus_iget(sb, be32_to_cpu(entry.folder.id)); if (IS_ERR(inode)) { err = PTR_ERR(inode); diff --git a/fs/iomap.c b/fs/iomap.c index d4801f8dd4fd55a111e647a810c3e0788829897d..8f7673a692736bb7f5d31977797b653bb9de442d 100644 --- a/fs/iomap.c +++ b/fs/iomap.c @@ -693,6 +693,7 @@ struct iomap_dio { atomic_t ref; unsigned flags; int error; + bool wait_for_completion; union { /* used during submission and for synchronous completion: */ @@ -793,9 +794,8 @@ static void iomap_dio_bio_end_io(struct bio *bio) iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status)); if (atomic_dec_and_test(&dio->ref)) { - if (is_sync_kiocb(dio->iocb)) { + if (dio->wait_for_completion) { struct task_struct *waiter = dio->submit.waiter; - WRITE_ONCE(dio->submit.waiter, NULL); wake_up_process(waiter); } else if (dio->flags & IOMAP_DIO_WRITE) { @@ -980,13 +980,12 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, dio->end_io = end_io; dio->error = 0; dio->flags = 0; + dio->wait_for_completion = is_sync_kiocb(iocb); dio->submit.iter = iter; - if (is_sync_kiocb(iocb)) { - dio->submit.waiter = current; - dio->submit.cookie = BLK_QC_T_NONE; - dio->submit.last_queue = NULL; - } + dio->submit.waiter = current; + dio->submit.cookie = BLK_QC_T_NONE; + dio->submit.last_queue = NULL; if (iov_iter_rw(iter) == READ) { if (pos >= dio->i_size) @@ -1016,7 +1015,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, WARN_ON_ONCE(ret); ret = 0; - if (iov_iter_rw(iter) == WRITE && !is_sync_kiocb(iocb) && + if (iov_iter_rw(iter) == WRITE && !dio->wait_for_completion && !inode->i_sb->s_dio_done_wq) { ret = sb_init_dio_done_wq(inode->i_sb); if (ret < 0) @@ -1031,8 +1030,10 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, iomap_dio_actor); if (ret <= 0) { /* magic error code to fall back to buffered I/O */ - if (ret == -ENOTBLK) + if (ret == -ENOTBLK) { + dio->wait_for_completion = true; ret = 0; + } break; } pos += ret; @@ -1046,7 +1047,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, iomap_dio_set_error(dio, ret); if (!atomic_dec_and_test(&dio->ref)) { - if (!is_sync_kiocb(iocb)) + if (!dio->wait_for_completion) return -EIOCBQUEUED; for (;;) { diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index ed4edcd2bc56db126064864c625f7455e070aee3..a4994e25e19e7a611af3d1ca505a763ac0e77d32 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c @@ -24,6 +24,7 @@ #include #include #include +#include #include "isofs.h" #include "zisofs.h" @@ -653,6 +654,12 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent) /* * What if bugger tells us to go beyond page size? 
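The iomap direct-I/O change above records the decision to wait in dio->wait_for_completion instead of calling is_sync_kiocb() again on the completion side, so submission and completion stay in agreement when the request later falls back to synchronous completion (the -ENOTBLK buffered-I/O fallback). A hypothetical condensation of the idea:

	#include <linux/sched.h>
	#include <linux/types.h>

	struct simple_dio {
		bool wait_for_completion;	/* decided once at setup */
		struct task_struct *waiter;
	};

	static void simple_dio_complete(struct simple_dio *dio)
	{
		/* Test the stored decision; never re-derive it here. */
		if (dio->wait_for_completion)
			wake_up_process(dio->waiter);
	}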
*/ + if (bdev_logical_block_size(s->s_bdev) > 2048) { + printk(KERN_WARNING + "ISOFS: unsupported/invalid hardware sector size %d\n", + bdev_logical_block_size(s->s_bdev)); + goto out_freesbi; + } opt.blocksize = sb_min_blocksize(s, opt.blocksize); sbi->s_high_sierra = 0; /* default is iso9660 */ diff --git a/fs/locks.c b/fs/locks.c index fef5f1e29a4f20a5788e7ae475318e674038c449..47b66bfc4fa3a2b3ce9127e7f01eb529012fce34 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -2074,6 +2074,13 @@ static pid_t locks_translate_pid(struct file_lock *fl, struct pid_namespace *ns) return -1; if (IS_REMOTELCK(fl)) return fl->fl_pid; + /* + * If the flock owner process is dead and its pid has been already + * freed, the translation below won't work, but we still want to show + * flock owner pid number in init pidns. + */ + if (ns == &init_pid_ns) + return (pid_t)fl->fl_pid; rcu_read_lock(); pid = find_pid_ns(fl->fl_pid, &init_pid_ns); diff --git a/fs/namespace.c b/fs/namespace.c index 85bfe5e55adfcae01aee23e0db714824841f1d9a..bb28ba484c6947e4325d317cdefdaa2f6aac842d 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -450,10 +450,10 @@ int mnt_want_write_file_path(struct file *file) { int ret; - sb_start_write(file->f_path.mnt->mnt_sb); + sb_start_write(file_inode(file)->i_sb); ret = __mnt_want_write_file(file); if (ret) - sb_end_write(file->f_path.mnt->mnt_sb); + sb_end_write(file_inode(file)->i_sb); return ret; } @@ -544,7 +544,8 @@ void __mnt_drop_write_file(struct file *file) void mnt_drop_write_file_path(struct file *file) { - mnt_drop_write(file->f_path.mnt); + __mnt_drop_write_file(file); + sb_end_write(file_inode(file)->i_sb); } void mnt_drop_write_file(struct file *file) diff --git a/fs/nfs/blocklayout/dev.c b/fs/nfs/blocklayout/dev.c index 95f74bd2c067fcd2556f09e3abbbb57cbd41f0f2..70c4165d2d742d3812fb7ebe850e359af2e60a25 100644 --- a/fs/nfs/blocklayout/dev.c +++ b/fs/nfs/blocklayout/dev.c @@ -204,7 +204,7 @@ static bool bl_map_stripe(struct pnfs_block_dev *dev, u64 offset, chunk = div_u64(offset, dev->chunk_size); div_u64_rem(chunk, dev->nr_children, &chunk_idx); - if (chunk_idx > dev->nr_children) { + if (chunk_idx >= dev->nr_children) { dprintk("%s: invalid chunk idx %d (%lld/%lld)\n", __func__, chunk_idx, offset, dev->chunk_size); /* error, should not happen */ diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c index 516b2248cafe8ad5f074be1af0c7ad52d1fffd3e..b8d55da2f04d5299c84ab640b44dc4c21de0bbf7 100644 --- a/fs/nfs/callback_proc.c +++ b/fs/nfs/callback_proc.c @@ -213,9 +213,9 @@ static u32 pnfs_check_callback_stateid(struct pnfs_layout_hdr *lo, { u32 oldseq, newseq; - /* Is the stateid still not initialised? */ + /* Is the stateid not initialised? */ if (!pnfs_layout_is_valid(lo)) - return NFS4ERR_DELAY; + return NFS4ERR_NOMATCHING_LAYOUT; /* Mismatched stateid? */ if (!nfs4_stateid_match_other(&lo->plh_stateid, new)) @@ -433,11 +433,14 @@ validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot, * a match. If the slot is in use and the sequence numbers match, the * client is still waiting for a response to the original request. 
*/ -static bool referring_call_exists(struct nfs_client *clp, +static int referring_call_exists(struct nfs_client *clp, uint32_t nrclists, - struct referring_call_list *rclists) + struct referring_call_list *rclists, + spinlock_t *lock) + __releases(lock) + __acquires(lock) { - bool status = 0; + int status = 0; int i, j; struct nfs4_session *session; struct nfs4_slot_table *tbl; @@ -460,8 +463,10 @@ static bool referring_call_exists(struct nfs_client *clp, for (j = 0; j < rclist->rcl_nrefcalls; j++) { ref = &rclist->rcl_refcalls[j]; + spin_unlock(lock); status = nfs4_slot_wait_on_seqid(tbl, ref->rc_slotid, ref->rc_sequenceid, HZ >> 1) < 0; + spin_lock(lock); if (status) goto out; } @@ -538,7 +543,8 @@ __be32 nfs4_callback_sequence(void *argp, void *resp, * related callback was received before the response to the original * call. */ - if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists)) { + if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists, + &tbl->slot_tbl_lock) < 0) { status = htonl(NFS4ERR_DELAY); goto out_unlock; } diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c index 123c069429a7ba71fed0d1ed986eb7e764f75706..57de914630bc9ea36a0ed1f4adae271706488c94 100644 --- a/fs/nfs/callback_xdr.c +++ b/fs/nfs/callback_xdr.c @@ -904,16 +904,21 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp) if (hdr_arg.minorversion == 0) { cps.clp = nfs4_find_client_ident(SVC_NET(rqstp), hdr_arg.cb_ident); - if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp)) + if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp)) { + if (cps.clp) + nfs_put_client(cps.clp); goto out_invalidcred; + } } cps.minorversion = hdr_arg.minorversion; hdr_res.taglen = hdr_arg.taglen; hdr_res.tag = hdr_arg.tag; - if (encode_compound_hdr_res(&xdr_out, &hdr_res) != 0) + if (encode_compound_hdr_res(&xdr_out, &hdr_res) != 0) { + if (cps.clp) + nfs_put_client(cps.clp); return rpc_system_err; - + } while (status == 0 && nops != hdr_arg.nops) { status = process_op(nops, rqstp, &xdr_in, rqstp->rq_argp, &xdr_out, rqstp->rq_resp, diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 751063bd6211e3b7b50da8b41022bbf332b6b817..4ce6ec109c2bf660c13ff64fa692fe3037ce231b 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -547,8 +547,15 @@ nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server, ret = -EIO; return ret; out_retry: - if (ret == 0) + if (ret == 0) { exception->retry = 1; + /* + * For NFS4ERR_MOVED, the client transport will need to + * be recomputed after migration recovery has completed. 
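referring_call_exists() above now takes the slot-table lock pointer and drops it around nfs4_slot_wait_on_seqid(), which can sleep; the __releases()/__acquires() annotations document for sparse that the lock is held on entry and exit but not across the wait. A minimal sketch of that shape, with msleep() standing in for the blocking call:

	#include <linux/delay.h>
	#include <linux/spinlock.h>

	static void wait_under_lock(spinlock_t *lock)
		__releases(lock)
		__acquires(lock)
	{
		spin_unlock(lock);
		msleep(10);		/* placeholder for the sleeping wait */
		spin_lock(lock);
	}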
+ */ + if (errorcode == -NFS4ERR_MOVED) + rpc_task_release_transport(task); + } return ret; } @@ -2526,14 +2533,18 @@ static void nfs41_check_delegation_stateid(struct nfs4_state *state) } nfs4_stateid_copy(&stateid, &delegation->stateid); - if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) || - !test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED, - &delegation->flags)) { + if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) { rcu_read_unlock(); nfs_finish_clear_delegation_stateid(state, &stateid); return; } + if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED, + &delegation->flags)) { + rcu_read_unlock(); + return; + } + cred = get_rpccred(delegation->cred); rcu_read_unlock(); status = nfs41_test_and_free_expired_stateid(server, &stateid, cred); @@ -7490,7 +7501,7 @@ static int nfs4_sp4_select_mode(struct nfs_client *clp, } out: clp->cl_sp4_flags = flags; - return 0; + return ret; } struct nfs41_exchange_id_data { @@ -8429,6 +8440,8 @@ nfs4_layoutget_handle_exception(struct rpc_task *task, dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status); + nfs4_sequence_free_slot(&lgp->res.seq_res); + switch (nfs4err) { case 0: goto out; @@ -8493,7 +8506,6 @@ nfs4_layoutget_handle_exception(struct rpc_task *task, goto out; } - nfs4_sequence_free_slot(&lgp->res.seq_res); err = nfs4_handle_exception(server, nfs4err, exception); if (!status) { if (exception->retry) @@ -8619,20 +8631,22 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout, gfp_t gfp_flags) if (IS_ERR(task)) return ERR_CAST(task); status = rpc_wait_for_completion_task(task); - if (status == 0) { + if (status != 0) + goto out; + + /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */ + if (task->tk_status < 0 || lgp->res.layoutp->len == 0) { status = nfs4_layoutget_handle_exception(task, lgp, &exception); *timeout = exception.timeout; - } - + } else + lseg = pnfs_layout_process(lgp); +out: trace_nfs4_layoutget(lgp->args.ctx, &lgp->args.range, &lgp->res.range, &lgp->res.stateid, status); - /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */ - if (status == 0 && lgp->res.layoutp->len) - lseg = pnfs_layout_process(lgp); rpc_put_task(task); dprintk("<-- %s status=%d\n", __func__, status); if (status) diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index c487a1ca7106e70db76c85f927d2311d9d87aa44..c51bcc176026cc540337205d4b88b8f0e6190f11 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1354,6 +1354,8 @@ int nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4_ if (!nfs4_state_mark_reclaim_nograce(clp, state)) return -EBADF; + nfs_inode_find_delegation_state_and_recover(state->inode, + &state->stateid); dprintk("%s: scheduling stateid recovery for server %s\n", __func__, clp->cl_hostname); nfs4_schedule_state_manager(clp); diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c index 60da59be83b6128241bf3be4a93e153e0329421a..4a3dd66175fed1732dc4a8c6f8c241e6391d6f53 100644 --- a/fs/nfs/pnfs_nfs.c +++ b/fs/nfs/pnfs_nfs.c @@ -61,7 +61,7 @@ EXPORT_SYMBOL_GPL(pnfs_generic_commit_release); /* The generic layer is about to remove the req from the commit list. * If this will make the bucket empty, it will need to put the lseg reference. 
- * Note this must be called holding i_lock + * Note this must be called holding nfsi->commit_mutex */ void pnfs_generic_clear_request_commit(struct nfs_page *req, @@ -149,9 +149,7 @@ restart: if (list_empty(&b->written)) { freeme = b->wlseg; b->wlseg = NULL; - spin_unlock(&cinfo->inode->i_lock); pnfs_put_lseg(freeme); - spin_lock(&cinfo->inode->i_lock); goto restart; } } @@ -167,7 +165,7 @@ static void pnfs_generic_retry_commit(struct nfs_commit_info *cinfo, int idx) LIST_HEAD(pages); int i; - spin_lock(&cinfo->inode->i_lock); + mutex_lock(&NFS_I(cinfo->inode)->commit_mutex); for (i = idx; i < fl_cinfo->nbuckets; i++) { bucket = &fl_cinfo->buckets[i]; if (list_empty(&bucket->committing)) @@ -177,12 +175,12 @@ static void pnfs_generic_retry_commit(struct nfs_commit_info *cinfo, int idx) list_for_each(pos, &bucket->committing) cinfo->ds->ncommitting--; list_splice_init(&bucket->committing, &pages); - spin_unlock(&cinfo->inode->i_lock); + mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex); nfs_retry_commit(&pages, freeme, cinfo, i); pnfs_put_lseg(freeme); - spin_lock(&cinfo->inode->i_lock); + mutex_lock(&NFS_I(cinfo->inode)->commit_mutex); } - spin_unlock(&cinfo->inode->i_lock); + mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex); } static unsigned int @@ -222,13 +220,13 @@ void pnfs_fetch_commit_bucket_list(struct list_head *pages, struct list_head *pos; bucket = &cinfo->ds->buckets[data->ds_commit_index]; - spin_lock(&cinfo->inode->i_lock); + mutex_lock(&NFS_I(cinfo->inode)->commit_mutex); list_for_each(pos, &bucket->committing) cinfo->ds->ncommitting--; list_splice_init(&bucket->committing, pages); data->lseg = bucket->clseg; bucket->clseg = NULL; - spin_unlock(&cinfo->inode->i_lock); + mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex); } diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index 5b6ff168d11aaeb0e9046329a96ced18621f8672..6d16399a350e05302edc0f902c3e073f5fdc60ef 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -1725,6 +1725,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp) if (status) { op = &args->ops[0]; op->status = status; + resp->opcnt = 1; goto encode_op; } diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c index d9ebe11c89909b8587fa3d7faa491c75426bed87..1d098c3c00e023540d6f0665720390647945af58 100644 --- a/fs/ocfs2/buffer_head_io.c +++ b/fs/ocfs2/buffer_head_io.c @@ -342,6 +342,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr, * for this bh as it's not marked locally * uptodate. */ status = -EIO; + clear_buffer_needs_validate(bh); put_bh(bh); bhs[i] = NULL; continue; diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index d60900b615f9c7feafc4499e9aa64cdb5ca45d35..efed50304b497d89999f6c76504b9d3347230f86 100644 --- a/fs/overlayfs/inode.c +++ b/fs/overlayfs/inode.c @@ -14,6 +14,7 @@ #include #include #include "overlayfs.h" +#include "ovl_entry.h" int ovl_setattr(struct dentry *dentry, struct iattr *attr) { @@ -608,39 +609,63 @@ static bool ovl_verify_inode(struct inode *inode, struct dentry *lowerdentry, return true; } +/* + * Does overlay inode need to be hashed by lower inode? 
+ */ +static bool ovl_hash_bylower(struct super_block *sb, struct dentry *upper, + struct dentry *lower, struct dentry *index) +{ + struct ovl_fs *ofs = sb->s_fs_info; + + /* No, if pure upper */ + if (!lower) + return false; + + /* Yes, if already indexed */ + if (index) + return true; + + /* Yes, if won't be copied up */ + if (!ofs->upper_mnt) + return true; + + /* No, if lower hardlink is or will be broken on copy up */ + if ((upper || !ovl_indexdir(sb)) && + !d_is_dir(lower) && d_inode(lower)->i_nlink > 1) + return false; + + /* No, if non-indexed upper with NFS export */ + if (sb->s_export_op && upper) + return false; + + /* Otherwise, hash by lower inode for fsnotify */ + return true; +} + struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry, struct dentry *index) { + struct super_block *sb = dentry->d_sb; struct dentry *lowerdentry = ovl_dentry_lower(dentry); struct inode *realinode = upperdentry ? d_inode(upperdentry) : NULL; struct inode *inode; - /* Already indexed or could be indexed on copy up? */ - bool indexed = (index || (ovl_indexdir(dentry->d_sb) && !upperdentry)); - struct dentry *origin = indexed ? lowerdentry : NULL; + bool bylower = ovl_hash_bylower(sb, upperdentry, lowerdentry, index); bool is_dir; - if (WARN_ON(upperdentry && indexed && !lowerdentry)) - return ERR_PTR(-EIO); - if (!realinode) realinode = d_inode(lowerdentry); /* - * Copy up origin (lower) may exist for non-indexed non-dir upper, but - * we must not use lower as hash key in that case. - * Hash non-dir that is or could be indexed by origin inode. - * Hash dir that is or could be merged by origin inode. - * Hash pure upper and non-indexed non-dir by upper inode. + * Copy up origin (lower) may exist for non-indexed upper, but we must + * not use lower as hash key if this is a broken hardlink. */ is_dir = S_ISDIR(realinode->i_mode); - if (is_dir) - origin = lowerdentry; - - if (upperdentry || origin) { - struct inode *key = d_inode(origin ?: upperdentry); + if (upperdentry || bylower) { + struct inode *key = d_inode(bylower ? lowerdentry : + upperdentry); unsigned int nlink = is_dir ? 1 : realinode->i_nlink; - inode = iget5_locked(dentry->d_sb, (unsigned long) key, + inode = iget5_locked(sb, (unsigned long) key, ovl_inode_test, ovl_inode_set, key); if (!inode) goto out_nomem; @@ -664,7 +689,8 @@ struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry, nlink = ovl_get_nlink(lowerdentry, upperdentry, nlink); set_nlink(inode, nlink); } else { - inode = new_inode(dentry->d_sb); + /* Lower hardlink that will be broken on copy up */ + inode = new_inode(sb); if (!inode) goto out_nomem; } diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c index 7fa7d68baa6d197d81f868ab795d5316f236f006..1d4f9997236f221fbd72d8a42405e3730f73c612 100644 --- a/fs/overlayfs/readdir.c +++ b/fs/overlayfs/readdir.c @@ -623,6 +623,21 @@ static int ovl_fill_real(struct dir_context *ctx, const char *name, return orig_ctx->actor(orig_ctx, name, namelen, offset, ino, d_type); } +static bool ovl_is_impure_dir(struct file *file) +{ + struct ovl_dir_file *od = file->private_data; + struct inode *dir = d_inode(file->f_path.dentry); + + /* + * Only upper dir can be impure, but if we are in the middle of + * iterating a lower real dir, dir could be copied up and marked + * impure. We only want the impure cache if we started iterating + * a real upper dir to begin with. 
+ */ + return od->is_upper && ovl_test_flag(OVL_IMPURE, dir); + +} + static int ovl_iterate_real(struct file *file, struct dir_context *ctx) { int err; @@ -646,7 +661,7 @@ static int ovl_iterate_real(struct file *file, struct dir_context *ctx) rdt.parent_ino = stat.ino; } - if (ovl_test_flag(OVL_IMPURE, d_inode(dir))) { + if (ovl_is_impure_dir(file)) { rdt.cache = ovl_cache_get_impure(&file->f_path); if (IS_ERR(rdt.cache)) return PTR_ERR(rdt.cache); @@ -676,7 +691,7 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx) * entries. */ if (ovl_same_sb(dentry->d_sb) && - (ovl_test_flag(OVL_IMPURE, d_inode(dentry)) || + (ovl_is_impure_dir(file) || OVL_TYPE_MERGE(ovl_path_type(dentry->d_parent)))) { return ovl_iterate_real(file, ctx); } diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c index e64ecb9f272090bf6b23772a0d36e56b63b8106a..66c373230e60ef5afcead2bba922b49f707073c8 100644 --- a/fs/proc/kcore.c +++ b/fs/proc/kcore.c @@ -384,8 +384,10 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff) phdr->p_flags = PF_R|PF_W|PF_X; phdr->p_offset = kc_vaddr_to_offset(m->addr) + dataoff; phdr->p_vaddr = (size_t)m->addr; - if (m->type == KCORE_RAM || m->type == KCORE_TEXT) + if (m->type == KCORE_RAM) phdr->p_paddr = __pa(m->addr); + else if (m->type == KCORE_TEXT) + phdr->p_paddr = __pa_symbol(m->addr); else phdr->p_paddr = (elf_addr_t)-1; phdr->p_filesz = phdr->p_memsz = m->size; diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c index e11672aa457514b8d84695688edc342f80a57b20..ecdb3baa12833ca0b71f85e7b6080e8e8ed59b0c 100644 --- a/fs/pstore/ram_core.c +++ b/fs/pstore/ram_core.c @@ -421,7 +421,12 @@ static void *persistent_ram_vmap(phys_addr_t start, size_t size, vaddr = vmap(pages, page_count, VM_MAP, prot); kfree(pages); - return vaddr; + /* + * Since vmap() uses page granularity, we must add the offset + * into the page here, to get the byte granularity address + * into the mapping to represent the actual "start" location. + */ + return vaddr + offset_in_page(start); } static void *persistent_ram_iomap(phys_addr_t start, size_t size, @@ -440,6 +445,11 @@ static void *persistent_ram_iomap(phys_addr_t start, size_t size, else va = ioremap_wc(start, size); + /* + * Since request_mem_region() and ioremap() are byte-granularity + * there is no need handle anything special like we do when the + * vmap() case in persistent_ram_vmap() above. + */ return va; } @@ -460,7 +470,7 @@ static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size, return -ENOMEM; } - prz->buffer = prz->vaddr + offset_in_page(start); + prz->buffer = prz->vaddr; prz->buffer_size = size - sizeof(struct persistent_ram_buffer); return 0; @@ -507,7 +517,8 @@ void persistent_ram_free(struct persistent_ram_zone *prz) if (prz->vaddr) { if (pfn_valid(prz->paddr >> PAGE_SHIFT)) { - vunmap(prz->vaddr); + /* We must vunmap() at page-granularity. */ + vunmap(prz->vaddr - offset_in_page(prz->paddr)); } else { iounmap(prz->vaddr); release_mem_region(prz->paddr, prz->size); diff --git a/fs/quota/quota.c b/fs/quota/quota.c index 43612e2a73af8c7d334dabe6278f6e427c916f42..3f02bab0db4e21fb983811925dad9b2378dbf463 100644 --- a/fs/quota/quota.c +++ b/fs/quota/quota.c @@ -18,6 +18,7 @@ #include #include #include +#include static int check_quotactl_permission(struct super_block *sb, int type, int cmd, qid_t id) @@ -703,6 +704,7 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, if (type >= (XQM_COMMAND(cmd) ? 
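The persistent_ram_vmap() fix above accounts for vmap() being page-granular: the sub-page offset of the physical start address has to be added to the returned mapping, and subtracted again before vunmap(). The same arithmetic isolated into hypothetical wrappers:

	#include <linux/mm.h>
	#include <linux/vmalloc.h>

	static void *map_bytes(struct page **pages, unsigned int nr_pages,
			       phys_addr_t start)
	{
		void *vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);

		/* vmap() is page-aligned; re-add the byte offset. */
		return vaddr ? vaddr + offset_in_page(start) : NULL;
	}

	static void unmap_bytes(void *vaddr, phys_addr_t start)
	{
		/* vunmap() must see the page-aligned address again. */
		vunmap(vaddr - offset_in_page(start));
	}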
XQM_MAXQUOTAS : MAXQUOTAS)) return -EINVAL; + type = array_index_nospec(type, MAXQUOTAS); /* * Quota not supported on this fs? Check this before s_quota_types * since they needn't be set if quota is not supported at all. diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h index 48835a659948f1367b1d2ad9cf71b3dc51a73ae0..eabf85371ece61783e6dfb6e0bc54ad8fc40f80b 100644 --- a/fs/reiserfs/reiserfs.h +++ b/fs/reiserfs/reiserfs.h @@ -271,7 +271,7 @@ struct reiserfs_journal_list { struct mutex j_commit_mutex; unsigned int j_trans_id; - time_t j_timestamp; + time64_t j_timestamp; /* write-only but useful for crash dump analysis */ struct reiserfs_list_bitmap *j_list_bitmap; struct buffer_head *j_commit_bh; /* commit buffer head */ struct reiserfs_journal_cnode *j_realblock; diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index 46492fb37a4c6a44194f171fdab768acbbd17f06..505f87a8c724ffdc71807ddfb61ec9d1aeaf50e2 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c @@ -792,8 +792,10 @@ static int listxattr_filler(struct dir_context *ctx, const char *name, return 0; size = namelen + 1; if (b->buf) { - if (size > b->size) + if (b->pos + size > b->size) { + b->pos = -ERANGE; return -ERANGE; + } memcpy(b->buf + b->pos, name, namelen); b->buf[b->pos + namelen] = 0; } diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c index fcff2e0487fef11f89724a72221090e529f0a775..f1c1430ae7213515214e7ea51941c894b83349d7 100644 --- a/fs/squashfs/file.c +++ b/fs/squashfs/file.c @@ -374,13 +374,29 @@ static int read_blocklist(struct inode *inode, int index, u64 *block) return squashfs_block_size(size); } +void squashfs_fill_page(struct page *page, struct squashfs_cache_entry *buffer, int offset, int avail) +{ + int copied; + void *pageaddr; + + pageaddr = kmap_atomic(page); + copied = squashfs_copy_data(pageaddr, buffer, offset, avail); + memset(pageaddr + copied, 0, PAGE_SIZE - copied); + kunmap_atomic(pageaddr); + + flush_dcache_page(page); + if (copied == avail) + SetPageUptodate(page); + else + SetPageError(page); +} + /* Copy data into page cache */ void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer, int bytes, int offset) { struct inode *inode = page->mapping->host; struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; - void *pageaddr; int i, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1; int start_index = page->index & ~mask, end_index = start_index | mask; @@ -406,12 +422,7 @@ void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer, if (PageUptodate(push_page)) goto skip_page; - pageaddr = kmap_atomic(push_page); - squashfs_copy_data(pageaddr, buffer, offset, avail); - memset(pageaddr + avail, 0, PAGE_SIZE - avail); - kunmap_atomic(pageaddr); - flush_dcache_page(push_page); - SetPageUptodate(push_page); + squashfs_fill_page(push_page, buffer, offset, avail); skip_page: unlock_page(push_page); if (i != page->index) @@ -420,10 +431,9 @@ skip_page: } /* Read datablock stored packed inside a fragment (tail-end packed block) */ -static int squashfs_readpage_fragment(struct page *page) +static int squashfs_readpage_fragment(struct page *page, int expected) { struct inode *inode = page->mapping->host; - struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb, squashfs_i(inode)->fragment_block, squashfs_i(inode)->fragment_size); @@ -434,23 +444,16 @@ static int squashfs_readpage_fragment(struct page *page) squashfs_i(inode)->fragment_block, 
squashfs_i(inode)->fragment_size); else - squashfs_copy_cache(page, buffer, i_size_read(inode) & - (msblk->block_size - 1), + squashfs_copy_cache(page, buffer, expected, squashfs_i(inode)->fragment_offset); squashfs_cache_put(buffer); return res; } -static int squashfs_readpage_sparse(struct page *page, int index, int file_end) +static int squashfs_readpage_sparse(struct page *page, int expected) { - struct inode *inode = page->mapping->host; - struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; - int bytes = index == file_end ? - (i_size_read(inode) & (msblk->block_size - 1)) : - msblk->block_size; - - squashfs_copy_cache(page, NULL, bytes, 0); + squashfs_copy_cache(page, NULL, expected, 0); return 0; } @@ -460,6 +463,9 @@ static int squashfs_readpage(struct file *file, struct page *page) struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; int index = page->index >> (msblk->block_log - PAGE_SHIFT); int file_end = i_size_read(inode) >> msblk->block_log; + int expected = index == file_end ? + (i_size_read(inode) & (msblk->block_size - 1)) : + msblk->block_size; int res; void *pageaddr; @@ -478,11 +484,11 @@ static int squashfs_readpage(struct file *file, struct page *page) goto error_out; if (bsize == 0) - res = squashfs_readpage_sparse(page, index, file_end); + res = squashfs_readpage_sparse(page, expected); else - res = squashfs_readpage_block(page, block, bsize); + res = squashfs_readpage_block(page, block, bsize, expected); } else - res = squashfs_readpage_fragment(page); + res = squashfs_readpage_fragment(page, expected); if (!res) return 0; diff --git a/fs/squashfs/file_cache.c b/fs/squashfs/file_cache.c index f2310d2a2019556ea316b2c03386f3bbcf98c355..a9ba8d96776ac2661a98370612d02b53784577c7 100644 --- a/fs/squashfs/file_cache.c +++ b/fs/squashfs/file_cache.c @@ -20,7 +20,7 @@ #include "squashfs.h" /* Read separately compressed datablock and memcopy into page cache */ -int squashfs_readpage_block(struct page *page, u64 block, int bsize) +int squashfs_readpage_block(struct page *page, u64 block, int bsize, int expected) { struct inode *i = page->mapping->host; struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb, @@ -31,7 +31,7 @@ int squashfs_readpage_block(struct page *page, u64 block, int bsize) ERROR("Unable to read page, block %llx, size %x\n", block, bsize); else - squashfs_copy_cache(page, buffer, buffer->length, 0); + squashfs_copy_cache(page, buffer, expected, 0); squashfs_cache_put(buffer); return res; diff --git a/fs/squashfs/file_direct.c b/fs/squashfs/file_direct.c index cb485d8e0e91b1b2ff1cb9b0330339c51c15b8a4..80db1b86a27c66b1eb2105971685267171e52ce2 100644 --- a/fs/squashfs/file_direct.c +++ b/fs/squashfs/file_direct.c @@ -21,10 +21,11 @@ #include "page_actor.h" static int squashfs_read_cache(struct page *target_page, u64 block, int bsize, - int pages, struct page **page); + int pages, struct page **page, int bytes); /* Read separately compressed datablock directly into page cache */ -int squashfs_readpage_block(struct page *target_page, u64 block, int bsize) +int squashfs_readpage_block(struct page *target_page, u64 block, int bsize, + int expected) { struct inode *inode = target_page->mapping->host; @@ -83,7 +84,7 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize) * using an intermediate buffer. 
*/ res = squashfs_read_cache(target_page, block, bsize, pages, - page); + page, expected); if (res < 0) goto mark_errored; @@ -95,6 +96,11 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize) if (res < 0) goto mark_errored; + if (res != expected) { + res = -EIO; + goto mark_errored; + } + /* Last page may have trailing bytes not filled */ bytes = res % PAGE_SIZE; if (bytes) { @@ -138,13 +144,12 @@ out: static int squashfs_read_cache(struct page *target_page, u64 block, int bsize, - int pages, struct page **page) + int pages, struct page **page, int bytes) { struct inode *i = target_page->mapping->host; struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb, block, bsize); - int bytes = buffer->length, res = buffer->error, n, offset = 0; - void *pageaddr; + int res = buffer->error, n, offset = 0; if (res) { ERROR("Unable to read page, block %llx, size %x\n", block, @@ -159,12 +164,7 @@ static int squashfs_read_cache(struct page *target_page, u64 block, int bsize, if (page[n] == NULL) continue; - pageaddr = kmap_atomic(page[n]); - squashfs_copy_data(pageaddr, buffer, offset, avail); - memset(pageaddr + avail, 0, PAGE_SIZE - avail); - kunmap_atomic(pageaddr); - flush_dcache_page(page[n]); - SetPageUptodate(page[n]); + squashfs_fill_page(page[n], buffer, offset, avail); unlock_page(page[n]); if (page[n] != target_page) put_page(page[n]); diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h index 887d6d270080a6d8d945868c5bc1265d3f38f93c..f89f8a74c6cea5c6fc8723319af336e7bf1ec04b 100644 --- a/fs/squashfs/squashfs.h +++ b/fs/squashfs/squashfs.h @@ -67,11 +67,12 @@ extern __le64 *squashfs_read_fragment_index_table(struct super_block *, u64, u64, unsigned int); /* file.c */ +void squashfs_fill_page(struct page *, struct squashfs_cache_entry *, int, int); void squashfs_copy_cache(struct page *, struct squashfs_cache_entry *, int, int); /* file_xxx.c */ -extern int squashfs_readpage_block(struct page *, u64, int); +extern int squashfs_readpage_block(struct page *, u64, int, int); /* id.c */ extern int squashfs_get_id(struct super_block *, unsigned int, unsigned int *); diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c index 39c75a86c67f1d24dc14bc79ce0d0f9d7dccef2b..666986b95c5d106063ae38f20273f5b06979526e 100644 --- a/fs/sysfs/file.c +++ b/fs/sysfs/file.c @@ -407,6 +407,50 @@ int sysfs_chmod_file(struct kobject *kobj, const struct attribute *attr, } EXPORT_SYMBOL_GPL(sysfs_chmod_file); +/** + * sysfs_break_active_protection - break "active" protection + * @kobj: The kernel object @attr is associated with. + * @attr: The attribute to break the "active" protection for. + * + * With sysfs, just like kernfs, deletion of an attribute is postponed until + * all active .show() and .store() callbacks have finished unless this function + * is called. Hence this function is useful in methods that implement self + * deletion. + */ +struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj, + const struct attribute *attr) +{ + struct kernfs_node *kn; + + kobject_get(kobj); + kn = kernfs_find_and_get(kobj->sd, attr->name); + if (kn) + kernfs_break_active_protection(kn); + return kn; +} +EXPORT_SYMBOL_GPL(sysfs_break_active_protection); + +/** + * sysfs_unbreak_active_protection - restore "active" protection + * @kn: Pointer returned by sysfs_break_active_protection(). + * + * Undo the effects of sysfs_break_active_protection(). 
Since this function + * calls kernfs_put() on the kernfs node that corresponds to the 'attr' + * argument passed to sysfs_break_active_protection() that attribute may have + * been removed between the sysfs_break_active_protection() and + * sysfs_unbreak_active_protection() calls, it is not safe to access @kn after + * this function has returned. + */ +void sysfs_unbreak_active_protection(struct kernfs_node *kn) +{ + struct kobject *kobj = kn->parent->priv; + + kernfs_unbreak_active_protection(kn); + kernfs_put(kn); + kobject_put(kobj); +} +EXPORT_SYMBOL_GPL(sysfs_unbreak_active_protection); + /** * sysfs_remove_file_ns - remove an object attribute with a custom ns tag * @kobj: object we're acting for diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c index 8ae1cd8611cc4c719a9470dbd4585b1b630d4a5e..69051f7a960667502894a8ba3cd357a21be7e1ac 100644 --- a/fs/ubifs/journal.c +++ b/fs/ubifs/journal.c @@ -665,6 +665,11 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir, spin_lock(&ui->ui_lock); ui->synced_i_size = ui->ui_size; spin_unlock(&ui->ui_lock); + if (xent) { + spin_lock(&host_ui->ui_lock); + host_ui->synced_i_size = host_ui->ui_size; + spin_unlock(&host_ui->ui_lock); + } mark_inode_clean(c, ui); mark_inode_clean(c, host_ui); return 0; @@ -1283,11 +1288,10 @@ static int truncate_data_node(const struct ubifs_info *c, const struct inode *in int *new_len) { void *buf; - int err, compr_type; - u32 dlen, out_len, old_dlen; + int err, dlen, compr_type, out_len, old_dlen; out_len = le32_to_cpu(dn->size); - buf = kmalloc_array(out_len, WORST_COMPR_FACTOR, GFP_NOFS); + buf = kmalloc(out_len * WORST_COMPR_FACTOR, GFP_NOFS); if (!buf) return -ENOMEM; @@ -1389,7 +1393,16 @@ int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode, else if (err) goto out_free; else { - if (le32_to_cpu(dn->size) <= dlen) + int dn_len = le32_to_cpu(dn->size); + + if (dn_len <= 0 || dn_len > UBIFS_BLOCK_SIZE) { + ubifs_err(c, "bad data node (block %u, inode %lu)", + blk, inode->i_ino); + ubifs_dump_node(c, dn); + goto out_free; + } + + if (dn_len <= dlen) dlen = 0; /* Nothing to do */ else { err = truncate_data_node(c, inode, blk, dn, &dlen); diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c index 6c3a1abd0e22c9e033da093e4f21b4d3ad9898b2..780a436d8c45b73c91c3de209a4f607a33f324b2 100644 --- a/fs/ubifs/lprops.c +++ b/fs/ubifs/lprops.c @@ -1091,10 +1091,6 @@ static int scan_check_cb(struct ubifs_info *c, } } - buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL); - if (!buf) - return -ENOMEM; - /* * After an unclean unmount, empty and freeable LEBs * may contain garbage - do not scan them. 
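The sysfs_break_active_protection() and sysfs_unbreak_active_protection() helpers added in the fs/sysfs/file.c and include/linux/sysfs.h hunks above exist for attributes whose store() callback deletes the object the attribute belongs to; without them, removing the attribute from inside its own callback would wait for that very callback to finish. The sketch below is illustrative only and is not part of this patch; my_driver_remove_device() is a hypothetical placeholder for whatever teardown path actually removes the device and its attributes:

	static ssize_t delete_store(struct device *dev, struct device_attribute *attr,
				    const char *buf, size_t count)
	{
		struct kernfs_node *kn;

		/* Stop attribute removal from waiting for this callback to return. */
		kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
		WARN_ON_ONCE(!kn);

		my_driver_remove_device(dev);	/* also removes this attribute */

		/* Drops the kernfs and kobject references taken above. */
		if (kn)
			sysfs_unbreak_active_protection(kn);
		return count;
	}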
@@ -1113,6 +1109,10 @@ static int scan_check_cb(struct ubifs_info *c, return LPT_SCAN_CONTINUE; } + buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL); + if (!buf) + return -ENOMEM; + sleb = ubifs_scan(c, lnum, 0, buf, 0); if (IS_ERR(sleb)) { ret = PTR_ERR(sleb); diff --git a/fs/xattr.c b/fs/xattr.c index 61cd28ba25f364df5af103277924befb6d4a39a0..be2ce57cd6ad0c73d8d6bd4ee796981fe302aaad 100644 --- a/fs/xattr.c +++ b/fs/xattr.c @@ -541,7 +541,7 @@ getxattr(struct dentry *d, const char __user *name, void __user *value, if (error > 0) { if ((strcmp(kname, XATTR_NAME_POSIX_ACL_ACCESS) == 0) || (strcmp(kname, XATTR_NAME_POSIX_ACL_DEFAULT) == 0)) - posix_acl_fix_xattr_to_user(kvalue, size); + posix_acl_fix_xattr_to_user(kvalue, error); if (size && copy_to_user(value, kvalue, error)) error = -EFAULT; } else if (error == -ERANGE && size >= XATTR_SIZE_MAX) { diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index ca1d2cc2cdfa09a8a760693b6b3c10e9c3f6a704..18863d56273cc7ea144bfaf366a2c74755ee72c9 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -199,47 +199,57 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1, #define __declare_arg_0(a0, res) \ struct arm_smccc_res *___res = res; \ - register u32 r0 asm("r0") = a0; \ + register unsigned long r0 asm("r0") = (u32)a0; \ register unsigned long r1 asm("r1"); \ register unsigned long r2 asm("r2"); \ register unsigned long r3 asm("r3") #define __declare_arg_1(a0, a1, res) \ + typeof(a1) __a1 = a1; \ struct arm_smccc_res *___res = res; \ - register u32 r0 asm("r0") = a0; \ - register typeof(a1) r1 asm("r1") = a1; \ + register unsigned long r0 asm("r0") = (u32)a0; \ + register unsigned long r1 asm("r1") = __a1; \ register unsigned long r2 asm("r2"); \ register unsigned long r3 asm("r3") #define __declare_arg_2(a0, a1, a2, res) \ + typeof(a1) __a1 = a1; \ + typeof(a2) __a2 = a2; \ struct arm_smccc_res *___res = res; \ - register u32 r0 asm("r0") = a0; \ - register typeof(a1) r1 asm("r1") = a1; \ - register typeof(a2) r2 asm("r2") = a2; \ + register unsigned long r0 asm("r0") = (u32)a0; \ + register unsigned long r1 asm("r1") = __a1; \ + register unsigned long r2 asm("r2") = __a2; \ register unsigned long r3 asm("r3") #define __declare_arg_3(a0, a1, a2, a3, res) \ + typeof(a1) __a1 = a1; \ + typeof(a2) __a2 = a2; \ + typeof(a3) __a3 = a3; \ struct arm_smccc_res *___res = res; \ - register u32 r0 asm("r0") = a0; \ - register typeof(a1) r1 asm("r1") = a1; \ - register typeof(a2) r2 asm("r2") = a2; \ - register typeof(a3) r3 asm("r3") = a3 + register unsigned long r0 asm("r0") = (u32)a0; \ + register unsigned long r1 asm("r1") = __a1; \ + register unsigned long r2 asm("r2") = __a2; \ + register unsigned long r3 asm("r3") = __a3 #define __declare_arg_4(a0, a1, a2, a3, a4, res) \ + typeof(a4) __a4 = a4; \ __declare_arg_3(a0, a1, a2, a3, res); \ - register typeof(a4) r4 asm("r4") = a4 + register unsigned long r4 asm("r4") = __a4 #define __declare_arg_5(a0, a1, a2, a3, a4, a5, res) \ + typeof(a5) __a5 = a5; \ __declare_arg_4(a0, a1, a2, a3, a4, res); \ - register typeof(a5) r5 asm("r5") = a5 + register unsigned long r5 asm("r5") = __a5 #define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res) \ + typeof(a6) __a6 = a6; \ __declare_arg_5(a0, a1, a2, a3, a4, a5, res); \ - register typeof(a6) r6 asm("r6") = a6 + register unsigned long r6 asm("r6") = __a6 #define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \ + typeof(a7) __a7 = a7; \ __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res); \ - register 
typeof(a7) r7 asm("r7") = a7 + register unsigned long r7 asm("r7") = __a7 #define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__) #define __declare_args(count, ...) ___declare_args(count, __VA_ARGS__) diff --git a/include/linux/crypto.h b/include/linux/crypto.h index cc36484d29e1671826776ac4bcd3446a96474b1c..de96913306cbb8f4fc5fbc91b2944fc1ca66dce6 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -111,6 +111,11 @@ */ #define CRYPTO_ALG_OPTIONAL_KEY 0x00004000 +/* + * Don't trigger module loading + */ +#define CRYPTO_NOLOAD 0x00008000 + /* * Transform masks and values (for crt_flags). */ diff --git a/include/linux/fsl/guts.h b/include/linux/fsl/guts.h index 3efa3b861d44cae46670532c9db208d8630099a9..941b11811f85915bd70a730bbc338288d995493b 100644 --- a/include/linux/fsl/guts.h +++ b/include/linux/fsl/guts.h @@ -16,6 +16,7 @@ #define __FSL_GUTS_H__ #include +#include /** * Global Utility Registers. diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index ba74eaa8eadffdf6282ef4f0d3c09bedb3d4be6f..0c51f753652d886e50d6b9d0a07ed0d5c75e5e4c 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -1026,6 +1026,8 @@ extern int vmbus_establish_gpadl(struct vmbus_channel *channel, extern int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle); +void vmbus_reset_channel_cb(struct vmbus_channel *channel); + extern int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer, u32 bufferlen, diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 485a5b48f0380460fa0b46243e34aa6c83796c07..a6ab2f51f703f3c6b32e40f365e1513feac200e8 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -112,6 +112,7 @@ * Extended Capability Register */ +#define ecap_dit(e) ((e >> 41) & 0x1) #define ecap_pasid(e) ((e >> 40) & 0x1) #define ecap_pss(e) ((e >> 35) & 0x1f) #define ecap_eafs(e) ((e >> 34) & 0x1) @@ -281,6 +282,7 @@ enum { #define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32) #define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16) #define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK) +#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52)) #define QI_DEV_IOTLB_SIZE 1 #define QI_DEV_IOTLB_MAX_INVS 32 @@ -305,6 +307,7 @@ enum { #define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32) #define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16) #define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4) +#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52)) #define QI_DEV_EIOTLB_MAX_INVS 32 #define QI_PGRP_IDX(idx) (((u64)(idx)) << 55) @@ -450,9 +453,8 @@ extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm, u64 type); extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, unsigned int size_order, u64 type); -extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep, - u64 addr, unsigned mask); - +extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid, + u16 qdep, u64 addr, unsigned mask); extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu); extern int dmar_ir_support(void); diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index f2f9e957bf1b5e15cef34f84683f57d944f9e8b2..c4d19e77fea8baa66662635192d7f6cd873fec4e 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -950,7 +950,7 @@ int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn); void 
mlx5_health_cleanup(struct mlx5_core_dev *dev); int mlx5_health_init(struct mlx5_core_dev *dev); void mlx5_start_health_poll(struct mlx5_core_dev *dev); -void mlx5_stop_health_poll(struct mlx5_core_dev *dev); +void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health); void mlx5_drain_health_wq(struct mlx5_core_dev *dev); void mlx5_trigger_health_work(struct mlx5_core_dev *dev); void mlx5_drain_health_recovery(struct mlx5_core_dev *dev); diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 5a716df466a00cbef0e18ea22c6655527961bcee..63317710311e369e05135ef7677bed5c70f5d2fa 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -355,7 +355,7 @@ struct kioctx_table; struct mm_struct { struct vm_area_struct *mmap; /* list of VMAs */ struct rb_root mm_rb; - u32 vmacache_seqnum; /* per-thread vmacache */ + u64 vmacache_seqnum; /* per-thread vmacache */ #ifdef CONFIG_MMU unsigned long (*get_unmapped_area) (struct file *filp, unsigned long addr, unsigned long len, diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h index 5fe87687664c7d78046060499cf96b28790190cb..d7016dcb245eeaff7ba8820a3fd73e267c0e46cb 100644 --- a/include/linux/mm_types_task.h +++ b/include/linux/mm_types_task.h @@ -32,7 +32,7 @@ #define VMACACHE_MASK (VMACACHE_SIZE - 1) struct vmacache { - u32 seqnum; + u64 seqnum; struct vm_area_struct *vmas[VMACACHE_SIZE]; }; diff --git a/include/linux/pci.h b/include/linux/pci.h index 727e309baa5e831ea27c38f49915ab660239b407..b1abbcc614cf9b0e86e4b6a67abe187de5b296fc 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1235,6 +1235,8 @@ int pci_register_io_range(phys_addr_t addr, resource_size_t size); unsigned long pci_address_to_pio(phys_addr_t addr); phys_addr_t pci_pio_to_address(unsigned long pio); int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr); +int devm_pci_remap_iospace(struct device *dev, const struct resource *res, + phys_addr_t phys_addr); void pci_unmap_iospace(struct resource *res); void __iomem *devm_pci_remap_cfgspace(struct device *dev, resource_size_t offset, @@ -2290,4 +2292,16 @@ static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev) /* provide the legacy pci_dma_* API */ #include +#define pci_printk(level, pdev, fmt, arg...) \ + dev_printk(level, &(pdev)->dev, fmt, ##arg) + +#define pci_emerg(pdev, fmt, arg...) dev_emerg(&(pdev)->dev, fmt, ##arg) +#define pci_alert(pdev, fmt, arg...) dev_alert(&(pdev)->dev, fmt, ##arg) +#define pci_crit(pdev, fmt, arg...) dev_crit(&(pdev)->dev, fmt, ##arg) +#define pci_err(pdev, fmt, arg...) dev_err(&(pdev)->dev, fmt, ##arg) +#define pci_warn(pdev, fmt, arg...) dev_warn(&(pdev)->dev, fmt, ##arg) +#define pci_notice(pdev, fmt, arg...) dev_notice(&(pdev)->dev, fmt, ##arg) +#define pci_info(pdev, fmt, arg...) dev_info(&(pdev)->dev, fmt, ##arg) +#define pci_dbg(pdev, fmt, arg...) 
dev_dbg(&(pdev)->dev, fmt, ##arg) + #endif /* LINUX_PCI_H */ diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index ab20dc5db423cd80f5e2b5a18a0e631af3a5e4b8..7fa3f1498b340adb612bec87b7555eea30f6ee1a 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -3062,4 +3062,6 @@ #define PCI_VENDOR_ID_OCZ 0x1b85 +#define PCI_VENDOR_ID_NCUBE 0x10ff + #endif /* _LINUX_PCI_IDS_H */ diff --git a/include/linux/platform_data/ina2xx.h b/include/linux/platform_data/ina2xx.h index 9abc0ca7259b78d5400166dbafd81825ca05b915..9f0aa1b48c7849ae4fd7545bfbf5d8b5a3db102e 100644 --- a/include/linux/platform_data/ina2xx.h +++ b/include/linux/platform_data/ina2xx.h @@ -1,7 +1,7 @@ /* * Driver for Texas Instruments INA219, INA226 power monitor chips * - * Copyright (C) 2012 Lothar Felten + * Copyright (C) 2012 Lothar Felten * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index 4754eb4298b1b6fa345246d9b63d5fefd4fb5ac5..de5c49b0dccf569d89cb891ddc00eaca5086279a 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h @@ -82,8 +82,8 @@ struct k_itimer { clockid_t it_clock; timer_t it_id; int it_active; - int it_overrun; - int it_overrun_last; + s64 it_overrun; + s64 it_overrun_last; int it_requeue_pending; int it_sigev_notify; ktime_t it_interval; diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 79e90b3d32888fe6fd5e3abff958a346738d4ab2..4617cf4f6c5b0ab042c7f6a5af1af84a00684b20 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -251,6 +251,7 @@ struct power_supply { spinlock_t changed_lock; bool changed; bool initialized; + bool removing; atomic_t use_cnt; #ifdef CONFIG_THERMAL struct thermal_zone_device *tzd; diff --git a/include/linux/printk.h b/include/linux/printk.h index 2289a26bbf685fe371992edf86f6546df922c23a..1dba9cb7b91b0c3deb1f9035fb722dfc2b4c940a 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -152,9 +152,13 @@ static inline void printk_kill(void) { } #ifdef CONFIG_PRINTK_NMI extern void printk_nmi_enter(void); extern void printk_nmi_exit(void); +extern void printk_nmi_direct_enter(void); +extern void printk_nmi_direct_exit(void); #else static inline void printk_nmi_enter(void) { } static inline void printk_nmi_exit(void) { } +static inline void printk_nmi_direct_enter(void) { } +static inline void printk_nmi_direct_exit(void) { } #endif /* PRINTK_NMI */ #ifdef CONFIG_PRINTK diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 7fd514f36e74a9dbecd3664ed6c917c41326aee1..a4be6388a98035e49d816886785380336f861ba6 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -152,25 +152,25 @@ struct rhashtable_params { /** * struct rhashtable - Hash table handle * @tbl: Bucket table - * @nelems: Number of elements in table * @key_len: Key length for hashfn - * @p: Configuration parameters * @max_elems: Maximum number of elements in table + * @p: Configuration parameters * @rhlist: True if this is an rhltable * @run_work: Deferred worker to expand/shrink asynchronously * @mutex: Mutex to protect current/future table swapping * @lock: Spin lock to protect walker list + * @nelems: Number of elements in table */ struct rhashtable { struct bucket_table __rcu *tbl; - atomic_t nelems; unsigned int key_len; - struct rhashtable_params p; unsigned int max_elems; + struct rhashtable_params p; bool rhlist; 
struct work_struct run_work; struct mutex mutex; spinlock_t lock; + atomic_t nelems; }; /** diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h index 82ac0e5db005b36c4d4a5109503cb2fb698c69af..138bd1e183e0976ff455ab6a1a73df145b80a445 100644 --- a/include/linux/rtmutex.h +++ b/include/linux/rtmutex.h @@ -117,7 +117,14 @@ static inline int rt_mutex_is_locked(struct rt_mutex *lock) extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key); extern void rt_mutex_destroy(struct rt_mutex *lock); +#ifdef CONFIG_DEBUG_LOCK_ALLOC +extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass); +#define rt_mutex_lock(lock) rt_mutex_lock_nested(lock, 0) +#else extern void rt_mutex_lock(struct rt_mutex *lock); +#define rt_mutex_lock_nested(lock, subclass) rt_mutex_lock(lock) +#endif + extern int rt_mutex_lock_interruptible(struct rt_mutex *lock); extern int rt_mutex_lock_killable(struct rt_mutex *lock); extern int rt_mutex_timed_lock(struct rt_mutex *lock, diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 795b2d513874d6f9d538c0de34b72a90856f0ea1..07576a062ac09075504b71dba73e8310148849d5 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -664,21 +664,26 @@ struct sk_buff { struct sk_buff *prev; union { - ktime_t tstamp; - u64 skb_mstamp; + struct net_device *dev; + /* Some protocols might use this space to store information, + * while device pointer would be NULL. + * UDP receive path is one user. + */ + unsigned long dev_scratch; }; }; - struct rb_node rbnode; /* used in netem & tcp stack */ + struct rb_node rbnode; /* used in netem, ip4 defrag, and tcp stack */ + struct list_head list; }; - struct sock *sk; union { - struct net_device *dev; - /* Some protocols might use this space to store information, - * while device pointer would be NULL. - * UDP receive path is one user. - */ - unsigned long dev_scratch; + struct sock *sk; + int ip_defrag_offset; + }; + + union { + ktime_t tstamp; + u64 skb_mstamp; }; /* * This is the control buffer. 
It is free to use for every @@ -2587,7 +2592,7 @@ static inline void __skb_queue_purge(struct sk_buff_head *list) kfree_skb(skb); } -void skb_rbtree_purge(struct rb_root *root); +unsigned int skb_rbtree_purge(struct rb_root *root); void *netdev_alloc_frag(unsigned int fragsz); @@ -3141,6 +3146,7 @@ static inline void *skb_push_rcsum(struct sk_buff *skb, unsigned int len) return skb->data; } +int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len); /** * pskb_trim_rcsum - trim received skb and update checksum * @skb: buffer to trim @@ -3154,9 +3160,7 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) { if (likely(len >= skb->len)) return 0; - if (skb->ip_summed == CHECKSUM_COMPLETE) - skb->ip_summed = CHECKSUM_NONE; - return __pskb_trim(skb, len); + return pskb_trim_rcsum_slow(skb, len); } static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len) @@ -3176,6 +3180,12 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len) #define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode) +#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode) +#define skb_rb_first(root) rb_to_skb(rb_first(root)) +#define skb_rb_last(root) rb_to_skb(rb_last(root)) +#define skb_rb_next(skb) rb_to_skb(rb_next(&(skb)->rbnode)) +#define skb_rb_prev(skb) rb_to_skb(rb_prev(&(skb)->rbnode)) + #define skb_queue_walk(queue, skb) \ for (skb = (queue)->next; \ skb != (struct sk_buff *)(queue); \ @@ -3190,6 +3200,18 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len) for (; skb != (struct sk_buff *)(queue); \ skb = skb->next) +#define skb_rbtree_walk(skb, root) \ + for (skb = skb_rb_first(root); skb != NULL; \ + skb = skb_rb_next(skb)) + +#define skb_rbtree_walk_from(skb) \ + for (; skb != NULL; \ + skb = skb_rb_next(skb)) + +#define skb_rbtree_walk_from_safe(skb, tmp) \ + for (; tmp = skb ? skb_rb_next(skb) : NULL, (skb != NULL); \ + skb = tmp) + #define skb_queue_walk_from_safe(queue, skb, tmp) \ for (tmp = skb->next; \ skb != (struct sk_buff *)(queue); \ diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 2038ab5316161a1bcf8cb40355086b08c6392454..f8ced87a2efea489190c61e6d9287b9bf74ada1d 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -88,7 +88,8 @@ struct kmem_cache { int object_size; /* The size of an object without meta data */ int offset; /* Free pointer offset. 
*/ #ifdef CONFIG_SLUB_CPU_PARTIAL - int cpu_partial; /* Number of per cpu partial objects to keep around */ + /* Number of per cpu partial objects to keep around */ + unsigned int cpu_partial; #endif struct kmem_cache_order_objects oo; diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 71c237e8240e1f0c6886baaf6c07b8fd1581d69e..166fc4e76df616e8cc2cbe59133d95b726cf8953 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -156,6 +156,7 @@ int rpc_switch_client_transport(struct rpc_clnt *, void rpc_shutdown_client(struct rpc_clnt *); void rpc_release_client(struct rpc_clnt *); +void rpc_task_release_transport(struct rpc_task *); void rpc_task_release_client(struct rpc_task *); int rpcb_create_local(struct net *); diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index 40839c02d28c04389218a72012f784efc8ca5ff8..cca19bb200bda055da1ae73e66d195debd570f66 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -239,6 +239,9 @@ int __must_check sysfs_create_files(struct kobject *kobj, const struct attribute **attr); int __must_check sysfs_chmod_file(struct kobject *kobj, const struct attribute *attr, umode_t mode); +struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj, + const struct attribute *attr); +void sysfs_unbreak_active_protection(struct kernfs_node *kn); void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr, const void *ns); bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr); @@ -352,6 +355,17 @@ static inline int sysfs_chmod_file(struct kobject *kobj, return 0; } +static inline struct kernfs_node * +sysfs_break_active_protection(struct kobject *kobj, + const struct attribute *attr) +{ + return NULL; +} + +static inline void sysfs_unbreak_active_protection(struct kernfs_node *kn) +{ +} + static inline void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr, const void *ns) diff --git a/include/linux/tpm.h b/include/linux/tpm.h index 2a6c3d96b31f0bc52960d134d6318cb57b1396af..7f7b29f86c59935421888ca08441b973ec05e7d0 100644 --- a/include/linux/tpm.h +++ b/include/linux/tpm.h @@ -48,6 +48,8 @@ struct tpm_class_ops { u8 (*status) (struct tpm_chip *chip); bool (*update_timeouts)(struct tpm_chip *chip, unsigned long *timeout_cap); + int (*go_idle)(struct tpm_chip *chip); + int (*cmd_ready)(struct tpm_chip *chip); int (*request_locality)(struct tpm_chip *chip, int loc); int (*relinquish_locality)(struct tpm_chip *chip, int loc); void (*clk_enable)(struct tpm_chip *chip, bool value); diff --git a/include/linux/verification.h b/include/linux/verification.h index a10549a6c7cdfa72d805d5509fc9c1286c36324b..cfa4730d607aa2edc95364f2e09dca1f0e442407 100644 --- a/include/linux/verification.h +++ b/include/linux/verification.h @@ -12,6 +12,12 @@ #ifndef _LINUX_VERIFICATION_H #define _LINUX_VERIFICATION_H +/* + * Indicate that both builtin trusted keys and secondary trusted keys + * should be used. + */ +#define VERIFY_USE_SECONDARY_KEYRING ((struct key *)1UL) + /* * The use to which an asymmetric key is being put. 
*/ diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 5c7f010676a74206e06282337ca77b578058f2f1..47a3441cf4c4a4d59f588aac2c181ba1d8098c4f 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -105,7 +105,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, #ifdef CONFIG_DEBUG_VM_VMACACHE VMACACHE_FIND_CALLS, VMACACHE_FIND_HITS, - VMACACHE_FULL_FLUSHES, #endif #ifdef CONFIG_SWAP SWAP_RA, diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h index a5b3aa8d281f869ae21a5edd9137e0ee25ef4262..a09b28f76460b22c8d3f2da047adcb2069b98b64 100644 --- a/include/linux/vmacache.h +++ b/include/linux/vmacache.h @@ -16,7 +16,6 @@ static inline void vmacache_flush(struct task_struct *tsk) memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas)); } -extern void vmacache_flush_all(struct mm_struct *mm); extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma); extern struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr); @@ -30,10 +29,6 @@ extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm, static inline void vmacache_invalidate(struct mm_struct *mm) { mm->vmacache_seqnum++; - - /* deal with overflows */ - if (unlikely(mm->vmacache_seqnum == 0)) - vmacache_flush_all(mm); } #endif /* __LINUX_VMACACHE_H */ diff --git a/include/media/v4l2-fh.h b/include/media/v4l2-fh.h index 62633e7d2630ce590311ed908558e582595c8900..0f22a5eda4cc5f89b9c75eeeedfdd885330358da 100644 --- a/include/media/v4l2-fh.h +++ b/include/media/v4l2-fh.h @@ -37,10 +37,13 @@ struct v4l2_ctrl_handler; * @prio: priority of the file handler, as defined by &enum v4l2_priority * * @wait: event' s wait queue + * @subscribe_lock: serialise changes to the subscribed list; guarantee that + * the add and del event callbacks are orderly called * @subscribed: list of subscribed events * @available: list of events waiting to be dequeued * @navailable: number of available events at @available list * @sequence: event sequence number + * * @m2m_ctx: pointer to &struct v4l2_m2m_ctx */ struct v4l2_fh { @@ -51,6 +54,7 @@ struct v4l2_fh { /* Events */ wait_queue_head_t wait; + struct mutex subscribe_lock; struct list_head subscribed; struct list_head available; unsigned int navailable; diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h index a6e4edd8d4a254dfa1472d73f911dc1bb3bdaa4b..335cf7851f129e6130c07e8aefc3cfb98d9d7450 100644 --- a/include/net/inet_frag.h +++ b/include/net/inet_frag.h @@ -2,14 +2,20 @@ #ifndef __NET_FRAG_H__ #define __NET_FRAG_H__ +#include + struct netns_frags { - /* Keep atomic mem on separate cachelines in structs that include it */ - atomic_t mem ____cacheline_aligned_in_smp; /* sysctls */ + long high_thresh; + long low_thresh; int timeout; - int high_thresh; - int low_thresh; int max_dist; + struct inet_frags *f; + + struct rhashtable rhashtable ____cacheline_aligned_in_smp; + + /* Keep atomic mem on separate cachelines in structs that include it */ + atomic_long_t mem ____cacheline_aligned_in_smp; }; /** @@ -25,130 +31,115 @@ enum { INET_FRAG_COMPLETE = BIT(2), }; +struct frag_v4_compare_key { + __be32 saddr; + __be32 daddr; + u32 user; + u32 vif; + __be16 id; + u16 protocol; +}; + +struct frag_v6_compare_key { + struct in6_addr saddr; + struct in6_addr daddr; + u32 user; + __be32 id; + u32 iif; +}; + /** * struct inet_frag_queue - fragment queue * - * @lock: spinlock protecting the queue + * @node: rhash node + * @key: keys identifying this frag. 
* @timer: queue expiration timer - * @list: hash bucket list + * @lock: spinlock protecting this frag * @refcnt: reference count of the queue * @fragments: received fragments head + * @rb_fragments: received fragments rb-tree root * @fragments_tail: received fragments tail + * @last_run_head: the head of the last "run". see ip_fragment.c * @stamp: timestamp of the last received fragment * @len: total length of the original datagram * @meat: length of received fragments so far * @flags: fragment queue flags * @max_size: maximum received fragment size * @net: namespace that this frag belongs to - * @list_evictor: list of queues to forcefully evict (e.g. due to low memory) + * @rcu: rcu head for freeing deferall */ struct inet_frag_queue { - spinlock_t lock; + struct rhash_head node; + union { + struct frag_v4_compare_key v4; + struct frag_v6_compare_key v6; + } key; struct timer_list timer; - struct hlist_node list; + spinlock_t lock; refcount_t refcnt; - struct sk_buff *fragments; + struct sk_buff *fragments; /* Used in IPv6. */ + struct rb_root rb_fragments; /* Used in IPv4. */ struct sk_buff *fragments_tail; + struct sk_buff *last_run_head; ktime_t stamp; int len; int meat; __u8 flags; u16 max_size; - struct netns_frags *net; - struct hlist_node list_evictor; -}; - -#define INETFRAGS_HASHSZ 1024 - -/* averaged: - * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ / - * rounded up (SKB_TRUELEN(0) + sizeof(struct ipq or - * struct frag_queue)) - */ -#define INETFRAGS_MAXDEPTH 128 - -struct inet_frag_bucket { - struct hlist_head chain; - spinlock_t chain_lock; + struct netns_frags *net; + struct rcu_head rcu; }; struct inet_frags { - struct inet_frag_bucket hash[INETFRAGS_HASHSZ]; - - struct work_struct frags_work; - unsigned int next_bucket; - unsigned long last_rebuild_jiffies; - bool rebuild; - - /* The first call to hashfn is responsible to initialize - * rnd. This is best done with net_get_random_once. - * - * rnd_seqlock is used to let hash insertion detect - * when it needs to re-lookup the hash chain to use. 
- */ - u32 rnd; - seqlock_t rnd_seqlock; unsigned int qsize; - unsigned int (*hashfn)(const struct inet_frag_queue *); - bool (*match)(const struct inet_frag_queue *q, - const void *arg); void (*constructor)(struct inet_frag_queue *q, const void *arg); void (*destructor)(struct inet_frag_queue *); - void (*frag_expire)(unsigned long data); + void (*frag_expire)(struct timer_list *t); struct kmem_cache *frags_cachep; const char *frags_cache_name; + struct rhashtable_params rhash_params; }; int inet_frags_init(struct inet_frags *); void inet_frags_fini(struct inet_frags *); -static inline void inet_frags_init_net(struct netns_frags *nf) +static inline int inet_frags_init_net(struct netns_frags *nf) { - atomic_set(&nf->mem, 0); + atomic_long_set(&nf->mem, 0); + return rhashtable_init(&nf->rhashtable, &nf->f->rhash_params); } -void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f); +void inet_frags_exit_net(struct netns_frags *nf); -void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f); -void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f); -struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, - struct inet_frags *f, void *key, unsigned int hash); +void inet_frag_kill(struct inet_frag_queue *q); +void inet_frag_destroy(struct inet_frag_queue *q); +struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key); -void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q, - const char *prefix); +/* Free all skbs in the queue; return the sum of their truesizes. */ +unsigned int inet_frag_rbtree_purge(struct rb_root *root); -static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f) +static inline void inet_frag_put(struct inet_frag_queue *q) { if (refcount_dec_and_test(&q->refcnt)) - inet_frag_destroy(q, f); -} - -static inline bool inet_frag_evicting(struct inet_frag_queue *q) -{ - return !hlist_unhashed(&q->list_evictor); + inet_frag_destroy(q); } /* Memory Tracking Functions. 
*/ -static inline int frag_mem_limit(struct netns_frags *nf) -{ - return atomic_read(&nf->mem); -} - -static inline void sub_frag_mem_limit(struct netns_frags *nf, int i) +static inline long frag_mem_limit(const struct netns_frags *nf) { - atomic_sub(i, &nf->mem); + return atomic_long_read(&nf->mem); } -static inline void add_frag_mem_limit(struct netns_frags *nf, int i) +static inline void sub_frag_mem_limit(struct netns_frags *nf, long val) { - atomic_add(i, &nf->mem); + atomic_long_sub(val, &nf->mem); } -static inline int sum_frag_mem_limit(struct netns_frags *nf) +static inline void add_frag_mem_limit(struct netns_frags *nf, long val) { - return atomic_read(&nf->mem); + atomic_long_add(val, &nf->mem); } /* RFC 3168 support : diff --git a/include/net/ip.h b/include/net/ip.h index 81da1123fc8ea0d6eed89e5f117c0c9be5198982..7c430343176afe769d3e885ca1d3bd5176541c3b 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -570,7 +570,6 @@ static inline struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *s return skb; } #endif -int ip_frag_mem(struct net *net); /* * Functions provided by ip_forward.c diff --git a/include/net/ipv6.h b/include/net/ipv6.h index e59f385da38e409cbcbe78cb7a756b6dee4ab3f9..fa87a62e9bd3b5c0aa132e169689553afd6e727b 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -313,14 +313,7 @@ struct ipv6_txoptions *ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt, int newtype, - struct ipv6_opt_hdr __user *newopt, - int newoptlen); -struct ipv6_txoptions * -ipv6_renew_options_kern(struct sock *sk, - struct ipv6_txoptions *opt, - int newtype, - struct ipv6_opt_hdr *newopt, - int newoptlen); + struct ipv6_opt_hdr *newopt); struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space, struct ipv6_txoptions *opt); @@ -338,13 +331,6 @@ static inline bool ipv6_accept_ra(struct inet6_dev *idev) idev->cnf.accept_ra; } -#if IS_ENABLED(CONFIG_IPV6) -static inline int ip6_frag_mem(struct net *net) -{ - return sum_frag_mem_limit(&net->ipv6.frags); -} -#endif - #define IPV6_FRAG_HIGH_THRESH (4 * 1024*1024) /* 4194304 */ #define IPV6_FRAG_LOW_THRESH (3 * 1024*1024) /* 3145728 */ #define IPV6_FRAG_TIMEOUT (60 * HZ) /* 60 seconds */ @@ -538,17 +524,8 @@ enum ip6_defrag_users { __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX, }; -struct ip6_create_arg { - __be32 id; - u32 user; - const struct in6_addr *src; - const struct in6_addr *dst; - int iif; - u8 ecn; -}; - void ip6_frag_init(struct inet_frag_queue *q, const void *a); -bool ip6_frag_match(const struct inet_frag_queue *q, const void *a); +extern const struct rhashtable_params ip6_rhash_params; /* * Equivalent of ipv4 struct ip @@ -556,19 +533,13 @@ bool ip6_frag_match(const struct inet_frag_queue *q, const void *a); struct frag_queue { struct inet_frag_queue q; - __be32 id; /* fragment id */ - u32 user; - struct in6_addr saddr; - struct in6_addr daddr; - int iif; unsigned int csum; __u16 nhoffset; u8 ecn; }; -void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq, - struct inet_frags *frags); +void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq); static inline bool ipv6_addr_any(const struct in6_addr *a) { diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 049008493faf67902365570a16f26233a1d9a6ca..f4bf75fac349ca66f1345854406c64bb71a74af4 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -120,6 +120,7 @@ struct net { #endif #if 
IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) struct netns_nf_frag nf_frag; + struct ctl_table_header *nf_frag_frags_hdr; #endif struct sock *nfnl; struct sock *nfnl_stash; diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h index dc825a5ddd7f4cc67ffb2bb51d3e04e2697e5cef..c004d051c2d3fbd85c2583fe2812a125b3d45807 100644 --- a/include/net/netns/ipv6.h +++ b/include/net/netns/ipv6.h @@ -94,7 +94,6 @@ struct netns_ipv6 { #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) struct netns_nf_frag { - struct netns_sysctl_ipv6 sysctl; struct netns_frags frags; }; #endif diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h index 316694dafa5bae75363e34c035906e2d937c33c6..008f466d1da7ea60c7b46dc4b5133e8ed257c9c3 100644 --- a/include/net/nfc/hci.h +++ b/include/net/nfc/hci.h @@ -87,7 +87,7 @@ struct nfc_hci_pipe { * According to specification 102 622 chapter 4.4 Pipes, * the pipe identifier is 7 bits long. */ -#define NFC_HCI_MAX_PIPES 127 +#define NFC_HCI_MAX_PIPES 128 struct nfc_hci_init_data { u8 gate_count; struct nfc_hci_gate gates[NFC_HCI_MAX_CUSTOM_GATES]; diff --git a/include/net/tc_act/tc_tunnel_key.h b/include/net/tc_act/tc_tunnel_key.h index efef0b4b1b2bddc76095bcd4d02ebaaa3b2beb56..46b8c7f1c8d5273791df55eeb6345807d8812e96 100644 --- a/include/net/tc_act/tc_tunnel_key.h +++ b/include/net/tc_act/tc_tunnel_key.h @@ -18,7 +18,6 @@ struct tcf_tunnel_key_params { struct rcu_head rcu; int tcft_action; - int action; struct metadata_dst *tcft_enc_metadata; }; diff --git a/include/net/tcp.h b/include/net/tcp.h index 686e33ea76e7fe109c2e5b32ed9b3daea1252c67..0c828aac7e0445998604e8ddf7f5da4888addd36 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -938,8 +938,6 @@ enum tcp_ca_event { CA_EVENT_LOSS, /* loss timeout */ CA_EVENT_ECN_NO_CE, /* ECT set, but not CE marked */ CA_EVENT_ECN_IS_CE, /* received CE marked IP packet */ - CA_EVENT_DELAYED_ACK, /* Delayed ack is sent */ - CA_EVENT_NON_DELAYED_ACK, }; /* Information about inbound ACK, passed to cong_ops->in_ack_event() */ @@ -2065,6 +2063,10 @@ int tcp_set_ulp(struct sock *sk, const char *name); void tcp_get_available_ulp(char *buf, size_t len); void tcp_cleanup_ulp(struct sock *sk); +#define MODULE_ALIAS_TCP_ULP(name) \ + __MODULE_INFO(alias, alias_userspace, name); \ + __MODULE_INFO(alias, alias_tcp_ulp, "tcp-ulp-" name) + /* Call BPF_SOCK_OPS program that returns an int. 
If the return value * is < 0, then the BPF op failed (for example if the loaded BPF * program does not support the chosen operation or there is no BPF diff --git a/include/net/tls.h b/include/net/tls.h index 48940a883d9a379487af5eb00d59b95620145e83..86ed3dd80fe7092467f37566cd80589114cd9b07 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -79,11 +79,13 @@ enum { TLS_PENDING_CLOSED_RECORD }; +union tls_crypto_context { + struct tls_crypto_info info; + struct tls12_crypto_info_aes_gcm_128 aes_gcm_128; +}; + struct tls_context { - union { - struct tls_crypto_info crypto_send; - struct tls12_crypto_info_aes_gcm_128 crypto_send_aes_gcm_128; - }; + union tls_crypto_context crypto_send; void *priv_ctx; @@ -208,8 +210,8 @@ static inline void tls_fill_prepend(struct tls_context *ctx, * size KTLS_DTLS_HEADER_SIZE + KTLS_DTLS_NONCE_EXPLICIT_SIZE */ buf[0] = record_type; - buf[1] = TLS_VERSION_MINOR(ctx->crypto_send.version); - buf[2] = TLS_VERSION_MAJOR(ctx->crypto_send.version); + buf[1] = TLS_VERSION_MINOR(ctx->crypto_send.info.version); + buf[2] = TLS_VERSION_MAJOR(ctx->crypto_send.info.version); /* we can use IV for nonce explicit according to spec */ buf[3] = pkt_len >> 8; buf[4] = pkt_len & 0xFF; diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index ac71559314e731c079e2a2544290ff1200b3c40b..9eae13eefc49eab841dfbb6bee693aaadc81d300 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -898,13 +898,13 @@ struct ethtool_rx_flow_spec { static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie) { return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie; -}; +} static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie) { return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >> ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; -}; +} /** * struct ethtool_rxnfc - command to get or set RX flow classification rules diff --git a/include/uapi/linux/nbd.h b/include/uapi/linux/nbd.h index 85a3fb65e40a6f3941337c7fad17e7fdff3b33d0..20d6cc91435df90f08741c478ab29ea85efa7167 100644 --- a/include/uapi/linux/nbd.h +++ b/include/uapi/linux/nbd.h @@ -53,6 +53,9 @@ enum { /* These are client behavior specific flags. */ #define NBD_CFLAG_DESTROY_ON_DISCONNECT (1 << 0) /* delete the nbd device on disconnect. */ +#define NBD_CFLAG_DISCONNECT_ON_CLOSE (1 << 1) /* disconnect the nbd device on + * close by last opener. + */ /* userspace doesn't need the nbd_device structure */ diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h index 0d941cdd8e8c0533259aa4df53b0e3be92f8cf00..f5d753e60836ebd63f06b03ca07d1c49771f5892 100644 --- a/include/uapi/linux/snmp.h +++ b/include/uapi/linux/snmp.h @@ -56,6 +56,7 @@ enum IPSTATS_MIB_ECT1PKTS, /* InECT1Pkts */ IPSTATS_MIB_ECT0PKTS, /* InECT0Pkts */ IPSTATS_MIB_CEPKTS, /* InCEPkts */ + IPSTATS_MIB_REASM_OVERLAPS, /* ReasmOverlaps */ __IPSTATS_MIB_MAX }; diff --git a/include/video/udlfb.h b/include/video/udlfb.h index 1252a7a89bc02f602473896fc5b7f5e149db5cf8..85e32ee739fc34521de2a27954b021ee4cfd389c 100644 --- a/include/video/udlfb.h +++ b/include/video/udlfb.h @@ -88,7 +88,7 @@ struct dlfb_data { #define MIN_RAW_PIX_BYTES 2 #define MIN_RAW_CMD_BYTES (RAW_HEADER_BYTES + MIN_RAW_PIX_BYTES) -#define DL_DEFIO_WRITE_DELAY 5 /* fb_deferred_io.delay in jiffies */ +#define DL_DEFIO_WRITE_DELAY msecs_to_jiffies(HZ <= 300 ? 
4 : 10) /* optimal value for 720p video */ #define DL_DEFIO_WRITE_DISABLE (HZ*60) /* "disable" with long delay */ /* remove these once align.h patch is taken into kernel */ diff --git a/ipc/sem.c b/ipc/sem.c index b2698ebdcb31e65ea2f3e94f64cc567607b8ca2a..d6dd2dc9ddad3c579ddab5d5b8e2730465fc9238 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -2041,7 +2041,7 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops, } do { - queue.status = -EINTR; + WRITE_ONCE(queue.status, -EINTR); queue.sleeper = current; __set_current_state(TASK_INTERRUPTIBLE); diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index 9eb8b3511636e96e0607be2484119823b1034fb5..4a98f6e314a9b3124a6d15371d4d906407799977 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -419,6 +419,13 @@ int audit_add_watch(struct audit_krule *krule, struct list_head **list) struct path parent_path; int h, ret = 0; + /* + * When we will be calling audit_add_to_parent, krule->watch might have + * been updated and watch might have been freed. + * So we need to keep a reference of watch. + */ + audit_get_watch(watch); + mutex_unlock(&audit_filter_mutex); /* Avoid calling path_lookup under audit_filter_mutex. */ @@ -427,8 +434,10 @@ int audit_add_watch(struct audit_krule *krule, struct list_head **list) /* caller expects mutex locked */ mutex_lock(&audit_filter_mutex); - if (ret) + if (ret) { + audit_put_watch(watch); return ret; + } /* either find an old parent or attach a new one */ parent = audit_find_parent(d_backing_inode(parent_path.dentry)); @@ -446,6 +455,7 @@ int audit_add_watch(struct audit_krule *krule, struct list_head **list) *list = &audit_inode_hash[h]; error: path_put(&parent_path); + audit_put_watch(watch); return ret; } diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 6533f08d1238e136895a5cf0665be31d7b23df51..3d0ecc273cc64cb9cefcb791256543414b93c088 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -730,13 +730,15 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, * old element will be freed immediately. 
* Otherwise return an error */ - atomic_dec(&htab->count); - return ERR_PTR(-E2BIG); + l_new = ERR_PTR(-E2BIG); + goto dec_count; } l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN, htab->map.numa_node); - if (!l_new) - return ERR_PTR(-ENOMEM); + if (!l_new) { + l_new = ERR_PTR(-ENOMEM); + goto dec_count; + } } memcpy(l_new->key, key, key_size); @@ -749,7 +751,8 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, GFP_ATOMIC | __GFP_NOWARN); if (!pptr) { kfree(l_new); - return ERR_PTR(-ENOMEM); + l_new = ERR_PTR(-ENOMEM); + goto dec_count; } } @@ -763,6 +766,9 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, l_new->hash = hash; return l_new; +dec_count: + atomic_dec(&htab->count); + return l_new; } static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old, diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c index 53a4787c08d80d88544953560a7a955895c16490..20eaddfa691c2eb7a4c63c39d2b8ccb4582a50bd 100644 --- a/kernel/bpf/sockmap.c +++ b/kernel/bpf/sockmap.c @@ -313,12 +313,15 @@ out: static void smap_write_space(struct sock *sk) { struct smap_psock *psock; + void (*write_space)(struct sock *sk); rcu_read_lock(); psock = smap_psock_sk(sk); if (likely(psock && test_bit(SMAP_TX_RUNNING, &psock->state))) schedule_work(&psock->tx_work); + write_space = psock->save_write_space; rcu_read_unlock(); + write_space(sk); } static void smap_stop_sock(struct smap_psock *psock, struct sock *sk) diff --git a/kernel/cpu.c b/kernel/cpu.c index 7278b33a503d57594ba94a9212e787b9bfc9b4d5..7d777b62e4eba0141472cd6b1c860c045ddb1abc 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -666,15 +666,15 @@ static void cpuhp_thread_fun(unsigned int cpu) bool bringup = st->bringup; enum cpuhp_state state; + if (WARN_ON_ONCE(!st->should_run)) + return; + /* * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures * that if we see ->should_run we also see the rest of the state. */ smp_mb(); - if (WARN_ON_ONCE(!st->should_run)) - return; - cpuhp_lock_acquire(bringup); if (st->single) { @@ -999,7 +999,8 @@ static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL); if (ret) { st->target = prev_state; - undo_cpu_down(cpu, st); + if (st->state < prev_state) + undo_cpu_down(cpu, st); break; } } @@ -1052,7 +1053,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen, * to do the further cleanups. */ ret = cpuhp_down_callbacks(cpu, st, target); - if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) { + if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) { cpuhp_reset_state(st, prev_state); __cpuhp_kick_ap(st); } diff --git a/kernel/events/core.c b/kernel/events/core.c index 178d9c5feb627e812c63d8b04078fd3343351995..676a408873411da5a1008421ea8232f4a660d47d 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -5700,6 +5700,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size, unsigned long sp; unsigned int rem; u64 dyn_size; + mm_segment_t fs; /* * We dump: @@ -5717,7 +5718,10 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size, /* Data. 
*/ sp = perf_user_stack_pointer(regs); + fs = get_fs(); + set_fs(USER_DS); rem = __output_copy_user(handle, (void *) sp, dump_size); + set_fs(fs); dyn_size = dump_size - rem; perf_output_skip(handle, rem); diff --git a/kernel/fork.c b/kernel/fork.c index 72700b643f9e0f4d96c76f197b23e1a1f92b55c6..bc849ac60aa6f5fd3376b8501c35521b9773cf98 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1388,7 +1388,9 @@ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk) return -ENOMEM; atomic_set(&sig->count, 1); + spin_lock_irq(¤t->sighand->siglock); memcpy(sig->action, current->sighand->action, sizeof(sig->action)); + spin_unlock_irq(¤t->sighand->siglock); return 0; } diff --git a/kernel/kprobes.c b/kernel/kprobes.c index a66e838640eaedb7fd84f08366b527730cee0d29..5c90765d37e77d373c8a7a2da0c5e40b26884411 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -2531,7 +2531,7 @@ static int __init debugfs_kprobe_init(void) if (!dir) return -ENOMEM; - file = debugfs_create_file("list", 0444, dir, NULL, + file = debugfs_create_file("list", 0400, dir, NULL, &debugfs_kprobes_operations); if (!file) goto error; @@ -2541,7 +2541,7 @@ static int __init debugfs_kprobe_init(void) if (!file) goto error; - file = debugfs_create_file("blacklist", 0444, dir, NULL, + file = debugfs_create_file("blacklist", 0400, dir, NULL, &debugfs_kprobe_blacklist_ops); if (!file) goto error; diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index bf8c8fd72589ddeeae34662a8d76352afa890678..7c51f065b212cb37e2d4531e05f1f1fb2faf2312 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -605,6 +605,9 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func) if (!func->old_name || !func->new_func) return -EINVAL; + if (strlen(func->old_name) >= KSYM_NAME_LEN) + return -EINVAL; + INIT_LIST_HEAD(&func->stack_node); func->patched = false; func->transition = false; @@ -678,6 +681,9 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj) if (!obj->funcs) return -EINVAL; + if (klp_is_module(obj) && strlen(obj->name) >= MODULE_NAME_LEN) + return -EINVAL; + obj->patched = false; obj->mod = NULL; diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index aad05e4b228c37f46ca3b28534f80a2a228821d6..def51a27f20f7e6c31269b2ce16384d1cc87b0a3 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -1296,11 +1296,11 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class) this.parent = NULL; this.class = class; - local_irq_save(flags); + raw_local_irq_save(flags); arch_spin_lock(&lockdep_lock); ret = __lockdep_count_forward_deps(&this); arch_spin_unlock(&lockdep_lock); - local_irq_restore(flags); + raw_local_irq_restore(flags); return ret; } @@ -1323,11 +1323,11 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class) this.parent = NULL; this.class = class; - local_irq_save(flags); + raw_local_irq_save(flags); arch_spin_lock(&lockdep_lock); ret = __lockdep_count_backward_deps(&this); arch_spin_unlock(&lockdep_lock); - local_irq_restore(flags); + raw_local_irq_restore(flags); return ret; } @@ -4480,7 +4480,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len) if (unlikely(!debug_locks)) return; - local_irq_save(flags); + raw_local_irq_save(flags); for (i = 0; i < curr->lockdep_depth; i++) { hlock = curr->held_locks + i; @@ -4491,7 +4491,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len) print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock); 
break; } - local_irq_restore(flags); + raw_local_irq_restore(flags); } EXPORT_SYMBOL_GPL(debug_check_no_locks_freed); diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index bac3fb580af6572b91a025e279e7d655c9e96549..3dfa01cf425584e56bf422d097e420f0f805de22 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -2036,6 +2036,29 @@ static int __sched rt_mutex_lock_state(struct rt_mutex *lock, int state) return ret; } +static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass) +{ + might_sleep(); + + mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_); + rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock); +} + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +/** + * rt_mutex_lock_nested - lock a rt_mutex + * + * @lock: the rt_mutex to be locked + * @subclass: the lockdep subclass + */ +void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass) +{ + __rt_mutex_lock(lock, subclass); +} +EXPORT_SYMBOL_GPL(rt_mutex_lock_nested); +#endif + +#ifndef CONFIG_DEBUG_LOCK_ALLOC /** * rt_mutex_lock - lock a rt_mutex * @@ -2046,6 +2069,7 @@ void __sched rt_mutex_lock(struct rt_mutex *lock) rt_mutex_lock_state(lock, TASK_UNINTERRUPTIBLE); } EXPORT_SYMBOL_GPL(rt_mutex_lock); +#endif /** * rt_mutex_lock_interruptible - lock a rt_mutex interruptible diff --git a/kernel/memremap.c b/kernel/memremap.c index 4712ce646e04b5690c59232a947a733143980e32..2b136d4988f78a481566ac4ce37213517e2c60f8 100644 --- a/kernel/memremap.c +++ b/kernel/memremap.c @@ -248,13 +248,16 @@ int device_private_entry_fault(struct vm_area_struct *vma, EXPORT_SYMBOL(device_private_entry_fault); #endif /* CONFIG_DEVICE_PRIVATE */ -static void pgmap_radix_release(struct resource *res) +static void pgmap_radix_release(struct resource *res, unsigned long end_pgoff) { unsigned long pgoff, order; mutex_lock(&pgmap_lock); - foreach_order_pgoff(res, order, pgoff) + foreach_order_pgoff(res, order, pgoff) { + if (pgoff >= end_pgoff) + break; radix_tree_delete(&pgmap_radix, PHYS_PFN(res->start) + pgoff); + } mutex_unlock(&pgmap_lock); synchronize_rcu(); @@ -309,7 +312,7 @@ static void devm_memremap_pages_release(struct device *dev, void *data) mem_hotplug_done(); untrack_pfn(NULL, PHYS_PFN(align_start), align_size); - pgmap_radix_release(res); + pgmap_radix_release(res, -1); dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc, "%s: failed to free all reserved pages\n", __func__); } @@ -459,7 +462,7 @@ void *devm_memremap_pages(struct device *dev, struct resource *res, untrack_pfn(NULL, PHYS_PFN(align_start), align_size); err_pfn_remap: err_radix: - pgmap_radix_release(res); + pgmap_radix_release(res, pgoff); devres_free(page_map); return ERR_PTR(error); } diff --git a/kernel/module.c b/kernel/module.c index 321b0b1f87e7a9359e5b7d37023f693e22c9caed..2a44c515f0d74694c4d44f42ee53eb6bc8479aff 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -4058,7 +4058,7 @@ static unsigned long mod_find_symname(struct module *mod, const char *name) for (i = 0; i < kallsyms->num_symtab; i++) if (strcmp(name, symname(kallsyms, i)) == 0 && - kallsyms->symtab[i].st_info != 'U') + kallsyms->symtab[i].st_shndx != SHN_UNDEF) return kallsyms->symtab[i].st_value; return 0; } @@ -4104,6 +4104,10 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, if (mod->state == MODULE_STATE_UNFORMED) continue; for (i = 0; i < kallsyms->num_symtab; i++) { + + if (kallsyms->symtab[i].st_shndx == SHN_UNDEF) + continue; + ret = fn(data, symname(kallsyms, i), mod, kallsyms->symtab[i].st_value); if 
(ret != 0) diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index e8517b63eb372eaa14b37bc32b616630e25766e1..dd2b5a4d89a59e34036302b166f55d1bedb93b8f 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig @@ -105,6 +105,7 @@ config PM_SLEEP def_bool y depends on SUSPEND || HIBERNATE_CALLBACKS select PM + select SRCU config PM_SLEEP_SMP def_bool y diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h index 2a7d04049af49a6a99dbec3dabd7c6c8a5614dbd..0f1898820cbaf2295f85f01664713418e75a3312 100644 --- a/kernel/printk/internal.h +++ b/kernel/printk/internal.h @@ -19,11 +19,16 @@ #ifdef CONFIG_PRINTK #define PRINTK_SAFE_CONTEXT_MASK 0x3fffffff -#define PRINTK_NMI_DEFERRED_CONTEXT_MASK 0x40000000 +#define PRINTK_NMI_DIRECT_CONTEXT_MASK 0x40000000 #define PRINTK_NMI_CONTEXT_MASK 0x80000000 extern raw_spinlock_t logbuf_lock; +__printf(5, 0) +int vprintk_store(int facility, int level, + const char *dict, size_t dictlen, + const char *fmt, va_list args); + __printf(1, 0) int vprintk_default(const char *fmt, va_list args); __printf(1, 0) int vprintk_deferred(const char *fmt, va_list args); __printf(1, 0) int vprintk_func(const char *fmt, va_list args); @@ -54,6 +59,8 @@ void __printk_safe_exit(void); local_irq_enable(); \ } while (0) +void defer_console_output(void); + #else __printf(1, 0) int vprintk_func(const char *fmt, va_list args) { return 0; } diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 7393fcb271fb4ba5dedf04e62c9e0dc1c048a92b..26382c029b6b71912f3c8ba8e0f9205655aecad2 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -1773,35 +1773,16 @@ static size_t log_output(int facility, int level, enum log_flags lflags, const c return log_store(facility, level, lflags, 0, dict, dictlen, text, text_len); } -asmlinkage int vprintk_emit(int facility, int level, - const char *dict, size_t dictlen, - const char *fmt, va_list args) +/* Must be called under logbuf_lock. */ +int vprintk_store(int facility, int level, + const char *dict, size_t dictlen, + const char *fmt, va_list args) { static char textbuf[LOG_LINE_MAX]; char *text = textbuf; size_t text_len; enum log_flags lflags = 0; - unsigned long flags; - int printed_len; - bool in_sched = false; - - /* - * Fall back to early_printk if a debugging subsystem has - * killed printk output - */ - if (unlikely(forced_early_printk(fmt, args))) - return 1; - - if (level == LOGLEVEL_SCHED) { - level = LOGLEVEL_DEFAULT; - in_sched = true; - } - boot_delay_msec(level); - printk_delay(); - - /* This stops the holder of console_sem just where we want him */ - logbuf_lock_irqsave(flags); /* * The printf needs to come first; we need the syslog * prefix which might be passed-in as a parameter. 
@@ -1842,8 +1823,29 @@ asmlinkage int vprintk_emit(int facility, int level, if (dict) lflags |= LOG_PREFIX|LOG_NEWLINE; - printed_len = log_output(facility, level, lflags, dict, dictlen, text, text_len); + return log_output(facility, level, lflags, + dict, dictlen, text, text_len); +} +asmlinkage int vprintk_emit(int facility, int level, + const char *dict, size_t dictlen, + const char *fmt, va_list args) +{ + int printed_len; + bool in_sched = false; + unsigned long flags; + + if (level == LOGLEVEL_SCHED) { + level = LOGLEVEL_DEFAULT; + in_sched = true; + } + + boot_delay_msec(level); + printk_delay(); + + /* This stops the holder of console_sem just where we want him */ + logbuf_lock_irqsave(flags); + printed_len = vprintk_store(facility, level, dict, dictlen, fmt, args); logbuf_unlock_irqrestore(flags); /* If called from the scheduler, we can not call up(). */ @@ -2814,16 +2816,20 @@ void wake_up_klogd(void) preempt_enable(); } -int vprintk_deferred(const char *fmt, va_list args) +void defer_console_output(void) { - int r; - - r = vprintk_emit(0, LOGLEVEL_SCHED, NULL, 0, fmt, args); - preempt_disable(); __this_cpu_or(printk_pending, PRINTK_PENDING_OUTPUT); irq_work_queue(this_cpu_ptr(&wake_up_klogd_work)); preempt_enable(); +} + +int vprintk_deferred(const char *fmt, va_list args) +{ + int r; + + r = vprintk_emit(0, LOGLEVEL_SCHED, NULL, 0, fmt, args); + defer_console_output(); return r; } diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c index 64825b2df3a5fcbf0efab285c931d06527d19da6..64f8046586b685763b91975048cb5a9b768e0834 100644 --- a/kernel/printk/printk_safe.c +++ b/kernel/printk/printk_safe.c @@ -309,26 +309,35 @@ static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args) return printk_safe_log_store(s, fmt, args); } -void printk_nmi_enter(void) +void notrace printk_nmi_enter(void) { - /* - * The size of the extra per-CPU buffer is limited. Use it only when - * the main one is locked. If this CPU is not in the safe context, - * the lock must be taken on another CPU and we could wait for it. - */ - if ((this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK) && - raw_spin_is_locked(&logbuf_lock)) { - this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK); - } else { - this_cpu_or(printk_context, PRINTK_NMI_DEFERRED_CONTEXT_MASK); - } + this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK); +} + +void notrace printk_nmi_exit(void) +{ + this_cpu_and(printk_context, ~PRINTK_NMI_CONTEXT_MASK); +} + +/* + * Marks a code that might produce many messages in NMI context + * and the risk of losing them is more critical than eventual + * reordering. + * + * It has effect only when called in NMI context. Then printk() + * will try to store the messages into the main logbuf directly + * and use the per-CPU buffers only as a fallback when the lock + * is not available. + */ +void printk_nmi_direct_enter(void) +{ + if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK) + this_cpu_or(printk_context, PRINTK_NMI_DIRECT_CONTEXT_MASK); } -void printk_nmi_exit(void) +void printk_nmi_direct_exit(void) { - this_cpu_and(printk_context, - ~(PRINTK_NMI_CONTEXT_MASK | - PRINTK_NMI_DEFERRED_CONTEXT_MASK)); + this_cpu_and(printk_context, ~PRINTK_NMI_DIRECT_CONTEXT_MASK); } #else @@ -366,6 +375,20 @@ void __printk_safe_exit(void) __printf(1, 0) int vprintk_func(const char *fmt, va_list args) { + /* + * Try to use the main logbuf even in NMI. But avoid calling console + * drivers that might have their own locks. 
+ */ + if ((this_cpu_read(printk_context) & PRINTK_NMI_DIRECT_CONTEXT_MASK) && + raw_spin_trylock(&logbuf_lock)) { + int len; + + len = vprintk_store(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args); + raw_spin_unlock(&logbuf_lock); + defer_console_output(); + return len; + } + /* Use extra buffer in NMI when logbuf_lock is taken or in safe mode. */ if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK) return vprintk_nmi(fmt, args); @@ -374,13 +397,6 @@ __printf(1, 0) int vprintk_func(const char *fmt, va_list args) if (this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK) return vprintk_safe(fmt, args); - /* - * Use the main logbuf when logbuf_lock is available in NMI. - * But avoid calling console drivers that might have their own locks. - */ - if (this_cpu_read(printk_context) & PRINTK_NMI_DEFERRED_CONTEXT_MASK) - return vprintk_deferred(fmt, args); - /* No obstacles. */ return vprintk_default(fmt, args); } diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 24e69803cd14f638ca5084e7a479271071ed9d64..28a75a9526ace52c36e1112fe2338319a679f5ea 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -1365,6 +1365,10 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se, update_dl_entity(dl_se, pi_se); } else if (flags & ENQUEUE_REPLENISH) { replenish_dl_entity(dl_se, pi_se); + } else if ((flags & ENQUEUE_RESTORE) && + dl_time_before(dl_se->deadline, + rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) { + setup_new_dl_entity(dl_se); } __enqueue_dl_entity(dl_se); @@ -2256,13 +2260,6 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p) return; } - /* - * If p is boosted we already updated its params in - * rt_mutex_setprio()->enqueue_task(..., ENQUEUE_REPLENISH), - * p's deadline being now already after rq_clock(rq). - */ - if (dl_time_before(p->dl.deadline, rq_clock(rq))) - setup_new_dl_entity(&p->dl); if (rq->curr != p) { #ifdef CONFIG_SMP diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 36ef77839be464f528e75880f5f375473858edbd..13a6f2f6bcf2b3fd98b0a271bb5b03decc1dda60 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -757,11 +757,12 @@ static void attach_entity_cfs_rq(struct sched_entity *se); * To solve this problem, we also cap the util_avg of successive tasks to * only 1/2 of the left utilization budget: * - * util_avg_cap = (1024 - cfs_rq->avg.util_avg) / 2^n + * util_avg_cap = (cpu_scale - cfs_rq->avg.util_avg) / 2^n * - * where n denotes the nth task. + * where n denotes the nth task and cpu_scale the CPU capacity. * - * For example, a simplest series from the beginning would be like: + * For example, for a CPU with 1024 of capacity, a simplest series from + * the beginning would be like: * * task util_avg: 512, 256, 128, 64, 32, 16, 8, ... * cfs_rq util_avg: 512, 768, 896, 960, 992, 1008, 1016, ... @@ -773,7 +774,8 @@ void post_init_entity_util_avg(struct sched_entity *se) { struct cfs_rq *cfs_rq = cfs_rq_of(se); struct sched_avg *sa = &se->avg; - long cap = (long)(SCHED_CAPACITY_SCALE - cfs_rq->avg.util_avg) / 2; + long cpu_scale = arch_scale_cpu_capacity(NULL, cpu_of(rq_of(cfs_rq))); + long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2; if (cap > 0) { if (cfs_rq->avg.util_avg != 0) { @@ -9135,7 +9137,8 @@ static inline bool vruntime_normalized(struct task_struct *p) * - A task which has been woken up by try_to_wake_up() and * waiting for actually being woken up by sched_ttwu_pending(). 
*/ - if (!se->sum_exec_runtime || p->state == TASK_WAKING) + if (!se->sum_exec_runtime || + (p->state == TASK_WAKING && p->sched_remote_wakeup)) return true; return false; diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index b2478126d1093bbbbebfb00341300ccbd99a2c12..6c72332dab3f215ba1939b0e5be560d8edc1ec14 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -837,6 +837,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun) * can be time-consuming. Try to avoid it when possible. */ raw_spin_lock(&rt_rq->rt_runtime_lock); + if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF) + rt_rq->rt_runtime = rt_b->rt_runtime; skip = !rt_rq->rt_time && !rt_rq->rt_nr_running; raw_spin_unlock(&rt_rq->rt_runtime_lock); if (skip) diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c index 929ecb7d6b78a70f4ec4548a582f78c749d7d3ee..e29608464382156984cdc2cb850ca8f178fef243 100644 --- a/kernel/sched/wait.c +++ b/kernel/sched/wait.c @@ -395,35 +395,36 @@ static inline bool is_kthread_should_stop(void) * if (condition) * break; * - * p->state = mode; condition = true; - * smp_mb(); // A smp_wmb(); // C - * if (!wq_entry->flags & WQ_FLAG_WOKEN) wq_entry->flags |= WQ_FLAG_WOKEN; - * schedule() try_to_wake_up(); - * p->state = TASK_RUNNING; ~~~~~~~~~~~~~~~~~~ - * wq_entry->flags &= ~WQ_FLAG_WOKEN; condition = true; - * smp_mb() // B smp_wmb(); // C - * wq_entry->flags |= WQ_FLAG_WOKEN; - * } - * remove_wait_queue(&wq_head, &wait); + * // in wait_woken() // in woken_wake_function() * + * p->state = mode; wq_entry->flags |= WQ_FLAG_WOKEN; + * smp_mb(); // A try_to_wake_up(): + * if (!(wq_entry->flags & WQ_FLAG_WOKEN)) + * schedule() if (p->state & mode) + * p->state = TASK_RUNNING; p->state = TASK_RUNNING; + * wq_entry->flags &= ~WQ_FLAG_WOKEN; ~~~~~~~~~~~~~~~~~~ + * smp_mb(); // B condition = true; + * } smp_mb(); // C + * remove_wait_queue(&wq_head, &wait); wq_entry->flags |= WQ_FLAG_WOKEN; */ long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout) { - set_current_state(mode); /* A */ /* - * The above implies an smp_mb(), which matches with the smp_wmb() from - * woken_wake_function() such that if we observe WQ_FLAG_WOKEN we must - * also observe all state before the wakeup. + * The below executes an smp_mb(), which matches with the full barrier + * executed by the try_to_wake_up() in woken_wake_function() such that + * either we see the store to wq_entry->flags in woken_wake_function() + * or woken_wake_function() sees our store to current->state. */ + set_current_state(mode); /* A */ if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop()) timeout = schedule_timeout(timeout); __set_current_state(TASK_RUNNING); /* - * The below implies an smp_mb(), it too pairs with the smp_wmb() from - * woken_wake_function() such that we must either observe the wait - * condition being true _OR_ WQ_FLAG_WOKEN such that we will not miss - * an event. + * The below executes an smp_mb(), which matches with the smp_mb() (C) + * in woken_wake_function() such that either we see the wait condition + * being true or the store to wq_entry->flags in woken_wake_function() + * follows ours in the coherence order. 
*/ smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */ @@ -433,14 +434,8 @@ EXPORT_SYMBOL(wait_woken); int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key) { - /* - * Although this function is called under waitqueue lock, LOCK - * doesn't imply write barrier and the users expects write - * barrier semantics on wakeup functions. The following - * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up() - * and is paired with smp_store_mb() in wait_woken(). - */ - smp_wmb(); /* C */ + /* Pairs with the smp_store_mb() in wait_woken(). */ + smp_mb(); /* C */ wq_entry->flags |= WQ_FLAG_WOKEN; return default_wake_function(wq_entry, mode, sync, key); diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index 36059808b0f7c002aeecf2b47731ea69cc0876d3..56f2f2e0122971534d08f25caabfd0764a8f8d35 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c @@ -81,6 +81,7 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work) unsigned long flags; bool enabled; + preempt_disable(); raw_spin_lock_irqsave(&stopper->lock, flags); enabled = stopper->enabled; if (enabled) @@ -90,6 +91,7 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work) raw_spin_unlock_irqrestore(&stopper->lock, flags); wake_up_q(&wakeq); + preempt_enable(); return enabled; } @@ -236,13 +238,24 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1, struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2); DEFINE_WAKE_Q(wakeq); int err; + retry: + /* + * The waking up of stopper threads has to happen in the same + * scheduling context as the queueing. Otherwise, there is a + * possibility of one of the above stoppers being woken up by another + * CPU, and preempting us. This will cause us to not wake up the other + * stopper forever. + */ + preempt_disable(); raw_spin_lock_irq(&stopper1->lock); raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING); - err = -ENOENT; - if (!stopper1->enabled || !stopper2->enabled) + if (!stopper1->enabled || !stopper2->enabled) { + err = -ENOENT; goto unlock; + } + /* * Ensure that if we race with __stop_cpus() the stoppers won't get * queued up in reverse order leading to system deadlock. @@ -253,36 +266,30 @@ retry: * It can be falsely true but it is safe to spin until it is cleared, * queue_stop_cpus_work() does everything under preempt_disable(). */ - err = -EDEADLK; - if (unlikely(stop_cpus_in_progress)) - goto unlock; + if (unlikely(stop_cpus_in_progress)) { + err = -EDEADLK; + goto unlock; + } err = 0; __cpu_stop_queue_work(stopper1, work1, &wakeq); __cpu_stop_queue_work(stopper2, work2, &wakeq); - /* - * The waking up of stopper threads has to happen - * in the same scheduling context as the queueing. - * Otherwise, there is a possibility of one of the - * above stoppers being woken up by another CPU, - * and preempting us. This will cause us to n ot - * wake up the other stopper forever. 
- */ - preempt_disable(); + unlock: raw_spin_unlock(&stopper2->lock); raw_spin_unlock_irq(&stopper1->lock); if (unlikely(err == -EDEADLK)) { + preempt_enable(); + while (stop_cpus_in_progress) cpu_relax(); + goto retry; } - if (!err) { - wake_up_q(&wakeq); - preempt_enable(); - } + wake_up_q(&wakeq); + preempt_enable(); return err; } diff --git a/kernel/sys.c b/kernel/sys.c index de4ed027dfd74b9c337c25271b5f2ac3f70e3ef4..e25ec93aea22179a2bee37fb4316bb20ea61165c 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1176,18 +1176,19 @@ static int override_release(char __user *release, size_t len) SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name) { - int errno = 0; + struct new_utsname tmp; down_read(&uts_sem); - if (copy_to_user(name, utsname(), sizeof *name)) - errno = -EFAULT; + memcpy(&tmp, utsname(), sizeof(tmp)); up_read(&uts_sem); + if (copy_to_user(name, &tmp, sizeof(tmp))) + return -EFAULT; - if (!errno && override_release(name->release, sizeof(name->release))) - errno = -EFAULT; - if (!errno && override_architecture(name)) - errno = -EFAULT; - return errno; + if (override_release(name->release, sizeof(name->release))) + return -EFAULT; + if (override_architecture(name)) + return -EFAULT; + return 0; } #ifdef __ARCH_WANT_SYS_OLD_UNAME @@ -1196,55 +1197,46 @@ SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name) */ SYSCALL_DEFINE1(uname, struct old_utsname __user *, name) { - int error = 0; + struct old_utsname tmp; if (!name) return -EFAULT; down_read(&uts_sem); - if (copy_to_user(name, utsname(), sizeof(*name))) - error = -EFAULT; + memcpy(&tmp, utsname(), sizeof(tmp)); up_read(&uts_sem); + if (copy_to_user(name, &tmp, sizeof(tmp))) + return -EFAULT; - if (!error && override_release(name->release, sizeof(name->release))) - error = -EFAULT; - if (!error && override_architecture(name)) - error = -EFAULT; - return error; + if (override_release(name->release, sizeof(name->release))) + return -EFAULT; + if (override_architecture(name)) + return -EFAULT; + return 0; } SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name) { - int error; + struct oldold_utsname tmp = {}; if (!name) return -EFAULT; - if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname))) - return -EFAULT; down_read(&uts_sem); - error = __copy_to_user(&name->sysname, &utsname()->sysname, - __OLD_UTS_LEN); - error |= __put_user(0, name->sysname + __OLD_UTS_LEN); - error |= __copy_to_user(&name->nodename, &utsname()->nodename, - __OLD_UTS_LEN); - error |= __put_user(0, name->nodename + __OLD_UTS_LEN); - error |= __copy_to_user(&name->release, &utsname()->release, - __OLD_UTS_LEN); - error |= __put_user(0, name->release + __OLD_UTS_LEN); - error |= __copy_to_user(&name->version, &utsname()->version, - __OLD_UTS_LEN); - error |= __put_user(0, name->version + __OLD_UTS_LEN); - error |= __copy_to_user(&name->machine, &utsname()->machine, - __OLD_UTS_LEN); - error |= __put_user(0, name->machine + __OLD_UTS_LEN); + memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN); + memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN); + memcpy(&tmp.release, &utsname()->release, __OLD_UTS_LEN); + memcpy(&tmp.version, &utsname()->version, __OLD_UTS_LEN); + memcpy(&tmp.machine, &utsname()->machine, __OLD_UTS_LEN); up_read(&uts_sem); + if (copy_to_user(name, &tmp, sizeof(tmp))) + return -EFAULT; - if (!error && override_architecture(name)) - error = -EFAULT; - if (!error && override_release(name->release, sizeof(name->release))) - error = -EFAULT; - return error ? 
-EFAULT : 0; + if (override_architecture(name)) + return -EFAULT; + if (override_release(name->release, sizeof(name->release))) + return -EFAULT; + return 0; } #endif @@ -1258,17 +1250,18 @@ SYSCALL_DEFINE2(sethostname, char __user *, name, int, len) if (len < 0 || len > __NEW_UTS_LEN) return -EINVAL; - down_write(&uts_sem); errno = -EFAULT; if (!copy_from_user(tmp, name, len)) { - struct new_utsname *u = utsname(); + struct new_utsname *u; + down_write(&uts_sem); + u = utsname(); memcpy(u->nodename, tmp, len); memset(u->nodename + len, 0, sizeof(u->nodename) - len); errno = 0; uts_proc_notify(UTS_PROC_HOSTNAME); + up_write(&uts_sem); } - up_write(&uts_sem); return errno; } @@ -1276,8 +1269,9 @@ SYSCALL_DEFINE2(sethostname, char __user *, name, int, len) SYSCALL_DEFINE2(gethostname, char __user *, name, int, len) { - int i, errno; + int i; struct new_utsname *u; + char tmp[__NEW_UTS_LEN + 1]; if (len < 0) return -EINVAL; @@ -1286,11 +1280,11 @@ SYSCALL_DEFINE2(gethostname, char __user *, name, int, len) i = 1 + strlen(u->nodename); if (i > len) i = len; - errno = 0; - if (copy_to_user(name, u->nodename, i)) - errno = -EFAULT; + memcpy(tmp, u->nodename, i); up_read(&uts_sem); - return errno; + if (copy_to_user(name, tmp, i)) + return -EFAULT; + return 0; } #endif @@ -1309,17 +1303,18 @@ SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len) if (len < 0 || len > __NEW_UTS_LEN) return -EINVAL; - down_write(&uts_sem); errno = -EFAULT; if (!copy_from_user(tmp, name, len)) { - struct new_utsname *u = utsname(); + struct new_utsname *u; + down_write(&uts_sem); + u = utsname(); memcpy(u->domainname, tmp, len); memset(u->domainname + len, 0, sizeof(u->domainname) - len); errno = 0; uts_proc_notify(UTS_PROC_DOMAINNAME); + up_write(&uts_sem); } - up_write(&uts_sem); return errno; } diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c index 0c7227f893491a4381747702e3697d82ffddc16a..6020ee66e51769589dc82666f2c5165da549b16e 100644 --- a/kernel/time/alarmtimer.c +++ b/kernel/time/alarmtimer.c @@ -581,11 +581,11 @@ static void alarm_timer_rearm(struct k_itimer *timr) * @timr: Pointer to the posixtimer data struct * @now: Current time to forward the timer against */ -static int alarm_timer_forward(struct k_itimer *timr, ktime_t now) +static s64 alarm_timer_forward(struct k_itimer *timr, ktime_t now) { struct alarm *alarm = &timr->it.alarm.alarmtimer; - return (int) alarm_forward(alarm, timr->it_interval, now); + return alarm_forward(alarm, timr->it_interval, now); } /** @@ -808,7 +808,8 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags, /* Convert (if necessary) to absolute time */ if (flags != TIMER_ABSTIME) { ktime_t now = alarm_bases[type].gettime(); - exp = ktime_add(now, exp); + + exp = ktime_add_safe(now, exp); } ret = alarmtimer_do_nsleep(&alarm, exp, type); diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index 47d063c4ed03470801c62e3101ee95c8e810e6d3..c7b7d047d12e7872792b2f940f05bdf3ce2949b4 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c @@ -87,7 +87,7 @@ static void bump_cpu_timer(struct k_itimer *timer, u64 now) continue; timer->it.cpu.expires += incr; - timer->it_overrun += 1 << i; + timer->it_overrun += 1LL << i; delta -= incr; } } diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c index c5866984f12d3a081fca572fb04e48a1086c1d49..5a59538f3d16a8aaebbc3488004d41b2fe8c33c5 100644 --- a/kernel/time/posix-timers.c +++ b/kernel/time/posix-timers.c @@ -283,6 +283,17 @@ static __init 
int init_posix_timers(void) } __initcall(init_posix_timers); +/* + * The siginfo si_overrun field and the return value of timer_getoverrun(2) + * are of type int. Clamp the overrun value to INT_MAX + */ +static inline int timer_overrun_to_int(struct k_itimer *timr, int baseval) +{ + s64 sum = timr->it_overrun_last + (s64)baseval; + + return sum > (s64)INT_MAX ? INT_MAX : (int)sum; +} + static void common_hrtimer_rearm(struct k_itimer *timr) { struct hrtimer *timer = &timr->it.real.timer; @@ -290,9 +301,8 @@ static void common_hrtimer_rearm(struct k_itimer *timr) if (!timr->it_interval) return; - timr->it_overrun += (unsigned int) hrtimer_forward(timer, - timer->base->get_time(), - timr->it_interval); + timr->it_overrun += hrtimer_forward(timer, timer->base->get_time(), + timr->it_interval); hrtimer_restart(timer); } @@ -321,10 +331,10 @@ void posixtimer_rearm(struct siginfo *info) timr->it_active = 1; timr->it_overrun_last = timr->it_overrun; - timr->it_overrun = -1; + timr->it_overrun = -1LL; ++timr->it_requeue_pending; - info->si_overrun += timr->it_overrun_last; + info->si_overrun = timer_overrun_to_int(timr, info->si_overrun); } unlock_timer(timr, flags); @@ -418,9 +428,8 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer) now = ktime_add(now, kj); } #endif - timr->it_overrun += (unsigned int) - hrtimer_forward(timer, now, - timr->it_interval); + timr->it_overrun += hrtimer_forward(timer, now, + timr->it_interval); ret = HRTIMER_RESTART; ++timr->it_requeue_pending; timr->it_active = 1; @@ -526,7 +535,7 @@ static int do_timer_create(clockid_t which_clock, struct sigevent *event, new_timer->it_id = (timer_t) new_timer_id; new_timer->it_clock = which_clock; new_timer->kclock = kc; - new_timer->it_overrun = -1; + new_timer->it_overrun = -1LL; if (event) { rcu_read_lock(); @@ -647,11 +656,11 @@ static ktime_t common_hrtimer_remaining(struct k_itimer *timr, ktime_t now) return __hrtimer_expires_remaining_adjusted(timer, now); } -static int common_hrtimer_forward(struct k_itimer *timr, ktime_t now) +static s64 common_hrtimer_forward(struct k_itimer *timr, ktime_t now) { struct hrtimer *timer = &timr->it.real.timer; - return (int)hrtimer_forward(timer, now, timr->it_interval); + return hrtimer_forward(timer, now, timr->it_interval); } /* @@ -791,7 +800,7 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id) if (!timr) return -EINVAL; - overrun = timr->it_overrun_last; + overrun = timer_overrun_to_int(timr, 0); unlock_timer(timr, flags); return overrun; diff --git a/kernel/time/posix-timers.h b/kernel/time/posix-timers.h index 151e28f5bf304ce134897038639c53998adae4d8..ddb21145211a0280f9b20667a33b9503ce62d776 100644 --- a/kernel/time/posix-timers.h +++ b/kernel/time/posix-timers.h @@ -19,7 +19,7 @@ struct k_clock { void (*timer_get)(struct k_itimer *timr, struct itimerspec64 *cur_setting); void (*timer_rearm)(struct k_itimer *timr); - int (*timer_forward)(struct k_itimer *timr, ktime_t now); + s64 (*timer_forward)(struct k_itimer *timr, ktime_t now); ktime_t (*timer_remaining)(struct k_itimer *timr, ktime_t now); int (*timer_try_to_cancel)(struct k_itimer *timr); void (*timer_arm)(struct k_itimer *timr, ktime_t expires, diff --git a/kernel/time/timer.c b/kernel/time/timer.c index ff1d60d4c0ccfa4fd77d59dfedbd67bbfd33d12d..5426875bc4e51ec7b371b55e513e635d3c7ec220 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -1660,6 +1660,22 @@ static inline void __run_timers(struct timer_base *base) raw_spin_lock_irq(&base->lock); + /* + * timer_base::must_forward_clk must be 
cleared before running + * timers so that any timer functions that call mod_timer() will + * not try to forward the base. Idle tracking / clock forwarding + * logic is only used with BASE_STD timers. + * + * The must_forward_clk flag is cleared unconditionally also for + * the deferrable base. The deferrable base is not affected by idle + * tracking and never forwarded, so clearing the flag is a NOOP. + * + * The fact that the deferrable base is never forwarded can cause + * large variations in granularity for deferrable timers, but they + * can be deferred for long periods due to idle anyway. + */ + base->must_forward_clk = false; + while (time_after_eq(jiffies, base->clk)) { levels = collect_expired_timers(base, heads); diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index e73dcab8e9f0da86c021803066f4d6c20ab74804..71a8ee6e60dca654973713c4a263fa1685d9cd4e 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -1809,6 +1809,10 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev, mutex_lock(&q->blk_trace_mutex); if (attr == &dev_attr_enable) { + if (!!value == !!q->blk_trace) { + ret = 0; + goto out_unlock_bdev; + } if (value) ret = blk_trace_setup_queue(q, bdev); else diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 75fea9321ffb341f5b56572dfae687dfd7ed839e..e8ca1e01facd59acf73fc89b57f9e75799bb15c0 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -1517,6 +1517,8 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages) tmp_iter_page = first_page; do { + cond_resched(); + to_remove_page = tmp_iter_page; rb_inc_page(cpu_buffer, &tmp_iter_page); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 21c3b817034337d528b7df65f96fc2e856b4879d..4fc60e5ec4b925f3e968fe9adb0a55fb7afea254 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -2970,6 +2970,7 @@ out_nobuffer: } EXPORT_SYMBOL_GPL(trace_vbprintk); +__printf(3, 0) static int __trace_array_vprintk(struct ring_buffer *buffer, unsigned long ip, const char *fmt, va_list args) @@ -3024,12 +3025,14 @@ out_nobuffer: return len; } +__printf(3, 0) int trace_array_vprintk(struct trace_array *tr, unsigned long ip, const char *fmt, va_list args) { return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args); } +__printf(3, 0) int trace_array_printk(struct trace_array *tr, unsigned long ip, const char *fmt, ...) { @@ -3045,6 +3048,7 @@ int trace_array_printk(struct trace_array *tr, return ret; } +__printf(3, 4) int trace_array_printk_buf(struct ring_buffer *buffer, unsigned long ip, const char *fmt, ...) 
{ @@ -3060,6 +3064,7 @@ int trace_array_printk_buf(struct ring_buffer *buffer, return ret; } +__printf(2, 0) int trace_vprintk(unsigned long ip, const char *fmt, va_list args) { return trace_array_vprintk(&global_trace, ip, fmt, args); @@ -7633,7 +7638,9 @@ rb_simple_write(struct file *filp, const char __user *ubuf, if (buffer) { mutex_lock(&trace_types_lock); - if (val) { + if (!!val == tracer_tracing_is_on(tr)) { + val = 0; /* do nothing */ + } else if (val) { tracer_tracing_on(tr); if (tr->current_trace->start) tr->current_trace->start(tr); @@ -8284,6 +8291,7 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) tracing_off(); local_irq_save(flags); + printk_nmi_direct_enter(); /* Simulate the iterator */ trace_init_global_iter(&iter); @@ -8363,7 +8371,8 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) for_each_tracing_cpu(cpu) { atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); } - atomic_dec(&dump_running); + atomic_dec(&dump_running); + printk_nmi_direct_exit(); local_irq_restore(flags); } EXPORT_SYMBOL_GPL(ftrace_dump); diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 2db5a19833edaa8efabbca8ee9d22f6af050c7df..2ccfbb8efeb2e152e75cf201f95549ee5dddaf1d 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -967,7 +967,7 @@ probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file) list_del_rcu(&link->list); /* synchronize with u{,ret}probe_trace_func */ - synchronize_sched(); + synchronize_rcu(); kfree(link); if (!list_empty(&tu->tp.files)) diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index c490f1e4313b998a60686b98fbdf6b0167753e11..ed80a88980f0f842c6cd429b4281ce8391f80f9e 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c @@ -650,7 +650,16 @@ static ssize_t map_write(struct file *file, const char __user *buf, unsigned idx; struct uid_gid_extent *extent = NULL; char *kbuf = NULL, *pos, *next_line; - ssize_t ret = -EINVAL; + ssize_t ret; + + /* Only allow < page size writes at the beginning of the file */ + if ((*ppos != 0) || (count >= PAGE_SIZE)) + return -EINVAL; + + /* Slurp in the user data */ + kbuf = memdup_user_nul(buf, count); + if (IS_ERR(kbuf)) + return PTR_ERR(kbuf); /* * The userns_state_mutex serializes all writes to any given map. 
@@ -684,19 +693,6 @@ static ssize_t map_write(struct file *file, const char __user *buf, if (cap_valid(cap_setid) && !file_ns_capable(file, ns, CAP_SYS_ADMIN)) goto out; - /* Only allow < page size writes at the beginning of the file */ - ret = -EINVAL; - if ((*ppos != 0) || (count >= PAGE_SIZE)) - goto out; - - /* Slurp in the user data */ - kbuf = memdup_user_nul(buf, count); - if (IS_ERR(kbuf)) { - ret = PTR_ERR(kbuf); - kbuf = NULL; - goto out; - } - /* Parse the user data */ ret = -EINVAL; pos = kbuf; diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c index 233cd8fc691082363d6c324f1555aa1dd1f33c21..258033d62cb3a4ae8da270df003bb8444f409eeb 100644 --- a/kernel/utsname_sysctl.c +++ b/kernel/utsname_sysctl.c @@ -18,7 +18,7 @@ #ifdef CONFIG_PROC_SYSCTL -static void *get_uts(struct ctl_table *table, int write) +static void *get_uts(struct ctl_table *table) { char *which = table->data; struct uts_namespace *uts_ns; @@ -26,21 +26,9 @@ static void *get_uts(struct ctl_table *table, int write) uts_ns = current->nsproxy->uts_ns; which = (which - (char *)&init_uts_ns) + (char *)uts_ns; - if (!write) - down_read(&uts_sem); - else - down_write(&uts_sem); return which; } -static void put_uts(struct ctl_table *table, int write, void *which) -{ - if (!write) - up_read(&uts_sem); - else - up_write(&uts_sem); -} - /* * Special case of dostring for the UTS structure. This has locks * to observe. Should this be in kernel/sys.c ???? @@ -50,13 +38,34 @@ static int proc_do_uts_string(struct ctl_table *table, int write, { struct ctl_table uts_table; int r; + char tmp_data[__NEW_UTS_LEN + 1]; + memcpy(&uts_table, table, sizeof(uts_table)); - uts_table.data = get_uts(table, write); + uts_table.data = tmp_data; + + /* + * Buffer the value in tmp_data so that proc_dostring() can be called + * without holding any locks. + * We also need to read the original value in the write==1 case to + * support partial writes. + */ + down_read(&uts_sem); + memcpy(tmp_data, get_uts(table), sizeof(tmp_data)); + up_read(&uts_sem); r = proc_dostring(&uts_table, write, buffer, lenp, ppos); - put_uts(table, write, uts_table.data); - if (write) + if (write) { + /* + * Write back the new value. + * Note that, since we dropped uts_sem, the result can + * theoretically be incorrect if there are two parallel writes + * at non-zero offsets to the same sysctl. + */ + down_write(&uts_sem); + memcpy(get_uts(table), tmp_data, sizeof(tmp_data)); + up_write(&uts_sem); proc_sys_poll_notify(table->poll); + } return r; } diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 1c6188a154813ed796ce688beba2c0ad3a8623ee..ea4c09109ce4f0e585efaa775dacbbd5a5ad3b31 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -265,7 +265,7 @@ static void __touch_watchdog(void) * entering idle state. This should only be used for scheduler events. * Use touch_softlockup_watchdog() for everything else. */ -void touch_softlockup_watchdog_sched(void) +notrace void touch_softlockup_watchdog_sched(void) { /* * Preemption can be enabled. 
It doesn't matter which CPU's timestamp @@ -274,7 +274,7 @@ void touch_softlockup_watchdog_sched(void) raw_cpu_write(watchdog_touch_ts, 0); } -void touch_softlockup_watchdog(void) +notrace void touch_softlockup_watchdog(void) { touch_softlockup_watchdog_sched(); wq_watchdog_touch(raw_smp_processor_id()); diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c index 5cdc09411b85d4e3528b9d4d2eac3cc9a55db8b5..210dccc57c0445fd2b7a9d2a57bf08c241c47b46 100644 --- a/kernel/watchdog_hld.c +++ b/kernel/watchdog_hld.c @@ -31,7 +31,7 @@ static struct cpumask dead_events_mask; static unsigned long hardlockup_allcpu_dumped; static atomic_t watchdog_cpus = ATOMIC_INIT(0); -void arch_touch_nmi_watchdog(void) +notrace void arch_touch_nmi_watchdog(void) { /* * Using __raw here because some code paths have diff --git a/kernel/workqueue.c b/kernel/workqueue.c index f3f7afecb6868559aba72cba69e9a1de2ecff77e..76297cce56024461c8d7fdb44d73096b22c5deb5 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -5530,7 +5530,7 @@ static void wq_watchdog_timer_fn(unsigned long data) mod_timer(&wq_watchdog_timer, jiffies + thresh); } -void wq_watchdog_touch(int cpu) +notrace void wq_watchdog_touch(int cpu) { if (cpu >= 0) per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies; diff --git a/lib/debugobjects.c b/lib/debugobjects.c index d789930edcc2c318e53c71d23ffd0bd2f83c1d2b..161da6c6e1731b159d55a44ff0905a905ae23d50 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c @@ -322,9 +322,12 @@ static void debug_object_is_on_stack(void *addr, int onstack) limit++; if (is_on_stack) - pr_warn("object is on stack, but not annotated\n"); + pr_warn("object %p is on stack %p, but NOT annotated.\n", addr, + task_stack_page(current)); else - pr_warn("object is not on stack, but annotated\n"); + pr_warn("object %p is NOT on stack %p, but annotated.\n", addr, + task_stack_page(current)); + WARN_ON(1); } diff --git a/lib/klist.c b/lib/klist.c index 0507fa5d84c534917d0842a453bdbcaba386422a..f6b547812fe3de34a408e3ce64fce23084dedf34 100644 --- a/lib/klist.c +++ b/lib/klist.c @@ -336,8 +336,9 @@ struct klist_node *klist_prev(struct klist_iter *i) void (*put)(struct klist_node *) = i->i_klist->put; struct klist_node *last = i->i_cur; struct klist_node *prev; + unsigned long flags; - spin_lock(&i->i_klist->k_lock); + spin_lock_irqsave(&i->i_klist->k_lock, flags); if (last) { prev = to_klist_node(last->n_node.prev); @@ -356,7 +357,7 @@ struct klist_node *klist_prev(struct klist_iter *i) prev = to_klist_node(prev->n_node.prev); } - spin_unlock(&i->i_klist->k_lock); + spin_unlock_irqrestore(&i->i_klist->k_lock, flags); if (put && last) put(last); @@ -377,8 +378,9 @@ struct klist_node *klist_next(struct klist_iter *i) void (*put)(struct klist_node *) = i->i_klist->put; struct klist_node *last = i->i_cur; struct klist_node *next; + unsigned long flags; - spin_lock(&i->i_klist->k_lock); + spin_lock_irqsave(&i->i_klist->k_lock, flags); if (last) { next = to_klist_node(last->n_node.next); @@ -397,7 +399,7 @@ struct klist_node *klist_next(struct klist_iter *i) next = to_klist_node(next->n_node.next); } - spin_unlock(&i->i_klist->k_lock); + spin_unlock_irqrestore(&i->i_klist->k_lock, flags); if (put && last) put(last); diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c index 46e4c749e4eba64da193d0e1f221fa65eee4802a..70b1f9d830cda984c4801abd0278f4b2528a02fa 100644 --- a/lib/nmi_backtrace.c +++ b/lib/nmi_backtrace.c @@ -87,11 +87,9 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask, bool nmi_cpu_backtrace(struct pt_regs *regs) { - static 
arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED; int cpu = smp_processor_id(); if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) { - arch_spin_lock(&lock); if (regs && cpu_in_idle(instruction_pointer(regs))) { pr_warn("NMI backtrace for cpu %d skipped: idling at pc %#lx\n", cpu, instruction_pointer(regs)); @@ -102,7 +100,6 @@ bool nmi_cpu_backtrace(struct pt_regs *regs) else dump_stack(); } - arch_spin_unlock(&lock); cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask)); return true; } diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 39215c724fc72213a6fa2fda6f07186956300605..cebbcec877d71a96ce2ce110a8c60fe67a5c1306 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -364,6 +364,7 @@ static int rhashtable_rehash_table(struct rhashtable *ht) err = rhashtable_rehash_chain(ht, old_hash); if (err) return err; + cond_resched(); } /* Publish the new table pointer. */ @@ -1073,6 +1074,7 @@ void rhashtable_free_and_destroy(struct rhashtable *ht, for (i = 0; i < tbl->size; i++) { struct rhash_head *pos, *next; + cond_resched(); for (pos = rht_dereference(*rht_bucket(tbl, i), ht), next = !rht_is_a_nulls(pos) ? rht_dereference(pos->next, ht) : NULL; diff --git a/mm/debug.c b/mm/debug.c index 6726bec731c956e904450a9f67e22fc7be8f049c..c55abc893fdcfb5c48c9ebefc9c3bc34a35ccd15 100644 --- a/mm/debug.c +++ b/mm/debug.c @@ -100,7 +100,7 @@ EXPORT_SYMBOL(dump_vma); void dump_mm(const struct mm_struct *mm) { - pr_emerg("mm %p mmap %p seqnum %d task_size %lu\n" + pr_emerg("mm %p mmap %p seqnum %llu task_size %lu\n" #ifdef CONFIG_MMU "get_unmapped_area %p\n" #endif @@ -128,7 +128,7 @@ void dump_mm(const struct mm_struct *mm) "tlb_flush_pending %d\n" "def_flags: %#lx(%pGv)\n", - mm, mm->mmap, mm->vmacache_seqnum, mm->task_size, + mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size, #ifdef CONFIG_MMU mm->get_unmapped_area, #endif diff --git a/mm/fadvise.c b/mm/fadvise.c index 767887f5f3bfd79b493a731dbc467d96545f8494..3f5f68ad57080f6909e8fcc91a516d946da5d519 100644 --- a/mm/fadvise.c +++ b/mm/fadvise.c @@ -71,8 +71,12 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice) goto out; } - /* Careful about overflows. Len == 0 means "as much as possible" */ - endbyte = offset + len; + /* + * Careful about overflows. Len == 0 means "as much as possible". Use + * unsigned math because signed overflows are undefined and UBSan + * complains. 
+ */ + endbyte = (u64)offset + (u64)len; if (!len || endbyte < len) endbyte = -1; else diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index d90f29a166d89e8d6182ff84ea44451ff64a9fb3..71a4319256b6fe999c60e8ff6536e0067558df64 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c @@ -613,12 +613,13 @@ void kasan_kfree_large(const void *ptr) int kasan_module_alloc(void *addr, size_t size) { void *ret; + size_t scaled_size; size_t shadow_size; unsigned long shadow_start; shadow_start = (unsigned long)kasan_mem_to_shadow(addr); - shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT, - PAGE_SIZE); + scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT; + shadow_size = round_up(scaled_size, PAGE_SIZE); if (WARN_ON(!PAGE_ALIGNED(shadow_start))) return -EINVAL; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 0e08c9b7d7047820b19f2fca1f865d372bf7cb20..3cc2977301034c063b41819d38b7fc81f7bb2d9a 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -4113,6 +4113,14 @@ static struct cftype mem_cgroup_legacy_files[] = { static DEFINE_IDR(mem_cgroup_idr); +static void mem_cgroup_id_remove(struct mem_cgroup *memcg) +{ + if (memcg->id.id > 0) { + idr_remove(&mem_cgroup_idr, memcg->id.id); + memcg->id.id = 0; + } +} + static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n) { VM_BUG_ON(atomic_read(&memcg->id.ref) <= 0); @@ -4123,8 +4131,7 @@ static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) { VM_BUG_ON(atomic_read(&memcg->id.ref) < n); if (atomic_sub_and_test(n, &memcg->id.ref)) { - idr_remove(&mem_cgroup_idr, memcg->id.id); - memcg->id.id = 0; + mem_cgroup_id_remove(memcg); /* Memcg ID pins CSS */ css_put(&memcg->css); @@ -4261,8 +4268,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void) idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); return memcg; fail: - if (memcg->id.id > 0) - idr_remove(&mem_cgroup_idr, memcg->id.id); + mem_cgroup_id_remove(memcg); __mem_cgroup_free(memcg); return NULL; } @@ -4321,6 +4327,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) return &memcg->css; fail: + mem_cgroup_id_remove(memcg); mem_cgroup_free(memcg); return ERR_PTR(-ENOMEM); } diff --git a/mm/memory.c b/mm/memory.c index 5539b19750915c6bf6d877a5b6df8d48637eac3c..93d5d324904b32c2c5fd6f5cee7885ecd82c6070 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -246,9 +246,6 @@ static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb) tlb_flush(tlb); mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end); -#ifdef CONFIG_HAVE_RCU_TABLE_FREE - tlb_table_flush(tlb); -#endif __tlb_reset_range(tlb); } @@ -256,6 +253,9 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb) { struct mmu_gather_batch *batch; +#ifdef CONFIG_HAVE_RCU_TABLE_FREE + tlb_table_flush(tlb); +#endif for (batch = &tlb->local; batch && batch->nr; batch = batch->next) { free_pages_and_swap_cache(batch->pages, batch->nr); batch->nr = 0; @@ -331,6 +331,21 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_ * See the comment near struct mmu_table_batch. */ +/* + * If we want tlb_remove_table() to imply TLB invalidates. + */ +static inline void tlb_table_invalidate(struct mmu_gather *tlb) +{ +#ifdef CONFIG_HAVE_RCU_TABLE_INVALIDATE + /* + * Invalidate page-table caches used by hardware walkers. Then we still + * need to RCU-sched wait while freeing the pages because software + * walkers can still be in-flight. 
+ */ + tlb_flush_mmu_tlbonly(tlb); +#endif +} + static void tlb_remove_table_smp_sync(void *arg) { /* Simply deliver the interrupt */ @@ -367,6 +382,7 @@ void tlb_table_flush(struct mmu_gather *tlb) struct mmu_table_batch **batch = &tlb->batch; if (*batch) { + tlb_table_invalidate(tlb); call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu); *batch = NULL; } @@ -376,23 +392,16 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table) { struct mmu_table_batch **batch = &tlb->batch; - /* - * When there's less then two users of this mm there cannot be a - * concurrent page-table walk. - */ - if (atomic_read(&tlb->mm->mm_users) < 2) { - __tlb_remove_table(table); - return; - } - if (*batch == NULL) { *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN); if (*batch == NULL) { + tlb_table_invalidate(tlb); tlb_remove_table_one(table); return; } (*batch)->nr = 0; } + (*batch)->tables[(*batch)->nr++] = table; if ((*batch)->nr == MAX_TABLE_BATCH) tlb_table_flush(tlb); @@ -1417,11 +1426,9 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb, do { next = pmd_addr_end(addr, end); if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) { - if (next - addr != HPAGE_PMD_SIZE) { - VM_BUG_ON_VMA(vma_is_anonymous(vma) && - !rwsem_is_locked(&tlb->mm->mmap_sem), vma); + if (next - addr != HPAGE_PMD_SIZE) __split_huge_pmd(vma, pmd, addr, false, NULL); - } else if (zap_huge_pmd(tlb, vma, pmd, addr)) + else if (zap_huge_pmd(tlb, vma, pmd, addr)) goto next; /* fall through */ } @@ -4350,6 +4357,9 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, return -EINVAL; maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot); + if (!maddr) + return -ENOMEM; + if (write) memcpy_toio(maddr + offset, buf, len); else diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 47ba2c6ce0ac366436ce4ad21a20bf1659232eb4..2112167d02054e3e32a2473d3151a7a46630c8be 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -7588,6 +7588,7 @@ int alloc_contig_range(unsigned long start, unsigned long end, .zone = page_zone(pfn_to_page(start)), .mode = MIGRATE_SYNC, .ignore_skip_hint = true, + .gfp_mask = current_gfp_context(gfp_mask), }; INIT_LIST_HEAD(&cc.migratepages); diff --git a/mm/readahead.c b/mm/readahead.c index c4ca702392333288448cc7ae4ba6b410ddb93fef..59aa0d06f25468c7578b132dba69f20e81525f87 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -380,6 +380,7 @@ ondemand_readahead(struct address_space *mapping, { struct backing_dev_info *bdi = inode_to_bdi(mapping->host); unsigned long max_pages = ra->ra_pages; + unsigned long add_pages; pgoff_t prev_offset; /* @@ -469,10 +470,17 @@ readit: * Will this read hit the readahead marker made by itself? * If so, trigger the readahead marker hit now, and merge * the resulted next readahead window into the current one. + * Take care of maximum IO pages as above. 
*/ if (offset == ra->start && ra->size == ra->async_size) { - ra->async_size = get_next_ra_size(ra, max_pages); - ra->size += ra->async_size; + add_pages = get_next_ra_size(ra, max_pages); + if (ra->size + add_pages <= max_pages) { + ra->async_size = add_pages; + ra->size += add_pages; + } else { + ra->size = max_pages; + ra->async_size = max_pages >> 1; + } } return ra_submit(ra, mapping, filp); diff --git a/mm/shmem.c b/mm/shmem.c index f383298b7280afb0cdfcb936cdf332d7a822d6cf..ea786a504e1b9c854a929e1b8cc4f8289fe983aa 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2207,6 +2207,8 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode mpol_shared_policy_init(&info->policy, NULL); break; } + + lockdep_annotate_inode_mutex_key(inode); } else shmem_free_inode(sb); return inode; diff --git a/mm/slub.c b/mm/slub.c index 13bb67ee32e83b28534eb961f880166627abe257..9b337c28dd1fa25f37961b0bc959167debac5aa6 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1836,7 +1836,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n, { struct page *page, *page2; void *object = NULL; - int available = 0; + unsigned int available = 0; int objects; /* @@ -5018,10 +5018,10 @@ static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf) static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf, size_t length) { - unsigned long objects; + unsigned int objects; int err; - err = kstrtoul(buf, 10, &objects); + err = kstrtouint(buf, 10, &objects); if (err) return err; if (objects && !kmem_cache_has_cpu_partial(s)) diff --git a/mm/vmacache.c b/mm/vmacache.c index db7596eb6132e8118b73e9c851563cde4a6d376a..f1729617dc85c0fb39c629e1a5d6e2cc2660b9f1 100644 --- a/mm/vmacache.c +++ b/mm/vmacache.c @@ -7,44 +7,6 @@ #include #include -/* - * Flush vma caches for threads that share a given mm. - * - * The operation is safe because the caller holds the mmap_sem - * exclusively and other threads accessing the vma cache will - * have mmap_sem held at least for read, so no extra locking - * is required to maintain the vma cache. - */ -void vmacache_flush_all(struct mm_struct *mm) -{ - struct task_struct *g, *p; - - count_vm_vmacache_event(VMACACHE_FULL_FLUSHES); - - /* - * Single threaded tasks need not iterate the entire - * list of process. We can avoid the flushing as well - * since the mm's seqnum was increased and don't have - * to worry about other threads' seqnum. Current's - * flush will occur upon the next lookup. - */ - if (atomic_read(&mm->mm_users) == 1) - return; - - rcu_read_lock(); - for_each_process_thread(g, p) { - /* - * Only flush the vmacache pointers as the - * mm seqnum is already set and curr's will - * be set upon invalidation when the next - * lookup is done. - */ - if (mm == p->mm) - vmacache_flush(p); - } - rcu_read_unlock(); -} - /* * This task may be accessing a foreign mm via (for example) * get_user_pages()->find_vma(). 
The vmacache is task-local and this diff --git a/mm/zswap.c b/mm/zswap.c index 597008a44f7082cc2256e37599ae5fd6517ba6a8..ebb0bc88c5f72c0550c93c1af34634ef3eef353b 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -989,6 +989,15 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset, ret = -ENOMEM; goto reject; } + + /* A second zswap_is_full() check after + * zswap_shrink() to make sure it's now + * under the max_pool_percent + */ + if (zswap_is_full()) { + ret = -ENOMEM; + goto reject; + } } /* allocate entry */ diff --git a/net/6lowpan/iphc.c b/net/6lowpan/iphc.c index 6b1042e216565a85ecc1b1e6301172750c07a8ae..52fad5dad9f71599ac8130ebe0efb560b6600945 100644 --- a/net/6lowpan/iphc.c +++ b/net/6lowpan/iphc.c @@ -770,6 +770,7 @@ int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev, hdr.hop_limit, &hdr.daddr); skb_push(skb, sizeof(hdr)); + skb_reset_mac_header(skb); skb_reset_network_header(skb); skb_copy_to_linear_data(skb, &hdr, sizeof(hdr)); diff --git a/net/9p/client.c b/net/9p/client.c index b433aff5ff13ce9e10cddc204f97f9f19ae6c979..3ec5a82929b23d10aa3ccad572021aa2797984b2 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -955,7 +955,7 @@ static int p9_client_version(struct p9_client *c) { int err = 0; struct p9_req_t *req; - char *version; + char *version = NULL; int msize; p9_debug(P9_DEBUG_9P, ">>> TVERSION msize %d protocol %d\n", diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c index 985046ae42312e86505d6fded2fb56501c38536b..a9c65f13b7f5164e9a852cf5053d7094edaddee1 100644 --- a/net/9p/trans_fd.c +++ b/net/9p/trans_fd.c @@ -185,6 +185,8 @@ static void p9_mux_poll_stop(struct p9_conn *m) spin_lock_irqsave(&p9_poll_lock, flags); list_del_init(&m->poll_pending_link); spin_unlock_irqrestore(&p9_poll_lock, flags); + + flush_work(&p9_poll_work); } /** @@ -197,15 +199,14 @@ static void p9_mux_poll_stop(struct p9_conn *m) static void p9_conn_cancel(struct p9_conn *m, int err) { struct p9_req_t *req, *rtmp; - unsigned long flags; LIST_HEAD(cancel_list); p9_debug(P9_DEBUG_ERROR, "mux %p err %d\n", m, err); - spin_lock_irqsave(&m->client->lock, flags); + spin_lock(&m->client->lock); if (m->err) { - spin_unlock_irqrestore(&m->client->lock, flags); + spin_unlock(&m->client->lock); return; } @@ -217,7 +218,6 @@ static void p9_conn_cancel(struct p9_conn *m, int err) list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) { list_move(&req->req_list, &cancel_list); } - spin_unlock_irqrestore(&m->client->lock, flags); list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) { p9_debug(P9_DEBUG_ERROR, "call back req %p\n", req); @@ -226,6 +226,7 @@ static void p9_conn_cancel(struct p9_conn *m, int err) req->t_err = err; p9_client_cb(m->client, req, REQ_STATUS_ERROR); } + spin_unlock(&m->client->lock); } static int @@ -383,8 +384,9 @@ static void p9_read_work(struct work_struct *work) if (m->req->status != REQ_STATUS_ERROR) status = REQ_STATUS_RCVD; list_del(&m->req->req_list); - spin_unlock(&m->client->lock); + /* update req->status while holding client->lock */ p9_client_cb(m->client, m->req, status); + spin_unlock(&m->client->lock); m->rc.sdata = NULL; m->rc.offset = 0; m->rc.capacity = 0; @@ -951,7 +953,7 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args) if (err < 0) return err; - if (valid_ipaddr4(addr) < 0) + if (addr == NULL || valid_ipaddr4(addr) < 0) return -EINVAL; csocket = NULL; @@ -1001,6 +1003,9 @@ p9_fd_create_unix(struct p9_client *client, const char *addr, char *args) csocket = NULL; + if (addr == NULL) + 
return -EINVAL; + if (strlen(addr) >= UNIX_PATH_MAX) { pr_err("%s (%d): address too long: %s\n", __func__, task_pid_nr(current), addr); diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c index 6d8e3031978f3493edb123a51b58b30b1c2807c5..f58467a490903b166b263a054ec009c6499d640c 100644 --- a/net/9p/trans_rdma.c +++ b/net/9p/trans_rdma.c @@ -646,6 +646,9 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args) struct rdma_conn_param conn_param; struct ib_qp_init_attr qp_attr; + if (addr == NULL) + return -EINVAL; + /* Parse the transport specific mount options */ err = parse_opts(args, &opts); if (err < 0) diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index 3aa5a93ad107c1490146240a112e3a9bf3a89b62..e73fd647065a1a597a41fad86a256f6b19ff4fdf 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c @@ -189,7 +189,7 @@ static int pack_sg_list(struct scatterlist *sg, int start, s = rest_of_page(data); if (s > count) s = count; - BUG_ON(index > limit); + BUG_ON(index >= limit); /* Make sure we don't terminate early. */ sg_unmark_end(&sg[index]); sg_set_buf(&sg[index++], data, s); @@ -234,6 +234,7 @@ pack_sg_list_p(struct scatterlist *sg, int start, int limit, s = PAGE_SIZE - data_off; if (s > count) s = count; + BUG_ON(index >= limit); /* Make sure we don't terminate early. */ sg_unmark_end(&sg[index]); sg_set_page(&sg[index++], pdata[i++], s, data_off); @@ -406,6 +407,7 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req, p9_debug(P9_DEBUG_TRANS, "virtio request\n"); if (uodata) { + __le32 sz; int n = p9_get_mapped_pages(chan, &out_pages, uodata, outlen, &offs, &need_drop); if (n < 0) @@ -416,6 +418,12 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req, memcpy(&req->tc->sdata[req->tc->size - 4], &v, 4); outlen = n; } + /* The size field of the message must include the length of the + * header and the length of the data. We didn't actually know + * the length of the data until this point so add it in now. 
+ */ + sz = cpu_to_le32(req->tc->size + outlen); + memcpy(&req->tc->sdata[0], &sz, sizeof(sz)); } else if (uidata) { int n = p9_get_mapped_pages(chan, &in_pages, uidata, inlen, &offs, &need_drop); @@ -563,7 +571,7 @@ static int p9_virtio_probe(struct virtio_device *vdev) chan->vq = virtio_find_single_vq(vdev, req_done, "requests"); if (IS_ERR(chan->vq)) { err = PTR_ERR(chan->vq); - goto out_free_vq; + goto out_free_chan; } chan->vq->vdev->priv = chan; spin_lock_init(&chan->lock); @@ -616,6 +624,7 @@ out_free_tag: kfree(tag); out_free_vq: vdev->config->del_vqs(vdev); +out_free_chan: kfree(chan); fail: return err; @@ -643,6 +652,9 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args) int ret = -ENOENT; int found = 0; + if (devname == NULL) + return -EINVAL; + mutex_lock(&virtio_9p_lock); list_for_each_entry(chan, &virtio_chan_list, chan_list) { if (!strncmp(devname, chan->tag, chan->tag_len) && diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c index a3f577e81c799487dff6f00930a9941ba2f013ef..84a49f2bcfbc07213d8f915416f7ce95781c0d25 100644 --- a/net/9p/trans_xen.c +++ b/net/9p/trans_xen.c @@ -94,6 +94,9 @@ static int p9_xen_create(struct p9_client *client, const char *addr, char *args) { struct xen_9pfs_front_priv *priv; + if (addr == NULL) + return -EINVAL; + read_lock(&xen_9pfs_lock); list_for_each_entry(priv, &xen_9pfs_devs, list) { if (!strcmp(priv->tag, addr)) { diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 71d8809fbe940c9343ce1007c650f5ad85306c1f..5bd9b389f8c982c83bcb88860953f3cdd84861c2 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -2718,7 +2718,7 @@ static int batadv_iv_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq, { struct batadv_neigh_ifinfo *router_ifinfo = NULL; struct batadv_neigh_node *router; - struct batadv_gw_node *curr_gw; + struct batadv_gw_node *curr_gw = NULL; int ret = 0; void *hdr; @@ -2766,6 +2766,8 @@ static int batadv_iv_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq, ret = 0; out: + if (curr_gw) + batadv_gw_node_put(curr_gw); if (router_ifinfo) batadv_neigh_ifinfo_put(router_ifinfo); if (router) diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c index a8f4c3902cf500e970d4ee29b6ca82eaece0648a..371a1f1651b48d58beef141f002eec7bf3cfe556 100644 --- a/net/batman-adv/bat_v.c +++ b/net/batman-adv/bat_v.c @@ -929,7 +929,7 @@ static int batadv_v_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq, { struct batadv_neigh_ifinfo *router_ifinfo = NULL; struct batadv_neigh_node *router; - struct batadv_gw_node *curr_gw; + struct batadv_gw_node *curr_gw = NULL; int ret = 0; void *hdr; @@ -997,6 +997,8 @@ static int batadv_v_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq, ret = 0; out: + if (curr_gw) + batadv_gw_node_put(curr_gw); if (router_ifinfo) batadv_neigh_ifinfo_put(router_ifinfo); if (router) diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 8a3ce79b1307b7f260ce2f64e96bdacfb9a322f0..0f4d4eece3e4deca20f3e6531fba13d4dbe005bf 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -1679,7 +1679,9 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv, ether_addr_copy(common->addr, tt_addr); common->vid = vid; - common->flags = flags; + if (!is_multicast_ether_addr(common->addr)) + common->flags = flags & (~BATADV_TT_SYNC_MASK); + tt_global_entry->roam_at = 0; /* node must store current time in case of roaming. 
This is * needed to purge this entry out on timeout (if nobody claims @@ -1742,7 +1744,8 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv, * TT_CLIENT_TEMP, therefore they have to be copied in the * client entry */ - common->flags |= flags & (~BATADV_TT_SYNC_MASK); + if (!is_multicast_ether_addr(common->addr)) + common->flags |= flags & (~BATADV_TT_SYNC_MASK); /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only * one originator left in the list and we previously received a diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index 91e3ba28070647bc93960c1d729aa200b666b249..583951e82ceed32d5add4aa17cc950afcc0e87db 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c @@ -159,7 +159,7 @@ void bt_accept_enqueue(struct sock *parent, struct sock *sk) BT_DBG("parent %p, sk %p", parent, sk); sock_hold(sk); - lock_sock(sk); + lock_sock_nested(sk, SINGLE_DEPTH_NESTING); list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q); bt_sk(sk)->parent = parent; release_sock(sk); diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index cef3754408d44f55011469d258a708be685ce487..b21fcc838784dc0bfa9564f42b95a8d129675c07 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c @@ -775,7 +775,7 @@ static int hidp_setup_hid(struct hidp_session *session, hid->version = req->version; hid->country = req->country; - strncpy(hid->name, req->name, sizeof(req->name) - 1); + strncpy(hid->name, req->name, sizeof(hid->name)); snprintf(hid->phys, sizeof(hid->phys), "%pMR", &l2cap_pi(session->ctrl_sock->sk)->chan->src); diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c index 2d38b6e34203b7df5638c340126c2f5169fb3791..98b62a7990aa257fc2de5d6c20614a58f465d1a0 100644 --- a/net/caif/caif_dev.c +++ b/net/caif/caif_dev.c @@ -131,8 +131,10 @@ static void caif_flow_cb(struct sk_buff *skb) caifd = caif_get(skb->dev); WARN_ON(caifd == NULL); - if (caifd == NULL) + if (!caifd) { + rcu_read_unlock(); return; + } caifd_hold(caifd); rcu_read_unlock(); diff --git a/net/core/dev.c b/net/core/dev.c index e0e8738abac96aacfa100f51fb1bce2dea5e9af1..a8ab119258a92cb86f9791d78c645a4fecd26fcc 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -8331,7 +8331,8 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char /* We get here if we can't use the current device name */ if (!pat) goto out; - if (dev_get_valid_name(net, dev, pat) < 0) + err = dev_get_valid_name(net, dev, pat); + if (err < 0) goto out; } @@ -8343,7 +8344,6 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char dev_close(dev); /* And unlink it from device chain */ - err = -ENODEV; unlist_netdevice(dev); synchronize_net(); diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c index 1307731ddfe4aa8889d63eab13d0145ca3432f31..832d69649cb6c642afcdf14ed3ea225e0d003ca1 100644 --- a/net/core/lwt_bpf.c +++ b/net/core/lwt_bpf.c @@ -217,7 +217,7 @@ static int bpf_parse_prog(struct nlattr *attr, struct bpf_lwt_prog *prog, if (!tb[LWT_BPF_PROG_FD] || !tb[LWT_BPF_PROG_NAME]) return -EINVAL; - prog->name = nla_memdup(tb[LWT_BPF_PROG_NAME], GFP_KERNEL); + prog->name = nla_memdup(tb[LWT_BPF_PROG_NAME], GFP_ATOMIC); if (!prog->name) return -ENOMEM; diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 514d697d469137d2d1f229fd0698dacc34850d79..dcb89cbc2730228113b90290088e5fa9407d9828 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -1174,6 +1174,12 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 
new, lladdr = neigh->ha; } + /* Update confirmed timestamp for neighbour entry after we + * received ARP packet even if it doesn't change IP to MAC binding. + */ + if (new & NUD_CONNECTED) + neigh->confirmed = jiffies; + /* If entry was valid and address is not changed, do not change entry state, if new one is STALE. */ @@ -1195,15 +1201,12 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, } } - /* Update timestamps only once we know we will make a change to the + /* Update timestamp only once we know we will make a change to the * neighbour entry. Otherwise we risk to move the locktime window with * noop updates and ignore relevant ARP updates. */ - if (new != old || lladdr != neigh->ha) { - if (new & NUD_CONNECTED) - neigh->confirmed = jiffies; + if (new != old || lladdr != neigh->ha) neigh->updated = jiffies; - } if (new != old) { neigh_del_timer(neigh); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 73ba966326b08ad558608eede0da653cefd123f6..c0f23b8dcfc6aa4d8b1c6d39031db06b06e48a42 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -952,9 +952,6 @@ struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size) WARN_ON_ONCE(!in_task()); - if (!sock_flag(sk, SOCK_ZEROCOPY)) - return NULL; - skb = sock_omalloc(sk, 0, GFP_KERNEL); if (!skb) return NULL; @@ -1854,6 +1851,20 @@ done: } EXPORT_SYMBOL(___pskb_trim); +/* Note : use pskb_trim_rcsum() instead of calling this directly + */ +int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len) +{ + if (skb->ip_summed == CHECKSUM_COMPLETE) { + int delta = skb->len - len; + + skb->csum = csum_sub(skb->csum, + skb_checksum(skb, len, delta, 0)); + } + return __pskb_trim(skb, len); +} +EXPORT_SYMBOL(pskb_trim_rcsum_slow); + /** * __pskb_pull_tail - advance tail of skb header * @skb: buffer to reallocate @@ -2857,20 +2868,27 @@ EXPORT_SYMBOL(skb_queue_purge); /** * skb_rbtree_purge - empty a skb rbtree * @root: root of the rbtree to empty + * Return value: the sum of truesizes of all purged skbs. * * Delete all buffers on an &sk_buff rbtree. Each buffer is removed from * the list and one reference dropped. This function does not take * any lock. Synchronization should be handled by the caller (e.g., TCP * out-of-order queue is protected by the socket lock). 
*/ -void skb_rbtree_purge(struct rb_root *root) +unsigned int skb_rbtree_purge(struct rb_root *root) { - struct sk_buff *skb, *next; + struct rb_node *p = rb_first(root); + unsigned int sum = 0; - rbtree_postorder_for_each_entry_safe(skb, next, root, rbnode) - kfree_skb(skb); + while (p) { + struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode); - *root = RB_ROOT; + p = rb_next(p); + rb_erase(&skb->rbnode, root); + sum += skb->truesize; + kfree_skb(skb); + } + return sum; } /** diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index bae7d78aa06895dd9237abc6f1bc2ae3db23f38b..fbeacbc2be5dafd49963d518985be84839606a00 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c @@ -1765,7 +1765,7 @@ static struct dcb_app_type *dcb_app_lookup(const struct dcb_app *app, if (itr->app.selector == app->selector && itr->app.protocol == app->protocol && itr->ifindex == ifindex && - (!prio || itr->app.priority == prio)) + ((prio == -1) || itr->app.priority == prio)) return itr; } @@ -1800,7 +1800,8 @@ u8 dcb_getapp(struct net_device *dev, struct dcb_app *app) u8 prio = 0; spin_lock_bh(&dcb_lock); - if ((itr = dcb_app_lookup(app, dev->ifindex, 0))) + itr = dcb_app_lookup(app, dev->ifindex, -1); + if (itr) prio = itr->app.priority; spin_unlock_bh(&dcb_lock); @@ -1828,7 +1829,8 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new) spin_lock_bh(&dcb_lock); /* Search for existing match and replace */ - if ((itr = dcb_app_lookup(new, dev->ifindex, 0))) { + itr = dcb_app_lookup(new, dev->ifindex, -1); + if (itr) { if (new->priority) itr->app.priority = new->priority; else { @@ -1861,7 +1863,8 @@ u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app) u8 prio = 0; spin_lock_bh(&dcb_lock); - if ((itr = dcb_app_lookup(app, dev->ifindex, 0))) + itr = dcb_app_lookup(app, dev->ifindex, -1); + if (itr) prio |= 1 << itr->app.priority; spin_unlock_bh(&dcb_lock); diff --git a/net/ieee802154/6lowpan/6lowpan_i.h b/net/ieee802154/6lowpan/6lowpan_i.h index d8de3bcfb1032a1133402cb2a4c50a2448133846..b8d95cb71c25dd69c8a88b2c886a3f0d2ce1174f 100644 --- a/net/ieee802154/6lowpan/6lowpan_i.h +++ b/net/ieee802154/6lowpan/6lowpan_i.h @@ -17,37 +17,19 @@ typedef unsigned __bitwise lowpan_rx_result; #define LOWPAN_DISPATCH_FRAG1 0xc0 #define LOWPAN_DISPATCH_FRAGN 0xe0 -struct lowpan_create_arg { +struct frag_lowpan_compare_key { u16 tag; u16 d_size; - const struct ieee802154_addr *src; - const struct ieee802154_addr *dst; + const struct ieee802154_addr src; + const struct ieee802154_addr dst; }; -/* Equivalent of ipv4 struct ip +/* Equivalent of ipv4 struct ipq */ struct lowpan_frag_queue { struct inet_frag_queue q; - - u16 tag; - u16 d_size; - struct ieee802154_addr saddr; - struct ieee802154_addr daddr; }; -static inline u32 ieee802154_addr_hash(const struct ieee802154_addr *a) -{ - switch (a->mode) { - case IEEE802154_ADDR_LONG: - return (((__force u64)a->extended_addr) >> 32) ^ - (((__force u64)a->extended_addr) & 0xffffffff); - case IEEE802154_ADDR_SHORT: - return (__force u32)(a->short_addr + (a->pan_id << 16)); - default: - return 0; - } -} - int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type); void lowpan_net_frag_exit(void); int lowpan_net_frag_init(void); diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c index e9f0489e422944ee15b40e10e39bc7200c4bb905..22db273f15ea351728e0e5d58d4ea135f4baae02 100644 --- a/net/ieee802154/6lowpan/core.c +++ b/net/ieee802154/6lowpan/core.c @@ -90,12 +90,18 @@ static int lowpan_neigh_construct(struct net_device *dev, struct neighbour *n) return 0; 
} +static int lowpan_get_iflink(const struct net_device *dev) +{ + return lowpan_802154_dev(dev)->wdev->ifindex; +} + static const struct net_device_ops lowpan_netdev_ops = { .ndo_init = lowpan_dev_init, .ndo_start_xmit = lowpan_xmit, .ndo_open = lowpan_open, .ndo_stop = lowpan_stop, .ndo_neigh_construct = lowpan_neigh_construct, + .ndo_get_iflink = lowpan_get_iflink, }; static void lowpan_setup(struct net_device *ldev) diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c index f85b08baff160307516d8367a1f60dfc555f3c6c..1790b65944b3ee188608b1a76d4f9a42fb1479d5 100644 --- a/net/ieee802154/6lowpan/reassembly.c +++ b/net/ieee802154/6lowpan/reassembly.c @@ -37,55 +37,24 @@ static struct inet_frags lowpan_frags; static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev, struct net_device *ldev); -static unsigned int lowpan_hash_frag(u16 tag, u16 d_size, - const struct ieee802154_addr *saddr, - const struct ieee802154_addr *daddr) -{ - net_get_random_once(&lowpan_frags.rnd, sizeof(lowpan_frags.rnd)); - return jhash_3words(ieee802154_addr_hash(saddr), - ieee802154_addr_hash(daddr), - (__force u32)(tag + (d_size << 16)), - lowpan_frags.rnd); -} - -static unsigned int lowpan_hashfn(const struct inet_frag_queue *q) -{ - const struct lowpan_frag_queue *fq; - - fq = container_of(q, struct lowpan_frag_queue, q); - return lowpan_hash_frag(fq->tag, fq->d_size, &fq->saddr, &fq->daddr); -} - -static bool lowpan_frag_match(const struct inet_frag_queue *q, const void *a) -{ - const struct lowpan_frag_queue *fq; - const struct lowpan_create_arg *arg = a; - - fq = container_of(q, struct lowpan_frag_queue, q); - return fq->tag == arg->tag && fq->d_size == arg->d_size && - ieee802154_addr_equal(&fq->saddr, arg->src) && - ieee802154_addr_equal(&fq->daddr, arg->dst); -} - static void lowpan_frag_init(struct inet_frag_queue *q, const void *a) { - const struct lowpan_create_arg *arg = a; + const struct frag_lowpan_compare_key *key = a; struct lowpan_frag_queue *fq; fq = container_of(q, struct lowpan_frag_queue, q); - fq->tag = arg->tag; - fq->d_size = arg->d_size; - fq->saddr = *arg->src; - fq->daddr = *arg->dst; + BUILD_BUG_ON(sizeof(*key) > sizeof(q->key)); + memcpy(&q->key, key, sizeof(*key)); } -static void lowpan_frag_expire(unsigned long data) +static void lowpan_frag_expire(struct timer_list *t) { + struct inet_frag_queue *frag = from_timer(frag, t, timer); struct frag_queue *fq; struct net *net; - fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q); + fq = container_of(frag, struct frag_queue, q); net = container_of(fq->q.net, struct net, ieee802154_lowpan.frags); spin_lock(&fq->q.lock); @@ -93,10 +62,10 @@ static void lowpan_frag_expire(unsigned long data) if (fq->q.flags & INET_FRAG_COMPLETE) goto out; - inet_frag_kill(&fq->q, &lowpan_frags); + inet_frag_kill(&fq->q); out: spin_unlock(&fq->q.lock); - inet_frag_put(&fq->q, &lowpan_frags); + inet_frag_put(&fq->q); } static inline struct lowpan_frag_queue * @@ -104,25 +73,20 @@ fq_find(struct net *net, const struct lowpan_802154_cb *cb, const struct ieee802154_addr *src, const struct ieee802154_addr *dst) { - struct inet_frag_queue *q; - struct lowpan_create_arg arg; - unsigned int hash; struct netns_ieee802154_lowpan *ieee802154_lowpan = net_ieee802154_lowpan(net); + struct frag_lowpan_compare_key key = { + .tag = cb->d_tag, + .d_size = cb->d_size, + .src = *src, + .dst = *dst, + }; + struct inet_frag_queue *q; - arg.tag = cb->d_tag; - arg.d_size = cb->d_size; - arg.src = src; - 
arg.dst = dst; - - hash = lowpan_hash_frag(cb->d_tag, cb->d_size, src, dst); - - q = inet_frag_find(&ieee802154_lowpan->frags, - &lowpan_frags, &arg, hash); - if (IS_ERR_OR_NULL(q)) { - inet_frag_maybe_warn_overflow(q, pr_fmt()); + q = inet_frag_find(&ieee802154_lowpan->frags, &key); + if (!q) return NULL; - } + return container_of(q, struct lowpan_frag_queue, q); } @@ -229,7 +193,7 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev, struct sk_buff *fp, *head = fq->q.fragments; int sum_truesize; - inet_frag_kill(&fq->q, &lowpan_frags); + inet_frag_kill(&fq->q); /* Make the one we just received the head. */ if (prev) { @@ -437,7 +401,7 @@ int lowpan_frag_rcv(struct sk_buff *skb, u8 frag_type) ret = lowpan_frag_queue(fq, skb, frag_type); spin_unlock(&fq->q.lock); - inet_frag_put(&fq->q, &lowpan_frags); + inet_frag_put(&fq->q); return ret; } @@ -447,24 +411,22 @@ err: } #ifdef CONFIG_SYSCTL -static int zero; static struct ctl_table lowpan_frags_ns_ctl_table[] = { { .procname = "6lowpanfrag_high_thresh", .data = &init_net.ieee802154_lowpan.frags.high_thresh, - .maxlen = sizeof(int), + .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = proc_dointvec_minmax, + .proc_handler = proc_doulongvec_minmax, .extra1 = &init_net.ieee802154_lowpan.frags.low_thresh }, { .procname = "6lowpanfrag_low_thresh", .data = &init_net.ieee802154_lowpan.frags.low_thresh, - .maxlen = sizeof(int), + .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = &zero, + .proc_handler = proc_doulongvec_minmax, .extra2 = &init_net.ieee802154_lowpan.frags.high_thresh }, { @@ -580,14 +542,20 @@ static int __net_init lowpan_frags_init_net(struct net *net) { struct netns_ieee802154_lowpan *ieee802154_lowpan = net_ieee802154_lowpan(net); + int res; ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH; ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH; ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT; + ieee802154_lowpan->frags.f = &lowpan_frags; - inet_frags_init_net(&ieee802154_lowpan->frags); - - return lowpan_frags_ns_sysctl_register(net); + res = inet_frags_init_net(&ieee802154_lowpan->frags); + if (res < 0) + return res; + res = lowpan_frags_ns_sysctl_register(net); + if (res < 0) + inet_frags_exit_net(&ieee802154_lowpan->frags); + return res; } static void __net_exit lowpan_frags_exit_net(struct net *net) @@ -596,7 +564,7 @@ static void __net_exit lowpan_frags_exit_net(struct net *net) net_ieee802154_lowpan(net); lowpan_frags_ns_sysctl_unregister(net); - inet_frags_exit_net(&ieee802154_lowpan->frags, &lowpan_frags); + inet_frags_exit_net(&ieee802154_lowpan->frags); } static struct pernet_operations lowpan_frags_ops = { @@ -604,32 +572,63 @@ static struct pernet_operations lowpan_frags_ops = { .exit = lowpan_frags_exit_net, }; -int __init lowpan_net_frag_init(void) +static u32 lowpan_key_hashfn(const void *data, u32 len, u32 seed) { - int ret; + return jhash2(data, + sizeof(struct frag_lowpan_compare_key) / sizeof(u32), seed); +} - ret = lowpan_frags_sysctl_register(); - if (ret) - return ret; +static u32 lowpan_obj_hashfn(const void *data, u32 len, u32 seed) +{ + const struct inet_frag_queue *fq = data; - ret = register_pernet_subsys(&lowpan_frags_ops); - if (ret) - goto err_pernet; + return jhash2((const u32 *)&fq->key, + sizeof(struct frag_lowpan_compare_key) / sizeof(u32), seed); +} + +static int lowpan_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr) +{ + const struct frag_lowpan_compare_key *key = 
arg->key; + const struct inet_frag_queue *fq = ptr; + + return !!memcmp(&fq->key, key, sizeof(*key)); +} + +static const struct rhashtable_params lowpan_rhash_params = { + .head_offset = offsetof(struct inet_frag_queue, node), + .hashfn = lowpan_key_hashfn, + .obj_hashfn = lowpan_obj_hashfn, + .obj_cmpfn = lowpan_obj_cmpfn, + .automatic_shrinking = true, +}; + +int __init lowpan_net_frag_init(void) +{ + int ret; - lowpan_frags.hashfn = lowpan_hashfn; lowpan_frags.constructor = lowpan_frag_init; lowpan_frags.destructor = NULL; lowpan_frags.qsize = sizeof(struct frag_queue); - lowpan_frags.match = lowpan_frag_match; lowpan_frags.frag_expire = lowpan_frag_expire; lowpan_frags.frags_cache_name = lowpan_frags_cache_name; + lowpan_frags.rhash_params = lowpan_rhash_params; ret = inet_frags_init(&lowpan_frags); if (ret) - goto err_pernet; + goto out; + ret = lowpan_frags_sysctl_register(); + if (ret) + goto err_sysctl; + + ret = register_pernet_subsys(&lowpan_frags_ops); + if (ret) + goto err_pernet; +out: return ret; err_pernet: lowpan_frags_sysctl_unregister(); +err_sysctl: + inet_frags_fini(&lowpan_frags); return ret; } diff --git a/net/ieee802154/6lowpan/tx.c b/net/ieee802154/6lowpan/tx.c index e6ff5128e61acf135b7be9071dc06ed2e4fa96b7..ca53efa17be15b78db9e00372b33a9ef0911b47f 100644 --- a/net/ieee802154/6lowpan/tx.c +++ b/net/ieee802154/6lowpan/tx.c @@ -265,9 +265,24 @@ netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev) /* We must take a copy of the skb before we modify/replace the ipv6 * header as the header could be used elsewhere */ - skb = skb_unshare(skb, GFP_ATOMIC); - if (!skb) - return NET_XMIT_DROP; + if (unlikely(skb_headroom(skb) < ldev->needed_headroom || + skb_tailroom(skb) < ldev->needed_tailroom)) { + struct sk_buff *nskb; + + nskb = skb_copy_expand(skb, ldev->needed_headroom, + ldev->needed_tailroom, GFP_ATOMIC); + if (likely(nskb)) { + consume_skb(skb); + skb = nskb; + } else { + kfree_skb(skb); + return NET_XMIT_DROP; + } + } else { + skb = skb_unshare(skb, GFP_ATOMIC); + if (!skb) + return NET_XMIT_DROP; + } ret = lowpan_header(skb, ldev, &dgram_size, &dgram_offset); if (ret < 0) { diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index b9d9a2b8792c7a9aa6744f80b55f7b5a727a5cef..f31c09873d0f82c8a7b082710179a38acced6353 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1307,6 +1307,7 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb, if (encap) skb_reset_inner_headers(skb); skb->network_header = (u8 *)iph - skb->head; + skb_reset_mac_len(skb); } while ((skb = skb->next)); out: diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index ba4454ecdf0f64708ae19cfd88d2e22230f60d41..f6764537148ce97c7b7c21d115115554d2893248 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c @@ -25,12 +25,6 @@ #include #include -#define INETFRAGS_EVICT_BUCKETS 128 -#define INETFRAGS_EVICT_MAX 512 - -/* don't rebuild inetfrag table with new secret more often than this */ -#define INETFRAGS_MIN_REBUILD_INTERVAL (5 * HZ) - /* Given the OR values of all fragments, apply RFC 3168 5.3 requirements * Value : 0xff if frame should be dropped. 
* 0 or INET_ECN_CE value, to be ORed in to final iph->tos field @@ -52,157 +46,8 @@ const u8 ip_frag_ecn_table[16] = { }; EXPORT_SYMBOL(ip_frag_ecn_table); -static unsigned int -inet_frag_hashfn(const struct inet_frags *f, const struct inet_frag_queue *q) -{ - return f->hashfn(q) & (INETFRAGS_HASHSZ - 1); -} - -static bool inet_frag_may_rebuild(struct inet_frags *f) -{ - return time_after(jiffies, - f->last_rebuild_jiffies + INETFRAGS_MIN_REBUILD_INTERVAL); -} - -static void inet_frag_secret_rebuild(struct inet_frags *f) -{ - int i; - - write_seqlock_bh(&f->rnd_seqlock); - - if (!inet_frag_may_rebuild(f)) - goto out; - - get_random_bytes(&f->rnd, sizeof(u32)); - - for (i = 0; i < INETFRAGS_HASHSZ; i++) { - struct inet_frag_bucket *hb; - struct inet_frag_queue *q; - struct hlist_node *n; - - hb = &f->hash[i]; - spin_lock(&hb->chain_lock); - - hlist_for_each_entry_safe(q, n, &hb->chain, list) { - unsigned int hval = inet_frag_hashfn(f, q); - - if (hval != i) { - struct inet_frag_bucket *hb_dest; - - hlist_del(&q->list); - - /* Relink to new hash chain. */ - hb_dest = &f->hash[hval]; - - /* This is the only place where we take - * another chain_lock while already holding - * one. As this will not run concurrently, - * we cannot deadlock on hb_dest lock below, if its - * already locked it will be released soon since - * other caller cannot be waiting for hb lock - * that we've taken above. - */ - spin_lock_nested(&hb_dest->chain_lock, - SINGLE_DEPTH_NESTING); - hlist_add_head(&q->list, &hb_dest->chain); - spin_unlock(&hb_dest->chain_lock); - } - } - spin_unlock(&hb->chain_lock); - } - - f->rebuild = false; - f->last_rebuild_jiffies = jiffies; -out: - write_sequnlock_bh(&f->rnd_seqlock); -} - -static bool inet_fragq_should_evict(const struct inet_frag_queue *q) -{ - if (!hlist_unhashed(&q->list_evictor)) - return false; - - return q->net->low_thresh == 0 || - frag_mem_limit(q->net) >= q->net->low_thresh; -} - -static unsigned int -inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb) -{ - struct inet_frag_queue *fq; - struct hlist_node *n; - unsigned int evicted = 0; - HLIST_HEAD(expired); - - spin_lock(&hb->chain_lock); - - hlist_for_each_entry_safe(fq, n, &hb->chain, list) { - if (!inet_fragq_should_evict(fq)) - continue; - - if (!del_timer(&fq->timer)) - continue; - - hlist_add_head(&fq->list_evictor, &expired); - ++evicted; - } - - spin_unlock(&hb->chain_lock); - - hlist_for_each_entry_safe(fq, n, &expired, list_evictor) - f->frag_expire((unsigned long) fq); - - return evicted; -} - -static void inet_frag_worker(struct work_struct *work) -{ - unsigned int budget = INETFRAGS_EVICT_BUCKETS; - unsigned int i, evicted = 0; - struct inet_frags *f; - - f = container_of(work, struct inet_frags, frags_work); - - BUILD_BUG_ON(INETFRAGS_EVICT_BUCKETS >= INETFRAGS_HASHSZ); - - local_bh_disable(); - - for (i = ACCESS_ONCE(f->next_bucket); budget; --budget) { - evicted += inet_evict_bucket(f, &f->hash[i]); - i = (i + 1) & (INETFRAGS_HASHSZ - 1); - if (evicted > INETFRAGS_EVICT_MAX) - break; - } - - f->next_bucket = i; - - local_bh_enable(); - - if (f->rebuild && inet_frag_may_rebuild(f)) - inet_frag_secret_rebuild(f); -} - -static void inet_frag_schedule_worker(struct inet_frags *f) -{ - if (unlikely(!work_pending(&f->frags_work))) - schedule_work(&f->frags_work); -} - int inet_frags_init(struct inet_frags *f) { - int i; - - INIT_WORK(&f->frags_work, inet_frag_worker); - - for (i = 0; i < INETFRAGS_HASHSZ; i++) { - struct inet_frag_bucket *hb = &f->hash[i]; - - 
spin_lock_init(&hb->chain_lock); - INIT_HLIST_HEAD(&hb->chain); - } - - seqlock_init(&f->rnd_seqlock); - f->last_rebuild_jiffies = 0; f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0, NULL); if (!f->frags_cachep) @@ -214,83 +59,75 @@ EXPORT_SYMBOL(inet_frags_init); void inet_frags_fini(struct inet_frags *f) { - cancel_work_sync(&f->frags_work); + /* We must wait that all inet_frag_destroy_rcu() have completed. */ + rcu_barrier(); + kmem_cache_destroy(f->frags_cachep); + f->frags_cachep = NULL; } EXPORT_SYMBOL(inet_frags_fini); -void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f) +static void inet_frags_free_cb(void *ptr, void *arg) { - unsigned int seq; - int i; - - nf->low_thresh = 0; + struct inet_frag_queue *fq = ptr; -evict_again: - local_bh_disable(); - seq = read_seqbegin(&f->rnd_seqlock); - - for (i = 0; i < INETFRAGS_HASHSZ ; i++) - inet_evict_bucket(f, &f->hash[i]); - - local_bh_enable(); - cond_resched(); - - if (read_seqretry(&f->rnd_seqlock, seq) || - sum_frag_mem_limit(nf)) - goto evict_again; -} -EXPORT_SYMBOL(inet_frags_exit_net); - -static struct inet_frag_bucket * -get_frag_bucket_locked(struct inet_frag_queue *fq, struct inet_frags *f) -__acquires(hb->chain_lock) -{ - struct inet_frag_bucket *hb; - unsigned int seq, hash; - - restart: - seq = read_seqbegin(&f->rnd_seqlock); - - hash = inet_frag_hashfn(f, fq); - hb = &f->hash[hash]; + /* If we can not cancel the timer, it means this frag_queue + * is already disappearing, we have nothing to do. + * Otherwise, we own a refcount until the end of this function. + */ + if (!del_timer(&fq->timer)) + return; - spin_lock(&hb->chain_lock); - if (read_seqretry(&f->rnd_seqlock, seq)) { - spin_unlock(&hb->chain_lock); - goto restart; + spin_lock_bh(&fq->lock); + if (!(fq->flags & INET_FRAG_COMPLETE)) { + fq->flags |= INET_FRAG_COMPLETE; + refcount_dec(&fq->refcnt); } + spin_unlock_bh(&fq->lock); - return hb; + inet_frag_put(fq); } -static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f) +void inet_frags_exit_net(struct netns_frags *nf) { - struct inet_frag_bucket *hb; + nf->low_thresh = 0; /* prevent creation of new frags */ - hb = get_frag_bucket_locked(fq, f); - hlist_del(&fq->list); - fq->flags |= INET_FRAG_COMPLETE; - spin_unlock(&hb->chain_lock); + rhashtable_free_and_destroy(&nf->rhashtable, inet_frags_free_cb, NULL); } +EXPORT_SYMBOL(inet_frags_exit_net); -void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f) +void inet_frag_kill(struct inet_frag_queue *fq) { if (del_timer(&fq->timer)) refcount_dec(&fq->refcnt); if (!(fq->flags & INET_FRAG_COMPLETE)) { - fq_unlink(fq, f); + struct netns_frags *nf = fq->net; + + fq->flags |= INET_FRAG_COMPLETE; + rhashtable_remove_fast(&nf->rhashtable, &fq->node, nf->f->rhash_params); refcount_dec(&fq->refcnt); } } EXPORT_SYMBOL(inet_frag_kill); -void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f) +static void inet_frag_destroy_rcu(struct rcu_head *head) +{ + struct inet_frag_queue *q = container_of(head, struct inet_frag_queue, + rcu); + struct inet_frags *f = q->net->f; + + if (f->destructor) + f->destructor(q); + kmem_cache_free(f->frags_cachep, q); +} + +void inet_frag_destroy(struct inet_frag_queue *q) { struct sk_buff *fp; struct netns_frags *nf; unsigned int sum, sum_truesize = 0; + struct inet_frags *f; WARN_ON(!(q->flags & INET_FRAG_COMPLETE)); WARN_ON(del_timer(&q->timer) != 0); @@ -298,64 +135,35 @@ void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f) /* Release all 
fragment data. */ fp = q->fragments; nf = q->net; - while (fp) { - struct sk_buff *xp = fp->next; - - sum_truesize += fp->truesize; - kfree_skb(fp); - fp = xp; + f = nf->f; + if (fp) { + do { + struct sk_buff *xp = fp->next; + + sum_truesize += fp->truesize; + kfree_skb(fp); + fp = xp; + } while (fp); + } else { + sum_truesize = inet_frag_rbtree_purge(&q->rb_fragments); } sum = sum_truesize + f->qsize; - if (f->destructor) - f->destructor(q); - kmem_cache_free(f->frags_cachep, q); + call_rcu(&q->rcu, inet_frag_destroy_rcu); sub_frag_mem_limit(nf, sum); } EXPORT_SYMBOL(inet_frag_destroy); -static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf, - struct inet_frag_queue *qp_in, - struct inet_frags *f, - void *arg) -{ - struct inet_frag_bucket *hb = get_frag_bucket_locked(qp_in, f); - struct inet_frag_queue *qp; - -#ifdef CONFIG_SMP - /* With SMP race we have to recheck hash table, because - * such entry could have been created on other cpu before - * we acquired hash bucket lock. - */ - hlist_for_each_entry(qp, &hb->chain, list) { - if (qp->net == nf && f->match(qp, arg)) { - refcount_inc(&qp->refcnt); - spin_unlock(&hb->chain_lock); - qp_in->flags |= INET_FRAG_COMPLETE; - inet_frag_put(qp_in, f); - return qp; - } - } -#endif - qp = qp_in; - if (!mod_timer(&qp->timer, jiffies + nf->timeout)) - refcount_inc(&qp->refcnt); - - refcount_inc(&qp->refcnt); - hlist_add_head(&qp->list, &hb->chain); - - spin_unlock(&hb->chain_lock); - - return qp; -} - static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf, struct inet_frags *f, void *arg) { struct inet_frag_queue *q; + if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) + return NULL; + q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC); if (!q) return NULL; @@ -364,77 +172,53 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf, f->constructor(q, arg); add_frag_mem_limit(nf, f->qsize); - setup_timer(&q->timer, f->frag_expire, (unsigned long)q); + timer_setup(&q->timer, f->frag_expire, 0); spin_lock_init(&q->lock); - refcount_set(&q->refcnt, 1); + refcount_set(&q->refcnt, 3); return q; } static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf, - struct inet_frags *f, void *arg) { + struct inet_frags *f = nf->f; struct inet_frag_queue *q; + int err; q = inet_frag_alloc(nf, f, arg); if (!q) return NULL; - return inet_frag_intern(nf, q, f, arg); -} - -struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, - struct inet_frags *f, void *key, - unsigned int hash) -{ - struct inet_frag_bucket *hb; - struct inet_frag_queue *q; - int depth = 0; + mod_timer(&q->timer, jiffies + nf->timeout); - if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) { - inet_frag_schedule_worker(f); + err = rhashtable_insert_fast(&nf->rhashtable, &q->node, + f->rhash_params); + if (err < 0) { + q->flags |= INET_FRAG_COMPLETE; + inet_frag_kill(q); + inet_frag_destroy(q); return NULL; } + return q; +} - if (frag_mem_limit(nf) > nf->low_thresh) - inet_frag_schedule_worker(f); - - hash &= (INETFRAGS_HASHSZ - 1); - hb = &f->hash[hash]; - - spin_lock(&hb->chain_lock); - hlist_for_each_entry(q, &hb->chain, list) { - if (q->net == nf && f->match(q, key)) { - refcount_inc(&q->refcnt); - spin_unlock(&hb->chain_lock); - return q; - } - depth++; - } - spin_unlock(&hb->chain_lock); +/* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */ +struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key) +{ + struct inet_frag_queue *fq; - if (depth <= 
INETFRAGS_MAXDEPTH) - return inet_frag_create(nf, f, key); + rcu_read_lock(); - if (inet_frag_may_rebuild(f)) { - if (!f->rebuild) - f->rebuild = true; - inet_frag_schedule_worker(f); + fq = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params); + if (fq) { + if (!refcount_inc_not_zero(&fq->refcnt)) + fq = NULL; + rcu_read_unlock(); + return fq; } + rcu_read_unlock(); - return ERR_PTR(-ENOBUFS); + return inet_frag_create(nf, key); } EXPORT_SYMBOL(inet_frag_find); - -void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q, - const char *prefix) -{ - static const char msg[] = "inet_frag_find: Fragment hash bucket" - " list length grew over limit " __stringify(INETFRAGS_MAXDEPTH) - ". Dropping fragment.\n"; - - if (PTR_ERR(q) == -ENOBUFS) - net_dbg_ratelimited("%s%s", prefix, msg); -} -EXPORT_SYMBOL(inet_frag_maybe_warn_overflow); diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 4cb1befc39494fcec05f6a93f949bb6eefc4e0a3..e7227128df2c8fd54727c234f76043133809bd1e 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -57,27 +57,64 @@ */ static const char ip_frag_cache_name[] = "ip4-frags"; -struct ipfrag_skb_cb -{ +/* Use skb->cb to track consecutive/adjacent fragments coming at + * the end of the queue. Nodes in the rb-tree queue will + * contain "runs" of one or more adjacent fragments. + * + * Invariants: + * - next_frag is NULL at the tail of a "run"; + * - the head of a "run" has the sum of all fragment lengths in frag_run_len. + */ +struct ipfrag_skb_cb { struct inet_skb_parm h; - int offset; + struct sk_buff *next_frag; + int frag_run_len; }; -#define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb)) +#define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb)) + +static void ip4_frag_init_run(struct sk_buff *skb) +{ + BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb)); + + FRAG_CB(skb)->next_frag = NULL; + FRAG_CB(skb)->frag_run_len = skb->len; +} + +/* Append skb to the last "run". */ +static void ip4_frag_append_to_last_run(struct inet_frag_queue *q, + struct sk_buff *skb) +{ + RB_CLEAR_NODE(&skb->rbnode); + FRAG_CB(skb)->next_frag = NULL; + + FRAG_CB(q->last_run_head)->frag_run_len += skb->len; + FRAG_CB(q->fragments_tail)->next_frag = skb; + q->fragments_tail = skb; +} + +/* Create a new "run" with the skb. */ +static void ip4_frag_create_run(struct inet_frag_queue *q, struct sk_buff *skb) +{ + if (q->last_run_head) + rb_link_node(&skb->rbnode, &q->last_run_head->rbnode, + &q->last_run_head->rbnode.rb_right); + else + rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node); + rb_insert_color(&skb->rbnode, &q->rb_fragments); + + ip4_frag_init_run(skb); + q->fragments_tail = skb; + q->last_run_head = skb; +} /* Describe an entry in the "incomplete datagrams" queue. 
*/ struct ipq { struct inet_frag_queue q; - u32 user; - __be32 saddr; - __be32 daddr; - __be16 id; - u8 protocol; u8 ecn; /* RFC3168 support */ u16 max_df_size; /* largest frag with DF set seen */ int iif; - int vif; /* L3 master device index */ unsigned int rid; struct inet_peer *peer; }; @@ -89,49 +126,9 @@ static u8 ip4_frag_ecn(u8 tos) static struct inet_frags ip4_frags; -int ip_frag_mem(struct net *net) -{ - return sum_frag_mem_limit(&net->ipv4.frags); -} - -static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, - struct net_device *dev); - -struct ip4_create_arg { - struct iphdr *iph; - u32 user; - int vif; -}; +static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb, + struct sk_buff *prev_tail, struct net_device *dev); -static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot) -{ - net_get_random_once(&ip4_frags.rnd, sizeof(ip4_frags.rnd)); - return jhash_3words((__force u32)id << 16 | prot, - (__force u32)saddr, (__force u32)daddr, - ip4_frags.rnd); -} - -static unsigned int ip4_hashfn(const struct inet_frag_queue *q) -{ - const struct ipq *ipq; - - ipq = container_of(q, struct ipq, q); - return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol); -} - -static bool ip4_frag_match(const struct inet_frag_queue *q, const void *a) -{ - const struct ipq *qp; - const struct ip4_create_arg *arg = a; - - qp = container_of(q, struct ipq, q); - return qp->id == arg->iph->id && - qp->saddr == arg->iph->saddr && - qp->daddr == arg->iph->daddr && - qp->protocol == arg->iph->protocol && - qp->user == arg->user && - qp->vif == arg->vif; -} static void ip4_frag_init(struct inet_frag_queue *q, const void *a) { @@ -140,17 +137,12 @@ static void ip4_frag_init(struct inet_frag_queue *q, const void *a) frags); struct net *net = container_of(ipv4, struct net, ipv4); - const struct ip4_create_arg *arg = a; + const struct frag_v4_compare_key *key = a; - qp->protocol = arg->iph->protocol; - qp->id = arg->iph->id; - qp->ecn = ip4_frag_ecn(arg->iph->tos); - qp->saddr = arg->iph->saddr; - qp->daddr = arg->iph->daddr; - qp->vif = arg->vif; - qp->user = arg->user; + q->key.v4 = *key; + qp->ecn = 0; qp->peer = q->net->max_dist ? - inet_getpeer_v4(net->ipv4.peers, arg->iph->saddr, arg->vif, 1) : + inet_getpeer_v4(net->ipv4.peers, key->saddr, key->vif, 1) : NULL; } @@ -168,7 +160,7 @@ static void ip4_frag_free(struct inet_frag_queue *q) static void ipq_put(struct ipq *ipq) { - inet_frag_put(&ipq->q, &ip4_frags); + inet_frag_put(&ipq->q); } /* Kill ipq entry. It is not destroyed immediately, @@ -176,7 +168,7 @@ static void ipq_put(struct ipq *ipq) */ static void ipq_kill(struct ipq *ipq) { - inet_frag_kill(&ipq->q, &ip4_frags); + inet_frag_kill(&ipq->q); } static bool frag_expire_skip_icmp(u32 user) @@ -191,12 +183,16 @@ static bool frag_expire_skip_icmp(u32 user) /* * Oops, a fragment queue timed out. Kill it and send an ICMP reply. 
*/ -static void ip_expire(unsigned long arg) +static void ip_expire(struct timer_list *t) { - struct ipq *qp; + struct inet_frag_queue *frag = from_timer(frag, t, timer); + const struct iphdr *iph; + struct sk_buff *head = NULL; struct net *net; + struct ipq *qp; + int err; - qp = container_of((struct inet_frag_queue *) arg, struct ipq, q); + qp = container_of(frag, struct ipq, q); net = container_of(qp->q.net, struct net, ipv4.frags); rcu_read_lock(); @@ -207,51 +203,65 @@ static void ip_expire(unsigned long arg) ipq_kill(qp); __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS); + __IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT); - if (!inet_frag_evicting(&qp->q)) { - struct sk_buff *clone, *head = qp->q.fragments; - const struct iphdr *iph; - int err; - - __IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT); + if (!(qp->q.flags & INET_FRAG_FIRST_IN)) + goto out; - if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments) + /* sk_buff::dev and sk_buff::rbnode are unionized. So we + * pull the head out of the tree in order to be able to + * deal with head->dev. + */ + if (qp->q.fragments) { + head = qp->q.fragments; + qp->q.fragments = head->next; + } else { + head = skb_rb_first(&qp->q.rb_fragments); + if (!head) goto out; + if (FRAG_CB(head)->next_frag) + rb_replace_node(&head->rbnode, + &FRAG_CB(head)->next_frag->rbnode, + &qp->q.rb_fragments); + else + rb_erase(&head->rbnode, &qp->q.rb_fragments); + memset(&head->rbnode, 0, sizeof(head->rbnode)); + barrier(); + } + if (head == qp->q.fragments_tail) + qp->q.fragments_tail = NULL; - head->dev = dev_get_by_index_rcu(net, qp->iif); - if (!head->dev) - goto out; + sub_frag_mem_limit(qp->q.net, head->truesize); + + head->dev = dev_get_by_index_rcu(net, qp->iif); + if (!head->dev) + goto out; - /* skb has no dst, perform route lookup again */ - iph = ip_hdr(head); - err = ip_route_input_noref(head, iph->daddr, iph->saddr, + /* skb has no dst, perform route lookup again */ + iph = ip_hdr(head); + err = ip_route_input_noref(head, iph->daddr, iph->saddr, iph->tos, head->dev); - if (err) - goto out; + if (err) + goto out; - /* Only an end host needs to send an ICMP - * "Fragment Reassembly Timeout" message, per RFC792. - */ - if (frag_expire_skip_icmp(qp->user) && - (skb_rtable(head)->rt_type != RTN_LOCAL)) - goto out; + /* Only an end host needs to send an ICMP + * "Fragment Reassembly Timeout" message, per RFC792. + */ + if (frag_expire_skip_icmp(qp->q.key.v4.user) && + (skb_rtable(head)->rt_type != RTN_LOCAL)) + goto out; - clone = skb_clone(head, GFP_ATOMIC); + spin_unlock(&qp->q.lock); + icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0); + goto out_rcu_unlock; - /* Send an ICMP "Fragment Reassembly Timeout" message. 
*/ - if (clone) { - spin_unlock(&qp->q.lock); - icmp_send(clone, ICMP_TIME_EXCEEDED, - ICMP_EXC_FRAGTIME, 0); - consume_skb(clone); - goto out_rcu_unlock; - } - } out: spin_unlock(&qp->q.lock); out_rcu_unlock: rcu_read_unlock(); + if (head) + kfree_skb(head); ipq_put(qp); } @@ -261,21 +271,20 @@ out_rcu_unlock: static struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user, int vif) { + struct frag_v4_compare_key key = { + .saddr = iph->saddr, + .daddr = iph->daddr, + .user = user, + .vif = vif, + .id = iph->id, + .protocol = iph->protocol, + }; struct inet_frag_queue *q; - struct ip4_create_arg arg; - unsigned int hash; - - arg.iph = iph; - arg.user = user; - arg.vif = vif; - hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol); - - q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash); - if (IS_ERR_OR_NULL(q)) { - inet_frag_maybe_warn_overflow(q, pr_fmt()); + q = inet_frag_find(&net->ipv4.frags, &key); + if (!q) return NULL; - } + return container_of(q, struct ipq, q); } @@ -295,7 +304,7 @@ static int ip_frag_too_far(struct ipq *qp) end = atomic_inc_return(&peer->rid); qp->rid = end; - rc = qp->q.fragments && (end - start) > max; + rc = qp->q.fragments_tail && (end - start) > max; if (rc) { struct net *net; @@ -309,7 +318,6 @@ static int ip_frag_too_far(struct ipq *qp) static int ip_frag_reinit(struct ipq *qp) { - struct sk_buff *fp; unsigned int sum_truesize = 0; if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) { @@ -317,21 +325,16 @@ static int ip_frag_reinit(struct ipq *qp) return -ETIMEDOUT; } - fp = qp->q.fragments; - do { - struct sk_buff *xp = fp->next; - - sum_truesize += fp->truesize; - kfree_skb(fp); - fp = xp; - } while (fp); + sum_truesize = inet_frag_rbtree_purge(&qp->q.rb_fragments); sub_frag_mem_limit(qp->q.net, sum_truesize); qp->q.flags = 0; qp->q.len = 0; qp->q.meat = 0; qp->q.fragments = NULL; + qp->q.rb_fragments = RB_ROOT; qp->q.fragments_tail = NULL; + qp->q.last_run_head = NULL; qp->iif = 0; qp->ecn = 0; @@ -341,7 +344,9 @@ static int ip_frag_reinit(struct ipq *qp) /* Add new segment to existing queue. */ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) { - struct sk_buff *prev, *next; + struct net *net = container_of(qp->q.net, struct net, ipv4.frags); + struct rb_node **rbn, *parent; + struct sk_buff *skb1, *prev_tail; struct net_device *dev; unsigned int fragsize; int flags, offset; @@ -404,99 +409,61 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) if (err) goto err; - /* Find out which fragments are in front and at the back of us - * in the chain of fragments so far. We must know where to put - * this fragment, right? - */ - prev = qp->q.fragments_tail; - if (!prev || FRAG_CB(prev)->offset < offset) { - next = NULL; - goto found; - } - prev = NULL; - for (next = qp->q.fragments; next != NULL; next = next->next) { - if (FRAG_CB(next)->offset >= offset) - break; /* bingo! */ - prev = next; - } - -found: - /* We found where to put this one. Check for overlap with - * preceding fragment, and, if needed, align things so that - * any overlaps are eliminated. + /* Note : skb->rbnode and skb->dev share the same location. */ + dev = skb->dev; + /* Makes sure compiler wont do silly aliasing games */ + barrier(); + + /* RFC5722, Section 4, amended by Errata ID : 3089 + * When reassembling an IPv6 datagram, if + * one or more its constituent fragments is determined to be an + * overlapping fragment, the entire datagram (and any constituent + * fragments) MUST be silently discarded. 
+ * + * We do the same here for IPv4 (and increment an snmp counter). */ - if (prev) { - int i = (FRAG_CB(prev)->offset + prev->len) - offset; - if (i > 0) { - offset += i; - err = -EINVAL; - if (end <= offset) - goto err; - err = -ENOMEM; - if (!pskb_pull(skb, i)) - goto err; - if (skb->ip_summed != CHECKSUM_UNNECESSARY) - skb->ip_summed = CHECKSUM_NONE; - } - } - - err = -ENOMEM; - - while (next && FRAG_CB(next)->offset < end) { - int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */ - - if (i < next->len) { - int delta = -next->truesize; - - /* Eat head of the next overlapped fragment - * and leave the loop. The next ones cannot overlap. - */ - if (!pskb_pull(next, i)) - goto err; - delta += next->truesize; - if (delta) - add_frag_mem_limit(qp->q.net, delta); - FRAG_CB(next)->offset += i; - qp->q.meat -= i; - if (next->ip_summed != CHECKSUM_UNNECESSARY) - next->ip_summed = CHECKSUM_NONE; - break; - } else { - struct sk_buff *free_it = next; - - /* Old fragment is completely overridden with - * new one drop it. - */ - next = next->next; - - if (prev) - prev->next = next; - else - qp->q.fragments = next; - - qp->q.meat -= free_it->len; - sub_frag_mem_limit(qp->q.net, free_it->truesize); - kfree_skb(free_it); - } + /* Find out where to put this fragment. */ + prev_tail = qp->q.fragments_tail; + if (!prev_tail) + ip4_frag_create_run(&qp->q, skb); /* First fragment. */ + else if (prev_tail->ip_defrag_offset + prev_tail->len < end) { + /* This is the common case: skb goes to the end. */ + /* Detect and discard overlaps. */ + if (offset < prev_tail->ip_defrag_offset + prev_tail->len) + goto discard_qp; + if (offset == prev_tail->ip_defrag_offset + prev_tail->len) + ip4_frag_append_to_last_run(&qp->q, skb); + else + ip4_frag_create_run(&qp->q, skb); + } else { + /* Binary search. Note that skb can become the first fragment, + * but not the last (covered above). + */ + rbn = &qp->q.rb_fragments.rb_node; + do { + parent = *rbn; + skb1 = rb_to_skb(parent); + if (end <= skb1->ip_defrag_offset) + rbn = &parent->rb_left; + else if (offset >= skb1->ip_defrag_offset + + FRAG_CB(skb1)->frag_run_len) + rbn = &parent->rb_right; + else /* Found an overlap with skb1. */ + goto discard_qp; + } while (*rbn); + /* Here we have parent properly set, and rbn pointing to + * one of its NULL left/right children. Insert skb. + */ + ip4_frag_init_run(skb); + rb_link_node(&skb->rbnode, parent, rbn); + rb_insert_color(&skb->rbnode, &qp->q.rb_fragments); } - FRAG_CB(skb)->offset = offset; - - /* Insert this fragment in the chain of fragments. */ - skb->next = next; - if (!next) - qp->q.fragments_tail = skb; - if (prev) - prev->next = skb; - else - qp->q.fragments = skb; - - dev = skb->dev; - if (dev) { + if (dev) qp->iif = dev->ifindex; - skb->dev = NULL; - } + skb->ip_defrag_offset = offset; + qp->q.stamp = skb->tstamp; qp->q.meat += skb->len; qp->ecn |= ecn; @@ -518,7 +485,7 @@ found: unsigned long orefdst = skb->_skb_refdst; skb->_skb_refdst = 0UL; - err = ip_frag_reasm(qp, prev, dev); + err = ip_frag_reasm(qp, skb, prev_tail, dev); skb->_skb_refdst = orefdst; return err; } @@ -526,20 +493,24 @@ found: skb_dst_drop(skb); return -EINPROGRESS; +discard_qp: + inet_frag_kill(&qp->q); + err = -EINVAL; + __IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS); err: kfree_skb(skb); return err; } - /* Build a new IP datagram from all its fragments. 
*/ - -static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, - struct net_device *dev) +static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb, + struct sk_buff *prev_tail, struct net_device *dev) { struct net *net = container_of(qp->q.net, struct net, ipv4.frags); struct iphdr *iph; - struct sk_buff *fp, *head = qp->q.fragments; + struct sk_buff *fp, *head = skb_rb_first(&qp->q.rb_fragments); + struct sk_buff **nextp; /* To build frag_list. */ + struct rb_node *rbn; int len; int ihlen; int err; @@ -553,26 +524,27 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, goto out_fail; } /* Make the one we just received the head. */ - if (prev) { - head = prev->next; - fp = skb_clone(head, GFP_ATOMIC); + if (head != skb) { + fp = skb_clone(skb, GFP_ATOMIC); if (!fp) goto out_nomem; - - fp->next = head->next; - if (!fp->next) + FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag; + if (RB_EMPTY_NODE(&skb->rbnode)) + FRAG_CB(prev_tail)->next_frag = fp; + else + rb_replace_node(&skb->rbnode, &fp->rbnode, + &qp->q.rb_fragments); + if (qp->q.fragments_tail == skb) qp->q.fragments_tail = fp; - prev->next = fp; - - skb_morph(head, qp->q.fragments); - head->next = qp->q.fragments->next; - - consume_skb(qp->q.fragments); - qp->q.fragments = head; + skb_morph(skb, head); + FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag; + rb_replace_node(&head->rbnode, &skb->rbnode, + &qp->q.rb_fragments); + consume_skb(head); + head = skb; } - WARN_ON(!head); - WARN_ON(FRAG_CB(head)->offset != 0); + WARN_ON(head->ip_defrag_offset != 0); /* Allocate a new buffer for the datagram. */ ihlen = ip_hdrlen(head); @@ -596,35 +568,61 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, clone = alloc_skb(0, GFP_ATOMIC); if (!clone) goto out_nomem; - clone->next = head->next; - head->next = clone; skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; skb_frag_list_init(head); for (i = 0; i < skb_shinfo(head)->nr_frags; i++) plen += skb_frag_size(&skb_shinfo(head)->frags[i]); clone->len = clone->data_len = head->data_len - plen; - head->data_len -= clone->len; - head->len -= clone->len; + head->truesize += clone->truesize; clone->csum = 0; clone->ip_summed = head->ip_summed; add_frag_mem_limit(qp->q.net, clone->truesize); + skb_shinfo(head)->frag_list = clone; + nextp = &clone->next; + } else { + nextp = &skb_shinfo(head)->frag_list; } - skb_shinfo(head)->frag_list = head->next; skb_push(head, head->data - skb_network_header(head)); - for (fp=head->next; fp; fp = fp->next) { - head->data_len += fp->len; - head->len += fp->len; - if (head->ip_summed != fp->ip_summed) - head->ip_summed = CHECKSUM_NONE; - else if (head->ip_summed == CHECKSUM_COMPLETE) - head->csum = csum_add(head->csum, fp->csum); - head->truesize += fp->truesize; + /* Traverse the tree in order, to build frag_list. */ + fp = FRAG_CB(head)->next_frag; + rbn = rb_next(&head->rbnode); + rb_erase(&head->rbnode, &qp->q.rb_fragments); + while (rbn || fp) { + /* fp points to the next sk_buff in the current run; + * rbn points to the next run. + */ + /* Go through the current run. */ + while (fp) { + *nextp = fp; + nextp = &fp->next; + fp->prev = NULL; + memset(&fp->rbnode, 0, sizeof(fp->rbnode)); + fp->sk = NULL; + head->data_len += fp->len; + head->len += fp->len; + if (head->ip_summed != fp->ip_summed) + head->ip_summed = CHECKSUM_NONE; + else if (head->ip_summed == CHECKSUM_COMPLETE) + head->csum = csum_add(head->csum, fp->csum); + head->truesize += fp->truesize; + fp = FRAG_CB(fp)->next_frag; + } + /* Move to the next run. 
*/ + if (rbn) { + struct rb_node *rbnext = rb_next(rbn); + + fp = rb_to_skb(rbn); + rb_erase(rbn, &qp->q.rb_fragments); + rbn = rbnext; + } } sub_frag_mem_limit(qp->q.net, head->truesize); + *nextp = NULL; head->next = NULL; + head->prev = NULL; head->dev = dev; head->tstamp = qp->q.stamp; IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size); @@ -652,7 +650,9 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, __IP_INC_STATS(net, IPSTATS_MIB_REASMOKS); qp->q.fragments = NULL; + qp->q.rb_fragments = RB_ROOT; qp->q.fragments_tail = NULL; + qp->q.last_run_head = NULL; return 0; out_nomem: @@ -660,7 +660,7 @@ out_nomem: err = -ENOMEM; goto out_fail; out_oversize: - net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr); + net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->q.key.v4.saddr); out_fail: __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS); return err; @@ -734,25 +734,46 @@ struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user) } EXPORT_SYMBOL(ip_check_defrag); +unsigned int inet_frag_rbtree_purge(struct rb_root *root) +{ + struct rb_node *p = rb_first(root); + unsigned int sum = 0; + + while (p) { + struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode); + + p = rb_next(p); + rb_erase(&skb->rbnode, root); + while (skb) { + struct sk_buff *next = FRAG_CB(skb)->next_frag; + + sum += skb->truesize; + kfree_skb(skb); + skb = next; + } + } + return sum; +} +EXPORT_SYMBOL(inet_frag_rbtree_purge); + #ifdef CONFIG_SYSCTL -static int zero; +static int dist_min; static struct ctl_table ip4_frags_ns_ctl_table[] = { { .procname = "ipfrag_high_thresh", .data = &init_net.ipv4.frags.high_thresh, - .maxlen = sizeof(int), + .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = proc_dointvec_minmax, + .proc_handler = proc_doulongvec_minmax, .extra1 = &init_net.ipv4.frags.low_thresh }, { .procname = "ipfrag_low_thresh", .data = &init_net.ipv4.frags.low_thresh, - .maxlen = sizeof(int), + .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = &zero, + .proc_handler = proc_doulongvec_minmax, .extra2 = &init_net.ipv4.frags.high_thresh }, { @@ -768,7 +789,7 @@ static struct ctl_table ip4_frags_ns_ctl_table[] = { .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, - .extra1 = &zero + .extra1 = &dist_min, }, { } }; @@ -850,6 +871,8 @@ static void __init ip4_frags_ctl_register(void) static int __net_init ipv4_frags_init_net(struct net *net) { + int res; + /* Fragment cache limits. 
* * The fragment memory accounting code, (tries to) account for @@ -874,16 +897,21 @@ static int __net_init ipv4_frags_init_net(struct net *net) net->ipv4.frags.timeout = IP_FRAG_TIME; net->ipv4.frags.max_dist = 64; - - inet_frags_init_net(&net->ipv4.frags); - - return ip4_frags_ns_ctl_register(net); + net->ipv4.frags.f = &ip4_frags; + + res = inet_frags_init_net(&net->ipv4.frags); + if (res < 0) + return res; + res = ip4_frags_ns_ctl_register(net); + if (res < 0) + inet_frags_exit_net(&net->ipv4.frags); + return res; } static void __net_exit ipv4_frags_exit_net(struct net *net) { ip4_frags_ns_ctl_unregister(net); - inet_frags_exit_net(&net->ipv4.frags, &ip4_frags); + inet_frags_exit_net(&net->ipv4.frags); } static struct pernet_operations ip4_frags_ops = { @@ -891,17 +919,49 @@ static struct pernet_operations ip4_frags_ops = { .exit = ipv4_frags_exit_net, }; + +static u32 ip4_key_hashfn(const void *data, u32 len, u32 seed) +{ + return jhash2(data, + sizeof(struct frag_v4_compare_key) / sizeof(u32), seed); +} + +static u32 ip4_obj_hashfn(const void *data, u32 len, u32 seed) +{ + const struct inet_frag_queue *fq = data; + + return jhash2((const u32 *)&fq->key.v4, + sizeof(struct frag_v4_compare_key) / sizeof(u32), seed); +} + +static int ip4_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr) +{ + const struct frag_v4_compare_key *key = arg->key; + const struct inet_frag_queue *fq = ptr; + + return !!memcmp(&fq->key, key, sizeof(*key)); +} + +static const struct rhashtable_params ip4_rhash_params = { + .head_offset = offsetof(struct inet_frag_queue, node), + .key_offset = offsetof(struct inet_frag_queue, key), + .key_len = sizeof(struct frag_v4_compare_key), + .hashfn = ip4_key_hashfn, + .obj_hashfn = ip4_obj_hashfn, + .obj_cmpfn = ip4_obj_cmpfn, + .automatic_shrinking = true, +}; + void __init ipfrag_init(void) { - ip4_frags_ctl_register(); - register_pernet_subsys(&ip4_frags_ops); - ip4_frags.hashfn = ip4_hashfn; ip4_frags.constructor = ip4_frag_init; ip4_frags.destructor = ip4_frag_free; ip4_frags.qsize = sizeof(struct ipq); - ip4_frags.match = ip4_frag_match; ip4_frags.frag_expire = ip_expire; ip4_frags.frags_cache_name = ip_frag_cache_name; + ip4_frags.rhash_params = ip4_rhash_params; if (inet_frags_init(&ip4_frags)) panic("IP: failed to allocate ip4_frags cache\n"); + ip4_frags_ctl_register(); + register_pernet_subsys(&ip4_frags_ops); } diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 2459e9cc22a694be003c951aeb99a2eb942c650d..dd3bcf22fe8b4669099f8ab2466f6b23b1d6e32c 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -177,6 +177,8 @@ static void ipgre_err(struct sk_buff *skb, u32 info, if (tpi->proto == htons(ETH_P_TEB)) itn = net_generic(net, gre_tap_net_id); + else if (tpi->proto == htons(ETH_P_ERSPAN)) + itn = net_generic(net, erspan_net_id); else itn = net_generic(net, ipgre_net_id); @@ -320,6 +322,8 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi, ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error); return PACKET_RCVD; } + return PACKET_REJECT; + drop: kfree_skb(skb); return PACKET_RCVD; diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 114d4bef1bec291ca56173c5370cd20dbf2996ae..0d1a2cda1bfb1b7490615c54c8b1646c6d6e8a53 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -1894,6 +1894,7 @@ static struct xt_match ipt_builtin_mt[] __read_mostly = { .checkentry = icmp_checkentry, .proto = IPPROTO_ICMP, .family = NFPROTO_IPV4, + .me = THIS_MODULE, }, }; diff --git 
a/net/ipv4/proc.c b/net/ipv4/proc.c index 127153f1ed8a7894e0b2d2059af08bdcc6a00236..3fbf688a1943a22b82271e00ed6716ae6d395d7a 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -54,7 +54,6 @@ static int sockstat_seq_show(struct seq_file *seq, void *v) { struct net *net = seq->private; - unsigned int frag_mem; int orphans, sockets; orphans = percpu_counter_sum_positive(&tcp_orphan_count); @@ -72,8 +71,9 @@ static int sockstat_seq_show(struct seq_file *seq, void *v) sock_prot_inuse_get(net, &udplite_prot)); seq_printf(seq, "RAW: inuse %d\n", sock_prot_inuse_get(net, &raw_prot)); - frag_mem = ip_frag_mem(net); - seq_printf(seq, "FRAG: inuse %u memory %u\n", !!frag_mem, frag_mem); + seq_printf(seq, "FRAG: inuse %u memory %lu\n", + atomic_read(&net->ipv4.frags.rhashtable.nelems), + frag_mem_limit(&net->ipv4.frags)); return 0; } @@ -132,6 +132,7 @@ static const struct snmp_mib snmp4_ipextstats_list[] = { SNMP_MIB_ITEM("InECT1Pkts", IPSTATS_MIB_ECT1PKTS), SNMP_MIB_ITEM("InECT0Pkts", IPSTATS_MIB_ECT0PKTS), SNMP_MIB_ITEM("InCEPkts", IPSTATS_MIB_CEPKTS), + SNMP_MIB_ITEM("ReasmOverlaps", IPSTATS_MIB_REASM_OVERLAPS), SNMP_MIB_SENTINEL }; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index e81ff9d545a401db0c8892ec93a45fe10e4c4996..f9c985460faa3547269205fbf026e6a502768f55 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1177,7 +1177,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size) flags = msg->msg_flags; - if (flags & MSG_ZEROCOPY && size) { + if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) { if (sk->sk_state != TCP_ESTABLISHED) { err = -EINVAL; goto out_err; @@ -1837,7 +1837,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, * shouldn't happen. */ if (WARN(before(*seq, TCP_SKB_CB(skb)->seq), - "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n", + "TCP recvmsg seq # bug: copied %X, seq %X, rcvnxt %X, fl %X\n", *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags)) break; @@ -1852,7 +1852,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) goto found_fin_ok; WARN(!(flags & MSG_PEEK), - "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n", + "TCP recvmsg seq # bug 2: copied %X, seq %X, rcvnxt %X, fl %X\n", *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags); } diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c index 1a9b88c8cf7263c016277163636eb361f57960d3..8b637f9f23a232a137f4a7f2d685a599cc063c1b 100644 --- a/net/ipv4/tcp_dctcp.c +++ b/net/ipv4/tcp_dctcp.c @@ -55,7 +55,6 @@ struct dctcp { u32 dctcp_alpha; u32 next_seq; u32 ce_state; - u32 delayed_ack_reserved; u32 loss_cwnd; }; @@ -96,7 +95,6 @@ static void dctcp_init(struct sock *sk) ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA); - ca->delayed_ack_reserved = 0; ca->loss_cwnd = 0; ca->ce_state = 0; @@ -230,25 +228,6 @@ static void dctcp_state(struct sock *sk, u8 new_state) } } -static void dctcp_update_ack_reserved(struct sock *sk, enum tcp_ca_event ev) -{ - struct dctcp *ca = inet_csk_ca(sk); - - switch (ev) { - case CA_EVENT_DELAYED_ACK: - if (!ca->delayed_ack_reserved) - ca->delayed_ack_reserved = 1; - break; - case CA_EVENT_NON_DELAYED_ACK: - if (ca->delayed_ack_reserved) - ca->delayed_ack_reserved = 0; - break; - default: - /* Don't care for the rest. 
*/ - break; - } -} - static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev) { switch (ev) { @@ -258,10 +237,6 @@ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev) case CA_EVENT_ECN_NO_CE: dctcp_ce_state_1_to_0(sk); break; - case CA_EVENT_DELAYED_ACK: - case CA_EVENT_NON_DELAYED_ACK: - dctcp_update_ack_reserved(sk, ev); - break; default: /* Don't care for the rest. */ break; diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c index fbbeda64777406d16beb0c2905bedb633c28d33a..0567edb76522cbeb84943cada6335556719c8fc0 100644 --- a/net/ipv4/tcp_fastopen.c +++ b/net/ipv4/tcp_fastopen.c @@ -458,17 +458,15 @@ bool tcp_fastopen_active_should_disable(struct sock *sk) void tcp_fastopen_active_disable_ofo_check(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); - struct rb_node *p; - struct sk_buff *skb; struct dst_entry *dst; + struct sk_buff *skb; if (!tp->syn_fastopen) return; if (!tp->data_segs_in) { - p = rb_first(&tp->out_of_order_queue); - if (p && !rb_next(p)) { - skb = rb_entry(p, struct sk_buff, rbnode); + skb = skb_rb_first(&tp->out_of_order_queue); + if (skb && !skb_rb_next(skb)) { if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) { tcp_fastopen_active_disable(sk); return; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index bdabd748f4bcf24a4c980f4351044c69691a5304..991f382afc1b077f6005cffa7a80be98c52c9eb5 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4372,7 +4372,7 @@ static void tcp_ofo_queue(struct sock *sk) p = rb_first(&tp->out_of_order_queue); while (p) { - skb = rb_entry(p, struct sk_buff, rbnode); + skb = rb_to_skb(p); if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) break; @@ -4440,7 +4440,7 @@ static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb, static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); - struct rb_node **p, *q, *parent; + struct rb_node **p, *parent; struct sk_buff *skb1; u32 seq, end_seq; bool fragstolen; @@ -4503,7 +4503,7 @@ coalesce_done: parent = NULL; while (*p) { parent = *p; - skb1 = rb_entry(parent, struct sk_buff, rbnode); + skb1 = rb_to_skb(parent); if (before(seq, TCP_SKB_CB(skb1)->seq)) { p = &parent->rb_left; continue; @@ -4548,9 +4548,7 @@ insert: merge_right: /* Remove other segments covered by skb. */ - while ((q = rb_next(&skb->rbnode)) != NULL) { - skb1 = rb_entry(q, struct sk_buff, rbnode); - + while ((skb1 = skb_rb_next(skb)) != NULL) { if (!after(end_seq, TCP_SKB_CB(skb1)->seq)) break; if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) { @@ -4565,7 +4563,7 @@ merge_right: tcp_drop(sk, skb1); } /* If there is no skb after us, we are the last_skb ! */ - if (!q) + if (!skb1) tp->ooo_last_skb = skb; add_sack: @@ -4749,7 +4747,7 @@ static struct sk_buff *tcp_skb_next(struct sk_buff *skb, struct sk_buff_head *li if (list) return !skb_queue_is_last(list, skb) ? 
skb->next : NULL; - return rb_entry_safe(rb_next(&skb->rbnode), struct sk_buff, rbnode); + return skb_rb_next(skb); } static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb, @@ -4778,7 +4776,7 @@ static void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb) while (*p) { parent = *p; - skb1 = rb_entry(parent, struct sk_buff, rbnode); + skb1 = rb_to_skb(parent); if (before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb1)->seq)) p = &parent->rb_left; else @@ -4898,19 +4896,12 @@ static void tcp_collapse_ofo_queue(struct sock *sk) struct tcp_sock *tp = tcp_sk(sk); u32 range_truesize, sum_tiny = 0; struct sk_buff *skb, *head; - struct rb_node *p; u32 start, end; - p = rb_first(&tp->out_of_order_queue); - skb = rb_entry_safe(p, struct sk_buff, rbnode); + skb = skb_rb_first(&tp->out_of_order_queue); new_range: if (!skb) { - p = rb_last(&tp->out_of_order_queue); - /* Note: This is possible p is NULL here. We do not - * use rb_entry_safe(), as ooo_last_skb is valid only - * if rbtree is not empty. - */ - tp->ooo_last_skb = rb_entry(p, struct sk_buff, rbnode); + tp->ooo_last_skb = skb_rb_last(&tp->out_of_order_queue); return; } start = TCP_SKB_CB(skb)->seq; @@ -4918,7 +4909,7 @@ new_range: range_truesize = skb->truesize; for (head = skb;;) { - skb = tcp_skb_next(skb, NULL); + skb = skb_rb_next(skb); /* Range is terminated when we see a gap or when * we are at the queue end. @@ -4974,7 +4965,7 @@ static bool tcp_prune_ofo_queue(struct sock *sk) prev = rb_prev(node); rb_erase(node, &tp->out_of_order_queue); goal -= rb_to_skb(node)->truesize; - tcp_drop(sk, rb_entry(node, struct sk_buff, rbnode)); + tcp_drop(sk, rb_to_skb(node)); if (!prev || goal <= 0) { sk_mem_reclaim(sk); if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf && @@ -4984,7 +4975,7 @@ static bool tcp_prune_ofo_queue(struct sock *sk) } node = prev; } while (node); - tp->ooo_last_skb = rb_entry(prev, struct sk_buff, rbnode); + tp->ooo_last_skb = rb_to_skb(prev); /* Reset SACK state. A conforming SACK implementation will * do the same at a timeout based retransmit. When a connection diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 41e18629441b5f99dc88f123ccd5dbf8f0cf84e8..ca4507290102f12b90a58246eaa9ddf505ac3d71 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -2474,6 +2474,12 @@ static int __net_init tcp_sk_init(struct net *net) if (res) goto fail; sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); + + /* Please enforce IP_DF and IPID==0 for RST and + * ACK sent in SYN-RECV and TIME-WAIT state. 
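The tcp_fastopen.c and tcp_input.c hunks above replace open-coded rb_entry() arithmetic on the out-of-order queue with skb rbtree helpers (rb_to_skb(), skb_rb_first(), skb_rb_last(), skb_rb_next()). Those helpers are introduced by an earlier patch in this series and are not shown here; presumably they follow the upstream definitions, roughly:

#include <linux/rbtree.h>
#include <linux/skbuff.h>

/* rb_entry_safe() maps a NULL rb_node to a NULL sk_buff, so tree walks can
 * simply test the returned pointer.
 */
#define rb_to_skb(rb)		rb_entry_safe(rb, struct sk_buff, rbnode)
#define skb_rb_first(root)	rb_to_skb(rb_first(root))
#define skb_rb_last(root)	rb_to_skb(rb_last(root))
#define skb_rb_next(skb)	rb_to_skb(rb_next(&(skb)->rbnode))
#define skb_rb_prev(skb)	rb_to_skb(rb_prev(&(skb)->rbnode))

With these, walking the queue reduces to skb = skb_rb_first(&tp->out_of_order_queue) followed by skb = skb_rb_next(skb), and a NULL result marks the end of the tree.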
+ */ + inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO; + *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk; } diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 420fecbb98fe7edc3a215455d603997c95b7ced3..61584638dba7fc7d2c1f6c569108400d3237677d 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -185,8 +185,9 @@ kill: inet_twsk_deschedule_put(tw); return TCP_TW_SUCCESS; } + } else { + inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN); } - inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN); if (tmp_opt.saw_tstamp) { tcptw->tw_ts_recent = tmp_opt.rcv_tsval; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 3d8f6f342cb1965bdc7d690722545e4402404a5a..b2ead31afcbab34e7ff5a95a156b173e48b1bc73 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -3513,8 +3513,6 @@ void tcp_send_delayed_ack(struct sock *sk) int ato = icsk->icsk_ack.ato; unsigned long timeout; - tcp_ca_event(sk, CA_EVENT_DELAYED_ACK); - if (ato > TCP_DELACK_MIN) { const struct tcp_sock *tp = tcp_sk(sk); int max_ato = HZ / 2; @@ -3571,8 +3569,6 @@ void __tcp_send_ack(struct sock *sk, u32 rcv_nxt) if (sk->sk_state == TCP_CLOSE) return; - tcp_ca_event(sk, CA_EVENT_NON_DELAYED_ACK); - /* We are not putting this on the write queue, so * tcp_transmit_skb() will set the ownership to this * sock. diff --git a/net/ipv4/tcp_ulp.c b/net/ipv4/tcp_ulp.c index 6bb9e14c710a7e2bfa58ee63ff6e02461a22cbec..1feecb72f4fc8643956d61492b3e65fdcbbdd13f 100644 --- a/net/ipv4/tcp_ulp.c +++ b/net/ipv4/tcp_ulp.c @@ -39,7 +39,7 @@ static const struct tcp_ulp_ops *__tcp_ulp_find_autoload(const char *name) #ifdef CONFIG_MODULES if (!ulp && capable(CAP_NET_ADMIN)) { rcu_read_unlock(); - request_module("%s", name); + request_module("tcp-ulp-%s", name); rcu_read_lock(); ulp = tcp_ulp_find(name); } diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 5752bf7593dc4ddaf527c6b783a82440783dfc1e..3de413867991ed21d22a08b7ca1977c5f2e4b62e 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -2049,6 +2049,28 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh, inet_compute_pseudo); } +/* wrapper for udp_queue_rcv_skb tacking care of csum conversion and + * return code conversion for ip layer consumption + */ +static int udp_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb, + struct udphdr *uh) +{ + int ret; + + if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk)) + skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check, + inet_compute_pseudo); + + ret = udp_queue_rcv_skb(sk, skb); + + /* a return value > 0 means to resubmit the input, but + * it wants the return to be -protocol, or 0 + */ + if (ret > 0) + return -ret; + return 0; +} + /* * All we need to do is get the socket, and then do a checksum. 
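udp_unicast_rcv_skb() above centralises the checksum conversion and the return-code translation for unicast delivery: udp_queue_rcv_skb() may return a positive protocol number to request resubmission, while the IP input path expects that request encoded as a negative value (or 0 for "consumed"). A rough sketch of how the caller side consumes such a return value, loosely modelled on ip_local_deliver_finish() with raw-socket delivery, xfrm checks and statistics omitted (illustrative only):

#include <linux/skbuff.h>
#include <net/ip.h>
#include <net/protocol.h>

/* Caller holds rcu_read_lock(). */
static void deliver_to_l4_sketch(struct sk_buff *skb)
{
	const struct net_protocol *ipprot;
	int protocol = ip_hdr(skb)->protocol;
	int ret;

resubmit:
	ipprot = rcu_dereference(inet_protos[protocol]);
	if (!ipprot) {
		kfree_skb(skb);		/* nobody registered for this protocol */
		return;
	}

	ret = ipprot->handler(skb);	/* e.g. __udp4_lib_rcv() */
	if (ret < 0) {
		protocol = -ret;	/* handler asked to resubmit as another protocol */
		goto resubmit;
	}
	/* ret == 0: the handler consumed (queued or freed) the skb */
}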
*/ @@ -2095,14 +2117,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, if (unlikely(sk->sk_rx_dst != dst)) udp_sk_rx_dst_set(sk, dst); - ret = udp_queue_rcv_skb(sk, skb); + ret = udp_unicast_rcv_skb(sk, skb, uh); sock_put(sk); - /* a return value > 0 means to resubmit the input, but - * it wants the return to be -protocol, or 0 - */ - if (ret > 0) - return -ret; - return 0; + return ret; } if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST)) @@ -2110,22 +2127,8 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, saddr, daddr, udptable, proto); sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable); - if (sk) { - int ret; - - if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk)) - skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check, - inet_compute_pseudo); - - ret = udp_queue_rcv_skb(sk, skb); - - /* a return value > 0 means to resubmit the input, but - * it wants the return to be -protocol, or 0 - */ - if (ret > 0) - return -ret; - return 0; - } + if (sk) + return udp_unicast_rcv_skb(sk, skb, uh); if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) goto drop; diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c index 1323b9679cf718d0023bf5880dcd60fb8602d9db..1c0bb9fb76e61fa7d12317190ebac38847530858 100644 --- a/net/ipv6/calipso.c +++ b/net/ipv6/calipso.c @@ -799,8 +799,7 @@ static int calipso_opt_update(struct sock *sk, struct ipv6_opt_hdr *hop) { struct ipv6_txoptions *old = txopt_get(inet6_sk(sk)), *txopts; - txopts = ipv6_renew_options_kern(sk, old, IPV6_HOPOPTS, - hop, hop ? ipv6_optlen(hop) : 0); + txopts = ipv6_renew_options(sk, old, IPV6_HOPOPTS, hop); txopt_put(old); if (IS_ERR(txopts)) return PTR_ERR(txopts); @@ -1222,8 +1221,7 @@ static int calipso_req_setattr(struct request_sock *req, if (IS_ERR(new)) return PTR_ERR(new); - txopts = ipv6_renew_options_kern(sk, req_inet->ipv6_opt, IPV6_HOPOPTS, - new, new ? ipv6_optlen(new) : 0); + txopts = ipv6_renew_options(sk, req_inet->ipv6_opt, IPV6_HOPOPTS, new); kfree(new); @@ -1260,8 +1258,7 @@ static void calipso_req_delattr(struct request_sock *req) if (calipso_opt_del(req_inet->ipv6_opt->hopopt, &new)) return; /* Nothing to do */ - txopts = ipv6_renew_options_kern(sk, req_inet->ipv6_opt, IPV6_HOPOPTS, - new, new ? 
ipv6_optlen(new) : 0); + txopts = ipv6_renew_options(sk, req_inet->ipv6_opt, IPV6_HOPOPTS, new); if (!IS_ERR(txopts)) { txopts = xchg(&req_inet->ipv6_opt, txopts); diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 89910e2c10f4a63bcd285e28820141266f6d056f..f112fef79216a898588ba19af68fb0c0ec9d9d12 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -651,8 +651,10 @@ skip_cow: sg_init_table(sg, nfrags); ret = skb_to_sgvec(skb, sg, 0, skb->len); - if (unlikely(ret < 0)) + if (unlikely(ret < 0)) { + kfree(tmp); goto out; + } skb->ip_summed = CHECKSUM_NONE; diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index d6189c2a35e4f669cfe5b15ba976bd72fd07e958..47a5f8f88c70241c31821b56d5e128c8d28a0a28 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -987,29 +987,21 @@ ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt) } EXPORT_SYMBOL_GPL(ipv6_dup_options); -static int ipv6_renew_option(void *ohdr, - struct ipv6_opt_hdr __user *newopt, int newoptlen, - int inherit, - struct ipv6_opt_hdr **hdr, - char **p) +static void ipv6_renew_option(int renewtype, + struct ipv6_opt_hdr **dest, + struct ipv6_opt_hdr *old, + struct ipv6_opt_hdr *new, + int newtype, char **p) { - if (inherit) { - if (ohdr) { - memcpy(*p, ohdr, ipv6_optlen((struct ipv6_opt_hdr *)ohdr)); - *hdr = (struct ipv6_opt_hdr *)*p; - *p += CMSG_ALIGN(ipv6_optlen(*hdr)); - } - } else { - if (newopt) { - if (copy_from_user(*p, newopt, newoptlen)) - return -EFAULT; - *hdr = (struct ipv6_opt_hdr *)*p; - if (ipv6_optlen(*hdr) > newoptlen) - return -EINVAL; - *p += CMSG_ALIGN(newoptlen); - } - } - return 0; + struct ipv6_opt_hdr *src; + + src = (renewtype == newtype ? new : old); + if (!src) + return; + + memcpy(*p, src, ipv6_optlen(src)); + *dest = (struct ipv6_opt_hdr *)*p; + *p += CMSG_ALIGN(ipv6_optlen(*dest)); } /** @@ -1035,13 +1027,11 @@ static int ipv6_renew_option(void *ohdr, */ struct ipv6_txoptions * ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt, - int newtype, - struct ipv6_opt_hdr __user *newopt, int newoptlen) + int newtype, struct ipv6_opt_hdr *newopt) { int tot_len = 0; char *p; struct ipv6_txoptions *opt2; - int err; if (opt) { if (newtype != IPV6_HOPOPTS && opt->hopopt) @@ -1054,8 +1044,8 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt, tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst1opt)); } - if (newopt && newoptlen) - tot_len += CMSG_ALIGN(newoptlen); + if (newopt) + tot_len += CMSG_ALIGN(ipv6_optlen(newopt)); if (!tot_len) return NULL; @@ -1070,29 +1060,19 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt, opt2->tot_len = tot_len; p = (char *)(opt2 + 1); - err = ipv6_renew_option(opt ? opt->hopopt : NULL, newopt, newoptlen, - newtype != IPV6_HOPOPTS, - &opt2->hopopt, &p); - if (err) - goto out; - - err = ipv6_renew_option(opt ? opt->dst0opt : NULL, newopt, newoptlen, - newtype != IPV6_RTHDRDSTOPTS, - &opt2->dst0opt, &p); - if (err) - goto out; - - err = ipv6_renew_option(opt ? opt->srcrt : NULL, newopt, newoptlen, - newtype != IPV6_RTHDR, - (struct ipv6_opt_hdr **)&opt2->srcrt, &p); - if (err) - goto out; - - err = ipv6_renew_option(opt ? opt->dst1opt : NULL, newopt, newoptlen, - newtype != IPV6_DSTOPTS, - &opt2->dst1opt, &p); - if (err) - goto out; + ipv6_renew_option(IPV6_HOPOPTS, &opt2->hopopt, + (opt ? opt->hopopt : NULL), + newopt, newtype, &p); + ipv6_renew_option(IPV6_RTHDRDSTOPTS, &opt2->dst0opt, + (opt ? opt->dst0opt : NULL), + newopt, newtype, &p); + ipv6_renew_option(IPV6_RTHDR, + (struct ipv6_opt_hdr **)&opt2->srcrt, + (opt ? 
(struct ipv6_opt_hdr *)opt->srcrt : NULL), + newopt, newtype, &p); + ipv6_renew_option(IPV6_DSTOPTS, &opt2->dst1opt, + (opt ? opt->dst1opt : NULL), + newopt, newtype, &p); opt2->opt_nflen = (opt2->hopopt ? ipv6_optlen(opt2->hopopt) : 0) + (opt2->dst0opt ? ipv6_optlen(opt2->dst0opt) : 0) + @@ -1100,37 +1080,6 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt, opt2->opt_flen = (opt2->dst1opt ? ipv6_optlen(opt2->dst1opt) : 0); return opt2; -out: - sock_kfree_s(sk, opt2, opt2->tot_len); - return ERR_PTR(err); -} - -/** - * ipv6_renew_options_kern - replace a specific ext hdr with a new one. - * - * @sk: sock from which to allocate memory - * @opt: original options - * @newtype: option type to replace in @opt - * @newopt: new option of type @newtype to replace (kernel-mem) - * @newoptlen: length of @newopt - * - * See ipv6_renew_options(). The difference is that @newopt is - * kernel memory, rather than user memory. - */ -struct ipv6_txoptions * -ipv6_renew_options_kern(struct sock *sk, struct ipv6_txoptions *opt, - int newtype, struct ipv6_opt_hdr *newopt, - int newoptlen) -{ - struct ipv6_txoptions *ret_val; - const mm_segment_t old_fs = get_fs(); - - set_fs(KERNEL_DS); - ret_val = ipv6_renew_options(sk, opt, newtype, - (struct ipv6_opt_hdr __user *)newopt, - newoptlen); - set_fs(old_fs); - return ret_val; } struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space, diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index 4a87f9428ca519b475f8feaceaaa3a225bcfe6d2..e3698b6d82313f00101a3877f8af6a8d2462ba9a 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -113,6 +113,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, payload_len = skb->len - nhoff - sizeof(*ipv6h); ipv6h->payload_len = htons(payload_len); skb->network_header = (u8 *)ipv6h - skb->head; + skb_reset_mac_len(skb); if (udpfrag) { int err = ip6_find_1stfragopt(skb, &prevhdr); diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 1da021527fcd196341477067ff6a518756de78e5..9ab1e0fcbc13cec92c59feafedf947564437969c 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -219,12 +219,10 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, kfree_skb(skb); return -ENOBUFS; } + if (skb->sk) + skb_set_owner_w(skb2, skb->sk); consume_skb(skb); skb = skb2; - /* skb_set_owner_w() changes sk->sk_wmem_alloc atomically, - * it is safe to call in our context (socket lock not held) - */ - skb_set_owner_w(skb, (struct sock *)sk); } if (opt->opt_flen) ipv6_push_frag_opts(skb, opt, &proto); diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 0e0ab90a433494a259333745be9ef1966022f614..db5a24f093352967bfa5f83ba294eab7c3bc86fd 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -480,12 +480,8 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) goto tx_err_dst_release; } - skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev))); - skb_dst_set(skb, dst); - skb->dev = skb_dst(skb)->dev; - mtu = dst_mtu(dst); - if (!skb->ignore_df && skb->len > mtu) { + if (skb->len > mtu) { skb_dst_update_pmtu(skb, mtu); if (skb->protocol == htons(ETH_P_IPV6)) { @@ -498,9 +494,14 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) htonl(mtu)); } - return -EMSGSIZE; + err = -EMSGSIZE; + goto tx_err_dst_release; } + skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev))); + skb_dst_set(skb, dst); + skb->dev = skb_dst(skb)->dev; + err = dst_output(t->net, skb->sk, skb); if (net_xmit_eval(err) == 0) { 
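The exthdrs.c rework above makes ipv6_renew_options() operate purely on kernel memory and drops the ipv6_renew_options_kern() wrapper; in the ipv6_sockglue.c hunk that follows, the setsockopt() path therefore duplicates the user buffer first and checks that the header's self-described length does not overrun what was copied. A condensed sketch of that copy-and-validate step (function name hypothetical, error paths trimmed):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <net/ipv6.h>

/* Bring a user-supplied IPv6 extension header into kernel memory and reject
 * headers whose internal length claims more bytes than were copied in.
 */
static struct ipv6_opt_hdr *dup_opt_hdr(const void __user *optval, int optlen)
{
	struct ipv6_opt_hdr *new;

	if (optlen < (int)sizeof(struct ipv6_opt_hdr) ||
	    optlen & 0x7 || optlen > 8 * 255)
		return ERR_PTR(-EINVAL);

	new = memdup_user(optval, optlen);	/* kmalloc() + copy_from_user() */
	if (IS_ERR(new))
		return new;

	/* ipv6_optlen() trusts the hdrlen field inside the buffer itself. */
	if (ipv6_optlen(new) > optlen) {
		kfree(new);
		return ERR_PTR(-EINVAL);
	}
	return new;
}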
struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 1276d5bd567590c7adb92d10ba12a8f49bc875c9..5c91b05c8d8feb3207741a4d9b821df7e5a24cc6 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -390,6 +390,12 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, case IPV6_DSTOPTS: { struct ipv6_txoptions *opt; + struct ipv6_opt_hdr *new = NULL; + + /* hop-by-hop / destination options are privileged option */ + retv = -EPERM; + if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW)) + break; /* remove any sticky options header with a zero option * length, per RFC3542. @@ -401,17 +407,22 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, else if (optlen < sizeof(struct ipv6_opt_hdr) || optlen & 0x7 || optlen > 8 * 255) goto e_inval; - - /* hop-by-hop / destination options are privileged option */ - retv = -EPERM; - if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW)) - break; + else { + new = memdup_user(optval, optlen); + if (IS_ERR(new)) { + retv = PTR_ERR(new); + break; + } + if (unlikely(ipv6_optlen(new) > optlen)) { + kfree(new); + goto e_inval; + } + } opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk)); - opt = ipv6_renew_options(sk, opt, optname, - (struct ipv6_opt_hdr __user *)optval, - optlen); + opt = ipv6_renew_options(sk, opt, optname, new); + kfree(new); if (IS_ERR(opt)) { retv = PTR_ERR(opt); break; diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 6fd913d63835761a43e7cf6e0dd348df44732a9f..d112762b4cb8681bec7663bd1513295fcf704ba2 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -2083,7 +2083,8 @@ void ipv6_mc_dad_complete(struct inet6_dev *idev) mld_send_initial_cr(idev); idev->mc_dad_count--; if (idev->mc_dad_count) - mld_dad_start_timer(idev, idev->mc_maxdelay); + mld_dad_start_timer(idev, + unsolicited_report_interval(idev)); } } @@ -2095,7 +2096,8 @@ static void mld_dad_timer_expire(unsigned long data) if (idev->mc_dad_count) { idev->mc_dad_count--; if (idev->mc_dad_count) - mld_dad_start_timer(idev, idev->mc_maxdelay); + mld_dad_start_timer(idev, + unsolicited_report_interval(idev)); } in6_dev_put(idev); } @@ -2453,7 +2455,8 @@ static void mld_ifc_timer_expire(unsigned long data) if (idev->mc_ifc_count) { idev->mc_ifc_count--; if (idev->mc_ifc_count) - mld_ifc_start_timer(idev, idev->mc_maxdelay); + mld_ifc_start_timer(idev, + unsolicited_report_interval(idev)); } in6_dev_put(idev); } diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 2e51e01569030a3cc7d0837470372728b765c412..90f5bf2502a73f1b6b7c2b1403a01eee7814783f 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -1907,6 +1907,7 @@ static struct xt_match ip6t_builtin_mt[] __read_mostly = { .checkentry = icmp6_checkentry, .proto = IPPROTO_ICMPV6, .family = NFPROTO_IPV6, + .me = THIS_MODULE, }, }; diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c index 1c4a5de3f301ae1e1dc85041dcfb9fdbf33c1b22..40eb16bd97860b6a52ea5019b17e2e7c0f8bce03 100644 --- a/net/ipv6/netfilter/ip6t_rpfilter.c +++ b/net/ipv6/netfilter/ip6t_rpfilter.c @@ -26,6 +26,12 @@ static bool rpfilter_addr_unicast(const struct in6_addr *addr) return addr_type & IPV6_ADDR_UNICAST; } +static bool rpfilter_addr_linklocal(const struct in6_addr *addr) +{ + int addr_type = ipv6_addr_type(addr); + return addr_type & IPV6_ADDR_LINKLOCAL; +} + static bool 
rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb, const struct net_device *dev, u8 flags) { @@ -48,7 +54,11 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb, } fl6.flowi6_mark = flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0; - if ((flags & XT_RPFILTER_LOOSE) == 0) + + if (rpfilter_addr_linklocal(&iph->saddr)) { + lookup_flags |= RT6_LOOKUP_F_IFACE; + fl6.flowi6_oif = dev->ifindex; + } else if ((flags & XT_RPFILTER_LOOSE) == 0) fl6.flowi6_oif = dev->ifindex; rt = (void *) ip6_route_lookup(net, &fl6, lookup_flags); diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 722a9db8c6a7b786500f29cc8bfe732b1e3e23c5..2ed8536e10b64eb1bf0617a7377dee768c8e0624 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -63,7 +63,6 @@ struct nf_ct_frag6_skb_cb static struct inet_frags nf_frags; #ifdef CONFIG_SYSCTL -static int zero; static struct ctl_table nf_ct_frag6_sysctl_table[] = { { @@ -76,18 +75,17 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = { { .procname = "nf_conntrack_frag6_low_thresh", .data = &init_net.nf_frag.frags.low_thresh, - .maxlen = sizeof(unsigned int), + .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = &zero, + .proc_handler = proc_doulongvec_minmax, .extra2 = &init_net.nf_frag.frags.high_thresh }, { .procname = "nf_conntrack_frag6_high_thresh", .data = &init_net.nf_frag.frags.high_thresh, - .maxlen = sizeof(unsigned int), + .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = proc_dointvec_minmax, + .proc_handler = proc_doulongvec_minmax, .extra1 = &init_net.nf_frag.frags.low_thresh }, { } @@ -117,7 +115,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net) if (hdr == NULL) goto err_reg; - net->nf_frag.sysctl.frags_hdr = hdr; + net->nf_frag_frags_hdr = hdr; return 0; err_reg: @@ -131,8 +129,8 @@ static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net) { struct ctl_table *table; - table = net->nf_frag.sysctl.frags_hdr->ctl_table_arg; - unregister_net_sysctl_table(net->nf_frag.sysctl.frags_hdr); + table = net->nf_frag_frags_hdr->ctl_table_arg; + unregister_net_sysctl_table(net->nf_frag_frags_hdr); if (!net_eq(net, &init_net)) kfree(table); } @@ -152,59 +150,35 @@ static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h) return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK); } -static unsigned int nf_hash_frag(__be32 id, const struct in6_addr *saddr, - const struct in6_addr *daddr) -{ - net_get_random_once(&nf_frags.rnd, sizeof(nf_frags.rnd)); - return jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr), - (__force u32)id, nf_frags.rnd); -} - - -static unsigned int nf_hashfn(const struct inet_frag_queue *q) -{ - const struct frag_queue *nq; - - nq = container_of(q, struct frag_queue, q); - return nf_hash_frag(nq->id, &nq->saddr, &nq->daddr); -} - -static void nf_ct_frag6_expire(unsigned long data) +static void nf_ct_frag6_expire(struct timer_list *t) { + struct inet_frag_queue *frag = from_timer(frag, t, timer); struct frag_queue *fq; struct net *net; - fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q); + fq = container_of(frag, struct frag_queue, q); net = container_of(fq->q.net, struct net, nf_frag.frags); - ip6_expire_frag_queue(net, fq, &nf_frags); + ip6_expire_frag_queue(net, fq); } /* Creation primitives. 
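nf_ct_frag6_expire() above (and ip6_frag_expire() later in this patch) now receive a struct timer_list * and recover their queue with from_timer() instead of casting an unsigned long cookie. A generic sketch of that timer pattern, with hypothetical names:

#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/timer.h>

struct frag_queue_sketch {
	struct timer_list timer;	/* embedded timer, no opaque data cookie */
	/* ... queue state ... */
};

static void frag_expire_sketch(struct timer_list *t)
{
	/* from_timer() is container_of() keyed on the member name. */
	struct frag_queue_sketch *q = from_timer(q, t, timer);

	pr_debug("frag queue %p expired\n", q);
	/* unlink and free q here */
}

static void frag_arm_sketch(struct frag_queue_sketch *q, unsigned long timeout)
{
	timer_setup(&q->timer, frag_expire_sketch, 0);
	mod_timer(&q->timer, jiffies + timeout);
}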
*/ -static inline struct frag_queue *fq_find(struct net *net, __be32 id, - u32 user, struct in6_addr *src, - struct in6_addr *dst, int iif, u8 ecn) +static struct frag_queue *fq_find(struct net *net, __be32 id, u32 user, + const struct ipv6hdr *hdr, int iif) { + struct frag_v6_compare_key key = { + .id = id, + .saddr = hdr->saddr, + .daddr = hdr->daddr, + .user = user, + .iif = iif, + }; struct inet_frag_queue *q; - struct ip6_create_arg arg; - unsigned int hash; - - arg.id = id; - arg.user = user; - arg.src = src; - arg.dst = dst; - arg.iif = iif; - arg.ecn = ecn; - - local_bh_disable(); - hash = nf_hash_frag(id, src, dst); - - q = inet_frag_find(&net->nf_frag.frags, &nf_frags, &arg, hash); - local_bh_enable(); - if (IS_ERR_OR_NULL(q)) { - inet_frag_maybe_warn_overflow(q, pr_fmt()); + + q = inet_frag_find(&net->nf_frag.frags, &key); + if (!q) return NULL; - } + return container_of(q, struct frag_queue, q); } @@ -263,7 +237,7 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb, * this case. -DaveM */ pr_debug("end of fragment not rounded to 8 bytes.\n"); - inet_frag_kill(&fq->q, &nf_frags); + inet_frag_kill(&fq->q); return -EPROTO; } if (end > fq->q.len) { @@ -356,7 +330,7 @@ found: return 0; discard_fq: - inet_frag_kill(&fq->q, &nf_frags); + inet_frag_kill(&fq->q); err: return -EINVAL; } @@ -378,7 +352,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_devic int payload_len; u8 ecn; - inet_frag_kill(&fq->q, &nf_frags); + inet_frag_kill(&fq->q); WARN_ON(head == NULL); WARN_ON(NFCT_FRAG6_CB(head)->offset != 0); @@ -479,6 +453,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_devic else if (head->ip_summed == CHECKSUM_COMPLETE) head->csum = csum_add(head->csum, fp->csum); head->truesize += fp->truesize; + fp->sk = NULL; } sub_frag_mem_limit(fq->q.net, head->truesize); @@ -497,6 +472,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_devic head->csum); fq->q.fragments = NULL; + fq->q.rb_fragments = RB_ROOT; fq->q.fragments_tail = NULL; return true; @@ -591,9 +567,13 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user) hdr = ipv6_hdr(skb); fhdr = (struct frag_hdr *)skb_transport_header(skb); + if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU && + fhdr->frag_off & htons(IP6_MF)) + return -EINVAL; + skb_orphan(skb); - fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr, - skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr)); + fq = fq_find(net, fhdr->identification, user, hdr, + skb->dev ? 
skb->dev->ifindex : 0); if (fq == NULL) { pr_debug("Can't find and can't create new queue\n"); return -ENOMEM; @@ -623,25 +603,33 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user) out_unlock: spin_unlock_bh(&fq->q.lock); - inet_frag_put(&fq->q, &nf_frags); + inet_frag_put(&fq->q); return ret; } EXPORT_SYMBOL_GPL(nf_ct_frag6_gather); static int nf_ct_net_init(struct net *net) { + int res; + net->nf_frag.frags.high_thresh = IPV6_FRAG_HIGH_THRESH; net->nf_frag.frags.low_thresh = IPV6_FRAG_LOW_THRESH; net->nf_frag.frags.timeout = IPV6_FRAG_TIMEOUT; - inet_frags_init_net(&net->nf_frag.frags); - - return nf_ct_frag6_sysctl_register(net); + net->nf_frag.frags.f = &nf_frags; + + res = inet_frags_init_net(&net->nf_frag.frags); + if (res < 0) + return res; + res = nf_ct_frag6_sysctl_register(net); + if (res < 0) + inet_frags_exit_net(&net->nf_frag.frags); + return res; } static void nf_ct_net_exit(struct net *net) { nf_ct_frags6_sysctl_unregister(net); - inet_frags_exit_net(&net->nf_frag.frags, &nf_frags); + inet_frags_exit_net(&net->nf_frag.frags); } static struct pernet_operations nf_ct_net_ops = { @@ -653,13 +641,12 @@ int nf_ct_frag6_init(void) { int ret = 0; - nf_frags.hashfn = nf_hashfn; nf_frags.constructor = ip6_frag_init; nf_frags.destructor = NULL; nf_frags.qsize = sizeof(struct frag_queue); - nf_frags.match = ip6_frag_match; nf_frags.frag_expire = nf_ct_frag6_expire; nf_frags.frags_cache_name = nf_frags_cache_name; + nf_frags.rhash_params = ip6_rhash_params; ret = inet_frags_init(&nf_frags); if (ret) goto out; diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c index e88bcb8ff0fd73b8377f5e0d3c3d1fa524ef1da8..dc04c024986ce1c02db0065b2f3ad649e1c9c7e4 100644 --- a/net/ipv6/proc.c +++ b/net/ipv6/proc.c @@ -38,7 +38,6 @@ static int sockstat6_seq_show(struct seq_file *seq, void *v) { struct net *net = seq->private; - unsigned int frag_mem = ip6_frag_mem(net); seq_printf(seq, "TCP6: inuse %d\n", sock_prot_inuse_get(net, &tcpv6_prot)); @@ -48,7 +47,9 @@ static int sockstat6_seq_show(struct seq_file *seq, void *v) sock_prot_inuse_get(net, &udplitev6_prot)); seq_printf(seq, "RAW6: inuse %d\n", sock_prot_inuse_get(net, &rawv6_prot)); - seq_printf(seq, "FRAG6: inuse %u memory %u\n", !!frag_mem, frag_mem); + seq_printf(seq, "FRAG6: inuse %u memory %lu\n", + atomic_read(&net->ipv6.frags.rhashtable.nelems), + frag_mem_limit(&net->ipv6.frags)); return 0; } diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 846012eae52680e00108deb65c356f5024f94552..ede0061b6f5dfacf7b25a868725213e9d43e5323 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -79,130 +79,93 @@ static struct inet_frags ip6_frags; static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_device *dev); -/* - * callers should be careful not to use the hash value outside the ipfrag_lock - * as doing so could race with ipfrag_hash_rnd being recalculated. 
- */ -static unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr, - const struct in6_addr *daddr) -{ - net_get_random_once(&ip6_frags.rnd, sizeof(ip6_frags.rnd)); - return jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr), - (__force u32)id, ip6_frags.rnd); -} - -static unsigned int ip6_hashfn(const struct inet_frag_queue *q) -{ - const struct frag_queue *fq; - - fq = container_of(q, struct frag_queue, q); - return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr); -} - -bool ip6_frag_match(const struct inet_frag_queue *q, const void *a) -{ - const struct frag_queue *fq; - const struct ip6_create_arg *arg = a; - - fq = container_of(q, struct frag_queue, q); - return fq->id == arg->id && - fq->user == arg->user && - ipv6_addr_equal(&fq->saddr, arg->src) && - ipv6_addr_equal(&fq->daddr, arg->dst) && - (arg->iif == fq->iif || - !(ipv6_addr_type(arg->dst) & (IPV6_ADDR_MULTICAST | - IPV6_ADDR_LINKLOCAL))); -} -EXPORT_SYMBOL(ip6_frag_match); - void ip6_frag_init(struct inet_frag_queue *q, const void *a) { struct frag_queue *fq = container_of(q, struct frag_queue, q); - const struct ip6_create_arg *arg = a; + const struct frag_v6_compare_key *key = a; - fq->id = arg->id; - fq->user = arg->user; - fq->saddr = *arg->src; - fq->daddr = *arg->dst; - fq->ecn = arg->ecn; + q->key.v6 = *key; + fq->ecn = 0; } EXPORT_SYMBOL(ip6_frag_init); -void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq, - struct inet_frags *frags) +void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq) { struct net_device *dev = NULL; + struct sk_buff *head; + rcu_read_lock(); spin_lock(&fq->q.lock); if (fq->q.flags & INET_FRAG_COMPLETE) goto out; - inet_frag_kill(&fq->q, frags); + inet_frag_kill(&fq->q); - rcu_read_lock(); dev = dev_get_by_index_rcu(net, fq->iif); if (!dev) - goto out_rcu_unlock; + goto out; __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS); - - if (inet_frag_evicting(&fq->q)) - goto out_rcu_unlock; - __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT); /* Don't send error if the first segment did not arrive. */ - if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !fq->q.fragments) - goto out_rcu_unlock; + head = fq->q.fragments; + if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !head) + goto out; /* But use as source device on which LAST ARRIVED * segment was received. And do not use fq->dev * pointer directly, device might already disappeared. 
*/ - fq->q.fragments->dev = dev; - icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0); -out_rcu_unlock: - rcu_read_unlock(); + head->dev = dev; + skb_get(head); + spin_unlock(&fq->q.lock); + + icmpv6_send(head, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0); + kfree_skb(head); + goto out_rcu_unlock; + out: spin_unlock(&fq->q.lock); - inet_frag_put(&fq->q, frags); +out_rcu_unlock: + rcu_read_unlock(); + inet_frag_put(&fq->q); } EXPORT_SYMBOL(ip6_expire_frag_queue); -static void ip6_frag_expire(unsigned long data) +static void ip6_frag_expire(struct timer_list *t) { + struct inet_frag_queue *frag = from_timer(frag, t, timer); struct frag_queue *fq; struct net *net; - fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q); + fq = container_of(frag, struct frag_queue, q); net = container_of(fq->q.net, struct net, ipv6.frags); - ip6_expire_frag_queue(net, fq, &ip6_frags); + ip6_expire_frag_queue(net, fq); } static struct frag_queue * -fq_find(struct net *net, __be32 id, const struct in6_addr *src, - const struct in6_addr *dst, int iif, u8 ecn) +fq_find(struct net *net, __be32 id, const struct ipv6hdr *hdr, int iif) { + struct frag_v6_compare_key key = { + .id = id, + .saddr = hdr->saddr, + .daddr = hdr->daddr, + .user = IP6_DEFRAG_LOCAL_DELIVER, + .iif = iif, + }; struct inet_frag_queue *q; - struct ip6_create_arg arg; - unsigned int hash; - arg.id = id; - arg.user = IP6_DEFRAG_LOCAL_DELIVER; - arg.src = src; - arg.dst = dst; - arg.iif = iif; - arg.ecn = ecn; + if (!(ipv6_addr_type(&hdr->daddr) & (IPV6_ADDR_MULTICAST | + IPV6_ADDR_LINKLOCAL))) + key.iif = 0; - hash = inet6_hash_frag(id, src, dst); - - q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash); - if (IS_ERR_OR_NULL(q)) { - inet_frag_maybe_warn_overflow(q, pr_fmt()); + q = inet_frag_find(&net->ipv6.frags, &key); + if (!q) return NULL; - } + return container_of(q, struct frag_queue, q); } @@ -363,7 +326,7 @@ found: return -1; discard_fq: - inet_frag_kill(&fq->q, &ip6_frags); + inet_frag_kill(&fq->q); err: __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS); @@ -390,7 +353,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, int sum_truesize; u8 ecn; - inet_frag_kill(&fq->q, &ip6_frags); + inet_frag_kill(&fq->q); ecn = ip_frag_ecn_table[fq->ecn]; if (unlikely(ecn == 0xff)) @@ -509,6 +472,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS); rcu_read_unlock(); fq->q.fragments = NULL; + fq->q.rb_fragments = RB_ROOT; fq->q.fragments_tail = NULL; return 1; @@ -530,6 +494,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb) struct frag_queue *fq; const struct ipv6hdr *hdr = ipv6_hdr(skb); struct net *net = dev_net(skb_dst(skb)->dev); + int iif; if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED) goto fail_hdr; @@ -558,17 +523,22 @@ static int ipv6_frag_rcv(struct sk_buff *skb) return 1; } - fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr, - skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr)); + if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU && + fhdr->frag_off & htons(IP6_MF)) + goto fail_hdr; + + iif = skb->dev ? 
skb->dev->ifindex : 0; + fq = fq_find(net, fhdr->identification, hdr, iif); if (fq) { int ret; spin_lock(&fq->q.lock); + fq->iif = iif; ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff); spin_unlock(&fq->q.lock); - inet_frag_put(&fq->q, &ip6_frags); + inet_frag_put(&fq->q); return ret; } @@ -589,24 +559,22 @@ static const struct inet6_protocol frag_protocol = { }; #ifdef CONFIG_SYSCTL -static int zero; static struct ctl_table ip6_frags_ns_ctl_table[] = { { .procname = "ip6frag_high_thresh", .data = &init_net.ipv6.frags.high_thresh, - .maxlen = sizeof(int), + .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = proc_dointvec_minmax, + .proc_handler = proc_doulongvec_minmax, .extra1 = &init_net.ipv6.frags.low_thresh }, { .procname = "ip6frag_low_thresh", .data = &init_net.ipv6.frags.low_thresh, - .maxlen = sizeof(int), + .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = &zero, + .proc_handler = proc_doulongvec_minmax, .extra2 = &init_net.ipv6.frags.high_thresh }, { @@ -649,10 +617,6 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net) table[1].data = &net->ipv6.frags.low_thresh; table[1].extra2 = &net->ipv6.frags.high_thresh; table[2].data = &net->ipv6.frags.timeout; - - /* Don't export sysctls to unprivileged users */ - if (net->user_ns != &init_user_ns) - table[0].procname = NULL; } hdr = register_net_sysctl(net, "net/ipv6", table); @@ -714,19 +678,27 @@ static void ip6_frags_sysctl_unregister(void) static int __net_init ipv6_frags_init_net(struct net *net) { + int res; + net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH; net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH; net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT; + net->ipv6.frags.f = &ip6_frags; - inet_frags_init_net(&net->ipv6.frags); + res = inet_frags_init_net(&net->ipv6.frags); + if (res < 0) + return res; - return ip6_frags_ns_sysctl_register(net); + res = ip6_frags_ns_sysctl_register(net); + if (res < 0) + inet_frags_exit_net(&net->ipv6.frags); + return res; } static void __net_exit ipv6_frags_exit_net(struct net *net) { ip6_frags_ns_sysctl_unregister(net); - inet_frags_exit_net(&net->ipv6.frags, &ip6_frags); + inet_frags_exit_net(&net->ipv6.frags); } static struct pernet_operations ip6_frags_ops = { @@ -734,14 +706,55 @@ static struct pernet_operations ip6_frags_ops = { .exit = ipv6_frags_exit_net, }; +static u32 ip6_key_hashfn(const void *data, u32 len, u32 seed) +{ + return jhash2(data, + sizeof(struct frag_v6_compare_key) / sizeof(u32), seed); +} + +static u32 ip6_obj_hashfn(const void *data, u32 len, u32 seed) +{ + const struct inet_frag_queue *fq = data; + + return jhash2((const u32 *)&fq->key.v6, + sizeof(struct frag_v6_compare_key) / sizeof(u32), seed); +} + +static int ip6_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr) +{ + const struct frag_v6_compare_key *key = arg->key; + const struct inet_frag_queue *fq = ptr; + + return !!memcmp(&fq->key, key, sizeof(*key)); +} + +const struct rhashtable_params ip6_rhash_params = { + .head_offset = offsetof(struct inet_frag_queue, node), + .hashfn = ip6_key_hashfn, + .obj_hashfn = ip6_obj_hashfn, + .obj_cmpfn = ip6_obj_cmpfn, + .automatic_shrinking = true, +}; +EXPORT_SYMBOL(ip6_rhash_params); + int __init ipv6_frag_init(void) { int ret; - ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT); + ip6_frags.constructor = ip6_frag_init; + ip6_frags.destructor = NULL; + ip6_frags.qsize = sizeof(struct frag_queue); + ip6_frags.frag_expire = ip6_frag_expire; + 
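ip6_rhash_params above (like the IPv4 version earlier in this patch) hash the whole compare key with jhash2(), which consumes the key as an array of u32 words, and fall back to a raw memcmp() in obj_cmpfn; that only works when the key structure's size is a multiple of sizeof(u32) and it carries no uninitialized padding. A stripped-down sketch of the same pattern for a hypothetical lookup key:

#include <linux/jhash.h>
#include <linux/rhashtable.h>
#include <linux/stddef.h>
#include <linux/string.h>

/* Hypothetical key: kept a multiple of sizeof(u32) and padding-free, because
 * it is hashed word-by-word and compared with memcmp().
 */
struct demo_key {
	__be32	id;
	u32	user;
};

struct demo_entry {
	struct rhash_head	node;
	struct demo_key		key;
	/* ... payload ... */
};

static u32 demo_key_hashfn(const void *data, u32 len, u32 seed)
{
	return jhash2(data, sizeof(struct demo_key) / sizeof(u32), seed);
}

static u32 demo_obj_hashfn(const void *data, u32 len, u32 seed)
{
	const struct demo_entry *e = data;

	return jhash2((const u32 *)&e->key,
		      sizeof(struct demo_key) / sizeof(u32), seed);
}

static int demo_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
{
	const struct demo_key *key = arg->key;
	const struct demo_entry *e = ptr;

	return !!memcmp(&e->key, key, sizeof(*key));
}

static const struct rhashtable_params demo_rhash_params = {
	.head_offset		= offsetof(struct demo_entry, node),
	.key_offset		= offsetof(struct demo_entry, key),
	.key_len		= sizeof(struct demo_key),
	.hashfn			= demo_key_hashfn,
	.obj_hashfn		= demo_obj_hashfn,
	.obj_cmpfn		= demo_obj_cmpfn,
	.automatic_shrinking	= true,
};

A lookup then fills a demo_key on the stack (zeroed first, so no padding garbage leaks into the hash or compare) and calls rhashtable_lookup_fast() with demo_rhash_params, which is exactly how inet_frag_find() resolves a fragment queue in this series.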
ip6_frags.frags_cache_name = ip6_frag_cache_name; + ip6_frags.rhash_params = ip6_rhash_params; + ret = inet_frags_init(&ip6_frags); if (ret) goto out; + ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT); + if (ret) + goto err_protocol; + ret = ip6_frags_sysctl_register(); if (ret) goto err_sysctl; @@ -750,16 +763,6 @@ int __init ipv6_frag_init(void) if (ret) goto err_pernet; - ip6_frags.hashfn = ip6_hashfn; - ip6_frags.constructor = ip6_frag_init; - ip6_frags.destructor = NULL; - ip6_frags.qsize = sizeof(struct frag_queue); - ip6_frags.match = ip6_frag_match; - ip6_frags.frag_expire = ip6_frag_expire; - ip6_frags.frags_cache_name = ip6_frag_cache_name; - ret = inet_frags_init(&ip6_frags); - if (ret) - goto err_pernet; out: return ret; @@ -767,6 +770,8 @@ err_pernet: ip6_frags_sysctl_unregister(); err_sysctl: inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT); +err_protocol: + inet_frags_fini(&ip6_frags); goto out; } diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 60efd326014bcc9acbda5081b950dc5b0c8f03f8..30204bc2fc480aebb52b2f25a43cca1a7ffc8524 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -3239,11 +3239,16 @@ static int ip6_route_multipath_add(struct fib6_config *cfg, err_nh = NULL; list_for_each_entry(nh, &rt6_nh_list, next) { - rt_last = nh->rt6_info; err = __ip6_ins_rt(nh->rt6_info, info, &nh->mxc, extack); - /* save reference to first route for notification */ - if (!rt_notif && !err) - rt_notif = nh->rt6_info; + + if (!err) { + /* save reference to last route successfully inserted */ + rt_last = nh->rt6_info; + + /* save reference to first route for notification */ + if (!rt_notif) + rt_notif = nh->rt6_info; + } /* nh->rt6_info is used or freed at this point, reset to NULL*/ nh->rt6_info = NULL; diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 330d5ea8451b53c0b5b049f47c473e041f9ad24a..5cee941ab0a9c81b316247fb7f43adf445f328f2 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -780,6 +780,28 @@ static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) } } +/* wrapper for udp_queue_rcv_skb tacking care of csum conversion and + * return code conversion for ip layer consumption + */ +static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb, + struct udphdr *uh) +{ + int ret; + + if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk)) + skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check, + ip6_compute_pseudo); + + ret = udpv6_queue_rcv_skb(sk, skb); + + /* a return value > 0 means to resubmit the input, but + * it wants the return to be -protocol, or 0 + */ + if (ret > 0) + return -ret; + return 0; +} + int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, int proto) { @@ -831,13 +853,14 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, if (unlikely(sk->sk_rx_dst != dst)) udp6_sk_rx_dst_set(sk, dst); - ret = udpv6_queue_rcv_skb(sk, skb); - sock_put(sk); + if (!uh->check && !udp_sk(sk)->no_check6_rx) { + sock_put(sk); + goto report_csum_error; + } - /* a return value > 0 means to resubmit the input */ - if (ret > 0) - return ret; - return 0; + ret = udp6_unicast_rcv_skb(sk, skb, uh); + sock_put(sk); + return ret; } /* @@ -850,30 +873,13 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, /* Unicast */ sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable); if (sk) { - int ret; - - if (!uh->check && !udp_sk(sk)->no_check6_rx) { - udp6_csum_zero_error(skb); - goto csum_error; - } - - if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk)) - 
skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check, - ip6_compute_pseudo); - - ret = udpv6_queue_rcv_skb(sk, skb); - - /* a return value > 0 means to resubmit the input */ - if (ret > 0) - return ret; - - return 0; + if (!uh->check && !udp_sk(sk)->no_check6_rx) + goto report_csum_error; + return udp6_unicast_rcv_skb(sk, skb, uh); } - if (!uh->check) { - udp6_csum_zero_error(skb); - goto csum_error; - } + if (!uh->check) + goto report_csum_error; if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) goto discard; @@ -894,6 +900,9 @@ short_packet: ulen, skb->len, daddr, ntohs(uh->dest)); goto discard; + +report_csum_error: + udp6_csum_zero_error(skb); csum_error: __UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE); discard: diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 288640471c2fa427fc19c719f0a8cfe4279e9fad..b456b882a6ea5f994166ca3d3a6162e33679bf53 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -494,7 +494,7 @@ static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev, goto out_unlock; } - ieee80211_key_free(key, true); + ieee80211_key_free(key, sdata->vif.type == NL80211_IFTYPE_STATION); ret = 0; out_unlock: diff --git a/net/mac80211/key.c b/net/mac80211/key.c index 938049395f9073169035ded5f506cb0c192c41f7..fa10c61422440b8faaa5f37fb327490cbb43d60f 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c @@ -649,11 +649,15 @@ int ieee80211_key_link(struct ieee80211_key *key, { struct ieee80211_local *local = sdata->local; struct ieee80211_key *old_key; - int idx, ret; - bool pairwise; - - pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE; - idx = key->conf.keyidx; + int idx = key->conf.keyidx; + bool pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE; + /* + * We want to delay tailroom updates only for station - in that + * case it helps roaming speed, but in other cases it hurts and + * can cause warnings to appear. 
+ */ + bool delay_tailroom = sdata->vif.type == NL80211_IFTYPE_STATION; + int ret; mutex_lock(&sdata->local->key_mtx); @@ -681,14 +685,14 @@ int ieee80211_key_link(struct ieee80211_key *key, increment_tailroom_need_count(sdata); ieee80211_key_replace(sdata, sta, pairwise, old_key, key); - ieee80211_key_destroy(old_key, true); + ieee80211_key_destroy(old_key, delay_tailroom); ieee80211_debugfs_key_add(key); if (!local->wowlan) { ret = ieee80211_key_enable_hw_accel(key); if (ret) - ieee80211_key_free(key, true); + ieee80211_key_free(key, delay_tailroom); } else { ret = 0; } @@ -923,7 +927,8 @@ void ieee80211_free_sta_keys(struct ieee80211_local *local, ieee80211_key_replace(key->sdata, key->sta, key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE, key, NULL); - __ieee80211_key_destroy(key, true); + __ieee80211_key_destroy(key, key->sdata->vif.type == + NL80211_IFTYPE_STATION); } for (i = 0; i < NUM_DEFAULT_KEYS; i++) { @@ -933,7 +938,8 @@ void ieee80211_free_sta_keys(struct ieee80211_local *local, ieee80211_key_replace(key->sdata, key->sta, key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE, key, NULL); - __ieee80211_key_destroy(key, true); + __ieee80211_key_destroy(key, key->sdata->vif.type == + NL80211_IFTYPE_STATION); } mutex_unlock(&local->key_mtx); diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 6aef6793d05236c19afe00a51f4ddc91bc060a39..81f120466c38b9971a1cf7b02ae4596dfa4ac40e 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -2068,7 +2068,8 @@ int ieee80211_reconfig(struct ieee80211_local *local) if (!sta->uploaded) continue; - if (sta->sdata->vif.type != NL80211_IFTYPE_AP) + if (sta->sdata->vif.type != NL80211_IFTYPE_AP && + sta->sdata->vif.type != NL80211_IFTYPE_AP_VLAN) continue; for (state = IEEE80211_STA_NOTEXIST; diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c index 7e253455f9dd0ed995186ff3482b39e8679aa75a..bcd1a5e6ebf4202440d51b3d2e2c3eadaae27674 100644 --- a/net/mac802154/tx.c +++ b/net/mac802154/tx.c @@ -63,8 +63,21 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb) int ret; if (!(local->hw.flags & IEEE802154_HW_TX_OMIT_CKSUM)) { - u16 crc = crc_ccitt(0, skb->data, skb->len); + struct sk_buff *nskb; + u16 crc; + + if (unlikely(skb_tailroom(skb) < IEEE802154_FCS_LEN)) { + nskb = skb_copy_expand(skb, 0, IEEE802154_FCS_LEN, + GFP_ATOMIC); + if (likely(nskb)) { + consume_skb(skb); + skb = nskb; + } else { + goto err_tx; + } + } + crc = crc_ccitt(0, skb->data, skb->len); put_unaligned_le16(crc, skb_put(skb, 2)); } diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 5cb7cac9177d8bb2b2a37e887aed7a8b7378980c..1bd53b1e7672331084222fd263d4aa8222c05971 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -1960,13 +1960,20 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) { /* the destination server is not available */ - if (sysctl_expire_nodest_conn(ipvs)) { + __u32 flags = cp->flags; + + /* when timer already started, silently drop the packet.*/ + if (timer_pending(&cp->timer)) + __ip_vs_conn_put(cp); + else + ip_vs_conn_put(cp); + + if (sysctl_expire_nodest_conn(ipvs) && + !(flags & IP_VS_CONN_F_ONE_PACKET)) { /* try to expire the connection immediately */ ip_vs_conn_expire_now(cp); } - /* don't restart its timer, and silently - drop the packet. 
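The mac802154/tx.c hunk above guards the 2-byte FCS append: when the skb has no tailroom left, it is reallocated with skb_copy_expand() and the original consumed, rather than letting skb_put() run past the end of the buffer. The same defensive shape, reduced to a sketch with hypothetical names:

#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/string.h>

/* Append 'len' trailer bytes, reallocating the skb if tailroom is short.
 * Returns the (possibly new) skb, or NULL with the original skb untouched.
 */
static struct sk_buff *append_trailer(struct sk_buff *skb,
				      const void *trailer, unsigned int len)
{
	if (unlikely(skb_tailroom(skb) < (int)len)) {
		struct sk_buff *nskb;

		nskb = skb_copy_expand(skb, 0, len, GFP_ATOMIC);
		if (!nskb)
			return NULL;	/* caller still owns and frees skb */
		consume_skb(skb);	/* the expanded copy replaces it */
		skb = nskb;
	}
	memcpy(skb_put(skb, len), trailer, len);
	return skb;
}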
*/ - __ip_vs_conn_put(cp); + return NF_DROP; } diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 01130392b7c0f536dc5afb74ff24d5b49cbd9f8a..a268acc48af05acb19660958346a44f56df051b2 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1949,7 +1949,7 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp) return -EOPNOTSUPP; /* On boot, we can set this without any fancy locking. */ - if (!nf_conntrack_htable_size) + if (!nf_conntrack_hash) return param_set_uint(val, kp); rc = kstrtouint(val, 0, &hashsize); diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index 551a1eddf0fab75eccf803b9711e069e61e60d5d..a75b11c393128d79107fc447c5109b7d0a786ea5 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c @@ -465,6 +465,11 @@ void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me) nf_ct_expect_iterate_destroy(expect_iter_me, NULL); nf_ct_iterate_destroy(unhelp, me); + + /* Maybe someone has gotten the helper already when unhelp above. + * So need to wait it. + */ + synchronize_rcu(); } EXPORT_SYMBOL_GPL(nf_conntrack_helper_unregister); diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index de4053d84364b245e1593f7fa7d1d3cbff2de4fe..48dab1403b2c76c2e204bb76f1845efc3a44e82d 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -788,6 +788,21 @@ ctnetlink_alloc_filter(const struct nlattr * const cda[]) #endif } +static int ctnetlink_start(struct netlink_callback *cb) +{ + const struct nlattr * const *cda = cb->data; + struct ctnetlink_filter *filter = NULL; + + if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) { + filter = ctnetlink_alloc_filter(cda); + if (IS_ERR(filter)) + return PTR_ERR(filter); + } + + cb->data = filter; + return 0; +} + static int ctnetlink_filter_match(struct nf_conn *ct, void *data) { struct ctnetlink_filter *filter = data; @@ -1194,19 +1209,12 @@ static int ctnetlink_get_conntrack(struct net *net, struct sock *ctnl, if (nlh->nlmsg_flags & NLM_F_DUMP) { struct netlink_dump_control c = { + .start = ctnetlink_start, .dump = ctnetlink_dump_table, .done = ctnetlink_done, + .data = (void *)cda, }; - if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) { - struct ctnetlink_filter *filter; - - filter = ctnetlink_alloc_filter(cda); - if (IS_ERR(filter)) - return PTR_ERR(filter); - - c.data = filter; - } return netlink_dump_start(ctnl, skb, nlh, &c); } diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c index 0f5a4d79f6b85637277371f1705c04b9235f2324..812de5496b376f8425f3c4a362dee9066841e9b1 100644 --- a/net/netfilter/nf_conntrack_proto_dccp.c +++ b/net/netfilter/nf_conntrack_proto_dccp.c @@ -243,14 +243,14 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] = * We currently ignore Sync packets * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ - sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG, + sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG, }, [DCCP_PKT_SYNCACK] = { /* * We currently ignore SyncAck packets * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ - sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG, + sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG, }, }, [CT_DCCP_ROLE_SERVER] = { @@ -371,14 +371,14 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] = * We currently ignore Sync packets * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ - sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG, + sIV, sIG, sIG, 
sIG, sIG, sIG, sIG, sIG, }, [DCCP_PKT_SYNCACK] = { /* * We currently ignore SyncAck packets * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ - sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG, + sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG, }, }, }; diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c index 276324abfa604dfcdca6a864daf67958f66b907f..cdc744aa58893cfa80169cc368669ed51805eba3 100644 --- a/net/netfilter/nf_log.c +++ b/net/netfilter/nf_log.c @@ -440,6 +440,10 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write, if (write) { struct ctl_table tmp = *table; + /* proc_dostring() can append to existing strings, so we need to + * initialize it as an empty string. + */ + buf[0] = '\0'; tmp.data = buf; r = proc_dostring(&tmp, write, buffer, lenp, ppos); if (r) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 9a945024a0b6379fc83d7dc6f49a9625314dd1e7..742aacb317e50aefa4f541212346798a6a1892cc 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -1480,7 +1480,6 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, struct nft_base_chain *basechain; struct nft_stats *stats = NULL; struct nft_chain_hook hook; - const struct nlattr *name; struct nf_hook_ops *ops; struct nft_trans *trans; int err, i; @@ -1531,12 +1530,11 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, return PTR_ERR(stats); } + err = -ENOMEM; trans = nft_trans_alloc(ctx, NFT_MSG_NEWCHAIN, sizeof(struct nft_trans_chain)); - if (trans == NULL) { - free_percpu(stats); - return -ENOMEM; - } + if (trans == NULL) + goto err; nft_trans_chain_stats(trans) = stats; nft_trans_chain_update(trans) = true; @@ -1546,19 +1544,37 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, else nft_trans_chain_policy(trans) = -1; - name = nla[NFTA_CHAIN_NAME]; - if (nla[NFTA_CHAIN_HANDLE] && name) { - nft_trans_chain_name(trans) = - nla_strdup(name, GFP_KERNEL); - if (!nft_trans_chain_name(trans)) { - kfree(trans); - free_percpu(stats); - return -ENOMEM; + if (nla[NFTA_CHAIN_HANDLE] && + nla[NFTA_CHAIN_NAME]) { + struct nft_trans *tmp; + char *name; + + err = -ENOMEM; + name = nla_strdup(nla[NFTA_CHAIN_NAME], GFP_KERNEL); + if (!name) + goto err; + + err = -EEXIST; + list_for_each_entry(tmp, &ctx->net->nft.commit_list, list) { + if (tmp->msg_type == NFT_MSG_NEWCHAIN && + tmp->ctx.table == table && + nft_trans_chain_update(tmp) && + nft_trans_chain_name(tmp) && + strcmp(name, nft_trans_chain_name(tmp)) == 0) { + kfree(name); + goto err; + } } + + nft_trans_chain_name(trans) = name; } list_add_tail(&trans->list, &ctx->net->nft.commit_list); return 0; +err: + free_percpu(stats); + kfree(trans); + return err; } static int nf_tables_newchain(struct net *net, struct sock *nlsk, @@ -5043,6 +5059,9 @@ static void nf_tables_commit_release(struct nft_trans *trans) case NFT_MSG_DELTABLE: nf_tables_table_destroy(&trans->ctx); break; + case NFT_MSG_NEWCHAIN: + kfree(nft_trans_chain_name(trans)); + break; case NFT_MSG_DELCHAIN: nf_tables_chain_destroy(trans->ctx.chain); break; @@ -5100,13 +5119,15 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) nf_tables_table_notify(&trans->ctx, NFT_MSG_DELTABLE); break; case NFT_MSG_NEWCHAIN: - if (nft_trans_chain_update(trans)) + if (nft_trans_chain_update(trans)) { nft_chain_commit_update(trans); - else + nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN); + /* trans destroyed after rcu grace period */ + } else { nft_clear(net, trans->ctx.chain); - - 
nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN); - nft_trans_destroy(trans); + nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN); + nft_trans_destroy(trans); + } break; case NFT_MSG_DELCHAIN: list_del_rcu(&trans->ctx.chain->list); @@ -5246,7 +5267,7 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb) case NFT_MSG_NEWCHAIN: if (nft_trans_chain_update(trans)) { free_percpu(nft_trans_chain_stats(trans)); - + kfree(nft_trans_chain_name(trans)); nft_trans_destroy(trans); } else { trans->ctx.table->use--; diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c index c45e6d4358abe912d5a49efca4b183c6fbe21127..75624d17fc69ebd431f9e0bc0a47e964f53f6eac 100644 --- a/net/netfilter/nfnetlink_acct.c +++ b/net/netfilter/nfnetlink_acct.c @@ -238,29 +238,33 @@ static const struct nla_policy filter_policy[NFACCT_FILTER_MAX + 1] = { [NFACCT_FILTER_VALUE] = { .type = NLA_U32 }, }; -static struct nfacct_filter * -nfacct_filter_alloc(const struct nlattr * const attr) +static int nfnl_acct_start(struct netlink_callback *cb) { - struct nfacct_filter *filter; + const struct nlattr *const attr = cb->data; struct nlattr *tb[NFACCT_FILTER_MAX + 1]; + struct nfacct_filter *filter; int err; + if (!attr) + return 0; + err = nla_parse_nested(tb, NFACCT_FILTER_MAX, attr, filter_policy, NULL); if (err < 0) - return ERR_PTR(err); + return err; if (!tb[NFACCT_FILTER_MASK] || !tb[NFACCT_FILTER_VALUE]) - return ERR_PTR(-EINVAL); + return -EINVAL; filter = kzalloc(sizeof(struct nfacct_filter), GFP_KERNEL); if (!filter) - return ERR_PTR(-ENOMEM); + return -ENOMEM; filter->mask = ntohl(nla_get_be32(tb[NFACCT_FILTER_MASK])); filter->value = ntohl(nla_get_be32(tb[NFACCT_FILTER_VALUE])); + cb->data = filter; - return filter; + return 0; } static int nfnl_acct_get(struct net *net, struct sock *nfnl, @@ -275,18 +279,11 @@ static int nfnl_acct_get(struct net *net, struct sock *nfnl, if (nlh->nlmsg_flags & NLM_F_DUMP) { struct netlink_dump_control c = { .dump = nfnl_acct_dump, + .start = nfnl_acct_start, .done = nfnl_acct_done, + .data = (void *)tb[NFACCT_FILTER], }; - if (tb[NFACCT_FILTER]) { - struct nfacct_filter *filter; - - filter = nfacct_filter_alloc(tb[NFACCT_FILTER]); - if (IS_ERR(filter)) - return PTR_ERR(filter); - - c.data = filter; - } return netlink_dump_start(nfnl, skb, nlh, &c); } diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index 3bd637eadc4223ee81f3e76b8e4f22408647d27f..6da1cec1494a9c8ff9b00c44fdf2ac3c09b4fe54 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c @@ -825,10 +825,18 @@ nft_target_select_ops(const struct nft_ctx *ctx, rev = ntohl(nla_get_be32(tb[NFTA_TARGET_REV])); family = ctx->afi->family; + if (strcmp(tg_name, XT_ERROR_TARGET) == 0 || + strcmp(tg_name, XT_STANDARD_TARGET) == 0 || + strcmp(tg_name, "standard") == 0) + return ERR_PTR(-EINVAL); + /* Re-use the existing target if it's already loaded. 
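The ctnetlink and nfnl_acct changes above both move dump-filter allocation out of the request handler and into the netlink_dump_control ->start callback, so the filter is only built once the dump actually starts and its lifetime is tied to cb->data. A bare-bones sketch of that wiring, with hypothetical names and a single u32 filter attribute:

#include <linux/slab.h>
#include <net/netlink.h>

struct demo_filter {
	u32 mark;
};

/* ->start runs once when netlink_dump_start() begins the dump; returning an
 * error aborts it, and whatever is stored in cb->data is later visible to
 * ->dump and released from ->done.
 */
static int demo_dump_start(struct netlink_callback *cb)
{
	const struct nlattr *attr = cb->data;	/* handed over via .data */
	struct demo_filter *filter;

	if (!attr)
		return 0;			/* unfiltered dump */

	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
	if (!filter)
		return -ENOMEM;

	filter->mark = ntohl(nla_get_be32(attr));
	cb->data = filter;
	return 0;
}

The request handler then only fills in struct netlink_dump_control with .start = demo_dump_start, .data pointing at the filter attribute and the usual .dump/.done pair, and calls netlink_dump_start(), mirroring the converted ctnetlink_get_conntrack() and nfnl_acct_get() paths above.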
*/ list_for_each_entry(nft_target, &nft_target_list, head) { struct xt_target *target = nft_target->ops.data; + if (!target->target) + continue; + if (nft_target_cmp(target, tg_name, rev, family)) return &nft_target->ops; } @@ -837,6 +845,11 @@ nft_target_select_ops(const struct nft_ctx *ctx, if (IS_ERR(target)) return ERR_PTR(-ENOENT); + if (!target->target) { + err = -EINVAL; + goto err; + } + if (target->targetsize > nla_len(tb[NFTA_TARGET_INFO])) { err = -EINVAL; goto err; diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c index 9c0d5a7ce5f900c615213094496db11eb0338d03..33aa2ac3a62ef6cf7b9879e81eafae3a95ec329d 100644 --- a/net/netfilter/nft_set_hash.c +++ b/net/netfilter/nft_set_hash.c @@ -359,6 +359,7 @@ static void nft_rhash_destroy(const struct nft_set *set) struct nft_rhash *priv = nft_set_priv(set); cancel_delayed_work_sync(&priv->gc_work); + rcu_barrier(); rhashtable_free_and_destroy(&priv->ht, nft_rhash_elem_destroy, (void *)set); } diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c index b740fef0acc5ef5b6f81c633da4693675d2be417..6bf14f4f4b428e8283d19460331a7b9f26334b72 100644 --- a/net/nfc/hci/core.c +++ b/net/nfc/hci/core.c @@ -209,6 +209,11 @@ void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd, } create_info = (struct hci_create_pipe_resp *)skb->data; + if (create_info->pipe >= NFC_HCI_MAX_PIPES) { + status = NFC_HCI_ANY_E_NOK; + goto exit; + } + /* Save the new created pipe and bind with local gate, * the description for skb->data[3] is destination gate id * but since we received this cmd from host controller, we @@ -232,6 +237,11 @@ void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd, } delete_info = (struct hci_delete_pipe_noti *)skb->data; + if (delete_info->pipe >= NFC_HCI_MAX_PIPES) { + status = NFC_HCI_ANY_E_NOK; + goto exit; + } + hdev->pipes[delete_info->pipe].gate = NFC_HCI_INVALID_GATE; hdev->pipes[delete_info->pipe].dest_host = NFC_HCI_INVALID_HOST; break; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index c8f59c5447c32a7d447446234fdab26cc905ef9a..1137bf87f944c32b24f38f9040628f0626220aee 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -2920,6 +2920,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) goto out_free; } else if (reserve) { skb_reserve(skb, -reserve); + if (len < reserve) + skb_reset_network_header(skb); } /* Returns -EFAULT on error */ @@ -4268,6 +4270,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, } if (req->tp_block_nr) { + unsigned int min_frame_size; + /* Sanity tests and some calculations */ err = -EBUSY; if (unlikely(rb->pg_vec)) @@ -4290,12 +4294,12 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, goto out; if (unlikely(!PAGE_ALIGNED(req->tp_block_size))) goto out; + min_frame_size = po->tp_hdrlen + po->tp_reserve; if (po->tp_version >= TPACKET_V3 && - req->tp_block_size <= - BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + sizeof(struct tpacket3_hdr)) + req->tp_block_size < + BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size) goto out; - if (unlikely(req->tp_frame_size < po->tp_hdrlen + - po->tp_reserve)) + if (unlikely(req->tp_frame_size < min_frame_size)) goto out; if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1))) goto out; diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c index 78418f38464a4319f8ab6e3f4a2c3e745b698bd7..084adea6a81886e4e16f6e32ffeb869abd7c4d2e 100644 --- a/net/qrtr/qrtr.c +++ b/net/qrtr/qrtr.c @@ -710,6 +710,10 @@ static int 
qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) node = NULL; if (addr->sq_node == QRTR_NODE_BCAST) { enqueue_fn = qrtr_bcast_enqueue; + if (addr->sq_port != QRTR_PORT_CTRL) { + release_sock(sk); + return -ENOTCONN; + } } else if (addr->sq_node == ipc->us.sq_node) { enqueue_fn = qrtr_local_enqueue; } else { diff --git a/net/rds/bind.c b/net/rds/bind.c index 5aa3a64aa4f0ef254bd31ecbfb11c81db0438185..48257d3a4201573db6931ea1da537e2515931a47 100644 --- a/net/rds/bind.c +++ b/net/rds/bind.c @@ -60,11 +60,13 @@ struct rds_sock *rds_find_bound(__be32 addr, __be16 port) u64 key = ((u64)addr << 32) | port; struct rds_sock *rs; - rs = rhashtable_lookup_fast(&bind_hash_table, &key, ht_parms); + rcu_read_lock(); + rs = rhashtable_lookup(&bind_hash_table, &key, ht_parms); if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD)) rds_sock_addref(rs); else rs = NULL; + rcu_read_unlock(); rdsdebug("returning rs %p for %pI4:%u\n", rs, &addr, ntohs(port)); @@ -157,6 +159,7 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) goto out; } + sock_set_flag(sk, SOCK_RCU_FREE); ret = rds_add_bound(rs, sin->sin_addr.s_addr, &sin->sin_port); if (ret) goto out; diff --git a/net/rds/ib_frmr.c b/net/rds/ib_frmr.c index 48332a6ed7383c51def7402dcfef1e581fa677f7..d290416e79e96d610e48a11e6e778102f4b147a5 100644 --- a/net/rds/ib_frmr.c +++ b/net/rds/ib_frmr.c @@ -61,6 +61,7 @@ static struct rds_ib_mr *rds_ib_alloc_frmr(struct rds_ib_device *rds_ibdev, pool->fmr_attr.max_pages); if (IS_ERR(frmr->mr)) { pr_warn("RDS/IB: %s failed to allocate MR", __func__); + err = PTR_ERR(frmr->mr); goto out_no_cigar; } diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index 85757af7f1508522e4dc7468c92cacd402987047..31de26c990231ff550f5937845e3ba080dde2bab 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c @@ -249,10 +249,8 @@ static int ife_validate_metatype(struct tcf_meta_ops *ops, void *val, int len) } /* called when adding new meta information - * under ife->tcf_lock for existing action */ -static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid, - void *val, int len, bool exists) +static int load_metaops_and_vet(u32 metaid, void *val, int len) { struct tcf_meta_ops *ops = find_ife_oplist(metaid); int ret = 0; @@ -260,13 +258,9 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid, if (!ops) { ret = -ENOENT; #ifdef CONFIG_MODULES - if (exists) - spin_unlock_bh(&ife->tcf_lock); rtnl_unlock(); request_module("ifemeta%u", metaid); rtnl_lock(); - if (exists) - spin_lock_bh(&ife->tcf_lock); ops = find_ife_oplist(metaid); #endif } @@ -283,24 +277,17 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid, } /* called when adding new meta information - * under ife->tcf_lock for existing action */ -static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval, - int len, bool atomic) +static int __add_metainfo(const struct tcf_meta_ops *ops, + struct tcf_ife_info *ife, u32 metaid, void *metaval, + int len, bool atomic, bool exists) { struct tcf_meta_info *mi = NULL; - struct tcf_meta_ops *ops = find_ife_oplist(metaid); int ret = 0; - if (!ops) - return -ENOENT; - mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL); - if (!mi) { - /*put back what find_ife_oplist took */ - module_put(ops->owner); + if (!mi) return -ENOMEM; - } mi->metaid = metaid; mi->ops = ops; @@ -308,17 +295,49 @@ static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval, ret = ops->alloc(mi, metaval, atomic ? 
GFP_ATOMIC : GFP_KERNEL); if (ret != 0) { kfree(mi); - module_put(ops->owner); return ret; } } + if (exists) + spin_lock_bh(&ife->tcf_lock); list_add_tail(&mi->metalist, &ife->metalist); + if (exists) + spin_unlock_bh(&ife->tcf_lock); + + return ret; +} + +static int add_metainfo_and_get_ops(const struct tcf_meta_ops *ops, + struct tcf_ife_info *ife, u32 metaid, + bool exists) +{ + int ret; + + if (!try_module_get(ops->owner)) + return -ENOENT; + ret = __add_metainfo(ops, ife, metaid, NULL, 0, true, exists); + if (ret) + module_put(ops->owner); + return ret; +} + +static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval, + int len, bool exists) +{ + const struct tcf_meta_ops *ops = find_ife_oplist(metaid); + int ret; + if (!ops) + return -ENOENT; + ret = __add_metainfo(ops, ife, metaid, metaval, len, false, exists); + if (ret) + /*put back what find_ife_oplist took */ + module_put(ops->owner); return ret; } -static int use_all_metadata(struct tcf_ife_info *ife) +static int use_all_metadata(struct tcf_ife_info *ife, bool exists) { struct tcf_meta_ops *o; int rc = 0; @@ -326,7 +345,7 @@ static int use_all_metadata(struct tcf_ife_info *ife) read_lock(&ife_mod_lock); list_for_each_entry(o, &ifeoplist, list) { - rc = add_metainfo(ife, o->metaid, NULL, 0, true); + rc = add_metainfo_and_get_ops(o, ife, o->metaid, exists); if (rc == 0) installed += 1; } @@ -377,7 +396,6 @@ static void _tcf_ife_cleanup(struct tc_action *a, int bind) struct tcf_meta_info *e, *n; list_for_each_entry_safe(e, n, &ife->metalist, metalist) { - module_put(e->ops->owner); list_del(&e->metalist); if (e->metaval) { if (e->ops->release) @@ -385,6 +403,7 @@ static void _tcf_ife_cleanup(struct tc_action *a, int bind) else kfree(e->metaval); } + module_put(e->ops->owner); kfree(e); } } @@ -398,7 +417,6 @@ static void tcf_ife_cleanup(struct tc_action *a, int bind) spin_unlock_bh(&ife->tcf_lock); } -/* under ife->tcf_lock for existing action */ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb, bool exists) { @@ -412,7 +430,7 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb, val = nla_data(tb[i]); len = nla_len(tb[i]); - rc = load_metaops_and_vet(ife, i, val, len, exists); + rc = load_metaops_and_vet(i, val, len); if (rc != 0) return rc; @@ -481,6 +499,8 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla, if (exists) spin_lock_bh(&ife->tcf_lock); ife->tcf_action = parm->action; + if (exists) + spin_unlock_bh(&ife->tcf_lock); if (parm->flags & IFE_ENCODE) { if (daddr) @@ -508,9 +528,6 @@ metadata_parse_err: tcf_idr_release(*a, bind); if (ret == ACT_P_CREATED) _tcf_ife_cleanup(*a, bind); - - if (exists) - spin_unlock_bh(&ife->tcf_lock); return err; } @@ -524,20 +541,14 @@ metadata_parse_err: * as we can. 
You better have at least one else we are * going to bail out */ - err = use_all_metadata(ife); + err = use_all_metadata(ife, exists); if (err) { if (ret == ACT_P_CREATED) _tcf_ife_cleanup(*a, bind); - - if (exists) - spin_unlock_bh(&ife->tcf_lock); return err; } } - if (exists) - spin_unlock_bh(&ife->tcf_lock); - if (ret == ACT_P_CREATED) tcf_idr_insert(tn, *a); diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 51ab463d9e168b7e174ae1e3169639ed721580ad..656b6ada92210c87a3e6309ecc4f7b9bb425ec4c 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -109,16 +109,18 @@ static int tcf_pedit_key_ex_dump(struct sk_buff *skb, { struct nlattr *keys_start = nla_nest_start(skb, TCA_PEDIT_KEYS_EX); + if (!keys_start) + goto nla_failure; for (; n > 0; n--) { struct nlattr *key_start; key_start = nla_nest_start(skb, TCA_PEDIT_KEY_EX); + if (!key_start) + goto nla_failure; if (nla_put_u16(skb, TCA_PEDIT_KEY_EX_HTYPE, keys_ex->htype) || - nla_put_u16(skb, TCA_PEDIT_KEY_EX_CMD, keys_ex->cmd)) { - nlmsg_trim(skb, keys_start); - return -EINVAL; - } + nla_put_u16(skb, TCA_PEDIT_KEY_EX_CMD, keys_ex->cmd)) + goto nla_failure; nla_nest_end(skb, key_start); @@ -128,6 +130,9 @@ static int tcf_pedit_key_ex_dump(struct sk_buff *skb, nla_nest_end(skb, keys_start); return 0; +nla_failure: + nla_nest_cancel(skb, keys_start); + return -EINVAL; } static int tcf_pedit_init(struct net *net, struct nlattr *nla, @@ -395,7 +400,10 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a, opt->bindcnt = p->tcf_bindcnt - bind; if (p->tcfp_keys_ex) { - tcf_pedit_key_ex_dump(skb, p->tcfp_keys_ex, p->tcfp_nkeys); + if (tcf_pedit_key_ex_dump(skb, + p->tcfp_keys_ex, + p->tcfp_nkeys)) + goto nla_put_failure; if (nla_put(skb, TCA_PEDIT_PARMS_EX, s, opt)) goto nla_put_failure; diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c index 53752b9327d02e1f81ce48a0d4459366421bd117..a859b55d789918074d29778d159a254a93f04748 100644 --- a/net/sched/act_sample.c +++ b/net/sched/act_sample.c @@ -64,7 +64,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla, if (!exists) { ret = tcf_idr_create(tn, parm->index, est, a, - &act_sample_ops, bind, false); + &act_sample_ops, bind, true); if (ret) return ret; ret = ACT_P_CREATED; diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index 7cb63616805d81fb8f8798289bc4e33809510637..cd51f2ed55fabf98b3780cc14e49f8cb878e31a3 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c @@ -36,7 +36,7 @@ static int tunnel_key_act(struct sk_buff *skb, const struct tc_action *a, tcf_lastuse_update(&t->tcf_tm); bstats_cpu_update(this_cpu_ptr(t->common.cpu_bstats), skb); - action = params->action; + action = READ_ONCE(t->tcf_action); switch (params->tcft_action) { case TCA_TUNNEL_KEY_ACT_RELEASE: @@ -182,7 +182,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, params_old = rtnl_dereference(t->params); - params_new->action = parm->action; + t->tcf_action = parm->action; params_new->tcft_action = parm->t_action; params_new->tcft_enc_metadata = metadata; @@ -254,13 +254,13 @@ static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a, .index = t->tcf_index, .refcnt = t->tcf_refcnt - ref, .bindcnt = t->tcf_bindcnt - bind, + .action = t->tcf_action, }; struct tcf_t tm; params = rtnl_dereference(t->params); opt.t_action = params->tcft_action; - opt.action = params->action; if (nla_put(skb, TCA_TUNNEL_KEY_PARMS, sizeof(opt), &opt)) goto nla_put_failure; diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c 
index ba37d8f57e68655d3e9e9b8c75967b1e36d09b8f..0c9bc29dcf97a599359038bfe4dd7e17e90ebc01 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -903,6 +903,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, struct nlattr *opt = tca[TCA_OPTIONS]; struct nlattr *tb[TCA_U32_MAX + 1]; u32 htid, flags = 0; + size_t sel_size; int err; #ifdef CONFIG_CLS_U32_PERF size_t size; @@ -1024,8 +1025,11 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, return -EINVAL; s = nla_data(tb[TCA_U32_SEL]); + sel_size = sizeof(*s) + sizeof(*s->keys) * s->nkeys; + if (nla_len(tb[TCA_U32_SEL]) < sel_size) + return -EINVAL; - n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL); + n = kzalloc(offsetof(typeof(*n), sel) + sel_size, GFP_KERNEL); if (n == NULL) return -ENOBUFS; @@ -1038,7 +1042,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, } #endif - memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key)); + memcpy(&n->sel, s, sel_size); RCU_INIT_POINTER(n->ht_up, ht); n->handle = handle; n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0; diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 8c8df75dbeadd2f555d2e4d9e9c2b2a03ee0c89d..2a2ab6bfe5d85d93380165211affcf1acbeaacc8 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -149,12 +149,6 @@ struct netem_skb_cb { ktime_t tstamp_save; }; - -static struct sk_buff *netem_rb_to_skb(struct rb_node *rb) -{ - return rb_entry(rb, struct sk_buff, rbnode); -} - static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb) { /* we assume we can use skb next/prev/tstamp as storage for rb_node */ @@ -365,7 +359,7 @@ static void tfifo_reset(struct Qdisc *sch) struct rb_node *p; while ((p = rb_first(&q->t_root))) { - struct sk_buff *skb = netem_rb_to_skb(p); + struct sk_buff *skb = rb_to_skb(p); rb_erase(p, &q->t_root); rtnl_kfree_skbs(skb, skb); @@ -382,7 +376,7 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch) struct sk_buff *skb; parent = *p; - skb = netem_rb_to_skb(parent); + skb = rb_to_skb(parent); if (tnext >= netem_skb_cb(skb)->time_to_send) p = &parent->rb_right; else @@ -538,7 +532,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff *t_skb; struct netem_skb_cb *t_last; - t_skb = netem_rb_to_skb(rb_last(&q->t_root)); + t_skb = skb_rb_last(&q->t_root); t_last = netem_skb_cb(t_skb); if (!last || t_last->time_to_send > last->time_to_send) { @@ -618,7 +612,7 @@ deliver: if (p) { psched_time_t time_to_send; - skb = netem_rb_to_skb(p); + skb = rb_to_skb(p); /* if more time remaining? */ time_to_send = netem_skb_cb(skb)->time_to_send; diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c index 3afac275ee82dbec825dd71378dffe69a53718a7..acd9380a56fb0be62ae8595f8942fa48dad4e968 100644 --- a/net/sctp/chunk.c +++ b/net/sctp/chunk.c @@ -230,7 +230,9 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, /* Account for a different sized first fragment */ if (msg_len >= first_len) { msg->can_delay = 0; - SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_FRAGUSRMSGS); + if (msg_len > first_len) + SCTP_INC_STATS(sock_net(asoc->base.sk), + SCTP_MIB_FRAGUSRMSGS); } else { /* Which may be the only one... 
*/ first_len = msg_len; diff --git a/net/sctp/proc.c b/net/sctp/proc.c index 26b4be6b41720a23040fcf31983199d331f026e4..6c82a959fc6e27ab7bfce181ee297993d71dd6cb 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c @@ -335,8 +335,6 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v) } transport = (struct sctp_transport *)v; - if (!sctp_transport_hold(transport)) - return 0; assoc = transport->asoc; epb = &assoc->base; sk = epb->sk; @@ -426,8 +424,6 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v) } transport = (struct sctp_transport *)v; - if (!sctp_transport_hold(transport)) - return 0; assoc = transport->asoc; list_for_each_entry_rcu(tsp, &assoc->peer.transport_addr_list, diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 2d6f612f32c3e94a7e74054ceb74b401f528126e..79009431114313df903bce2d7778f7c8773ad51d 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4660,9 +4660,14 @@ struct sctp_transport *sctp_transport_get_next(struct net *net, break; } + if (!sctp_transport_hold(t)) + continue; + if (net_eq(sock_net(t->asoc->base.sk), net) && t->asoc->peer.primary_path == t) break; + + sctp_transport_put(t); } return t; @@ -4672,13 +4677,18 @@ struct sctp_transport *sctp_transport_get_idx(struct net *net, struct rhashtable_iter *iter, int pos) { - void *obj = SEQ_START_TOKEN; + struct sctp_transport *t; - while (pos && (obj = sctp_transport_get_next(net, iter)) && - !IS_ERR(obj)) - pos--; + if (!pos) + return SEQ_START_TOKEN; - return obj; + while ((t = sctp_transport_get_next(net, iter)) && !IS_ERR(t)) { + if (!--pos) + break; + sctp_transport_put(t); + } + + return t; } int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), @@ -4738,8 +4748,6 @@ again: tsp = sctp_transport_get_idx(net, &hti, *pos + 1); for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) { - if (!sctp_transport_hold(tsp)) - continue; ret = cb(tsp, p); if (ret) break; diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 654a8123840639c48b356abe8e958dbe6c549ddb..43ef7be69428a6aca96ce0f3e2f1e01ae5d3d792 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -1180,8 +1180,7 @@ static int smc_shutdown(struct socket *sock, int how) lock_sock(sk); rc = -ENOTCONN; - if ((sk->sk_state != SMC_LISTEN) && - (sk->sk_state != SMC_ACTIVE) && + if ((sk->sk_state != SMC_ACTIVE) && (sk->sk_state != SMC_PEERCLOSEWAIT1) && (sk->sk_state != SMC_PEERCLOSEWAIT2) && (sk->sk_state != SMC_APPCLOSEWAIT1) && diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c index 8654494b4d0a39c2ad970597fd7919640b948885..834eb2b9e41b5c743d17887574edf43322f68190 100644 --- a/net/sunrpc/auth_gss/gss_krb5_crypto.c +++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c @@ -169,7 +169,7 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen, struct scatterlist sg[1]; int err = -1; u8 *checksumdata; - u8 rc4salt[4]; + u8 *rc4salt; struct crypto_ahash *md5; struct crypto_ahash *hmac_md5; struct ahash_request *req; @@ -183,14 +183,18 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen, return GSS_S_FAILURE; } + rc4salt = kmalloc_array(4, sizeof(*rc4salt), GFP_NOFS); + if (!rc4salt) + return GSS_S_FAILURE; + if (arcfour_hmac_md5_usage_to_salt(usage, rc4salt)) { dprintk("%s: invalid usage value %u\n", __func__, usage); - return GSS_S_FAILURE; + goto out_free_rc4salt; } checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS); if (!checksumdata) - return GSS_S_FAILURE; + goto out_free_rc4salt; md5 = crypto_alloc_ahash("md5", 0, 
CRYPTO_ALG_ASYNC); if (IS_ERR(md5)) @@ -258,6 +262,8 @@ out_free_md5: crypto_free_ahash(md5); out_free_cksum: kfree(checksumdata); +out_free_rc4salt: + kfree(rc4salt); return err ? GSS_S_FAILURE : 0; } diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 2ad827db270422ed7f2c84aae374c234f445e6ab..6d118357d9dc99fbfdf3c2583167b8176ccd2611 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -965,10 +965,20 @@ out: } EXPORT_SYMBOL_GPL(rpc_bind_new_program); +void rpc_task_release_transport(struct rpc_task *task) +{ + struct rpc_xprt *xprt = task->tk_xprt; + + if (xprt) { + task->tk_xprt = NULL; + xprt_put(xprt); + } +} +EXPORT_SYMBOL_GPL(rpc_task_release_transport); + void rpc_task_release_client(struct rpc_task *task) { struct rpc_clnt *clnt = task->tk_client; - struct rpc_xprt *xprt = task->tk_xprt; if (clnt != NULL) { /* Remove from client task list */ @@ -979,12 +989,14 @@ void rpc_task_release_client(struct rpc_task *task) rpc_release_client(clnt); } + rpc_task_release_transport(task); +} - if (xprt != NULL) { - task->tk_xprt = NULL; - - xprt_put(xprt); - } +static +void rpc_task_set_transport(struct rpc_task *task, struct rpc_clnt *clnt) +{ + if (!task->tk_xprt) + task->tk_xprt = xprt_iter_get_next(&clnt->cl_xpi); } static @@ -992,8 +1004,7 @@ void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt) { if (clnt != NULL) { - if (task->tk_xprt == NULL) - task->tk_xprt = xprt_iter_get_next(&clnt->cl_xpi); + rpc_task_set_transport(task, clnt); task->tk_client = clnt; atomic_inc(&clnt->cl_count); if (clnt->cl_softrtry) @@ -1529,6 +1540,7 @@ call_start(struct rpc_task *task) clnt->cl_program->version[clnt->cl_vers]->counts[idx]++; clnt->cl_stats->rpccnt++; task->tk_action = call_reserve; + rpc_task_set_transport(task, clnt); } /* diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 98a44ecb11e7be8b74f07d7067e0c28b783addc6..0aebf0695ae024966d87a0015a97d6d1d2058c8d 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -2268,6 +2268,8 @@ void tipc_sk_reinit(struct net *net) walk_stop: rhashtable_walk_stop(&iter); } while (tsk == ERR_PTR(-EAGAIN)); + + rhashtable_walk_exit(&iter); } static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid) diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index ffb1a3a69bdd99d34d1cc29c2176b118a1ebe2f0..4f2971f528dbd1c47c34433f10c073d587d1872d 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -44,6 +44,7 @@ MODULE_AUTHOR("Mellanox Technologies"); MODULE_DESCRIPTION("Transport Layer Security Support"); MODULE_LICENSE("Dual BSD/GPL"); +MODULE_ALIAS_TCP_ULP("tls"); static struct proto tls_base_prot; static struct proto tls_sw_prot; @@ -194,9 +195,14 @@ static void tls_write_space(struct sock *sk) { struct tls_context *ctx = tls_get_ctx(sk); - /* We are already sending pages, ignore notification */ - if (ctx->in_tcp_sendpages) + /* If in_tcp_sendpages call lower protocol write space handler + * to ensure we wake up any waiting operations there. For example + * if do_tcp_sendpages where to call sk_wait_event. 
+ */ + if (ctx->in_tcp_sendpages) { + ctx->sk_write_space(sk); return; + } if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) { gfp_t sk_allocation = sk->sk_allocation; @@ -217,6 +223,15 @@ static void tls_write_space(struct sock *sk) ctx->sk_write_space(sk); } +static void tls_ctx_free(struct tls_context *ctx) +{ + if (!ctx) + return; + + memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send)); + kfree(ctx); +} + static void tls_sk_proto_close(struct sock *sk, long timeout) { struct tls_context *ctx = tls_get_ctx(sk); @@ -245,7 +260,7 @@ static void tls_sk_proto_close(struct sock *sk, long timeout) kfree(ctx->iv); sk_proto_close = ctx->sk_proto_close; - kfree(ctx); + tls_ctx_free(ctx); release_sock(sk); sk_proto_close(sk, timeout); @@ -273,7 +288,7 @@ static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval, } /* get user crypto info */ - crypto_info = &ctx->crypto_send; + crypto_info = &ctx->crypto_send.info; if (!TLS_CRYPTO_INFO_READY(crypto_info)) { rc = -EBUSY; @@ -370,7 +385,7 @@ static int do_tls_setsockopt_tx(struct sock *sk, char __user *optval, } /* get user crypto info */ - crypto_info = &ctx->crypto_send; + crypto_info = &ctx->crypto_send.info; /* Currently we don't support set crypto info more than one time */ if (TLS_CRYPTO_INFO_READY(crypto_info)) { @@ -415,7 +430,7 @@ static int do_tls_setsockopt_tx(struct sock *sk, char __user *optval, goto out; err_crypto_info: - memset(crypto_info, 0, sizeof(*crypto_info)); + memzero_explicit(crypto_info, sizeof(union tls_crypto_context)); out: return rc; } diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index fb79caf56d0e8fe4ec0a8baf08b3067c54666315..6ae9ca567d6caeca70bcbd37b0e1c267d995aeed 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -170,6 +170,9 @@ static int alloc_encrypted_sg(struct sock *sk, int len) rc = alloc_sg(sk, len, ctx->sg_encrypted_data, &ctx->sg_encrypted_num_elem, &ctx->sg_encrypted_size, 0); + if (rc == -ENOSPC) + ctx->sg_encrypted_num_elem = ARRAY_SIZE(ctx->sg_encrypted_data); + return rc; } @@ -183,6 +186,9 @@ static int alloc_plaintext_sg(struct sock *sk, int len) &ctx->sg_plaintext_num_elem, &ctx->sg_plaintext_size, tls_ctx->pending_open_record_frags); + if (rc == -ENOSPC) + ctx->sg_plaintext_num_elem = ARRAY_SIZE(ctx->sg_plaintext_data); + return rc; } @@ -655,7 +661,6 @@ static void tls_sw_free_resources(struct sock *sk) int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx) { - char keyval[TLS_CIPHER_AES_GCM_128_KEY_SIZE]; struct tls_crypto_info *crypto_info; struct tls12_crypto_info_aes_gcm_128 *gcm_128_info; struct tls_sw_context *sw_ctx; @@ -682,7 +687,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx) ctx->priv_ctx = (struct tls_offload_context *)sw_ctx; ctx->free_resources = tls_sw_free_resources; - crypto_info = &ctx->crypto_send; + crypto_info = &ctx->crypto_send.info; switch (crypto_info->cipher_type) { case TLS_CIPHER_AES_GCM_128: { nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE; @@ -747,9 +752,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx) ctx->push_pending_record = tls_sw_push_pending_record; - memcpy(keyval, gcm_128_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE); - - rc = crypto_aead_setkey(sw_ctx->aead_send, keyval, + rc = crypto_aead_setkey(sw_ctx->aead_send, gcm_128_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE); if (rc) goto free_aead; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index ea28aa505302916b61370587a473c48656f72f6d..753f3e73c4980759a8a5dac051d84af36550908c 100644 --- 
a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -4186,6 +4186,7 @@ static int parse_station_flags(struct genl_info *info, params->sta_flags_mask = BIT(NL80211_STA_FLAG_AUTHENTICATED) | BIT(NL80211_STA_FLAG_MFP) | BIT(NL80211_STA_FLAG_AUTHORIZED); + break; default: return -EINVAL; } @@ -5990,7 +5991,7 @@ do { \ nl80211_check_s32); /* * Check HT operation mode based on - * IEEE 802.11 2012 8.4.2.59 HT Operation element. + * IEEE 802.11-2016 9.4.2.57 HT Operation element. */ if (tb[NL80211_MESHCONF_HT_OPMODE]) { ht_opmode = nla_get_u16(tb[NL80211_MESHCONF_HT_OPMODE]); @@ -6000,22 +6001,9 @@ do { \ IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT)) return -EINVAL; - if ((ht_opmode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT) && - (ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT)) - return -EINVAL; + /* NON_HT_STA bit is reserved, but some programs set it */ + ht_opmode &= ~IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT; - switch (ht_opmode & IEEE80211_HT_OP_MODE_PROTECTION) { - case IEEE80211_HT_OP_MODE_PROTECTION_NONE: - case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ: - if (ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT) - return -EINVAL; - break; - case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER: - case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED: - if (!(ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT)) - return -EINVAL; - break; - } cfg->ht_opmode = ht_opmode; mask |= (1 << (NL80211_MESHCONF_HT_OPMODE - 1)); } @@ -10542,9 +10530,12 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info) rem) { u8 *mask_pat; - nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat, - nl80211_packet_pattern_policy, - info->extack); + err = nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat, + nl80211_packet_pattern_policy, + info->extack); + if (err) + goto error; + err = -EINVAL; if (!pat_tb[NL80211_PKTPAT_MASK] || !pat_tb[NL80211_PKTPAT_PATTERN]) @@ -10793,8 +10784,11 @@ static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev, rem) { u8 *mask_pat; - nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat, - nl80211_packet_pattern_policy, NULL); + err = nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat, + nl80211_packet_pattern_policy, NULL); + if (err) + return err; + if (!pat_tb[NL80211_PKTPAT_MASK] || !pat_tb[NL80211_PKTPAT_PATTERN]) return -EINVAL; diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 9c57d6a5816cc4c8d863387a8351e22bad424ede..2fb7a78308e1a3e751b7d523dcc3eb9ae7b6ef9c 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1831,7 +1831,10 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols, /* Try to instantiate a bundle */ err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family); if (err <= 0) { - if (err != 0 && err != -EAGAIN) + if (err == 0) + return NULL; + + if (err != -EAGAIN) XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); return ERR_PTR(err); } @@ -2285,6 +2288,9 @@ struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig, if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE) return make_blackhole(net, dst_orig->ops->family, dst_orig); + if (IS_ERR(dst)) + dst_release(dst_orig); + return dst; } EXPORT_SYMBOL(xfrm_lookup_route); diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index dbfcfefd6d69309027c2ccf5871894dcbe5b4fa4..5554d28a32eb10866b8403c7992b8209e3376a04 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -1021,10 +1021,12 @@ static inline int xfrm_nlmsg_multicast(struct net *net, struct sk_buff *skb, { struct sock *nlsk = rcu_dereference(net->xfrm.nlsk); - if (nlsk) - 
return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC); - else - return -1; + if (!nlsk) { + kfree_skb(skb); + return -EPIPE; + } + + return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC); } static inline size_t xfrm_spdinfo_msgsize(void) @@ -1665,9 +1667,11 @@ static inline size_t userpolicy_type_attrsize(void) #ifdef CONFIG_XFRM_SUB_POLICY static int copy_to_user_policy_type(u8 type, struct sk_buff *skb) { - struct xfrm_userpolicy_type upt = { - .type = type, - }; + struct xfrm_userpolicy_type upt; + + /* Sadly there are two holes in struct xfrm_userpolicy_type */ + memset(&upt, 0, sizeof(upt)); + upt.type = type; return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt); } diff --git a/samples/bpf/parse_varlen.c b/samples/bpf/parse_varlen.c index 95c16324760c0be1af8be927e1adffae0b582525..0b6f22feb2c9ce37787ea5384276c85a4e1171eb 100644 --- a/samples/bpf/parse_varlen.c +++ b/samples/bpf/parse_varlen.c @@ -6,6 +6,7 @@ */ #define KBUILD_MODNAME "foo" #include +#include #include #include #include @@ -108,11 +109,6 @@ static int parse_ipv6(void *data, uint64_t nh_off, void *data_end) return 0; } -struct vlan_hdr { - uint16_t h_vlan_TCI; - uint16_t h_vlan_encapsulated_proto; -}; - SEC("varlen") int handle_ingress(struct __sk_buff *skb) { diff --git a/samples/bpf/test_overhead_user.c b/samples/bpf/test_overhead_user.c index d291167fd3c78c8649c07ad6b1023f9a3ad7bb19..7dad9a3168e1e5a05dc96aea65ed45f457a3a08b 100644 --- a/samples/bpf/test_overhead_user.c +++ b/samples/bpf/test_overhead_user.c @@ -6,6 +6,7 @@ */ #define _GNU_SOURCE #include +#include #include #include #include @@ -44,8 +45,13 @@ static void test_task_rename(int cpu) exit(1); } start_time = time_get_ns(); - for (i = 0; i < MAX_CNT; i++) - write(fd, buf, sizeof(buf)); + for (i = 0; i < MAX_CNT; i++) { + if (write(fd, buf, sizeof(buf)) < 0) { + printf("task rename failed: %s\n", strerror(errno)); + close(fd); + return; + } + } printf("task_rename:%d: %lld events per sec\n", cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time)); close(fd); @@ -63,8 +69,13 @@ static void test_urandom_read(int cpu) exit(1); } start_time = time_get_ns(); - for (i = 0; i < MAX_CNT; i++) - read(fd, buf, sizeof(buf)); + for (i = 0; i < MAX_CNT; i++) { + if (read(fd, buf, sizeof(buf)) < 0) { + printf("failed to read from /dev/urandom: %s\n", strerror(errno)); + close(fd); + return; + } + } printf("urandom_read:%d: %lld events per sec\n", cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time)); close(fd); diff --git a/samples/bpf/trace_event_user.c b/samples/bpf/trace_event_user.c index 7bd827b84a67988fd6fc8a36eec43fc22e623032..c7d525e5696ec01d569074199cf633fec53b646a 100644 --- a/samples/bpf/trace_event_user.c +++ b/samples/bpf/trace_event_user.c @@ -121,6 +121,16 @@ static void print_stacks(void) } } +static inline int generate_load(void) +{ + if (system("dd if=/dev/zero of=/dev/null count=5000k status=none") < 0) { + printf("failed to generate some load with dd: %s\n", strerror(errno)); + return -1; + } + + return 0; +} + static void test_perf_event_all_cpu(struct perf_event_attr *attr) { int nr_cpus = sysconf(_SC_NPROCESSORS_CONF); @@ -138,7 +148,11 @@ static void test_perf_event_all_cpu(struct perf_event_attr *attr) assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[0]) == 0); assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE) == 0); } - system("dd if=/dev/zero of=/dev/null count=5000k status=none"); + + if (generate_load() < 0) { + error = 1; + goto all_cpu_err; + } print_stacks(); all_cpu_err: for (i--; i >= 0; i--) { @@ -152,7 +166,7 
@@ all_cpu_err: static void test_perf_event_task(struct perf_event_attr *attr) { - int pmu_fd; + int pmu_fd, error = 0; /* open task bound event */ pmu_fd = sys_perf_event_open(attr, 0, -1, -1, 0); @@ -162,10 +176,17 @@ static void test_perf_event_task(struct perf_event_attr *attr) } assert(ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd[0]) == 0); assert(ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE) == 0); - system("dd if=/dev/zero of=/dev/null count=5000k status=none"); + + if (generate_load() < 0) { + error = 1; + goto err; + } print_stacks(); +err: ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE); close(pmu_fd); + if (error) + int_exit(0); } static void test_bpf_perf_event(void) diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include index 62adfa96a37a60c7b2876c3864a227d61a7c7066..bfe611d52a580d263409728fc4696f2b66dce93f 100644 --- a/scripts/Kbuild.include +++ b/scripts/Kbuild.include @@ -403,3 +403,6 @@ endif endef # ############################################################################### + +# delete partially updated (i.e. corrupted) files on error +.DELETE_ON_ERROR: diff --git a/scripts/depmod.sh b/scripts/depmod.sh index f41b0a4b575cee19b6eeb56dc76612fb91fec734..cf5b2b24b3cf13b256e2d9952ff2f2232a8ffd0c 100755 --- a/scripts/depmod.sh +++ b/scripts/depmod.sh @@ -16,9 +16,9 @@ if ! test -r System.map ; then fi if [ -z $(command -v $DEPMOD) ]; then - echo "'make modules_install' requires $DEPMOD. Please install it." >&2 + echo "Warning: 'make modules_install' requires $DEPMOD. Please install it." >&2 echo "This is probably in the kmod package." >&2 - exit 1 + exit 0 fi # older versions of depmod don't support -P diff --git a/scripts/gcc-plugins/gcc-common.h b/scripts/gcc-plugins/gcc-common.h index ffd1dfaa1cc10b7212e7dade49c2b310a0dbd4a2..f467500533776c7394e83f4ae4fa47fb76521394 100644 --- a/scripts/gcc-plugins/gcc-common.h +++ b/scripts/gcc-plugins/gcc-common.h @@ -97,6 +97,10 @@ #include "predict.h" #include "ipa-utils.h" +#if BUILDING_GCC_VERSION >= 8000 +#include "stringpool.h" +#endif + #if BUILDING_GCC_VERSION >= 4009 #include "attribs.h" #include "varasm.h" diff --git a/scripts/gcc-plugins/latent_entropy_plugin.c b/scripts/gcc-plugins/latent_entropy_plugin.c index 65264960910d36878c5b8be4213638e09d3af728..cbe1d6c4b1a51757a7cb7bced388bcdf57e308fa 100644 --- a/scripts/gcc-plugins/latent_entropy_plugin.c +++ b/scripts/gcc-plugins/latent_entropy_plugin.c @@ -255,21 +255,14 @@ static tree handle_latent_entropy_attribute(tree *node, tree name, return NULL_TREE; } -static struct attribute_spec latent_entropy_attr = { - .name = "latent_entropy", - .min_length = 0, - .max_length = 0, - .decl_required = true, - .type_required = false, - .function_type_required = false, - .handler = handle_latent_entropy_attribute, -#if BUILDING_GCC_VERSION >= 4007 - .affects_type_identity = false -#endif -}; +static struct attribute_spec latent_entropy_attr = { }; static void register_attributes(void *event_data __unused, void *data __unused) { + latent_entropy_attr.name = "latent_entropy"; + latent_entropy_attr.decl_required = true; + latent_entropy_attr.handler = handle_latent_entropy_attribute; + register_attribute(&latent_entropy_attr); } diff --git a/scripts/gcc-plugins/randomize_layout_plugin.c b/scripts/gcc-plugins/randomize_layout_plugin.c index 0073af326449864b4da5f7a64677c0de2d5eafa2..c4a345c3715b63ba48a2dc37811dd38fbb7c615a 100644 --- a/scripts/gcc-plugins/randomize_layout_plugin.c +++ b/scripts/gcc-plugins/randomize_layout_plugin.c @@ -580,68 +580,35 @@ static void finish_type(void *event_data, void 
*data) return; } -static struct attribute_spec randomize_layout_attr = { - .name = "randomize_layout", - // related to args - .min_length = 0, - .max_length = 0, - .decl_required = false, - // need type declaration - .type_required = true, - .function_type_required = false, - .handler = handle_randomize_layout_attr, -#if BUILDING_GCC_VERSION >= 4007 - .affects_type_identity = true -#endif -}; +static struct attribute_spec randomize_layout_attr = { }; +static struct attribute_spec no_randomize_layout_attr = { }; +static struct attribute_spec randomize_considered_attr = { }; +static struct attribute_spec randomize_performed_attr = { }; -static struct attribute_spec no_randomize_layout_attr = { - .name = "no_randomize_layout", - // related to args - .min_length = 0, - .max_length = 0, - .decl_required = false, - // need type declaration - .type_required = true, - .function_type_required = false, - .handler = handle_randomize_layout_attr, +static void register_attributes(void *event_data, void *data) +{ + randomize_layout_attr.name = "randomize_layout"; + randomize_layout_attr.type_required = true; + randomize_layout_attr.handler = handle_randomize_layout_attr; #if BUILDING_GCC_VERSION >= 4007 - .affects_type_identity = true + randomize_layout_attr.affects_type_identity = true; #endif -}; -static struct attribute_spec randomize_considered_attr = { - .name = "randomize_considered", - // related to args - .min_length = 0, - .max_length = 0, - .decl_required = false, - // need type declaration - .type_required = true, - .function_type_required = false, - .handler = handle_randomize_considered_attr, + no_randomize_layout_attr.name = "no_randomize_layout"; + no_randomize_layout_attr.type_required = true; + no_randomize_layout_attr.handler = handle_randomize_layout_attr; #if BUILDING_GCC_VERSION >= 4007 - .affects_type_identity = false + no_randomize_layout_attr.affects_type_identity = true; #endif -}; -static struct attribute_spec randomize_performed_attr = { - .name = "randomize_performed", - // related to args - .min_length = 0, - .max_length = 0, - .decl_required = false, - // need type declaration - .type_required = true, - .function_type_required = false, - .handler = handle_randomize_performed_attr, -#if BUILDING_GCC_VERSION >= 4007 - .affects_type_identity = false -#endif -}; + randomize_considered_attr.name = "randomize_considered"; + randomize_considered_attr.type_required = true; + randomize_considered_attr.handler = handle_randomize_considered_attr; + + randomize_performed_attr.name = "randomize_performed"; + randomize_performed_attr.type_required = true; + randomize_performed_attr.handler = handle_randomize_performed_attr; -static void register_attributes(void *event_data, void *data) -{ register_attribute(&randomize_layout_attr); register_attribute(&no_randomize_layout_attr); register_attribute(&randomize_considered_attr); diff --git a/scripts/gcc-plugins/structleak_plugin.c b/scripts/gcc-plugins/structleak_plugin.c index 3f8dd486817814c5d96bdfbd1753a909562c6829..10292f791e992a058f3fbc38410d58a2dfa39307 100644 --- a/scripts/gcc-plugins/structleak_plugin.c +++ b/scripts/gcc-plugins/structleak_plugin.c @@ -57,21 +57,16 @@ static tree handle_user_attribute(tree *node, tree name, tree args, int flags, b return NULL_TREE; } -static struct attribute_spec user_attr = { - .name = "user", - .min_length = 0, - .max_length = 0, - .decl_required = false, - .type_required = false, - .function_type_required = false, - .handler = handle_user_attribute, -#if BUILDING_GCC_VERSION >= 4007 - 
.affects_type_identity = true -#endif -}; +static struct attribute_spec user_attr = { }; static void register_attributes(void *event_data, void *data) { + user_attr.name = "user"; + user_attr.handler = handle_user_attribute; +#if BUILDING_GCC_VERSION >= 4007 + user_attr.affects_type_identity = true; +#endif + register_attribute(&user_attr); } diff --git a/scripts/kconfig/zconf.y b/scripts/kconfig/zconf.y index 20d9caa4be990a69bd060e0e514421c9af041041..126e3f2e1ed74be7ed364f0441b021bb743c8dcc 100644 --- a/scripts/kconfig/zconf.y +++ b/scripts/kconfig/zconf.y @@ -31,7 +31,7 @@ struct symbol *symbol_hash[SYMBOL_HASHSIZE]; static struct menu *current_menu, *current_entry; %} -%expect 32 +%expect 31 %union { @@ -345,7 +345,7 @@ choice_block: /* if entry */ -if_entry: T_IF expr nl +if_entry: T_IF expr T_EOL { printd(DEBUG_PARSE, "%s:%d:if\n", zconf_curname(), zconf_lineno()); menu_add_entry(NULL); diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index 54deaa1066cf04a5df710695ed6fa36a7036f9ef..957f6041dd79ea2c9d0bf4cc48a665ff4b98db9a 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c @@ -677,7 +677,7 @@ static void handle_modversions(struct module *mod, struct elf_info *info, if (ELF_ST_TYPE(sym->st_info) == STT_SPARC_REGISTER) break; if (symname[0] == '.') { - char *munged = strdup(symname); + char *munged = NOFAIL(strdup(symname)); munged[0] = '_'; munged[1] = toupper(munged[1]); symname = munged; @@ -1329,7 +1329,7 @@ static Elf_Sym *find_elf_symbol2(struct elf_info *elf, Elf_Addr addr, static char *sec2annotation(const char *s) { if (match(s, init_exit_sections)) { - char *p = malloc(20); + char *p = NOFAIL(malloc(20)); char *r = p; *p++ = '_'; @@ -1349,7 +1349,7 @@ static char *sec2annotation(const char *s) strcat(p, " "); return r; } else { - return strdup(""); + return NOFAIL(strdup("")); } } @@ -2050,7 +2050,7 @@ void buf_write(struct buffer *buf, const char *s, int len) { if (buf->size - buf->pos < len) { buf->size += len + SZ; - buf->p = realloc(buf->p, buf->size); + buf->p = NOFAIL(realloc(buf->p, buf->size)); } strncpy(buf->p + buf->pos, s, len); buf->pos += len; diff --git a/security/commoncap.c b/security/commoncap.c index 1c1f64582bb5f78c359280fe09d065fb98e34348..ae26ef00698838a336e04d33acbb26709b236555 100644 --- a/security/commoncap.c +++ b/security/commoncap.c @@ -388,7 +388,7 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer, if (strcmp(name, "capability") != 0) return -EOPNOTSUPP; - dentry = d_find_alias(inode); + dentry = d_find_any_alias(inode); if (!dentry) return -EINVAL; diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c index 1d32cd20009a3bd35cf77bc11027354c00eb8812..ee9c3de5065abcd6706d1dff965ebbca0048949c 100644 --- a/security/integrity/evm/evm_crypto.c +++ b/security/integrity/evm/evm_crypto.c @@ -94,7 +94,8 @@ static struct shash_desc *init_desc(char type) mutex_lock(&mutex); if (*tfm) goto out; - *tfm = crypto_alloc_shash(algo, 0, CRYPTO_ALG_ASYNC); + *tfm = crypto_alloc_shash(algo, 0, + CRYPTO_ALG_ASYNC | CRYPTO_NOLOAD); if (IS_ERR(*tfm)) { rc = PTR_ERR(*tfm); pr_err("Can not allocate %s (reason: %ld)\n", algo, rc); diff --git a/security/security.c b/security/security.c index 4bf0f571b4ef94df1d3c44b7fed6b7b651c1924f..95a1a0f52880c30461869c32e5aae3399fc3abf6 100644 --- a/security/security.c +++ b/security/security.c @@ -111,6 +111,8 @@ static int lsm_append(char *new, char **result) if (*result == NULL) { *result = kstrdup(new, GFP_KERNEL); + if (*result == NULL) + return 
-ENOMEM; } else { /* Check if it is the last registered name */ if (match_last_lsm(*result, new)) diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index 286171a16ed255e20c396fca595600488c059c2d..c8fd5c10b7c677350eb23992fb9bd050b60d8391 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c @@ -2281,6 +2281,7 @@ static void smack_task_to_inode(struct task_struct *p, struct inode *inode) struct smack_known *skp = smk_of_task_struct(p); isp->smk_inode = skp; + isp->smk_flags |= SMK_INODE_INSTANT; } /* @@ -3959,15 +3960,19 @@ static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb) struct smack_known *skp = NULL; int rc = 0; struct smk_audit_info ad; + u16 family = sk->sk_family; #ifdef CONFIG_AUDIT struct lsm_network_audit net; #endif #if IS_ENABLED(CONFIG_IPV6) struct sockaddr_in6 sadd; int proto; + + if (family == PF_INET6 && skb->protocol == htons(ETH_P_IP)) + family = PF_INET; #endif /* CONFIG_IPV6 */ - switch (sk->sk_family) { + switch (family) { case PF_INET: #ifdef CONFIG_SECURITY_SMACK_NETFILTER /* @@ -3985,7 +3990,7 @@ static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb) */ netlbl_secattr_init(&secattr); - rc = netlbl_skbuff_getattr(skb, sk->sk_family, &secattr); + rc = netlbl_skbuff_getattr(skb, family, &secattr); if (rc == 0) skp = smack_from_secattr(&secattr, ssp); else @@ -3998,7 +4003,7 @@ access_check: #endif #ifdef CONFIG_AUDIT smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net); - ad.a.u.net->family = sk->sk_family; + ad.a.u.net->family = family; ad.a.u.net->netif = skb->skb_iif; ipv4_skb_to_auditdata(skb, &ad.a, NULL); #endif @@ -4012,7 +4017,7 @@ access_check: rc = smk_bu_note("IPv4 delivery", skp, ssp->smk_in, MAY_WRITE, rc); if (rc != 0) - netlbl_skbuff_err(skb, sk->sk_family, rc, 0); + netlbl_skbuff_err(skb, family, rc, 0); break; #if IS_ENABLED(CONFIG_IPV6) case PF_INET6: @@ -4028,7 +4033,7 @@ access_check: skp = smack_net_ambient; #ifdef CONFIG_AUDIT smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net); - ad.a.u.net->family = sk->sk_family; + ad.a.u.net->family = family; ad.a.u.net->netif = skb->skb_iif; ipv6_skb_to_auditdata(skb, &ad.a, NULL); #endif /* CONFIG_AUDIT */ diff --git a/sound/aoa/core/gpio-feature.c b/sound/aoa/core/gpio-feature.c index 71960089e207b1032f462bca61512bf93eafbc6a..65557421fe0bad1c02d7975c545828d7e5bb4ec4 100644 --- a/sound/aoa/core/gpio-feature.c +++ b/sound/aoa/core/gpio-feature.c @@ -88,8 +88,10 @@ static struct device_node *get_gpio(char *name, } reg = of_get_property(np, "reg", NULL); - if (!reg) + if (!reg) { + of_node_put(np); return NULL; + } *gpioptr = *reg; diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index faa67861cbc17e1ff7a9e409f76c717179d22370..729a85a6211d6eb649a796917f45131ed2f0c4cd 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c @@ -629,27 +629,33 @@ EXPORT_SYMBOL(snd_interval_refine); static int snd_interval_refine_first(struct snd_interval *i) { + const unsigned int last_max = i->max; + if (snd_BUG_ON(snd_interval_empty(i))) return -EINVAL; if (snd_interval_single(i)) return 0; i->max = i->min; - i->openmax = i->openmin; - if (i->openmax) + if (i->openmin) i->max++; + /* only exclude max value if also excluded before refine */ + i->openmax = (i->openmax && i->max >= last_max); return 1; } static int snd_interval_refine_last(struct snd_interval *i) { + const unsigned int last_min = i->min; + if (snd_BUG_ON(snd_interval_empty(i))) return -EINVAL; if (snd_interval_single(i)) return 0; i->min = i->max; - i->openmin = 
i->openmax; - if (i->openmin) + if (i->openmax) i->min--; + /* only exclude min value if also excluded before refine */ + i->openmin = (i->openmin && i->min <= last_min); return 1; } diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c index a4c571cb3b87de4fea7728dac3ace68bea83356b..350c33ec82b3f06a481a36d97e3a22e111e85e8c 100644 --- a/sound/core/seq/seq_clientmgr.c +++ b/sound/core/seq/seq_clientmgr.c @@ -2001,7 +2001,8 @@ static int snd_seq_ioctl_query_next_client(struct snd_seq_client *client, struct snd_seq_client *cptr = NULL; /* search for next client */ - info->client++; + if (info->client < INT_MAX) + info->client++; if (info->client < 0) info->client = 0; for (; info->client < SNDRV_SEQ_MAX_CLIENTS; info->client++) { diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c index 730ea91d9be886d9a16ff903ecc91f322c711e50..93676354f87f4e5c23d897c3d8cee568b6f69ac6 100644 --- a/sound/firewire/bebob/bebob.c +++ b/sound/firewire/bebob/bebob.c @@ -263,6 +263,8 @@ do_registration(struct work_struct *work) error: mutex_unlock(&devices_mutex); snd_bebob_stream_destroy_duplex(bebob); + kfree(bebob->maudio_special_quirk); + bebob->maudio_special_quirk = NULL; snd_card_free(bebob->card); dev_info(&bebob->unit->device, "Sound card registration failed: %d\n", err); diff --git a/sound/firewire/bebob/bebob_maudio.c b/sound/firewire/bebob/bebob_maudio.c index bd55620c6a479315f6787eb26de2c73bf4913eb6..c266997ad299d93c71632bebba2c83db6cf9bc09 100644 --- a/sound/firewire/bebob/bebob_maudio.c +++ b/sound/firewire/bebob/bebob_maudio.c @@ -96,17 +96,13 @@ int snd_bebob_maudio_load_firmware(struct fw_unit *unit) struct fw_device *device = fw_parent_device(unit); int err, rcode; u64 date; - __le32 cues[3] = { - cpu_to_le32(MAUDIO_BOOTLOADER_CUE1), - cpu_to_le32(MAUDIO_BOOTLOADER_CUE2), - cpu_to_le32(MAUDIO_BOOTLOADER_CUE3) - }; + __le32 *cues; /* check date of software used to build */ err = snd_bebob_read_block(unit, INFO_OFFSET_SW_DATE, &date, sizeof(u64)); if (err < 0) - goto end; + return err; /* * firmware version 5058 or later has date later than "20070401", but * 'date' is not null-terminated. 
@@ -114,20 +110,28 @@ int snd_bebob_maudio_load_firmware(struct fw_unit *unit) if (date < 0x3230303730343031LL) { dev_err(&unit->device, "Use firmware version 5058 or later\n"); - err = -ENOSYS; - goto end; + return -ENXIO; } + cues = kmalloc_array(3, sizeof(*cues), GFP_KERNEL); + if (!cues) + return -ENOMEM; + + cues[0] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE1); + cues[1] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE2); + cues[2] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE3); + rcode = fw_run_transaction(device->card, TCODE_WRITE_BLOCK_REQUEST, device->node_id, device->generation, device->max_speed, BEBOB_ADDR_REG_REQ, - cues, sizeof(cues)); + cues, 3 * sizeof(*cues)); + kfree(cues); if (rcode != RCODE_COMPLETE) { dev_err(&unit->device, "Failed to send a cue to load firmware\n"); err = -EIO; } -end: + return err; } @@ -290,10 +294,6 @@ snd_bebob_maudio_special_discover(struct snd_bebob *bebob, bool is1814) bebob->midi_output_ports = 2; } end: - if (err < 0) { - kfree(params); - bebob->maudio_special_quirk = NULL; - } mutex_unlock(&bebob->mutex); return err; } diff --git a/sound/firewire/digi00x/digi00x.c b/sound/firewire/digi00x/digi00x.c index 1f5e1d23f31a7132a5dfb5f3f34468b1517a89d0..ef689997d6a5b55f3d273ab4f8125fc87796cdd9 100644 --- a/sound/firewire/digi00x/digi00x.c +++ b/sound/firewire/digi00x/digi00x.c @@ -49,6 +49,7 @@ static void dg00x_free(struct snd_dg00x *dg00x) fw_unit_put(dg00x->unit); mutex_destroy(&dg00x->mutex); + kfree(dg00x); } static void dg00x_card_free(struct snd_card *card) diff --git a/sound/firewire/fireface/ff-protocol-ff400.c b/sound/firewire/fireface/ff-protocol-ff400.c index 12aa15df435d1cca2bf6f3cf42b60704b2340004..9f5036442ab964325c428b4ea978e67e4eadcec9 100644 --- a/sound/firewire/fireface/ff-protocol-ff400.c +++ b/sound/firewire/fireface/ff-protocol-ff400.c @@ -146,6 +146,7 @@ static int ff400_switch_fetching_mode(struct snd_ff *ff, bool enable) { __le32 *reg; int i; + int err; reg = kzalloc(sizeof(__le32) * 18, GFP_KERNEL); if (reg == NULL) @@ -163,9 +164,11 @@ static int ff400_switch_fetching_mode(struct snd_ff *ff, bool enable) reg[i] = cpu_to_le32(0x00000001); } - return snd_fw_transaction(ff->unit, TCODE_WRITE_BLOCK_REQUEST, - FF400_FETCH_PCM_FRAMES, reg, - sizeof(__le32) * 18, 0); + err = snd_fw_transaction(ff->unit, TCODE_WRITE_BLOCK_REQUEST, + FF400_FETCH_PCM_FRAMES, reg, + sizeof(__le32) * 18, 0); + kfree(reg); + return err; } static void ff400_dump_sync_status(struct snd_ff *ff, diff --git a/sound/firewire/fireworks/fireworks.c b/sound/firewire/fireworks/fireworks.c index 71a0613d3da040c49ef633f4877f98d8db219a99..f2d073365cf6365e5d3e5c841a7288a93c5b1ec5 100644 --- a/sound/firewire/fireworks/fireworks.c +++ b/sound/firewire/fireworks/fireworks.c @@ -301,6 +301,8 @@ error: snd_efw_transaction_remove_instance(efw); snd_efw_stream_destroy_duplex(efw); snd_card_free(efw->card); + kfree(efw->resp_buf); + efw->resp_buf = NULL; dev_info(&efw->unit->device, "Sound card registration failed: %d\n", err); } diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c index 413ab6313bb66515c284734ca92c87bef014c5fd..a315d5b6b86b5af105f86ca8d91ce3bde40a9672 100644 --- a/sound/firewire/oxfw/oxfw.c +++ b/sound/firewire/oxfw/oxfw.c @@ -136,6 +136,7 @@ static void oxfw_free(struct snd_oxfw *oxfw) kfree(oxfw->spec); mutex_destroy(&oxfw->mutex); + kfree(oxfw); } /* @@ -213,6 +214,7 @@ static int detect_quirks(struct snd_oxfw *oxfw) static void do_registration(struct work_struct *work) { struct snd_oxfw *oxfw = container_of(work, struct snd_oxfw, dwork.work); + int i; int err; if 
(oxfw->registered) @@ -275,7 +277,15 @@ error: snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->rx_stream); if (oxfw->has_output) snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->tx_stream); + for (i = 0; i < SND_OXFW_STREAM_FORMAT_ENTRIES; ++i) { + kfree(oxfw->tx_stream_formats[i]); + oxfw->tx_stream_formats[i] = NULL; + kfree(oxfw->rx_stream_formats[i]); + oxfw->rx_stream_formats[i] = NULL; + } snd_card_free(oxfw->card); + kfree(oxfw->spec); + oxfw->spec = NULL; dev_info(&oxfw->unit->device, "Sound card registration failed: %d\n", err); } diff --git a/sound/firewire/tascam/tascam.c b/sound/firewire/tascam/tascam.c index 44ad41fb7374070458de78e991330a744d3f0aff..d3fdc463a884e31e15518f2780c4ca080573a9de 100644 --- a/sound/firewire/tascam/tascam.c +++ b/sound/firewire/tascam/tascam.c @@ -93,6 +93,7 @@ static void tscm_free(struct snd_tscm *tscm) fw_unit_put(tscm->unit); mutex_destroy(&tscm->mutex); + kfree(tscm); } static void tscm_card_free(struct snd_card *card) diff --git a/sound/isa/msnd/msnd_pinnacle.c b/sound/isa/msnd/msnd_pinnacle.c index 45e561c425bfbdc02b89a090961b333facad898f..a3cf837c0ffdff3bd2b7aa1cdbc474be39f64487 100644 --- a/sound/isa/msnd/msnd_pinnacle.c +++ b/sound/isa/msnd/msnd_pinnacle.c @@ -82,10 +82,10 @@ static void set_default_audio_parameters(struct snd_msnd *chip) { - chip->play_sample_size = DEFSAMPLESIZE; + chip->play_sample_size = snd_pcm_format_width(DEFSAMPLESIZE); chip->play_sample_rate = DEFSAMPLERATE; chip->play_channels = DEFCHANNELS; - chip->capture_sample_size = DEFSAMPLESIZE; + chip->capture_sample_size = snd_pcm_format_width(DEFSAMPLESIZE); chip->capture_sample_rate = DEFSAMPLERATE; chip->capture_channels = DEFCHANNELS; } diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c index a2b56b188be4d90d9c51547e5fd8a7013700b14f..d68bb40d367602e4e31eefaf1c64f85cd23db378 100644 --- a/sound/pci/emu10k1/emufx.c +++ b/sound/pci/emu10k1/emufx.c @@ -2547,7 +2547,7 @@ static int snd_emu10k1_fx8010_ioctl(struct snd_hwdep * hw, struct file *file, un emu->support_tlv = 1; return put_user(SNDRV_EMU10K1_VERSION, (int __user *)argp); case SNDRV_EMU10K1_IOCTL_INFO: - info = kmalloc(sizeof(*info), GFP_KERNEL); + info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; snd_emu10k1_fx8010_info(emu, info); diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 417abbb1f72caf1971da89a93be77253ad81e438..8a027973f2adc415d2d88aae2d7c3dc5b952c1fe 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -3923,7 +3923,8 @@ void snd_hda_bus_reset_codecs(struct hda_bus *bus) list_for_each_codec(codec, bus) { /* FIXME: maybe a better way needed for forced reset */ - cancel_delayed_work_sync(&codec->jackpoll_work); + if (current_work() != &codec->jackpoll_work.work) + cancel_delayed_work_sync(&codec->jackpoll_work); #ifdef CONFIG_PM if (hda_codec_is_power_on(codec)) { hda_call_codec_suspend(codec); diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 22c13ad6a9ae4afdf074f9194332dea1c4e4d432..873d9824fbcff07c93359ea7670d687a197a0669 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -2510,7 +2510,8 @@ static const struct pci_device_id azx_ids[] = { .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB }, /* AMD Raven */ { PCI_DEVICE(0x1022, 0x15e3), - .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB }, + .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB | + AZX_DCAPS_PM_RUNTIME }, /* ATI HDMI */ { PCI_DEVICE(0x1002, 0x0002), .driver_data = AZX_DRIVER_ATIHDMI_NS | 
AZX_DCAPS_PRESET_ATI_HDMI_NS }, diff --git a/sound/soc/codecs/cs4265.c b/sound/soc/codecs/cs4265.c index fd966bb851cbcb0c2dcd8b5a3713c26b5e6ec7c8..2c11ea1c1541ca1b783286511722b8c6f8a6499e 100644 --- a/sound/soc/codecs/cs4265.c +++ b/sound/soc/codecs/cs4265.c @@ -154,11 +154,12 @@ static const struct snd_kcontrol_new cs4265_snd_controls[] = { SOC_SINGLE("E to F Buffer Disable Switch", CS4265_SPDIF_CTL1, 6, 1, 0), SOC_ENUM("C Data Access", cam_mode_enum), + SOC_SINGLE("SPDIF Switch", CS4265_SPDIF_CTL2, 5, 1, 1), SOC_SINGLE("Validity Bit Control Switch", CS4265_SPDIF_CTL2, 3, 1, 0), SOC_ENUM("SPDIF Mono/Stereo", spdif_mono_stereo_enum), - SOC_SINGLE("MMTLR Data Switch", 0, - 1, 1, 0), + SOC_SINGLE("MMTLR Data Switch", CS4265_SPDIF_CTL2, + 0, 1, 0), SOC_ENUM("Mono Channel Select", spdif_mono_select_enum), SND_SOC_BYTES("C Data Buffer", CS4265_C_DATA_BUFF, 24), }; @@ -221,10 +222,11 @@ static const struct snd_soc_dapm_route cs4265_audio_map[] = { {"LINEOUTR", NULL, "DAC"}, {"SPDIFOUT", NULL, "SPDIF"}, + {"Pre-amp MIC", NULL, "MICL"}, + {"Pre-amp MIC", NULL, "MICR"}, + {"ADC Mux", "MIC", "Pre-amp MIC"}, {"ADC Mux", "LINEIN", "LINEINL"}, {"ADC Mux", "LINEIN", "LINEINR"}, - {"ADC Mux", "MIC", "MICL"}, - {"ADC Mux", "MIC", "MICR"}, {"ADC", NULL, "ADC Mux"}, {"DOUT", NULL, "ADC"}, {"DAI1 Capture", NULL, "DOUT"}, @@ -496,7 +498,8 @@ static int cs4265_set_bias_level(struct snd_soc_codec *codec, SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_192000) #define CS4265_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_U16_LE | \ - SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_U24_LE) + SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_U24_LE | \ + SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_U32_LE) static const struct snd_soc_dai_ops cs4265_ops = { .hw_params = cs4265_pcm_hw_params, diff --git a/sound/soc/codecs/rt5514.c b/sound/soc/codecs/rt5514.c index e52e68b562382cd83a2723dcc2277f526e51cbc1..1bfc8db1826a198f14ae3fc020d4d2e018ea213d 100644 --- a/sound/soc/codecs/rt5514.c +++ b/sound/soc/codecs/rt5514.c @@ -64,8 +64,8 @@ static const struct reg_sequence rt5514_patch[] = { {RT5514_ANA_CTRL_LDO10, 0x00028604}, {RT5514_ANA_CTRL_ADCFED, 0x00000800}, {RT5514_ASRC_IN_CTRL1, 0x00000003}, - {RT5514_DOWNFILTER0_CTRL3, 0x10000362}, - {RT5514_DOWNFILTER1_CTRL3, 0x10000362}, + {RT5514_DOWNFILTER0_CTRL3, 0x10000352}, + {RT5514_DOWNFILTER1_CTRL3, 0x10000352}, }; static const struct reg_default rt5514_reg[] = { @@ -92,10 +92,10 @@ static const struct reg_default rt5514_reg[] = { {RT5514_ASRC_IN_CTRL1, 0x00000003}, {RT5514_DOWNFILTER0_CTRL1, 0x00020c2f}, {RT5514_DOWNFILTER0_CTRL2, 0x00020c2f}, - {RT5514_DOWNFILTER0_CTRL3, 0x10000362}, + {RT5514_DOWNFILTER0_CTRL3, 0x10000352}, {RT5514_DOWNFILTER1_CTRL1, 0x00020c2f}, {RT5514_DOWNFILTER1_CTRL2, 0x00020c2f}, - {RT5514_DOWNFILTER1_CTRL3, 0x10000362}, + {RT5514_DOWNFILTER1_CTRL3, 0x10000352}, {RT5514_ANA_CTRL_LDO10, 0x00028604}, {RT5514_ANA_CTRL_LDO18_16, 0x02000345}, {RT5514_ANA_CTRL_ADC12, 0x0000a2a8}, diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c index 0791fec398fb63a67c88d206b28536b6e5571079..1cd20b88a3a9c600637e1c90a3e0dec1a922edb2 100644 --- a/sound/soc/codecs/rt5677.c +++ b/sound/soc/codecs/rt5677.c @@ -5017,7 +5017,7 @@ static const struct i2c_device_id rt5677_i2c_id[] = { MODULE_DEVICE_TABLE(i2c, rt5677_i2c_id); static const struct of_device_id rt5677_of_match[] = { - { .compatible = "realtek,rt5677", RT5677 }, + { .compatible = "realtek,rt5677", .data = (const void *)RT5677 }, { } }; MODULE_DEVICE_TABLE(of, rt5677_of_match); diff --git a/sound/soc/codecs/wm8994.c 
b/sound/soc/codecs/wm8994.c index 3896523b71e9c924d1c62dbb0661f6945e840c4f..f289762cd676ea9f044e913d6a22fe2ec683134f 100644 --- a/sound/soc/codecs/wm8994.c +++ b/sound/soc/codecs/wm8994.c @@ -2431,6 +2431,7 @@ static int wm8994_set_dai_sysclk(struct snd_soc_dai *dai, snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_2, WM8994_OPCLK_ENA, 0); } + break; default: return -EINVAL; diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c index f058f2bdd519cda894faa89d56b320310182008f..33b4d14af2b3b51601477b5962cb8641d612bdfc 100644 --- a/sound/soc/samsung/i2s.c +++ b/sound/soc/samsung/i2s.c @@ -552,8 +552,11 @@ static int i2s_set_sysclk(struct snd_soc_dai *dai, } ret = clk_prepare_enable(i2s->op_clk); - if (ret) + if (ret) { + clk_put(i2s->op_clk); + i2s->op_clk = NULL; goto err; + } i2s->rclk_srcrate = clk_get_rate(i2s->op_clk); /* Over-ride the other's */ diff --git a/sound/soc/samsung/tm2_wm5110.c b/sound/soc/samsung/tm2_wm5110.c index 68698f3d72f96931ffc20b50510da436a2a87166..52e2fbe446d1c3a88ac0f644fe02d9f14bce361e 100644 --- a/sound/soc/samsung/tm2_wm5110.c +++ b/sound/soc/samsung/tm2_wm5110.c @@ -436,8 +436,7 @@ static int tm2_probe(struct platform_device *pdev) snd_soc_card_set_drvdata(card, priv); card->dev = dev; - priv->gpio_mic_bias = devm_gpiod_get(dev, "mic-bias", - GPIOF_OUT_INIT_LOW); + priv->gpio_mic_bias = devm_gpiod_get(dev, "mic-bias", GPIOD_OUT_HIGH); if (IS_ERR(priv->gpio_mic_bias)) { dev_err(dev, "Failed to get mic bias gpio\n"); return PTR_ERR(priv->gpio_mic_bias); diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c index 107133297e8dd169855e3953ebcf0ca630ef336a..9896e736fa5cc728857265e5b634987fdfb57c8c 100644 --- a/sound/soc/sh/rcar/core.c +++ b/sound/soc/sh/rcar/core.c @@ -925,12 +925,23 @@ static void rsnd_soc_dai_shutdown(struct snd_pcm_substream *substream, rsnd_dai_call(nolock_stop, io, priv); } +static int rsnd_soc_dai_prepare(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + struct rsnd_priv *priv = rsnd_dai_to_priv(dai); + struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai); + struct rsnd_dai_stream *io = rsnd_rdai_to_io(rdai, substream); + + return rsnd_dai_call(prepare, io, priv); +} + static const struct snd_soc_dai_ops rsnd_soc_dai_ops = { .startup = rsnd_soc_dai_startup, .shutdown = rsnd_soc_dai_shutdown, .trigger = rsnd_soc_dai_trigger, .set_fmt = rsnd_soc_dai_set_fmt, .set_tdm_slot = rsnd_soc_set_dai_tdm_slot, + .prepare = rsnd_soc_dai_prepare, }; void rsnd_parse_connect_common(struct rsnd_dai *rdai, diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h index c5de71f2dc8c429b9ed3aabab509ded1651b92f9..1768a0ae469d02bc41abf1480fcad2cadb110b08 100644 --- a/sound/soc/sh/rcar/rsnd.h +++ b/sound/soc/sh/rcar/rsnd.h @@ -283,6 +283,9 @@ struct rsnd_mod_ops { int (*nolock_stop)(struct rsnd_mod *mod, struct rsnd_dai_stream *io, struct rsnd_priv *priv); + int (*prepare)(struct rsnd_mod *mod, + struct rsnd_dai_stream *io, + struct rsnd_priv *priv); }; struct rsnd_dai_stream; @@ -312,6 +315,7 @@ struct rsnd_mod { * H 0: fallback * H 0: hw_params * H 0: pointer + * H 0: prepare */ #define __rsnd_mod_shift_nolock_start 0 #define __rsnd_mod_shift_nolock_stop 0 @@ -326,6 +330,7 @@ struct rsnd_mod { #define __rsnd_mod_shift_fallback 28 /* always called */ #define __rsnd_mod_shift_hw_params 28 /* always called */ #define __rsnd_mod_shift_pointer 28 /* always called */ +#define __rsnd_mod_shift_prepare 28 /* always called */ #define __rsnd_mod_add_probe 0 #define __rsnd_mod_add_remove 0 @@ -340,6 +345,7 @@ struct rsnd_mod { #define 
__rsnd_mod_add_fallback 0 #define __rsnd_mod_add_hw_params 0 #define __rsnd_mod_add_pointer 0 +#define __rsnd_mod_add_prepare 0 #define __rsnd_mod_call_probe 0 #define __rsnd_mod_call_remove 0 @@ -354,6 +360,7 @@ struct rsnd_mod { #define __rsnd_mod_call_pointer 0 #define __rsnd_mod_call_nolock_start 0 #define __rsnd_mod_call_nolock_stop 1 +#define __rsnd_mod_call_prepare 0 #define rsnd_mod_to_priv(mod) ((mod)->priv) #define rsnd_mod_id(mod) ((mod) ? (mod)->id : -1) diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c index f0fb85fda42d8c27db81ef6e85990a85ad6eea78..34223c8c28a871a995ff863ddb4a5d1921173470 100644 --- a/sound/soc/sh/rcar/ssi.c +++ b/sound/soc/sh/rcar/ssi.c @@ -280,7 +280,7 @@ static int rsnd_ssi_master_clk_start(struct rsnd_mod *mod, if (rsnd_ssi_is_multi_slave(mod, io)) return 0; - if (ssi->usrcnt > 1) { + if (ssi->rate) { if (ssi->rate != rate) { dev_err(dev, "SSI parent/child should use same rate\n"); return -EINVAL; @@ -482,7 +482,6 @@ static int rsnd_ssi_init(struct rsnd_mod *mod, struct rsnd_priv *priv) { struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod); - int ret; if (!rsnd_ssi_is_run_mods(mod, io)) return 0; @@ -493,10 +492,6 @@ static int rsnd_ssi_init(struct rsnd_mod *mod, rsnd_mod_power_on(mod); - ret = rsnd_ssi_master_clk_start(mod, io); - if (ret < 0) - return ret; - rsnd_ssi_config_init(mod, io); rsnd_ssi_register_setup(mod); @@ -847,6 +842,13 @@ static int rsnd_ssi_pointer(struct rsnd_mod *mod, return 0; } +static int rsnd_ssi_prepare(struct rsnd_mod *mod, + struct rsnd_dai_stream *io, + struct rsnd_priv *priv) +{ + return rsnd_ssi_master_clk_start(mod, io); +} + static struct rsnd_mod_ops rsnd_ssi_pio_ops = { .name = SSI_NAME, .probe = rsnd_ssi_common_probe, @@ -859,6 +861,7 @@ static struct rsnd_mod_ops rsnd_ssi_pio_ops = { .pointer= rsnd_ssi_pointer, .pcm_new = rsnd_ssi_pcm_new, .hw_params = rsnd_ssi_hw_params, + .prepare = rsnd_ssi_prepare, }; static int rsnd_ssi_dma_probe(struct rsnd_mod *mod, @@ -935,6 +938,7 @@ static struct rsnd_mod_ops rsnd_ssi_dma_ops = { .pcm_new = rsnd_ssi_pcm_new, .fallback = rsnd_ssi_fallback, .hw_params = rsnd_ssi_hw_params, + .prepare = rsnd_ssi_prepare, }; int rsnd_ssi_is_dma_mode(struct rsnd_mod *mod) diff --git a/sound/soc/sirf/sirf-usp.c b/sound/soc/sirf/sirf-usp.c index 77e7dcf969d0ce2e642f881c497d534bb7029df5..d70fcd4a1adf7428884bdf04400a5ecbb998845a 100644 --- a/sound/soc/sirf/sirf-usp.c +++ b/sound/soc/sirf/sirf-usp.c @@ -370,10 +370,9 @@ static int sirf_usp_pcm_probe(struct platform_device *pdev) platform_set_drvdata(pdev, usp); mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - base = devm_ioremap(&pdev->dev, mem_res->start, - resource_size(mem_res)); - if (base == NULL) - return -ENOMEM; + base = devm_ioremap_resource(&pdev->dev, mem_res); + if (IS_ERR(base)) + return PTR_ERR(base); usp->regmap = devm_regmap_init_mmio(&pdev->dev, base, &sirf_usp_regmap_config); if (IS_ERR(usp->regmap)) diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index 1c9f6a0d234f0a25657a6ea139319fb87b3e42f5..53c9d7525639362db277e0f6bd90ec09d6a6e4a8 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c @@ -4005,6 +4005,13 @@ int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card) continue; } + /* let users know there is no DAI to link */ + if (!dai_w->priv) { + dev_dbg(card->dev, "dai widget %s has no DAI\n", + dai_w->name); + continue; + } + dai = dai_w->priv; /* ...find all widgets with the same stream and link them */ diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index 
3d0dab8282ad7d2816f0964028522bc7e35a5c8b..6fc85199ac73774235981063fe952f30b7d96dc3 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c @@ -1607,6 +1607,14 @@ static u64 dpcm_runtime_base_format(struct snd_pcm_substream *substream) int i; for (i = 0; i < be->num_codecs; i++) { + /* + * Skip CODECs which don't support the current stream + * type. See soc_pcm_init_runtime_hw() for more details + */ + if (!snd_soc_dai_stream_valid(be->codec_dais[i], + stream)) + continue; + codec_dai_drv = be->codec_dais[i]->driver; if (stream == SNDRV_PCM_STREAM_PLAYBACK) codec_stream = &codec_dai_drv->playback; diff --git a/sound/soc/zte/zx-tdm.c b/sound/soc/zte/zx-tdm.c index dc955272f58b07350b042852e20209c9fc9fb08a..389272eeba9a6caad7e5d9074dff8c17a725ad82 100644 --- a/sound/soc/zte/zx-tdm.c +++ b/sound/soc/zte/zx-tdm.c @@ -144,8 +144,8 @@ static void zx_tdm_rx_dma_en(struct zx_tdm_info *tdm, bool on) #define ZX_TDM_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000) #define ZX_TDM_FMTBIT \ - (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FORMAT_MU_LAW | \ - SNDRV_PCM_FORMAT_A_LAW) + (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_MU_LAW | \ + SNDRV_PCM_FMTBIT_A_LAW) static int zx_tdm_dai_probe(struct snd_soc_dai *dai) { diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index 69bf5cf1e91ef1576e1c91193036d2b504977129..15cbe25657037ae4d6ba427862a0500326a85d6f 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -2875,7 +2875,8 @@ YAMAHA_DEVICE(0x7010, "UB99"), */ #define AU0828_DEVICE(vid, pid, vname, pname) { \ - USB_DEVICE_VENDOR_SPEC(vid, pid), \ + .idVendor = vid, \ + .idProduct = pid, \ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \ USB_DEVICE_ID_MATCH_INT_CLASS | \ USB_DEVICE_ID_MATCH_INT_SUBCLASS, \ diff --git a/tools/build/Makefile b/tools/build/Makefile index 5eb4b5ad79cb778f0e949a07f719743d7a62d3c7..5edf65e684ab70bb65bfd0e8dc821a61b605be6f 100644 --- a/tools/build/Makefile +++ b/tools/build/Makefile @@ -43,7 +43,7 @@ $(OUTPUT)fixdep-in.o: FORCE $(Q)$(MAKE) $(build)=fixdep $(OUTPUT)fixdep: $(OUTPUT)fixdep-in.o - $(QUIET_LINK)$(HOSTCC) $(LDFLAGS) -o $@ $< + $(QUIET_LINK)$(HOSTCC) $(HOSTLDFLAGS) -o $@ $< FORCE: diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c index 4c99c57736cefd155326293be053a7c4ffef9c6a..3965186b375a1dc372ec983f7dd561f3c6e505b0 100644 --- a/tools/hv/hv_kvp_daemon.c +++ b/tools/hv/hv_kvp_daemon.c @@ -286,7 +286,7 @@ static int kvp_key_delete(int pool, const __u8 *key, int key_size) * Found a match; just move the remaining * entries up. 
*/ - if (i == num_records) { + if (i == (num_records - 1)) { kvp_file_info[pool].num_records--; kvp_update_file(pool); return 0; diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c index 4e60e105583ee803916589ca56df0e81e12b8fb3..0d1acb704f641df7762f12f159afd70313089cc0 100644 --- a/tools/objtool/elf.c +++ b/tools/objtool/elf.c @@ -302,19 +302,34 @@ static int read_symbols(struct elf *elf) continue; sym->pfunc = sym->cfunc = sym; coldstr = strstr(sym->name, ".cold."); - if (coldstr) { - coldstr[0] = '\0'; - pfunc = find_symbol_by_name(elf, sym->name); - coldstr[0] = '.'; - - if (!pfunc) { - WARN("%s(): can't find parent function", - sym->name); - goto err; - } - - sym->pfunc = pfunc; - pfunc->cfunc = sym; + if (!coldstr) + continue; + + coldstr[0] = '\0'; + pfunc = find_symbol_by_name(elf, sym->name); + coldstr[0] = '.'; + + if (!pfunc) { + WARN("%s(): can't find parent function", + sym->name); + goto err; + } + + sym->pfunc = pfunc; + pfunc->cfunc = sym; + + /* + * Unfortunately, -fnoreorder-functions puts the child + * inside the parent. Remove the overlap so we can + * have sane assumptions. + * + * Note that pfunc->len now no longer matches + * pfunc->sym.st_size. + */ + if (sym->sec == pfunc->sec && + sym->offset >= pfunc->offset && + sym->offset + sym->len == pfunc->offset + pfunc->len) { + pfunc->len -= sym->len; } } } diff --git a/tools/perf/arch/powerpc/util/skip-callchain-idx.c b/tools/perf/arch/powerpc/util/skip-callchain-idx.c index 0c370f81e00280c6428ddfe0975584edcb9d02a7..9a53f6e9ef43e8a69473dbb6ca8d660a0236c672 100644 --- a/tools/perf/arch/powerpc/util/skip-callchain-idx.c +++ b/tools/perf/arch/powerpc/util/skip-callchain-idx.c @@ -58,9 +58,13 @@ static int check_return_reg(int ra_regno, Dwarf_Frame *frame) } /* - * Check if return address is on the stack. + * Check if return address is on the stack. If return address + * is in a register (typically R0), it is yet to be saved on + * the stack. 
*/ - if (nops != 0 || ops != NULL) + if ((nops != 0 || ops != NULL) && + !(nops == 1 && ops[0].atom == DW_OP_regx && + ops[0].number2 == 0 && ops[0].offset == 0)) return 0; /* @@ -243,10 +247,10 @@ int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain) u64 ip; u64 skip_slot = -1; - if (chain->nr < 3) + if (!chain || chain->nr < 3) return skip_slot; - ip = chain->ips[2]; + ip = chain->ips[1]; thread__find_addr_location(thread, PERF_RECORD_MISC_USER, MAP__FUNCTION, ip, &al); diff --git a/tools/perf/arch/powerpc/util/sym-handling.c b/tools/perf/arch/powerpc/util/sym-handling.c index 53d83d7e6a096a4d49a0b6f0cbfafc15a27866af..20e7d74d86cd16e86c8fd60e5839222d476f6409 100644 --- a/tools/perf/arch/powerpc/util/sym-handling.c +++ b/tools/perf/arch/powerpc/util/sym-handling.c @@ -141,8 +141,10 @@ void arch__post_process_probe_trace_events(struct perf_probe_event *pev, for (i = 0; i < ntevs; i++) { tev = &pev->tevs[i]; map__for_each_symbol(map, sym, tmp) { - if (map->unmap_ip(map, sym->start) == tev->point.address) + if (map->unmap_ip(map, sym->start) == tev->point.address) { arch__fix_tev_from_maps(pev, tev, map, sym); + break; + } } } } diff --git a/tools/perf/arch/x86/util/perf_regs.c b/tools/perf/arch/x86/util/perf_regs.c index 4b2caf6d48e794d3cd5a88aaa978db10aabb3185..fead6b3b4206e409fc4042ce5d850cae2629bae9 100644 --- a/tools/perf/arch/x86/util/perf_regs.c +++ b/tools/perf/arch/x86/util/perf_regs.c @@ -226,7 +226,7 @@ int arch_sdt_arg_parse_op(char *old_op, char **new_op) else if (rm[2].rm_so != rm[2].rm_eo) prefix[0] = '+'; else - strncpy(prefix, "+0", 2); + scnprintf(prefix, sizeof(prefix), "+0"); } /* Rename register */ diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c index 944070e98a2cd9dc4903e3be02278b1a7f8f3500..0afcc7eccc6191c81324e704bff811724c8e85ea 100644 --- a/tools/perf/bench/numa.c +++ b/tools/perf/bench/numa.c @@ -1098,7 +1098,7 @@ static void *worker_thread(void *__tdata) u8 *global_data; u8 *process_data; u8 *thread_data; - u64 bytes_done; + u64 bytes_done, secs; long work_done; u32 l; struct rusage rusage; @@ -1254,7 +1254,8 @@ static void *worker_thread(void *__tdata) timersub(&stop, &start0, &diff); td->runtime_ns = diff.tv_sec * NSEC_PER_SEC; td->runtime_ns += diff.tv_usec * NSEC_PER_USEC; - td->speed_gbs = bytes_done / (td->runtime_ns / NSEC_PER_SEC) / 1e9; + secs = td->runtime_ns / NSEC_PER_SEC; + td->speed_gbs = secs ? bytes_done / secs / 1e9 : 0; getrusage(RUSAGE_THREAD, &rusage); td->system_time_ns = rusage.ru_stime.tv_sec * NSEC_PER_SEC; diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c index 3479a1bc7caa13a928b2db09d972d1efdda64921..fb76423022e86c7cd3fd128b8fe8468fb76c82e9 100644 --- a/tools/perf/builtin-c2c.c +++ b/tools/perf/builtin-c2c.c @@ -2229,6 +2229,9 @@ static int perf_c2c__browse_cacheline(struct hist_entry *he) " s Togle full lenght of symbol and source line columns \n" " q Return back to cacheline list \n"; + if (!he) + return 0; + /* Display compact version first. 
*/ c2c.symbol_full = false; diff --git a/tools/perf/jvmti/jvmti_agent.c b/tools/perf/jvmti/jvmti_agent.c index cf36de7ea25587907d50db9c44e3dae5c3746c1a..c1d20d95143413a9eaaf740393fe11cbdb9a18da 100644 --- a/tools/perf/jvmti/jvmti_agent.c +++ b/tools/perf/jvmti/jvmti_agent.c @@ -35,6 +35,7 @@ #include #include /* for gettid() */ #include +#include #include "jvmti_agent.h" #include "../util/jitdump.h" @@ -249,7 +250,7 @@ void *jvmti_open(void) /* * jitdump file name */ - snprintf(dump_path, PATH_MAX, "%s/jit-%i.dump", jit_path, getpid()); + scnprintf(dump_path, PATH_MAX, "%s/jit-%i.dump", jit_path, getpid()); fd = open(dump_path, O_CREAT|O_TRUNC|O_RDWR, 0666); if (fd == -1) diff --git a/tools/perf/perf.h b/tools/perf/perf.h index 55086389fc06dceb41eabd598137c3933dbfd81e..96f62dd7e3ed3e2cd6589ac9dec70d67d274278f 100644 --- a/tools/perf/perf.h +++ b/tools/perf/perf.h @@ -24,7 +24,9 @@ static inline unsigned long long rdclock(void) return ts.tv_sec * 1000000000ULL + ts.tv_nsec; } +#ifndef MAX_NR_CPUS #define MAX_NR_CPUS 1024 +#endif extern const char *input_name; extern bool perf_host, perf_guest; diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c index 53d06f37406a22526638b3a9e58643caca736fba..5966f1f9b160ef9e63ffe1bbcc4bfeaee7bc0481 100644 --- a/tools/perf/tests/builtin-test.c +++ b/tools/perf/tests/builtin-test.c @@ -589,7 +589,7 @@ static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist) for (subi = 0; subi < subn; subi++) { pr_info("%2d.%1d: %-*s:", i, subi + 1, subw, t->subtest.get_desc(subi)); - err = test_and_print(t, skip, subi); + err = test_and_print(t, skip, subi + 1); if (err != TEST_OK && t->subtest.skip_if_fail) skip = true; } diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c index 81ede20f49d7b08afdc7f3aea4722f6cd82d2bee..4e9dad8c97632f1fa6c8dd33624a5ad1ad932ccb 100644 --- a/tools/perf/tests/topology.c +++ b/tools/perf/tests/topology.c @@ -43,6 +43,7 @@ static int session_write_header(char *path) perf_header__set_feat(&session->header, HEADER_CPU_TOPOLOGY); perf_header__set_feat(&session->header, HEADER_NRCPUS); + perf_header__set_feat(&session->header, HEADER_ARCH); session->header.data_size += DATA_SIZE; diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c index 5547457566a7130d25e97d012811e6ac8775ecf0..bbb9823e93b95ecf343863cf665b07f580c6e65e 100644 --- a/tools/perf/util/auxtrace.c +++ b/tools/perf/util/auxtrace.c @@ -197,6 +197,9 @@ static int auxtrace_queues__grow(struct auxtrace_queues *queues, for (i = 0; i < queues->nr_queues; i++) { list_splice_tail(&queues->queue_array[i].head, &queue_array[i].head); + queue_array[i].tid = queues->queue_array[i].tid; + queue_array[i].cpu = queues->queue_array[i].cpu; + queue_array[i].set = queues->queue_array[i].set; queue_array[i].priv = queues->queue_array[i].priv; } diff --git a/tools/perf/util/c++/clang.cpp b/tools/perf/util/c++/clang.cpp index bf31ceab33bd487d0021ccd9818384dde51fd371..89512504551b0b198a44ebe30e81d0972b86ce77 100644 --- a/tools/perf/util/c++/clang.cpp +++ b/tools/perf/util/c++/clang.cpp @@ -146,8 +146,15 @@ getBPFObjectFromModule(llvm::Module *Module) raw_svector_ostream ostream(*Buffer); legacy::PassManager PM; - if (TargetMachine->addPassesToEmitFile(PM, ostream, - TargetMachine::CGFT_ObjectFile)) { + bool NotAdded; +#if CLANG_VERSION_MAJOR < 7 + NotAdded = TargetMachine->addPassesToEmitFile(PM, ostream, + TargetMachine::CGFT_ObjectFile); +#else + NotAdded = TargetMachine->addPassesToEmitFile(PM, ostream, nullptr, + 
TargetMachine::CGFT_ObjectFile); +#endif + if (NotAdded) { llvm::errs() << "TargetMachine can't emit a file of this type\n"; return std::unique_ptr>(nullptr);; } diff --git a/tools/perf/util/comm.c b/tools/perf/util/comm.c index 8808570f8e9c25244bbd5886d4d2afd51ad09cf7..327e01cb1aa2f6647a36144d4c984c104520d5ab 100644 --- a/tools/perf/util/comm.c +++ b/tools/perf/util/comm.c @@ -18,9 +18,10 @@ static struct rb_root comm_str_root; static struct comm_str *comm_str__get(struct comm_str *cs) { - if (cs) - refcount_inc(&cs->refcnt); - return cs; + if (cs && refcount_inc_not_zero(&cs->refcnt)) + return cs; + + return NULL; } static void comm_str__put(struct comm_str *cs) @@ -62,9 +63,14 @@ static struct comm_str *comm_str__findnew(const char *str, struct rb_root *root) parent = *p; iter = rb_entry(parent, struct comm_str, rb_node); + /* + * If we race with comm_str__put, iter->refcnt is 0 + * and it will be removed within comm_str__put call + * shortly, ignore it in this search. + */ cmp = strcmp(str, iter->str); - if (!cmp) - return comm_str__get(iter); + if (!cmp && comm_str__get(iter)) + return iter; if (cmp < 0) p = &(*p)->rb_left; diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 226a9245d1db7e5437553d45578c3a61f9dc552b..2227ee92d8e21f69456b33c1673d529121fe9422 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -824,6 +824,12 @@ static void apply_config_terms(struct perf_evsel *evsel, } } +static bool is_dummy_event(struct perf_evsel *evsel) +{ + return (evsel->attr.type == PERF_TYPE_SOFTWARE) && + (evsel->attr.config == PERF_COUNT_SW_DUMMY); +} + /* * The enable_on_exec/disabled value strategy: * @@ -1054,6 +1060,14 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts, else perf_evsel__reset_sample_bit(evsel, PERIOD); } + + /* + * For initial_delay, a dummy event is added implicitly. + * The software event will trigger -EOPNOTSUPP error out, + * if BRANCH_STACK bit is set. + */ + if (opts->initial_delay && is_dummy_event(evsel)) + perf_evsel__reset_sample_bit(evsel, BRANCH_STACK); } static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads) diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index ba0cea8fef724f26499f8e14c74484d8bb9601a8..1ceb332575bd12a798665538a7c00884d8901af0 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -1824,6 +1824,7 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused) int cpu_nr = ff->ph->env.nr_cpus_avail; u64 size = 0; struct perf_header *ph = ff->ph; + bool do_core_id_test = true; ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu)); if (!ph->env.cpu) @@ -1878,6 +1879,13 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused) return 0; } + /* On s390 the socket_id number is not related to the numbers of cpus. + * The socket_id number might be higher than the numbers of cpus. + * This depends on the configuration. + */ + if (ph->env.arch && !strncmp(ph->env.arch, "s390", 4)) + do_core_id_test = false; + for (i = 0; i < (u32)cpu_nr; i++) { if (do_read_u32(ff, &nr)) goto free_cpu; @@ -1887,7 +1895,7 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused) if (do_read_u32(ff, &nr)) goto free_cpu; - if (nr != (u32)-1 && nr > (u32)cpu_nr) { + if (do_core_id_test && nr != (u32)-1 && nr > (u32)cpu_nr) { pr_debug("socket_id number is too big." 
"You may need to upgrade the perf tool.\n"); goto free_cpu; @@ -2201,7 +2209,7 @@ static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = { FEAT_OPR(NUMA_TOPOLOGY, numa_topology, true), FEAT_OPN(BRANCH_STACK, branch_stack, false), FEAT_OPR(PMU_MAPPINGS, pmu_mappings, false), - FEAT_OPN(GROUP_DESC, group_desc, false), + FEAT_OPR(GROUP_DESC, group_desc, false), FEAT_OPN(AUXTRACE, auxtrace, false), FEAT_OPN(STAT, stat, false), FEAT_OPN(CACHE, cache, true), diff --git a/tools/perf/util/llvm-utils.c b/tools/perf/util/llvm-utils.c index 4952b429caa7384e8ff25d0e580e7656d20c6e6d..2bdaac048a0a983a3d0af033ca8a22470377c965 100644 --- a/tools/perf/util/llvm-utils.c +++ b/tools/perf/util/llvm-utils.c @@ -265,16 +265,16 @@ static const char *kinc_fetch_script = "#!/usr/bin/env sh\n" "if ! test -d \"$KBUILD_DIR\"\n" "then\n" -" exit -1\n" +" exit 1\n" "fi\n" "if ! test -f \"$KBUILD_DIR/include/generated/autoconf.h\"\n" "then\n" -" exit -1\n" +" exit 1\n" "fi\n" "TMPDIR=`mktemp -d`\n" "if test -z \"$TMPDIR\"\n" "then\n" -" exit -1\n" +" exit 1\n" "fi\n" "cat << EOF > $TMPDIR/Makefile\n" "obj-y := dummy.o\n" diff --git a/tools/perf/util/namespaces.c b/tools/perf/util/namespaces.c index a58e91197729a40305d071809b95d83b49bc8548..1ef0049860a8b21307645dd2bfd6216b978845f5 100644 --- a/tools/perf/util/namespaces.c +++ b/tools/perf/util/namespaces.c @@ -138,6 +138,9 @@ struct nsinfo *nsinfo__copy(struct nsinfo *nsi) { struct nsinfo *nnsi; + if (nsi == NULL) + return NULL; + nnsi = calloc(1, sizeof(*nnsi)); if (nnsi != NULL) { nnsi->pid = nsi->pid; diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y index 988310cd3049cba330a16ebc07b67994ce3ba973..b6115cbdf84295b09a75fb07da77da9ee622e667 100644 --- a/tools/perf/util/parse-events.y +++ b/tools/perf/util/parse-events.y @@ -226,11 +226,16 @@ event_def: event_pmu | event_pmu: PE_NAME opt_pmu_config { + struct parse_events_state *parse_state = _parse_state; + struct parse_events_error *error = parse_state->error; struct list_head *list, *orig_terms, *terms; if (parse_events_copy_term_list($2, &orig_terms)) YYABORT; + if (error) + error->idx = @1.first_column; + ALLOC_LIST(list); if (parse_events_add_pmu(_parse_state, list, $1, $2)) { struct perf_pmu *pmu = NULL; diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c index c7187f067d3140ab0d0c378160fa3c732a7f3e21..f03fa7a835a1d0d829edd7604144a3f8300294d8 100644 --- a/tools/perf/util/scripting-engines/trace-event-python.c +++ b/tools/perf/util/scripting-engines/trace-event-python.c @@ -643,14 +643,11 @@ static void python_process_tracepoint(struct perf_sample *sample, if (_PyTuple_Resize(&t, n) == -1) Py_FatalError("error resizing Python tuple"); - if (!dict) { + if (!dict) call_object(handler, t, handler_name); - } else { + else call_object(handler, t, default_handler_name); - Py_DECREF(dict); - } - Py_XDECREF(all_entries_dict); Py_DECREF(t); } @@ -970,7 +967,6 @@ static void python_process_general_event(struct perf_sample *sample, call_object(handler, t, handler_name); - Py_DECREF(dict); Py_DECREF(t); } diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index bd9c6b31a504df654e16a9389abbb9028cfa2bb7..1512086c8cb855af64d216cd78464d1ab0481255 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -1038,9 +1038,7 @@ void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_ if (!printed || !summary_only) 
print_header("\t"); - if (topo.num_cpus > 1) - format_counters(&average.threads, &average.cores, - &average.packages); + format_counters(&average.threads, &average.cores, &average.packages); printed = 1; @@ -4031,7 +4029,9 @@ void process_cpuid() family = (fms >> 8) & 0xf; model = (fms >> 4) & 0xf; stepping = fms & 0xf; - if (family == 6 || family == 0xf) + if (family == 0xf) + family += (fms >> 20) & 0xff; + if (family >= 6) model += ((fms >> 16) & 0xf) << 4; if (!quiet) { diff --git a/tools/testing/nvdimm/pmem-dax.c b/tools/testing/nvdimm/pmem-dax.c index b53596ad601bb4964231c58151308f61221868f5..2e7fd822796911c2875a1dad87dca67fe185d589 100644 --- a/tools/testing/nvdimm/pmem-dax.c +++ b/tools/testing/nvdimm/pmem-dax.c @@ -31,17 +31,21 @@ long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff, if (get_nfit_res(pmem->phys_addr + offset)) { struct page *page; - *kaddr = pmem->virt_addr + offset; + if (kaddr) + *kaddr = pmem->virt_addr + offset; page = vmalloc_to_page(pmem->virt_addr + offset); - *pfn = page_to_pfn_t(page); + if (pfn) + *pfn = page_to_pfn_t(page); pr_debug_ratelimited("%s: pmem: %p pgoff: %#lx pfn: %#lx\n", __func__, pmem, pgoff, page_to_pfn(page)); return 1; } - *kaddr = pmem->virt_addr + offset; - *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags); + if (kaddr) + *kaddr = pmem->virt_addr + offset; + if (pfn) + *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags); /* * If badblocks are present, limit known good range to the diff --git a/tools/testing/selftests/bpf/test_kmod.sh b/tools/testing/selftests/bpf/test_kmod.sh index ed4774d8d6ed1c792f14825628a2c486acf7df08..0f7b9aa9c6a5d445d1c46d9d564f3bce40dd86e9 100755 --- a/tools/testing/selftests/bpf/test_kmod.sh +++ b/tools/testing/selftests/bpf/test_kmod.sh @@ -1,6 +1,15 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 +# Kselftest framework requirement - SKIP code is 4. 
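Every selftest touched from this point in the series follows the same skip convention: when a prerequisite is missing (not running as root, a test module absent, the clock adjusted externally), the test exits with code 4 so the kselftest harness reports SKIP rather than PASS or FAIL. The shell scripts hard-code ksft_skip=4; the C tests return KSFT_SKIP from ../kselftest.h. Below is a minimal C sketch of the same idea, with KSFT_SKIP defined locally only so the snippet stands alone:

/*
 * Exit with the kselftest SKIP code (4) when the test cannot run.
 * In-tree selftests include ../kselftest.h instead of defining it here.
 */
#include <stdio.h>
#include <unistd.h>

#define KSFT_SKIP 4	/* same value the shell tests set as ksft_skip=4 */

int main(void)
{
	if (geteuid() != 0) {
		printf("skip: this test must be run as root\n");
		return KSFT_SKIP;	/* harness counts the test as skipped */
	}

	/* ... the actual test body would run here ... */
	printf("ok\n");
	return 0;
}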
+ksft_skip=4 + +msg="skip all tests:" +if [ "$(id -u)" != "0" ]; then + echo $msg please run this as root >&2 + exit $ksft_skip +fi + SRC_TREE=../../../../ test_run() diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 9167ee97631475baddbb4dc168b9b36ccdf01e04..041dbbb30ff0aa8551e615a5084d1ea64e20c0f3 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -5895,7 +5895,7 @@ static struct bpf_test tests[] = { BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_MOV64_REG(BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, .fixup_map_in_map = { 3 }, @@ -5918,7 +5918,7 @@ static struct bpf_test tests[] = { BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_MOV64_REG(BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, .fixup_map_in_map = { 3 }, @@ -5941,7 +5941,7 @@ static struct bpf_test tests[] = { BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_MOV64_REG(BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, .fixup_map_in_map = { 3 }, diff --git a/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc b/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc new file mode 100644 index 0000000000000000000000000000000000000000..3b1f45e13a2e7f21a61bc34ac964582222e88cb8 --- /dev/null +++ b/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc @@ -0,0 +1,28 @@ +#!/bin/sh +# description: Snapshot and tracing setting +# flags: instance + +[ ! -f snapshot ] && exit_unsupported + +echo "Set tracing off" +echo 0 > tracing_on + +echo "Allocate and take a snapshot" +echo 1 > snapshot + +# Since trace buffer is empty, snapshot is also empty, but allocated +grep -q "Snapshot is allocated" snapshot + +echo "Ensure keep tracing off" +test `cat tracing_on` -eq 0 + +echo "Set tracing on" +echo 1 > tracing_on + +echo "Take a snapshot again" +echo 1 > snapshot + +echo "Ensure keep tracing on" +test `cat tracing_on` -eq 1 + +exit 0 diff --git a/tools/testing/selftests/powerpc/harness.c b/tools/testing/selftests/powerpc/harness.c index 66d31de60b9ae93ee53175b33983e3d86d67795f..9d7166dfad1eae570d2f24a48605bfa0a2f4848a 100644 --- a/tools/testing/selftests/powerpc/harness.c +++ b/tools/testing/selftests/powerpc/harness.c @@ -85,13 +85,13 @@ wait: return status; } -static void alarm_handler(int signum) +static void sig_handler(int signum) { - /* Jut wake us up from waitpid */ + /* Just wake us up from waitpid */ } -static struct sigaction alarm_action = { - .sa_handler = alarm_handler, +static struct sigaction sig_action = { + .sa_handler = sig_handler, }; void test_harness_set_timeout(uint64_t time) @@ -106,8 +106,14 @@ int test_harness(int (test_function)(void), char *name) test_start(name); test_set_git_version(GIT_VERSION); - if (sigaction(SIGALRM, &alarm_action, NULL)) { - perror("sigaction"); + if (sigaction(SIGINT, &sig_action, NULL)) { + perror("sigaction (sigint)"); + test_error(name); + return 1; + } + + if (sigaction(SIGALRM, &sig_action, NULL)) { + perror("sigaction (sigalrm)"); test_error(name); return 1; } diff --git a/tools/testing/selftests/pstore/pstore_post_reboot_tests b/tools/testing/selftests/pstore/pstore_post_reboot_tests index 6ccb154cb4aa4f36184811d406ed9f4317647e4f..22f8df1ad7d484418235b6dadd290baca3bf3c6c 100755 --- 
a/tools/testing/selftests/pstore/pstore_post_reboot_tests +++ b/tools/testing/selftests/pstore/pstore_post_reboot_tests @@ -7,13 +7,16 @@ # # Released under the terms of the GPL v2. +# Kselftest framework requirement - SKIP code is 4. +ksft_skip=4 + . ./common_tests if [ -e $REBOOT_FLAG ]; then rm $REBOOT_FLAG else prlog "pstore_crash_test has not been executed yet. we skip further tests." - exit 0 + exit $ksft_skip fi prlog -n "Mounting pstore filesystem ... " diff --git a/tools/testing/selftests/static_keys/test_static_keys.sh b/tools/testing/selftests/static_keys/test_static_keys.sh index 24cff498b31aa831b388e638929f29c36db07dbd..fc9f8cde7d4223c3fd564105942d4d648acb8627 100755 --- a/tools/testing/selftests/static_keys/test_static_keys.sh +++ b/tools/testing/selftests/static_keys/test_static_keys.sh @@ -2,6 +2,19 @@ # SPDX-License-Identifier: GPL-2.0 # Runs static keys kernel module tests +# Kselftest framework requirement - SKIP code is 4. +ksft_skip=4 + +if ! /sbin/modprobe -q -n test_static_key_base; then + echo "static_key: module test_static_key_base is not found [SKIP]" + exit $ksft_skip +fi + +if ! /sbin/modprobe -q -n test_static_keys; then + echo "static_key: module test_static_keys is not found [SKIP]" + exit $ksft_skip +fi + if /sbin/modprobe -q test_static_key_base; then if /sbin/modprobe -q test_static_keys; then echo "static_key: ok" diff --git a/tools/testing/selftests/sync/config b/tools/testing/selftests/sync/config new file mode 100644 index 0000000000000000000000000000000000000000..1ab7e8130db246556499244d7318c4e6853fd5e6 --- /dev/null +++ b/tools/testing/selftests/sync/config @@ -0,0 +1,4 @@ +CONFIG_STAGING=y +CONFIG_ANDROID=y +CONFIG_SYNC=y +CONFIG_SW_SYNC=y diff --git a/tools/testing/selftests/sysctl/sysctl.sh b/tools/testing/selftests/sysctl/sysctl.sh index ec232c3cfcaac3b8f52936eabb908a0c316183f8..584eb8ea780a49220782d08e104756199fc19934 100755 --- a/tools/testing/selftests/sysctl/sysctl.sh +++ b/tools/testing/selftests/sysctl/sysctl.sh @@ -14,6 +14,9 @@ # This performs a series tests against the proc sysctl interface. +# Kselftest framework requirement - SKIP code is 4. +ksft_skip=4 + TEST_NAME="sysctl" TEST_DRIVER="test_${TEST_NAME}" TEST_DIR=$(dirname $0) @@ -41,7 +44,7 @@ test_modprobe() echo "$0: $DIR not present" >&2 echo "You must have the following enabled in your kernel:" >&2 cat $TEST_DIR/config >&2 - exit 1 + exit $ksft_skip fi } @@ -98,28 +101,30 @@ test_reqs() uid=$(id -u) if [ $uid -ne 0 ]; then echo $msg must be run as root >&2 - exit 0 + exit $ksft_skip fi if ! which perl 2> /dev/null > /dev/null; then echo "$0: You need perl installed" - exit 1 + exit $ksft_skip fi if ! which getconf 2> /dev/null > /dev/null; then echo "$0: You need getconf installed" - exit 1 + exit $ksft_skip fi if ! which diff 2> /dev/null > /dev/null; then echo "$0: You need diff installed" - exit 1 + exit $ksft_skip fi } function load_req_mod() { - trap "test_modprobe" EXIT - if [ ! -d $DIR ]; then + if ! modprobe -q -n $TEST_DRIVER; then + echo "$0: module $TEST_DRIVER not found [SKIP]" + exit $ksft_skip + fi modprobe $TEST_DRIVER if [ $? 
-ne 0 ]; then exit @@ -765,6 +770,7 @@ function parse_args() test_reqs allow_user_defaults check_production_sysctl_writes_strict +test_modprobe load_req_mod trap "test_finish" EXIT diff --git a/tools/testing/selftests/timers/raw_skew.c b/tools/testing/selftests/timers/raw_skew.c index ca6cd146aafe15420cb345ef3ba934dd21e13220..dcf73c5dab6e12e9074d15efae1ba9c65a139d60 100644 --- a/tools/testing/selftests/timers/raw_skew.c +++ b/tools/testing/selftests/timers/raw_skew.c @@ -134,6 +134,11 @@ int main(int argv, char **argc) printf(" %lld.%i(act)", ppm/1000, abs((int)(ppm%1000))); if (llabs(eppm - ppm) > 1000) { + if (tx1.offset || tx2.offset || + tx1.freq != tx2.freq || tx1.tick != tx2.tick) { + printf(" [SKIP]\n"); + return ksft_exit_skip("The clock was adjusted externally. Shutdown NTPd or other time sync daemons\n"); + } printf(" [FAILED]\n"); return ksft_exit_fail(); } diff --git a/tools/testing/selftests/user/test_user_copy.sh b/tools/testing/selftests/user/test_user_copy.sh index d60506fc77f8bcba61f222db0b0df05a38e2e68b..f9b31a57439b759c1813ca94ac948a998e9dca51 100755 --- a/tools/testing/selftests/user/test_user_copy.sh +++ b/tools/testing/selftests/user/test_user_copy.sh @@ -2,6 +2,13 @@ # SPDX-License-Identifier: GPL-2.0 # Runs copy_to/from_user infrastructure using test_user_copy kernel module +# Kselftest framework requirement - SKIP code is 4. +ksft_skip=4 + +if ! /sbin/modprobe -q -n test_user_copy; then + echo "user: module test_user_copy is not found [SKIP]" + exit $ksft_skip +fi if /sbin/modprobe -q test_user_copy; then /sbin/modprobe -q -r test_user_copy echo "user_copy: ok" diff --git a/tools/testing/selftests/vm/compaction_test.c b/tools/testing/selftests/vm/compaction_test.c index 1097f04e4d80e6cff93bb9e912c4c38894cd5959..bcec71250873108efdeae50eab0874b3924204c4 100644 --- a/tools/testing/selftests/vm/compaction_test.c +++ b/tools/testing/selftests/vm/compaction_test.c @@ -16,6 +16,8 @@ #include #include +#include "../kselftest.h" + #define MAP_SIZE 1048576 struct map_list { @@ -169,7 +171,7 @@ int main(int argc, char **argv) printf("Either the sysctl compact_unevictable_allowed is not\n" "set to 1 or couldn't read the proc file.\n" "Skipping the test\n"); - return 0; + return KSFT_SKIP; } lim.rlim_cur = RLIM_INFINITY; diff --git a/tools/testing/selftests/vm/mlock2-tests.c b/tools/testing/selftests/vm/mlock2-tests.c index 4997b9222cfa5055f9c07f4f1f0a1454bae89d6e..637b6d0ac0d0bf63d88ff5f5782a65453b486a7a 100644 --- a/tools/testing/selftests/vm/mlock2-tests.c +++ b/tools/testing/selftests/vm/mlock2-tests.c @@ -9,6 +9,8 @@ #include #include "mlock2.h" +#include "../kselftest.h" + struct vm_boundaries { unsigned long start; unsigned long end; @@ -303,7 +305,7 @@ static int test_mlock_lock() if (mlock2_(map, 2 * page_size, 0)) { if (errno == ENOSYS) { printf("Cannot call new mlock family, skipping test\n"); - _exit(0); + _exit(KSFT_SKIP); } perror("mlock2(0)"); goto unmap; @@ -412,7 +414,7 @@ static int test_mlock_onfault() if (mlock2_(map, 2 * page_size, MLOCK_ONFAULT)) { if (errno == ENOSYS) { printf("Cannot call new mlock family, skipping test\n"); - _exit(0); + _exit(KSFT_SKIP); } perror("mlock2(MLOCK_ONFAULT)"); goto unmap; @@ -425,7 +427,7 @@ static int test_mlock_onfault() if (munlock(map, 2 * page_size)) { if (errno == ENOSYS) { printf("Cannot call new mlock family, skipping test\n"); - _exit(0); + _exit(KSFT_SKIP); } perror("munlock()"); goto unmap; @@ -457,7 +459,7 @@ static int test_lock_onfault_of_present() if (mlock2_(map, 2 * page_size, MLOCK_ONFAULT)) { if (errno == 
ENOSYS) { printf("Cannot call new mlock family, skipping test\n"); - _exit(0); + _exit(KSFT_SKIP); } perror("mlock2(MLOCK_ONFAULT)"); goto unmap; @@ -583,7 +585,7 @@ static int test_vma_management(bool call_mlock) if (call_mlock && mlock2_(map, 3 * page_size, MLOCK_ONFAULT)) { if (errno == ENOSYS) { printf("Cannot call new mlock family, skipping test\n"); - _exit(0); + _exit(KSFT_SKIP); } perror("mlock(ONFAULT)\n"); goto out; diff --git a/tools/testing/selftests/vm/run_vmtests b/tools/testing/selftests/vm/run_vmtests index 45708aa3ce470bab45f81c2cc432342cdafb421d..57ab6ac0d4a4c82c44ae04009276bc0fc14a841a 100755 --- a/tools/testing/selftests/vm/run_vmtests +++ b/tools/testing/selftests/vm/run_vmtests @@ -2,6 +2,9 @@ # SPDX-License-Identifier: GPL-2.0 #please run as root +# Kselftest framework requirement - SKIP code is 4. +ksft_skip=4 + mnt=./huge exitcode=0 @@ -36,7 +39,7 @@ if [ -n "$freepgs" ] && [ -n "$hpgsize_KB" ]; then echo $(( $lackpgs + $nr_hugepgs )) > /proc/sys/vm/nr_hugepages if [ $? -ne 0 ]; then echo "Please run this test as root" - exit 1 + exit $ksft_skip fi while read name size unit; do if [ "$name" = "HugePages_Free:" ]; then diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c index de2f9ec8a87fb342a7a595a13b009358d9eae000..7b8171e3128a8715a62a10e020c69ea3ca1c5321 100644 --- a/tools/testing/selftests/vm/userfaultfd.c +++ b/tools/testing/selftests/vm/userfaultfd.c @@ -69,6 +69,8 @@ #include #include +#include "../kselftest.h" + #ifdef __NR_userfaultfd static unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size; @@ -1322,7 +1324,7 @@ int main(int argc, char **argv) int main(void) { printf("skip: Skipping userfaultfd test (missing __NR_userfaultfd)\n"); - return 0; + return KSFT_SKIP; } #endif /* __NR_userfaultfd */ diff --git a/tools/testing/selftests/x86/sigreturn.c b/tools/testing/selftests/x86/sigreturn.c index 246145b84a127c341fd1fdc4fb41bcf6c7d51644..4d9dc3f2fd7048212181c51f03cef4d1650e07c9 100644 --- a/tools/testing/selftests/x86/sigreturn.c +++ b/tools/testing/selftests/x86/sigreturn.c @@ -610,21 +610,41 @@ static int test_valid_sigreturn(int cs_bits, bool use_16bit_ss, int force_ss) */ for (int i = 0; i < NGREG; i++) { greg_t req = requested_regs[i], res = resulting_regs[i]; + if (i == REG_TRAPNO || i == REG_IP) continue; /* don't care */ - if (i == REG_SP) { - printf("\tSP: %llx -> %llx\n", (unsigned long long)req, - (unsigned long long)res); + if (i == REG_SP) { /* - * In many circumstances, the high 32 bits of rsp - * are zeroed. For example, we could be a real - * 32-bit program, or we could hit any of a number - * of poorly-documented IRET or segmented ESP - * oddities. If this happens, it's okay. + * If we were using a 16-bit stack segment, then + * the kernel is a bit stuck: IRET only restores + * the low 16 bits of ESP/RSP if SS is 16-bit. + * The kernel uses a hack to restore bits 31:16, + * but that hack doesn't help with bits 63:32. + * On Intel CPUs, bits 63:32 end up zeroed, and, on + * AMD CPUs, they leak the high bits of the kernel + * espfix64 stack pointer. There's very little that + * the kernel can do about it. + * + * Similarly, if we are returning to a 32-bit context, + * the CPU will often lose the high 32 bits of RSP. 
*/ - if (res == (req & 0xFFFFFFFF)) - continue; /* OK; not expected to work */ + + if (res == req) + continue; + + if (cs_bits != 64 && ((res ^ req) & 0xFFFFFFFF) == 0) { + printf("[NOTE]\tSP: %llx -> %llx\n", + (unsigned long long)req, + (unsigned long long)res); + continue; + } + + printf("[FAIL]\tSP mismatch: requested 0x%llx; got 0x%llx\n", + (unsigned long long)requested_regs[i], + (unsigned long long)resulting_regs[i]); + nerrs++; + continue; } bool ignore_reg = false; @@ -654,25 +674,18 @@ static int test_valid_sigreturn(int cs_bits, bool use_16bit_ss, int force_ss) #endif /* Sanity check on the kernel */ - if (i == REG_CX && requested_regs[i] != resulting_regs[i]) { + if (i == REG_CX && req != res) { printf("[FAIL]\tCX (saved SP) mismatch: requested 0x%llx; got 0x%llx\n", - (unsigned long long)requested_regs[i], - (unsigned long long)resulting_regs[i]); + (unsigned long long)req, + (unsigned long long)res); nerrs++; continue; } - if (requested_regs[i] != resulting_regs[i] && !ignore_reg) { - /* - * SP is particularly interesting here. The - * usual cause of failures is that we hit the - * nasty IRET case of returning to a 16-bit SS, - * in which case bits 16:31 of the *kernel* - * stack pointer persist in ESP. - */ + if (req != res && !ignore_reg) { printf("[FAIL]\tReg %d mismatch: requested 0x%llx; got 0x%llx\n", - i, (unsigned long long)requested_regs[i], - (unsigned long long)resulting_regs[i]); + i, (unsigned long long)req, + (unsigned long long)res); nerrs++; } } diff --git a/tools/testing/selftests/zram/zram.sh b/tools/testing/selftests/zram/zram.sh index 754de7da426a80a2ae386042d30a5904b44446e6..232e958ec454756501f2caa8eaf2133067fe10ac 100755 --- a/tools/testing/selftests/zram/zram.sh +++ b/tools/testing/selftests/zram/zram.sh @@ -2,6 +2,9 @@ # SPDX-License-Identifier: GPL-2.0 TCID="zram.sh" +# Kselftest framework requirement - SKIP code is 4. +ksft_skip=4 + . ./zram_lib.sh run_zram () { @@ -24,5 +27,5 @@ elif [ -b /dev/zram0 ]; then else echo "$TCID : No zram.ko module or /dev/zram0 device file not found" echo "$TCID : CONFIG_ZRAM is not set" - exit 1 + exit $ksft_skip fi diff --git a/tools/testing/selftests/zram/zram_lib.sh b/tools/testing/selftests/zram/zram_lib.sh index f6a9c73e7a442e7988b0820ebc809a342981df91..9e73a4fb9b0aa9b2a2e81368badfbe278876695d 100755 --- a/tools/testing/selftests/zram/zram_lib.sh +++ b/tools/testing/selftests/zram/zram_lib.sh @@ -18,6 +18,9 @@ MODULE=0 dev_makeswap=-1 dev_mounted=-1 +# Kselftest framework requirement - SKIP code is 4. +ksft_skip=4 + trap INT check_prereqs() @@ -27,7 +30,7 @@ check_prereqs() if [ $uid -ne 0 ]; then echo $msg must be run as root >&2 - exit 0 + exit $ksft_skip fi } diff --git a/tools/usb/ffs-test.c b/tools/usb/ffs-test.c index 95dd14648ba51cf9fcc7f86a38e81e8f47aa9f61..0f395dfb7774c3f87fffc337c89afbd8b5f247dd 100644 --- a/tools/usb/ffs-test.c +++ b/tools/usb/ffs-test.c @@ -44,12 +44,25 @@ /******************** Little Endian Handling ********************************/ -#define cpu_to_le16(x) htole16(x) -#define cpu_to_le32(x) htole32(x) +/* + * cpu_to_le16/32 are used when initializing structures, a context where a + * function call is not allowed. To solve this, we code cpu_to_le16/32 in a way + * that allows them to be used when initializing structures. 
+ */ + +#if __BYTE_ORDER == __LITTLE_ENDIAN +#define cpu_to_le16(x) (x) +#define cpu_to_le32(x) (x) +#else +#define cpu_to_le16(x) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8)) +#define cpu_to_le32(x) \ + ((((x) & 0xff000000u) >> 24) | (((x) & 0x00ff0000u) >> 8) | \ + (((x) & 0x0000ff00u) << 8) | (((x) & 0x000000ffu) << 24)) +#endif + #define le32_to_cpu(x) le32toh(x) #define le16_to_cpu(x) le16toh(x) - /******************** Messages and Errors ***********************************/ static const char argv0[] = "ffs-test"; diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c index b69798a7880e5295464227e48124aefe36295bd1..ec275b8472a9b7f26704960b87a5e62588f02a4f 100644 --- a/virt/kvm/arm/mmu.c +++ b/virt/kvm/arm/mmu.c @@ -901,19 +901,35 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache pmd = stage2_get_pmd(kvm, cache, addr); VM_BUG_ON(!pmd); - /* - * Mapping in huge pages should only happen through a fault. If a - * page is merged into a transparent huge page, the individual - * subpages of that huge page should be unmapped through MMU - * notifiers before we get here. - * - * Merging of CompoundPages is not supported; they should become - * splitting first, unmapped, merged, and mapped back in on-demand. - */ - VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd)); - old_pmd = *pmd; if (pmd_present(old_pmd)) { + /* + * Multiple vcpus faulting on the same PMD entry, can + * lead to them sequentially updating the PMD with the + * same value. Following the break-before-make + * (pmd_clear() followed by tlb_flush()) process can + * hinder forward progress due to refaults generated + * on missing translations. + * + * Skip updating the page table if the entry is + * unchanged. + */ + if (pmd_val(old_pmd) == pmd_val(*new_pmd)) + return 0; + + /* + * Mapping in huge pages should only happen through a + * fault. If a page is merged into a transparent huge + * page, the individual subpages of that huge page + * should be unmapped through MMU notifiers before we + * get here. + * + * Merging of CompoundPages is not supported; they + * should become splitting first, unmapped, merged, + * and mapped back in on-demand. + */ + VM_BUG_ON(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd)); + pmd_clear(pmd); kvm_tlb_flush_vmid_ipa(kvm, addr); } else { @@ -969,6 +985,10 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, /* Create 2nd stage page table mapping - Level 3 */ old_pte = *pte; if (pte_present(old_pte)) { + /* Skip page table update if there is no change */ + if (pte_val(old_pte) == pte_val(*new_pte)) + return 0; + kvm_set_pte(pte, __pte(0)); kvm_tlb_flush_vmid_ipa(kvm, addr); } else { diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c index 5801261f3adddeaab819f4ffd23ecd5839d03c8a..6f7f26ae72b4505d8ef1a3fbf1606ad44fcdc16c 100644 --- a/virt/kvm/arm/vgic/vgic-init.c +++ b/virt/kvm/arm/vgic/vgic-init.c @@ -277,6 +277,10 @@ int vgic_init(struct kvm *kvm) if (vgic_initialized(kvm)) return 0; + /* Are we also in the middle of creating a VCPU? 
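As the comment in the ffs-test.c hunk above explains, cpu_to_le16/32 are needed inside static structure initializers, where a function call is not a constant expression, so the conversion is rewritten as plain shift-and-mask macros. The following standalone sketch shows the same trick; it uses the compiler-predefined __BYTE_ORDER__ macros rather than <endian.h>, and my_cpu_to_le16 and struct desc_header are illustrative names only:

/*
 * Endian conversion as a constant expression, so it can appear in a
 * static initializer (a libc htole16() call cannot, in general).
 */
#include <stdint.h>
#include <stdio.h>

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define my_cpu_to_le16(x) (x)
#else
#define my_cpu_to_le16(x) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8))
#endif

struct desc_header {
	uint16_t length;	/* stored little-endian on the wire */
};

/* Legal at file scope: the macro folds to an integer constant. */
static const struct desc_header header = {
	.length = my_cpu_to_le16(sizeof(struct desc_header)),
};

int main(void)
{
	printf("length field: 0x%04x\n", header.length);
	return 0;
}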
*/ + if (kvm->created_vcpus != atomic_read(&kvm->online_vcpus)) + return -EBUSY; + /* freeze the number of spis */ if (!dist->nr_spis) dist->nr_spis = VGIC_NR_IRQS_LEGACY - VGIC_NR_PRIVATE_IRQS; diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c index af003268bf3ef2536719c8b17f49f68583fa7970..7ea5928244fa38dbb6aa1d54648a893935a79268 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v2.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c @@ -348,6 +348,9 @@ static void vgic_mmio_write_apr(struct kvm_vcpu *vcpu, if (n > vgic_v3_max_apr_idx(vcpu)) return; + + n = array_index_nospec(n, 4); + /* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */ vgicv3->vgic_ap1r[n] = val; } diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c index 6b4fcd52f14c86f84e414092a6985e8510a11b0a..a37b03c254575f4a4e4493d7996b27902877f34b 100644 --- a/virt/kvm/arm/vgic/vgic-v3.c +++ b/virt/kvm/arm/vgic/vgic-v3.c @@ -492,11 +492,6 @@ int vgic_v3_probe(const struct gic_kvm_info *info) pr_warn("GICV physical address 0x%llx not page aligned\n", (unsigned long long)info->vcpu.start); kvm_vgic_global_state.vcpu_base = 0; - } else if (!PAGE_ALIGNED(resource_size(&info->vcpu))) { - pr_warn("GICV size 0x%llx not a multiple of page size 0x%lx\n", - (unsigned long long)resource_size(&info->vcpu), - PAGE_SIZE); - kvm_vgic_global_state.vcpu_base = 0; } else { kvm_vgic_global_state.vcpu_base = info->vcpu.start; kvm_vgic_global_state.can_emulate_gicv2 = true; diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c index 58a9b31b0dd54192e35e1821ba2399d0c780fce5..088734a700e939eb75c7dd46b6a98e989e12df79 100644 --- a/virt/kvm/eventfd.c +++ b/virt/kvm/eventfd.c @@ -405,11 +405,6 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args) if (events & POLLIN) schedule_work(&irqfd->inject); - /* - * do not drop the file until the irqfd is fully initialized, otherwise - * we might race against the POLLHUP - */ - fdput(f); #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS if (kvm_arch_has_irq_bypass()) { irqfd->consumer.token = (void *)irqfd->eventfd; @@ -425,6 +420,12 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args) #endif srcu_read_unlock(&kvm->irq_srcu, idx); + + /* + * do not drop the file until the irqfd is fully initialized, otherwise + * we might race against the POLLHUP + */ + fdput(f); return 0; fail:
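The stage2_set_pmd_huge() and stage2_set_pte() hunks above both add the same guard: if the entry already holds the value about to be written, skip the break-before-make sequence, since clearing and flushing an identical translation only generates refaults for the other vcpus. A self-contained sketch of that guard follows; set_entry() and the tlb_flushes counter are illustrative stand-ins, not kernel APIs:

/*
 * Skip the write when old == new: rewriting an identical entry would
 * still force a break-before-make (clear + TLB flush) and every flush
 * can trigger further refaults on the racing CPUs.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned long tlb_flushes;	/* stands in for kvm_tlb_flush_vmid_ipa() */

static void set_entry(uint64_t *entry, uint64_t new)
{
	if (*entry == new)		/* unchanged: nothing to invalidate */
		return;

	if (*entry) {			/* break-before-make on a live entry */
		*entry = 0;
		tlb_flushes++;
	}
	*entry = new;
}

int main(void)
{
	uint64_t pmd = 0;

	set_entry(&pmd, 0x80000000000745ULL);	/* first map: no flush needed */
	set_entry(&pmd, 0x80000000000745ULL);	/* racing vcpu, same value: skipped */
	set_entry(&pmd, 0x80000000000f43ULL);	/* genuine change: clear + flush */

	printf("flushes: %lu\n", tlb_flushes);	/* prints 1 */
	return 0;
}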