Andi Kleen's lto-5.8-1 branch patch updated for 5.9.0
diff --git a/Documentation/kbuild/lto-build.rst b/Documentation/kbuild/lto-build.rst | |
new file mode 100644 | |
index 000000000000..ae147c3cccd0 | |
--- /dev/null | |
+++ b/Documentation/kbuild/lto-build.rst | |
@@ -0,0 +1,74 @@ | |
+===================================================== | |
+gcc link time optimization (LTO) for the Linux kernel | |
+===================================================== | |
+ | |
+Link Time Optimization allows the compiler to optimize the complete program | |
+instead of just each file. | |
+ | |
+The compiler can inline functions between files and do various other global | |
+optimizations, like specializing functions for common parameters, | |
+determining when global variables are clobbered, making functions pure/const, | |
+propagating constants globally, removing unneeded data and others. | |
+ | |
+It will also drop unused functions which can make the kernel | |
+image smaller in some circumstances, in particular for small kernel | |
+configurations. | |
+ | |
+For small monolithic kernels it can throw away unused code very effectively | |
+(especially when modules are disabled) and usually shrinks | |
+the code size. | |
+ | |
+Build time and memory consumption at build time will increase, depending | |
+on the size of the largest binary. Modular kernels are less affected. | |
+With LTO, incremental builds are less incremental, as the whole | |
+binary always needs to be re-optimized (but not re-parsed). | |
+ | |
+Oopses can be somewhat more difficult to read, due to the more aggressive | |
+inlining: it helps to use scripts/faddr2line. | |
+ | |
+Normal "reasonable" builds work with less than 4GB of RAM, but very large | |
+configurations like allyesconfig typically need more memory. The actual | |
+memory needed depends on the available memory (gcc sizes its garbage | |
+collector pools based on that or on the ulimit -m limits) and | |
+the compiler version. | |
+ | |
+Configuration: | |
+-------------- | |
+- Enable CONFIG_LTO_MENU and then disable CONFIG_LTO_DISABLE. | |
+This is mainly so that allyesconfig does not default to LTO. | |
+ | |
+Requirements: | |
+------------- | |
+- Enough memory: 4GB for a standard build, more for allyesconfig. | |
+The peak memory usage happens single-threaded (when lto-wpa merges types), | |
+so dialing back -j options will not help much. | |
+ | |
+A 32bit compiler is unlikely to work due to the memory requirements. | |
+You can however build a kernel targeted at 32bit on a 64bit host. | |
+ | |
+FAQs: | |
+----- | |
+Q: I get a section type attribute conflict | |
+A: Usually because of someone doing | |
+const __initdata (should be const __initconst) or const __read_mostly | |
+(should be just const). Check both symbols reported by gcc. | |
+ | |
+References: | |
+----------- | |
+ | |
+Presentation on Kernel LTO | |
+(note: performance numbers/details are outdated; in particular gcc 4.9 fixed | |
+most of the build time problems): | |
+http://halobates.de/kernel-lto.pdf | |
+ | |
+Generic gcc LTO: | |
+http://www.ucw.cz/~hubicka/slides/labs2013.pdf | |
+http://www.hipeac.net/system/files/barcelona.pdf | |
+ | |
+Somewhat outdated too: | |
+http://gcc.gnu.org/projects/lto/lto.pdf | |
+http://gcc.gnu.org/projects/lto/whopr.pdf | |
+ | |
+Happy Link-Time-Optimizing! | |
+ | |
+Andi Kleen | |
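A minimal C sketch of the section type attribute conflict described in the FAQ above, using hypothetical variable names that are not part of the patch: __initdata places data in the writable .init.data section, which conflicts with const, while __initconst and plain const use read-only sections.

#include <linux/init.h>

/* Wrong: `const __initdata` asks for read-only data in the writable
 * .init.data section, which is the classic section type conflict. */
/* static const int bad_table[] __initdata = { 1, 2, 3 }; */

/* Right: const init data goes to .init.rodata via __initconst. */
static const int good_table[] __initconst = { 1, 2, 3 };

/* Likewise `const __read_mostly` conflicts (writable .data..read_mostly);
 * plain const already lands in .rodata. */
static const int good_param = 42;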
diff --git a/Makefile b/Makefile | |
index fe0164a654c7..16a135f9274a 100644 | |
--- a/Makefile | |
+++ b/Makefile | |
@@ -439,6 +439,7 @@ STRIP = llvm-strip | |
else | |
CC = $(CROSS_COMPILE)gcc | |
LD = $(CROSS_COMPILE)ld | |
+LDFINAL = $(LD) | |
AR = $(CROSS_COMPILE)ar | |
NM = $(CROSS_COMPILE)nm | |
OBJCOPY = $(CROSS_COMPILE)objcopy | |
@@ -512,7 +513,7 @@ CLANG_FLAGS := | |
export ARCH SRCARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE LD CC | |
export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE READELF PAHOLE LEX YACC AWK INSTALLKERNEL | |
export PERL PYTHON PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX | |
-export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ ZSTD | |
+export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ ZSTD LDFINAL | |
export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE | |
export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS KBUILD_LDFLAGS | |
@@ -961,6 +961,7 @@ include-$(CONFIG_KCSAN) += scripts/Make | |
include-$(CONFIG_UBSAN) += scripts/Makefile.ubsan | |
include-$(CONFIG_KCOV) += scripts/Makefile.kcov | |
include-$(CONFIG_GCC_PLUGINS) += scripts/Makefile.gcc-plugins | |
+include-$(CONFIG_LTO) += scripts/Makefile.lto | |
include $(addprefix $(srctree)/, $(include-y)) | |
@@ -1155,7 +1156,7 @@ ARCH_POSTLINK := $(wildcard $(srctree)/a | |
# Final link of vmlinux with optional arch pass after final link | |
cmd_link-vmlinux = \ | |
- $(CONFIG_SHELL) $< "$(LD)" "$(KBUILD_LDFLAGS)" "$(LDFLAGS_vmlinux)"; \ | |
+ $(CONFIG_SHELL) $< "$(LDFINAL)" "$(KBUILD_LDFLAGS)" "$(LDFLAGS_vmlinux)"; \ | |
$(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) $@, true) | |
vmlinux: scripts/link-vmlinux.sh autoksyms_recursive $(vmlinux-deps) FORCE | |
@@ -1774,10 +1776,15 @@ clean: $(clean-dirs) | |
-o -name '*.lex.c' -o -name '*.tab.[ch]' \ | |
-o -name '*.asn1.[ch]' \ | |
-o -name '*.symtypes' -o -name 'modules.order' \ | |
- -o -name '.tmp_*.o.*' \ | |
+ -o -name '.tmp_*.o' \ | |
+ -o -name '.tmp_*.sym' \ | |
+ -o -name '.tmp_*.S' \ | |
+ -o -name '.tmp_System.map' \ | |
+ -o -name '*.ver.[co]' \ | |
+ -o -name '.tmp_*_vermerged.[co]' \ | |
-o -name '*.c.[012]*.*' \ | |
-o -name '*.ll' \ | |
- -o -name '*.gcno' \) -type f -print | xargs rm -f | |
+ -o -name '*.gcno' -o -name '.kallsyms.pad' \) -type f -print | xargs rm -f | |
# Generate tags for editors | |
# --------------------------------------------------------------------------- | |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig | |
index 883da0abf779..4b5023dcfe5b 100644 | |
--- a/arch/x86/Kconfig | |
+++ b/arch/x86/Kconfig | |
@@ -147,7 +147,9 @@ config X86 | |
select HAVE_ARCH_MMAP_RND_BITS if MMU | |
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if MMU && COMPAT | |
select HAVE_ARCH_COMPAT_MMAP_BASES if MMU && COMPAT | |
- select HAVE_ARCH_PREL32_RELOCATIONS | |
+ # LTO can move assembler to different files, so all | |
+ # the init functions would need to be global for this to work | |
+ select HAVE_ARCH_PREL32_RELOCATIONS if !LTO | |
select HAVE_ARCH_SECCOMP_FILTER | |
select HAVE_ARCH_THREAD_STRUCT_WHITELIST | |
select HAVE_ARCH_STACKLEAK | |
@@ -224,6 +226,7 @@ config X86 | |
select PCI_DOMAINS if PCI | |
select PCI_LOCKLESS_CONFIG if PCI | |
select PERF_EVENTS | |
+ select ARCH_SUPPORTS_LTO | |
select RTC_LIB | |
select RTC_MC146818_LIB | |
select SPARSE_IRQ | |
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c | |
index 8fd39ff74a49..9479767991c3 100644 | |
--- a/arch/x86/kernel/alternative.c | |
+++ b/arch/x86/kernel/alternative.c | |
@@ -631,11 +631,12 @@ extern struct paravirt_patch_site __start_parainstructions[], | |
* convention such that we can 'call' it from assembly. | |
*/ | |
-extern void int3_magic(unsigned int *ptr); /* defined in asm */ | |
+extern __visible void int3_magic(unsigned int *ptr); /* defined in asm */ | |
asm ( | |
" .pushsection .init.text, \"ax\", @progbits\n" | |
" .type int3_magic, @function\n" | |
+" .globl int3_magic\n" | |
"int3_magic:\n" | |
" movl $1, (%" _ASM_ARG1 ")\n" | |
" ret\n" | |
@@ -643,7 +644,7 @@ asm ( | |
" .popsection\n" | |
); | |
-extern __initdata unsigned long int3_selftest_ip; /* defined in asm below */ | |
+extern __initdata __visible unsigned long int3_selftest_ip; /* defined in asm below */ | |
static int __init | |
int3_exception_notify(struct notifier_block *self, unsigned long val, void *data) | |
@@ -684,6 +685,7 @@ static void __init int3_selftest(void) | |
asm volatile ("1: int3; nop; nop; nop; nop\n\t" | |
".pushsection .init.data,\"aw\"\n\t" | |
".align " __ASM_SEL(4, 8) "\n\t" | |
+ ".globl int3_selftest_ip\n\t" | |
".type int3_selftest_ip, @object\n\t" | |
".size int3_selftest_ip, " __ASM_SEL(4, 8) "\n\t" | |
"int3_selftest_ip:\n\t" | |
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile | |
index 088bd764e0b7..095184b0baa1 100644 | |
--- a/arch/x86/purgatory/Makefile | |
+++ b/arch/x86/purgatory/Makefile | |
@@ -1,5 +1,6 @@ | |
# SPDX-License-Identifier: GPL-2.0 | |
OBJECT_FILES_NON_STANDARD := y | |
+KBUILD_CFLAGS += $(DISABLE_LTO) | |
purgatory-y := purgatory.o stack.o setup-x86_$(BITS).o sha256.o entry64.o string.o | |
diff --git a/arch/x86/realmode/Makefile b/arch/x86/realmode/Makefile | |
index 6b1f3a4eeb44..7152f47934eb 100644 | |
--- a/arch/x86/realmode/Makefile | |
+++ b/arch/x86/realmode/Makefile | |
@@ -10,6 +10,7 @@ | |
# Sanitizer runtimes are unavailable and cannot be linked here. | |
KASAN_SANITIZE := n | |
KCSAN_SANITIZE := n | |
+KBUILD_CFLAGS += $(DISABLE_LTO) | |
OBJECT_FILES_NON_STANDARD := y | |
subdir- := rm | |
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile | |
index 4cce372edaf4..51784bd75d9e 100644 | |
--- a/drivers/firmware/efi/libstub/Makefile | |
+++ b/drivers/firmware/efi/libstub/Makefile | |
@@ -57,6 +57,7 @@ efi-deps-y := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c | |
$(obj)/lib-%.o: $(srctree)/lib/%.c FORCE | |
$(call if_changed_rule,cc_o_c) | |
+ $(if $(CONFIG_MODVERSIONS),touch $(@:.o=.ver.c)) | |
lib-$(CONFIG_EFI_GENERIC_STUB) += efi-stub.o fdt.o string.o \ | |
$(patsubst %.c,lib-%.o,$(efi-deps-y)) | |
@@ -123,4 +124,5 @@ quiet_cmd_stubcopy = STUBCPY $@ | |
echo "$@: absolute symbol references not allowed in the EFI stub" >&2; \ | |
/bin/false; \ | |
fi; \ | |
- $(OBJCOPY) $(STUBCOPY_FLAGS-y) $< $@ | |
+ $(OBJCOPY) $(STUBCOPY_FLAGS-y) $< $@; \ | |
+ $(if $(CONFIG_MODVERSIONS),touch $(@:.o=.ver.c),true) | |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c | |
index 7c8786b9eb0a..e70ad263890d 100644 | |
--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c | |
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c | |
@@ -330,8 +330,8 @@ int kfd_iommu_resume(struct kfd_dev *kfd) | |
} | |
extern bool amd_iommu_pc_supported(void); | |
-extern u8 amd_iommu_pc_get_max_banks(u16 devid); | |
-extern u8 amd_iommu_pc_get_max_counters(u16 devid); | |
+extern u8 amd_iommu_pc_get_max_banks(unsigned devid); | |
+extern u8 amd_iommu_pc_get_max_counters(unsigned devid); | |
/** kfd_iommu_add_perf_counters - Add IOMMU performance counters to topology | |
*/ | |
diff --git a/drivers/media/platform/sti/delta/delta-ipc.c b/drivers/media/platform/sti/delta/delta-ipc.c | |
index 186d88f02ecd..371429d81ea1 100644 | |
--- a/drivers/media/platform/sti/delta/delta-ipc.c | |
+++ b/drivers/media/platform/sti/delta/delta-ipc.c | |
@@ -175,8 +175,8 @@ int delta_ipc_open(struct delta_ctx *pctx, const char *name, | |
msg.ipc_buf_size = ipc_buf_size; | |
msg.ipc_buf_paddr = ctx->ipc_buf->paddr; | |
- memcpy(msg.name, name, sizeof(msg.name)); | |
- msg.name[sizeof(msg.name) - 1] = 0; | |
+ memset(msg.name, 0, sizeof(msg.name)); | |
+ strcpy(msg.name, name); | |
msg.param_size = param->size; | |
memcpy(ctx->ipc_buf->vaddr, param->data, msg.param_size); | |
diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile | |
index c70b3822013f..2a7157158bee 100644 | |
--- a/drivers/misc/lkdtm/Makefile | |
+++ b/drivers/misc/lkdtm/Makefile | |
@@ -20,3 +20,4 @@ OBJCOPYFLAGS_rodata_objcopy.o := \ | |
targets += rodata.o rodata_objcopy.o | |
$(obj)/rodata_objcopy.o: $(obj)/rodata.o FORCE | |
$(call if_changed,objcopy) | |
+ $(if $(CONFIG_MODVERSIONS),touch $(obj)/rodata_objcopy.ver.c) | |
diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c | |
index 569d9ad2c594..4016020a957c 100644 | |
--- a/drivers/ras/cec.c | |
+++ b/drivers/ras/cec.c | |
@@ -553,20 +553,20 @@ static struct notifier_block cec_nb = { | |
.priority = MCE_PRIO_CEC, | |
}; | |
-static void __init cec_init(void) | |
+static int __init cec_init(void) | |
{ | |
if (ce_arr.disabled) | |
- return; | |
+ return 0; | |
ce_arr.array = (void *)get_zeroed_page(GFP_KERNEL); | |
if (!ce_arr.array) { | |
pr_err("Error allocating CE array page!\n"); | |
- return; | |
+ return -ENOMEM; | |
} | |
if (create_debugfs_nodes()) { | |
free_page((unsigned long)ce_arr.array); | |
- return; | |
+ return -ENOMEM; | |
} | |
INIT_DELAYED_WORK(&cec_work, cec_work_fn); | |
@@ -575,6 +575,7 @@ static void __init cec_init(void) | |
mce_register_decode_chain(&cec_nb); | |
pr_info("Correctable Errors collector initialized.\n"); | |
+ return 0; | |
} | |
late_initcall(cec_init); | |
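For context on the cec_init() conversion above: with the asm-based PREL32 initcall path disabled under LTO (see the arch/x86/Kconfig hunk earlier), initcalls go through the C branch of ___define_initcall() and must match initcall_t, i.e. return int. A hypothetical sketch of the expected shape:

#include <linux/init.h>

/* initcall_t is int (*)(void); example_collector_init is hypothetical */
static int __init example_collector_init(void)
{
	/* ... allocate and register resources ... */
	return 0;	/* 0 on success, -errno on failure */
}
late_initcall(example_collector_init);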
diff --git a/drivers/xen/time.c b/drivers/xen/time.c | |
index 108edbcbc040..fc7dd451b019 100644 | |
--- a/drivers/xen/time.c | |
+++ b/drivers/xen/time.c | |
@@ -144,7 +144,7 @@ void xen_get_runstate_snapshot(struct vcpu_runstate_info *res) | |
} | |
/* return true when a vcpu could run but has no real cpu to run on */ | |
-bool xen_vcpu_stolen(int vcpu) | |
+__visible bool xen_vcpu_stolen(int vcpu) | |
{ | |
return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable; | |
} | |
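xen_vcpu_stolen() above is referenced outside the compiler's view (e.g. from assembly), so __visible (externally_visible) keeps gcc LTO from localizing or dropping it. The same pattern, sketched with a hypothetical function name that is not part of the patch:

#include <linux/types.h>
#include <linux/compiler.h>

/* Called only from assembly; without __visible, whole-program
 * optimization may assume the symbol is unused and remove it. */
__visible bool example_stolen(int vcpu)
{
	return false;	/* placeholder body for the sketch */
}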
diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h | |
index 365345f9a9e3..7192c7599305 100644 | |
--- a/include/asm-generic/export.h | |
+++ b/include/asm-generic/export.h | |
@@ -43,17 +43,6 @@ __ksymtab_\name: | |
__kstrtab_\name: | |
.asciz "\name" | |
.previous | |
-#ifdef CONFIG_MODVERSIONS | |
- .section ___kcrctab\sec+\name,"a" | |
- .balign KCRC_ALIGN | |
-#if defined(CONFIG_MODULE_REL_CRCS) | |
- .long __crc_\name - . | |
-#else | |
- .long __crc_\name | |
-#endif | |
- .weak __crc_\name | |
- .previous | |
-#endif | |
#endif | |
.endm | |
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h | |
index db600ef218d7..2b369176ab48 100644 | |
--- a/include/asm-generic/vmlinux.lds.h | |
+++ b/include/asm-generic/vmlinux.lds.h | |
@@ -437,6 +437,12 @@ | |
\ | |
TRACEDATA \ | |
\ | |
+ .kallsyms : AT(ADDR(.kallsyms) - LOAD_OFFSET) { \ | |
+ __start_kallsyms = .; \ | |
+ *(.kallsyms) \ | |
+ __end_kallsyms = .; \ | |
+ } \ | |
+ \ | |
/* Kernel symbol table: Normal symbols */ \ | |
__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \ | |
__start___ksymtab = .; \ | |
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h | |
index c8f03d2969df..81111ed82c0f 100644 | |
--- a/include/linux/compiler_attributes.h | |
+++ b/include/linux/compiler_attributes.h | |
@@ -39,6 +39,7 @@ | |
# define __GCC4_has_attribute___externally_visible__ 1 | |
# define __GCC4_has_attribute___noclone__ 1 | |
# define __GCC4_has_attribute___nonstring__ 0 | |
+# define __GCC4_has_attribute___no_reorder__ 0 | |
# define __GCC4_has_attribute___no_sanitize_address__ (__GNUC_MINOR__ >= 8) | |
# define __GCC4_has_attribute___no_sanitize_undefined__ (__GNUC_MINOR__ >= 9) | |
# define __GCC4_has_attribute___fallthrough__ 0 | |
@@ -271,4 +272,14 @@ | |
*/ | |
#define __weak __attribute__((__weak__)) | |
+/* | |
+ * https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#Common-Function-Attributes | |
+ */ | |
+ | |
+#if __has_attribute(__no_reorder__) | |
+#define __noreorder __attribute__((no_reorder)) | |
+#else | |
+#define __noreorder | |
+#endif | |
+ | |
#endif /* __LINUX_COMPILER_ATTRIBUTES_H */ | |
diff --git a/include/linux/export.h b/include/linux/export.h | |
index fceb5e855717..1f53c280174f 100644 | |
--- a/include/linux/export.h | |
+++ b/include/linux/export.h | |
@@ -19,26 +19,6 @@ extern struct module __this_module; | |
#define THIS_MODULE ((struct module *)0) | |
#endif | |
-#ifdef CONFIG_MODVERSIONS | |
-/* Mark the CRC weak since genksyms apparently decides not to | |
- * generate a checksums for some symbols */ | |
-#if defined(CONFIG_MODULE_REL_CRCS) | |
-#define __CRC_SYMBOL(sym, sec) \ | |
- asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \ | |
- " .weak __crc_" #sym " \n" \ | |
- " .long __crc_" #sym " - . \n" \ | |
- " .previous \n") | |
-#else | |
-#define __CRC_SYMBOL(sym, sec) \ | |
- asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \ | |
- " .weak __crc_" #sym " \n" \ | |
- " .long __crc_" #sym " \n" \ | |
- " .previous \n") | |
-#endif | |
-#else | |
-#define __CRC_SYMBOL(sym, sec) | |
-#endif | |
- | |
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS | |
#include <linux/compiler.h> | |
/* | |
@@ -78,7 +58,7 @@ struct kernel_symbol { | |
#ifdef __GENKSYMS__ | |
-#define ___EXPORT_SYMBOL(sym, sec, ns) __GENKSYMS_EXPORT_SYMBOL(sym) | |
+#define ___EXPORT_SYMBOL(sym, sec, ns) __GENKSYMS_EXPORT_SYMBOL(sec, sym) | |
#else | |
@@ -96,12 +76,13 @@ struct kernel_symbol { | |
*/ | |
#define ___EXPORT_SYMBOL(sym, sec, ns) \ | |
extern typeof(sym) sym; \ | |
- extern const char __kstrtab_##sym[]; \ | |
- extern const char __kstrtabns_##sym[]; \ | |
- __CRC_SYMBOL(sym, sec); \ | |
+ extern const char __visible __kstrtab_##sym[]; \ | |
+ extern const char __visible __kstrtabns_##sym[]; \ | |
asm(" .section \"__ksymtab_strings\",\"aMS\",%progbits,1 \n" \ | |
+ " .globl __kstrtab_" #sym " \n" \ | |
"__kstrtab_" #sym ": \n" \ | |
" .asciz \"" #sym "\" \n" \ | |
+ " .globl __kstrtabns_" #sym " \n" \ | |
"__kstrtabns_" #sym ": \n" \ | |
" .asciz \"" ns "\" \n" \ | |
" .previous \n"); \ | |
diff --git a/include/linux/init.h b/include/linux/init.h | |
index 212fc9e2f691..4cd2d79c6a21 100644 | |
--- a/include/linux/init.h | |
+++ b/include/linux/init.h | |
@@ -193,7 +193,7 @@ extern bool initcall_debug; | |
".previous \n"); | |
#else | |
#define ___define_initcall(fn, id, __sec) \ | |
- static initcall_t __initcall_##fn##id __used \ | |
+ static initcall_t __initcall_##fn##id __used __noreorder \ | |
__attribute__((__section__(#__sec ".init"))) = fn; | |
#endif | |
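For illustration, this is roughly what the non-asm branch of ___define_initcall() above now expands to for a hypothetical device_initcall (level 6); __noreorder keeps gcc LTO from reordering the top-level initcall pointers, whose link order within a level matters:

#include <linux/init.h>

static int __init example_driver_init(void)	/* hypothetical */
{
	return 0;
}

/* sketch of ___define_initcall(example_driver_init, 6, .initcall6) */
static initcall_t __initcall_example_driver_init6 __used __noreorder
	__attribute__((__section__(".initcall6.init"))) = example_driver_init;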
diff --git a/include/linux/linkage.h b/include/linux/linkage.h | |
index d796ec20d114..56a6f16b0e5b 100644 | |
--- a/include/linux/linkage.h | |
+++ b/include/linux/linkage.h | |
@@ -23,17 +23,13 @@ | |
#endif | |
#ifndef cond_syscall | |
-#define cond_syscall(x) asm( \ | |
- ".weak " __stringify(x) "\n\t" \ | |
- ".set " __stringify(x) "," \ | |
- __stringify(sys_ni_syscall)) | |
+#define cond_syscall(x) \ | |
+ extern long x(void) __attribute__((alias("sys_ni_syscall"), weak)); | |
#endif | |
#ifndef SYSCALL_ALIAS | |
-#define SYSCALL_ALIAS(alias, name) asm( \ | |
- ".globl " __stringify(alias) "\n\t" \ | |
- ".set " __stringify(alias) "," \ | |
- __stringify(name)) | |
+#define SYSCALL_ALIAS(a, name) \ | |
+ long a(void) __attribute__((alias(__stringify(name)))) | |
#endif | |
#define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE) | |
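A sketch of why cond_syscall() moves from an asm-level .weak/.set alias to a C-level weak alias: the asm form is invisible to the compiler, so under LTO gcc cannot tell that sys_ni_syscall is still referenced and may drop or rename it, while the attribute form keeps the relationship visible in the IR. example_syscall below is hypothetical; in the kernel this expansion lives in kernel/sys_ni.c, where sys_ni_syscall is defined:

long sys_ni_syscall(void)
{
	return -38;	/* -ENOSYS: the stub for unimplemented syscalls */
}

/* new cond_syscall(example_syscall) expansion: a weak alias gcc can see */
extern long example_syscall(void)
	__attribute__((alias("sys_ni_syscall"), weak));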
diff --git a/init/Kconfig b/init/Kconfig | |
index 0498af567f70..9fc76c39199b 100644 | |
--- a/init/Kconfig | |
+++ b/init/Kconfig | |
@@ -1319,6 +1319,86 @@ config LD_DEAD_CODE_DATA_ELIMINATION | |
present. This option is not well tested yet, so use at your | |
own risk. | |
+config ARCH_SUPPORTS_LTO | |
+ bool | |
+ | |
+# Some ar versions leak file descriptors when using the LTO | |
+# plugin and cause strange errors when ulimit -n is too low. | |
+# Pick an arbitrary threshold, which should be enough for most | |
+# kernel configs. This was a regression that existed only | |
+# in some transient binutils versions, so either older or | |
+# new enough is ok. | |
+# This might not be the exact version range affected by the bug. | |
+config BAD_AR | |
+ depends on LD_VERSION = 230000000 | |
+ depends on $(shell,ulimit -n) < 4000 | |
+ def_bool y | |
+ | |
+config LTO_MENU | |
+ bool "Enable gcc link time optimization (LTO)" | |
+ depends on ARCH_SUPPORTS_LTO | |
+# for now. Does LLVM need the gcc infrastructure at all? | |
+ depends on CC_IS_GCC | |
+# 4.7 works mostly, but it sometimes loses symbols on large builds | |
+# This can be worked around by marking those symbols visible, | |
+# but that is fairly ugly and the problem is gone with 4.8 | |
+# 4.8 was very slow | |
+# 4.9 was missing __attribute__((noreorder)) for ordering initcalls, | |
+# and needed -fno-toplevel-reorder, which can lead to missing symbols | |
+# 5.0 ICEs with newer kernels | |
+# so only support 6.0+ | |
+ depends on GCC_VERSION >= 60000 | |
+# binutils before 2.27 has various problems with plugins | |
+ depends on LD_VERSION >= 227000000 | |
+ depends on !BAD_AR | |
+ help | |
+ Enable whole program (link time) optimization (LTO) for the | |
+ whole kernel and each module. This usually increases compile time, | |
+ but can lead to better code. It allows the compiler to inline | |
+ functions between different files and do other global optimization. | |
+ It allows the compiler to drop unused code. | |
+ | |
+ With this option the compiler will also do some global checking over | |
+ different source files. | |
+ | |
+ This requires a gcc 6.0 or later compiler and reasonably recent binutils. | |
+ | |
+ On larger non modular configurations this may need more than 4GB of | |
+ RAM for the link phase. It will likely not work on those with a | |
+ 32bit hosted compiler. | |
+ | |
+ For more information see Documentation/kbuild/lto-build.rst | |
+ | |
+config LTO_DISABLE | |
+ bool "Disable LTO again" | |
+ depends on LTO_MENU | |
+ default n | |
+ help | |
+ This option is merely here so that allyesconfig or allmodconfig do | |
+ not enable LTO. If you want to actually use LTO, do not enable this option. | |
+ | |
+config LTO | |
+ bool | |
+ default y | |
+ depends on LTO_MENU && !LTO_DISABLE | |
+ | |
+config LTO_CP_CLONE | |
+ bool "Allow aggressive cloning for function specialization" | |
+ depends on LTO | |
+ help | |
+ Allow the compiler to clone and specialize functions for specific | |
+ arguments when it determines these arguments are very commonly | |
+ called. Experimental. Will increase text size. | |
+ | |
+config SINGLE_LINK | |
+ bool "Use single linking step for final vmlinux" | |
+ default y if LTO | |
+ # for now. In theory should work everywhere except for um | |
+ depends on ARCH_SUPPORTS_LTO | |
+ help | |
+ Use only a single linking step for the final vmlinux, making | |
+ the build slightly faster. | |
+ | |
config SYSCTL | |
bool | |
@@ -1643,6 +1723,17 @@ config KALLSYMS_BASE_RELATIVE | |
time constants, and no relocation pass is required at runtime to fix | |
up the entries based on the runtime load address of the kernel. | |
+config KALLSYMS_SINGLE | |
+ bool "Single pass kallsyms" | |
+ default y if LTO | |
+ # For now. Needs to be tested on other architectures. | |
+ depends on X86 | |
+ depends on !(KALLSYMS_ALL && LTO) | |
+ help | |
+ Use a single pass to generate kallsyms. This will speed up the build, | |
+ but might slightly increase the binary size. Also an experimental | |
+ feature. With LTO it currently only covers functions. | |
+ | |
# end of the "standard kernel features (expert users)" menu | |
# syscall, maps, verifier | |
diff --git a/kernel/Makefile b/kernel/Makefile | |
index f3218bc5ec69..2496ed72e8f5 100644 | |
--- a/kernel/Makefile | |
+++ b/kernel/Makefile | |
@@ -37,9 +37,6 @@ KASAN_SANITIZE_kcov.o := n | |
KCSAN_SANITIZE_kcov.o := n | |
CFLAGS_kcov.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) | |
-# cond_syscall is currently not LTO compatible | |
-CFLAGS_sys_ni.o = $(DISABLE_LTO) | |
- | |
obj-y += sched/ | |
obj-y += locking/ | |
obj-y += power/ | |
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c | |
index 9df4cc9a2907..fc2e139e01b2 100644 | |
--- a/kernel/bpf/core.c | |
+++ b/kernel/bpf/core.c | |
@@ -1364,7 +1364,7 @@ u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr) | |
* | |
* Decode and execute eBPF instructions. | |
*/ | |
-static u64 __no_fgcse ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack) | |
+static u64 __noreorder __no_fgcse ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack) | |
{ | |
#define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y | |
#define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z | |
diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c | |
index 0ff08380f531..f2b65270e6e4 100644 | |
--- a/kernel/locking/spinlock.c | |
+++ b/kernel/locking/spinlock.c | |
@@ -130,7 +130,7 @@ BUILD_LOCK_OPS(write, rwlock); | |
#endif | |
#ifndef CONFIG_INLINE_SPIN_TRYLOCK | |
-int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock) | |
+noinline int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock) | |
{ | |
return __raw_spin_trylock(lock); | |
} | |
@@ -138,7 +138,7 @@ EXPORT_SYMBOL(_raw_spin_trylock); | |
#endif | |
#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH | |
-int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock) | |
+noinline int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock) | |
{ | |
return __raw_spin_trylock_bh(lock); | |
} | |
@@ -146,7 +146,7 @@ EXPORT_SYMBOL(_raw_spin_trylock_bh); | |
#endif | |
#ifndef CONFIG_INLINE_SPIN_LOCK | |
-void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) | |
+noinline void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) | |
{ | |
__raw_spin_lock(lock); | |
} | |
@@ -154,7 +154,7 @@ EXPORT_SYMBOL(_raw_spin_lock); | |
#endif | |
#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE | |
-unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock) | |
+noinline unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock) | |
{ | |
return __raw_spin_lock_irqsave(lock); | |
} | |
@@ -162,7 +162,7 @@ EXPORT_SYMBOL(_raw_spin_lock_irqsave); | |
#endif | |
#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ | |
-void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock) | |
+noinline void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock) | |
{ | |
__raw_spin_lock_irq(lock); | |
} | |
@@ -170,7 +170,7 @@ EXPORT_SYMBOL(_raw_spin_lock_irq); | |
#endif | |
#ifndef CONFIG_INLINE_SPIN_LOCK_BH | |
-void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) | |
+noinline void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) | |
{ | |
__raw_spin_lock_bh(lock); | |
} | |
@@ -178,7 +178,7 @@ EXPORT_SYMBOL(_raw_spin_lock_bh); | |
#endif | |
#ifdef CONFIG_UNINLINE_SPIN_UNLOCK | |
-void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock) | |
+noinline void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock) | |
{ | |
__raw_spin_unlock(lock); | |
} | |
@@ -186,7 +186,7 @@ EXPORT_SYMBOL(_raw_spin_unlock); | |
#endif | |
#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE | |
-void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags) | |
+noinline void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags) | |
{ | |
__raw_spin_unlock_irqrestore(lock, flags); | |
} | |
@@ -194,7 +194,7 @@ EXPORT_SYMBOL(_raw_spin_unlock_irqrestore); | |
#endif | |
#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ | |
-void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock) | |
+noinline void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock) | |
{ | |
__raw_spin_unlock_irq(lock); | |
} | |
@@ -202,7 +202,7 @@ EXPORT_SYMBOL(_raw_spin_unlock_irq); | |
#endif | |
#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH | |
-void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) | |
+noinline void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) | |
{ | |
__raw_spin_unlock_bh(lock); | |
} | |
@@ -210,7 +210,7 @@ EXPORT_SYMBOL(_raw_spin_unlock_bh); | |
#endif | |
#ifndef CONFIG_INLINE_READ_TRYLOCK | |
-int __lockfunc _raw_read_trylock(rwlock_t *lock) | |
+noinline int __lockfunc _raw_read_trylock(rwlock_t *lock) | |
{ | |
return __raw_read_trylock(lock); | |
} | |
@@ -218,7 +218,7 @@ EXPORT_SYMBOL(_raw_read_trylock); | |
#endif | |
#ifndef CONFIG_INLINE_READ_LOCK | |
-void __lockfunc _raw_read_lock(rwlock_t *lock) | |
+noinline void __lockfunc _raw_read_lock(rwlock_t *lock) | |
{ | |
__raw_read_lock(lock); | |
} | |
@@ -226,7 +226,7 @@ EXPORT_SYMBOL(_raw_read_lock); | |
#endif | |
#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE | |
-unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock) | |
+noinline unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock) | |
{ | |
return __raw_read_lock_irqsave(lock); | |
} | |
@@ -234,7 +234,7 @@ EXPORT_SYMBOL(_raw_read_lock_irqsave); | |
#endif | |
#ifndef CONFIG_INLINE_READ_LOCK_IRQ | |
-void __lockfunc _raw_read_lock_irq(rwlock_t *lock) | |
+noinline void __lockfunc _raw_read_lock_irq(rwlock_t *lock) | |
{ | |
__raw_read_lock_irq(lock); | |
} | |
@@ -242,7 +242,7 @@ EXPORT_SYMBOL(_raw_read_lock_irq); | |
#endif | |
#ifndef CONFIG_INLINE_READ_LOCK_BH | |
-void __lockfunc _raw_read_lock_bh(rwlock_t *lock) | |
+noinline void __lockfunc _raw_read_lock_bh(rwlock_t *lock) | |
{ | |
__raw_read_lock_bh(lock); | |
} | |
@@ -250,7 +250,7 @@ EXPORT_SYMBOL(_raw_read_lock_bh); | |
#endif | |
#ifndef CONFIG_INLINE_READ_UNLOCK | |
-void __lockfunc _raw_read_unlock(rwlock_t *lock) | |
+noinline void __lockfunc _raw_read_unlock(rwlock_t *lock) | |
{ | |
__raw_read_unlock(lock); | |
} | |
@@ -258,7 +258,7 @@ EXPORT_SYMBOL(_raw_read_unlock); | |
#endif | |
#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE | |
-void __lockfunc _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | |
+noinline void __lockfunc _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | |
{ | |
__raw_read_unlock_irqrestore(lock, flags); | |
} | |
@@ -266,7 +266,7 @@ EXPORT_SYMBOL(_raw_read_unlock_irqrestore); | |
#endif | |
#ifndef CONFIG_INLINE_READ_UNLOCK_IRQ | |
-void __lockfunc _raw_read_unlock_irq(rwlock_t *lock) | |
+noinline void __lockfunc _raw_read_unlock_irq(rwlock_t *lock) | |
{ | |
__raw_read_unlock_irq(lock); | |
} | |
@@ -274,7 +274,7 @@ EXPORT_SYMBOL(_raw_read_unlock_irq); | |
#endif | |
#ifndef CONFIG_INLINE_READ_UNLOCK_BH | |
-void __lockfunc _raw_read_unlock_bh(rwlock_t *lock) | |
+noinline void __lockfunc _raw_read_unlock_bh(rwlock_t *lock) | |
{ | |
__raw_read_unlock_bh(lock); | |
} | |
@@ -282,7 +282,7 @@ EXPORT_SYMBOL(_raw_read_unlock_bh); | |
#endif | |
#ifndef CONFIG_INLINE_WRITE_TRYLOCK | |
-int __lockfunc _raw_write_trylock(rwlock_t *lock) | |
+noinline int __lockfunc _raw_write_trylock(rwlock_t *lock) | |
{ | |
return __raw_write_trylock(lock); | |
} | |
@@ -290,7 +290,7 @@ EXPORT_SYMBOL(_raw_write_trylock); | |
#endif | |
#ifndef CONFIG_INLINE_WRITE_LOCK | |
-void __lockfunc _raw_write_lock(rwlock_t *lock) | |
+noinline void __lockfunc _raw_write_lock(rwlock_t *lock) | |
{ | |
__raw_write_lock(lock); | |
} | |
@@ -298,7 +298,7 @@ EXPORT_SYMBOL(_raw_write_lock); | |
#endif | |
#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE | |
-unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock) | |
+noinline unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock) | |
{ | |
return __raw_write_lock_irqsave(lock); | |
} | |
@@ -306,7 +306,7 @@ EXPORT_SYMBOL(_raw_write_lock_irqsave); | |
#endif | |
#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ | |
-void __lockfunc _raw_write_lock_irq(rwlock_t *lock) | |
+noinline void __lockfunc _raw_write_lock_irq(rwlock_t *lock) | |
{ | |
__raw_write_lock_irq(lock); | |
} | |
@@ -314,7 +314,7 @@ EXPORT_SYMBOL(_raw_write_lock_irq); | |
#endif | |
#ifndef CONFIG_INLINE_WRITE_LOCK_BH | |
-void __lockfunc _raw_write_lock_bh(rwlock_t *lock) | |
+noinline void __lockfunc _raw_write_lock_bh(rwlock_t *lock) | |
{ | |
__raw_write_lock_bh(lock); | |
} | |
@@ -322,7 +322,7 @@ EXPORT_SYMBOL(_raw_write_lock_bh); | |
#endif | |
#ifndef CONFIG_INLINE_WRITE_UNLOCK | |
-void __lockfunc _raw_write_unlock(rwlock_t *lock) | |
+noinline void __lockfunc _raw_write_unlock(rwlock_t *lock) | |
{ | |
__raw_write_unlock(lock); | |
} | |
@@ -330,7 +330,7 @@ EXPORT_SYMBOL(_raw_write_unlock); | |
#endif | |
#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE | |
-void __lockfunc _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | |
+noinline void __lockfunc _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | |
{ | |
__raw_write_unlock_irqrestore(lock, flags); | |
} | |
@@ -338,7 +338,7 @@ EXPORT_SYMBOL(_raw_write_unlock_irqrestore); | |
#endif | |
#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ | |
-void __lockfunc _raw_write_unlock_irq(rwlock_t *lock) | |
+noinline void __lockfunc _raw_write_unlock_irq(rwlock_t *lock) | |
{ | |
__raw_write_unlock_irq(lock); | |
} | |
@@ -346,7 +346,7 @@ EXPORT_SYMBOL(_raw_write_unlock_irq); | |
#endif | |
#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH | |
-void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) | |
+noinline void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) | |
{ | |
__raw_write_unlock_bh(lock); | |
} | |
--- a/scripts/Makefile 2020-10-28 | |
+++ b/scripts/Makefile 2020-10-28 | |
@@ -5,6 +5,7 @@ | |
hostprogs-always-$(CONFIG_BUILD_BIN2C) += bin2c | |
hostprogs-always-$(CONFIG_KALLSYMS) += kallsyms | |
+hostprogs-always-$(CONFIG_KALLSYMS_SINGLE) += patchfile | |
hostprogs-always-$(BUILD_C_RECORDMCOUNT) += recordmcount | |
hostprogs-always-$(CONFIG_BUILDTIME_TABLE_SORT) += sorttable | |
hostprogs-always-$(CONFIG_ASN1) += asn1_compiler | |
diff --git a/scripts/Makefile.build b/scripts/Makefile.build | |
index 2e8810b7e5ed..9ef096afd050 100644 | |
--- a/scripts/Makefile.build | |
+++ b/scripts/Makefile.build | |
@@ -38,6 +38,8 @@ subdir-ccflags-y := | |
include scripts/Kbuild.include | |
+include scripts/Makefile.crc | |
+ | |
# The filename Kbuild has precedence over Makefile | |
kbuild-dir := $(if $(filter /%,$(src)),$(src),$(srctree)/$(src)) | |
kbuild-file := $(if $(wildcard $(kbuild-dir)/Kbuild),$(kbuild-dir)/Kbuild,$(kbuild-dir)/Makefile) | |
@@ -122,7 +124,7 @@ $(obj)/%.i: $(src)/%.c FORCE | |
# These mirror gensymtypes_S and co below, keep them in synch. | |
cmd_gensymtypes_c = \ | |
$(CPP) -D__GENKSYMS__ $(c_flags) $< | \ | |
- scripts/genksyms/genksyms $(if $(1), -T $(2)) \ | |
+ scripts/genksyms/genksyms -c $(if $(1), -T $(2)) \ | |
$(patsubst y,-R,$(CONFIG_MODULE_REL_CRCS)) \ | |
$(if $(KBUILD_PRESERVE),-p) \ | |
-r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null)) | |
@@ -154,24 +156,25 @@ ifdef CONFIG_MODVERSIONS | |
# When module versioning is enabled the following steps are executed: | |
# o compile a <file>.o from <file>.c | |
# o if <file>.o doesn't contain a __ksymtab version, i.e. does | |
-# not export symbols, it's done. | |
+# not export symbols, we generate an empty <file>.ver.c and | |
+# we are done. | |
# o otherwise, we calculate symbol versions using the good old | |
-# genksyms on the preprocessed source and postprocess them in a way | |
-# that they are usable as a linker script | |
-# o generate .tmp_<file>.o from <file>.o using the linker to | |
-# replace the unresolved symbols __crc_exported_symbol with | |
-# the actual value of the checksum generated by genksyms | |
-# o remove .tmp_<file>.o to <file>.o | |
+# genksyms on the preprocessed source and generate C code into | |
+# a <file>.ver.c file. The C file contains one __crc_ symbol | |
+# holding the CRC value for each exported symbol. | |
+# o later the .ver.c files are concatenated and linked with | |
+# the kernel or module. | |
+# o We also generate a .ver.o, but that is only used by modpost | |
+# in case this was a single file module. | |
cmd_modversions_c = \ | |
if $(OBJDUMP) -h $@ | grep -q __ksymtab; then \ | |
$(call cmd_gensymtypes_c,$(KBUILD_SYMTYPES),$(@:.o=.symtypes)) \ | |
- > $(@D)/.tmp_$(@F:.o=.ver); \ | |
- \ | |
- $(LD) $(KBUILD_LDFLAGS) -r -o $(@D)/.tmp_$(@F) $@ \ | |
- -T $(@D)/.tmp_$(@F:.o=.ver); \ | |
- mv -f $(@D)/.tmp_$(@F) $@; \ | |
- rm -f $(@D)/.tmp_$(@F:.o=.ver); \ | |
+ > $(@D)/$(@F:.o=.ver.c); \ | |
+ $(if $(single-file-module), \ | |
+ $(CC) -c -o $(@D)/$(@F:.o=.ver.o) $(@D)/$(@F:.o=.ver.c);) \ | |
+ else \ | |
+ echo > $(@D)/$(@F:.o=.ver.c); \ | |
fi | |
endif | |
@@ -289,7 +292,8 @@ $(obj)/%.mod: $(obj)/%.o FORCE | |
$(call if_changed,mod) | |
quiet_cmd_cc_lst_c = MKLST $@ | |
- cmd_cc_lst_c = $(CC) $(c_flags) -g -c -o $*.o $< && \ | |
+ cmd_cc_lst_c = $(if $(CONFIG_LTO),$(warning Listing in LTO mode does not match final binary)) \ | |
+ $(CC) $(c_flags) -g -c -o $*.o $< && \ | |
$(CONFIG_SHELL) $(srctree)/scripts/makelst $*.o \ | |
System.map $(OBJDUMP) > $@ | |
@@ -317,7 +321,7 @@ cmd_gensymtypes_S = \ | |
grep "\<___EXPORT_SYMBOL\>" | \ | |
sed 's/.*___EXPORT_SYMBOL[[:space:]]*\([a-zA-Z0-9_]*\)[[:space:]]*,.*/EXPORT_SYMBOL(\1);/' ; } | \ | |
$(CPP) -D__GENKSYMS__ $(c_flags) -xc - | \ | |
- scripts/genksyms/genksyms $(if $(1), -T $(2)) \ | |
+ scripts/genksyms/genksyms -c $(if $(1), -T $(2)) \ | |
$(patsubst y,-R,$(CONFIG_MODULE_REL_CRCS)) \ | |
$(if $(KBUILD_PRESERVE),-p) \ | |
-r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null)) | |
@@ -348,12 +352,9 @@ ifdef CONFIG_ASM_MODVERSIONS | |
cmd_modversions_S = \ | |
if $(OBJDUMP) -h $@ | grep -q __ksymtab; then \ | |
$(call cmd_gensymtypes_S,$(KBUILD_SYMTYPES),$(@:.o=.symtypes)) \ | |
- > $(@D)/.tmp_$(@F:.o=.ver); \ | |
- \ | |
- $(LD) $(KBUILD_LDFLAGS) -r -o $(@D)/.tmp_$(@F) $@ \ | |
- -T $(@D)/.tmp_$(@F:.o=.ver); \ | |
- mv -f $(@D)/.tmp_$(@F) $@; \ | |
- rm -f $(@D)/.tmp_$(@F:.o=.ver); \ | |
+ > $(@D)/$(@F:.o=.ver.c); \ | |
+ else \ | |
+ echo > $(@D)/$(@F:.o=.ver.c); \ | |
fi | |
endif | |
@@ -394,7 +395,9 @@ $(subdir-modorder): $(obj)/%/modules.order: $(obj)/% ; | |
# | |
quiet_cmd_ar_builtin = AR $@ | |
- cmd_ar_builtin = rm -f $@; $(AR) cDPrST $@ $(real-prereqs) | |
+ cmd_ar_builtin = $(call merge_ksyms,.a,$(real-prereqs)); \ | |
+ rm -f $@; \ | |
+ $(AR) cDPrST $@ $(real-prereqs) $$TO | |
$(obj)/built-in.a: $(real-obj-y) FORCE | |
$(call if_changed,ar_builtin) | |
@@ -424,8 +427,11 @@ $(obj)/lib.a: $(lib-y) FORCE | |
# Do not replace $(filter %.o,^) with $(real-prereqs). When a single object | |
# module is turned into a multi object module, $^ will contain header file | |
# dependencies recorded in the .*.cmd file. | |
-quiet_cmd_link_multi-m = LD [M] $@ | |
- cmd_link_multi-m = $(LD) $(ld_flags) -r -o $@ $(filter %.o,$^) | |
+quiet_cmd_link_multi-m = LDFINAL [M] $@ | |
+ cmd_link_multi-m = $(call merge_ksyms,.o,$(filter %.o,$^)); \ | |
+ $(LDFINAL) $(ld_flags) -r $(KBUILD_MOD_LDFLAGS) \ | |
+ -o $@ $(filter %.o,$^) $$TO; \ | |
+ $(call update-ksyms,$@) | |
$(multi-used-m): FORCE | |
$(call if_changed,link_multi-m) | |
diff --git a/scripts/Makefile.crc b/scripts/Makefile.crc | |
new file mode 100644 | |
index 000000000000..f79a660f9b4b | |
--- /dev/null | |
+++ b/scripts/Makefile.crc | |
@@ -0,0 +1,28 @@ | |
+# SPDX-License-Identifier: GPL-2.0 | |
+# | |
+# include after auto.conf | |
+ | |
+ifdef CONFIG_MODVERSIONS | |
+# collect all the CRCs for kernel symbols in a single vermerged.o | |
+# $1: postfix of target | |
+# $2: input files | |
+# produces merged object in $$TO shell variable in same recipe | |
+# | |
+# The strange shell use is to keep the recipe inside shell argument limits. | |
+# We filter out all files that do not contain crcs. | |
+merge_ksyms = \ | |
+ TC=$(@D)/.tmp_$(@F:$(1)=_vermerged.c); \ | |
+ TO=$(@D)/.tmp_$(@F:$(1)=_vermerged.o); \ | |
+ cat $(shell find $(patsubst %.o,%.ver.c,$(filter %.o,$(2))) \ | |
+ /dev/null -type f -size +2) /dev/null > $$TC; \ | |
+ $(CC) $(c_flags) -c -o $$TO $$TC; \ | |
+ rm -f $$TC | |
+ | |
+# after immediate linking generate a dummy .ver.c for the next step | |
+# it's not needed anymore because the CRCs are already linked in | |
+# $1: target | |
+update-ksyms = echo > $(1:.o=.ver.c) | |
+else | |
+merge_ksyms = true | |
+update-ksyms = true | |
+endif | |
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib | |
index 916b2f7f7098..52e49602ba2b 100644 | |
--- a/scripts/Makefile.lib | |
+++ b/scripts/Makefile.lib | |
@@ -173,11 +173,13 @@ endif | |
endif | |
part-of-module = $(if $(filter $(basename $@).o, $(real-obj-m)),y) | |
+single-file-module = $(if $(filter 1,$(words $(obj-m))),y) | |
quiet_modtag = $(if $(part-of-module),[M], ) | |
modkern_cflags = \ | |
$(if $(part-of-module), \ | |
- $(KBUILD_CFLAGS_MODULE) $(CFLAGS_MODULE), \ | |
+ $(if $(single-file-module),$(DISABLE_LTO)) \ | |
+ $(KBUILD_CFLAGS_MODULE) $(CFLAGS_MODULE), \ | |
$(KBUILD_CFLAGS_KERNEL) $(CFLAGS_KERNEL) $(modfile_flags)) | |
modkern_aflags = $(if $(part-of-module), \ | |
@@ -245,7 +247,8 @@ quiet_cmd_ld = LD $@ | |
# --------------------------------------------------------------------------- | |
quiet_cmd_ar = AR $@ | |
- cmd_ar = rm -f $@; $(AR) cDPrsT $@ $(real-prereqs) | |
+ cmd_ar = $(call merge_ksyms,.a,$(real-prereqs)); \ | |
+ rm -f $@; $(AR) cDPrsT $@ $(real-prereqs) $$TO | |
# Objcopy | |
# --------------------------------------------------------------------------- | |
diff --git a/scripts/Makefile.lto b/scripts/Makefile.lto | |
new file mode 100644 | |
index 000000000000..dcaaedadf9ce | |
--- /dev/null | |
+++ b/scripts/Makefile.lto | |
@@ -0,0 +1,68 @@ | |
+# | |
+# Support for gcc link time optimization | |
+# | |
+ | |
+DISABLE_LTO := | |
+LTO_CFLAGS := | |
+KBUILD_MOD_LDFLAGS := | |
+KBUILD_MODPOST_LDFLAGS := | |
+ | |
+export DISABLE_LTO | |
+export LTO_CFLAGS | |
+export KBUILD_MOD_LDFLAGS | |
+export KBUILD_MODPOST_LDFLAGS | |
+ | |
+ifdef CONFIG_LTO | |
+ LTO_CFLAGS := -flto | |
+ LTO_FINAL_CFLAGS := -fuse-linker-plugin | |
+ | |
+ # gcc 8.x doesn't generate debuginfo if we don't | |
+ # specify -g on the final linking command line. | |
+ LTO_FINAL_CFLAGS += $(filter -g%, $(KBUILD_CFLAGS)) | |
+ | |
+# would be needed to support < 5.0 | |
+# LTO_FINAL_CFLAGS += -fno-toplevel-reorder | |
+ | |
+ LTO_FINAL_CFLAGS += -flto=jobserver | |
+ | |
+ KBUILD_MOD_LDFLAGS += -flinker-output=nolto-rel | |
+ | |
+ # do full LTO before main kernel modpost | |
+ # XXX should switch to running modpost on the final executable | |
+ # to avoid the time overhead | |
+ KBUILD_MODPOST_LDFLAGS += -flinker-output=nolto-rel | |
+ | |
+ # don't compile everything twice | |
+ # requires plugin ar | |
+ LTO_CFLAGS += -fno-fat-lto-objects | |
+ | |
+ # Used to disable LTO for specific files (e.g. vdso) | |
+ DISABLE_LTO := -fno-lto | |
+ | |
+ LTO_FINAL_CFLAGS += ${LTO_CFLAGS} -fwhole-program | |
+ | |
+ KBUILD_CFLAGS += ${LTO_CFLAGS} | |
+ | |
+ifdef CONFIG_LTO_CP_CLONE | |
+ LTO_FINAL_CFLAGS += -fipa-cp-clone | |
+endif | |
+ | |
+ # allow extra flags from command line | |
+ LTO_FINAL_CFLAGS += ${LTO_EXTRA_CFLAGS} | |
+ | |
+ # For LTO we need to use gcc to do the linking, not ld | |
+ # directly. Use a wrapper to convert the ld command line | |
+ # to gcc | |
+ LDFINAL := ${CONFIG_SHELL} ${srctree}/scripts/gcc-ld \ | |
+ ${LTO_FINAL_CFLAGS} | |
+ | |
+ # LTO gcc creates a lot of files in TMPDIR, and with /tmp as tmpfs | |
+ # it's easy to drive the machine OOM. Use the object directory | |
+ # instead for temporaries. | |
+ TMPDIR ?= $(objtree) | |
+ export TMPDIR | |
+ | |
+ # use plugin aware tools | |
+ AR = $(CROSS_COMPILE)gcc-ar | |
+ NM = $(CROSS_COMPILE)gcc-nm | |
+endif # CONFIG_LTO | |
diff --git a/scripts/Makefile.modfinal b/scripts/Makefile.modfinal | |
index 411c1e600e7d..b7bba83ee91b 100644 | |
--- a/scripts/Makefile.modfinal | |
+++ b/scripts/Makefile.modfinal | |
@@ -6,7 +6,9 @@ | |
PHONY := __modfinal | |
__modfinal: | |
+include include/config/auto.conf | |
include $(srctree)/scripts/Kbuild.include | |
+include $(srctree)/scripts/Makefile.crc | |
# for c_flags | |
include $(srctree)/scripts/Makefile.lib | |
@@ -29,12 +31,13 @@ quiet_cmd_cc_o_c = CC [M] $@ | |
ARCH_POSTLINK := $(wildcard $(srctree)/arch/$(SRCARCH)/Makefile.postlink) | |
-quiet_cmd_ld_ko_o = LD [M] $@ | |
+quiet_cmd_ld_ko_o = LDFINAL [M] $@ | |
cmd_ld_ko_o = \ | |
- $(LD) -r $(KBUILD_LDFLAGS) \ | |
+ $(call merge_ksyms,.ko,$(filter-out %.mod.o,$(filter %.o,$^))); \ | |
+ $(LDFINAL) -r $(KBUILD_MOD_LDFLAGS) $(KBUILD_LDFLAGS) \ | |
$(KBUILD_LDFLAGS_MODULE) $(LDFLAGS_MODULE) \ | |
$(addprefix -T , $(KBUILD_LDS_MODULE)) \ | |
- -o $@ $(filter %.o, $^); \ | |
+ -o $@ $(filter %.o, $^) $$TO; \ | |
$(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) $@, true) | |
$(modules): %.ko: %.o %.mod.o $(KBUILD_LDS_MODULE) FORCE | |
diff --git a/scripts/elf_file_offset b/scripts/elf_file_offset | |
new file mode 100644 | |
index 000000000000..2c8ee49d5b96 | |
--- /dev/null | |
+++ b/scripts/elf_file_offset | |
@@ -0,0 +1,24 @@ | |
+#!/bin/bash | |
+# find the file offset of a section in an ELF file | |
+# objdump --section-headers elf-file | | |
+# gawk -f elf_file_offset filesize=SIZE section=SECTIONNAME | |
+# gawk needed for strtonum() | |
+#Idx Name Size VMA LMA File off Algn | |
+# 4 .kallsyms 001fd648 ffffffff81b1c068 0000000001b1c068 00d1c068 2**3 | |
+ | |
+$2 == section { | |
+ old = strtonum("0x" $3) | |
+ new = strtonum(filesize) | |
+ if (old < new) { | |
+ print "Not enough padding in vmlinux for new kallsyms, missing",new-old > "/dev/stderr" | |
+ print "Please lower (=increase) PAD_RATIO in kallsyms.c" | |
+ exit 1 | |
+ } | |
+ print "0x" $6 | |
+ # XXX doesn't exit in gawk 4.1.0 ?!? | |
+ #exit(0) | |
+} | |
+#END { | |
+# print section " not found" > "/dev/stderr" | |
+# exit 1 | |
+#} | |
diff --git a/scripts/gcc-ld b/scripts/gcc-ld | |
index 997b818c3962..cf4907b1b357 100755 | |
--- a/scripts/gcc-ld | |
+++ b/scripts/gcc-ld | |
@@ -8,11 +8,12 @@ ARGS="-nostdlib" | |
while [ "$1" != "" ] ; do | |
case "$1" in | |
- -save-temps|-m32|-m64) N="$1" ;; | |
+ -save-temps*|-m32|-m64) N="$1" ;; | |
-r) N="$1" ;; | |
+ -flinker-output*) N="$1" ;; | |
-[Wg]*) N="$1" ;; | |
-[olv]|-[Ofd]*|-nostdlib) N="$1" ;; | |
- --end-group|--start-group) | |
+ --end-group|--start-group|--whole-archive|--no-whole-archive) | |
N="-Wl,$1" ;; | |
-[RTFGhIezcbyYu]*|\ | |
--script|--defsym|-init|-Map|--oformat|-rpath|\ | |
@@ -27,4 +28,6 @@ while [ "$1" != "" ] ; do | |
shift | |
done | |
+[ -n "$V" ] && echo >&2 $CC $ARGS | |
+ | |
exec $CC $ARGS | |
diff --git a/scripts/genksyms/genksyms.c b/scripts/genksyms/genksyms.c | |
index 23eff234184f..be47e6d30c82 100644 | |
--- a/scripts/genksyms/genksyms.c | |
+++ b/scripts/genksyms/genksyms.c | |
@@ -33,7 +33,7 @@ char *cur_filename, *source_file; | |
int in_source_file; | |
static int flag_debug, flag_dump_defs, flag_reference, flag_dump_types, | |
- flag_preserve, flag_warnings, flag_rel_crcs; | |
+ flag_preserve, flag_warnings, flag_rel_crcs, flag_c_output; | |
static int errors; | |
static int nsyms; | |
@@ -631,7 +631,7 @@ static unsigned long expand_and_crc_sym(struct symbol *sym, unsigned long crc) | |
return crc; | |
} | |
-void export_symbol(const char *name) | |
+void export_symbol(const char *sec, const char *name) | |
{ | |
struct symbol *sym; | |
@@ -681,10 +681,15 @@ void export_symbol(const char *name) | |
fputs(">\n", debugfile); | |
/* Used as a linker script. */ | |
- printf(!flag_rel_crcs ? "__crc_%s = 0x%08lx;\n" : | |
- "SECTIONS { .rodata : ALIGN(4) { " | |
- "__crc_%s = .; LONG(0x%08lx); } }\n", | |
- name, crc); | |
+ if (flag_c_output) | |
+ printf("int __attribute__((section(\".kcrctab%.*s%s\"))) __crc_%s = %#lx;\n", | |
+ sec[0] ? (int)strlen(sec) - 2 : 0, sec[0] ? sec + 1 : sec, | |
+ name, name, crc); | |
+ else | |
+ printf(!flag_rel_crcs ? "__crc_%s = 0x%08lx;\n" : | |
+ "SECTIONS { .rodata : ALIGN(4) { " | |
+ "__crc_%s = .; LONG(0x%08lx); } }\n", | |
+ name, crc); | |
} | |
} | |
@@ -734,6 +739,7 @@ static void genksyms_usage(void) | |
" -h, --help Print this message\n" | |
" -V, --version Print the release version\n" | |
" -R, --relative-crc Emit section relative symbol CRCs\n" | |
+ " -c, --c-output Generate C output\n" | |
#else /* __GNU_LIBRARY__ */ | |
" -s Select symbol prefix\n" | |
" -d Increment the debug level (repeatable)\n" | |
@@ -746,6 +752,7 @@ static void genksyms_usage(void) | |
" -h Print this message\n" | |
" -V Print the release version\n" | |
" -R Emit section relative symbol CRCs\n" | |
+ " -c Generate C output\n" | |
#endif /* __GNU_LIBRARY__ */ | |
, stderr); | |
} | |
@@ -767,13 +774,14 @@ int main(int argc, char **argv) | |
{"version", 0, 0, 'V'}, | |
{"help", 0, 0, 'h'}, | |
{"relative-crc", 0, 0, 'R'}, | |
+ {"c-output", 0, 0, 'c'}, | |
{0, 0, 0, 0} | |
}; | |
- while ((o = getopt_long(argc, argv, "s:dwqVDr:T:phR", | |
+ while ((o = getopt_long(argc, argv, "s:dwqVDr:T:phRc", | |
&long_opts[0], NULL)) != EOF) | |
#else /* __GNU_LIBRARY__ */ | |
- while ((o = getopt(argc, argv, "s:dwqVDr:T:phR")) != EOF) | |
+ while ((o = getopt(argc, argv, "s:dwqVDr:T:phRc")) != EOF) | |
#endif /* __GNU_LIBRARY__ */ | |
switch (o) { | |
case 'd': | |
@@ -799,6 +807,9 @@ int main(int argc, char **argv) | |
return 1; | |
} | |
break; | |
+ case 'c': | |
+ flag_c_output = 1; | |
+ break; | |
case 'T': | |
flag_dump_types = 1; | |
dumpfile = fopen(optarg, "w"); | |
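Reading the printf in export_symbol() above, the new -c mode emits one small C definition per exported symbol instead of a linker script. Roughly, for a hypothetical EXPORT_SYMBOL(example_func) (CRC value illustrative; a _GPL export inserts "_gpl" after .kcrctab):

/* generated into <file>.ver.c by `genksyms -c` */
int __attribute__((section(".kcrctabexample_func"))) __crc_example_func = 0x89abcdef;

These .ver.c files are later concatenated by scripts/Makefile.crc, compiled, and linked into vmlinux or the module, replacing the old linker-script CRC injection.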
diff --git a/scripts/genksyms/genksyms.h b/scripts/genksyms/genksyms.h | |
index 2bcdb9bebab4..f8fe2e9a418e 100644 | |
--- a/scripts/genksyms/genksyms.h | |
+++ b/scripts/genksyms/genksyms.h | |
@@ -53,7 +53,7 @@ extern int in_source_file; | |
struct symbol *find_symbol(const char *name, enum symbol_type ns, int exact); | |
struct symbol *add_symbol(const char *name, enum symbol_type type, | |
struct string_list *defn, int is_extern); | |
-void export_symbol(const char *); | |
+void export_symbol(const char *, const char *); | |
void free_node(struct string_list *list); | |
void free_list(struct string_list *s, struct string_list *e); | |
diff --git a/scripts/genksyms/parse.y b/scripts/genksyms/parse.y | |
index e22b42245bcc..1f30b3d8f7c7 100644 | |
--- a/scripts/genksyms/parse.y | |
+++ b/scripts/genksyms/parse.y | |
@@ -489,8 +489,10 @@ asm_phrase_opt: | |
; | |
export_definition: | |
- EXPORT_SYMBOL_KEYW '(' IDENT ')' ';' | |
- { export_symbol((*$3)->string); $$ = $5; } | |
+ EXPORT_SYMBOL_KEYW '(' STRING ',' IDENT ')' ';' | |
+ { export_symbol((*$3)->string, (*$5)->string); $$ = $7; } | |
+ | EXPORT_SYMBOL_KEYW '(' IDENT ')' ';' | |
+ { export_symbol("", (*$3)->string); $$ = $5; } | |
; | |
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c | |
index 6dc3078649fa..8bbb133427a6 100644 | |
--- a/scripts/kallsyms.c | |
+++ b/scripts/kallsyms.c | |
@@ -25,6 +25,13 @@ | |
#include <ctype.h> | |
#include <limits.h> | |
+/* | |
+ * The padding ratio: by how much the final kallsyms tables are | |
+ * allowed to grow. This is for symbols that only become visible | |
+ * at final link time. | |
+ */ | |
+#define PAD_RATIO 20 /* 1/x = ~5% */ | |
+ | |
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0])) | |
#define KSYM_NAME_LEN 128 | |
@@ -42,6 +49,14 @@ struct addr_range { | |
unsigned long long start, end; | |
}; | |
+enum pads { | |
+ PAD_OFF, | |
+ PAD_NAMES, | |
+ PAD_MARKERS, | |
+ PAD_TOKTAB, | |
+ NUM_PAD | |
+}; | |
+ | |
static unsigned long long _text; | |
static unsigned long long relative_base; | |
static struct addr_range text_ranges[] = { | |
@@ -71,7 +86,7 @@ static unsigned char best_table_len[256]; | |
static void usage(void) | |
{ | |
fprintf(stderr, "Usage: kallsyms [--all-symbols] " | |
- "[--base-relative] < in.map > out.S\n"); | |
+ "[--base-relative] [--pad=A,B,C] [--pad-file=name] < in.map > out.S\n"); | |
exit(1); | |
} | |
@@ -120,6 +135,7 @@ static bool is_ignored_symbol(const char *name, char type) | |
}; | |
const char * const *p; | |
+ char *p2; | |
/* Exclude symbols which vary between passes. */ | |
for (p = ignored_symbols; *p; p++) | |
@@ -143,6 +159,12 @@ static bool is_ignored_symbol(const char *name, char type) | |
if (type == 'N' || type == 'n') | |
return true; | |
+ /* Only keep symbols from the text section | |
+ * unless --all-symbols is specified. | |
+ */ | |
+ if (toupper(type) != 'T' && !all_symbols) | |
+ return true; | |
+ | |
if (toupper(type) == 'A') { | |
/* Keep these useful absolute symbols */ | |
if (strcmp(name, "__kernel_syscall_via_break") && | |
@@ -152,6 +174,18 @@ static bool is_ignored_symbol(const char *name, char type) | |
return true; | |
} | |
+ /* gcc-nm produces extra weak symbols for C files | |
+ * in the form | |
+ * 000000000003aa8b W version.c.36323a88 | |
+ * ignore them: they are outside the supported range | |
+ * and confuse the symbol generation, and they | |
+ * are not useful for symbolization. | |
+ */ | |
+ if ((type == 'W' || type == 'w') && | |
+ (p2 = strstr(name, ".c.")) && | |
+ isxdigit(p2[3])) | |
+ return true; | |
+ | |
return false; | |
} | |
@@ -363,7 +397,14 @@ static int symbol_absolute(const struct sym_entry *s) | |
return s->percpu_absolute; | |
} | |
-static void write_src(void) | |
+static void bad_padding(char *msg, int diff) | |
+{ | |
+ fprintf(stderr, "kallsyms: %s padding too short: %d missing\n", | |
+ msg, diff); | |
+ exit(EXIT_FAILURE); | |
+} | |
+ | |
+static void write_src(int *pad, int *opad) | |
{ | |
unsigned int i, k, off; | |
unsigned int best_idx[256]; | |
@@ -379,7 +420,8 @@ static void write_src(void) | |
printf("#define ALGN .balign 4\n"); | |
printf("#endif\n"); | |
- printf("\t.section .rodata, \"a\"\n"); | |
+ printf("#ifndef NO_SYMS\n"); | |
+ printf("\t.section .kallsyms, \"a\"\n"); | |
if (!base_relative) | |
output_label("kallsyms_addresses"); | |
@@ -422,14 +464,30 @@ static void write_src(void) | |
printf("\tPTR\t%#llx\n", table[i]->addr); | |
} | |
} | |
+ if (pad) { | |
+ if (i > pad[PAD_OFF]) | |
+ bad_padding("address pointers", i - pad[PAD_OFF]); | |
+ for (; i < pad[PAD_OFF]; i++) | |
+ printf("\t%s\t0\n", base_relative ? ".long" : "PTR"); | |
+ } else { | |
+ for (i = 0; i < table_cnt / PAD_RATIO; i++) | |
+ printf("\t%s\t0\n", base_relative ? ".long" : "PTR"); | |
+ opad[PAD_OFF] = table_cnt + table_cnt/PAD_RATIO; | |
+ } | |
printf("\n"); | |
+ printf("#endif\n"); | |
if (base_relative) { | |
+ printf("#ifndef NO_REL\n"); | |
+ printf("\t.section .rodata, \"a\"\n"); | |
output_label("kallsyms_relative_base"); | |
output_address(relative_base); | |
printf("\n"); | |
+ printf("\t.previous\n"); | |
+ printf("#endif\n"); | |
} | |
+ printf("#ifndef NO_SYMS\n"); | |
output_label("kallsyms_num_syms"); | |
printf("\t.long\t%u\n", table_cnt); | |
printf("\n"); | |
@@ -456,11 +514,31 @@ static void write_src(void) | |
off += table[i]->len + 1; | |
} | |
+ if (pad) { | |
+ if (off > pad[PAD_NAMES]) | |
+ bad_padding("name table", off - pad[PAD_NAMES]); | |
+ if (off < pad[PAD_NAMES]) | |
+ printf("\t.fill %d,1,0\n", pad[PAD_NAMES] - off); | |
+ } else { | |
+ printf("\t.fill %d,1,0\n", off/PAD_RATIO); | |
+ off += off/PAD_RATIO; | |
+ opad[PAD_NAMES] = off; | |
+ } | |
printf("\n"); | |
output_label("kallsyms_markers"); | |
for (i = 0; i < ((table_cnt + 255) >> 8); i++) | |
printf("\t.long\t%u\n", markers[i]); | |
+ if (pad) { | |
+ if (i > pad[PAD_MARKERS]) | |
+ bad_padding("markers", i - pad[PAD_MARKERS]); | |
+ for (; i < pad[PAD_MARKERS]; i++) | |
+ printf("\t.long\t0\n"); | |
+ } else { | |
+ for (k = 0; k < i/PAD_RATIO; k++) | |
+ printf("\t.long\t0\n"); | |
+ opad[PAD_MARKERS] = i + i/PAD_RATIO; | |
+ } | |
printf("\n"); | |
free(markers); | |
@@ -473,12 +551,23 @@ static void write_src(void) | |
printf("\t.asciz\t\"%s\"\n", buf); | |
off += strlen(buf) + 1; | |
} | |
+ if (pad) { | |
+ if (off > pad[PAD_TOKTAB]) | |
+ bad_padding("token table", off - pad[PAD_TOKTAB]); | |
+ if (off < pad[PAD_TOKTAB]) | |
+ printf("\t.fill %d,1,0\n", pad[PAD_TOKTAB] - off); | |
+ } else { | |
+ printf("\t.fill %d,1,0\n", off/PAD_RATIO); | |
+ off += off/PAD_RATIO; | |
+ opad[PAD_TOKTAB] = off; | |
+ } | |
printf("\n"); | |
output_label("kallsyms_token_index"); | |
for (i = 0; i < 256; i++) | |
printf("\t.short\t%d\n", best_idx[i]); | |
printf("\n"); | |
+ printf("#endif\n"); | |
} | |
@@ -743,6 +832,10 @@ static void record_relative_base(void) | |
int main(int argc, char **argv) | |
{ | |
+ int inpad[NUM_PAD], opad[NUM_PAD]; | |
+ int *inpadp = NULL; | |
+ FILE *opadf = NULL; | |
+ | |
if (argc >= 2) { | |
int i; | |
for (i = 1; i < argc; i++) { | |
@@ -752,7 +845,23 @@ int main(int argc, char **argv) | |
absolute_percpu = 1; | |
else if (strcmp(argv[i], "--base-relative") == 0) | |
base_relative = 1; | |
- else | |
+ else if (strncmp(argv[i], "--pad=", 6) == 0) { | |
+ inpadp = inpad; | |
+ if (sscanf(argv[i] + 6, "%d,%d,%d,%d", | |
+ inpad + 0, | |
+ inpad + 1, | |
+ inpad + 2, | |
+ inpad + 3) != NUM_PAD) { | |
+ fprintf(stderr, "Bad pad list\n"); | |
+ exit(EXIT_FAILURE); | |
+ } | |
+ } else if (strncmp(argv[i], "--pad-file=", 11) == 0) { | |
+ opadf = fopen(argv[i] + 11, "w"); | |
+ if (!opadf) { | |
+ fprintf(stderr, "Cannot open %s\n", argv[i]+11); | |
+ exit(EXIT_FAILURE); | |
+ } | |
+ } else | |
usage(); | |
} | |
} else if (argc != 1) | |
@@ -766,7 +875,11 @@ int main(int argc, char **argv) | |
if (base_relative) | |
record_relative_base(); | |
optimize_token_table(); | |
- write_src(); | |
- | |
+ write_src(inpadp, opad); | |
+ if (opadf) { | |
+ fprintf(opadf, "--pad=%d,%d,%d,%d\n", | |
+ opad[0], opad[1], opad[2], opad[3]); | |
+ fclose(opadf); | |
+ } | |
return 0; | |
} | |
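To make the padding arithmetic above concrete, here is a small sketch that mirrors the opad[PAD_OFF] bookkeeping (the numbers are only an example):

#define PAD_RATIO 20	/* 1/20 = ~5% headroom */

/* First pass: reserve extra slots for symbols that only become
 * visible after the final LTO link (mirrors opad[PAD_OFF] above). */
static unsigned int padded_count(unsigned int table_cnt)
{
	return table_cnt + table_cnt / PAD_RATIO;
}

/* e.g. padded_count(200000) == 210000, i.e. 10000 spare entries;
 * the second pass must fit inside this reserve or bad_padding()
 * aborts and asks you to lower PAD_RATIO. */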
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh | |
index 92dd745906f4..2bb837495680 100755 | |
--- a/scripts/link-vmlinux.sh | |
+++ b/scripts/link-vmlinux.sh | |
@@ -52,7 +52,7 @@ modpost_link() | |
${KBUILD_VMLINUX_LIBS} \ | |
--end-group" | |
- ${LD} ${KBUILD_LDFLAGS} -r -o ${1} ${objects} | |
+ ${LDFINAL} ${KBUILD_LDFLAGS} -r ${KBUILD_MODPOST_LDFLAGS} -o ${1} ${objects} | |
} | |
objtool_link() | |
@@ -88,13 +88,15 @@ vmlinux_link() | |
local objects | |
local strip_debug | |
- info LD ${output} | |
+ info LDFINAL ${output} | |
# skip output file argument | |
shift | |
# The kallsyms linking does not need debug symbols included. | |
- if [ "$output" != "${output#.tmp_vmlinux.kallsyms}" ] ; then | |
+ # except for LTO because gcc 10 LTO changes the layout of the data segment | |
+ # with --strip-debug | |
+ if [ "$output" != "${output#.tmp_vmlinux.kallsyms}" -a -z "$CONFIG_LTO" ] ; then | |
strip_debug=-Wl,--strip-debug | |
fi | |
@@ -107,7 +109,7 @@ vmlinux_link() | |
--end-group \ | |
${@}" | |
- ${LD} ${KBUILD_LDFLAGS} ${LDFLAGS_vmlinux} \ | |
+ ${LDFINAL} ${KBUILD_LDFLAGS} ${LDFLAGS_vmlinux} \ | |
${strip_debug#-Wl,} \ | |
-o ${output} \ | |
-T ${lds} ${objects} | |
@@ -165,8 +167,8 @@ gen_btf() | |
printf '\1' | dd of=${2} conv=notrunc bs=1 seek=16 status=none | |
} | |
-# Create ${2} .o file with all symbols from the ${1} object file | |
-kallsyms() | |
+# Create ${2} .S file with all symbols from the ${1} object file | |
+kallsyms_s() | |
{ | |
info KSYM ${2} | |
local kallsymopt; | |
@@ -182,14 +184,54 @@ kallsyms() | |
if [ -n "${CONFIG_KALLSYMS_BASE_RELATIVE}" ]; then | |
kallsymopt="${kallsymopt} --base-relative" | |
fi | |
+ kallsymopt="${kallsymopt} $3 $4 $5" | |
+ | |
+ local afile="${2}" | |
+ | |
+ ( | |
+ if [ -n "$CONFIG_LTO" -a -n "$CONFIG_KALLSYMS_SINGLE" -a -n "$CONFIG_CC_IS_GCC" ] && | |
+ ( ${OBJDUMP} -h ${1} | grep -q gnu\.lto) ; then | |
+ # workaround for slim LTO gcc-nm not outputting static symbols | 
+ # http://gcc.gnu.org/PR60016 | |
+ # generate a fake symbol table based on the LTO function sections. | |
+ # This unfortunately "knows" about the internal LTO file format | |
+ # and only works for functions | |
+ | |
+ # read the function names directly from the LTO object | |
+ objdump -h ${1} | | |
+ awk '/gnu\.lto_[a-z]/ { gsub(/\.gnu\.lto_/,""); gsub(/\..*/, ""); print "0 t " $2 } ' | |
+ # read the non LTO symbols with readelf (which doesn't use the LTO plugin, | |
+ # so we only get pure ELF symbols) | |
+ # readelf doesn't handle ar, so we have to expand the objects | |
+ echo ${1} | sed 's/ /\n/g' | grep built-in.a | while read i ; do | |
+ ${AR} t $i | while read j ; do readelf -s $j ; done | |
+ done | awk 'NF >= 8 { print "0 t " $8 } ' | |
+ # now handle the objects | |
+ echo ${1} | sed 's/ /\n/g' | grep '\.o$' | while read i ; do | |
+ readelf -s $i | |
+ done | awk 'NF >= 8 { | |
+ if ($8 !~ /Name|__gnu_lto_slim|\.c(\.[0-9a-f]+)?/) { print "0 t " $8 } | |
+ }' | |
+ else | |
+ ${NM} -n ${1} | |
+ fi | |
+ ) | scripts/kallsyms ${kallsymopt} > ${afile} | |
+} | |
+kallsyms_o() | |
+{ | |
local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \ | |
${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS}" | |
- local afile="`basename ${2} .o`.S" | |
+ ${CC} $3 $4 $5 ${aflags} -c -o ${2} ${1} | |
+} | |
- ${NM} -n ${1} | scripts/kallsyms ${kallsymopt} > ${afile} | |
- ${CC} ${aflags} -c -o ${2} ${afile} | |
+# Create ${2} .o file with all symbols from the ${1} object file | |
+kallsyms() | |
+{ | |
+ local s=`basename $2 .o`.S | |
+ kallsyms_s "$1" $s $3 $4 $5 $6 $7 | |
+ kallsyms_o $s $2 | |
} | |
# Perform one step in kallsyms generation, including temporary linking of | |
@@ -219,6 +261,11 @@ sorttable() | |
# Delete output files in case of error | |
cleanup() | |
{ | |
+ # don't delete for make -i | |
+ case "$MFLAGS" in | |
+ *-i*) return ;; | |
+ esac | |
+ | |
rm -f .btf.* | |
rm -f .tmp_System.map | |
rm -f .tmp_vmlinux* | |
@@ -270,7 +317,7 @@ fi; | |
${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init need-builtin=1 | |
#link vmlinux.o | |
-info LD vmlinux.o | |
+info LDFINAL vmlinux.o | |
modpost_link vmlinux.o | |
objtool_link vmlinux.o | |
@@ -297,7 +344,24 @@ fi | |
kallsymso="" | |
kallsymso_prev="" | |
kallsyms_vmlinux="" | |
-if [ -n "${CONFIG_KALLSYMS}" ]; then | |
+kallsymsorel="" | |
+if [ -n "${CONFIG_KALLSYMS}" -a -n "${CONFIG_KALLSYMS_SINGLE}" ]; then | |
+ # Generate kallsyms from the top level object files | |
+ # this is slightly off, and has wrong addresses, | |
+ # but gives us the conservative max length of the kallsyms | |
+ # table to link in something with the right size. | |
+ info KALLSYMS1 .tmp_kallsyms1.o | |
+ kallsyms_s "${KBUILD_VMLINUX_OBJS} ${KBUILD_VMLINUX_LIBS}" \ | |
+ .tmp_kallsyms1.S \ | |
+ --all-symbols \ | |
+ "--pad-file=.kallsyms_pad" | |
+ # split the object into kallsyms with relocations and no relocations | |
+ # the relocations part does not change in step 2 | |
+ kallsyms_o .tmp_kallsyms1.S .tmp_kallsyms1.o -DNO_REL | |
+ kallsyms_o .tmp_kallsyms1.S .tmp_kallsyms1rel.o -DNO_SYMS | |
+ kallsymso=.tmp_kallsyms1.o | |
+ kallsymsorel=.tmp_kallsyms1rel.o | |
+elif [ -n "${CONFIG_KALLSYMS}" ]; then | |
# kallsyms support | |
# Generate section listing all symbols and add it into vmlinux | |
@@ -334,7 +398,53 @@ if [ -n "${CONFIG_KALLSYMS}" ]; then | |
fi | |
fi | |
-vmlinux_link vmlinux "${kallsymso}" ${btf_vmlinux_bin_o} | |
+if [ -z "${CONFIG_SINGLE_LINK}" ] ; then | |
+ | |
+info LDFINAL vmlinux | |
+vmlinux_link vmlinux "${kallsymso} ${kallsymsorel}" ${btf_vmlinux_bin_o} | |
+ | |
+else | |
+ | |
+# Reuse the partial linking from the modpost vmlinux.o earlier | |
+ | |
+info LD vmlinux | |
+${LD} ${KBUILD_LDFLAGS} ${LDFLAGS_vmlinux} \ | |
+ -o vmlinux \ | |
+ -T ${objtree}/${KBUILD_LDS} \ | |
+ vmlinux.o ${kallsymso} ${kallsymsorel} ${btf_vmlinux_bin_o} | |
+ | |
+fi | |
+ | |
+if [ -n "${CONFIG_KALLSYMS}" -a -n "${CONFIG_KALLSYMS_SINGLE}" ] ; then | |
+ # Now regenerate the kallsyms table and patch it into the | |
+ # previously linked file. We tell kallsyms to pad it | |
+ # to the previous length, so that no symbol changes. | |
+ info KALLSYMS2 .tmp_kallsyms2.o | |
+ kallsyms_s vmlinux .tmp_kallsyms2.S `cat .kallsyms_pad` | |
+ kallsyms_o .tmp_kallsyms2.S .tmp_kallsyms2.o -DNO_REL | |
+ | |
+ # sanity check the offsets | |
+ ${NM} .tmp_kallsyms1.o >.tmp_kallsyms1.nm | |
+ ${NM} .tmp_kallsyms2.o >.tmp_kallsyms2.nm | |
+ cmp .tmp_kallsyms1.nm .tmp_kallsyms2.nm | |
+ rm .tmp_kallsyms[12].nm | |
+ | |
+ info OBJCOPY .tmp_kallsyms2.bin | |
+ ${OBJCOPY} -O binary .tmp_kallsyms2.o .tmp_kallsyms2.bin | |
+ | |
+ info PATCHFILE vmlinux | |
+ EF=scripts/elf_file_offset | |
+ if [ ! -r $EF ] ; then EF=source/$EF ; fi | |
+ SIZE=`stat -c%s .tmp_kallsyms2.bin` | |
+ OFF=`${OBJDUMP} --section-headers vmlinux | | |
+ gawk -f $EF -v section=.kallsyms -v filesize=$SIZE` | |
+ if [ -z "$OFF" ] ; then | |
+ echo "Cannot find .kallsyms section in vmlinux binary" | |
+ exit 1 | |
+ fi | |
+ scripts/patchfile vmlinux $OFF .tmp_kallsyms2.bin | |
+ kallsyms_vmlinux=vmlinux | |
+fi | |
if [ -n "${CONFIG_BUILDTIME_TABLE_SORT}" ]; then | |
info SORTTAB vmlinux | |
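Condensed, the CONFIG_KALLSYMS_SINGLE path above boils down to the following sketch (file and section names are the ones used in the script; scripts/elf_file_offset comes from an earlier part of the patch, so its behaviour is assumed here):

# 1. link vmlinux once, carrying an oversized placeholder kallsyms table
# 2. regenerate the table from the finished image, padded to the same size
# 3. splice the raw bytes back into the image in place
objcopy -O binary .tmp_kallsyms2.o .tmp_kallsyms2.bin
SIZE=$(stat -c%s .tmp_kallsyms2.bin)
OFF=$(objdump --section-headers vmlinux |
      gawk -f scripts/elf_file_offset -v section=.kallsyms -v filesize=$SIZE)
scripts/patchfile vmlinux "$OFF" .tmp_kallsyms2.bin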
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c | |
index 6aea65c65745..a481de34f1ee 100644 | |
--- a/scripts/mod/modpost.c | |
+++ b/scripts/mod/modpost.c | |
@@ -1979,6 +1979,25 @@ static char *remove_dot(char *s) | |
return s; | |
} | |
+static bool open_ver_o(const char *name, struct elf_info *info) | |
+{ | |
+ int nlen = strlen(name); | |
+ char *n = NOFAIL(malloc(nlen + 10)); | |
+ char *p; | |
+ bool ret; | |
+ | |
+ if (nlen > 6 && !strcmp(name + nlen - 6, ".ver.o")) | |
+ return false; | |
+ strcpy(n, name); | |
+ p = strrchr(n, '.'); | |
+ if (p) | |
+ *p = 0; | |
+ strcat(n, ".ver.o"); | |
+ ret = !access(n, R_OK) && parse_elf(info, n); | |
+ free(n); | |
+ return ret; | |
+} | |
+ | |
static void read_symbols(const char *modname) | |
{ | |
const char *symname; | |
@@ -1986,8 +2005,9 @@ static void read_symbols(const char *modname) | |
char *license; | |
char *namespace; | |
struct module *mod; | |
- struct elf_info info = { }; | |
+ struct elf_info info = { }, vinfo = { }; | |
Elf_Sym *sym; | |
+ bool have_ver_o; | |
if (!parse_elf(&info, modname)) | |
return; | |
@@ -2002,6 +2022,8 @@ static void read_symbols(const char *modname) | |
free(tmp); | |
} | |
+ have_ver_o = open_ver_o(modname, &vinfo); | |
+ | |
if (!mod->is_vmlinux) { | |
license = get_modinfo(&info, "license"); | |
if (!license) | |
@@ -2045,6 +2067,20 @@ static void read_symbols(const char *modname) | |
symname + strlen("__crc_")); | |
} | |
+ if (have_ver_o) { | |
+ /* | |
+ * Also read CRCs from a .ver.o if available. They will be linked | |
+ * into the module after modpost. | |
+ */ | |
+ for (sym = vinfo.symtab_start; sym < vinfo.symtab_stop; sym++) { | |
+ symname = remove_dot(vinfo.strtab + sym->st_name); | |
+ if (strstarts(symname, "__crc_")) { | |
+ handle_modversion(mod, &vinfo, sym, | |
+ symname + strlen("__crc_")); | |
+ } | |
+ } | |
+ } | |
+ | |
// check for static EXPORT_SYMBOL_* functions && global vars | |
for (sym = info.symtab_start; sym < info.symtab_stop; sym++) { | |
unsigned char bind = ELF_ST_BIND(sym->st_info); | |
@@ -2069,6 +2105,8 @@ static void read_symbols(const char *modname) | |
} | |
parse_elf_finish(&info); | |
+ if (have_ver_o) | |
+ parse_elf_finish(&vinfo); | |
/* Our trick to get versioning for module struct etc. - it's | |
* never passed as an argument to an exported function, so | |
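In other words, for an object like drivers/foo/bar.o the new open_ver_o() helper strips the last suffix and probes drivers/foo/bar.ver.o; any __crc_* symbols found there are fed through handle_modversion() exactly like CRCs found in the object itself. A quick way to see what would be imported (the path is an invented example):

nm drivers/foo/bar.ver.o | grep ' __crc_'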
diff --git a/scripts/patchfile.c b/scripts/patchfile.c | |
new file mode 100644 | |
index 000000000000..1a3414d11803 | |
--- /dev/null | |
+++ b/scripts/patchfile.c | |
@@ -0,0 +1,81 @@ | |
+/* Patch file at specific offset | |
+ * patchfile file-to-patch offset patch-file [len-of-patch] | |
+ */ | |
+#define _GNU_SOURCE 1 | |
+#include <sys/mman.h> | |
+#include <unistd.h> | |
+#include <sys/fcntl.h> | |
+#include <sys/stat.h> | |
+#include <stdio.h> | |
+#include <unistd.h> | |
+#include <stdlib.h> | |
+ | |
+#define ROUNDUP(x, y) (((x) + (y) - 1) & ~((y) - 1)) | |
+ | |
+static void *mmapfile(char *file, size_t *size) | |
+{ | |
+ int pagesize = sysconf(_SC_PAGESIZE); | |
+ int fd = open(file, O_RDONLY); | |
+ void *res = NULL; | |
+ struct stat st; | |
+ | |
+ *size = 0; | |
+ if (fd < 0) | |
+ return NULL; | |
+ if (fstat(fd, &st) >= 0) { | |
+ *size = st.st_size; | |
+ res = mmap(NULL, ROUNDUP(st.st_size, pagesize), | |
+ PROT_READ, MAP_SHARED, | |
+ fd, 0); | |
+ if (res == (void *)-1) | |
+ res = NULL; | |
+ } | |
+ close(fd); | |
+ return res; | |
+} | |
+ | |
+static void usage(void) | |
+{ | |
+ fprintf(stderr, "Usage: patchfile file-to-patch offset file-to-patch-in\n"); | |
+ exit(1); | |
+} | |
+ | |
+static size_t get_num(char *s) | |
+{ | |
+ char *endp; | |
+ size_t v = strtoul(s, &endp, 0); | |
+ if (s == endp) | |
+ usage(); | |
+ return v; | |
+} | |
+ | |
+int main(int ac, char **av) | |
+{ | |
+ char *patch; | |
+ size_t patchsize; | |
+ int infd; | |
+ size_t offset; | |
+ | |
+ if (ac != 5 && ac != 4) | |
+ usage(); | |
+ offset = get_num(av[2]); | |
+ patch = mmapfile(av[3], &patchsize); | |
+ if (av[4]) { | |
+ size_t newsize = get_num(av[4]); | |
+ if (newsize > patchsize) | |
+ fprintf(stderr, "kallsyms: warning, size larger than patch\n"); | |
+ if (newsize < patchsize) | |
+ patchsize = newsize; | |
+ } | |
+ infd = open(av[1], O_RDWR); | |
+ if (infd < 0) { | |
+ fprintf(stderr, "Cannot open %s\n", av[1]); | |
+ exit(1); | |
+ } | |
+ if (pwrite(infd, patch, patchsize, offset) != patchsize) { | |
+ fprintf(stderr, "Cannot write patch to %s\n", av[1]); | |
+ exit(1); | |
+ } | |
+ close(infd); | |
+ return 0; | |
+} |
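The helper is easy to try on throwaway files; a hedged smoke test (everything below is illustrative and unrelated to the build) would be:

# Overwrite 2 bytes at offset 3 of an 8-byte file.
printf 'AAAAAAAA' > target
printf 'BB' > patch.bin
scripts/patchfile target 3 patch.bin
od -c target    # expected contents: A A A B B A A A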
--- | |
diff --git a/Documentation/networking/j1939.rst b/Documentation/networking/j1939.rst | |
index f5be243d250a4..4b0db514b2010 100644 | |
--- a/Documentation/networking/j1939.rst | |
+++ b/Documentation/networking/j1939.rst | |
@@ -414,8 +414,8 @@ Send: | |
.can_family = AF_CAN, | |
.can_addr.j1939 = { | |
.name = J1939_NO_NAME; | |
- .pgn = 0x30, | |
- .addr = 0x12300, | |
+ .addr = 0x30, | |
+ .pgn = 0x12300, | |
}, | |
}; | |
diff --git a/Makefile b/Makefile | |
index ac292d6dd2627..59728422b9dbb 100644 | |
--- a/Makefile | |
+++ b/Makefile | |
@@ -1,7 +1,7 @@ | |
# SPDX-License-Identifier: GPL-2.0 | |
VERSION = 5 | |
PATCHLEVEL = 9 | |
-SUBLEVEL = 8 | |
+SUBLEVEL = 9 | |
EXTRAVERSION = | |
NAME = Kleptomaniac Octopus | |
@@ -973,8 +973,8 @@ KBUILD_CPPFLAGS += $(KCPPFLAGS) | |
KBUILD_AFLAGS += $(KAFLAGS) | |
KBUILD_CFLAGS += $(KCFLAGS) | |
-KBUILD_LDFLAGS_MODULE += --build-id | |
-LDFLAGS_vmlinux += --build-id | |
+KBUILD_LDFLAGS_MODULE += --build-id=sha1 | |
+LDFLAGS_vmlinux += --build-id=sha1 | |
ifeq ($(CONFIG_STRIP_ASM_SYMS),y) | |
LDFLAGS_vmlinux += $(call ld-option, -X,) | |
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S | |
index 17fd1ed700cca..9152782444b55 100644 | |
--- a/arch/arc/kernel/head.S | |
+++ b/arch/arc/kernel/head.S | |
@@ -67,7 +67,22 @@ | |
sr r5, [ARC_REG_LPB_CTRL] | |
1: | |
#endif /* CONFIG_ARC_LPB_DISABLE */ | |
-#endif | |
+ | |
+ /* On HSDK, CCMs need to remapped super early */ | |
+#ifdef CONFIG_ARC_SOC_HSDK | |
+ mov r6, 0x60000000 | |
+ lr r5, [ARC_REG_ICCM_BUILD] | |
+ breq r5, 0, 1f | |
+ sr r6, [ARC_REG_AUX_ICCM] | |
+1: | |
+ lr r5, [ARC_REG_DCCM_BUILD] | |
+ breq r5, 0, 2f | |
+ sr r6, [ARC_REG_AUX_DCCM] | |
+2: | |
+#endif /* CONFIG_ARC_SOC_HSDK */ | |
+ | |
+#endif /* CONFIG_ISA_ARCV2 */ | |
+ | |
; Config DSP_CTRL properly, so kernel may use integer multiply, | |
; multiply-accumulate, and divide operations | |
DSP_EARLY_INIT | |
diff --git a/arch/arc/plat-hsdk/platform.c b/arch/arc/plat-hsdk/platform.c | |
index 0b961a2a10b8e..22c9e2c9c0283 100644 | |
--- a/arch/arc/plat-hsdk/platform.c | |
+++ b/arch/arc/plat-hsdk/platform.c | |
@@ -17,22 +17,6 @@ int arc_hsdk_axi_dmac_coherent __section(.data) = 0; | |
#define ARC_CCM_UNUSED_ADDR 0x60000000 | |
-static void __init hsdk_init_per_cpu(unsigned int cpu) | |
-{ | |
- /* | |
- * By default ICCM is mapped to 0x7z while this area is used for | |
- * kernel virtual mappings, so move it to currently unused area. | |
- */ | |
- if (cpuinfo_arc700[cpu].iccm.sz) | |
- write_aux_reg(ARC_REG_AUX_ICCM, ARC_CCM_UNUSED_ADDR); | |
- | |
- /* | |
- * By default DCCM is mapped to 0x8z while this area is used by kernel, | |
- * so move it to currently unused area. | |
- */ | |
- if (cpuinfo_arc700[cpu].dccm.sz) | |
- write_aux_reg(ARC_REG_AUX_DCCM, ARC_CCM_UNUSED_ADDR); | |
-} | |
#define ARC_PERIPHERAL_BASE 0xf0000000 | |
#define CREG_BASE (ARC_PERIPHERAL_BASE + 0x1000) | |
@@ -339,5 +323,4 @@ static const char *hsdk_compat[] __initconst = { | |
MACHINE_START(SIMULATION, "hsdk") | |
.dt_compat = hsdk_compat, | |
.init_early = hsdk_init_early, | |
- .init_per_cpu = hsdk_init_per_cpu, | |
MACHINE_END | |
diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h | |
index 213607a1f45c1..e26a278d301ab 100644 | |
--- a/arch/arm/include/asm/kprobes.h | |
+++ b/arch/arm/include/asm/kprobes.h | |
@@ -44,20 +44,20 @@ int kprobe_exceptions_notify(struct notifier_block *self, | |
unsigned long val, void *data); | |
/* optinsn template addresses */ | |
-extern __visible kprobe_opcode_t optprobe_template_entry; | |
-extern __visible kprobe_opcode_t optprobe_template_val; | |
-extern __visible kprobe_opcode_t optprobe_template_call; | |
-extern __visible kprobe_opcode_t optprobe_template_end; | |
-extern __visible kprobe_opcode_t optprobe_template_sub_sp; | |
-extern __visible kprobe_opcode_t optprobe_template_add_sp; | |
-extern __visible kprobe_opcode_t optprobe_template_restore_begin; | |
-extern __visible kprobe_opcode_t optprobe_template_restore_orig_insn; | |
-extern __visible kprobe_opcode_t optprobe_template_restore_end; | |
+extern __visible kprobe_opcode_t optprobe_template_entry[]; | |
+extern __visible kprobe_opcode_t optprobe_template_val[]; | |
+extern __visible kprobe_opcode_t optprobe_template_call[]; | |
+extern __visible kprobe_opcode_t optprobe_template_end[]; | |
+extern __visible kprobe_opcode_t optprobe_template_sub_sp[]; | |
+extern __visible kprobe_opcode_t optprobe_template_add_sp[]; | |
+extern __visible kprobe_opcode_t optprobe_template_restore_begin[]; | |
+extern __visible kprobe_opcode_t optprobe_template_restore_orig_insn[]; | |
+extern __visible kprobe_opcode_t optprobe_template_restore_end[]; | |
#define MAX_OPTIMIZED_LENGTH 4 | |
#define MAX_OPTINSN_SIZE \ | |
- ((unsigned long)&optprobe_template_end - \ | |
- (unsigned long)&optprobe_template_entry) | |
+ ((unsigned long)optprobe_template_end - \ | |
+ (unsigned long)optprobe_template_entry) | |
#define RELATIVEJUMP_SIZE 4 | |
struct arch_optimized_insn { | |
diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c | |
index 7a449df0b3591..c78180172120f 100644 | |
--- a/arch/arm/probes/kprobes/opt-arm.c | |
+++ b/arch/arm/probes/kprobes/opt-arm.c | |
@@ -85,21 +85,21 @@ asm ( | |
"optprobe_template_end:\n"); | |
#define TMPL_VAL_IDX \ | |
- ((unsigned long *)&optprobe_template_val - (unsigned long *)&optprobe_template_entry) | |
+ ((unsigned long *)optprobe_template_val - (unsigned long *)optprobe_template_entry) | |
#define TMPL_CALL_IDX \ | |
- ((unsigned long *)&optprobe_template_call - (unsigned long *)&optprobe_template_entry) | |
+ ((unsigned long *)optprobe_template_call - (unsigned long *)optprobe_template_entry) | |
#define TMPL_END_IDX \ | |
- ((unsigned long *)&optprobe_template_end - (unsigned long *)&optprobe_template_entry) | |
+ ((unsigned long *)optprobe_template_end - (unsigned long *)optprobe_template_entry) | |
#define TMPL_ADD_SP \ | |
- ((unsigned long *)&optprobe_template_add_sp - (unsigned long *)&optprobe_template_entry) | |
+ ((unsigned long *)optprobe_template_add_sp - (unsigned long *)optprobe_template_entry) | |
#define TMPL_SUB_SP \ | |
- ((unsigned long *)&optprobe_template_sub_sp - (unsigned long *)&optprobe_template_entry) | |
+ ((unsigned long *)optprobe_template_sub_sp - (unsigned long *)optprobe_template_entry) | |
#define TMPL_RESTORE_BEGIN \ | |
- ((unsigned long *)&optprobe_template_restore_begin - (unsigned long *)&optprobe_template_entry) | |
+ ((unsigned long *)optprobe_template_restore_begin - (unsigned long *)optprobe_template_entry) | |
#define TMPL_RESTORE_ORIGN_INSN \ | |
- ((unsigned long *)&optprobe_template_restore_orig_insn - (unsigned long *)&optprobe_template_entry) | |
+ ((unsigned long *)optprobe_template_restore_orig_insn - (unsigned long *)optprobe_template_entry) | |
#define TMPL_RESTORE_END \ | |
- ((unsigned long *)&optprobe_template_restore_end - (unsigned long *)&optprobe_template_entry) | |
+ ((unsigned long *)optprobe_template_restore_end - (unsigned long *)optprobe_template_entry) | |
/* | |
* ARM can always optimize an instruction when using ARM ISA, except | |
@@ -234,7 +234,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *or | |
} | |
/* Copy arch-dep-instance from template. */ | |
- memcpy(code, (unsigned long *)&optprobe_template_entry, | |
+ memcpy(code, (unsigned long *)optprobe_template_entry, | |
TMPL_END_IDX * sizeof(kprobe_opcode_t)); | |
/* Adjust buffer according to instruction. */ | |
diff --git a/arch/arm/vdso/Makefile b/arch/arm/vdso/Makefile | |
index a54f70731d9f1..150ce6e6a5d31 100644 | |
--- a/arch/arm/vdso/Makefile | |
+++ b/arch/arm/vdso/Makefile | |
@@ -19,7 +19,7 @@ ccflags-y += -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO32 | |
ldflags-$(CONFIG_CPU_ENDIAN_BE8) := --be8 | |
ldflags-y := -Bsymbolic --no-undefined -soname=linux-vdso.so.1 \ | |
-z max-page-size=4096 -nostdlib -shared $(ldflags-y) \ | |
- --hash-style=sysv --build-id \ | |
+ --hash-style=sysv --build-id=sha1 \ | |
-T | |
obj-$(CONFIG_VDSO) += vdso.o | |
diff --git a/arch/arm64/kernel/kexec_image.c b/arch/arm64/kernel/kexec_image.c | |
index af9987c154cab..66adee8b5fc81 100644 | |
--- a/arch/arm64/kernel/kexec_image.c | |
+++ b/arch/arm64/kernel/kexec_image.c | |
@@ -43,7 +43,7 @@ static void *image_load(struct kimage *image, | |
u64 flags, value; | |
bool be_image, be_kernel; | |
struct kexec_buf kbuf; | |
- unsigned long text_offset; | |
+ unsigned long text_offset, kernel_segment_number; | |
struct kexec_segment *kernel_segment; | |
int ret; | |
@@ -88,11 +88,37 @@ static void *image_load(struct kimage *image, | |
/* Adjust kernel segment with TEXT_OFFSET */ | |
kbuf.memsz += text_offset; | |
- ret = kexec_add_buffer(&kbuf); | |
- if (ret) | |
+ kernel_segment_number = image->nr_segments; | |
+ | |
+ /* | |
+ * The location of the kernel segment may make it impossible to satisfy | |
+ * the other segment requirements, so we try repeatedly to find a | |
+ * location that will work. | |
+ */ | |
+ while ((ret = kexec_add_buffer(&kbuf)) == 0) { | |
+ /* Try to load additional data */ | |
+ kernel_segment = &image->segment[kernel_segment_number]; | |
+ ret = load_other_segments(image, kernel_segment->mem, | |
+ kernel_segment->memsz, initrd, | |
+ initrd_len, cmdline); | |
+ if (!ret) | |
+ break; | |
+ | |
+ /* | |
+ * We couldn't find space for the other segments; erase the | |
+ * kernel segment and try the next available hole. | |
+ */ | |
+ image->nr_segments -= 1; | |
+ kbuf.buf_min = kernel_segment->mem + kernel_segment->memsz; | |
+ kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; | |
+ } | |
+ | |
+ if (ret) { | |
+ pr_err("Could not find any suitable kernel location!"); | |
return ERR_PTR(ret); | |
+ } | |
- kernel_segment = &image->segment[image->nr_segments - 1]; | |
+ kernel_segment = &image->segment[kernel_segment_number]; | |
kernel_segment->mem += text_offset; | |
kernel_segment->memsz -= text_offset; | |
image->start = kernel_segment->mem; | |
@@ -101,12 +127,7 @@ static void *image_load(struct kimage *image, | |
kernel_segment->mem, kbuf.bufsz, | |
kernel_segment->memsz); | |
- /* Load additional data */ | |
- ret = load_other_segments(image, | |
- kernel_segment->mem, kernel_segment->memsz, | |
- initrd, initrd_len, cmdline); | |
- | |
- return ERR_PTR(ret); | |
+ return 0; | |
} | |
#ifdef CONFIG_KEXEC_IMAGE_VERIFY_SIG | |
diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c | |
index 361a1143e09ee..e443df8569881 100644 | |
--- a/arch/arm64/kernel/machine_kexec_file.c | |
+++ b/arch/arm64/kernel/machine_kexec_file.c | |
@@ -242,6 +242,11 @@ static int prepare_elf_headers(void **addr, unsigned long *sz) | |
return ret; | |
} | |
+/* | |
+ * Tries to add the initrd and DTB to the image. If it is not possible to find | |
+ * valid locations, this function will undo changes to the image and return non | |
+ * zero. | |
+ */ | |
int load_other_segments(struct kimage *image, | |
unsigned long kernel_load_addr, | |
unsigned long kernel_size, | |
@@ -250,7 +255,8 @@ int load_other_segments(struct kimage *image, | |
{ | |
struct kexec_buf kbuf; | |
void *headers, *dtb = NULL; | |
- unsigned long headers_sz, initrd_load_addr = 0, dtb_len; | |
+ unsigned long headers_sz, initrd_load_addr = 0, dtb_len, | |
+ orig_segments = image->nr_segments; | |
int ret = 0; | |
kbuf.image = image; | |
@@ -336,6 +342,7 @@ int load_other_segments(struct kimage *image, | |
return 0; | |
out_err: | |
+ image->nr_segments = orig_segments; | |
vfree(dtb); | |
return ret; | |
} | |
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile | |
index 45d5cfe464290..871915097f9d1 100644 | |
--- a/arch/arm64/kernel/vdso/Makefile | |
+++ b/arch/arm64/kernel/vdso/Makefile | |
@@ -24,7 +24,7 @@ btildflags-$(CONFIG_ARM64_BTI_KERNEL) += -z force-bti | |
# routines, as x86 does (see 6f121e548f83 ("x86, vdso: Reimplement vdso.so | |
# preparation in build-time C")). | |
ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv \ | |
- -Bsymbolic $(call ld-option, --no-eh-frame-hdr) --build-id -n \ | |
+ -Bsymbolic $(call ld-option, --no-eh-frame-hdr) --build-id=sha1 -n \ | |
$(btildflags-y) -T | |
ccflags-y := -fno-common -fno-builtin -fno-stack-protector -ffixed-x18 | |
diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile | |
index d6adb4677c25f..4fa4b3fe8efb7 100644 | |
--- a/arch/arm64/kernel/vdso32/Makefile | |
+++ b/arch/arm64/kernel/vdso32/Makefile | |
@@ -128,7 +128,7 @@ VDSO_LDFLAGS += -Wl,-Bsymbolic -Wl,--no-undefined -Wl,-soname=linux-vdso.so.1 | |
VDSO_LDFLAGS += -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096 | |
VDSO_LDFLAGS += -nostdlib -shared -mfloat-abi=soft | |
VDSO_LDFLAGS += -Wl,--hash-style=sysv | |
-VDSO_LDFLAGS += -Wl,--build-id | |
+VDSO_LDFLAGS += -Wl,--build-id=sha1 | |
VDSO_LDFLAGS += $(call cc32-ldoption,-fuse-ld=bfd) | |
diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c | |
index 550dfa3e53cdd..b8e7f6c4524f4 100644 | |
--- a/arch/arm64/kvm/hypercalls.c | |
+++ b/arch/arm64/kvm/hypercalls.c | |
@@ -31,7 +31,7 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu) | |
val = SMCCC_RET_SUCCESS; | |
break; | |
case KVM_BP_HARDEN_NOT_REQUIRED: | |
- val = SMCCC_RET_NOT_REQUIRED; | |
+ val = SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED; | |
break; | |
} | |
break; | |
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c | |
index 3d26b47a13430..7a4ad984d54e0 100644 | |
--- a/arch/arm64/kvm/mmu.c | |
+++ b/arch/arm64/kvm/mmu.c | |
@@ -1920,6 +1920,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, | |
if (kvm_is_device_pfn(pfn)) { | |
mem_type = PAGE_S2_DEVICE; | |
flags |= KVM_S2PTE_FLAG_IS_IOMAP; | |
+ force_pte = true; | |
} else if (logging_active) { | |
/* | |
* Faults on pages in a memslot with logging enabled | |
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c | |
index de5a5a80ae99a..f24f659f5a31e 100644 | |
--- a/arch/arm64/kvm/sys_regs.c | |
+++ b/arch/arm64/kvm/sys_regs.c | |
@@ -1193,16 +1193,6 @@ static unsigned int sve_visibility(const struct kvm_vcpu *vcpu, | |
return REG_HIDDEN_USER | REG_HIDDEN_GUEST; | |
} | |
-/* Visibility overrides for SVE-specific ID registers */ | |
-static unsigned int sve_id_visibility(const struct kvm_vcpu *vcpu, | |
- const struct sys_reg_desc *rd) | |
-{ | |
- if (vcpu_has_sve(vcpu)) | |
- return 0; | |
- | |
- return REG_HIDDEN_USER; | |
-} | |
- | |
/* Generate the emulated ID_AA64ZFR0_EL1 value exposed to the guest */ | |
static u64 guest_id_aa64zfr0_el1(const struct kvm_vcpu *vcpu) | |
{ | |
@@ -1229,9 +1219,6 @@ static int get_id_aa64zfr0_el1(struct kvm_vcpu *vcpu, | |
{ | |
u64 val; | |
- if (WARN_ON(!vcpu_has_sve(vcpu))) | |
- return -ENOENT; | |
- | |
val = guest_id_aa64zfr0_el1(vcpu); | |
return reg_to_user(uaddr, &val, reg->id); | |
} | |
@@ -1244,9 +1231,6 @@ static int set_id_aa64zfr0_el1(struct kvm_vcpu *vcpu, | |
int err; | |
u64 val; | |
- if (WARN_ON(!vcpu_has_sve(vcpu))) | |
- return -ENOENT; | |
- | |
err = reg_from_user(&val, uaddr, id); | |
if (err) | |
return err; | |
@@ -1509,7 +1493,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { | |
ID_SANITISED(ID_AA64PFR1_EL1), | |
ID_UNALLOCATED(4,2), | |
ID_UNALLOCATED(4,3), | |
- { SYS_DESC(SYS_ID_AA64ZFR0_EL1), access_id_aa64zfr0_el1, .get_user = get_id_aa64zfr0_el1, .set_user = set_id_aa64zfr0_el1, .visibility = sve_id_visibility }, | |
+ { SYS_DESC(SYS_ID_AA64ZFR0_EL1), access_id_aa64zfr0_el1, .get_user = get_id_aa64zfr0_el1, .set_user = set_id_aa64zfr0_el1, }, | |
ID_UNALLOCATED(4,5), | |
ID_UNALLOCATED(4,6), | |
ID_UNALLOCATED(4,7), | |
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c | |
index 75df62fea1b68..a834e7fb0e250 100644 | |
--- a/arch/arm64/mm/mmu.c | |
+++ b/arch/arm64/mm/mmu.c | |
@@ -1433,11 +1433,28 @@ static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size) | |
free_empty_tables(start, end, PAGE_OFFSET, PAGE_END); | |
} | |
+static bool inside_linear_region(u64 start, u64 size) | |
+{ | |
+ /* | |
+ * Linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)] | |
+ * accommodating both its ends but excluding PAGE_END. Max physical | |
+ * range which can be mapped inside this linear mapping range, must | |
+ * also be derived from its end points. | |
+ */ | |
+ return start >= __pa(_PAGE_OFFSET(vabits_actual)) && | |
+ (start + size - 1) <= __pa(PAGE_END - 1); | |
+} | |
+ | |
int arch_add_memory(int nid, u64 start, u64 size, | |
struct mhp_params *params) | |
{ | |
int ret, flags = 0; | |
+ if (!inside_linear_region(start, size)) { | |
+ pr_err("[%llx %llx] is outside linear mapping region\n", start, start + size); | |
+ return -EINVAL; | |
+ } | |
+ | |
if (rodata_full || debug_pagealloc_enabled()) | |
flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; | |
diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile | |
index 57fe832352819..5810cc12bc1d9 100644 | |
--- a/arch/mips/vdso/Makefile | |
+++ b/arch/mips/vdso/Makefile | |
@@ -61,7 +61,7 @@ endif | |
# VDSO linker flags. | |
ldflags-y := -Bsymbolic --no-undefined -soname=linux-vdso.so.1 \ | |
$(filter -E%,$(KBUILD_CFLAGS)) -nostdlib -shared \ | |
- -G 0 --eh-frame-hdr --hash-style=sysv --build-id -T | |
+ -G 0 --eh-frame-hdr --hash-style=sysv --build-id=sha1 -T | |
CFLAGS_REMOVE_vdso.o = -pg | |
diff --git a/arch/powerpc/kernel/eeh_cache.c b/arch/powerpc/kernel/eeh_cache.c | |
index 6b50bf15d8c19..bf3270426d82d 100644 | |
--- a/arch/powerpc/kernel/eeh_cache.c | |
+++ b/arch/powerpc/kernel/eeh_cache.c | |
@@ -264,8 +264,9 @@ static int eeh_addr_cache_show(struct seq_file *s, void *v) | |
{ | |
struct pci_io_addr_range *piar; | |
struct rb_node *n; | |
+ unsigned long flags; | |
- spin_lock(&pci_io_addr_cache_root.piar_lock); | |
+ spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags); | |
for (n = rb_first(&pci_io_addr_cache_root.rb_root); n; n = rb_next(n)) { | |
piar = rb_entry(n, struct pci_io_addr_range, rb_node); | |
@@ -273,7 +274,7 @@ static int eeh_addr_cache_show(struct seq_file *s, void *v) | |
(piar->flags & IORESOURCE_IO) ? "i/o" : "mem", | |
&piar->addr_lo, &piar->addr_hi, pci_name(piar->pcidev)); | |
} | |
- spin_unlock(&pci_io_addr_cache_root.piar_lock); | |
+ spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags); | |
return 0; | |
} | |
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S | |
index a5a612deef66e..898c2fe4ac673 100644 | |
--- a/arch/powerpc/kernel/head_32.S | |
+++ b/arch/powerpc/kernel/head_32.S | |
@@ -472,11 +472,7 @@ InstructionTLBMiss: | |
cmplw 0,r1,r3 | |
#endif | |
mfspr r2, SPRN_SPRG_PGDIR | |
-#ifdef CONFIG_SWAP | |
li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC | |
-#else | |
- li r1,_PAGE_PRESENT | _PAGE_EXEC | |
-#endif | |
#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC) | |
bgt- 112f | |
lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */ | |
@@ -538,11 +534,7 @@ DataLoadTLBMiss: | |
lis r1, TASK_SIZE@h /* check if kernel address */ | |
cmplw 0,r1,r3 | |
mfspr r2, SPRN_SPRG_PGDIR | |
-#ifdef CONFIG_SWAP | |
li r1, _PAGE_PRESENT | _PAGE_ACCESSED | |
-#else | |
- li r1, _PAGE_PRESENT | |
-#endif | |
bgt- 112f | |
lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */ | |
addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */ | |
@@ -618,11 +610,7 @@ DataStoreTLBMiss: | |
lis r1, TASK_SIZE@h /* check if kernel address */ | |
cmplw 0,r1,r3 | |
mfspr r2, SPRN_SPRG_PGDIR | |
-#ifdef CONFIG_SWAP | |
li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED | |
-#else | |
- li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | |
-#endif | |
bgt- 112f | |
lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */ | |
addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */ | |
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S | |
index 0a4e81b8dc795..5a0ae2eaf5e2f 100644 | |
--- a/arch/riscv/kernel/head.S | |
+++ b/arch/riscv/kernel/head.S | |
@@ -27,12 +27,17 @@ ENTRY(_start) | |
/* reserved */ | |
.word 0 | |
.balign 8 | |
+#ifdef CONFIG_RISCV_M_MODE | |
+ /* Image load offset (0MB) from start of RAM for M-mode */ | |
+ .dword 0 | |
+#else | |
#if __riscv_xlen == 64 | |
/* Image load offset(2MB) from start of RAM */ | |
.dword 0x200000 | |
#else | |
/* Image load offset(4MB) from start of RAM */ | |
.dword 0x400000 | |
+#endif | |
#endif | |
/* Effective size of kernel image */ | |
.dword _end - _start | |
diff --git a/arch/riscv/kernel/vdso/.gitignore b/arch/riscv/kernel/vdso/.gitignore | |
index 11ebee9e4c1d6..3a19def868ecc 100644 | |
--- a/arch/riscv/kernel/vdso/.gitignore | |
+++ b/arch/riscv/kernel/vdso/.gitignore | |
@@ -1,3 +1,4 @@ | |
# SPDX-License-Identifier: GPL-2.0-only | |
vdso.lds | |
*.tmp | |
+vdso-syms.S | |
diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile | |
index 478e7338ddc10..cb8f9e4cfcbf8 100644 | |
--- a/arch/riscv/kernel/vdso/Makefile | |
+++ b/arch/riscv/kernel/vdso/Makefile | |
@@ -43,19 +43,14 @@ $(obj)/vdso.o: $(obj)/vdso.so | |
SYSCFLAGS_vdso.so.dbg = $(c_flags) | |
$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE | |
$(call if_changed,vdsold) | |
+SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \ | |
+ -Wl,--build-id -Wl,--hash-style=both | |
# We also create a special relocatable object that should mirror the symbol | |
# table and layout of the linked DSO. With ld --just-symbols we can then | |
# refer to these symbols in the kernel code rather than hand-coded addresses. | |
- | |
-SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \ | |
- -Wl,--build-id -Wl,--hash-style=both | |
-$(obj)/vdso-dummy.o: $(src)/vdso.lds $(obj)/rt_sigreturn.o FORCE | |
- $(call if_changed,vdsold) | |
- | |
-LDFLAGS_vdso-syms.o := -r --just-symbols | |
-$(obj)/vdso-syms.o: $(obj)/vdso-dummy.o FORCE | |
- $(call if_changed,ld) | |
+$(obj)/vdso-syms.S: $(obj)/vdso.so FORCE | |
+ $(call if_changed,so2s) | |
# strip rule for the .so file | |
$(obj)/%.so: OBJCOPYFLAGS := -S | |
@@ -73,6 +68,11 @@ quiet_cmd_vdsold = VDSOLD $@ | |
$(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \ | 
rm $@.tmp | 
+# Extracts symbol offsets from the VDSO, converting them into an assembly file | |
+# that contains the same symbols at the same offsets. | |
+quiet_cmd_so2s = SO2S $@ | |
+ cmd_so2s = $(NM) -D $< | $(srctree)/$(src)/so2s.sh > $@ | |
+ | |
# install commands for the unstripped file | |
quiet_cmd_vdso_install = INSTALL $@ | |
cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@ | 
diff --git a/arch/riscv/kernel/vdso/so2s.sh b/arch/riscv/kernel/vdso/so2s.sh | |
new file mode 100755 | |
index 0000000000000..e64cb6d9440e7 | |
--- /dev/null | |
+++ b/arch/riscv/kernel/vdso/so2s.sh | |
@@ -0,0 +1,6 @@ | |
+#!/bin/sh | |
+# SPDX-License-Identifier: GPL-2.0+ | |
+# Copyright 2020 Palmer Dabbelt <palmer@dabbelt.com> | 
+ | |
+sed 's!\([0-9a-f]*\) T \([a-z0-9_]*\)\(@@LINUX_4.15\)*!.global \2\n.set \2,0x\1!' \ | |
+| grep '^\.' | |
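As a worked example (the symbol name and offset are invented), a single "nm -D" line run through the new helper turns into two assembler directives:

# Illustrative only: feed one fake dynamic-symbol line through the script.
printf '0000000000000800 T __vdso_gettimeofday@@LINUX_4.15\n' \
    | sh arch/riscv/kernel/vdso/so2s.sh
# expected output:
#   .global __vdso_gettimeofday
#   .set __vdso_gettimeofday,0x0000000000000800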
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c | |
index 85700bd85f98d..3b4c3140c18e7 100644 | |
--- a/arch/s390/kernel/smp.c | |
+++ b/arch/s390/kernel/smp.c | |
@@ -855,13 +855,14 @@ void __init smp_detect_cpus(void) | |
static void smp_init_secondary(void) | |
{ | |
- int cpu = smp_processor_id(); | |
+ int cpu = raw_smp_processor_id(); | |
S390_lowcore.last_update_clock = get_tod_clock(); | |
restore_access_regs(S390_lowcore.access_regs_save_area); | |
set_cpu_flag(CIF_ASCE_PRIMARY); | |
set_cpu_flag(CIF_ASCE_SECONDARY); | |
cpu_init(); | |
+ rcu_cpu_starting(cpu); | |
preempt_disable(); | |
init_cpu_timer(); | |
vtime_init(); | |
diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile | |
index 4a66a1cb919b1..edc473b32e420 100644 | |
--- a/arch/s390/kernel/vdso64/Makefile | |
+++ b/arch/s390/kernel/vdso64/Makefile | |
@@ -19,7 +19,7 @@ KBUILD_AFLAGS_64 += -m64 -s | |
KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS)) | |
KBUILD_CFLAGS_64 += -m64 -fPIC -shared -fno-common -fno-builtin | |
ldflags-y := -fPIC -shared -nostdlib -soname=linux-vdso64.so.1 \ | |
- --hash-style=both --build-id -T | |
+ --hash-style=both --build-id=sha1 -T | |
$(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_64) | |
$(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_64) | |
diff --git a/arch/sparc/vdso/Makefile b/arch/sparc/vdso/Makefile | |
index f44355e46f31f..469dd23887abb 100644 | |
--- a/arch/sparc/vdso/Makefile | |
+++ b/arch/sparc/vdso/Makefile | |
@@ -115,7 +115,7 @@ quiet_cmd_vdso = VDSO $@ | |
-T $(filter %.lds,$^) $(filter %.o,$^) && \ | |
sh $(srctree)/$(src)/checkundef.sh '$(OBJDUMP)' '$@' | |
-VDSO_LDFLAGS = -shared --hash-style=both --build-id -Bsymbolic | |
+VDSO_LDFLAGS = -shared --hash-style=both --build-id=sha1 -Bsymbolic | |
GCOV_PROFILE := n | |
# | |
diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S | |
index dd07e7b41b115..3092ae173f94e 100644 | |
--- a/arch/x86/boot/compressed/mem_encrypt.S | |
+++ b/arch/x86/boot/compressed/mem_encrypt.S | |
@@ -81,6 +81,19 @@ SYM_FUNC_START(set_sev_encryption_mask) | |
bts %rax, sme_me_mask(%rip) /* Create the encryption mask */ | |
+ /* | |
+ * Read MSR_AMD64_SEV again and store it to sev_status. Can't do this in | |
+ * get_sev_encryption_bit() because this function is 32-bit code and | |
+ * shared between 64-bit and 32-bit boot path. | |
+ */ | |
+ movl $MSR_AMD64_SEV, %ecx /* Read the SEV MSR */ | |
+ rdmsr | |
+ | |
+ /* Store MSR value in sev_status */ | |
+ shlq $32, %rdx | |
+ orq %rdx, %rax | |
+ movq %rax, sev_status(%rip) | |
+ | |
.Lno_sev_mask: | |
movq %rbp, %rsp /* Restore original stack pointer */ | |
@@ -96,5 +109,6 @@ SYM_FUNC_END(set_sev_encryption_mask) | |
#ifdef CONFIG_AMD_MEM_ENCRYPT | |
.balign 8 | |
-SYM_DATA(sme_me_mask, .quad 0) | |
+SYM_DATA(sme_me_mask, .quad 0) | |
+SYM_DATA(sev_status, .quad 0) | |
#endif | |
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile | |
index 215376d975a29..ebba25ed9a386 100644 | |
--- a/arch/x86/entry/vdso/Makefile | |
+++ b/arch/x86/entry/vdso/Makefile | |
@@ -176,7 +176,7 @@ quiet_cmd_vdso = VDSO $@ | |
-T $(filter %.lds,$^) $(filter %.o,$^) && \ | |
sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@' | |
-VDSO_LDFLAGS = -shared --hash-style=both --build-id \ | |
+VDSO_LDFLAGS = -shared --hash-style=both --build-id=sha1 \ | |
$(call ld-option, --eh-frame-hdr) -Bsymbolic | |
GCOV_PROFILE := n | |
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c | |
index d3f0db463f96a..581fb7223ad0e 100644 | |
--- a/arch/x86/kernel/cpu/bugs.c | |
+++ b/arch/x86/kernel/cpu/bugs.c | |
@@ -1254,6 +1254,14 @@ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) | |
return 0; | |
} | |
+static bool is_spec_ib_user_controlled(void) | |
+{ | |
+ return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL || | |
+ spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || | |
+ spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL || | |
+ spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP; | |
+} | |
+ | |
static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) | |
{ | |
switch (ctrl) { | |
@@ -1261,16 +1269,26 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) | |
if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && | |
spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) | |
return 0; | |
+ | |
/* | |
- * Indirect branch speculation is always disabled in strict | |
- * mode. It can neither be enabled if it was force-disabled | |
- * by a previous prctl call. | |
+ * With strict mode for both IBPB and STIBP, the instruction | |
+ * code paths avoid checking this task flag and instead, | |
+ * unconditionally run the instruction. However, STIBP and IBPB | |
+ * are independent and either can be set to conditionally | |
+ * enabled regardless of the mode of the other. | |
+ * | |
+ * If either is set to conditional, allow the task flag to be | |
+ * updated, unless it was force-disabled by a previous prctl | |
+ * call. Currently, this is possible on an AMD CPU which has the | |
+ * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the | |
+ * kernel is booted with 'spectre_v2_user=seccomp', then | |
+ * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and | |
+ * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED. | |
*/ | |
- if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT || | |
- spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || | |
- spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED || | |
+ if (!is_spec_ib_user_controlled() || | |
task_spec_ib_force_disable(task)) | |
return -EPERM; | |
+ | |
task_clear_spec_ib_disable(task); | |
task_update_spec_tif(task); | |
break; | |
@@ -1283,10 +1301,10 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) | |
if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && | |
spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) | |
return -EPERM; | |
- if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT || | |
- spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || | |
- spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED) | |
+ | |
+ if (!is_spec_ib_user_controlled()) | |
return 0; | |
+ | |
task_set_spec_ib_disable(task); | |
if (ctrl == PR_SPEC_FORCE_DISABLE) | |
task_set_spec_ib_force_disable(task); | |
@@ -1351,20 +1369,17 @@ static int ib_prctl_get(struct task_struct *task) | |
if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && | |
spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) | |
return PR_SPEC_ENABLE; | |
- else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT || | |
- spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || | |
- spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED) | |
- return PR_SPEC_DISABLE; | |
- else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL || | |
- spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || | |
- spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL || | |
- spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) { | |
+ else if (is_spec_ib_user_controlled()) { | |
if (task_spec_ib_force_disable(task)) | |
return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; | |
if (task_spec_ib_disable(task)) | |
return PR_SPEC_PRCTL | PR_SPEC_DISABLE; | |
return PR_SPEC_PRCTL | PR_SPEC_ENABLE; | |
- } else | |
+ } else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT || | |
+ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || | |
+ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED) | |
+ return PR_SPEC_DISABLE; | |
+ else | |
return PR_SPEC_NOT_AFFECTED; | |
} | |
diff --git a/block/genhd.c b/block/genhd.c | |
index 99c64641c3148..c50ddbf2a2294 100644 | |
--- a/block/genhd.c | |
+++ b/block/genhd.c | |
@@ -49,7 +49,7 @@ static void disk_release_events(struct gendisk *disk); | |
* Set disk capacity and notify if the size is not currently | |
* zero and will not be set to zero | |
*/ | |
-void set_capacity_revalidate_and_notify(struct gendisk *disk, sector_t size, | |
+bool set_capacity_revalidate_and_notify(struct gendisk *disk, sector_t size, | |
bool revalidate) | |
{ | |
sector_t capacity = get_capacity(disk); | |
@@ -63,7 +63,10 @@ void set_capacity_revalidate_and_notify(struct gendisk *disk, sector_t size, | |
char *envp[] = { "RESIZE=1", NULL }; | |
kobject_uevent_env(&disk_to_dev(disk)->kobj, KOBJ_CHANGE, envp); | |
+ return true; | |
} | |
+ | |
+ return false; | |
} | |
EXPORT_SYMBOL_GPL(set_capacity_revalidate_and_notify); | |
diff --git a/drivers/accessibility/speakup/main.c b/drivers/accessibility/speakup/main.c | |
index ddfd12afe3b9a..9db09012a00ef 100644 | |
--- a/drivers/accessibility/speakup/main.c | |
+++ b/drivers/accessibility/speakup/main.c | |
@@ -357,7 +357,6 @@ static void speakup_cut(struct vc_data *vc) | |
mark_cut_flag = 0; | |
synth_printf("%s\n", spk_msg_get(MSG_CUT)); | |
- speakup_clear_selection(); | |
ret = speakup_set_selection(tty); | |
switch (ret) { | |
diff --git a/drivers/accessibility/speakup/selection.c b/drivers/accessibility/speakup/selection.c | |
index 032f3264fba12..7df7afad5ab42 100644 | |
--- a/drivers/accessibility/speakup/selection.c | |
+++ b/drivers/accessibility/speakup/selection.c | |
@@ -22,13 +22,6 @@ struct speakup_selection_work { | |
struct tty_struct *tty; | |
}; | |
-void speakup_clear_selection(void) | |
-{ | |
- console_lock(); | |
- clear_selection(); | |
- console_unlock(); | |
-} | |
- | |
static void __speakup_set_selection(struct work_struct *work) | |
{ | |
struct speakup_selection_work *ssw = | |
@@ -51,6 +44,10 @@ static void __speakup_set_selection(struct work_struct *work) | |
goto unref; | |
} | |
+ console_lock(); | |
+ clear_selection(); | |
+ console_unlock(); | |
+ | |
set_selection_kernel(&sel, tty); | |
unref: | |
diff --git a/drivers/accessibility/speakup/speakup.h b/drivers/accessibility/speakup/speakup.h | |
index 74fe49c2c5110..33594f5a79837 100644 | |
--- a/drivers/accessibility/speakup/speakup.h | |
+++ b/drivers/accessibility/speakup/speakup.h | |
@@ -70,7 +70,6 @@ void spk_do_flush(void); | |
void speakup_start_ttys(void); | |
void synth_buffer_add(u16 ch); | |
void synth_buffer_clear(void); | |
-void speakup_clear_selection(void); | |
int speakup_set_selection(struct tty_struct *tty); | |
void speakup_cancel_selection(void); | |
int speakup_paste_selection(struct tty_struct *tty); | |
diff --git a/drivers/accessibility/speakup/spk_ttyio.c b/drivers/accessibility/speakup/spk_ttyio.c | |
index a831ff64f8ba5..ecc39983e9464 100644 | |
--- a/drivers/accessibility/speakup/spk_ttyio.c | |
+++ b/drivers/accessibility/speakup/spk_ttyio.c | |
@@ -298,11 +298,13 @@ static unsigned char ttyio_in(int timeout) | |
struct spk_ldisc_data *ldisc_data = speakup_tty->disc_data; | |
char rv; | |
- if (wait_for_completion_timeout(&ldisc_data->completion, | |
+ if (!timeout) { | |
+ if (!try_wait_for_completion(&ldisc_data->completion)) | |
+ return 0xff; | |
+ } else if (wait_for_completion_timeout(&ldisc_data->completion, | |
usecs_to_jiffies(timeout)) == 0) { | |
- if (timeout) | |
- pr_warn("spk_ttyio: timeout (%d) while waiting for input\n", | |
- timeout); | |
+ pr_warn("spk_ttyio: timeout (%d) while waiting for input\n", | |
+ timeout); | |
return 0xff; | |
} | |
diff --git a/drivers/accessibility/speakup/spk_types.h b/drivers/accessibility/speakup/spk_types.h | |
index 7398f1196e103..91fca3033a45a 100644 | |
--- a/drivers/accessibility/speakup/spk_types.h | |
+++ b/drivers/accessibility/speakup/spk_types.h | |
@@ -32,6 +32,10 @@ enum { | |
E_NEW_DEFAULT, | |
}; | |
+/* | |
+ * Note: add new members at the end, speakupmap.h depends on the values of the | |
+ * enum starting from SPELL_DELAY (see inc_dec_var) | |
+ */ | |
enum var_id_t { | |
VERSION = 0, SYNTH, SILENT, SYNTH_DIRECT, | |
KEYMAP, CHARS, | |
@@ -42,9 +46,9 @@ enum var_id_t { | |
SAY_CONTROL, SAY_WORD_CTL, NO_INTERRUPT, KEY_ECHO, | |
SPELL_DELAY, PUNC_LEVEL, READING_PUNC, | |
ATTRIB_BLEEP, BLEEPS, | |
- RATE, PITCH, INFLECTION, VOL, TONE, PUNCT, VOICE, FREQUENCY, LANG, | |
+ RATE, PITCH, VOL, TONE, PUNCT, VOICE, FREQUENCY, LANG, | |
DIRECT, PAUSE, | |
- CAPS_START, CAPS_STOP, CHARTAB, | |
+ CAPS_START, CAPS_STOP, CHARTAB, INFLECTION, | |
MAXVARS | |
}; | |
diff --git a/drivers/block/loop.c b/drivers/block/loop.c | |
index d3394191e1682..32fa3062736c4 100644 | |
--- a/drivers/block/loop.c | |
+++ b/drivers/block/loop.c | |
@@ -255,7 +255,8 @@ static void loop_set_size(struct loop_device *lo, loff_t size) | |
bd_set_size(bdev, size << SECTOR_SHIFT); | |
- set_capacity_revalidate_and_notify(lo->lo_disk, size, false); | |
+ if (!set_capacity_revalidate_and_notify(lo->lo_disk, size, false)) | |
+ kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); | |
} | |
static inline int | |
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c | |
index f46e26c9d9b3c..36c46fe078556 100644 | |
--- a/drivers/block/nbd.c | |
+++ b/drivers/block/nbd.c | |
@@ -296,7 +296,7 @@ static void nbd_size_clear(struct nbd_device *nbd) | |
} | |
} | |
-static void nbd_size_update(struct nbd_device *nbd) | |
+static void nbd_size_update(struct nbd_device *nbd, bool start) | |
{ | |
struct nbd_config *config = nbd->config; | |
struct block_device *bdev = bdget_disk(nbd->disk, 0); | |
@@ -312,7 +312,8 @@ static void nbd_size_update(struct nbd_device *nbd) | |
if (bdev) { | |
if (bdev->bd_disk) { | |
bd_set_size(bdev, config->bytesize); | |
- set_blocksize(bdev, config->blksize); | |
+ if (start) | |
+ set_blocksize(bdev, config->blksize); | |
} else | |
bdev->bd_invalidated = 1; | |
bdput(bdev); | |
@@ -327,7 +328,7 @@ static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize, | |
config->blksize = blocksize; | |
config->bytesize = blocksize * nr_blocks; | |
if (nbd->task_recv != NULL) | |
- nbd_size_update(nbd); | |
+ nbd_size_update(nbd, false); | |
} | |
static void nbd_complete_rq(struct request *req) | |
@@ -1307,7 +1308,7 @@ static int nbd_start_device(struct nbd_device *nbd) | |
args->index = i; | |
queue_work(nbd->recv_workq, &args->work); | |
} | |
- nbd_size_update(nbd); | |
+ nbd_size_update(nbd, true); | |
return error; | |
} | |
@@ -1516,6 +1517,7 @@ static void nbd_release(struct gendisk *disk, fmode_t mode) | |
if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) && | |
bdev->bd_openers == 0) | |
nbd_disconnect_and_put(nbd); | |
+ bdput(bdev); | |
nbd_config_put(nbd); | |
nbd_put(nbd); | |
diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h | |
index 206309ecc7e4e..7562cd6cd6816 100644 | |
--- a/drivers/block/null_blk.h | |
+++ b/drivers/block/null_blk.h | |
@@ -44,6 +44,7 @@ struct nullb_device { | |
unsigned int nr_zones; | |
struct blk_zone *zones; | |
sector_t zone_size_sects; | |
+ spinlock_t zone_lock; | |
unsigned long *zone_locks; | |
unsigned long size; /* device size in MB */ | |
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c | |
index 495713d6c989b..d9102327357c2 100644 | |
--- a/drivers/block/null_blk_zoned.c | |
+++ b/drivers/block/null_blk_zoned.c | |
@@ -46,10 +46,20 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q) | |
if (!dev->zones) | |
return -ENOMEM; | |
- dev->zone_locks = bitmap_zalloc(dev->nr_zones, GFP_KERNEL); | |
- if (!dev->zone_locks) { | |
- kvfree(dev->zones); | |
- return -ENOMEM; | |
+ /* | |
+ * With memory backing, the zone_lock spinlock needs to be temporarily | |
+ * released to avoid scheduling in atomic context. To guarantee zone | |
+ * information protection, use a bitmap to lock zones with | |
+ * wait_on_bit_lock_io(). Sleeping on the lock is OK as memory backing | |
+ * implies that the queue is marked with BLK_MQ_F_BLOCKING. | |
+ */ | |
+ spin_lock_init(&dev->zone_lock); | |
+ if (dev->memory_backed) { | |
+ dev->zone_locks = bitmap_zalloc(dev->nr_zones, GFP_KERNEL); | |
+ if (!dev->zone_locks) { | |
+ kvfree(dev->zones); | |
+ return -ENOMEM; | |
+ } | |
} | |
if (dev->zone_nr_conv >= dev->nr_zones) { | |
@@ -118,12 +128,16 @@ void null_free_zoned_dev(struct nullb_device *dev) | |
static inline void null_lock_zone(struct nullb_device *dev, unsigned int zno) | |
{ | |
- wait_on_bit_lock_io(dev->zone_locks, zno, TASK_UNINTERRUPTIBLE); | |
+ if (dev->memory_backed) | |
+ wait_on_bit_lock_io(dev->zone_locks, zno, TASK_UNINTERRUPTIBLE); | |
+ spin_lock_irq(&dev->zone_lock); | |
} | |
static inline void null_unlock_zone(struct nullb_device *dev, unsigned int zno) | |
{ | |
- clear_and_wake_up_bit(zno, dev->zone_locks); | |
+ spin_unlock_irq(&dev->zone_lock); | |
+ if (dev->memory_backed) | |
+ clear_and_wake_up_bit(zno, dev->zone_locks); | |
} | |
int null_report_zones(struct gendisk *disk, sector_t sector, | |
@@ -233,7 +247,12 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector, | |
if (zone->cond != BLK_ZONE_COND_EXP_OPEN) | |
zone->cond = BLK_ZONE_COND_IMP_OPEN; | |
+ if (dev->memory_backed) | |
+ spin_unlock_irq(&dev->zone_lock); | |
ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors); | |
+ if (dev->memory_backed) | |
+ spin_lock_irq(&dev->zone_lock); | |
+ | |
if (ret != BLK_STS_OK) | |
break; | |
diff --git a/drivers/char/tpm/eventlog/efi.c b/drivers/char/tpm/eventlog/efi.c | |
index 6bb023de17f1f..35229e5143cac 100644 | |
--- a/drivers/char/tpm/eventlog/efi.c | |
+++ b/drivers/char/tpm/eventlog/efi.c | |
@@ -41,6 +41,11 @@ int tpm_read_log_efi(struct tpm_chip *chip) | |
log_size = log_tbl->size; | |
memunmap(log_tbl); | |
+ if (!log_size) { | |
+ pr_warn("UEFI TPM log area empty\n"); | |
+ return -EIO; | |
+ } | |
+ | |
log_tbl = memremap(efi.tpm_log, sizeof(*log_tbl) + log_size, | |
MEMREMAP_WB); | |
if (!log_tbl) { | |
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c | |
index 0b214963539de..4ed6e660273a4 100644 | |
--- a/drivers/char/tpm/tpm_tis.c | |
+++ b/drivers/char/tpm/tpm_tis.c | |
@@ -27,6 +27,7 @@ | |
#include <linux/of.h> | |
#include <linux/of_device.h> | |
#include <linux/kernel.h> | |
+#include <linux/dmi.h> | |
#include "tpm.h" | |
#include "tpm_tis_core.h" | |
@@ -49,8 +50,8 @@ static inline struct tpm_tis_tcg_phy *to_tpm_tis_tcg_phy(struct tpm_tis_data *da | |
return container_of(data, struct tpm_tis_tcg_phy, priv); | |
} | |
-static bool interrupts = true; | |
-module_param(interrupts, bool, 0444); | |
+static int interrupts = -1; | |
+module_param(interrupts, int, 0444); | |
MODULE_PARM_DESC(interrupts, "Enable interrupts"); | |
static bool itpm; | |
@@ -63,6 +64,28 @@ module_param(force, bool, 0444); | |
MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry"); | |
#endif | |
+static int tpm_tis_disable_irq(const struct dmi_system_id *d) | |
+{ | |
+ if (interrupts == -1) { | |
+ pr_notice("tpm_tis: %s detected: disabling interrupts.\n", d->ident); | |
+ interrupts = 0; | |
+ } | |
+ | |
+ return 0; | |
+} | |
+ | |
+static const struct dmi_system_id tpm_tis_dmi_table[] = { | |
+ { | |
+ .callback = tpm_tis_disable_irq, | |
+ .ident = "ThinkPad T490s", | |
+ .matches = { | |
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | |
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T490s"), | |
+ }, | |
+ }, | |
+ {} | |
+}; | |
+ | |
#if defined(CONFIG_PNP) && defined(CONFIG_ACPI) | |
static int has_hid(struct acpi_device *dev, const char *hid) | |
{ | |
@@ -192,6 +215,8 @@ static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info) | |
int irq = -1; | |
int rc; | |
+ dmi_check_system(tpm_tis_dmi_table); | |
+ | |
rc = check_acpi_tpm2(dev); | |
if (rc) | |
return rc; | |
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c | |
index a2da8f768b94c..1836cc56e357b 100644 | |
--- a/drivers/char/virtio_console.c | |
+++ b/drivers/char/virtio_console.c | |
@@ -435,12 +435,12 @@ static struct port_buffer *alloc_buf(struct virtio_device *vdev, size_t buf_size | |
/* | |
* Allocate DMA memory from ancestor. When a virtio | |
* device is created by remoteproc, the DMA memory is | |
- * associated with the grandparent device: | |
- * vdev => rproc => platform-dev. | |
+ * associated with the parent device: | |
+ * virtioY => remoteprocX#vdevYbuffer. | |
*/ | |
- if (!vdev->dev.parent || !vdev->dev.parent->parent) | |
+ buf->dev = vdev->dev.parent; | |
+ if (!buf->dev) | |
goto free_buf; | |
- buf->dev = vdev->dev.parent->parent; | |
/* Increase device refcnt to avoid freeing it */ | |
get_device(buf->dev); | |
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c | |
index e97ff004ac6a9..ac0d529902ccd 100644 | |
--- a/drivers/cpufreq/cpufreq.c | |
+++ b/drivers/cpufreq/cpufreq.c | |
@@ -2233,7 +2233,7 @@ static int cpufreq_init_governor(struct cpufreq_policy *policy) | |
return -EINVAL; | |
/* Platform doesn't want dynamic frequency switching ? */ | |
- if (policy->governor->dynamic_switching && | |
+ if (policy->governor->flags & CPUFREQ_GOV_DYNAMIC_SWITCHING && | |
cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) { | |
struct cpufreq_governor *gov = cpufreq_fallback_governor(); | |
@@ -2259,6 +2259,8 @@ static int cpufreq_init_governor(struct cpufreq_policy *policy) | |
} | |
} | |
+ policy->strict_target = !!(policy->governor->flags & CPUFREQ_GOV_STRICT_TARGET); | |
+ | |
return 0; | |
} | |
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h | |
index c56773c257579..bab8e61403771 100644 | |
--- a/drivers/cpufreq/cpufreq_governor.h | |
+++ b/drivers/cpufreq/cpufreq_governor.h | |
@@ -156,7 +156,7 @@ void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy); | |
#define CPUFREQ_DBS_GOVERNOR_INITIALIZER(_name_) \ | |
{ \ | |
.name = _name_, \ | |
- .dynamic_switching = true, \ | |
+ .flags = CPUFREQ_GOV_DYNAMIC_SWITCHING, \ | |
.owner = THIS_MODULE, \ | |
.init = cpufreq_dbs_governor_init, \ | |
.exit = cpufreq_dbs_governor_exit, \ | |
diff --git a/drivers/cpufreq/cpufreq_performance.c b/drivers/cpufreq/cpufreq_performance.c | |
index 71c1d9aba7727..addd93f2a4202 100644 | |
--- a/drivers/cpufreq/cpufreq_performance.c | |
+++ b/drivers/cpufreq/cpufreq_performance.c | |
@@ -20,6 +20,7 @@ static void cpufreq_gov_performance_limits(struct cpufreq_policy *policy) | |
static struct cpufreq_governor cpufreq_gov_performance = { | |
.name = "performance", | |
.owner = THIS_MODULE, | |
+ .flags = CPUFREQ_GOV_STRICT_TARGET, | |
.limits = cpufreq_gov_performance_limits, | |
}; | |
diff --git a/drivers/cpufreq/cpufreq_powersave.c b/drivers/cpufreq/cpufreq_powersave.c | |
index 7749522355b59..8d830d860e912 100644 | |
--- a/drivers/cpufreq/cpufreq_powersave.c | |
+++ b/drivers/cpufreq/cpufreq_powersave.c | |
@@ -21,6 +21,7 @@ static struct cpufreq_governor cpufreq_gov_powersave = { | |
.name = "powersave", | |
.limits = cpufreq_gov_powersave_limits, | |
.owner = THIS_MODULE, | |
+ .flags = CPUFREQ_GOV_STRICT_TARGET, | |
}; | |
MODULE_AUTHOR("Dominik Brodowski <[email protected]>"); | |
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c | |
index ef15ec4959c5c..9397f5e75e7a7 100644 | |
--- a/drivers/cpufreq/intel_pstate.c | |
+++ b/drivers/cpufreq/intel_pstate.c | |
@@ -2509,7 +2509,7 @@ static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, in | |
} | |
static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 target_pstate, | |
- bool fast_switch) | |
+ bool strict, bool fast_switch) | |
{ | |
u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev; | |
@@ -2521,7 +2521,7 @@ static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 target_pstate, | |
* field in it, so opportunistically update the max too if needed. | |
*/ | |
value &= ~HWP_MAX_PERF(~0L); | |
- value |= HWP_MAX_PERF(cpu->max_perf_ratio); | |
+ value |= HWP_MAX_PERF(strict ? target_pstate : cpu->max_perf_ratio); | |
if (value == prev) | |
return; | |
@@ -2544,14 +2544,16 @@ static void intel_cpufreq_adjust_perf_ctl(struct cpudata *cpu, | |
pstate_funcs.get_val(cpu, target_pstate)); | |
} | |
-static int intel_cpufreq_update_pstate(struct cpudata *cpu, int target_pstate, | |
- bool fast_switch) | |
+static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy, | |
+ int target_pstate, bool fast_switch) | |
{ | |
+ struct cpudata *cpu = all_cpu_data[policy->cpu]; | |
int old_pstate = cpu->pstate.current_pstate; | |
target_pstate = intel_pstate_prepare_request(cpu, target_pstate); | |
if (hwp_active) { | |
- intel_cpufreq_adjust_hwp(cpu, target_pstate, fast_switch); | |
+ intel_cpufreq_adjust_hwp(cpu, target_pstate, | |
+ policy->strict_target, fast_switch); | |
cpu->pstate.current_pstate = target_pstate; | |
} else if (target_pstate != old_pstate) { | |
intel_cpufreq_adjust_perf_ctl(cpu, target_pstate, fast_switch); | |
@@ -2591,7 +2593,7 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy, | |
break; | |
} | |
- target_pstate = intel_cpufreq_update_pstate(cpu, target_pstate, false); | |
+ target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, false); | |
freqs.new = target_pstate * cpu->pstate.scaling; | |
@@ -2610,7 +2612,7 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy, | |
target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling); | |
- target_pstate = intel_cpufreq_update_pstate(cpu, target_pstate, true); | |
+ target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true); | |
return target_pstate * cpu->pstate.scaling; | |
} | |
diff --git a/drivers/crypto/chelsio/chcr_ktls.c b/drivers/crypto/chelsio/chcr_ktls.c | |
index c5cce024886ac..dc5e22bc64b39 100644 | |
--- a/drivers/crypto/chelsio/chcr_ktls.c | |
+++ b/drivers/crypto/chelsio/chcr_ktls.c | |
@@ -659,7 +659,8 @@ int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input) | |
} | |
static void *__chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info, | |
- u32 tid, void *pos, u16 word, u64 mask, | |
+ u32 tid, void *pos, u16 word, | |
+ struct sge_eth_txq *q, u64 mask, | |
u64 val, u32 reply) | |
{ | |
struct cpl_set_tcb_field_core *cpl; | |
@@ -668,7 +669,10 @@ static void *__chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info, | |
/* ULP_TXPKT */ | |
txpkt = pos; | |
- txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0)); | |
+ txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | | |
+ ULP_TXPKT_CHANNELID_V(tx_info->port_id) | | |
+ ULP_TXPKT_FID_V(q->q.cntxt_id) | | |
+ ULP_TXPKT_RO_F); | |
txpkt->len = htonl(DIV_ROUND_UP(CHCR_SET_TCB_FIELD_LEN, 16)); | |
/* ULPTX_IDATA sub-command */ | |
@@ -723,7 +727,7 @@ static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info, | |
} else { | |
u8 buf[48] = {0}; | |
- __chcr_write_cpl_set_tcb_ulp(tx_info, tid, buf, word, | |
+ __chcr_write_cpl_set_tcb_ulp(tx_info, tid, buf, word, q, | |
mask, val, reply); | |
return chcr_copy_to_txd(buf, &q->q, pos, | |
@@ -731,7 +735,7 @@ static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info, | |
} | |
} | |
- pos = __chcr_write_cpl_set_tcb_ulp(tx_info, tid, pos, word, | |
+ pos = __chcr_write_cpl_set_tcb_ulp(tx_info, tid, pos, word, q, | |
mask, val, reply); | |
/* check again if we are at the end of the queue */ | |
@@ -926,6 +930,7 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb, | |
struct iphdr *ip; | |
int credits; | |
u8 buf[150]; | |
+ u64 cntrl1; | |
void *pos; | |
iplen = skb_network_header_len(skb); | |
@@ -964,22 +969,28 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb, | |
TXPKT_PF_V(tx_info->adap->pf)); | |
cpl->pack = 0; | |
cpl->len = htons(pktlen); | |
- /* checksum offload */ | |
- cpl->ctrl1 = 0; | |
- | |
- pos = cpl + 1; | |
memcpy(buf, skb->data, pktlen); | |
if (tx_info->ip_family == AF_INET) { | |
/* we need to correct ip header len */ | |
ip = (struct iphdr *)(buf + maclen); | |
ip->tot_len = htons(pktlen - maclen); | |
+ cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP); | |
#if IS_ENABLED(CONFIG_IPV6) | |
} else { | |
ip6 = (struct ipv6hdr *)(buf + maclen); | |
ip6->payload_len = htons(pktlen - maclen - iplen); | |
+ cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP6); | |
#endif | |
} | |
+ | |
+ cntrl1 |= T6_TXPKT_ETHHDR_LEN_V(maclen - ETH_HLEN) | | |
+ TXPKT_IPHDR_LEN_V(iplen); | |
+ /* checksum offload */ | |
+ cpl->ctrl1 = cpu_to_be64(cntrl1); | |
+ | |
+ pos = cpl + 1; | |
+ | |
/* now take care of the tcp header, if fin is not set then clear push | |
* bit as well, and if fin is set, it will be sent at the last so we | |
* need to update the tcp sequence number as per the last packet. | |
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c | |
index 8d1ff2454e2e3..efb8a66efc684 100644 | |
--- a/drivers/firmware/xilinx/zynqmp.c | |
+++ b/drivers/firmware/xilinx/zynqmp.c | |
@@ -147,6 +147,9 @@ static int zynqmp_pm_feature(u32 api_id) | |
return 0; | |
/* Return value if feature is already checked */ | |
+ if (api_id > ARRAY_SIZE(zynqmp_pm_features)) | |
+ return PM_FEATURE_INVALID; | |
+ | |
if (zynqmp_pm_features[api_id] != PM_FEATURE_UNCHECKED) | |
return zynqmp_pm_features[api_id]; | |
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c | |
index e44d5de2a1201..b966f5e28ebff 100644 | |
--- a/drivers/gpio/gpio-aspeed.c | |
+++ b/drivers/gpio/gpio-aspeed.c | |
@@ -1114,6 +1114,7 @@ static const struct aspeed_gpio_config ast2500_config = | |
static const struct aspeed_bank_props ast2600_bank_props[] = { | |
/* input output */ | |
+ {4, 0xffffffff, 0x00ffffff}, /* Q/R/S/T */ | |
{5, 0xffffffff, 0xffffff00}, /* U/V/W/X */ | |
{6, 0x0000ffff, 0x0000ffff}, /* Y/Z */ | |
{ }, | |
diff --git a/drivers/gpio/gpio-pcie-idio-24.c b/drivers/gpio/gpio-pcie-idio-24.c | |
index a68941d19ac60..2a07fd96707ee 100644 | |
--- a/drivers/gpio/gpio-pcie-idio-24.c | |
+++ b/drivers/gpio/gpio-pcie-idio-24.c | |
@@ -28,6 +28,47 @@ | |
#include <linux/spinlock.h> | |
#include <linux/types.h> | |
+/* | |
+ * PLX PEX8311 PCI LCS_INTCSR Interrupt Control/Status | |
+ * | |
+ * Bit: Description | |
+ * 0: Enable Interrupt Sources (Bit 0) | |
+ * 1: Enable Interrupt Sources (Bit 1) | |
+ * 2: Generate Internal PCI Bus Internal SERR# Interrupt | |
+ * 3: Mailbox Interrupt Enable | |
+ * 4: Power Management Interrupt Enable | |
+ * 5: Power Management Interrupt | |
+ * 6: Slave Read Local Data Parity Check Error Enable | |
+ * 7: Slave Read Local Data Parity Check Error Status | |
+ * 8: Internal PCI Wire Interrupt Enable | |
+ * 9: PCI Express Doorbell Interrupt Enable | |
+ * 10: PCI Abort Interrupt Enable | |
+ * 11: Local Interrupt Input Enable | |
+ * 12: Retry Abort Enable | |
+ * 13: PCI Express Doorbell Interrupt Active | |
+ * 14: PCI Abort Interrupt Active | |
+ * 15: Local Interrupt Input Active | |
+ * 16: Local Interrupt Output Enable | |
+ * 17: Local Doorbell Interrupt Enable | |
+ * 18: DMA Channel 0 Interrupt Enable | |
+ * 19: DMA Channel 1 Interrupt Enable | |
+ * 20: Local Doorbell Interrupt Active | |
+ * 21: DMA Channel 0 Interrupt Active | |
+ * 22: DMA Channel 1 Interrupt Active | |
+ * 23: Built-In Self-Test (BIST) Interrupt Active | |
+ * 24: Direct Master was the Bus Master during a Master or Target Abort | |
+ * 25: DMA Channel 0 was the Bus Master during a Master or Target Abort | |
+ * 26: DMA Channel 1 was the Bus Master during a Master or Target Abort | |
+ * 27: Target Abort after internal 256 consecutive Master Retries | 
+ * 28: PCI Bus wrote data to LCS_MBOX0 | |
+ * 29: PCI Bus wrote data to LCS_MBOX1 | |
+ * 30: PCI Bus wrote data to LCS_MBOX2 | |
+ * 31: PCI Bus wrote data to LCS_MBOX3 | |
+ */ | |
+#define PLX_PEX8311_PCI_LCS_INTCSR 0x68 | |
+#define INTCSR_INTERNAL_PCI_WIRE BIT(8) | |
+#define INTCSR_LOCAL_INPUT BIT(11) | |
+ | |
/** | |
* struct idio_24_gpio_reg - GPIO device registers structure | |
* @out0_7: Read: FET Outputs 0-7 | |
@@ -92,6 +133,7 @@ struct idio_24_gpio_reg { | |
struct idio_24_gpio { | |
struct gpio_chip chip; | |
raw_spinlock_t lock; | |
+ __u8 __iomem *plx; | |
struct idio_24_gpio_reg __iomem *reg; | |
unsigned long irq_mask; | |
}; | |
@@ -334,13 +376,13 @@ static void idio_24_irq_mask(struct irq_data *data) | |
unsigned long flags; | |
const unsigned long bit_offset = irqd_to_hwirq(data) - 24; | |
unsigned char new_irq_mask; | |
- const unsigned long bank_offset = bit_offset/8 * 8; | |
+ const unsigned long bank_offset = bit_offset / 8; | |
unsigned char cos_enable_state; | |
raw_spin_lock_irqsave(&idio24gpio->lock, flags); | |
- idio24gpio->irq_mask &= BIT(bit_offset); | |
- new_irq_mask = idio24gpio->irq_mask >> bank_offset; | |
+ idio24gpio->irq_mask &= ~BIT(bit_offset); | |
+ new_irq_mask = idio24gpio->irq_mask >> bank_offset * 8; | |
if (!new_irq_mask) { | |
cos_enable_state = ioread8(&idio24gpio->reg->cos_enable); | |
@@ -363,12 +405,12 @@ static void idio_24_irq_unmask(struct irq_data *data) | |
unsigned long flags; | |
unsigned char prev_irq_mask; | |
const unsigned long bit_offset = irqd_to_hwirq(data) - 24; | |
- const unsigned long bank_offset = bit_offset/8 * 8; | |
+ const unsigned long bank_offset = bit_offset / 8; | |
unsigned char cos_enable_state; | |
raw_spin_lock_irqsave(&idio24gpio->lock, flags); | |
- prev_irq_mask = idio24gpio->irq_mask >> bank_offset; | |
+ prev_irq_mask = idio24gpio->irq_mask >> bank_offset * 8; | |
idio24gpio->irq_mask |= BIT(bit_offset); | |
if (!prev_irq_mask) { | |
@@ -455,6 +497,7 @@ static int idio_24_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |
struct device *const dev = &pdev->dev; | |
struct idio_24_gpio *idio24gpio; | |
int err; | |
+ const size_t pci_plx_bar_index = 1; | |
const size_t pci_bar_index = 2; | |
const char *const name = pci_name(pdev); | |
struct gpio_irq_chip *girq; | |
@@ -469,12 +512,13 @@ static int idio_24_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |
return err; | |
} | |
- err = pcim_iomap_regions(pdev, BIT(pci_bar_index), name); | |
+ err = pcim_iomap_regions(pdev, BIT(pci_plx_bar_index) | BIT(pci_bar_index), name); | |
if (err) { | |
dev_err(dev, "Unable to map PCI I/O addresses (%d)\n", err); | |
return err; | |
} | |
+ idio24gpio->plx = pcim_iomap_table(pdev)[pci_plx_bar_index]; | |
idio24gpio->reg = pcim_iomap_table(pdev)[pci_bar_index]; | |
idio24gpio->chip.label = name; | |
@@ -504,6 +548,12 @@ static int idio_24_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |
/* Software board reset */ | |
iowrite8(0, &idio24gpio->reg->soft_reset); | |
+ /* | |
+ * enable PLX PEX8311 internal PCI wire interrupt and local interrupt | |
+ * input | |
+ */ | |
+ iowrite8((INTCSR_INTERNAL_PCI_WIRE | INTCSR_LOCAL_INPUT) >> 8, | |
+ idio24gpio->plx + PLX_PEX8311_PCI_LCS_INTCSR + 1); | |
err = devm_gpiochip_add_data(dev, &idio24gpio->chip, idio24gpio); | |
if (err) { | |
diff --git a/drivers/gpio/gpio-sifive.c b/drivers/gpio/gpio-sifive.c | |
index c54dd08f2cbfd..d5eb9ca119016 100644 | |
--- a/drivers/gpio/gpio-sifive.c | |
+++ b/drivers/gpio/gpio-sifive.c | |
@@ -183,7 +183,7 @@ static int sifive_gpio_probe(struct platform_device *pdev) | |
return PTR_ERR(chip->regs); | |
ngpio = of_irq_count(node); | |
- if (ngpio >= SIFIVE_GPIO_MAX) { | |
+ if (ngpio > SIFIVE_GPIO_MAX) { | |
dev_err(dev, "Too many GPIO interrupts (max=%d)\n", | |
SIFIVE_GPIO_MAX); | |
return -ENXIO; | |
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c | |
index 20f108818b2b9..a3c3fe96515f2 100644 | |
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c | |
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c | |
@@ -1071,22 +1071,19 @@ static int cik_sdma_soft_reset(void *handle) | |
{ | |
u32 srbm_soft_reset = 0; | |
struct amdgpu_device *adev = (struct amdgpu_device *)handle; | |
- u32 tmp = RREG32(mmSRBM_STATUS2); | |
+ u32 tmp; | |
- if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) { | |
- /* sdma0 */ | |
- tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET); | |
- tmp |= SDMA0_F32_CNTL__HALT_MASK; | |
- WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp); | |
- srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK; | |
- } | |
- if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) { | |
- /* sdma1 */ | |
- tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET); | |
- tmp |= SDMA0_F32_CNTL__HALT_MASK; | |
- WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp); | |
- srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK; | |
- } | |
+ /* sdma0 */ | |
+ tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET); | |
+ tmp |= SDMA0_F32_CNTL__HALT_MASK; | |
+ WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp); | |
+ srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK; | |
+ | |
+ /* sdma1 */ | |
+ tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET); | |
+ tmp |= SDMA0_F32_CNTL__HALT_MASK; | |
+ WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp); | |
+ srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK; | |
if (srbm_soft_reset) { | |
tmp = RREG32(mmSRBM_SOFT_RESET); | |
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c | |
index c28ebf41530aa..254ab2ada70a0 100644 | |
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c | |
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c | |
@@ -1220,8 +1220,7 @@ static int soc15_common_early_init(void *handle) | |
adev->pg_flags = AMD_PG_SUPPORT_SDMA | | |
AMD_PG_SUPPORT_MMHUB | | |
- AMD_PG_SUPPORT_VCN | | |
- AMD_PG_SUPPORT_VCN_DPG; | |
+ AMD_PG_SUPPORT_VCN; | |
} else { | |
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | | |
AMD_CG_SUPPORT_GFX_MGLS | | |
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c b/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c | |
index 49689f71f4f1e..0effbb2bd74a6 100644 | |
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c | |
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c | |
@@ -306,8 +306,8 @@ irq_source_info_dcn30[DAL_IRQ_SOURCES_NUMBER] = { | |
pflip_int_entry(1), | |
pflip_int_entry(2), | |
pflip_int_entry(3), | |
- [DC_IRQ_SOURCE_PFLIP5] = dummy_irq_entry(), | |
- [DC_IRQ_SOURCE_PFLIP6] = dummy_irq_entry(), | |
+ pflip_int_entry(4), | |
+ pflip_int_entry(5), | |
[DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(), | |
gpio_pad_int_entry(0), | |
gpio_pad_int_entry(1), | |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c | |
index 3be40114e63d2..45f608838f6eb 100644 | |
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c | |
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c | |
@@ -142,12 +142,12 @@ static const struct baco_cmd_entry exit_baco_tbl[] = | |
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 }, | |
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 }, | |
{ CMD_DELAY_MS, 0, 0, 0, 20, 0 }, | |
- { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x20 }, | |
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 }, | |
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 }, | |
- { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c }, | |
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 }, | |
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 }, | |
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 }, | |
- { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x10 }, | |
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 }, | |
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 }, | |
{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 } | |
}; | |
@@ -155,6 +155,7 @@ static const struct baco_cmd_entry exit_baco_tbl[] = | |
static const struct baco_cmd_entry clean_baco_tbl[] = | |
{ | |
{ CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 }, | |
+ { CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 }, | |
{ CMD_WRITE, mmCP_PFP_UCODE_ADDR, 0, 0, 0, 0 } | |
}; | |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | |
index fc63d9e32e1f8..c8ee931075e52 100644 | |
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | |
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | |
@@ -1541,6 +1541,10 @@ static int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr) | |
PP_ASSERT_WITH_CODE((tmp_result == 0), | |
"Failed to reset to default!", result = tmp_result); | |
+ tmp_result = smum_stop_smc(hwmgr); | |
+ PP_ASSERT_WITH_CODE((tmp_result == 0), | |
+ "Failed to stop smc!", result = tmp_result); | |
+ | |
tmp_result = smu7_force_switch_to_arbf0(hwmgr); | |
PP_ASSERT_WITH_CODE((tmp_result == 0), | |
"Failed to force to switch arbf0!", result = tmp_result); | |
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | |
index 15ed6cbdf3660..91cdc53472f01 100644 | |
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | |
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | |
@@ -229,6 +229,7 @@ struct pp_smumgr_func { | |
bool (*is_hw_avfs_present)(struct pp_hwmgr *hwmgr); | |
int (*update_dpm_settings)(struct pp_hwmgr *hwmgr, void *profile_setting); | |
int (*smc_table_manager)(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw); /*rw: true for read, false for write */ | |
+ int (*stop_smc)(struct pp_hwmgr *hwmgr); | |
}; | |
struct pp_hwmgr_func { | |
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h | |
index ad100b533d049..5f46f1a4f38ef 100644 | |
--- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h | |
+++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h | |
@@ -113,4 +113,6 @@ extern int smum_update_dpm_settings(struct pp_hwmgr *hwmgr, void *profile_settin | |
extern int smum_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw); | |
+extern int smum_stop_smc(struct pp_hwmgr *hwmgr); | |
+ | |
#endif | |
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c | |
index e4d1f3d66ef48..329bf4d44bbce 100644 | |
--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c | |
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c | |
@@ -2726,10 +2726,7 @@ static int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr) | |
static bool ci_is_dpm_running(struct pp_hwmgr *hwmgr) | |
{ | |
- return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device, | |
- CGS_IND_REG__SMC, FEATURE_STATUS, | |
- VOLTAGE_CONTROLLER_ON)) | |
- ? true : false; | |
+ return ci_is_smc_ram_running(hwmgr); | |
} | |
static int ci_smu_init(struct pp_hwmgr *hwmgr) | |
@@ -2939,6 +2936,29 @@ static int ci_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type) | |
return 0; | |
} | |
+static void ci_reset_smc(struct pp_hwmgr *hwmgr) | |
+{ | |
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, | |
+ SMC_SYSCON_RESET_CNTL, | |
+ rst_reg, 1); | |
+} | |
+ | |
+ | |
+static void ci_stop_smc_clock(struct pp_hwmgr *hwmgr) | |
+{ | |
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, | |
+ SMC_SYSCON_CLOCK_CNTL_0, | |
+ ck_disable, 1); | |
+} | |
+ | |
+static int ci_stop_smc(struct pp_hwmgr *hwmgr) | |
+{ | |
+ ci_reset_smc(hwmgr); | |
+ ci_stop_smc_clock(hwmgr); | |
+ | |
+ return 0; | |
+} | |
+ | |
const struct pp_smumgr_func ci_smu_funcs = { | |
.name = "ci_smu", | |
.smu_init = ci_smu_init, | |
@@ -2964,4 +2984,5 @@ const struct pp_smumgr_func ci_smu_funcs = { | |
.is_dpm_running = ci_is_dpm_running, | |
.update_dpm_settings = ci_update_dpm_settings, | |
.update_smc_table = ci_update_smc_table, | |
+ .stop_smc = ci_stop_smc, | |
}; | |
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | |
index b6fb480668416..b6921db3c1305 100644 | |
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | |
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | |
@@ -245,3 +245,11 @@ int smum_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t tabl | |
return -EINVAL; | |
} | |
+ | |
+int smum_stop_smc(struct pp_hwmgr *hwmgr) | |
+{ | |
+ if (hwmgr->smumgr_funcs->stop_smc) | |
+ return hwmgr->smumgr_funcs->stop_smc(hwmgr); | |
+ | |
+ return 0; | |
+} | |
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c | |
index 15eb3770d817e..361e3a0c5ab6b 100644 | |
--- a/drivers/gpu/drm/gma500/psb_irq.c | |
+++ b/drivers/gpu/drm/gma500/psb_irq.c | |
@@ -347,6 +347,7 @@ int psb_irq_postinstall(struct drm_device *dev) | |
{ | |
struct drm_psb_private *dev_priv = dev->dev_private; | |
unsigned long irqflags; | |
+ unsigned int i; | |
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); | |
@@ -359,20 +360,12 @@ int psb_irq_postinstall(struct drm_device *dev) | |
PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R); | |
PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); | |
- if (dev->vblank[0].enabled) | |
- psb_enable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE); | |
- else | |
- psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE); | |
- | |
- if (dev->vblank[1].enabled) | |
- psb_enable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE); | |
- else | |
- psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE); | |
- | |
- if (dev->vblank[2].enabled) | |
- psb_enable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE); | |
- else | |
- psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE); | |
+ for (i = 0; i < dev->num_crtcs; ++i) { | |
+ if (dev->vblank[i].enabled) | |
+ psb_enable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE); | |
+ else | |
+ psb_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE); | |
+ } | |
if (dev_priv->ops->hotplug_enable) | |
dev_priv->ops->hotplug_enable(dev, true); | |
@@ -385,6 +378,7 @@ void psb_irq_uninstall(struct drm_device *dev) | |
{ | |
struct drm_psb_private *dev_priv = dev->dev_private; | |
unsigned long irqflags; | |
+ unsigned int i; | |
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); | |
@@ -393,14 +387,10 @@ void psb_irq_uninstall(struct drm_device *dev) | |
PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); | |
- if (dev->vblank[0].enabled) | |
- psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE); | |
- | |
- if (dev->vblank[1].enabled) | |
- psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE); | |
- | |
- if (dev->vblank[2].enabled) | |
- psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE); | |
+ for (i = 0; i < dev->num_crtcs; ++i) { | |
+ if (dev->vblank[i].enabled) | |
+ psb_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE); | |
+ } | |
dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG | | |
_PSB_IRQ_MSVDX_FLAG | | |
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c | |
index 7f76fc68f498a..ba8758011e297 100644 | |
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c | |
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c | |
@@ -484,21 +484,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | |
if (!obj) | |
return -ENOENT; | |
- /* | |
- * Already in the desired write domain? Nothing for us to do! | |
- * | |
- * We apply a little bit of cunning here to catch a broader set of | |
- * no-ops. If obj->write_domain is set, we must be in the same | |
- * obj->read_domains, and only that domain. Therefore, if that | |
- * obj->write_domain matches the request read_domains, we are | |
- * already in the same read/write domain and can skip the operation, | |
- * without having to further check the requested write_domain. | |
- */ | |
- if (READ_ONCE(obj->write_domain) == read_domains) { | |
- err = 0; | |
- goto out; | |
- } | |
- | |
/* | |
* Try to flush the object off the GPU without holding the lock. | |
* We will repeat the flush holding the lock in the normal manner | |
@@ -536,6 +521,19 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | |
if (err) | |
goto out; | |
+ /* | |
+ * Already in the desired write domain? Nothing for us to do! | |
+ * | |
+ * We apply a little bit of cunning here to catch a broader set of | |
+ * no-ops. If obj->write_domain is set, we must be in the same | |
+ * obj->read_domains, and only that domain. Therefore, if that | |
+ * obj->write_domain matches the request read_domains, we are | |
+ * already in the same read/write domain and can skip the operation, | |
+ * without having to further check the requested write_domain. | |
+ */ | |
+ if (READ_ONCE(obj->write_domain) == read_domains) | |
+ goto out_unpin; | |
+ | |
err = i915_gem_object_lock_interruptible(obj); | |
if (err) | |
goto out_unpin; | |
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c | |
index 26087dd797824..3b841eddce256 100644 | |
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c | |
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c | |
@@ -370,7 +370,8 @@ static void __setup_engine_capabilities(struct intel_engine_cs *engine) | |
* instances. | |
*/ | |
if ((INTEL_GEN(i915) >= 11 && | |
- engine->gt->info.vdbox_sfc_access & engine->mask) || | |
+ (engine->gt->info.vdbox_sfc_access & | |
+ BIT(engine->instance))) || | |
(INTEL_GEN(i915) >= 9 && engine->instance == 0)) | |
engine->uabi_capabilities |= | |
I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC; | |
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c | |
index bc64f773dcdb4..034d0a8d24c8c 100644 | |
--- a/drivers/gpu/drm/i915/i915_vma.c | |
+++ b/drivers/gpu/drm/i915/i915_vma.c | |
@@ -315,8 +315,10 @@ static void __vma_release(struct dma_fence_work *work) | |
{ | |
struct i915_vma_work *vw = container_of(work, typeof(*vw), base); | |
- if (vw->pinned) | |
+ if (vw->pinned) { | |
__i915_gem_object_unpin_pages(vw->pinned); | |
+ i915_gem_object_put(vw->pinned); | |
+ } | |
} | |
static const struct dma_fence_work_ops bind_ops = { | |
@@ -430,7 +432,7 @@ int i915_vma_bind(struct i915_vma *vma, | |
if (vma->obj) { | |
__i915_gem_object_pin_pages(vma->obj); | |
- work->pinned = vma->obj; | |
+ work->pinned = i915_gem_object_get(vma->obj); | |
} | |
} else { | |
ret = vma->ops->bind_vma(vma->vm, vma, cache_level, bind_flags); | |
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c | |
index b172087eee6ae..36b5c8fea3eba 100644 | |
--- a/drivers/gpu/drm/panfrost/panfrost_device.c | |
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c | |
@@ -214,58 +214,67 @@ int panfrost_device_init(struct panfrost_device *pfdev) | |
return err; | |
} | |
+ err = panfrost_devfreq_init(pfdev); | |
+ if (err) { | |
+ if (err != -EPROBE_DEFER) | |
+ dev_err(pfdev->dev, "devfreq init failed %d\n", err); | |
+ goto out_clk; | |
+ } | |
+ | |
err = panfrost_regulator_init(pfdev); | |
if (err) | |
- goto err_out0; | |
+ goto out_devfreq; | |
err = panfrost_reset_init(pfdev); | |
if (err) { | |
dev_err(pfdev->dev, "reset init failed %d\n", err); | |
- goto err_out1; | |
+ goto out_regulator; | |
} | |
err = panfrost_pm_domain_init(pfdev); | |
if (err) | |
- goto err_out2; | |
+ goto out_reset; | |
res = platform_get_resource(pfdev->pdev, IORESOURCE_MEM, 0); | |
pfdev->iomem = devm_ioremap_resource(pfdev->dev, res); | |
if (IS_ERR(pfdev->iomem)) { | |
dev_err(pfdev->dev, "failed to ioremap iomem\n"); | |
err = PTR_ERR(pfdev->iomem); | |
- goto err_out3; | |
+ goto out_pm_domain; | |
} | |
err = panfrost_gpu_init(pfdev); | |
if (err) | |
- goto err_out3; | |
+ goto out_pm_domain; | |
err = panfrost_mmu_init(pfdev); | |
if (err) | |
- goto err_out4; | |
+ goto out_gpu; | |
err = panfrost_job_init(pfdev); | |
if (err) | |
- goto err_out5; | |
+ goto out_mmu; | |
err = panfrost_perfcnt_init(pfdev); | |
if (err) | |
- goto err_out6; | |
+ goto out_job; | |
return 0; | |
-err_out6: | |
+out_job: | |
panfrost_job_fini(pfdev); | |
-err_out5: | |
+out_mmu: | |
panfrost_mmu_fini(pfdev); | |
-err_out4: | |
+out_gpu: | |
panfrost_gpu_fini(pfdev); | |
-err_out3: | |
+out_pm_domain: | |
panfrost_pm_domain_fini(pfdev); | |
-err_out2: | |
+out_reset: | |
panfrost_reset_fini(pfdev); | |
-err_out1: | |
+out_regulator: | |
panfrost_regulator_fini(pfdev); | |
-err_out0: | |
+out_devfreq: | |
+ panfrost_devfreq_fini(pfdev); | |
+out_clk: | |
panfrost_clk_fini(pfdev); | |
return err; | |
} | |
@@ -278,6 +287,7 @@ void panfrost_device_fini(struct panfrost_device *pfdev) | |
panfrost_gpu_fini(pfdev); | |
panfrost_pm_domain_fini(pfdev); | |
panfrost_reset_fini(pfdev); | |
+ panfrost_devfreq_fini(pfdev); | |
panfrost_regulator_fini(pfdev); | |
panfrost_clk_fini(pfdev); | |
} | |
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c | |
index f6d5d03201fad..5d95917f923a1 100644 | |
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c | |
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c | |
@@ -14,7 +14,6 @@ | |
#include <drm/drm_utils.h> | |
#include "panfrost_device.h" | |
-#include "panfrost_devfreq.h" | |
#include "panfrost_gem.h" | |
#include "panfrost_mmu.h" | |
#include "panfrost_job.h" | |
@@ -606,13 +605,6 @@ static int panfrost_probe(struct platform_device *pdev) | |
goto err_out0; | |
} | |
- err = panfrost_devfreq_init(pfdev); | |
- if (err) { | |
- if (err != -EPROBE_DEFER) | |
- dev_err(&pdev->dev, "Fatal error during devfreq init\n"); | |
- goto err_out1; | |
- } | |
- | |
pm_runtime_set_active(pfdev->dev); | |
pm_runtime_mark_last_busy(pfdev->dev); | |
pm_runtime_enable(pfdev->dev); | |
@@ -625,17 +617,16 @@ static int panfrost_probe(struct platform_device *pdev) | |
*/ | |
err = drm_dev_register(ddev, 0); | |
if (err < 0) | |
- goto err_out2; | |
+ goto err_out1; | |
panfrost_gem_shrinker_init(ddev); | |
return 0; | |
-err_out2: | |
- pm_runtime_disable(pfdev->dev); | |
- panfrost_devfreq_fini(pfdev); | |
err_out1: | |
+ pm_runtime_disable(pfdev->dev); | |
panfrost_device_fini(pfdev); | |
+ pm_runtime_set_suspended(pfdev->dev); | |
err_out0: | |
drm_dev_put(ddev); | |
return err; | |
@@ -650,10 +641,9 @@ static int panfrost_remove(struct platform_device *pdev) | |
panfrost_gem_shrinker_cleanup(ddev); | |
pm_runtime_get_sync(pfdev->dev); | |
- panfrost_devfreq_fini(pfdev); | |
- panfrost_device_fini(pfdev); | |
- pm_runtime_put_sync_suspend(pfdev->dev); | |
pm_runtime_disable(pfdev->dev); | |
+ panfrost_device_fini(pfdev); | |
+ pm_runtime_set_suspended(pfdev->dev); | |
drm_dev_put(ddev); | |
return 0; | |
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c | |
index 74ceebd62fbce..073b528f33337 100644 | |
--- a/drivers/gpu/drm/vc4/vc4_bo.c | |
+++ b/drivers/gpu/drm/vc4/vc4_bo.c | |
@@ -1005,6 +1005,7 @@ int vc4_get_tiling_ioctl(struct drm_device *dev, void *data, | |
return 0; | |
} | |
+static void vc4_bo_cache_destroy(struct drm_device *dev, void *unused); | |
int vc4_bo_cache_init(struct drm_device *dev) | |
{ | |
struct vc4_dev *vc4 = to_vc4_dev(dev); | |
@@ -1033,10 +1034,10 @@ int vc4_bo_cache_init(struct drm_device *dev) | |
INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work); | |
timer_setup(&vc4->bo_cache.time_timer, vc4_bo_cache_time_timer, 0); | |
- return 0; | |
+ return drmm_add_action_or_reset(dev, vc4_bo_cache_destroy, NULL); | |
} | |
-void vc4_bo_cache_destroy(struct drm_device *dev) | |
+static void vc4_bo_cache_destroy(struct drm_device *dev, void *unused) | |
{ | |
struct vc4_dev *vc4 = to_vc4_dev(dev); | |
int i; | |
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c | |
index f6995e7f6eb6e..c7aeaba3fabe8 100644 | |
--- a/drivers/gpu/drm/vc4/vc4_drv.c | |
+++ b/drivers/gpu/drm/vc4/vc4_drv.c | |
@@ -311,7 +311,6 @@ unbind_all: | |
gem_destroy: | |
vc4_gem_destroy(drm); | |
drm_mode_config_cleanup(drm); | |
- vc4_bo_cache_destroy(drm); | |
dev_put: | |
drm_dev_put(drm); | |
return ret; | |
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h | |
index fa19160c801f8..528c28895a8e0 100644 | |
--- a/drivers/gpu/drm/vc4/vc4_drv.h | |
+++ b/drivers/gpu/drm/vc4/vc4_drv.h | |
@@ -14,6 +14,7 @@ | |
#include <drm/drm_device.h> | |
#include <drm/drm_encoder.h> | |
#include <drm/drm_gem_cma_helper.h> | |
+#include <drm/drm_managed.h> | |
#include <drm/drm_mm.h> | |
#include <drm/drm_modeset_lock.h> | |
@@ -786,7 +787,6 @@ struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev, | |
struct sg_table *sgt); | |
void *vc4_prime_vmap(struct drm_gem_object *obj); | |
int vc4_bo_cache_init(struct drm_device *dev); | |
-void vc4_bo_cache_destroy(struct drm_device *dev); | |
int vc4_bo_inc_usecnt(struct vc4_bo *bo); | |
void vc4_bo_dec_usecnt(struct vc4_bo *bo); | |
void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo); | |
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c | |
index 32e3bc0aa665a..0f50295d02149 100644 | |
--- a/drivers/hv/hv_balloon.c | |
+++ b/drivers/hv/hv_balloon.c | |
@@ -1275,7 +1275,7 @@ static void balloon_up(struct work_struct *dummy) | |
/* Refuse to balloon below the floor. */ | |
if (avail_pages < num_pages || avail_pages - num_pages < floor) { | |
- pr_warn("Balloon request will be partially fulfilled. %s\n", | |
+ pr_info("Balloon request will be partially fulfilled. %s\n", | |
avail_pages < num_pages ? "Not enough memory." : | |
"Balloon floor reached."); | |
diff --git a/drivers/hwmon/amd_energy.c b/drivers/hwmon/amd_energy.c | |
index 29603742c8583..0dee535e6c851 100644 | |
--- a/drivers/hwmon/amd_energy.c | |
+++ b/drivers/hwmon/amd_energy.c | |
@@ -209,7 +209,7 @@ static umode_t amd_energy_is_visible(const void *_data, | |
enum hwmon_sensor_types type, | |
u32 attr, int channel) | |
{ | |
- return 0444; | |
+ return 0440; | |
} | |
static int energy_accumulator(void *p) | |
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c | |
index a18887990f4a2..79b498f816fe9 100644 | |
--- a/drivers/hwmon/applesmc.c | |
+++ b/drivers/hwmon/applesmc.c | |
@@ -32,6 +32,7 @@ | |
#include <linux/hwmon.h> | |
#include <linux/workqueue.h> | |
#include <linux/err.h> | |
+#include <linux/bits.h> | |
/* data port used by Apple SMC */ | |
#define APPLESMC_DATA_PORT 0x300 | |
@@ -42,10 +43,13 @@ | |
#define APPLESMC_MAX_DATA_LENGTH 32 | |
-/* wait up to 128 ms for a status change. */ | |
-#define APPLESMC_MIN_WAIT 0x0010 | |
-#define APPLESMC_RETRY_WAIT 0x0100 | |
-#define APPLESMC_MAX_WAIT 0x20000 | |
+/* Apple SMC status bits */ | |
+#define SMC_STATUS_AWAITING_DATA BIT(0) /* SMC has data waiting to be read */ | |
+#define SMC_STATUS_IB_CLOSED BIT(1) /* Will ignore any input */ | |
+#define SMC_STATUS_BUSY BIT(2) /* Command in progress */ | |
+ | |
+/* Initial wait is 8us */ | |
+#define APPLESMC_MIN_WAIT 0x0008 | |
#define APPLESMC_READ_CMD 0x10 | |
#define APPLESMC_WRITE_CMD 0x11 | |
@@ -151,65 +155,84 @@ static unsigned int key_at_index; | |
static struct workqueue_struct *applesmc_led_wq; | |
/* | |
- * wait_read - Wait for a byte to appear on SMC port. Callers must | |
- * hold applesmc_lock. | |
+ * Wait for specific status bits with a mask on the SMC. | |
+ * Used before all transactions. | |
+ * This does 10 fast loops of 8us then exponentially backs off for a | |
+ * minimum total wait of 262ms. Depending on usleep_range this could | |
+ * run out past 500ms. | |
*/ | |
-static int wait_read(void) | |
+ | |
+static int wait_status(u8 val, u8 mask) | |
{ | |
- unsigned long end = jiffies + (APPLESMC_MAX_WAIT * HZ) / USEC_PER_SEC; | |
u8 status; | |
int us; | |
+ int i; | |
- for (us = APPLESMC_MIN_WAIT; us < APPLESMC_MAX_WAIT; us <<= 1) { | |
- usleep_range(us, us * 16); | |
+ us = APPLESMC_MIN_WAIT; | |
+ for (i = 0; i < 24 ; i++) { | |
status = inb(APPLESMC_CMD_PORT); | |
- /* read: wait for smc to settle */ | |
- if (status & 0x01) | |
+ if ((status & mask) == val) | |
return 0; | |
- /* timeout: give up */ | |
- if (time_after(jiffies, end)) | |
- break; | |
+ usleep_range(us, us * 2); | |
+ if (i > 9) | |
+ us <<= 1; | |
} | |
- | |
- pr_warn("wait_read() fail: 0x%02x\n", status); | |
return -EIO; | |
} | |
-/* | |
- * send_byte - Write to SMC port, retrying when necessary. Callers | |
- * must hold applesmc_lock. | |
- */ | |
+/* send_byte - Write to SMC data port. Callers must hold applesmc_lock. */ | |
+ | |
static int send_byte(u8 cmd, u16 port) | |
{ | |
- u8 status; | |
- int us; | |
- unsigned long end = jiffies + (APPLESMC_MAX_WAIT * HZ) / USEC_PER_SEC; | |
+ int status; | |
+ | |
+ status = wait_status(0, SMC_STATUS_IB_CLOSED); | |
+ if (status) | |
+ return status; | |
+ /* | |
+ * This needs to be a separate read looking for bit 0x04 | |
+ * after bit 0x02 falls. If consolidated with the wait above | |
+ * this extra read may not happen if status returns both | |
+ * simultaneously and this would appear to be required. | |
+ */ | |
+ status = wait_status(SMC_STATUS_BUSY, SMC_STATUS_BUSY); | |
+ if (status) | |
+ return status; | |
outb(cmd, port); | |
- for (us = APPLESMC_MIN_WAIT; us < APPLESMC_MAX_WAIT; us <<= 1) { | |
- usleep_range(us, us * 16); | |
- status = inb(APPLESMC_CMD_PORT); | |
- /* write: wait for smc to settle */ | |
- if (status & 0x02) | |
- continue; | |
- /* ready: cmd accepted, return */ | |
- if (status & 0x04) | |
- return 0; | |
- /* timeout: give up */ | |
- if (time_after(jiffies, end)) | |
- break; | |
- /* busy: long wait and resend */ | |
- udelay(APPLESMC_RETRY_WAIT); | |
- outb(cmd, port); | |
- } | |
- | |
- pr_warn("send_byte(0x%02x, 0x%04x) fail: 0x%02x\n", cmd, port, status); | |
- return -EIO; | |
+ return 0; | |
} | |
+/* send_command - Write a command to the SMC. Callers must hold applesmc_lock. */ | |
+ | |
static int send_command(u8 cmd) | |
{ | |
- return send_byte(cmd, APPLESMC_CMD_PORT); | |
+ int ret; | |
+ | |
+ ret = wait_status(0, SMC_STATUS_IB_CLOSED); | |
+ if (ret) | |
+ return ret; | |
+ outb(cmd, APPLESMC_CMD_PORT); | |
+ return 0; | |
+} | |
+ | |
+/* | |
+ * Based on logic from the Apple driver. This is issued before any interaction | |
+ * If busy is stuck high, issue a read command to reset the SMC state machine. | |
+ * If busy is stuck high after the command then the SMC is jammed. | |
+ */ | |
+ | |
+static int smc_sane(void) | |
+{ | |
+ int ret; | |
+ | |
+ ret = wait_status(0, SMC_STATUS_BUSY); | |
+ if (!ret) | |
+ return ret; | |
+ ret = send_command(APPLESMC_READ_CMD); | |
+ if (ret) | |
+ return ret; | |
+ return wait_status(0, SMC_STATUS_BUSY); | |
} | |
static int send_argument(const char *key) | |
@@ -226,6 +249,11 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len) | |
{ | |
u8 status, data = 0; | |
int i; | |
+ int ret; | |
+ | |
+ ret = smc_sane(); | |
+ if (ret) | |
+ return ret; | |
if (send_command(cmd) || send_argument(key)) { | |
pr_warn("%.4s: read arg fail\n", key); | |
@@ -239,7 +267,8 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len) | |
} | |
for (i = 0; i < len; i++) { | |
- if (wait_read()) { | |
+ if (wait_status(SMC_STATUS_AWAITING_DATA | SMC_STATUS_BUSY, | |
+ SMC_STATUS_AWAITING_DATA | SMC_STATUS_BUSY)) { | |
pr_warn("%.4s: read data[%d] fail\n", key, i); | |
return -EIO; | |
} | |
@@ -250,19 +279,24 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len) | |
for (i = 0; i < 16; i++) { | |
udelay(APPLESMC_MIN_WAIT); | |
status = inb(APPLESMC_CMD_PORT); | |
- if (!(status & 0x01)) | |
+ if (!(status & SMC_STATUS_AWAITING_DATA)) | |
break; | |
data = inb(APPLESMC_DATA_PORT); | |
} | |
if (i) | |
pr_warn("flushed %d bytes, last value is: %d\n", i, data); | |
- return 0; | |
+ return wait_status(0, SMC_STATUS_BUSY); | |
} | |
static int write_smc(u8 cmd, const char *key, const u8 *buffer, u8 len) | |
{ | |
int i; | |
+ int ret; | |
+ | |
+ ret = smc_sane(); | |
+ if (ret) | |
+ return ret; | |
if (send_command(cmd) || send_argument(key)) { | |
pr_warn("%s: write arg fail\n", key); | |
@@ -281,7 +315,7 @@ static int write_smc(u8 cmd, const char *key, const u8 *buffer, u8 len) | |
} | |
} | |
- return 0; | |
+ return wait_status(0, SMC_STATUS_BUSY); | |
} | |
static int read_register_count(unsigned int *count) | |
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c | |
index be591b557df94..9a8d03e62a750 100644 | |
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c | |
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c | |
@@ -210,7 +210,7 @@ static void *etm_setup_aux(struct perf_event *event, void **pages, | |
u32 id; | |
int cpu = event->cpu; | |
cpumask_t *mask; | |
- struct coresight_device *sink; | |
+ struct coresight_device *sink = NULL; | |
struct etm_event_data *event_data = NULL; | |
event_data = alloc_event_data(cpu); | |
@@ -222,8 +222,6 @@ static void *etm_setup_aux(struct perf_event *event, void **pages, | |
if (event->attr.config2) { | |
id = (u32)event->attr.config2; | |
sink = coresight_get_sink_by_id(id); | |
- } else { | |
- sink = coresight_get_enabled_sink(true); | |
} | |
mask = &event_data->mask; | |
diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c | |
index 44974b53a6268..0d15f4c1e9f7e 100644 | |
--- a/drivers/i2c/busses/i2c-designware-slave.c | |
+++ b/drivers/i2c/busses/i2c-designware-slave.c | |
@@ -159,7 +159,6 @@ static int i2c_dw_irq_handler_slave(struct dw_i2c_dev *dev) | |
u32 raw_stat, stat, enabled, tmp; | |
u8 val = 0, slave_activity; | |
- regmap_read(dev->map, DW_IC_INTR_STAT, &stat); | |
regmap_read(dev->map, DW_IC_ENABLE, &enabled); | |
regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &raw_stat); | |
regmap_read(dev->map, DW_IC_STATUS, &tmp); | |
@@ -168,32 +167,30 @@ static int i2c_dw_irq_handler_slave(struct dw_i2c_dev *dev) | |
if (!enabled || !(raw_stat & ~DW_IC_INTR_ACTIVITY) || !dev->slave) | |
return 0; | |
+ stat = i2c_dw_read_clear_intrbits_slave(dev); | |
dev_dbg(dev->dev, | |
"%#x STATUS SLAVE_ACTIVITY=%#x : RAW_INTR_STAT=%#x : INTR_STAT=%#x\n", | |
enabled, slave_activity, raw_stat, stat); | |
- if ((stat & DW_IC_INTR_RX_FULL) && (stat & DW_IC_INTR_STOP_DET)) | |
- i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_REQUESTED, &val); | |
+ if (stat & DW_IC_INTR_RX_FULL) { | |
+ if (dev->status != STATUS_WRITE_IN_PROGRESS) { | |
+ dev->status = STATUS_WRITE_IN_PROGRESS; | |
+ i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_REQUESTED, | |
+ &val); | |
+ } | |
+ | |
+ regmap_read(dev->map, DW_IC_DATA_CMD, &tmp); | |
+ val = tmp; | |
+ if (!i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_RECEIVED, | |
+ &val)) | |
+ dev_vdbg(dev->dev, "Byte %X acked!", val); | |
+ } | |
if (stat & DW_IC_INTR_RD_REQ) { | |
if (slave_activity) { | |
- if (stat & DW_IC_INTR_RX_FULL) { | |
- regmap_read(dev->map, DW_IC_DATA_CMD, &tmp); | |
- val = tmp; | |
- | |
- if (!i2c_slave_event(dev->slave, | |
- I2C_SLAVE_WRITE_RECEIVED, | |
- &val)) { | |
- dev_vdbg(dev->dev, "Byte %X acked!", | |
- val); | |
- } | |
- regmap_read(dev->map, DW_IC_CLR_RD_REQ, &tmp); | |
- stat = i2c_dw_read_clear_intrbits_slave(dev); | |
- } else { | |
- regmap_read(dev->map, DW_IC_CLR_RD_REQ, &tmp); | |
- regmap_read(dev->map, DW_IC_CLR_RX_UNDER, &tmp); | |
- stat = i2c_dw_read_clear_intrbits_slave(dev); | |
- } | |
+ regmap_read(dev->map, DW_IC_CLR_RD_REQ, &tmp); | |
+ | |
+ dev->status = STATUS_READ_IN_PROGRESS; | |
if (!i2c_slave_event(dev->slave, | |
I2C_SLAVE_READ_REQUESTED, | |
&val)) | |
@@ -205,21 +202,11 @@ static int i2c_dw_irq_handler_slave(struct dw_i2c_dev *dev) | |
if (!i2c_slave_event(dev->slave, I2C_SLAVE_READ_PROCESSED, | |
&val)) | |
regmap_read(dev->map, DW_IC_CLR_RX_DONE, &tmp); | |
- | |
- i2c_slave_event(dev->slave, I2C_SLAVE_STOP, &val); | |
- stat = i2c_dw_read_clear_intrbits_slave(dev); | |
- return 1; | |
} | |
- if (stat & DW_IC_INTR_RX_FULL) { | |
- regmap_read(dev->map, DW_IC_DATA_CMD, &tmp); | |
- val = tmp; | |
- if (!i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_RECEIVED, | |
- &val)) | |
- dev_vdbg(dev->dev, "Byte %X acked!", val); | |
- } else { | |
+ if (stat & DW_IC_INTR_STOP_DET) { | |
+ dev->status = STATUS_IDLE; | |
i2c_slave_event(dev->slave, I2C_SLAVE_STOP, &val); | |
- stat = i2c_dw_read_clear_intrbits_slave(dev); | |
} | |
return 1; | |
@@ -230,7 +217,6 @@ static irqreturn_t i2c_dw_isr_slave(int this_irq, void *dev_id) | |
struct dw_i2c_dev *dev = dev_id; | |
int ret; | |
- i2c_dw_read_clear_intrbits_slave(dev); | |
ret = i2c_dw_irq_handler_slave(dev); | |
if (ret > 0) | |
complete(&dev->cmd_complete); | |
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c | |
index 0cbdfbe605b55..33de99b7bc20c 100644 | |
--- a/drivers/i2c/busses/i2c-mt65xx.c | |
+++ b/drivers/i2c/busses/i2c-mt65xx.c | |
@@ -475,6 +475,10 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c) | |
{ | |
u16 control_reg; | |
+ writel(I2C_DMA_HARD_RST, i2c->pdmabase + OFFSET_RST); | |
+ udelay(50); | |
+ writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST); | |
+ | |
mtk_i2c_writew(i2c, I2C_SOFT_RST, OFFSET_SOFTRESET); | |
/* Set ioconfig */ | |
@@ -529,10 +533,6 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c) | |
mtk_i2c_writew(i2c, control_reg, OFFSET_CONTROL); | |
mtk_i2c_writew(i2c, I2C_DELAY_LEN, OFFSET_DELAY_LEN); | |
- | |
- writel(I2C_DMA_HARD_RST, i2c->pdmabase + OFFSET_RST); | |
- udelay(50); | |
- writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST); | |
} | |
static const struct i2c_spec_values *mtk_i2c_get_spec(unsigned int speed) | |
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c | |
index cab7255599991..bdd60770779ad 100644 | |
--- a/drivers/i2c/busses/i2c-sh_mobile.c | |
+++ b/drivers/i2c/busses/i2c-sh_mobile.c | |
@@ -129,6 +129,7 @@ struct sh_mobile_i2c_data { | |
int sr; | |
bool send_stop; | |
bool stop_after_dma; | |
+ bool atomic_xfer; | |
struct resource *res; | |
struct dma_chan *dma_tx; | |
@@ -330,13 +331,15 @@ static unsigned char i2c_op(struct sh_mobile_i2c_data *pd, enum sh_mobile_i2c_op | |
ret = iic_rd(pd, ICDR); | |
break; | |
case OP_RX_STOP: /* enable DTE interrupt, issue stop */ | |
- iic_wr(pd, ICIC, | |
- ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE); | |
+ if (!pd->atomic_xfer) | |
+ iic_wr(pd, ICIC, | |
+ ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE); | |
iic_wr(pd, ICCR, ICCR_ICE | ICCR_RACK); | |
break; | |
case OP_RX_STOP_DATA: /* enable DTE interrupt, read data, issue stop */ | |
- iic_wr(pd, ICIC, | |
- ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE); | |
+ if (!pd->atomic_xfer) | |
+ iic_wr(pd, ICIC, | |
+ ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE); | |
ret = iic_rd(pd, ICDR); | |
iic_wr(pd, ICCR, ICCR_ICE | ICCR_RACK); | |
break; | |
@@ -429,7 +432,8 @@ static irqreturn_t sh_mobile_i2c_isr(int irq, void *dev_id) | |
if (wakeup) { | |
pd->sr |= SW_DONE; | |
- wake_up(&pd->wait); | |
+ if (!pd->atomic_xfer) | |
+ wake_up(&pd->wait); | |
} | |
/* defeat write posting to avoid spurious WAIT interrupts */ | |
@@ -581,6 +585,9 @@ static void start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg, | |
pd->pos = -1; | |
pd->sr = 0; | |
+ if (pd->atomic_xfer) | |
+ return; | |
+ | |
pd->dma_buf = i2c_get_dma_safe_msg_buf(pd->msg, 8); | |
if (pd->dma_buf) | |
sh_mobile_i2c_xfer_dma(pd); | |
@@ -637,15 +644,13 @@ static int poll_busy(struct sh_mobile_i2c_data *pd) | |
return i ? 0 : -ETIMEDOUT; | |
} | |
-static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter, | |
- struct i2c_msg *msgs, | |
- int num) | |
+static int sh_mobile_xfer(struct sh_mobile_i2c_data *pd, | |
+ struct i2c_msg *msgs, int num) | |
{ | |
- struct sh_mobile_i2c_data *pd = i2c_get_adapdata(adapter); | |
struct i2c_msg *msg; | |
int err = 0; | |
int i; | |
- long timeout; | |
+ long time_left; | |
/* Wake up device and enable clock */ | |
pm_runtime_get_sync(pd->dev); | |
@@ -662,15 +667,35 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter, | |
if (do_start) | |
i2c_op(pd, OP_START); | |
- /* The interrupt handler takes care of the rest... */ | |
- timeout = wait_event_timeout(pd->wait, | |
- pd->sr & (ICSR_TACK | SW_DONE), | |
- adapter->timeout); | |
- | |
- /* 'stop_after_dma' tells if DMA transfer was complete */ | |
- i2c_put_dma_safe_msg_buf(pd->dma_buf, pd->msg, pd->stop_after_dma); | |
+ if (pd->atomic_xfer) { | |
+ unsigned long j = jiffies + pd->adap.timeout; | |
+ | |
+ time_left = time_before_eq(jiffies, j); | |
+ while (time_left && | |
+ !(pd->sr & (ICSR_TACK | SW_DONE))) { | |
+ unsigned char sr = iic_rd(pd, ICSR); | |
+ | |
+ if (sr & (ICSR_AL | ICSR_TACK | | |
+ ICSR_WAIT | ICSR_DTE)) { | |
+ sh_mobile_i2c_isr(0, pd); | |
+ udelay(150); | |
+ } else { | |
+ cpu_relax(); | |
+ } | |
+ time_left = time_before_eq(jiffies, j); | |
+ } | |
+ } else { | |
+ /* The interrupt handler takes care of the rest... */ | |
+ time_left = wait_event_timeout(pd->wait, | |
+ pd->sr & (ICSR_TACK | SW_DONE), | |
+ pd->adap.timeout); | |
+ | |
+ /* 'stop_after_dma' tells if DMA xfer was complete */ | |
+ i2c_put_dma_safe_msg_buf(pd->dma_buf, pd->msg, | |
+ pd->stop_after_dma); | |
+ } | |
- if (!timeout) { | |
+ if (!time_left) { | |
dev_err(pd->dev, "Transfer request timed out\n"); | |
if (pd->dma_direction != DMA_NONE) | |
sh_mobile_i2c_cleanup_dma(pd); | |
@@ -696,14 +721,35 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter, | |
return err ?: num; | |
} | |
+static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter, | |
+ struct i2c_msg *msgs, | |
+ int num) | |
+{ | |
+ struct sh_mobile_i2c_data *pd = i2c_get_adapdata(adapter); | |
+ | |
+ pd->atomic_xfer = false; | |
+ return sh_mobile_xfer(pd, msgs, num); | |
+} | |
+ | |
+static int sh_mobile_i2c_xfer_atomic(struct i2c_adapter *adapter, | |
+ struct i2c_msg *msgs, | |
+ int num) | |
+{ | |
+ struct sh_mobile_i2c_data *pd = i2c_get_adapdata(adapter); | |
+ | |
+ pd->atomic_xfer = true; | |
+ return sh_mobile_xfer(pd, msgs, num); | |
+} | |
+ | |
static u32 sh_mobile_i2c_func(struct i2c_adapter *adapter) | |
{ | |
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING; | |
} | |
static const struct i2c_algorithm sh_mobile_i2c_algorithm = { | |
- .functionality = sh_mobile_i2c_func, | |
- .master_xfer = sh_mobile_i2c_xfer, | |
+ .functionality = sh_mobile_i2c_func, | |
+ .master_xfer = sh_mobile_i2c_xfer, | |
+ .master_xfer_atomic = sh_mobile_i2c_xfer_atomic, | |
}; | |
static const struct i2c_adapter_quirks sh_mobile_i2c_quirks = { | |
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c | |
index 0065eb17ae36b..1b096305de1a4 100644 | |
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c | |
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c | |
@@ -622,10 +622,11 @@ static int srpt_refresh_port(struct srpt_port *sport) | |
/** | |
* srpt_unregister_mad_agent - unregister MAD callback functions | |
* @sdev: SRPT HCA pointer. | |
+ * @port_cnt: number of ports with registered MAD agents | 
* | |
* Note: It is safe to call this function more than once for the same device. | |
*/ | |
-static void srpt_unregister_mad_agent(struct srpt_device *sdev) | |
+static void srpt_unregister_mad_agent(struct srpt_device *sdev, int port_cnt) | |
{ | |
struct ib_port_modify port_modify = { | |
.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP, | |
@@ -633,7 +634,7 @@ static void srpt_unregister_mad_agent(struct srpt_device *sdev) | |
struct srpt_port *sport; | |
int i; | |
- for (i = 1; i <= sdev->device->phys_port_cnt; i++) { | |
+ for (i = 1; i <= port_cnt; i++) { | |
sport = &sdev->port[i - 1]; | |
WARN_ON(sport->port != i); | |
if (sport->mad_agent) { | |
@@ -3185,7 +3186,8 @@ static int srpt_add_one(struct ib_device *device) | |
if (ret) { | |
pr_err("MAD registration failed for %s-%d.\n", | |
dev_name(&sdev->device->dev), i); | |
- goto err_event; | |
+ i--; | |
+ goto err_port; | |
} | |
} | |
@@ -3197,7 +3199,8 @@ static int srpt_add_one(struct ib_device *device) | |
pr_debug("added %s.\n", dev_name(&device->dev)); | |
return 0; | |
-err_event: | |
+err_port: | |
+ srpt_unregister_mad_agent(sdev, i); | |
ib_unregister_event_handler(&sdev->event_handler); | |
err_cm: | |
if (sdev->cm_id) | |
@@ -3221,7 +3224,7 @@ static void srpt_remove_one(struct ib_device *device, void *client_data) | |
struct srpt_device *sdev = client_data; | |
int i; | |
- srpt_unregister_mad_agent(sdev); | |
+ srpt_unregister_mad_agent(sdev, sdev->device->phys_port_cnt); | |
ib_unregister_event_handler(&sdev->event_handler); | |
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h | |
index 30a5d412255a4..427484c455891 100644 | |
--- a/drivers/iommu/amd/amd_iommu_types.h | |
+++ b/drivers/iommu/amd/amd_iommu_types.h | |
@@ -406,7 +406,11 @@ extern bool amd_iommu_np_cache; | |
/* Only true if all IOMMUs support device IOTLBs */ | |
extern bool amd_iommu_iotlb_sup; | |
-#define MAX_IRQS_PER_TABLE 256 | |
+/* | |
+ * AMD IOMMU hardware only supports 512 IRTEs despite | 
+ * the architectural limitation of 2048 entries. | |
+ */ | |
+#define MAX_IRQS_PER_TABLE 512 | |
#define IRQ_TABLE_ALIGNMENT 128 | |
struct irq_remap_table { | |
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c | |
index 95c3164a2302f..20fa8c7fcd8e7 100644 | |
--- a/drivers/iommu/intel/svm.c | |
+++ b/drivers/iommu/intel/svm.c | |
@@ -278,6 +278,7 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev, | |
struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL); | |
struct intel_svm_dev *sdev = NULL; | |
struct dmar_domain *dmar_domain; | |
+ struct device_domain_info *info; | |
struct intel_svm *svm = NULL; | |
int ret = 0; | |
@@ -302,6 +303,10 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev, | |
if (data->hpasid <= 0 || data->hpasid >= PASID_MAX) | |
return -EINVAL; | |
+ info = get_domain_info(dev); | |
+ if (!info) | |
+ return -EINVAL; | |
+ | |
dmar_domain = to_dmar_domain(domain); | |
mutex_lock(&pasid_mutex); | |
@@ -349,6 +354,7 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev, | |
goto out; | |
} | |
sdev->dev = dev; | |
+ sdev->sid = PCI_DEVID(info->bus, info->devfn); | |
/* Only count users if device has aux domains */ | |
if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX)) | |
@@ -995,7 +1001,7 @@ no_pasid: | |
resp.qw0 = QI_PGRP_PASID(req->pasid) | | |
QI_PGRP_DID(req->rid) | | |
QI_PGRP_PASID_P(req->pasid_present) | | |
- QI_PGRP_PDP(req->pasid_present) | | |
+ QI_PGRP_PDP(req->priv_data_present) | | |
QI_PGRP_RESP_CODE(result) | | |
QI_PGRP_RESP_TYPE; | |
resp.qw1 = QI_PGRP_IDX(req->prg_index) | | |
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c | |
index eaa3e9fe54e91..6f432d2a5cebd 100644 | |
--- a/drivers/irqchip/irq-sifive-plic.c | |
+++ b/drivers/irqchip/irq-sifive-plic.c | |
@@ -99,7 +99,7 @@ static inline void plic_irq_toggle(const struct cpumask *mask, | |
struct irq_data *d, int enable) | |
{ | |
int cpu; | |
- struct plic_priv *priv = irq_get_chip_data(d->irq); | |
+ struct plic_priv *priv = irq_data_get_irq_chip_data(d); | |
writel(enable, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID); | |
for_each_cpu(cpu, mask) { | |
@@ -115,7 +115,7 @@ static void plic_irq_unmask(struct irq_data *d) | |
{ | |
struct cpumask amask; | |
unsigned int cpu; | |
- struct plic_priv *priv = irq_get_chip_data(d->irq); | |
+ struct plic_priv *priv = irq_data_get_irq_chip_data(d); | |
cpumask_and(&amask, &priv->lmask, cpu_online_mask); | |
cpu = cpumask_any_and(irq_data_get_affinity_mask(d), | |
@@ -127,7 +127,7 @@ static void plic_irq_unmask(struct irq_data *d) | |
static void plic_irq_mask(struct irq_data *d) | |
{ | |
- struct plic_priv *priv = irq_get_chip_data(d->irq); | |
+ struct plic_priv *priv = irq_data_get_irq_chip_data(d); | |
plic_irq_toggle(&priv->lmask, d, 0); | |
} | |
@@ -138,7 +138,7 @@ static int plic_set_affinity(struct irq_data *d, | |
{ | |
unsigned int cpu; | |
struct cpumask amask; | |
- struct plic_priv *priv = irq_get_chip_data(d->irq); | |
+ struct plic_priv *priv = irq_data_get_irq_chip_data(d); | |
cpumask_and(&amask, &priv->lmask, mask_val); | |
@@ -151,7 +151,7 @@ static int plic_set_affinity(struct irq_data *d, | |
return -EINVAL; | |
plic_irq_toggle(&priv->lmask, d, 0); | |
- plic_irq_toggle(cpumask_of(cpu), d, 1); | |
+ plic_irq_toggle(cpumask_of(cpu), d, !irqd_irq_masked(d)); | |
irq_data_update_effective_affinity(d, cpumask_of(cpu)); | |
diff --git a/drivers/mfd/sprd-sc27xx-spi.c b/drivers/mfd/sprd-sc27xx-spi.c | |
index f8a8b918c60d9..6b7956604a0f0 100644 | |
--- a/drivers/mfd/sprd-sc27xx-spi.c | |
+++ b/drivers/mfd/sprd-sc27xx-spi.c | |
@@ -189,7 +189,7 @@ static int sprd_pmic_probe(struct spi_device *spi) | |
ddata->irqs[i].mask = BIT(i); | |
ret = devm_regmap_add_irq_chip(&spi->dev, ddata->regmap, ddata->irq, | |
- IRQF_ONESHOT | IRQF_NO_SUSPEND, 0, | |
+ IRQF_ONESHOT, 0, | |
&ddata->irq_chip, &ddata->irq_data); | |
if (ret) { | |
dev_err(&spi->dev, "Failed to add PMIC irq chip %d\n", ret); | |
@@ -202,9 +202,34 @@ static int sprd_pmic_probe(struct spi_device *spi) | |
return ret; | |
} | |
+ device_init_wakeup(&spi->dev, true); | |
return 0; | |
} | |
+#ifdef CONFIG_PM_SLEEP | |
+static int sprd_pmic_suspend(struct device *dev) | |
+{ | |
+ struct sprd_pmic *ddata = dev_get_drvdata(dev); | |
+ | |
+ if (device_may_wakeup(dev)) | |
+ enable_irq_wake(ddata->irq); | |
+ | |
+ return 0; | |
+} | |
+ | |
+static int sprd_pmic_resume(struct device *dev) | |
+{ | |
+ struct sprd_pmic *ddata = dev_get_drvdata(dev); | |
+ | |
+ if (device_may_wakeup(dev)) | |
+ disable_irq_wake(ddata->irq); | |
+ | |
+ return 0; | |
+} | |
+#endif | |
+ | |
+static SIMPLE_DEV_PM_OPS(sprd_pmic_pm_ops, sprd_pmic_suspend, sprd_pmic_resume); | |
+ | |
static const struct of_device_id sprd_pmic_match[] = { | |
{ .compatible = "sprd,sc2731", .data = &sc2731_data }, | |
{}, | |
@@ -215,6 +240,7 @@ static struct spi_driver sprd_pmic_driver = { | |
.driver = { | |
.name = "sc27xx-pmic", | |
.of_match_table = sprd_pmic_match, | |
+ .pm = &sprd_pmic_pm_ops, | |
}, | |
.probe = sprd_pmic_probe, | |
}; | |
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h | |
index 2f8954def591b..98ff7af531a1c 100644 | |
--- a/drivers/misc/mei/client.h | |
+++ b/drivers/misc/mei/client.h | |
@@ -164,11 +164,11 @@ static inline u8 mei_cl_me_id(const struct mei_cl *cl) | |
* | |
* @cl: host client | |
* | |
- * Return: mtu | |
+ * Return: mtu or 0 if client is not connected | |
*/ | |
static inline size_t mei_cl_mtu(const struct mei_cl *cl) | |
{ | |
- return cl->me_cl->props.max_msg_length; | |
+ return cl->me_cl ? cl->me_cl->props.max_msg_length : 0; | |
} | |
/** | |
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c | |
index 904f5237d8f7e..13880fc76d340 100644 | |
--- a/drivers/mmc/host/renesas_sdhi_core.c | |
+++ b/drivers/mmc/host/renesas_sdhi_core.c | |
@@ -997,6 +997,7 @@ int renesas_sdhi_remove(struct platform_device *pdev) | |
tmio_mmc_host_remove(host); | |
renesas_sdhi_clk_disable(host); | |
+ tmio_mmc_host_free(host); | |
return 0; | |
} | |
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c | |
index 156e75302df56..856e888d2195c 100644 | |
--- a/drivers/mmc/host/sdhci-of-esdhc.c | |
+++ b/drivers/mmc/host/sdhci-of-esdhc.c | |
@@ -1324,6 +1324,8 @@ static struct soc_device_attribute soc_fixup_sdhc_clkdivs[] = { | |
static struct soc_device_attribute soc_unreliable_pulse_detection[] = { | |
{ .family = "QorIQ LX2160A", .revision = "1.0", }, | |
+ { .family = "QorIQ LX2160A", .revision = "2.0", }, | |
+ { .family = "QorIQ LS1028A", .revision = "1.0", }, | |
{ }, | |
}; | |
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c | |
index b37d6c1936de1..f0ae7a01703a1 100644 | |
--- a/drivers/mtd/spi-nor/core.c | |
+++ b/drivers/mtd/spi-nor/core.c | |
@@ -3008,13 +3008,15 @@ static int spi_nor_set_addr_width(struct spi_nor *nor) | |
/* already configured from SFDP */ | |
} else if (nor->info->addr_width) { | |
nor->addr_width = nor->info->addr_width; | |
- } else if (nor->mtd.size > 0x1000000) { | |
- /* enable 4-byte addressing if the device exceeds 16MiB */ | |
- nor->addr_width = 4; | |
} else { | |
nor->addr_width = 3; | |
} | |
+ if (nor->addr_width == 3 && nor->mtd.size > 0x1000000) { | |
+ /* enable 4-byte addressing if the device exceeds 16MiB */ | |
+ nor->addr_width = 4; | |
+ } | |
+ | |
if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) { | |
dev_dbg(nor->dev, "address width is too large: %u\n", | |
nor->addr_width); | |
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c | |
index 68834a2853c9d..d5e52ffc7ed25 100644 | |
--- a/drivers/net/can/dev.c | |
+++ b/drivers/net/can/dev.c | |
@@ -486,9 +486,13 @@ __can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr) | |
*/ | |
struct sk_buff *skb = priv->echo_skb[idx]; | |
struct canfd_frame *cf = (struct canfd_frame *)skb->data; | |
- u8 len = cf->len; | |
- *len_ptr = len; | |
+ /* get the real payload length for netdev statistics */ | |
+ if (cf->can_id & CAN_RTR_FLAG) | |
+ *len_ptr = 0; | |
+ else | |
+ *len_ptr = cf->len; | |
+ | |
priv->echo_skb[idx] = NULL; | |
return skb; | |
@@ -512,7 +516,11 @@ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx) | |
if (!skb) | |
return 0; | |
- netif_rx(skb); | |
+ skb_get(skb); | |
+ if (netif_rx(skb) == NET_RX_SUCCESS) | |
+ dev_consume_skb_any(skb); | |
+ else | |
+ dev_kfree_skb_any(skb); | |
return len; | |
} | |
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c | |
index bc21a82cf3a76..a330d6c56242e 100644 | |
--- a/drivers/net/can/flexcan.c | |
+++ b/drivers/net/can/flexcan.c | |
@@ -321,8 +321,7 @@ static const struct flexcan_devtype_data fsl_vf610_devtype_data = { | |
static const struct flexcan_devtype_data fsl_ls1021a_r2_devtype_data = { | |
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | | |
- FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_BROKEN_PERR_STATE | | |
- FLEXCAN_QUIRK_USE_OFF_TIMESTAMP, | |
+ FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP, | |
}; | |
static const struct can_bittiming_const flexcan_bittiming_const = { | |
@@ -1696,6 +1695,8 @@ static int flexcan_remove(struct platform_device *pdev) | |
{ | |
struct net_device *dev = platform_get_drvdata(pdev); | |
+ device_set_wakeup_enable(&pdev->dev, false); | |
+ device_set_wakeup_capable(&pdev->dev, false); | |
unregister_flexcandev(dev); | |
pm_runtime_disable(&pdev->dev); | |
free_candev(dev); | |
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c | |
index 10aa3e457c33d..40c33b8a5fda3 100644 | |
--- a/drivers/net/can/peak_canfd/peak_canfd.c | |
+++ b/drivers/net/can/peak_canfd/peak_canfd.c | |
@@ -262,8 +262,7 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv, | |
cf_len = get_can_dlc(pucan_msg_get_dlc(msg)); | |
/* if this frame is an echo, */ | |
- if ((rx_msg_flags & PUCAN_MSG_LOOPED_BACK) && | |
- !(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE)) { | |
+ if (rx_msg_flags & PUCAN_MSG_LOOPED_BACK) { | |
unsigned long flags; | |
spin_lock_irqsave(&priv->echo_lock, flags); | |
@@ -277,7 +276,13 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv, | |
netif_wake_queue(priv->ndev); | |
spin_unlock_irqrestore(&priv->echo_lock, flags); | |
- return 0; | |
+ | |
+ /* if this frame is only an echo, stop here. Otherwise, | |
+ * continue to push this application self-received frame into | |
+ * its own rx queue. | |
+ */ | |
+ if (!(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE)) | |
+ return 0; | |
} | |
/* otherwise, it should be pushed into rx fifo */ | |
diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c | |
index e8328910a2349..0283b5cad746a 100644 | |
--- a/drivers/net/can/rx-offload.c | |
+++ b/drivers/net/can/rx-offload.c | |
@@ -245,7 +245,7 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload, | |
if (skb_queue_len(&offload->skb_queue) > | |
offload->skb_queue_len_max) { | |
- kfree_skb(skb); | |
+ dev_kfree_skb_any(skb); | |
return -ENOBUFS; | |
} | |
@@ -290,7 +290,7 @@ int can_rx_offload_queue_tail(struct can_rx_offload *offload, | |
{ | |
if (skb_queue_len(&offload->skb_queue) > | |
offload->skb_queue_len_max) { | |
- kfree_skb(skb); | |
+ dev_kfree_skb_any(skb); | |
return -ENOBUFS; | |
} | |
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c | |
index 94b1491b569f3..228ecd45ca6c1 100644 | |
--- a/drivers/net/can/ti_hecc.c | |
+++ b/drivers/net/can/ti_hecc.c | |
@@ -950,7 +950,7 @@ static int ti_hecc_probe(struct platform_device *pdev) | |
err = clk_prepare_enable(priv->clk); | |
if (err) { | |
dev_err(&pdev->dev, "clk_prepare_enable() failed\n"); | |
- goto probe_exit_clk; | |
+ goto probe_exit_release_clk; | |
} | |
priv->offload.mailbox_read = ti_hecc_mailbox_read; | |
@@ -959,7 +959,7 @@ static int ti_hecc_probe(struct platform_device *pdev) | |
err = can_rx_offload_add_timestamp(ndev, &priv->offload); | |
if (err) { | |
dev_err(&pdev->dev, "can_rx_offload_add_timestamp() failed\n"); | |
- goto probe_exit_clk; | |
+ goto probe_exit_disable_clk; | |
} | |
err = register_candev(ndev); | |
@@ -977,7 +977,9 @@ static int ti_hecc_probe(struct platform_device *pdev) | |
probe_exit_offload: | |
can_rx_offload_del(&priv->offload); | |
-probe_exit_clk: | |
+probe_exit_disable_clk: | |
+ clk_disable_unprepare(priv->clk); | |
+probe_exit_release_clk: | |
clk_put(priv->clk); | |
probe_exit_candev: | |
free_candev(ndev); | |
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c | |
index d91df34e7fa88..c2764799f9efb 100644 | |
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c | |
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c | |
@@ -130,14 +130,55 @@ void peak_usb_get_ts_time(struct peak_time_ref *time_ref, u32 ts, ktime_t *time) | |
/* protect from getting time before setting now */ | |
if (ktime_to_ns(time_ref->tv_host)) { | |
u64 delta_us; | |
+ s64 delta_ts = 0; | |
+ | |
+ /* General case: dev_ts_1 < dev_ts_2 < ts, with: | |
+ * | |
+ * - dev_ts_1 = previous sync timestamp | |
+ * - dev_ts_2 = last sync timestamp | |
+ * - ts = event timestamp | |
+ * - ts_period = known sync period (theoretical) | |
+ * ~ dev_ts2 - dev_ts1 | |
+ * *but*: | |
+ * | |
+ * - time counters wrap (see adapter->ts_used_bits) | |
+ * - sometimes, dev_ts_1 < ts < dev_ts2 | |
+ * | |
+ * "normal" case (sync time counters increase): | |
+ * must take into account case when ts wraps (tsw) | |
+ * | |
+ * < ts_period > < > | |
+ * | | | | |
+ * ---+--------+----+-------0-+--+--> | |
+ * ts_dev_1 | ts_dev_2 | | |
+ * ts tsw | |
+ */ | |
+ if (time_ref->ts_dev_1 < time_ref->ts_dev_2) { | |
+ /* case when event time (tsw) wraps */ | |
+ if (ts < time_ref->ts_dev_1) | |
+ delta_ts = 1 << time_ref->adapter->ts_used_bits; | |
+ | |
+ /* Otherwise, sync time counter (ts_dev_2) has wrapped: | |
+ * handle case when event time (tsn) hasn't. | |
+ * | |
+ * < ts_period > < > | |
+ * | | | | |
+ * ---+--------+--0-+---------+--+--> | |
+ * ts_dev_1 | ts_dev_2 | | |
+ * tsn ts | |
+ */ | |
+ } else if (time_ref->ts_dev_1 < ts) { | |
+ delta_ts = -(1 << time_ref->adapter->ts_used_bits); | |
+ } | |
- delta_us = ts - time_ref->ts_dev_2; | |
- if (ts < time_ref->ts_dev_2) | |
- delta_us &= (1 << time_ref->adapter->ts_used_bits) - 1; | |
+ /* add delay between last sync and event timestamps */ | |
+ delta_ts += (signed int)(ts - time_ref->ts_dev_2); | |
- delta_us += time_ref->ts_total; | |
+ /* add time from beginning to last sync */ | |
+ delta_ts += time_ref->ts_total; | |
- delta_us *= time_ref->adapter->us_per_ts_scale; | |
+ /* convert ticks number into microseconds */ | |
+ delta_us = delta_ts * time_ref->adapter->us_per_ts_scale; | |
delta_us >>= time_ref->adapter->us_per_ts_shift; | |
*time = ktime_add_us(time_ref->tv_host_0, delta_us); | |
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c | |
index 47cc1ff5b88e8..dee3e689b54da 100644 | |
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c | |
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c | |
@@ -468,12 +468,18 @@ static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if, | |
struct pucan_msg *rx_msg) | |
{ | |
struct pucan_rx_msg *rm = (struct pucan_rx_msg *)rx_msg; | |
- struct peak_usb_device *dev = usb_if->dev[pucan_msg_get_channel(rm)]; | |
- struct net_device *netdev = dev->netdev; | |
+ struct peak_usb_device *dev; | |
+ struct net_device *netdev; | |
struct canfd_frame *cfd; | |
struct sk_buff *skb; | |
const u16 rx_msg_flags = le16_to_cpu(rm->flags); | |
+ if (pucan_msg_get_channel(rm) >= ARRAY_SIZE(usb_if->dev)) | |
+ return -ENOMEM; | |
+ | |
+ dev = usb_if->dev[pucan_msg_get_channel(rm)]; | |
+ netdev = dev->netdev; | |
+ | |
if (rx_msg_flags & PUCAN_MSG_EXT_DATA_LEN) { | |
/* CANFD frame case */ | |
skb = alloc_canfd_skb(netdev, &cfd); | |
@@ -519,15 +525,21 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if, | |
struct pucan_msg *rx_msg) | |
{ | |
struct pucan_status_msg *sm = (struct pucan_status_msg *)rx_msg; | |
- struct peak_usb_device *dev = usb_if->dev[pucan_stmsg_get_channel(sm)]; | |
- struct pcan_usb_fd_device *pdev = | |
- container_of(dev, struct pcan_usb_fd_device, dev); | |
+ struct pcan_usb_fd_device *pdev; | |
enum can_state new_state = CAN_STATE_ERROR_ACTIVE; | |
enum can_state rx_state, tx_state; | |
- struct net_device *netdev = dev->netdev; | |
+ struct peak_usb_device *dev; | |
+ struct net_device *netdev; | |
struct can_frame *cf; | |
struct sk_buff *skb; | |
+ if (pucan_stmsg_get_channel(sm) >= ARRAY_SIZE(usb_if->dev)) | |
+ return -ENOMEM; | |
+ | |
+ dev = usb_if->dev[pucan_stmsg_get_channel(sm)]; | |
+ pdev = container_of(dev, struct pcan_usb_fd_device, dev); | |
+ netdev = dev->netdev; | |
+ | |
/* nothing should be sent while in BUS_OFF state */ | |
if (dev->can.state == CAN_STATE_BUS_OFF) | |
return 0; | |
@@ -579,9 +591,14 @@ static int pcan_usb_fd_decode_error(struct pcan_usb_fd_if *usb_if, | |
struct pucan_msg *rx_msg) | |
{ | |
struct pucan_error_msg *er = (struct pucan_error_msg *)rx_msg; | |
- struct peak_usb_device *dev = usb_if->dev[pucan_ermsg_get_channel(er)]; | |
- struct pcan_usb_fd_device *pdev = | |
- container_of(dev, struct pcan_usb_fd_device, dev); | |
+ struct pcan_usb_fd_device *pdev; | |
+ struct peak_usb_device *dev; | |
+ | |
+ if (pucan_ermsg_get_channel(er) >= ARRAY_SIZE(usb_if->dev)) | |
+ return -EINVAL; | |
+ | |
+ dev = usb_if->dev[pucan_ermsg_get_channel(er)]; | |
+ pdev = container_of(dev, struct pcan_usb_fd_device, dev); | |
/* keep a trace of tx and rx error counters for later use */ | |
pdev->bec.txerr = er->tx_err_cnt; | |
@@ -595,11 +612,17 @@ static int pcan_usb_fd_decode_overrun(struct pcan_usb_fd_if *usb_if, | |
struct pucan_msg *rx_msg) | |
{ | |
struct pcan_ufd_ovr_msg *ov = (struct pcan_ufd_ovr_msg *)rx_msg; | |
- struct peak_usb_device *dev = usb_if->dev[pufd_omsg_get_channel(ov)]; | |
- struct net_device *netdev = dev->netdev; | |
+ struct peak_usb_device *dev; | |
+ struct net_device *netdev; | |
struct can_frame *cf; | |
struct sk_buff *skb; | |
+ if (pufd_omsg_get_channel(ov) >= ARRAY_SIZE(usb_if->dev)) | |
+ return -EINVAL; | |
+ | |
+ dev = usb_if->dev[pufd_omsg_get_channel(ov)]; | |
+ netdev = dev->netdev; | |
+ | |
/* allocate an skb to store the error frame */ | |
skb = alloc_can_err_skb(netdev, &cf); | |
if (!skb) | |
@@ -716,6 +739,9 @@ static int pcan_usb_fd_encode_msg(struct peak_usb_device *dev, | |
u16 tx_msg_size, tx_msg_flags; | |
u8 can_dlc; | |
+ if (cfd->len > CANFD_MAX_DLEN) | |
+ return -EINVAL; | |
+ | |
tx_msg_size = ALIGN(sizeof(struct pucan_tx_msg) + cfd->len, 4); | |
tx_msg->size = cpu_to_le16(tx_msg_size); | |
tx_msg->type = cpu_to_le16(PUCAN_MSG_CAN_TX); | |
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c | |
index c1dbab8c896d5..748ff70f6a7bf 100644 | |
--- a/drivers/net/can/xilinx_can.c | |
+++ b/drivers/net/can/xilinx_can.c | |
@@ -1391,7 +1391,7 @@ static int xcan_open(struct net_device *ndev) | |
if (ret < 0) { | |
netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", | |
__func__, ret); | |
- return ret; | |
+ goto err; | |
} | |
ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags, | |
@@ -1475,6 +1475,7 @@ static int xcan_get_berr_counter(const struct net_device *ndev, | |
if (ret < 0) { | |
netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", | |
__func__, ret); | |
+ pm_runtime_put(priv->dev); | |
return ret; | |
} | |
@@ -1789,7 +1790,7 @@ static int xcan_probe(struct platform_device *pdev) | |
if (ret < 0) { | |
netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", | |
__func__, ret); | |
- goto err_pmdisable; | |
+ goto err_disableclks; | |
} | |
if (priv->read_reg(priv, XCAN_SR_OFFSET) != XCAN_SR_CONFIG_MASK) { | |
@@ -1824,7 +1825,6 @@ static int xcan_probe(struct platform_device *pdev) | |
err_disableclks: | |
pm_runtime_put(priv->dev); | |
-err_pmdisable: | |
pm_runtime_disable(&pdev->dev); | |
err_free: | |
free_candev(ndev); | |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | |
index 47bfb2e95e2db..343177d71f70a 100644 | |
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | |
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | |
@@ -2712,6 +2712,10 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) | |
spin_unlock_bh(&vsi->mac_filter_hash_lock); | |
goto error_param; | |
} | |
+ if (is_valid_ether_addr(al->list[i].addr) && | |
+ is_zero_ether_addr(vf->default_lan_addr.addr)) | |
+ ether_addr_copy(vf->default_lan_addr.addr, | |
+ al->list[i].addr); | |
} | |
} | |
spin_unlock_bh(&vsi->mac_filter_hash_lock); | |
@@ -2739,6 +2743,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg) | |
{ | |
struct virtchnl_ether_addr_list *al = | |
(struct virtchnl_ether_addr_list *)msg; | |
+ bool was_unimac_deleted = false; | |
struct i40e_pf *pf = vf->pf; | |
struct i40e_vsi *vsi = NULL; | |
i40e_status ret = 0; | |
@@ -2758,6 +2763,8 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg) | |
ret = I40E_ERR_INVALID_MAC_ADDR; | |
goto error_param; | |
} | |
+ if (ether_addr_equal(al->list[i].addr, vf->default_lan_addr.addr)) | |
+ was_unimac_deleted = true; | |
} | |
vsi = pf->vsi[vf->lan_vsi_idx]; | |
@@ -2778,10 +2785,25 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg) | |
dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n", | |
vf->vf_id, ret); | |
+ if (vf->trusted && was_unimac_deleted) { | |
+ struct i40e_mac_filter *f; | |
+ struct hlist_node *h; | |
+ u8 *macaddr = NULL; | |
+ int bkt; | |
+ | |
+ /* set last unicast mac address as default */ | |
+ spin_lock_bh(&vsi->mac_filter_hash_lock); | |
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { | |
+ if (is_valid_ether_addr(f->macaddr)) | |
+ macaddr = f->macaddr; | |
+ } | |
+ if (macaddr) | |
+ ether_addr_copy(vf->default_lan_addr.addr, macaddr); | |
+ spin_unlock_bh(&vsi->mac_filter_hash_lock); | |
+ } | |
error_param: | |
/* send the response to the VF */ | |
- return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, | |
- ret); | |
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, ret); | |
} | |
/** | |
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c | |
index 9593aa4eea369..1358a39c34ad3 100644 | |
--- a/drivers/net/ethernet/intel/igc/igc_main.c | |
+++ b/drivers/net/ethernet/intel/igc/igc_main.c | |
@@ -3890,21 +3890,23 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu) | |
} | |
/** | |
- * igc_get_stats - Get System Network Statistics | |
+ * igc_get_stats64 - Get System Network Statistics | |
* @netdev: network interface device structure | |
+ * @stats: rtnl_link_stats64 pointer | |
* | |
* Returns the address of the device statistics structure. | |
* The statistics are updated here and also from the timer callback. | |
*/ | |
-static struct net_device_stats *igc_get_stats(struct net_device *netdev) | |
+static void igc_get_stats64(struct net_device *netdev, | |
+ struct rtnl_link_stats64 *stats) | |
{ | |
struct igc_adapter *adapter = netdev_priv(netdev); | |
+ spin_lock(&adapter->stats64_lock); | |
if (!test_bit(__IGC_RESETTING, &adapter->state)) | |
igc_update_stats(adapter); | |
- | |
- /* only return the current stats */ | |
- return &netdev->stats; | |
+ memcpy(stats, &adapter->stats64, sizeof(*stats)); | |
+ spin_unlock(&adapter->stats64_lock); | |
} | |
static netdev_features_t igc_fix_features(struct net_device *netdev, | |
@@ -4833,7 +4835,7 @@ static const struct net_device_ops igc_netdev_ops = { | |
.ndo_set_rx_mode = igc_set_rx_mode, | |
.ndo_set_mac_address = igc_set_mac, | |
.ndo_change_mtu = igc_change_mtu, | |
- .ndo_get_stats = igc_get_stats, | |
+ .ndo_get_stats64 = igc_get_stats64, | |
.ndo_fix_features = igc_fix_features, | |
.ndo_set_features = igc_set_features, | |
.ndo_features_check = igc_features_check, | |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c | |
index 79cc42d88eec6..38ea249159f60 100644 | |
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c | |
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c | |
@@ -107,12 +107,16 @@ void mlx5e_rep_update_flows(struct mlx5e_priv *priv, | |
mlx5e_tc_encap_flows_del(priv, e, &flow_list); | |
if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) { | |
+ struct net_device *route_dev; | |
+ | |
ether_addr_copy(e->h_dest, ha); | |
ether_addr_copy(eth->h_dest, ha); | |
/* Update the encap source mac, in case that we delete | |
* the flows when encap source mac changed. | |
*/ | |
- ether_addr_copy(eth->h_source, e->route_dev->dev_addr); | |
+ route_dev = __dev_get_by_index(dev_net(priv->netdev), e->route_dev_ifindex); | |
+ if (route_dev) | |
+ ether_addr_copy(eth->h_source, route_dev->dev_addr); | |
mlx5e_tc_encap_flows_add(priv, e, &flow_list); | |
} | |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c | |
index 7cce85faa16fa..90930e54b6f28 100644 | |
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c | |
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c | |
@@ -77,13 +77,13 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv, | |
return 0; | |
} | |
-static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, | |
- struct net_device *mirred_dev, | |
- struct net_device **out_dev, | |
- struct net_device **route_dev, | |
- struct flowi4 *fl4, | |
- struct neighbour **out_n, | |
- u8 *out_ttl) | |
+static int mlx5e_route_lookup_ipv4_get(struct mlx5e_priv *priv, | |
+ struct net_device *mirred_dev, | |
+ struct net_device **out_dev, | |
+ struct net_device **route_dev, | |
+ struct flowi4 *fl4, | |
+ struct neighbour **out_n, | |
+ u8 *out_ttl) | |
{ | |
struct neighbour *n; | |
struct rtable *rt; | |
@@ -117,18 +117,28 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, | |
ip_rt_put(rt); | |
return ret; | |
} | |
+ dev_hold(*route_dev); | |
if (!(*out_ttl)) | |
*out_ttl = ip4_dst_hoplimit(&rt->dst); | |
n = dst_neigh_lookup(&rt->dst, &fl4->daddr); | |
ip_rt_put(rt); | |
- if (!n) | |
+ if (!n) { | |
+ dev_put(*route_dev); | |
return -ENOMEM; | |
+ } | |
*out_n = n; | |
return 0; | |
} | |
+static void mlx5e_route_lookup_ipv4_put(struct net_device *route_dev, | |
+ struct neighbour *n) | |
+{ | |
+ neigh_release(n); | |
+ dev_put(route_dev); | |
+} | |
+ | |
static const char *mlx5e_netdev_kind(struct net_device *dev) | |
{ | |
if (dev->rtnl_link_ops) | |
@@ -193,8 +203,8 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv, | |
fl4.saddr = tun_key->u.ipv4.src; | |
ttl = tun_key->ttl; | |
- err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev, &route_dev, | |
- &fl4, &n, &ttl); | |
+ err = mlx5e_route_lookup_ipv4_get(priv, mirred_dev, &out_dev, &route_dev, | |
+ &fl4, &n, &ttl); | |
if (err) | |
return err; | |
@@ -223,7 +233,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv, | |
e->m_neigh.family = n->ops->family; | |
memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); | |
e->out_dev = out_dev; | |
- e->route_dev = route_dev; | |
+ e->route_dev_ifindex = route_dev->ifindex; | |
/* It's important to add the neigh to the hash table before checking | |
* the neigh validity state. So if we'll get a notification, in case the | |
@@ -278,7 +288,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv, | |
e->flags |= MLX5_ENCAP_ENTRY_VALID; | |
mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev)); | |
- neigh_release(n); | |
+ mlx5e_route_lookup_ipv4_put(route_dev, n); | |
return err; | |
destroy_neigh_entry: | |
@@ -286,18 +296,18 @@ destroy_neigh_entry: | |
free_encap: | |
kfree(encap_header); | |
release_neigh: | |
- neigh_release(n); | |
+ mlx5e_route_lookup_ipv4_put(route_dev, n); | |
return err; | |
} | |
#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6) | |
-static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv, | |
- struct net_device *mirred_dev, | |
- struct net_device **out_dev, | |
- struct net_device **route_dev, | |
- struct flowi6 *fl6, | |
- struct neighbour **out_n, | |
- u8 *out_ttl) | |
+static int mlx5e_route_lookup_ipv6_get(struct mlx5e_priv *priv, | |
+ struct net_device *mirred_dev, | |
+ struct net_device **out_dev, | |
+ struct net_device **route_dev, | |
+ struct flowi6 *fl6, | |
+ struct neighbour **out_n, | |
+ u8 *out_ttl) | |
{ | |
struct dst_entry *dst; | |
struct neighbour *n; | |
@@ -318,15 +328,25 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv, | |
return ret; | |
} | |
+ dev_hold(*route_dev); | |
n = dst_neigh_lookup(dst, &fl6->daddr); | |
dst_release(dst); | |
- if (!n) | |
+ if (!n) { | |
+ dev_put(*route_dev); | |
return -ENOMEM; | |
+ } | |
*out_n = n; | |
return 0; | |
} | |
+static void mlx5e_route_lookup_ipv6_put(struct net_device *route_dev, | |
+ struct neighbour *n) | |
+{ | |
+ neigh_release(n); | |
+ dev_put(route_dev); | |
+} | |
+ | |
int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, | |
struct net_device *mirred_dev, | |
struct mlx5e_encap_entry *e) | |
@@ -348,8 +368,8 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, | |
fl6.daddr = tun_key->u.ipv6.dst; | |
fl6.saddr = tun_key->u.ipv6.src; | |
- err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev, &route_dev, | |
- &fl6, &n, &ttl); | |
+ err = mlx5e_route_lookup_ipv6_get(priv, mirred_dev, &out_dev, &route_dev, | |
+ &fl6, &n, &ttl); | |
if (err) | |
return err; | |
@@ -378,7 +398,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, | |
e->m_neigh.family = n->ops->family; | |
memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); | |
e->out_dev = out_dev; | |
- e->route_dev = route_dev; | |
+ e->route_dev_ifindex = route_dev->ifindex; | |
/* It's importent to add the neigh to the hash table before checking | |
* the neigh validity state. So if we'll get a notification, in case the | |
@@ -433,7 +453,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, | |
e->flags |= MLX5_ENCAP_ENTRY_VALID; | |
mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev)); | |
- neigh_release(n); | |
+ mlx5e_route_lookup_ipv6_put(route_dev, n); | |
return err; | |
destroy_neigh_entry: | |
@@ -441,7 +461,7 @@ destroy_neigh_entry: | |
free_encap: | |
kfree(encap_header); | |
release_neigh: | |
- neigh_release(n); | |
+ mlx5e_route_lookup_ipv6_put(route_dev, n); | |
return err; | |
} | |
#endif | |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c | |
index 55e65a438de70..fcaeb30778bc7 100644 | |
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c | |
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c | |
@@ -122,9 +122,9 @@ void mlx5e_activate_xsk(struct mlx5e_channel *c) | |
set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state); | |
/* TX queue is created active. */ | |
- spin_lock(&c->async_icosq_lock); | |
+ spin_lock_bh(&c->async_icosq_lock); | |
mlx5e_trigger_irq(&c->async_icosq); | |
- spin_unlock(&c->async_icosq_lock); | |
+ spin_unlock_bh(&c->async_icosq_lock); | |
} | |
void mlx5e_deactivate_xsk(struct mlx5e_channel *c) | |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c | |
index 4d892f6cecb3e..4de70cee80c0a 100644 | |
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c | |
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c | |
@@ -36,9 +36,9 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) | |
if (test_and_set_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->async_icosq.state)) | |
return 0; | |
- spin_lock(&c->async_icosq_lock); | |
+ spin_lock_bh(&c->async_icosq_lock); | |
mlx5e_trigger_irq(&c->async_icosq); | |
- spin_unlock(&c->async_icosq_lock); | |
+ spin_unlock_bh(&c->async_icosq_lock); | |
} | |
return 0; | |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c | |
index 6bbfcf18107d2..979ff5658a3f7 100644 | |
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c | |
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c | |
@@ -188,7 +188,7 @@ static int post_rx_param_wqes(struct mlx5e_channel *c, | |
err = 0; | |
sq = &c->async_icosq; | |
- spin_lock(&c->async_icosq_lock); | |
+ spin_lock_bh(&c->async_icosq_lock); | |
cseg = post_static_params(sq, priv_rx); | |
if (IS_ERR(cseg)) | |
@@ -199,7 +199,7 @@ static int post_rx_param_wqes(struct mlx5e_channel *c, | |
mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg); | |
unlock: | |
- spin_unlock(&c->async_icosq_lock); | |
+ spin_unlock_bh(&c->async_icosq_lock); | |
return err; | |
@@ -265,10 +265,10 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq, | |
BUILD_BUG_ON(MLX5E_KTLS_GET_PROGRESS_WQEBBS != 1); | |
- spin_lock(&sq->channel->async_icosq_lock); | |
+ spin_lock_bh(&sq->channel->async_icosq_lock); | |
if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1))) { | |
- spin_unlock(&sq->channel->async_icosq_lock); | |
+ spin_unlock_bh(&sq->channel->async_icosq_lock); | |
err = -ENOSPC; | |
goto err_dma_unmap; | |
} | |
@@ -299,7 +299,7 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq, | |
icosq_fill_wi(sq, pi, &wi); | |
sq->pc++; | |
mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg); | |
- spin_unlock(&sq->channel->async_icosq_lock); | |
+ spin_unlock_bh(&sq->channel->async_icosq_lock); | |
return 0; | |
@@ -360,7 +360,7 @@ static int resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx | |
err = 0; | |
sq = &c->async_icosq; | |
- spin_lock(&c->async_icosq_lock); | |
+ spin_lock_bh(&c->async_icosq_lock); | |
cseg = post_static_params(sq, priv_rx); | |
if (IS_ERR(cseg)) { | |
@@ -372,7 +372,7 @@ static int resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx | |
mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg); | |
priv_rx->stats->tls_resync_res_ok++; | |
unlock: | |
- spin_unlock(&c->async_icosq_lock); | |
+ spin_unlock_bh(&c->async_icosq_lock); | |
return err; | |
} | |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c | |
index 42ec28e298348..f399973a44eb0 100644 | |
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c | |
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c | |
@@ -5226,6 +5226,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv) | |
mlx5e_disable_async_events(priv); | |
mlx5_lag_remove(mdev); | |
+ mlx5_vxlan_reset_to_default(mdev->vxlan); | |
} | |
int mlx5e_update_nic_rx(struct mlx5e_priv *priv) | |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | |
index 0d1562e20118c..963a6d98840ac 100644 | |
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | |
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | |
@@ -187,7 +187,7 @@ struct mlx5e_encap_entry { | |
unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ | |
struct net_device *out_dev; | |
- struct net_device *route_dev; | |
+ int route_dev_ifindex; | |
struct mlx5e_tc_tunnel *tunnel; | |
int reformat_type; | |
u8 flags; | |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | |
index 64c8ac5eabf6a..a0a4398408b85 100644 | |
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | |
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | |
@@ -1566,7 +1566,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) | |
} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq))); | |
out: | |
- if (rq->xdp_prog) | |
+ if (rcu_access_pointer(rq->xdp_prog)) | |
mlx5e_xdp_rx_poll_complete(rq); | |
mlx5_cqwq_update_db_record(cqwq); | |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | |
index 1c93f92d9210a..44947b054dc4c 100644 | |
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | |
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | |
@@ -4430,6 +4430,7 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv, | |
return flow; | |
err_free: | |
+ dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts); | |
mlx5e_flow_put(priv, flow); | |
out: | |
return ERR_PTR(err); | |
@@ -4564,6 +4565,7 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv, | |
return 0; | |
err_free: | |
+ dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts); | |
mlx5e_flow_put(priv, flow); | |
kvfree(parse_attr); | |
out: | |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | |
index 6e6a9a5639928..e8e6294c7ccae 100644 | |
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | |
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | |
@@ -1902,8 +1902,6 @@ int mlx5_devlink_port_function_hw_addr_get(struct devlink *devlink, | |
ether_addr_copy(hw_addr, vport->info.mac); | |
*hw_addr_len = ETH_ALEN; | |
err = 0; | |
- } else { | |
- NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled"); | |
} | |
mutex_unlock(&esw->state_lock); | |
return err; | |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | |
index 75fa44eee434d..d4755d61dd740 100644 | |
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | |
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | |
@@ -1994,10 +1994,11 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle) | |
down_write_ref_node(&fte->node, false); | |
for (i = handle->num_rules - 1; i >= 0; i--) | |
tree_remove_node(&handle->rule[i]->node, true); | |
- if (fte->modify_mask && fte->dests_size) { | |
- modify_fte(fte); | |
+ if (fte->dests_size) { | |
+ if (fte->modify_mask) | |
+ modify_fte(fte); | |
up_write_ref_node(&fte->node, false); | |
- } else { | |
+ } else if (list_empty(&fte->node.children)) { | |
del_hw_fte(&fte->node); | |
/* Avoid double call to del_hw_fte */ | |
fte->node.del_hw_func = NULL; | |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c | |
index 3315afe2f8dce..38084400ee8fa 100644 | |
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c | |
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c | |
@@ -167,6 +167,17 @@ struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev) | |
} | |
void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan) | |
+{ | |
+ if (!mlx5_vxlan_allowed(vxlan)) | |
+ return; | |
+ | |
+ mlx5_vxlan_del_port(vxlan, IANA_VXLAN_UDP_PORT); | |
+ WARN_ON(!hash_empty(vxlan->htable)); | |
+ | |
+ kfree(vxlan); | |
+} | |
+ | |
+void mlx5_vxlan_reset_to_default(struct mlx5_vxlan *vxlan) | |
{ | |
struct mlx5_vxlan_port *vxlanp; | |
struct hlist_node *tmp; | |
@@ -175,12 +186,12 @@ void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan) | |
if (!mlx5_vxlan_allowed(vxlan)) | |
return; | |
- /* Lockless since we are the only hash table consumers*/ | |
hash_for_each_safe(vxlan->htable, bkt, tmp, vxlanp, hlist) { | |
- hash_del(&vxlanp->hlist); | |
- mlx5_vxlan_core_del_port_cmd(vxlan->mdev, vxlanp->udp_port); | |
- kfree(vxlanp); | |
+ /* Don't delete default UDP port added by the HW. | |
+ * Remove only user configured ports | |
+ */ | |
+ if (vxlanp->udp_port == IANA_VXLAN_UDP_PORT) | |
+ continue; | |
+ mlx5_vxlan_del_port(vxlan, vxlanp->udp_port); | |
} | |
- | |
- kfree(vxlan); | |
} | |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h | |
index ec766529f49b6..34ef662da35ed 100644 | |
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h | |
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h | |
@@ -56,6 +56,7 @@ void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan); | |
int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port); | |
int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port); | |
bool mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port); | |
+void mlx5_vxlan_reset_to_default(struct mlx5_vxlan *vxlan); | |
#else | |
static inline struct mlx5_vxlan* | |
mlx5_vxlan_create(struct mlx5_core_dev *mdev) { return ERR_PTR(-EOPNOTSUPP); } | |
@@ -63,6 +64,7 @@ static inline void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan) { return; } | |
static inline int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port) { return -EOPNOTSUPP; } | |
static inline int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port) { return -EOPNOTSUPP; } | |
static inline bool mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port) { return false; } | |
+static inline void mlx5_vxlan_reset_to_default(struct mlx5_vxlan *vxlan) { return; } | |
#endif | |
#endif /* __MLX5_VXLAN_H__ */ | |
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c | |
index de93cc6ebc1ac..bc368136bccc6 100644 | |
--- a/drivers/net/ethernet/microchip/lan743x_main.c | |
+++ b/drivers/net/ethernet/microchip/lan743x_main.c | |
@@ -675,14 +675,12 @@ clean_up: | |
static int lan743x_dp_write(struct lan743x_adapter *adapter, | |
u32 select, u32 addr, u32 length, u32 *buf) | |
{ | |
- int ret = -EIO; | |
u32 dp_sel; | |
int i; | |
- mutex_lock(&adapter->dp_lock); | |
if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_, | |
1, 40, 100, 100)) | |
- goto unlock; | |
+ return -EIO; | |
dp_sel = lan743x_csr_read(adapter, DP_SEL); | |
dp_sel &= ~DP_SEL_MASK_; | |
dp_sel |= select; | |
@@ -694,13 +692,10 @@ static int lan743x_dp_write(struct lan743x_adapter *adapter, | |
lan743x_csr_write(adapter, DP_CMD, DP_CMD_WRITE_); | |
if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_, | |
1, 40, 100, 100)) | |
- goto unlock; | |
+ return -EIO; | |
} | |
- ret = 0; | |
-unlock: | |
- mutex_unlock(&adapter->dp_lock); | |
- return ret; | |
+ return 0; | |
} | |
static u32 lan743x_mac_mii_access(u16 id, u16 index, int read) | |
@@ -1020,16 +1015,16 @@ static void lan743x_phy_close(struct lan743x_adapter *adapter) | |
static int lan743x_phy_open(struct lan743x_adapter *adapter) | |
{ | |
struct lan743x_phy *phy = &adapter->phy; | |
+ struct phy_device *phydev = NULL; | |
struct device_node *phynode; | |
- struct phy_device *phydev; | |
struct net_device *netdev; | |
int ret = -EIO; | |
netdev = adapter->netdev; | |
phynode = of_node_get(adapter->pdev->dev.of_node); | |
- adapter->phy_mode = PHY_INTERFACE_MODE_GMII; | |
if (phynode) { | |
+ /* try devicetree phy, or fixed link */ | |
of_get_phy_mode(phynode, &adapter->phy_mode); | |
if (of_phy_is_fixed_link(phynode)) { | |
@@ -1045,13 +1040,15 @@ static int lan743x_phy_open(struct lan743x_adapter *adapter) | |
lan743x_phy_link_status_change, 0, | |
adapter->phy_mode); | |
of_node_put(phynode); | |
- if (!phydev) | |
- goto return_error; | |
- } else { | |
+ } | |
+ | |
+ if (!phydev) { | |
+ /* try internal phy */ | |
phydev = phy_find_first(adapter->mdiobus); | |
if (!phydev) | |
goto return_error; | |
+ adapter->phy_mode = PHY_INTERFACE_MODE_GMII; | |
ret = phy_connect_direct(netdev, phydev, | |
lan743x_phy_link_status_change, | |
adapter->phy_mode); | |
@@ -2735,7 +2732,6 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter, | |
adapter->intr.irq = adapter->pdev->irq; | |
lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF); | |
- mutex_init(&adapter->dp_lock); | |
ret = lan743x_gpio_init(adapter); | |
if (ret) | |
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h | |
index c61a404113179..a536f4a4994df 100644 | |
--- a/drivers/net/ethernet/microchip/lan743x_main.h | |
+++ b/drivers/net/ethernet/microchip/lan743x_main.h | |
@@ -712,9 +712,6 @@ struct lan743x_adapter { | |
struct lan743x_csr csr; | |
struct lan743x_intr intr; | |
- /* lock, used to prevent concurrent access to data port */ | |
- struct mutex dp_lock; | |
- | |
struct lan743x_gpio gpio; | |
struct lan743x_ptp ptp; | |
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c | |
index c74d9c02a805f..515d9116dfadf 100644 | |
--- a/drivers/net/ethernet/realtek/r8169_main.c | |
+++ b/drivers/net/ethernet/realtek/r8169_main.c | |
@@ -4145,7 +4145,8 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp, | |
opts[1] |= transport_offset << TCPHO_SHIFT; | |
} else { | |
if (unlikely(skb->len < ETH_ZLEN && rtl_test_hw_pad_bug(tp))) | |
- return !eth_skb_pad(skb); | |
+ /* eth_skb_pad would free the skb on error */ | |
+ return !__skb_put_padto(skb, ETH_ZLEN, false); | |
} | |
return true; | |
@@ -4324,18 +4325,9 @@ static netdev_features_t rtl8169_features_check(struct sk_buff *skb, | |
rtl_chip_supports_csum_v2(tp)) | |
features &= ~NETIF_F_ALL_TSO; | |
} else if (skb->ip_summed == CHECKSUM_PARTIAL) { | |
- if (skb->len < ETH_ZLEN) { | |
- switch (tp->mac_version) { | |
- case RTL_GIGA_MAC_VER_11: | |
- case RTL_GIGA_MAC_VER_12: | |
- case RTL_GIGA_MAC_VER_17: | |
- case RTL_GIGA_MAC_VER_34: | |
- features &= ~NETIF_F_CSUM_MASK; | |
- break; | |
- default: | |
- break; | |
- } | |
- } | |
+ /* work around hw bug on some chip versions */ | |
+ if (skb->len < ETH_ZLEN) | |
+ features &= ~NETIF_F_CSUM_MASK; | |
if (transport_offset > TCPHO_MAX && | |
rtl_chip_supports_csum_v2(tp)) | |
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c | |
index 0f09609718007..81a614f903c4a 100644 | |
--- a/drivers/net/phy/realtek.c | |
+++ b/drivers/net/phy/realtek.c | |
@@ -542,6 +542,8 @@ static struct phy_driver realtek_drvs[] = { | |
{ | |
PHY_ID_MATCH_EXACT(0x00008201), | |
.name = "RTL8201CP Ethernet", | |
+ .read_page = rtl821x_read_page, | |
+ .write_page = rtl821x_write_page, | |
}, { | |
PHY_ID_MATCH_EXACT(0x001cc816), | |
.name = "RTL8201F Fast Ethernet", | |
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c | |
index 60c1aadece89a..f2793ffde1913 100644 | |
--- a/drivers/net/vrf.c | |
+++ b/drivers/net/vrf.c | |
@@ -608,8 +608,7 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev) | |
return ret; | |
} | |
-static int vrf_finish_direct(struct net *net, struct sock *sk, | |
- struct sk_buff *skb) | |
+static void vrf_finish_direct(struct sk_buff *skb) | |
{ | |
struct net_device *vrf_dev = skb->dev; | |
@@ -628,7 +627,8 @@ static int vrf_finish_direct(struct net *net, struct sock *sk, | |
skb_pull(skb, ETH_HLEN); | |
} | |
- return 1; | |
+ /* reset skb device */ | |
+ nf_reset_ct(skb); | |
} | |
#if IS_ENABLED(CONFIG_IPV6) | |
@@ -707,15 +707,41 @@ static struct sk_buff *vrf_ip6_out_redirect(struct net_device *vrf_dev, | |
return skb; | |
} | |
+static int vrf_output6_direct_finish(struct net *net, struct sock *sk, | |
+ struct sk_buff *skb) | |
+{ | |
+ vrf_finish_direct(skb); | |
+ | |
+ return vrf_ip6_local_out(net, sk, skb); | |
+} | |
+ | |
static int vrf_output6_direct(struct net *net, struct sock *sk, | |
struct sk_buff *skb) | |
{ | |
+ int err = 1; | |
+ | |
skb->protocol = htons(ETH_P_IPV6); | |
- return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, | |
- net, sk, skb, NULL, skb->dev, | |
- vrf_finish_direct, | |
- !(IPCB(skb)->flags & IPSKB_REROUTED)); | |
+ if (!(IPCB(skb)->flags & IPSKB_REROUTED)) | |
+ err = nf_hook(NFPROTO_IPV6, NF_INET_POST_ROUTING, net, sk, skb, | |
+ NULL, skb->dev, vrf_output6_direct_finish); | |
+ | |
+ if (likely(err == 1)) | |
+ vrf_finish_direct(skb); | |
+ | |
+ return err; | |
+} | |
+ | |
+static int vrf_ip6_out_direct_finish(struct net *net, struct sock *sk, | |
+ struct sk_buff *skb) | |
+{ | |
+ int err; | |
+ | |
+ err = vrf_output6_direct(net, sk, skb); | |
+ if (likely(err == 1)) | |
+ err = vrf_ip6_local_out(net, sk, skb); | |
+ | |
+ return err; | |
} | |
static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev, | |
@@ -728,18 +754,15 @@ static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev, | |
skb->dev = vrf_dev; | |
err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, | |
- skb, NULL, vrf_dev, vrf_output6_direct); | |
+ skb, NULL, vrf_dev, vrf_ip6_out_direct_finish); | |
if (likely(err == 1)) | |
err = vrf_output6_direct(net, sk, skb); | |
- /* reset skb device */ | |
if (likely(err == 1)) | |
- nf_reset_ct(skb); | |
- else | |
- skb = NULL; | |
+ return skb; | |
- return skb; | |
+ return NULL; | |
} | |
static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev, | |
@@ -919,15 +942,41 @@ static struct sk_buff *vrf_ip_out_redirect(struct net_device *vrf_dev, | |
return skb; | |
} | |
+static int vrf_output_direct_finish(struct net *net, struct sock *sk, | |
+ struct sk_buff *skb) | |
+{ | |
+ vrf_finish_direct(skb); | |
+ | |
+ return vrf_ip_local_out(net, sk, skb); | |
+} | |
+ | |
static int vrf_output_direct(struct net *net, struct sock *sk, | |
struct sk_buff *skb) | |
{ | |
+ int err = 1; | |
+ | |
skb->protocol = htons(ETH_P_IP); | |
- return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, | |
- net, sk, skb, NULL, skb->dev, | |
- vrf_finish_direct, | |
- !(IPCB(skb)->flags & IPSKB_REROUTED)); | |
+ if (!(IPCB(skb)->flags & IPSKB_REROUTED)) | |
+ err = nf_hook(NFPROTO_IPV4, NF_INET_POST_ROUTING, net, sk, skb, | |
+ NULL, skb->dev, vrf_output_direct_finish); | |
+ | |
+ if (likely(err == 1)) | |
+ vrf_finish_direct(skb); | |
+ | |
+ return err; | |
+} | |
+ | |
+static int vrf_ip_out_direct_finish(struct net *net, struct sock *sk, | |
+ struct sk_buff *skb) | |
+{ | |
+ int err; | |
+ | |
+ err = vrf_output_direct(net, sk, skb); | |
+ if (likely(err == 1)) | |
+ err = vrf_ip_local_out(net, sk, skb); | |
+ | |
+ return err; | |
} | |
static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev, | |
@@ -940,18 +989,15 @@ static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev, | |
skb->dev = vrf_dev; | |
err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk, | |
- skb, NULL, vrf_dev, vrf_output_direct); | |
+ skb, NULL, vrf_dev, vrf_ip_out_direct_finish); | |
if (likely(err == 1)) | |
err = vrf_output_direct(net, sk, skb); | |
- /* reset skb device */ | |
if (likely(err == 1)) | |
- nf_reset_ct(skb); | |
- else | |
- skb = NULL; | |
+ return skb; | |
- return skb; | |
+ return NULL; | |
} | |
static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev, | |
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c | |
index f8aed0696d775..2369ca250cd65 100644 | |
--- a/drivers/net/wan/cosa.c | |
+++ b/drivers/net/wan/cosa.c | |
@@ -889,6 +889,7 @@ static ssize_t cosa_write(struct file *file, | |
chan->tx_status = 1; | |
spin_unlock_irqrestore(&cosa->lock, flags); | |
up(&chan->wsem); | |
+ kfree(kbuf); | |
return -ERESTARTSYS; | |
} | |
} | |
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c | |
index b353995bdd457..f4c2a8d83f50d 100644 | |
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c | |
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c | |
@@ -974,7 +974,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv, | |
struct ath_htc_rx_status *rxstatus; | |
struct ath_rx_status rx_stats; | |
bool decrypt_error = false; | |
- __be16 rs_datalen; | |
+ u16 rs_datalen; | |
bool is_phyerr; | |
if (skb->len < HTC_RX_FRAME_HEADER_SIZE) { | |
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c | |
index 893e29624c16b..349fba056cb65 100644 | |
--- a/drivers/nvme/host/core.c | |
+++ b/drivers/nvme/host/core.c | |
@@ -1946,6 +1946,50 @@ static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns, | |
return 0; | |
} | |
+static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id) | |
+{ | |
+ struct nvme_ctrl *ctrl = ns->ctrl; | |
+ | |
+ /* | |
+ * The PI implementation requires the metadata size to be equal to the | |
+ * t10 pi tuple size. | |
+ */ | |
+ ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms); | |
+ if (ns->ms == sizeof(struct t10_pi_tuple)) | |
+ ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK; | |
+ else | |
+ ns->pi_type = 0; | |
+ | |
+ ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS); | |
+ if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)) | |
+ return 0; | |
+ if (ctrl->ops->flags & NVME_F_FABRICS) { | |
+ /* | |
+ * The NVMe over Fabrics specification only supports metadata as | |
+ * part of the extended data LBA. We rely on HCA/HBA support to | |
+ * remap the separate metadata buffer from the block layer. | |
+ */ | |
+ if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT))) | |
+ return -EINVAL; | |
+ if (ctrl->max_integrity_segments) | |
+ ns->features |= | |
+ (NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS); | |
+ } else { | |
+ /* | |
+ * For PCIe controllers, we can't easily remap the separate | |
+ * metadata buffer from the block layer and thus require a | |
+ * separate metadata buffer for block layer metadata/PI support. | |
+ * We allow extended LBAs for the passthrough interface, though. | |
+ */ | |
+ if (id->flbas & NVME_NS_FLBAS_META_EXT) | |
+ ns->features |= NVME_NS_EXT_LBAS; | |
+ else | |
+ ns->features |= NVME_NS_METADATA_SUPPORTED; | |
+ } | |
+ | |
+ return 0; | |
+} | |
+ | |
static void nvme_update_disk_info(struct gendisk *disk, | |
struct nvme_ns *ns, struct nvme_id_ns *id) | |
{ | |
@@ -1957,7 +2001,7 @@ static void nvme_update_disk_info(struct gendisk *disk, | |
/* unsupported block size, set capacity to 0 later */ | |
bs = (1 << 9); | |
} | |
- blk_mq_freeze_queue(disk->queue); | |
+ | |
blk_integrity_unregister(disk); | |
atomic_bs = phys_bs = bs; | |
@@ -2020,10 +2064,6 @@ static void nvme_update_disk_info(struct gendisk *disk, | |
if (id->nsattr & NVME_NS_ATTR_RO) | |
set_disk_ro(disk, true); | |
- else | |
- set_disk_ro(disk, false); | |
- | |
- blk_mq_unfreeze_queue(disk->queue); | |
} | |
static inline bool nvme_first_scan(struct gendisk *disk) | |
@@ -2070,6 +2110,7 @@ static int __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id) | |
struct nvme_ctrl *ctrl = ns->ctrl; | |
int ret; | |
+ blk_mq_freeze_queue(ns->disk->queue); | |
/* | |
* If identify namespace failed, use default 512 byte block size so | |
* block layer can use before failing read/write for 0 capacity. | |
@@ -2087,57 +2128,38 @@ static int __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id) | |
dev_warn(ctrl->device, | |
"failed to add zoned namespace:%u ret:%d\n", | |
ns->head->ns_id, ret); | |
- return ret; | |
+ goto out_unfreeze; | |
} | |
break; | |
default: | |
dev_warn(ctrl->device, "unknown csi:%u ns:%u\n", | |
ns->head->ids.csi, ns->head->ns_id); | |
- return -ENODEV; | |
- } | |
- | |
- ns->features = 0; | |
- ns->ms = le16_to_cpu(id->lbaf[lbaf].ms); | |
- /* the PI implementation requires metadata equal t10 pi tuple size */ | |
- if (ns->ms == sizeof(struct t10_pi_tuple)) | |
- ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK; | |
- else | |
- ns->pi_type = 0; | |
- | |
- if (ns->ms) { | |
- /* | |
- * For PCIe only the separate metadata pointer is supported, | |
- * as the block layer supplies metadata in a separate bio_vec | |
- * chain. For Fabrics, only metadata as part of extended data | |
- * LBA is supported on the wire per the Fabrics specification, | |
- * but the HBA/HCA will do the remapping from the separate | |
- * metadata buffers for us. | |
- */ | |
- if (id->flbas & NVME_NS_FLBAS_META_EXT) { | |
- ns->features |= NVME_NS_EXT_LBAS; | |
- if ((ctrl->ops->flags & NVME_F_FABRICS) && | |
- (ctrl->ops->flags & NVME_F_METADATA_SUPPORTED) && | |
- ctrl->max_integrity_segments) | |
- ns->features |= NVME_NS_METADATA_SUPPORTED; | |
- } else { | |
- if (WARN_ON_ONCE(ctrl->ops->flags & NVME_F_FABRICS)) | |
- return -EINVAL; | |
- if (ctrl->ops->flags & NVME_F_METADATA_SUPPORTED) | |
- ns->features |= NVME_NS_METADATA_SUPPORTED; | |
- } | |
+ ret = -ENODEV; | |
+ goto out_unfreeze; | |
} | |
+ ret = nvme_configure_metadata(ns, id); | |
+ if (ret) | |
+ goto out_unfreeze; | |
nvme_set_chunk_sectors(ns, id); | |
nvme_update_disk_info(disk, ns, id); | |
+ blk_mq_unfreeze_queue(ns->disk->queue); | |
+ | |
#ifdef CONFIG_NVME_MULTIPATH | |
if (ns->head->disk) { | |
+ blk_mq_freeze_queue(ns->head->disk->queue); | |
nvme_update_disk_info(ns->head->disk, ns, id); | |
blk_stack_limits(&ns->head->disk->queue->limits, | |
&ns->queue->limits, 0); | |
nvme_mpath_update_disk_size(ns->head->disk); | |
+ blk_mq_unfreeze_queue(ns->head->disk->queue); | |
} | |
#endif | |
return 0; | |
+ | |
+out_unfreeze: | |
+ blk_mq_unfreeze_queue(ns->disk->queue); | |
+ return ret; | |
} | |
static int _nvme_revalidate_disk(struct gendisk *disk) | |
@@ -4641,8 +4663,7 @@ void nvme_start_queues(struct nvme_ctrl *ctrl) | |
} | |
EXPORT_SYMBOL_GPL(nvme_start_queues); | |
- | |
-void nvme_sync_queues(struct nvme_ctrl *ctrl) | |
+void nvme_sync_io_queues(struct nvme_ctrl *ctrl) | |
{ | |
struct nvme_ns *ns; | |
@@ -4650,7 +4671,12 @@ void nvme_sync_queues(struct nvme_ctrl *ctrl) | |
list_for_each_entry(ns, &ctrl->namespaces, list) | |
blk_sync_queue(ns->queue); | |
up_read(&ctrl->namespaces_rwsem); | |
+} | |
+EXPORT_SYMBOL_GPL(nvme_sync_io_queues); | |
+void nvme_sync_queues(struct nvme_ctrl *ctrl) | |
+{ | |
+ nvme_sync_io_queues(ctrl); | |
if (ctrl->admin_q) | |
blk_sync_queue(ctrl->admin_q); | |
} | |
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h | |
index 2aaedfa43ed86..97fbd61191b33 100644 | |
--- a/drivers/nvme/host/nvme.h | |
+++ b/drivers/nvme/host/nvme.h | |
@@ -602,6 +602,7 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl); | |
void nvme_start_queues(struct nvme_ctrl *ctrl); | |
void nvme_kill_queues(struct nvme_ctrl *ctrl); | |
void nvme_sync_queues(struct nvme_ctrl *ctrl); | |
+void nvme_sync_io_queues(struct nvme_ctrl *ctrl); | |
void nvme_unfreeze(struct nvme_ctrl *ctrl); | |
void nvme_wait_freeze(struct nvme_ctrl *ctrl); | |
int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout); | |
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c | |
index 8984796db0c80..a6af96aaa0eb7 100644 | |
--- a/drivers/nvme/host/pci.c | |
+++ b/drivers/nvme/host/pci.c | |
@@ -198,6 +198,7 @@ struct nvme_queue { | |
u32 q_depth; | |
u16 cq_vector; | |
u16 sq_tail; | |
+ u16 last_sq_tail; | |
u16 cq_head; | |
u16 qid; | |
u8 cq_phase; | |
@@ -455,11 +456,24 @@ static int nvme_pci_map_queues(struct blk_mq_tag_set *set) | |
return 0; | |
} | |
-static inline void nvme_write_sq_db(struct nvme_queue *nvmeq) | |
+/* | |
+ * Write sq tail if we are asked to, or if the next command would wrap. | |
+ */ | |
+static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq) | |
{ | |
+ if (!write_sq) { | |
+ u16 next_tail = nvmeq->sq_tail + 1; | |
+ | |
+ if (next_tail == nvmeq->q_depth) | |
+ next_tail = 0; | |
+ if (next_tail != nvmeq->last_sq_tail) | |
+ return; | |
+ } | |
+ | |
if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail, | |
nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei)) | |
writel(nvmeq->sq_tail, nvmeq->q_db); | |
+ nvmeq->last_sq_tail = nvmeq->sq_tail; | |
} | |
/** | |
@@ -476,8 +490,7 @@ static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd, | |
cmd, sizeof(*cmd)); | |
if (++nvmeq->sq_tail == nvmeq->q_depth) | |
nvmeq->sq_tail = 0; | |
- if (write_sq) | |
- nvme_write_sq_db(nvmeq); | |
+ nvme_write_sq_db(nvmeq, write_sq); | |
spin_unlock(&nvmeq->sq_lock); | |
} | |
@@ -486,7 +499,8 @@ static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx) | |
struct nvme_queue *nvmeq = hctx->driver_data; | |
spin_lock(&nvmeq->sq_lock); | |
- nvme_write_sq_db(nvmeq); | |
+ if (nvmeq->sq_tail != nvmeq->last_sq_tail) | |
+ nvme_write_sq_db(nvmeq, true); | |
spin_unlock(&nvmeq->sq_lock); | |
} | |
@@ -1496,6 +1510,7 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid) | |
struct nvme_dev *dev = nvmeq->dev; | |
nvmeq->sq_tail = 0; | |
+ nvmeq->last_sq_tail = 0; | |
nvmeq->cq_head = 0; | |
nvmeq->cq_phase = 1; | |
nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; | |
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c | |
index 3a598e91e816d..f91c20e3daf7b 100644 | |
--- a/drivers/nvme/host/rdma.c | |
+++ b/drivers/nvme/host/rdma.c | |
@@ -122,7 +122,6 @@ struct nvme_rdma_ctrl { | |
struct sockaddr_storage src_addr; | |
struct nvme_ctrl ctrl; | |
- struct mutex teardown_lock; | |
bool use_inline_data; | |
u32 io_queues[HCTX_MAX_TYPES]; | |
}; | |
@@ -1010,8 +1009,8 @@ out_free_io_queues: | |
static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl, | |
bool remove) | |
{ | |
- mutex_lock(&ctrl->teardown_lock); | |
blk_mq_quiesce_queue(ctrl->ctrl.admin_q); | |
+ blk_sync_queue(ctrl->ctrl.admin_q); | |
nvme_rdma_stop_queue(&ctrl->queues[0]); | |
if (ctrl->ctrl.admin_tagset) { | |
blk_mq_tagset_busy_iter(ctrl->ctrl.admin_tagset, | |
@@ -1021,16 +1020,15 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl, | |
if (remove) | |
blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); | |
nvme_rdma_destroy_admin_queue(ctrl, remove); | |
- mutex_unlock(&ctrl->teardown_lock); | |
} | |
static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl, | |
bool remove) | |
{ | |
- mutex_lock(&ctrl->teardown_lock); | |
if (ctrl->ctrl.queue_count > 1) { | |
nvme_start_freeze(&ctrl->ctrl); | |
nvme_stop_queues(&ctrl->ctrl); | |
+ nvme_sync_io_queues(&ctrl->ctrl); | |
nvme_rdma_stop_io_queues(ctrl); | |
if (ctrl->ctrl.tagset) { | |
blk_mq_tagset_busy_iter(ctrl->ctrl.tagset, | |
@@ -1041,7 +1039,6 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl, | |
nvme_start_queues(&ctrl->ctrl); | |
nvme_rdma_destroy_io_queues(ctrl, remove); | |
} | |
- mutex_unlock(&ctrl->teardown_lock); | |
} | |
static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl) | |
@@ -1975,16 +1972,12 @@ static void nvme_rdma_complete_timed_out(struct request *rq) | |
{ | |
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); | |
struct nvme_rdma_queue *queue = req->queue; | |
- struct nvme_rdma_ctrl *ctrl = queue->ctrl; | |
- /* fence other contexts that may complete the command */ | |
- mutex_lock(&ctrl->teardown_lock); | |
nvme_rdma_stop_queue(queue); | |
- if (!blk_mq_request_completed(rq)) { | |
+ if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) { | |
nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD; | |
blk_mq_complete_request(rq); | |
} | |
- mutex_unlock(&ctrl->teardown_lock); | |
} | |
static enum blk_eh_timer_return | |
@@ -2319,7 +2312,6 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, | |
return ERR_PTR(-ENOMEM); | |
ctrl->ctrl.opts = opts; | |
INIT_LIST_HEAD(&ctrl->list); | |
- mutex_init(&ctrl->teardown_lock); | |
if (!(opts->mask & NVMF_OPT_TRSVCID)) { | |
opts->trsvcid = | |
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c | |
index d6a3e14873542..c0c33320fe659 100644 | |
--- a/drivers/nvme/host/tcp.c | |
+++ b/drivers/nvme/host/tcp.c | |
@@ -124,7 +124,6 @@ struct nvme_tcp_ctrl { | |
struct sockaddr_storage src_addr; | |
struct nvme_ctrl ctrl; | |
- struct mutex teardown_lock; | |
struct work_struct err_work; | |
struct delayed_work connect_work; | |
struct nvme_tcp_request async_req; | |
@@ -1886,8 +1885,8 @@ out_free_queue: | |
static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl, | |
bool remove) | |
{ | |
- mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock); | |
blk_mq_quiesce_queue(ctrl->admin_q); | |
+ blk_sync_queue(ctrl->admin_q); | |
nvme_tcp_stop_queue(ctrl, 0); | |
if (ctrl->admin_tagset) { | |
blk_mq_tagset_busy_iter(ctrl->admin_tagset, | |
@@ -1897,18 +1896,17 @@ static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl, | |
if (remove) | |
blk_mq_unquiesce_queue(ctrl->admin_q); | |
nvme_tcp_destroy_admin_queue(ctrl, remove); | |
- mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock); | |
} | |
static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl, | |
bool remove) | |
{ | |
- mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock); | |
if (ctrl->queue_count <= 1) | |
- goto out; | |
+ return; | |
blk_mq_quiesce_queue(ctrl->admin_q); | |
nvme_start_freeze(ctrl); | |
nvme_stop_queues(ctrl); | |
+ nvme_sync_io_queues(ctrl); | |
nvme_tcp_stop_io_queues(ctrl); | |
if (ctrl->tagset) { | |
blk_mq_tagset_busy_iter(ctrl->tagset, | |
@@ -1918,8 +1916,6 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl, | |
if (remove) | |
nvme_start_queues(ctrl); | |
nvme_tcp_destroy_io_queues(ctrl, remove); | |
-out: | |
- mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock); | |
} | |
static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl) | |
@@ -2171,14 +2167,11 @@ static void nvme_tcp_complete_timed_out(struct request *rq) | |
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); | |
struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl; | |
- /* fence other contexts that may complete the command */ | |
- mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock); | |
nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue)); | |
- if (!blk_mq_request_completed(rq)) { | |
+ if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) { | |
nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD; | |
blk_mq_complete_request(rq); | |
} | |
- mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock); | |
} | |
static enum blk_eh_timer_return | |
@@ -2455,7 +2448,6 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev, | |
nvme_tcp_reconnect_ctrl_work); | |
INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work); | |
INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work); | |
- mutex_init(&ctrl->teardown_lock); | |
if (!(opts->mask & NVMF_OPT_TRSVCID)) { | |
opts->trsvcid = | |
diff --git a/drivers/of/address.c b/drivers/of/address.c | |
index da4f7341323f2..37ac311843090 100644 | |
--- a/drivers/of/address.c | |
+++ b/drivers/of/address.c | |
@@ -1043,11 +1043,13 @@ out: | |
*/ | |
bool of_dma_is_coherent(struct device_node *np) | |
{ | |
- struct device_node *node = of_node_get(np); | |
+ struct device_node *node; | |
if (IS_ENABLED(CONFIG_OF_DMA_DEFAULT_COHERENT)) | |
return true; | |
+ node = of_node_get(np); | |
+ | |
while (node) { | |
if (of_property_read_bool(node, "dma-coherent")) { | |
of_node_put(node); | |
diff --git a/drivers/opp/core.c b/drivers/opp/core.c | |
index 1a95ad40795be..a963df7bd2749 100644 | |
--- a/drivers/opp/core.c | |
+++ b/drivers/opp/core.c | |
@@ -1160,6 +1160,10 @@ static void _opp_table_kref_release(struct kref *kref) | |
struct opp_device *opp_dev, *temp; | |
int i; | |
+ /* Drop the lock as soon as we can */ | |
+ list_del(&opp_table->node); | |
+ mutex_unlock(&opp_table_lock); | |
+ | |
_of_clear_opp_table(opp_table); | |
/* Release clk */ | |
@@ -1187,10 +1191,7 @@ static void _opp_table_kref_release(struct kref *kref) | |
mutex_destroy(&opp_table->genpd_virt_dev_lock); | |
mutex_destroy(&opp_table->lock); | |
- list_del(&opp_table->node); | |
kfree(opp_table); | |
- | |
- mutex_unlock(&opp_table_lock); | |
} | |
void dev_pm_opp_put_opp_table(struct opp_table *opp_table) | |
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c | |
index c39978b750ec6..653c0b3d29125 100644 | |
--- a/drivers/pci/controller/pci-mvebu.c | |
+++ b/drivers/pci/controller/pci-mvebu.c | |
@@ -960,25 +960,16 @@ static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port) | |
} | |
/* | |
- * We can't use devm_of_pci_get_host_bridge_resources() because we | |
- * need to parse our special DT properties encoding the MEM and IO | |
- * apertures. | |
+ * devm_of_pci_get_host_bridge_resources() only sets up translateable resources, | |
+ * so we need extra resource setup parsing our special DT properties encoding | |
+ * the MEM and IO apertures. | |
*/ | |
static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie) | |
{ | |
struct device *dev = &pcie->pdev->dev; | |
- struct device_node *np = dev->of_node; | |
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); | |
int ret; | |
- /* Get the bus range */ | |
- ret = of_pci_parse_bus_range(np, &pcie->busn); | |
- if (ret) { | |
- dev_err(dev, "failed to parse bus-range property: %d\n", ret); | |
- return ret; | |
- } | |
- pci_add_resource(&bridge->windows, &pcie->busn); | |
- | |
/* Get the PCIe memory aperture */ | |
mvebu_mbus_get_pcie_mem_aperture(&pcie->mem); | |
if (resource_size(&pcie->mem) == 0) { | |
@@ -988,6 +979,9 @@ static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie) | |
pcie->mem.name = "PCI MEM"; | |
pci_add_resource(&bridge->windows, &pcie->mem); | |
+ ret = devm_request_resource(dev, &iomem_resource, &pcie->mem); | |
+ if (ret) | |
+ return ret; | |
/* Get the PCIe IO aperture */ | |
mvebu_mbus_get_pcie_io_aperture(&pcie->io); | |
@@ -1001,9 +995,12 @@ static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie) | |
pcie->realio.name = "PCI I/O"; | |
pci_add_resource(&bridge->windows, &pcie->realio); | |
+ ret = devm_request_resource(dev, &ioport_resource, &pcie->realio); | |
+ if (ret) | |
+ return ret; | |
} | |
- return devm_request_pci_bus_resources(dev, &bridge->windows); | |
+ return 0; | |
} | |
/* | |
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c | |
index e39c5499770ff..b2fed944903e2 100644 | |
--- a/drivers/pci/pci.c | |
+++ b/drivers/pci/pci.c | |
@@ -3503,8 +3503,13 @@ void pci_acs_init(struct pci_dev *dev) | |
{ | |
dev->acs_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); | |
- if (dev->acs_cap) | |
- pci_enable_acs(dev); | |
+ /* | |
+ * Attempt to enable ACS regardless of capability because some Root | |
+ * Ports (e.g. those quirked with *_intel_pch_acs_*) do not have | |
+ * the standard ACS capability but still support ACS via those | |
+ * quirks. | |
+ */ | |
+ pci_enable_acs(dev); | |
} | |
/** | |
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c | |
index 3e6567355d97d..1d603732903fe 100644 | |
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c | |
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c | |
@@ -286,13 +286,14 @@ int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function, | |
static bool aspeed_expr_is_gpio(const struct aspeed_sig_expr *expr) | |
{ | |
/* | |
- * The signal type is GPIO if the signal name has "GPIO" as a prefix. | |
+ * The signal type is GPIO if the signal name has "GPI" as a prefix. | |
* strncmp (rather than strcmp) is used to implement the prefix | |
* requirement. | |
* | |
- * expr->signal might look like "GPIOT3" in the GPIO case. | |
+ * expr->signal might look like "GPIOB1" in the GPIO case. | |
+ * expr->signal might look like "GPIT0" in the GPI case. | |
*/ | |
- return strncmp(expr->signal, "GPIO", 4) == 0; | |
+ return strncmp(expr->signal, "GPI", 3) == 0; | |
} | |
static bool aspeed_gpio_in_exprs(const struct aspeed_sig_expr **exprs) | |
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c | |
index b64997b303e0c..31e7840bc5e25 100644 | |
--- a/drivers/pinctrl/intel/pinctrl-intel.c | |
+++ b/drivers/pinctrl/intel/pinctrl-intel.c | |
@@ -62,10 +62,10 @@ | |
#define PADCFG1_TERM_UP BIT(13) | |
#define PADCFG1_TERM_SHIFT 10 | |
#define PADCFG1_TERM_MASK GENMASK(12, 10) | |
-#define PADCFG1_TERM_20K 4 | |
-#define PADCFG1_TERM_2K 3 | |
-#define PADCFG1_TERM_5K 2 | |
-#define PADCFG1_TERM_1K 1 | |
+#define PADCFG1_TERM_20K BIT(2) | |
+#define PADCFG1_TERM_5K BIT(1) | |
+#define PADCFG1_TERM_1K BIT(0) | |
+#define PADCFG1_TERM_833 (BIT(1) | BIT(0)) | |
#define PADCFG2 0x008 | |
#define PADCFG2_DEBEN BIT(0) | |
@@ -549,12 +549,12 @@ static int intel_config_get_pull(struct intel_pinctrl *pctrl, unsigned int pin, | |
return -EINVAL; | |
switch (term) { | |
+ case PADCFG1_TERM_833: | |
+ *arg = 833; | |
+ break; | |
case PADCFG1_TERM_1K: | |
*arg = 1000; | |
break; | |
- case PADCFG1_TERM_2K: | |
- *arg = 2000; | |
- break; | |
case PADCFG1_TERM_5K: | |
*arg = 5000; | |
break; | |
@@ -570,6 +570,11 @@ static int intel_config_get_pull(struct intel_pinctrl *pctrl, unsigned int pin, | |
return -EINVAL; | |
switch (term) { | |
+ case PADCFG1_TERM_833: | |
+ if (!(community->features & PINCTRL_FEATURE_1K_PD)) | |
+ return -EINVAL; | |
+ *arg = 833; | |
+ break; | |
case PADCFG1_TERM_1K: | |
if (!(community->features & PINCTRL_FEATURE_1K_PD)) | |
return -EINVAL; | |
@@ -678,6 +683,10 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin, | |
value |= PADCFG1_TERM_UP; | |
+ /* Set default strength value in case none is given */ | |
+ if (arg == 1) | |
+ arg = 5000; | |
+ | |
switch (arg) { | |
case 20000: | |
value |= PADCFG1_TERM_20K << PADCFG1_TERM_SHIFT; | |
@@ -685,12 +694,12 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin, | |
case 5000: | |
value |= PADCFG1_TERM_5K << PADCFG1_TERM_SHIFT; | |
break; | |
- case 2000: | |
- value |= PADCFG1_TERM_2K << PADCFG1_TERM_SHIFT; | |
- break; | |
case 1000: | |
value |= PADCFG1_TERM_1K << PADCFG1_TERM_SHIFT; | |
break; | |
+ case 833: | |
+ value |= PADCFG1_TERM_833 << PADCFG1_TERM_SHIFT; | |
+ break; | |
default: | |
ret = -EINVAL; | |
} | |
@@ -700,6 +709,10 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin, | |
case PIN_CONFIG_BIAS_PULL_DOWN: | |
value &= ~(PADCFG1_TERM_UP | PADCFG1_TERM_MASK); | |
+ /* Set default strength value in case none is given */ | |
+ if (arg == 1) | |
+ arg = 5000; | |
+ | |
switch (arg) { | |
case 20000: | |
value |= PADCFG1_TERM_20K << PADCFG1_TERM_SHIFT; | |
@@ -714,6 +727,13 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin, | |
} | |
value |= PADCFG1_TERM_1K << PADCFG1_TERM_SHIFT; | |
break; | |
+ case 833: | |
+ if (!(community->features & PINCTRL_FEATURE_1K_PD)) { | |
+ ret = -EINVAL; | |
+ break; | |
+ } | |
+ value |= PADCFG1_TERM_833 << PADCFG1_TERM_SHIFT; | |
+ break; | |
default: | |
ret = -EINVAL; | |
} | |
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c | |
index 9a760f5cd7ed5..4aea3e05e8c65 100644 | |
--- a/drivers/pinctrl/pinctrl-amd.c | |
+++ b/drivers/pinctrl/pinctrl-amd.c | |
@@ -156,7 +156,7 @@ static int amd_gpio_set_debounce(struct gpio_chip *gc, unsigned offset, | |
pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF); | |
pin_reg &= ~BIT(DB_TMR_LARGE_OFF); | |
} else if (debounce < 250000) { | |
- time = debounce / 15600; | |
+ time = debounce / 15625; | |
pin_reg |= time & DB_TMR_OUT_MASK; | |
pin_reg &= ~BIT(DB_TMR_OUT_UNIT_OFF); | |
pin_reg |= BIT(DB_TMR_LARGE_OFF); | |
@@ -166,14 +166,14 @@ static int amd_gpio_set_debounce(struct gpio_chip *gc, unsigned offset, | |
pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF); | |
pin_reg |= BIT(DB_TMR_LARGE_OFF); | |
} else { | |
- pin_reg &= ~DB_CNTRl_MASK; | |
+ pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF); | |
ret = -EINVAL; | |
} | |
} else { | |
pin_reg &= ~BIT(DB_TMR_OUT_UNIT_OFF); | |
pin_reg &= ~BIT(DB_TMR_LARGE_OFF); | |
pin_reg &= ~DB_TMR_OUT_MASK; | |
- pin_reg &= ~DB_CNTRl_MASK; | |
+ pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF); | |
} | |
writel(pin_reg, gpio_dev->base + offset * 4); | |
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); | |
diff --git a/drivers/pinctrl/pinctrl-mcp23s08_spi.c b/drivers/pinctrl/pinctrl-mcp23s08_spi.c | |
index 1f47a661b0a79..7c72cffe14127 100644 | |
--- a/drivers/pinctrl/pinctrl-mcp23s08_spi.c | |
+++ b/drivers/pinctrl/pinctrl-mcp23s08_spi.c | |
@@ -119,7 +119,7 @@ static int mcp23s08_spi_regmap_init(struct mcp23s08 *mcp, struct device *dev, | |
return -EINVAL; | |
} | |
- copy = devm_kmemdup(dev, &config, sizeof(config), GFP_KERNEL); | |
+ copy = devm_kmemdup(dev, config, sizeof(*config), GFP_KERNEL); | |
if (!copy) | |
return -ENOMEM; | |
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c | |
index 1df232266f63a..1554f0275067e 100644 | |
--- a/drivers/pinctrl/qcom/pinctrl-msm.c | |
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c | |
@@ -815,21 +815,14 @@ static void msm_gpio_irq_clear_unmask(struct irq_data *d, bool status_clear) | |
static void msm_gpio_irq_enable(struct irq_data *d) | |
{ | |
- /* | |
- * Clear the interrupt that may be pending before we enable | |
- * the line. | |
- * This is especially a problem with the GPIOs routed to the | |
- * PDC. These GPIOs are direct-connect interrupts to the GIC. | |
- * Disabling the interrupt line at the PDC does not prevent | |
- * the interrupt from being latched at the GIC. The state at | |
- * GIC needs to be cleared before enabling. | |
- */ | |
- if (d->parent_data) { | |
- irq_chip_set_parent_state(d, IRQCHIP_STATE_PENDING, 0); | |
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d); | |
+ struct msm_pinctrl *pctrl = gpiochip_get_data(gc); | |
+ | |
+ if (d->parent_data) | |
irq_chip_enable_parent(d); | |
- } | |
- msm_gpio_irq_clear_unmask(d, true); | |
+ if (!test_bit(d->hwirq, pctrl->skip_wake_irqs)) | |
+ msm_gpio_irq_clear_unmask(d, true); | |
} | |
static void msm_gpio_irq_disable(struct irq_data *d) | |
@@ -1104,6 +1097,19 @@ static int msm_gpio_irq_reqres(struct irq_data *d) | |
ret = -EINVAL; | |
goto out; | |
} | |
+ | |
+ /* | |
+ * Clear the interrupt that may be pending before we enable | |
+ * the line. | |
+ * This is especially a problem with the GPIOs routed to the | |
+ * PDC. These GPIOs are direct-connect interrupts to the GIC. | |
+ * Disabling the interrupt line at the PDC does not prevent | |
+ * the interrupt from being latched at the GIC. The state at | |
+ * GIC needs to be cleared before enabling. | |
+ */ | |
+ if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs)) | |
+ irq_chip_set_parent_state(d, IRQCHIP_STATE_PENDING, 0); | |
+ | |
return 0; | |
out: | |
module_put(gc->owner); | |
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8250.c b/drivers/pinctrl/qcom/pinctrl-sm8250.c | |
index 826df0d637eaa..af144e724bd9c 100644 | |
--- a/drivers/pinctrl/qcom/pinctrl-sm8250.c | |
+++ b/drivers/pinctrl/qcom/pinctrl-sm8250.c | |
@@ -1313,6 +1313,22 @@ static const struct msm_pingroup sm8250_groups[] = { | |
[183] = SDC_PINGROUP(sdc2_data, 0xb7000, 9, 0), | |
}; | |
+static const struct msm_gpio_wakeirq_map sm8250_pdc_map[] = { | |
+ { 0, 79 }, { 1, 84 }, { 2, 80 }, { 3, 82 }, { 4, 107 }, { 7, 43 }, | |
+ { 11, 42 }, { 14, 44 }, { 15, 52 }, { 19, 67 }, { 23, 68 }, { 24, 105 }, | |
+ { 27, 92 }, { 28, 106 }, { 31, 69 }, { 35, 70 }, { 39, 37 }, | |
+ { 40, 108 }, { 43, 71 }, { 45, 72 }, { 47, 83 }, { 51, 74 }, { 55, 77 }, | |
+ { 59, 78 }, { 63, 75 }, { 64, 81 }, { 65, 87 }, { 66, 88 }, { 67, 89 }, | |
+ { 68, 54 }, { 70, 85 }, { 77, 46 }, { 80, 90 }, { 81, 91 }, { 83, 97 }, | |
+ { 84, 98 }, { 86, 99 }, { 87, 100 }, { 88, 101 }, { 89, 102 }, | |
+ { 92, 103 }, { 93, 104 }, { 100, 53 }, { 103, 47 }, { 104, 48 }, | |
+ { 108, 49 }, { 109, 94 }, { 110, 95 }, { 111, 96 }, { 112, 55 }, | |
+ { 113, 56 }, { 118, 50 }, { 121, 51 }, { 122, 57 }, { 123, 58 }, | |
+ { 124, 45 }, { 126, 59 }, { 128, 76 }, { 129, 86 }, { 132, 93 }, | |
+ { 133, 65 }, { 134, 66 }, { 136, 62 }, { 137, 63 }, { 138, 64 }, | |
+ { 142, 60 }, { 143, 61 } | |
+}; | |
+ | |
static const struct msm_pinctrl_soc_data sm8250_pinctrl = { | |
.pins = sm8250_pins, | |
.npins = ARRAY_SIZE(sm8250_pins), | |
@@ -1323,6 +1339,8 @@ static const struct msm_pinctrl_soc_data sm8250_pinctrl = { | |
.ngpios = 181, | |
.tiles = sm8250_tiles, | |
.ntiles = ARRAY_SIZE(sm8250_tiles), | |
+ .wakeirq_map = sm8250_pdc_map, | |
+ .nwakeirq_map = ARRAY_SIZE(sm8250_pdc_map), | |
}; | |
static int sm8250_pinctrl_probe(struct platform_device *pdev) | |
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c | |
index f32da0ca529e0..308bda2e9c000 100644 | |
--- a/drivers/scsi/device_handler/scsi_dh_alua.c | |
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c | |
@@ -658,8 +658,8 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg) | |
rcu_read_lock(); | |
list_for_each_entry_rcu(h, | |
&tmp_pg->dh_list, node) { | |
- /* h->sdev should always be valid */ | |
- BUG_ON(!h->sdev); | |
+ if (!h->sdev) | |
+ continue; | |
h->sdev->access_state = desc[0]; | |
} | |
rcu_read_unlock(); | |
@@ -705,7 +705,8 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg) | |
pg->expiry = 0; | |
rcu_read_lock(); | |
list_for_each_entry_rcu(h, &pg->dh_list, node) { | |
- BUG_ON(!h->sdev); | |
+ if (!h->sdev) | |
+ continue; | |
h->sdev->access_state = | |
(pg->state & SCSI_ACCESS_STATE_MASK); | |
if (pg->pref) | |
@@ -1147,7 +1148,6 @@ static void alua_bus_detach(struct scsi_device *sdev) | |
spin_lock(&h->pg_lock); | |
pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock)); | |
rcu_assign_pointer(h->pg, NULL); | |
- h->sdev = NULL; | |
spin_unlock(&h->pg_lock); | |
if (pg) { | |
spin_lock_irq(&pg->lock); | |
@@ -1156,6 +1156,7 @@ static void alua_bus_detach(struct scsi_device *sdev) | |
kref_put(&pg->kref, release_port_group); | |
} | |
sdev->handler_data = NULL; | |
+ synchronize_rcu(); | |
kfree(h); | |
} | |
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c | |
index 48d5da59262b4..aed59ec20ad9e 100644 | |
--- a/drivers/scsi/hpsa.c | |
+++ b/drivers/scsi/hpsa.c | |
@@ -8854,7 +8854,7 @@ reinit_after_soft_reset: | |
/* hook into SCSI subsystem */ | |
rc = hpsa_scsi_add_host(h); | |
if (rc) | |
- goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */ | |
+ goto clean8; /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */ | |
/* Monitor the controller for firmware lockups */ | |
h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; | |
@@ -8869,6 +8869,8 @@ reinit_after_soft_reset: | |
HPSA_EVENT_MONITOR_INTERVAL); | |
return 0; | |
+clean8: /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */ | |
+ kfree(h->lastlogicals); | |
clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */ | |
hpsa_free_performant_mode(h); | |
h->access.set_intr_mask(h, HPSA_INTR_OFF); | |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c | |
index e86682dc34eca..87d05c1950870 100644 | |
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c | |
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c | |
@@ -1742,6 +1742,13 @@ _base_irqpoll(struct irq_poll *irqpoll, int budget) | |
reply_q->irq_poll_scheduled = false; | |
reply_q->irq_line_enable = true; | |
enable_irq(reply_q->os_irq); | |
+ /* | |
+ * Go for one more round of processing the | |
+ * reply descriptor post queue incase if HBA | |
+ * Firmware has posted some reply descriptors | |
+ * while reenabling the IRQ. | |
+ */ | |
+ _base_process_reply_queue(reply_q); | |
} | |
return num_entries; | |
diff --git a/drivers/scsi/ufs/ufshcd-crypto.c b/drivers/scsi/ufs/ufshcd-crypto.c | |
index d2edbd960ebff..07310b12a5dc8 100644 | |
--- a/drivers/scsi/ufs/ufshcd-crypto.c | |
+++ b/drivers/scsi/ufs/ufshcd-crypto.c | |
@@ -59,7 +59,7 @@ static int ufshcd_crypto_keyslot_program(struct blk_keyslot_manager *ksm, | |
u8 data_unit_mask = key->crypto_cfg.data_unit_size / 512; | |
int i; | |
int cap_idx = -1; | |
- union ufs_crypto_cfg_entry cfg = { 0 }; | |
+ union ufs_crypto_cfg_entry cfg = {}; | |
int err; | |
BUILD_BUG_ON(UFS_CRYPTO_KEY_SIZE_INVALID != 0); | |
@@ -100,7 +100,7 @@ static int ufshcd_clear_keyslot(struct ufs_hba *hba, int slot) | |
* Clear the crypto cfg on the device. Clearing CFGE | |
* might not be sufficient, so just clear the entire cfg. | |
*/ | |
- union ufs_crypto_cfg_entry cfg = { 0 }; | |
+ union ufs_crypto_cfg_entry cfg = {}; | |
return ufshcd_program_key(hba, &cfg, slot); | |
} | |
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c | |
index 8ed3623be8a4b..9605abaaec670 100644 | |
--- a/drivers/spi/spi-bcm2835.c | |
+++ b/drivers/spi/spi-bcm2835.c | |
@@ -1193,7 +1193,6 @@ static int bcm2835_spi_setup(struct spi_device *spi) | |
struct spi_controller *ctlr = spi->controller; | |
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr); | |
struct gpio_chip *chip; | |
- enum gpio_lookup_flags lflags; | |
u32 cs; | |
/* | |
@@ -1261,7 +1260,7 @@ static int bcm2835_spi_setup(struct spi_device *spi) | |
spi->cs_gpiod = gpiochip_request_own_desc(chip, 8 - spi->chip_select, | |
DRV_NAME, | |
- lflags, | |
+ GPIO_LOOKUP_FLAGS_DEFAULT, | |
GPIOD_OUT_LOW); | |
if (IS_ERR(spi->cs_gpiod)) | |
return PTR_ERR(spi->cs_gpiod); | |
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c | |
index 108a7d50d2c37..a96762ffb70b6 100644 | |
--- a/drivers/spi/spi-fsl-dspi.c | |
+++ b/drivers/spi/spi-fsl-dspi.c | |
@@ -1106,12 +1106,11 @@ MODULE_DEVICE_TABLE(of, fsl_dspi_dt_ids); | |
#ifdef CONFIG_PM_SLEEP | |
static int dspi_suspend(struct device *dev) | |
{ | |
- struct spi_controller *ctlr = dev_get_drvdata(dev); | |
- struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr); | |
+ struct fsl_dspi *dspi = dev_get_drvdata(dev); | |
if (dspi->irq) | |
disable_irq(dspi->irq); | |
- spi_controller_suspend(ctlr); | |
+ spi_controller_suspend(dspi->ctlr); | |
clk_disable_unprepare(dspi->clk); | |
pinctrl_pm_select_sleep_state(dev); | |
@@ -1121,8 +1120,7 @@ static int dspi_suspend(struct device *dev) | |
static int dspi_resume(struct device *dev) | |
{ | |
- struct spi_controller *ctlr = dev_get_drvdata(dev); | |
- struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr); | |
+ struct fsl_dspi *dspi = dev_get_drvdata(dev); | |
int ret; | |
pinctrl_pm_select_default_state(dev); | |
@@ -1130,7 +1128,7 @@ static int dspi_resume(struct device *dev) | |
ret = clk_prepare_enable(dspi->clk); | |
if (ret) | |
return ret; | |
- spi_controller_resume(ctlr); | |
+ spi_controller_resume(dspi->ctlr); | |
if (dspi->irq) | |
enable_irq(dspi->irq); | |
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c | |
index e38e5ad3c7068..9aac515b718c8 100644 | |
--- a/drivers/spi/spi-imx.c | |
+++ b/drivers/spi/spi-imx.c | |
@@ -1674,15 +1674,18 @@ static int spi_imx_probe(struct platform_device *pdev) | |
goto out_master_put; | |
} | |
- pm_runtime_enable(spi_imx->dev); | |
+ ret = clk_prepare_enable(spi_imx->clk_per); | |
+ if (ret) | |
+ goto out_master_put; | |
+ | |
+ ret = clk_prepare_enable(spi_imx->clk_ipg); | |
+ if (ret) | |
+ goto out_put_per; | |
+ | |
pm_runtime_set_autosuspend_delay(spi_imx->dev, MXC_RPM_TIMEOUT); | |
pm_runtime_use_autosuspend(spi_imx->dev); | |
- | |
- ret = pm_runtime_get_sync(spi_imx->dev); | |
- if (ret < 0) { | |
- dev_err(spi_imx->dev, "failed to enable clock\n"); | |
- goto out_runtime_pm_put; | |
- } | |
+ pm_runtime_set_active(spi_imx->dev); | |
+ pm_runtime_enable(spi_imx->dev); | |
spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per); | |
/* | |
@@ -1722,8 +1725,12 @@ out_bitbang_start: | |
spi_imx_sdma_exit(spi_imx); | |
out_runtime_pm_put: | |
pm_runtime_dont_use_autosuspend(spi_imx->dev); | |
- pm_runtime_put_sync(spi_imx->dev); | |
+ pm_runtime_set_suspended(&pdev->dev); | |
pm_runtime_disable(spi_imx->dev); | |
+ | |
+ clk_disable_unprepare(spi_imx->clk_ipg); | |
+out_put_per: | |
+ clk_disable_unprepare(spi_imx->clk_per); | |
out_master_put: | |
spi_master_put(master); | |
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c | |
index 5f7489fa1327b..a331e52789e33 100644 | |
--- a/drivers/thunderbolt/nhi.c | |
+++ b/drivers/thunderbolt/nhi.c | |
@@ -405,12 +405,23 @@ static int ring_request_msix(struct tb_ring *ring, bool no_suspend) | |
ring->vector = ret; | |
- ring->irq = pci_irq_vector(ring->nhi->pdev, ring->vector); | |
- if (ring->irq < 0) | |
- return ring->irq; | |
+ ret = pci_irq_vector(ring->nhi->pdev, ring->vector); | |
+ if (ret < 0) | |
+ goto err_ida_remove; | |
+ | |
+ ring->irq = ret; | |
irqflags = no_suspend ? IRQF_NO_SUSPEND : 0; | |
- return request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring); | |
+ ret = request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring); | |
+ if (ret) | |
+ goto err_ida_remove; | |
+ | |
+ return 0; | |
+ | |
+err_ida_remove: | |
+ ida_simple_remove(&nhi->msix_ida, ring->vector); | |
+ | |
+ return ret; | |
} | |
static void ring_release_msix(struct tb_ring *ring) | |
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c | |
index 48907853732ac..c00ad817042e1 100644 | |
--- a/drivers/thunderbolt/xdomain.c | |
+++ b/drivers/thunderbolt/xdomain.c | |
@@ -881,6 +881,7 @@ static void enumerate_services(struct tb_xdomain *xd) | |
id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL); | |
if (id < 0) { | |
+ kfree(svc->key); | |
kfree(svc); | |
break; | |
} | |
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c | |
index 6dca744e39e95..be06f1a961c2c 100644 | |
--- a/drivers/uio/uio.c | |
+++ b/drivers/uio/uio.c | |
@@ -413,10 +413,10 @@ static int uio_get_minor(struct uio_device *idev) | |
return retval; | |
} | |
-static void uio_free_minor(struct uio_device *idev) | |
+static void uio_free_minor(unsigned long minor) | |
{ | |
mutex_lock(&minor_lock); | |
- idr_remove(&uio_idr, idev->minor); | |
+ idr_remove(&uio_idr, minor); | |
mutex_unlock(&minor_lock); | |
} | |
@@ -990,7 +990,7 @@ err_request_irq: | |
err_uio_dev_add_attributes: | |
device_del(&idev->dev); | |
err_device_create: | |
- uio_free_minor(idev); | |
+ uio_free_minor(idev->minor); | |
put_device(&idev->dev); | |
return ret; | |
} | |
@@ -1042,11 +1042,13 @@ EXPORT_SYMBOL_GPL(__devm_uio_register_device); | |
void uio_unregister_device(struct uio_info *info) | |
{ | |
struct uio_device *idev; | |
+ unsigned long minor; | |
if (!info || !info->uio_dev) | |
return; | |
idev = info->uio_dev; | |
+ minor = idev->minor; | |
mutex_lock(&idev->info_lock); | |
uio_dev_del_attributes(idev); | |
@@ -1062,7 +1064,7 @@ void uio_unregister_device(struct uio_info *info) | |
device_unregister(&idev->dev); | |
- uio_free_minor(idev); | |
+ uio_free_minor(minor); | |
return; | |
} | |
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c | |
index 71664bfcf1bd8..ce73f7d588c9b 100644 | |
--- a/drivers/usb/class/cdc-acm.c | |
+++ b/drivers/usb/class/cdc-acm.c | |
@@ -1706,6 +1706,15 @@ static const struct usb_device_id acm_ids[] = { | |
{ USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */ | |
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */ | |
}, | |
+ { USB_DEVICE(0x045b, 0x023c), /* Renesas USB Download mode */ | |
+ .driver_info = DISABLE_ECHO, /* Don't echo banner */ | |
+ }, | |
+ { USB_DEVICE(0x045b, 0x0248), /* Renesas USB Download mode */ | |
+ .driver_info = DISABLE_ECHO, /* Don't echo banner */ | |
+ }, | |
+ { USB_DEVICE(0x045b, 0x024D), /* Renesas USB Download mode */ | |
+ .driver_info = DISABLE_ECHO, /* Don't echo banner */ | |
+ }, | |
{ USB_DEVICE(0x0e8d, 0x0003), /* FIREFLY, MediaTek Inc; [email protected] */ | |
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */ | |
}, | |
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c | |
index 242b6210380a4..bae6a70664c80 100644 | |
--- a/drivers/usb/dwc3/dwc3-pci.c | |
+++ b/drivers/usb/dwc3/dwc3-pci.c | |
@@ -40,6 +40,7 @@ | |
#define PCI_DEVICE_ID_INTEL_TGPLP 0xa0ee | |
#define PCI_DEVICE_ID_INTEL_TGPH 0x43ee | |
#define PCI_DEVICE_ID_INTEL_JSP 0x4dee | |
+#define PCI_DEVICE_ID_INTEL_ADLS 0x7ae1 | |
#define PCI_INTEL_BXT_DSM_GUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511" | |
#define PCI_INTEL_BXT_FUNC_PMU_PWR 4 | |
@@ -367,6 +368,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = { | |
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_JSP), | |
(kernel_ulong_t) &dwc3_pci_intel_properties, }, | |
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLS), | |
+ (kernel_ulong_t) &dwc3_pci_intel_properties, }, | |
+ | |
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_NL_USB), | |
(kernel_ulong_t) &dwc3_pci_amd_properties, }, | |
{ } /* Terminating Entry */ | |
diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c | |
index e01e366d89cd5..062dfac303996 100644 | |
--- a/drivers/usb/gadget/legacy/raw_gadget.c | |
+++ b/drivers/usb/gadget/legacy/raw_gadget.c | |
@@ -564,9 +564,12 @@ static int raw_ioctl_event_fetch(struct raw_dev *dev, unsigned long value) | |
return -ENODEV; | |
} | |
length = min(arg.length, event->length); | |
- if (copy_to_user((void __user *)value, event, sizeof(*event) + length)) | |
+ if (copy_to_user((void __user *)value, event, sizeof(*event) + length)) { | |
+ kfree(event); | |
return -EFAULT; | |
+ } | |
+ kfree(event); | |
return 0; | |
} | |
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c | |
index a6f7b2594c090..c0cb007b749ff 100644 | |
--- a/drivers/usb/gadget/udc/fsl_udc_core.c | |
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c | |
@@ -1051,7 +1051,7 @@ static int fsl_ep_fifo_status(struct usb_ep *_ep) | |
u32 bitmask; | |
struct ep_queue_head *qh; | |
- if (!_ep || _ep->desc || !(_ep->desc->bEndpointAddress&0xF)) | |
+ if (!_ep || !_ep->desc || !(_ep->desc->bEndpointAddress&0xF)) | |
return -ENODEV; | |
ep = container_of(_ep, struct fsl_ep, ep); | |
diff --git a/drivers/usb/gadget/udc/goku_udc.c b/drivers/usb/gadget/udc/goku_udc.c | |
index 25c1d6ab5adb4..3e1267d38774f 100644 | |
--- a/drivers/usb/gadget/udc/goku_udc.c | |
+++ b/drivers/usb/gadget/udc/goku_udc.c | |
@@ -1760,6 +1760,7 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |
goto err; | |
} | |
+ pci_set_drvdata(pdev, dev); | |
spin_lock_init(&dev->lock); | |
dev->pdev = pdev; | |
dev->gadget.ops = &goku_ops; | |
@@ -1793,7 +1794,6 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |
} | |
dev->regs = (struct goku_udc_regs __iomem *) base; | |
- pci_set_drvdata(pdev, dev); | |
INFO(dev, "%s\n", driver_desc); | |
INFO(dev, "version: " DRIVER_VERSION " %s\n", dmastr()); | |
INFO(dev, "irq %d, pci mem %p\n", pdev->irq, base); | |
diff --git a/drivers/usb/host/xhci-histb.c b/drivers/usb/host/xhci-histb.c | |
index 5546e7e013a88..08369857686e7 100644 | |
--- a/drivers/usb/host/xhci-histb.c | |
+++ b/drivers/usb/host/xhci-histb.c | |
@@ -240,7 +240,7 @@ static int xhci_histb_probe(struct platform_device *pdev) | |
/* Initialize dma_mask and coherent_dma_mask to 32-bits */ | |
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); | |
if (ret) | |
- return ret; | |
+ goto disable_pm; | |
hcd = usb_create_hcd(driver, dev, dev_name(dev)); | |
if (!hcd) { | |
diff --git a/drivers/usb/misc/apple-mfi-fastcharge.c b/drivers/usb/misc/apple-mfi-fastcharge.c | |
index 579d8c84de42c..9de0171b51776 100644 | |
--- a/drivers/usb/misc/apple-mfi-fastcharge.c | |
+++ b/drivers/usb/misc/apple-mfi-fastcharge.c | |
@@ -120,8 +120,10 @@ static int apple_mfi_fc_set_property(struct power_supply *psy, | |
dev_dbg(&mfi->udev->dev, "prop: %d\n", psp); | |
ret = pm_runtime_get_sync(&mfi->udev->dev); | |
- if (ret < 0) | |
+ if (ret < 0) { | |
+ pm_runtime_put_noidle(&mfi->udev->dev); | |
return ret; | |
+ } | |
switch (psp) { | |
case POWER_SUPPLY_PROP_CHARGE_TYPE: | |
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c | |
index 30085b2be7b90..5892f3ce0cdc8 100644 | |
--- a/drivers/usb/musb/musb_dsps.c | |
+++ b/drivers/usb/musb/musb_dsps.c | |
@@ -429,10 +429,12 @@ static int dsps_musb_init(struct musb *musb) | |
struct platform_device *parent = to_platform_device(dev->parent); | |
const struct dsps_musb_wrapper *wrp = glue->wrp; | |
void __iomem *reg_base; | |
+ struct resource *r; | |
u32 rev, val; | |
int ret; | |
- reg_base = devm_platform_ioremap_resource_byname(parent, "control"); | |
+ r = platform_get_resource_byname(parent, IORESOURCE_MEM, "control"); | |
+ reg_base = devm_ioremap_resource(dev, r); | |
if (IS_ERR(reg_base)) | |
return PTR_ERR(reg_base); | |
musb->ctrl_base = reg_base; | |
diff --git a/drivers/usb/typec/ucsi/psy.c b/drivers/usb/typec/ucsi/psy.c | |
index 26ed0b520749a..571a51e162346 100644 | |
--- a/drivers/usb/typec/ucsi/psy.c | |
+++ b/drivers/usb/typec/ucsi/psy.c | |
@@ -238,4 +238,13 @@ void ucsi_unregister_port_psy(struct ucsi_connector *con) | |
return; | |
power_supply_unregister(con->psy); | |
+ con->psy = NULL; | |
+} | |
+ | |
+void ucsi_port_psy_changed(struct ucsi_connector *con) | |
+{ | |
+ if (IS_ERR_OR_NULL(con->psy)) | |
+ return; | |
+ | |
+ power_supply_changed(con->psy); | |
} | |
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c | |
index 758b988ac518a..51a570d40a42e 100644 | |
--- a/drivers/usb/typec/ucsi/ucsi.c | |
+++ b/drivers/usb/typec/ucsi/ucsi.c | |
@@ -643,8 +643,10 @@ static void ucsi_handle_connector_change(struct work_struct *work) | |
role = !!(con->status.flags & UCSI_CONSTAT_PWR_DIR); | |
if (con->status.change & UCSI_CONSTAT_POWER_OPMODE_CHANGE || | |
- con->status.change & UCSI_CONSTAT_POWER_LEVEL_CHANGE) | |
+ con->status.change & UCSI_CONSTAT_POWER_LEVEL_CHANGE) { | |
ucsi_pwr_opmode_change(con); | |
+ ucsi_port_psy_changed(con); | |
+ } | |
if (con->status.change & UCSI_CONSTAT_POWER_DIR_CHANGE) { | |
typec_set_pwr_role(con->port, role); | |
@@ -674,6 +676,8 @@ static void ucsi_handle_connector_change(struct work_struct *work) | |
ucsi_register_partner(con); | |
else | |
ucsi_unregister_partner(con); | |
+ | |
+ ucsi_port_psy_changed(con); | |
} | |
if (con->status.change & UCSI_CONSTAT_CAM_CHANGE) { | |
@@ -994,6 +998,7 @@ static int ucsi_register_port(struct ucsi *ucsi, int index) | |
!!(con->status.flags & UCSI_CONSTAT_PWR_DIR)); | |
ucsi_pwr_opmode_change(con); | |
ucsi_register_partner(con); | |
+ ucsi_port_psy_changed(con); | |
} | |
if (con->partner) { | |
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h | |
index cba6f77bea61b..b7a92f2460507 100644 | |
--- a/drivers/usb/typec/ucsi/ucsi.h | |
+++ b/drivers/usb/typec/ucsi/ucsi.h | |
@@ -340,9 +340,11 @@ int ucsi_resume(struct ucsi *ucsi); | |
#if IS_ENABLED(CONFIG_POWER_SUPPLY) | |
int ucsi_register_port_psy(struct ucsi_connector *con); | |
void ucsi_unregister_port_psy(struct ucsi_connector *con); | |
+void ucsi_port_psy_changed(struct ucsi_connector *con); | |
#else | |
static inline int ucsi_register_port_psy(struct ucsi_connector *con) { return 0; } | |
static inline void ucsi_unregister_port_psy(struct ucsi_connector *con) { } | |
+static inline void ucsi_port_psy_changed(struct ucsi_connector *con) { } | |
#endif /* CONFIG_POWER_SUPPLY */ | |
#if IS_ENABLED(CONFIG_TYPEC_DP_ALTMODE) | |
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c | |
index 1ab1f5cda4ac2..bfdc010a6b043 100644 | |
--- a/drivers/vfio/pci/vfio_pci.c | |
+++ b/drivers/vfio/pci/vfio_pci.c | |
@@ -385,7 +385,7 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev) | |
pdev->vendor == PCI_VENDOR_ID_INTEL && | |
IS_ENABLED(CONFIG_VFIO_PCI_IGD)) { | |
ret = vfio_pci_igd_init(vdev); | |
- if (ret) { | |
+ if (ret && ret != -ENODEV) { | |
pci_warn(pdev, "Failed to setup Intel IGD regions\n"); | |
goto disable_exit; | |
} | |
diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c | |
index 9e353c484ace2..a0b5fc8e46f4d 100644 | |
--- a/drivers/vfio/pci/vfio_pci_rdwr.c | |
+++ b/drivers/vfio/pci/vfio_pci_rdwr.c | |
@@ -356,34 +356,60 @@ ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf, | |
return done; | |
} | |
-static int vfio_pci_ioeventfd_handler(void *opaque, void *unused) | |
+static void vfio_pci_ioeventfd_do_write(struct vfio_pci_ioeventfd *ioeventfd, | |
+ bool test_mem) | |
{ | |
- struct vfio_pci_ioeventfd *ioeventfd = opaque; | |
- | |
switch (ioeventfd->count) { | |
case 1: | |
- vfio_pci_iowrite8(ioeventfd->vdev, ioeventfd->test_mem, | |
+ vfio_pci_iowrite8(ioeventfd->vdev, test_mem, | |
ioeventfd->data, ioeventfd->addr); | |
break; | |
case 2: | |
- vfio_pci_iowrite16(ioeventfd->vdev, ioeventfd->test_mem, | |
+ vfio_pci_iowrite16(ioeventfd->vdev, test_mem, | |
ioeventfd->data, ioeventfd->addr); | |
break; | |
case 4: | |
- vfio_pci_iowrite32(ioeventfd->vdev, ioeventfd->test_mem, | |
+ vfio_pci_iowrite32(ioeventfd->vdev, test_mem, | |
ioeventfd->data, ioeventfd->addr); | |
break; | |
#ifdef iowrite64 | |
case 8: | |
- vfio_pci_iowrite64(ioeventfd->vdev, ioeventfd->test_mem, | |
+ vfio_pci_iowrite64(ioeventfd->vdev, test_mem, | |
ioeventfd->data, ioeventfd->addr); | |
break; | |
#endif | |
} | |
+} | |
+ | |
+static int vfio_pci_ioeventfd_handler(void *opaque, void *unused) | |
+{ | |
+ struct vfio_pci_ioeventfd *ioeventfd = opaque; | |
+ struct vfio_pci_device *vdev = ioeventfd->vdev; | |
+ | |
+ if (ioeventfd->test_mem) { | |
+ if (!down_read_trylock(&vdev->memory_lock)) | |
+ return 1; /* Lock contended, use thread */ | |
+ if (!__vfio_pci_memory_enabled(vdev)) { | |
+ up_read(&vdev->memory_lock); | |
+ return 0; | |
+ } | |
+ } | |
+ | |
+ vfio_pci_ioeventfd_do_write(ioeventfd, false); | |
+ | |
+ if (ioeventfd->test_mem) | |
+ up_read(&vdev->memory_lock); | |
return 0; | |
} | |
+static void vfio_pci_ioeventfd_thread(void *opaque, void *unused) | |
+{ | |
+ struct vfio_pci_ioeventfd *ioeventfd = opaque; | |
+ | |
+ vfio_pci_ioeventfd_do_write(ioeventfd, ioeventfd->test_mem); | |
+} | |
+ | |
long vfio_pci_ioeventfd(struct vfio_pci_device *vdev, loff_t offset, | |
uint64_t data, int count, int fd) | |
{ | |
@@ -457,7 +483,8 @@ long vfio_pci_ioeventfd(struct vfio_pci_device *vdev, loff_t offset, | |
ioeventfd->test_mem = vdev->pdev->resource[bar].flags & IORESOURCE_MEM; | |
ret = vfio_virqfd_enable(ioeventfd, vfio_pci_ioeventfd_handler, | |
- NULL, NULL, &ioeventfd->virqfd, fd); | |
+ vfio_pci_ioeventfd_thread, NULL, | |
+ &ioeventfd->virqfd, fd); | |
if (ret) { | |
kfree(ioeventfd); | |
goto out_unlock; | |
diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c | |
index c0771a9567fb5..fb4b385191f28 100644 | |
--- a/drivers/vfio/platform/vfio_platform_common.c | |
+++ b/drivers/vfio/platform/vfio_platform_common.c | |
@@ -267,7 +267,7 @@ static int vfio_platform_open(void *device_data) | |
ret = pm_runtime_get_sync(vdev->device); | |
if (ret < 0) | |
- goto err_pm; | |
+ goto err_rst; | |
ret = vfio_platform_call_reset(vdev, &extra_dbg); | |
if (ret && vdev->reset_required) { | |
@@ -284,7 +284,6 @@ static int vfio_platform_open(void *device_data) | |
err_rst: | |
pm_runtime_put(vdev->device); | |
-err_pm: | |
vfio_platform_irq_cleanup(vdev); | |
err_irq: | |
vfio_platform_regions_cleanup(vdev); | |
diff --git a/fs/afs/write.c b/fs/afs/write.c | |
index 50371207f3273..c9195fc67fd8f 100644 | |
--- a/fs/afs/write.c | |
+++ b/fs/afs/write.c | |
@@ -169,11 +169,14 @@ int afs_write_end(struct file *file, struct address_space *mapping, | |
unsigned int f, from = pos & (PAGE_SIZE - 1); | |
unsigned int t, to = from + copied; | |
loff_t i_size, maybe_i_size; | |
- int ret; | |
+ int ret = 0; | |
_enter("{%llx:%llu},{%lx}", | |
vnode->fid.vid, vnode->fid.vnode, page->index); | |
+ if (copied == 0) | |
+ goto out; | |
+ | |
maybe_i_size = pos + copied; | |
i_size = i_size_read(&vnode->vfs_inode); | |
diff --git a/fs/afs/xattr.c b/fs/afs/xattr.c | |
index 38884d6c57cdc..95c573dcda116 100644 | |
--- a/fs/afs/xattr.c | |
+++ b/fs/afs/xattr.c | |
@@ -148,11 +148,6 @@ static const struct xattr_handler afs_xattr_afs_acl_handler = { | |
.set = afs_xattr_set_acl, | |
}; | |
-static void yfs_acl_put(struct afs_operation *op) | |
-{ | |
- yfs_free_opaque_acl(op->yacl); | |
-} | |
- | |
static const struct afs_operation_ops yfs_fetch_opaque_acl_operation = { | |
.issue_yfs_rpc = yfs_fs_fetch_opaque_acl, | |
.success = afs_acl_success, | |
@@ -246,7 +241,7 @@ error: | |
static const struct afs_operation_ops yfs_store_opaque_acl2_operation = { | |
.issue_yfs_rpc = yfs_fs_store_opaque_acl2, | |
.success = afs_acl_success, | |
- .put = yfs_acl_put, | |
+ .put = afs_acl_put, | |
}; | |
/* | |
diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c | |
index 3b1239b7e90d8..bd787e71a657f 100644 | |
--- a/fs/afs/yfsclient.c | |
+++ b/fs/afs/yfsclient.c | |
@@ -1990,6 +1990,7 @@ void yfs_fs_store_opaque_acl2(struct afs_operation *op) | |
memcpy(bp, acl->data, acl->size); | |
if (acl->size != size) | |
memset((void *)bp + acl->size, 0, size - acl->size); | |
+ bp += size / sizeof(__be32); | |
yfs_check_req(call, bp); | |
trace_afs_make_fs_call(call, &vp->fid); | |
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c | |
index b58b33051a89d..fb1833bccd04e 100644 | |
--- a/fs/btrfs/dev-replace.c | |
+++ b/fs/btrfs/dev-replace.c | |
@@ -95,6 +95,17 @@ int btrfs_init_dev_replace(struct btrfs_fs_info *fs_info) | |
ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0); | |
if (ret) { | |
no_valid_dev_replace_entry_found: | |
+ /* | |
+ * We don't have a replace item or it's corrupted. If there is | |
+ * a replace target, fail the mount. | |
+ */ | |
+ if (btrfs_find_device(fs_info->fs_devices, | |
+ BTRFS_DEV_REPLACE_DEVID, NULL, NULL, false)) { | |
+ btrfs_err(fs_info, | |
+ "found replace target device without a valid replace item"); | |
+ ret = -EUCLEAN; | |
+ goto out; | |
+ } | |
ret = 0; | |
dev_replace->replace_state = | |
BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED; | |
@@ -147,8 +158,19 @@ no_valid_dev_replace_entry_found: | |
case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED: | |
case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED: | |
case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED: | |
- dev_replace->srcdev = NULL; | |
- dev_replace->tgtdev = NULL; | |
+ /* | |
+ * We don't have an active replace item but if there is a | |
+ * replace target, fail the mount. | |
+ */ | |
+ if (btrfs_find_device(fs_info->fs_devices, | |
+ BTRFS_DEV_REPLACE_DEVID, NULL, NULL, false)) { | |
+ btrfs_err(fs_info, | |
+ "replace devid present without an active replace item"); | |
+ ret = -EUCLEAN; | |
+ } else { | |
+ dev_replace->srcdev = NULL; | |
+ dev_replace->tgtdev = NULL; | |
+ } | |
break; | |
case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED: | |
case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED: | |
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c | |
index 2a5dc42f07505..daa1e1638a925 100644 | |
--- a/fs/btrfs/ioctl.c | |
+++ b/fs/btrfs/ioctl.c | |
@@ -1261,6 +1261,7 @@ static int cluster_pages_for_defrag(struct inode *inode, | |
u64 page_start; | |
u64 page_end; | |
u64 page_cnt; | |
+ u64 start = (u64)start_index << PAGE_SHIFT; | |
int ret; | |
int i; | |
int i_done; | |
@@ -1277,8 +1278,7 @@ static int cluster_pages_for_defrag(struct inode *inode, | |
page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1); | |
ret = btrfs_delalloc_reserve_space(BTRFS_I(inode), &data_reserved, | |
- start_index << PAGE_SHIFT, | |
- page_cnt << PAGE_SHIFT); | |
+ start, page_cnt << PAGE_SHIFT); | |
if (ret) | |
return ret; | |
i_done = 0; | |
@@ -1367,8 +1367,7 @@ again: | |
btrfs_mod_outstanding_extents(BTRFS_I(inode), 1); | |
spin_unlock(&BTRFS_I(inode)->lock); | |
btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, | |
- start_index << PAGE_SHIFT, | |
- (page_cnt - i_done) << PAGE_SHIFT, true); | |
+ start, (page_cnt - i_done) << PAGE_SHIFT, true); | |
} | |
@@ -1395,8 +1394,7 @@ out: | |
put_page(pages[i]); | |
} | |
btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, | |
- start_index << PAGE_SHIFT, | |
- page_cnt << PAGE_SHIFT, true); | |
+ start, page_cnt << PAGE_SHIFT, true); | |
btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT); | |
extent_changeset_free(data_reserved); | |
return ret; | |
diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c | |
index 7f03dbe5b609d..78693d3dd15bc 100644 | |
--- a/fs/btrfs/ref-verify.c | |
+++ b/fs/btrfs/ref-verify.c | |
@@ -860,6 +860,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info, | |
"dropping a ref for a root that doesn't have a ref on the block"); | |
dump_block_entry(fs_info, be); | |
dump_ref_action(fs_info, ra); | |
+ kfree(ref); | |
kfree(ra); | |
goto out_unlock; | |
} | |
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c | |
index 4ba1ab9cc76db..5df1cf6bd274e 100644 | |
--- a/fs/btrfs/relocation.c | |
+++ b/fs/btrfs/relocation.c | |
@@ -1646,6 +1646,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc, | |
struct btrfs_root_item *root_item; | |
struct btrfs_path *path; | |
struct extent_buffer *leaf; | |
+ int reserve_level; | |
int level; | |
int max_level; | |
int replaced = 0; | |
@@ -1694,7 +1695,8 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc, | |
* Thus the needed metadata size is at most root_level * nodesize, | |
* and * 2 since we have two trees to COW. | |
*/ | |
- min_reserved = fs_info->nodesize * btrfs_root_level(root_item) * 2; | |
+ reserve_level = max_t(int, 1, btrfs_root_level(root_item)); | |
+ min_reserved = fs_info->nodesize * reserve_level * 2; | |
memset(&next_key, 0, sizeof(next_key)); | |
while (1) { | |
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c | |
index 309734fdd1580..578bbe544c8b5 100644 | |
--- a/fs/btrfs/volumes.c | |
+++ b/fs/btrfs/volumes.c | |
@@ -1064,22 +1064,13 @@ again: | |
continue; | |
} | |
- if (device->devid == BTRFS_DEV_REPLACE_DEVID) { | |
- /* | |
- * In the first step, keep the device which has | |
- * the correct fsid and the devid that is used | |
- * for the dev_replace procedure. | |
- * In the second step, the dev_replace state is | |
- * read from the device tree and it is known | |
- * whether the procedure is really active or | |
- * not, which means whether this device is | |
- * used or whether it should be removed. | |
- */ | |
- if (step == 0 || test_bit(BTRFS_DEV_STATE_REPLACE_TGT, | |
- &device->dev_state)) { | |
- continue; | |
- } | |
- } | |
+ /* | |
+ * We have already validated the presence of BTRFS_DEV_REPLACE_DEVID, | |
+ * in btrfs_init_dev_replace() so just continue. | |
+ */ | |
+ if (device->devid == BTRFS_DEV_REPLACE_DEVID) | |
+ continue; | |
+ | |
if (device->bdev) { | |
blkdev_put(device->bdev, device->mode); | |
device->bdev = NULL; | |
@@ -1088,9 +1079,6 @@ again: | |
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { | |
list_del_init(&device->dev_alloc_list); | |
clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); | |
- if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, | |
- &device->dev_state)) | |
- fs_devices->rw_devices--; | |
} | |
list_del_init(&device->dev_list); | |
fs_devices->num_devices--; | |
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c | |
index 034b3f4fdd3a7..64a64a29f5c79 100644 | |
--- a/fs/ceph/caps.c | |
+++ b/fs/ceph/caps.c | |
@@ -4064,7 +4064,7 @@ void ceph_handle_caps(struct ceph_mds_session *session, | |
vino.snap, inode); | |
mutex_lock(&session->s_mutex); | |
- session->s_seq++; | |
+ inc_session_sequence(session); | |
dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq, | |
(unsigned)seq); | |
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c | |
index 76d8d9495d1d4..b2214679baf4e 100644 | |
--- a/fs/ceph/mds_client.c | |
+++ b/fs/ceph/mds_client.c | |
@@ -4227,7 +4227,7 @@ static void handle_lease(struct ceph_mds_client *mdsc, | |
dname.len, dname.name); | |
mutex_lock(&session->s_mutex); | |
- session->s_seq++; | |
+ inc_session_sequence(session); | |
if (!inode) { | |
dout("handle_lease no inode %llx\n", vino.ino); | |
@@ -4381,28 +4381,48 @@ static void maybe_recover_session(struct ceph_mds_client *mdsc) | |
bool check_session_state(struct ceph_mds_session *s) | |
{ | |
- if (s->s_state == CEPH_MDS_SESSION_CLOSING) { | |
- dout("resending session close request for mds%d\n", | |
- s->s_mds); | |
- request_close_session(s); | |
- return false; | |
- } | |
- if (s->s_ttl && time_after(jiffies, s->s_ttl)) { | |
- if (s->s_state == CEPH_MDS_SESSION_OPEN) { | |
+ switch (s->s_state) { | |
+ case CEPH_MDS_SESSION_OPEN: | |
+ if (s->s_ttl && time_after(jiffies, s->s_ttl)) { | |
s->s_state = CEPH_MDS_SESSION_HUNG; | |
pr_info("mds%d hung\n", s->s_mds); | |
} | |
- } | |
- if (s->s_state == CEPH_MDS_SESSION_NEW || | |
- s->s_state == CEPH_MDS_SESSION_RESTARTING || | |
- s->s_state == CEPH_MDS_SESSION_CLOSED || | |
- s->s_state == CEPH_MDS_SESSION_REJECTED) | |
- /* this mds is failed or recovering, just wait */ | |
+ break; | |
+ case CEPH_MDS_SESSION_CLOSING: | |
+ /* Should never reach this when we're unmounting */ | |
+ WARN_ON_ONCE(true); | |
+ fallthrough; | |
+ case CEPH_MDS_SESSION_NEW: | |
+ case CEPH_MDS_SESSION_RESTARTING: | |
+ case CEPH_MDS_SESSION_CLOSED: | |
+ case CEPH_MDS_SESSION_REJECTED: | |
return false; | |
+ } | |
return true; | |
} | |
+/* | |
+ * If the sequence is incremented while we're waiting on a REQUEST_CLOSE reply, | |
+ * then we need to retransmit that request. | |
+ */ | |
+void inc_session_sequence(struct ceph_mds_session *s) | |
+{ | |
+ lockdep_assert_held(&s->s_mutex); | |
+ | |
+ s->s_seq++; | |
+ | |
+ if (s->s_state == CEPH_MDS_SESSION_CLOSING) { | |
+ int ret; | |
+ | |
+ dout("resending session close request for mds%d\n", s->s_mds); | |
+ ret = request_close_session(s); | |
+ if (ret < 0) | |
+ pr_err("unable to close session to mds%d: %d\n", | |
+ s->s_mds, ret); | |
+ } | |
+} | |
+ | |
/* | |
* delayed work -- periodically trim expired leases, renew caps with mds | |
*/ | |
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h | |
index 658800605bfb4..11f20a4d36bc5 100644 | |
--- a/fs/ceph/mds_client.h | |
+++ b/fs/ceph/mds_client.h | |
@@ -480,6 +480,7 @@ struct ceph_mds_client { | |
extern const char *ceph_mds_op_name(int op); | |
extern bool check_session_state(struct ceph_mds_session *s); | |
+void inc_session_sequence(struct ceph_mds_session *s); | |
extern struct ceph_mds_session * | |
__ceph_lookup_mds_session(struct ceph_mds_client *, int mds); | |
diff --git a/fs/ceph/quota.c b/fs/ceph/quota.c | |
index cc2c4d40b0222..2b213f864c564 100644 | |
--- a/fs/ceph/quota.c | |
+++ b/fs/ceph/quota.c | |
@@ -53,7 +53,7 @@ void ceph_handle_quota(struct ceph_mds_client *mdsc, | |
/* increment msg sequence number */ | |
mutex_lock(&session->s_mutex); | |
- session->s_seq++; | |
+ inc_session_sequence(session); | |
mutex_unlock(&session->s_mutex); | |
/* lookup inode */ | |
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c | |
index 923be9399b21c..cc9a9bfc790a3 100644 | |
--- a/fs/ceph/snap.c | |
+++ b/fs/ceph/snap.c | |
@@ -873,7 +873,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc, | |
ceph_snap_op_name(op), split, trace_len); | |
mutex_lock(&session->s_mutex); | |
- session->s_seq++; | |
+ inc_session_sequence(session); | |
mutex_unlock(&session->s_mutex); | |
down_write(&mdsc->snap_rwsem); | |
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c | |
index 498777d859eb5..9bd03a2310328 100644 | |
--- a/fs/cifs/cifs_unicode.c | |
+++ b/fs/cifs/cifs_unicode.c | |
@@ -488,7 +488,13 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen, | |
else if (map_chars == SFM_MAP_UNI_RSVD) { | |
bool end_of_string; | |
- if (i == srclen - 1) | |
+ /** | |
+ * Remap spaces and periods found at the end of every | |
+ * component of the path. The special cases of '.' and | |
+ * '..' do not need to be dealt with explicitly because | |
+ * they are addressed in namei.c:link_path_walk(). | |
+ **/ | |
+ if ((i == srclen - 1) || (source[i+1] == '\\')) | |
end_of_string = true; | |
else | |
end_of_string = false; | |
diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c | |
index 139d0bed42f88..3e21c0e8adae7 100644 | |
--- a/fs/erofs/inode.c | |
+++ b/fs/erofs/inode.c | |
@@ -107,11 +107,9 @@ static struct page *erofs_read_inode(struct inode *inode, | |
i_gid_write(inode, le32_to_cpu(die->i_gid)); | |
set_nlink(inode, le32_to_cpu(die->i_nlink)); | |
- /* ns timestamp */ | |
- inode->i_mtime.tv_sec = inode->i_ctime.tv_sec = | |
- le64_to_cpu(die->i_ctime); | |
- inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = | |
- le32_to_cpu(die->i_ctime_nsec); | |
+ /* extended inode has its own timestamp */ | |
+ inode->i_ctime.tv_sec = le64_to_cpu(die->i_ctime); | |
+ inode->i_ctime.tv_nsec = le32_to_cpu(die->i_ctime_nsec); | |
inode->i_size = le64_to_cpu(die->i_size); | |
@@ -149,11 +147,9 @@ static struct page *erofs_read_inode(struct inode *inode, | |
i_gid_write(inode, le16_to_cpu(dic->i_gid)); | |
set_nlink(inode, le16_to_cpu(dic->i_nlink)); | |
- /* use build time to derive all file time */ | |
- inode->i_mtime.tv_sec = inode->i_ctime.tv_sec = | |
- sbi->build_time; | |
- inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = | |
- sbi->build_time_nsec; | |
+ /* use build time for compact inodes */ | |
+ inode->i_ctime.tv_sec = sbi->build_time; | |
+ inode->i_ctime.tv_nsec = sbi->build_time_nsec; | |
inode->i_size = le32_to_cpu(dic->i_size); | |
if (erofs_inode_is_data_compressed(vi->datalayout)) | |
@@ -167,6 +163,11 @@ static struct page *erofs_read_inode(struct inode *inode, | |
goto err_out; | |
} | |
+ inode->i_mtime.tv_sec = inode->i_ctime.tv_sec; | |
+ inode->i_atime.tv_sec = inode->i_ctime.tv_sec; | |
+ inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec; | |
+ inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec; | |
+ | |
if (!nblks) | |
/* measure inode.i_blocks as generic filesystems */ | |
inode->i_blocks = roundup(inode->i_size, EROFS_BLKSIZ) >> 9; | |
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c | |
index 6c939def00f95..118d9fe02c4e2 100644 | |
--- a/fs/erofs/zdata.c | |
+++ b/fs/erofs/zdata.c | |
@@ -1080,8 +1080,11 @@ out_allocpage: | |
cond_resched(); | |
goto repeat; | |
} | |
- set_page_private(page, (unsigned long)pcl); | |
- SetPagePrivate(page); | |
+ | |
+ if (tocache) { | |
+ set_page_private(page, (unsigned long)pcl); | |
+ SetPagePrivate(page); | |
+ } | |
out: /* the only exit (for tracing and debugging) */ | |
return page; | |
} | |
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c | |
index 75c97bca08156..9ebdb8684bbb5 100644 | |
--- a/fs/ext4/inline.c | |
+++ b/fs/ext4/inline.c | |
@@ -1880,6 +1880,7 @@ int ext4_inline_data_truncate(struct inode *inode, int *has_inline) | |
ext4_write_lock_xattr(inode, &no_expand); | |
if (!ext4_has_inline_data(inode)) { | |
+ ext4_write_unlock_xattr(inode, &no_expand); | |
*has_inline = 0; | |
ext4_journal_stop(handle); | |
return 0; | |
diff --git a/fs/ext4/super.c b/fs/ext4/super.c | |
index 20378050df09c..0925bc1deee09 100644 | |
--- a/fs/ext4/super.c | |
+++ b/fs/ext4/super.c | |
@@ -1829,8 +1829,8 @@ static const struct mount_opts { | |
{Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA | | |
EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA), | |
MOPT_CLEAR | MOPT_Q}, | |
- {Opt_usrjquota, 0, MOPT_Q}, | |
- {Opt_grpjquota, 0, MOPT_Q}, | |
+ {Opt_usrjquota, 0, MOPT_Q | MOPT_STRING}, | |
+ {Opt_grpjquota, 0, MOPT_Q | MOPT_STRING}, | |
{Opt_offusrjquota, 0, MOPT_Q}, | |
{Opt_offgrpjquota, 0, MOPT_Q}, | |
{Opt_jqfmt_vfsold, QFMT_VFS_OLD, MOPT_QFMT}, | |
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c | |
index 1bba5a9d45fa3..ac306895bbbcc 100644 | |
--- a/fs/gfs2/rgrp.c | |
+++ b/fs/gfs2/rgrp.c | |
@@ -719,9 +719,9 @@ void gfs2_clear_rgrpd(struct gfs2_sbd *sdp) | |
} | |
gfs2_free_clones(rgd); | |
+ return_all_reservations(rgd); | |
kfree(rgd->rd_bits); | |
rgd->rd_bits = NULL; | |
- return_all_reservations(rgd); | |
kmem_cache_free(gfs2_rgrpd_cachep, rgd); | |
} | |
} | |
@@ -1374,6 +1374,9 @@ int gfs2_fitrim(struct file *filp, void __user *argp) | |
if (!capable(CAP_SYS_ADMIN)) | |
return -EPERM; | |
+ if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) | |
+ return -EROFS; | |
+ | |
if (!blk_queue_discard(q)) | |
return -EOPNOTSUPP; | |
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c | |
index 32ae1a7cdaed8..831f6e31d6821 100644 | |
--- a/fs/gfs2/super.c | |
+++ b/fs/gfs2/super.c | |
@@ -732,6 +732,7 @@ restart: | |
gfs2_jindex_free(sdp); | |
/* Take apart glock structures and buffer lists */ | |
gfs2_gl_hash_clear(sdp); | |
+ truncate_inode_pages_final(&sdp->sd_aspace); | |
gfs2_delete_debugfs_file(sdp); | |
/* Unmount the locking protocol */ | |
gfs2_lm_unmount(sdp); | |
diff --git a/fs/io_uring.c b/fs/io_uring.c | |
index 1033e0e18f24f..352bd3ad446be 100644 | |
--- a/fs/io_uring.c | |
+++ b/fs/io_uring.c | |
@@ -952,20 +952,33 @@ static void io_sq_thread_drop_mm(void) | |
if (mm) { | |
kthread_unuse_mm(mm); | |
mmput(mm); | |
+ current->mm = NULL; | |
} | |
} | |
static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx) | |
{ | |
- if (!current->mm) { | |
- if (unlikely(!(ctx->flags & IORING_SETUP_SQPOLL) || | |
- !ctx->sqo_task->mm || | |
- !mmget_not_zero(ctx->sqo_task->mm))) | |
- return -EFAULT; | |
- kthread_use_mm(ctx->sqo_task->mm); | |
+ struct mm_struct *mm; | |
+ | |
+ if (current->mm) | |
+ return 0; | |
+ | |
+ /* Should never happen */ | |
+ if (unlikely(!(ctx->flags & IORING_SETUP_SQPOLL))) | |
+ return -EFAULT; | |
+ | |
+ task_lock(ctx->sqo_task); | |
+ mm = ctx->sqo_task->mm; | |
+ if (unlikely(!mm || !mmget_not_zero(mm))) | |
+ mm = NULL; | |
+ task_unlock(ctx->sqo_task); | |
+ | |
+ if (mm) { | |
+ kthread_use_mm(mm); | |
+ return 0; | |
} | |
- return 0; | |
+ return -EFAULT; | |
} | |
static int io_sq_thread_acquire_mm(struct io_ring_ctx *ctx, | |
@@ -8865,6 +8878,7 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p, | |
* to a power-of-two, if it isn't already. We do NOT impose | |
* any cq vs sq ring sizing. | |
*/ | |
+ p->cq_entries = roundup_pow_of_two(p->cq_entries); | |
if (p->cq_entries < p->sq_entries) | |
return -EINVAL; | |
if (p->cq_entries > IORING_MAX_CQ_ENTRIES) { | |
@@ -8872,7 +8886,6 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p, | |
return -EINVAL; | |
p->cq_entries = IORING_MAX_CQ_ENTRIES; | |
} | |
- p->cq_entries = roundup_pow_of_two(p->cq_entries); | |
} else { | |
p->cq_entries = 2 * p->sq_entries; | |
} | |
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c | |
index b115e7d47fcec..238613443bec2 100644 | |
--- a/fs/iomap/buffered-io.c | |
+++ b/fs/iomap/buffered-io.c | |
@@ -1395,6 +1395,7 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc, | |
WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list)); | |
WARN_ON_ONCE(!PageLocked(page)); | |
WARN_ON_ONCE(PageWriteback(page)); | |
+ WARN_ON_ONCE(PageDirty(page)); | |
/* | |
* We cannot cancel the ioend directly here on error. We may have | |
@@ -1415,21 +1416,9 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc, | |
unlock_page(page); | |
goto done; | |
} | |
- | |
- /* | |
- * If the page was not fully cleaned, we need to ensure that the | |
- * higher layers come back to it correctly. That means we need | |
- * to keep the page dirty, and for WB_SYNC_ALL writeback we need | |
- * to ensure the PAGECACHE_TAG_TOWRITE index mark is not removed | |
- * so another attempt to write this page in this writeback sweep | |
- * will be made. | |
- */ | |
- set_page_writeback_keepwrite(page); | |
- } else { | |
- clear_page_dirty_for_io(page); | |
- set_page_writeback(page); | |
} | |
+ set_page_writeback(page); | |
unlock_page(page); | |
/* | |
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c | |
index 263f02ad8ebf8..472932b9e6bca 100644 | |
--- a/fs/jbd2/checkpoint.c | |
+++ b/fs/jbd2/checkpoint.c | |
@@ -106,6 +106,8 @@ static int __try_to_free_cp_buf(struct journal_head *jh) | |
* for a checkpoint to free up some space in the log. | |
*/ | |
void __jbd2_log_wait_for_space(journal_t *journal) | |
+__acquires(&journal->j_state_lock) | |
+__releases(&journal->j_state_lock) | |
{ | |
int nblocks, space_left; | |
/* assert_spin_locked(&journal->j_state_lock); */ | |
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c | |
index 43985738aa860..d54f04674e8e5 100644 | |
--- a/fs/jbd2/transaction.c | |
+++ b/fs/jbd2/transaction.c | |
@@ -195,8 +195,10 @@ static void wait_transaction_switching(journal_t *journal) | |
DEFINE_WAIT(wait); | |
if (WARN_ON(!journal->j_running_transaction || | |
- journal->j_running_transaction->t_state != T_SWITCH)) | |
+ journal->j_running_transaction->t_state != T_SWITCH)) { | |
+ read_unlock(&journal->j_state_lock); | |
return; | |
+ } | |
prepare_to_wait(&journal->j_wait_transaction_locked, &wait, | |
TASK_UNINTERRUPTIBLE); | |
read_unlock(&journal->j_state_lock); | |
diff --git a/fs/nfs/nfs42xattr.c b/fs/nfs/nfs42xattr.c | |
index 86777996cfecd..55b44a42d6256 100644 | |
--- a/fs/nfs/nfs42xattr.c | |
+++ b/fs/nfs/nfs42xattr.c | |
@@ -1048,8 +1048,10 @@ out4: | |
void nfs4_xattr_cache_exit(void) | |
{ | |
+ unregister_shrinker(&nfs4_xattr_large_entry_shrinker); | |
unregister_shrinker(&nfs4_xattr_entry_shrinker); | |
unregister_shrinker(&nfs4_xattr_cache_shrinker); | |
+ list_lru_destroy(&nfs4_xattr_large_entry_lru); | |
list_lru_destroy(&nfs4_xattr_entry_lru); | |
list_lru_destroy(&nfs4_xattr_cache_lru); | |
kmem_cache_destroy(nfs4_xattr_cache_cachep); | |
diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c | |
index cc50085e151c5..d0ddf90c9be48 100644 | |
--- a/fs/nfs/nfs42xdr.c | |
+++ b/fs/nfs/nfs42xdr.c | |
@@ -179,7 +179,7 @@ | |
1 + nfs4_xattr_name_maxsz + 1) | |
#define decode_setxattr_maxsz (op_decode_hdr_maxsz + decode_change_info_maxsz) | |
#define encode_listxattrs_maxsz (op_encode_hdr_maxsz + 2 + 1) | |
-#define decode_listxattrs_maxsz (op_decode_hdr_maxsz + 2 + 1 + 1) | |
+#define decode_listxattrs_maxsz (op_decode_hdr_maxsz + 2 + 1 + 1 + 1) | |
#define encode_removexattr_maxsz (op_encode_hdr_maxsz + 1 + \ | |
nfs4_xattr_name_maxsz) | |
#define decode_removexattr_maxsz (op_decode_hdr_maxsz + \ | |
@@ -504,7 +504,7 @@ static void encode_listxattrs(struct xdr_stream *xdr, | |
{ | |
__be32 *p; | |
- encode_op_hdr(xdr, OP_LISTXATTRS, decode_listxattrs_maxsz + 1, hdr); | |
+ encode_op_hdr(xdr, OP_LISTXATTRS, decode_listxattrs_maxsz, hdr); | |
p = reserve_space(xdr, 12); | |
if (unlikely(!p)) | |
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c | |
index 84e10aef14175..3ba17b5fc9286 100644 | |
--- a/fs/nfsd/nfs4proc.c | |
+++ b/fs/nfsd/nfs4proc.c | |
@@ -1299,7 +1299,7 @@ nfsd4_cleanup_inter_ssc(struct vfsmount *ss_mnt, struct nfsd_file *src, | |
struct nfsd_file *dst) | |
{ | |
nfs42_ssc_close(src->nf_file); | |
- nfsd_file_put(src); | |
+ /* 'src' is freed by nfsd4_do_async_copy */ | |
nfsd_file_put(dst); | |
mntput(ss_mnt); | |
} | |
@@ -1486,6 +1486,7 @@ do_callback: | |
cb_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL); | |
if (!cb_copy) | |
goto out; | |
+ refcount_set(&cb_copy->refcount, 1); | |
memcpy(&cb_copy->cp_res, ©->cp_res, sizeof(copy->cp_res)); | |
cb_copy->cp_clp = copy->cp_clp; | |
cb_copy->nfserr = copy->nfserr; | |
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c | |
index 1d91dd1e8711c..2febc76e9de70 100644 | |
--- a/fs/ocfs2/super.c | |
+++ b/fs/ocfs2/super.c | |
@@ -1713,6 +1713,7 @@ static void ocfs2_inode_init_once(void *data) | |
oi->ip_blkno = 0ULL; | |
oi->ip_clusters = 0; | |
+ oi->ip_next_orphan = NULL; | |
ocfs2_resv_init_once(&oi->ip_la_data_resv); | |
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c | |
index 852b536551b53..15640015be9d2 100644 | |
--- a/fs/xfs/libxfs/xfs_alloc.c | |
+++ b/fs/xfs/libxfs/xfs_alloc.c | |
@@ -2467,6 +2467,7 @@ xfs_defer_agfl_block( | |
new->xefi_startblock = XFS_AGB_TO_FSB(mp, agno, agbno); | |
new->xefi_blockcount = 1; | |
new->xefi_oinfo = *oinfo; | |
+ new->xefi_skip_discard = false; | |
trace_xfs_agfl_free_defer(mp, agno, 0, agbno, 1); | |
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h | |
index e1bd484e55485..6747e97a79490 100644 | |
--- a/fs/xfs/libxfs/xfs_bmap.h | |
+++ b/fs/xfs/libxfs/xfs_bmap.h | |
@@ -52,9 +52,9 @@ struct xfs_extent_free_item | |
{ | |
xfs_fsblock_t xefi_startblock;/* starting fs block number */ | |
xfs_extlen_t xefi_blockcount;/* number of blocks in extent */ | |
+ bool xefi_skip_discard; | |
struct list_head xefi_list; | |
struct xfs_owner_info xefi_oinfo; /* extent owner */ | |
- bool xefi_skip_discard; | |
}; | |
#define XFS_BMAP_MAX_NMAP 4 | |
diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c | |
index 27c39268c31f7..82117b1ee34cb 100644 | |
--- a/fs/xfs/libxfs/xfs_rmap.c | |
+++ b/fs/xfs/libxfs/xfs_rmap.c | |
@@ -1514,7 +1514,7 @@ xfs_rmap_convert_shared( | |
* record for our insertion point. This will also give us the record for | |
* start block contiguity tests. | |
*/ | |
- error = xfs_rmap_lookup_le_range(cur, bno, owner, offset, flags, | |
+ error = xfs_rmap_lookup_le_range(cur, bno, owner, offset, oldext, | |
&PREV, &i); | |
if (error) | |
goto done; | |
diff --git a/fs/xfs/libxfs/xfs_rmap_btree.c b/fs/xfs/libxfs/xfs_rmap_btree.c | |
index beb81c84a9375..577a66381327c 100644 | |
--- a/fs/xfs/libxfs/xfs_rmap_btree.c | |
+++ b/fs/xfs/libxfs/xfs_rmap_btree.c | |
@@ -243,8 +243,8 @@ xfs_rmapbt_key_diff( | |
else if (y > x) | |
return -1; | |
- x = XFS_RMAP_OFF(be64_to_cpu(kp->rm_offset)); | |
- y = rec->rm_offset; | |
+ x = be64_to_cpu(kp->rm_offset); | |
+ y = xfs_rmap_irec_offset_pack(rec); | |
if (x > y) | |
return 1; | |
else if (y > x) | |
@@ -275,8 +275,8 @@ xfs_rmapbt_diff_two_keys( | |
else if (y > x) | |
return -1; | |
- x = XFS_RMAP_OFF(be64_to_cpu(kp1->rm_offset)); | |
- y = XFS_RMAP_OFF(be64_to_cpu(kp2->rm_offset)); | |
+ x = be64_to_cpu(kp1->rm_offset); | |
+ y = be64_to_cpu(kp2->rm_offset); | |
if (x > y) | |
return 1; | |
else if (y > x) | |
@@ -390,8 +390,8 @@ xfs_rmapbt_keys_inorder( | |
return 1; | |
else if (a > b) | |
return 0; | |
- a = XFS_RMAP_OFF(be64_to_cpu(k1->rmap.rm_offset)); | |
- b = XFS_RMAP_OFF(be64_to_cpu(k2->rmap.rm_offset)); | |
+ a = be64_to_cpu(k1->rmap.rm_offset); | |
+ b = be64_to_cpu(k2->rmap.rm_offset); | |
if (a <= b) | |
return 1; | |
return 0; | |
@@ -420,8 +420,8 @@ xfs_rmapbt_recs_inorder( | |
return 1; | |
else if (a > b) | |
return 0; | |
- a = XFS_RMAP_OFF(be64_to_cpu(r1->rmap.rm_offset)); | |
- b = XFS_RMAP_OFF(be64_to_cpu(r2->rmap.rm_offset)); | |
+ a = be64_to_cpu(r1->rmap.rm_offset); | |
+ b = be64_to_cpu(r2->rmap.rm_offset); | |
if (a <= b) | |
return 1; | |
return 0; | |
diff --git a/fs/xfs/scrub/bmap.c b/fs/xfs/scrub/bmap.c | |
index 955302e7cdde9..412e2ec55e388 100644 | |
--- a/fs/xfs/scrub/bmap.c | |
+++ b/fs/xfs/scrub/bmap.c | |
@@ -113,6 +113,8 @@ xchk_bmap_get_rmap( | |
if (info->whichfork == XFS_ATTR_FORK) | |
rflags |= XFS_RMAP_ATTR_FORK; | |
+ if (irec->br_state == XFS_EXT_UNWRITTEN) | |
+ rflags |= XFS_RMAP_UNWRITTEN; | |
/* | |
* CoW staging extents are owned (on disk) by the refcountbt, so | |
diff --git a/fs/xfs/scrub/inode.c b/fs/xfs/scrub/inode.c | |
index 6d483ab29e639..1bea029b634a6 100644 | |
--- a/fs/xfs/scrub/inode.c | |
+++ b/fs/xfs/scrub/inode.c | |
@@ -121,8 +121,7 @@ xchk_inode_flags( | |
goto bad; | |
/* rt flags require rt device */ | |
- if ((flags & (XFS_DIFLAG_REALTIME | XFS_DIFLAG_RTINHERIT)) && | |
- !mp->m_rtdev_targp) | |
+ if ((flags & XFS_DIFLAG_REALTIME) && !mp->m_rtdev_targp) | |
goto bad; | |
/* new rt bitmap flag only valid for rbmino */ | |
diff --git a/fs/xfs/scrub/refcount.c b/fs/xfs/scrub/refcount.c | |
index beaeb6fa31197..dd672e6bbc75c 100644 | |
--- a/fs/xfs/scrub/refcount.c | |
+++ b/fs/xfs/scrub/refcount.c | |
@@ -170,7 +170,6 @@ xchk_refcountbt_process_rmap_fragments( | |
*/ | |
INIT_LIST_HEAD(&worklist); | |
rbno = NULLAGBLOCK; | |
- nr = 1; | |
/* Make sure the fragments actually /are/ in agbno order. */ | |
bno = 0; | |
@@ -184,15 +183,14 @@ xchk_refcountbt_process_rmap_fragments( | |
* Find all the rmaps that start at or before the refc extent, | |
* and put them on the worklist. | |
*/ | |
+ nr = 0; | |
list_for_each_entry_safe(frag, n, &refchk->fragments, list) { | |
- if (frag->rm.rm_startblock > refchk->bno) | |
- goto done; | |
+ if (frag->rm.rm_startblock > refchk->bno || nr > target_nr) | |
+ break; | |
bno = frag->rm.rm_startblock + frag->rm.rm_blockcount; | |
if (bno < rbno) | |
rbno = bno; | |
list_move_tail(&frag->list, &worklist); | |
- if (nr == target_nr) | |
- break; | |
nr++; | |
} | |
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c | |
index b35611882ff9c..e4210779cd79e 100644 | |
--- a/fs/xfs/xfs_aops.c | |
+++ b/fs/xfs/xfs_aops.c | |
@@ -346,8 +346,8 @@ xfs_map_blocks( | |
ssize_t count = i_blocksize(inode); | |
xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset); | |
xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, offset + count); | |
- xfs_fileoff_t cow_fsb = NULLFILEOFF; | |
- int whichfork = XFS_DATA_FORK; | |
+ xfs_fileoff_t cow_fsb; | |
+ int whichfork; | |
struct xfs_bmbt_irec imap; | |
struct xfs_iext_cursor icur; | |
int retries = 0; | |
@@ -381,6 +381,8 @@ xfs_map_blocks( | |
* landed in a hole and we skip the block. | |
*/ | |
retry: | |
+ cow_fsb = NULLFILEOFF; | |
+ whichfork = XFS_DATA_FORK; | |
xfs_ilock(ip, XFS_ILOCK_SHARED); | |
ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE || | |
(ip->i_df.if_flags & XFS_IFEXTENTS)); | |
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c | |
index 80a13c8561d85..bf93a7152181c 100644 | |
--- a/fs/xfs/xfs_iops.c | |
+++ b/fs/xfs/xfs_iops.c | |
@@ -911,6 +911,16 @@ xfs_setattr_size( | |
error = iomap_zero_range(inode, oldsize, newsize - oldsize, | |
&did_zeroing, &xfs_buffered_write_iomap_ops); | |
} else { | |
+ /* | |
+ * iomap won't detect a dirty page over an unwritten block (or a | |
+ * cow block over a hole) and subsequently skips zeroing the | |
+ * newly post-EOF portion of the page. Flush the new EOF to | |
+ * convert the block before the pagecache truncate. | |
+ */ | |
+ error = filemap_write_and_wait_range(inode->i_mapping, newsize, | |
+ newsize); | |
+ if (error) | |
+ return error; | |
error = iomap_truncate_page(inode, newsize, &did_zeroing, | |
&xfs_buffered_write_iomap_ops); | |
} | |
diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c | |
index b101feb2aab45..f3082a957d5e1 100644 | |
--- a/fs/xfs/xfs_pnfs.c | |
+++ b/fs/xfs/xfs_pnfs.c | |
@@ -134,7 +134,7 @@ xfs_fs_map_blocks( | |
goto out_unlock; | |
error = invalidate_inode_pages2(inode->i_mapping); | |
if (WARN_ON_ONCE(error)) | |
- return error; | |
+ goto out_unlock; | |
end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + length); | |
offset_fsb = XFS_B_TO_FSBT(mp, offset); | |
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h | |
index 15c706fb0a377..0e50ba3e88d71 100644 | |
--- a/include/linux/arm-smccc.h | |
+++ b/include/linux/arm-smccc.h | |
@@ -86,6 +86,8 @@ | |
ARM_SMCCC_SMC_32, \ | |
0, 0x7fff) | |
+#define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED 1 | |
+ | |
/* Paravirtualised time calls (defined by ARM DEN0057A) */ | |
#define ARM_SMCCC_HV_PV_TIME_FEATURES \ | |
ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ | |
diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h | |
index 900b9f4e06054..fc61cf4eff1c9 100644 | |
--- a/include/linux/can/skb.h | |
+++ b/include/linux/can/skb.h | |
@@ -61,21 +61,17 @@ static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk) | |
*/ | |
static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb) | |
{ | |
- if (skb_shared(skb)) { | |
- struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); | |
+ struct sk_buff *nskb; | |
- if (likely(nskb)) { | |
- can_skb_set_owner(nskb, skb->sk); | |
- consume_skb(skb); | |
- return nskb; | |
- } else { | |
- kfree_skb(skb); | |
- return NULL; | |
- } | |
+ nskb = skb_clone(skb, GFP_ATOMIC); | |
+ if (unlikely(!nskb)) { | |
+ kfree_skb(skb); | |
+ return NULL; | |
} | |
- /* we can assume to have an unshared skb with proper owner */ | |
- return skb; | |
+ can_skb_set_owner(nskb, skb->sk); | |
+ consume_skb(skb); | |
+ return nskb; | |
} | |
#endif /* !_CAN_SKB_H */ | |
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h | |
index 7a3769040d7dc..3017ebd400546 100644 | |
--- a/include/linux/compiler-gcc.h | |
+++ b/include/linux/compiler-gcc.h | |
@@ -175,5 +175,3 @@ | |
#else | |
#define __diag_GCC_8(s) | |
#endif | |
- | |
-#define __no_fgcse __attribute__((optimize("-fno-gcse"))) | |
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h | |
index 6e390d58a9f8c..ac3fa37a84f94 100644 | |
--- a/include/linux/compiler_types.h | |
+++ b/include/linux/compiler_types.h | |
@@ -247,10 +247,6 @@ struct ftrace_likely_data { | |
#define asm_inline asm | |
#endif | |
-#ifndef __no_fgcse | |
-# define __no_fgcse | |
-#endif | |
- | |
/* Are two types/vars the same type (ignoring qualifiers)? */ | |
#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) | |
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h | |
index 2e900fd461f2e..e7170bf019eb8 100644 | |
--- a/include/linux/cpufreq.h | |
+++ b/include/linux/cpufreq.h | |
@@ -109,6 +109,12 @@ struct cpufreq_policy { | |
bool fast_switch_possible; | |
bool fast_switch_enabled; | |
+ /* | |
+ * Set if the CPUFREQ_GOV_STRICT_TARGET flag is set for the current | |
+ * governor. | |
+ */ | |
+ bool strict_target; | |
+ | |
/* | |
* Preferred average time interval between consecutive invocations of | |
* the driver to set the frequency for this policy. To be set by the | |
@@ -565,12 +571,20 @@ struct cpufreq_governor { | |
char *buf); | |
int (*store_setspeed) (struct cpufreq_policy *policy, | |
unsigned int freq); | |
- /* For governors which change frequency dynamically by themselves */ | |
- bool dynamic_switching; | |
struct list_head governor_list; | |
struct module *owner; | |
+ u8 flags; | |
}; | |
+/* Governor flags */ | |
+ | |
+/* For governors which change frequency dynamically by themselves */ | |
+#define CPUFREQ_GOV_DYNAMIC_SWITCHING BIT(0) | |
+ | |
+/* For governors wanting the target frequency to be set exactly */ | |
+#define CPUFREQ_GOV_STRICT_TARGET BIT(1) | |
+ | |
+ | |
/* Pass a target to the cpufreq driver */ | |
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy, | |
unsigned int target_freq); | |
diff --git a/include/linux/genhd.h b/include/linux/genhd.h | |
index 4ab853461dff2..475b286ea10b5 100644 | |
--- a/include/linux/genhd.h | |
+++ b/include/linux/genhd.h | |
@@ -315,7 +315,7 @@ static inline int get_disk_ro(struct gendisk *disk) | |
extern void disk_block_events(struct gendisk *disk); | |
extern void disk_unblock_events(struct gendisk *disk); | |
extern void disk_flush_events(struct gendisk *disk, unsigned int mask); | |
-extern void set_capacity_revalidate_and_notify(struct gendisk *disk, | |
+extern bool set_capacity_revalidate_and_notify(struct gendisk *disk, | |
sector_t size, bool revalidate); | |
extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask); | |
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h | |
index d0b036123c6ab..fa635207fe96d 100644 | |
--- a/include/linux/memcontrol.h | |
+++ b/include/linux/memcontrol.h | |
@@ -897,12 +897,19 @@ static inline void count_memcg_event_mm(struct mm_struct *mm, | |
static inline void memcg_memory_event(struct mem_cgroup *memcg, | |
enum memcg_memory_event event) | |
{ | |
+ bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX || | |
+ event == MEMCG_SWAP_FAIL; | |
+ | |
atomic_long_inc(&memcg->memory_events_local[event]); | |
- cgroup_file_notify(&memcg->events_local_file); | |
+ if (!swap_event) | |
+ cgroup_file_notify(&memcg->events_local_file); | |
do { | |
atomic_long_inc(&memcg->memory_events[event]); | |
- cgroup_file_notify(&memcg->events_file); | |
+ if (swap_event) | |
+ cgroup_file_notify(&memcg->swap_events_file); | |
+ else | |
+ cgroup_file_notify(&memcg->events_file); | |
if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) | |
break; | |
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h | |
index 89016d08f6a27..f6267e2883f26 100644 | |
--- a/include/linux/netfilter/nfnetlink.h | |
+++ b/include/linux/netfilter/nfnetlink.h | |
@@ -24,6 +24,12 @@ struct nfnl_callback { | |
const u_int16_t attr_count; /* number of nlattr's */ | |
}; | |
+enum nfnl_abort_action { | |
+ NFNL_ABORT_NONE = 0, | |
+ NFNL_ABORT_AUTOLOAD, | |
+ NFNL_ABORT_VALIDATE, | |
+}; | |
+ | |
struct nfnetlink_subsystem { | |
const char *name; | |
__u8 subsys_id; /* nfnetlink subsystem ID */ | |
@@ -31,7 +37,8 @@ struct nfnetlink_subsystem { | |
const struct nfnl_callback *cb; /* callback for individual types */ | |
struct module *owner; | |
int (*commit)(struct net *net, struct sk_buff *skb); | |
- int (*abort)(struct net *net, struct sk_buff *skb, bool autoload); | |
+ int (*abort)(struct net *net, struct sk_buff *skb, | |
+ enum nfnl_abort_action action); | |
void (*cleanup)(struct net *net); | |
bool (*valid_genid)(struct net *net, u32 genid); | |
}; | |
diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h | |
index 082e2c41b7ff9..5b70ca868bb19 100644 | |
--- a/include/linux/netfilter_ipv4.h | |
+++ b/include/linux/netfilter_ipv4.h | |
@@ -16,7 +16,7 @@ struct ip_rt_info { | |
u_int32_t mark; | |
}; | |
-int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned addr_type); | |
+int ip_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb, unsigned addr_type); | |
struct nf_queue_entry; | |
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h | |
index 9b67394471e1c..48314ade1506f 100644 | |
--- a/include/linux/netfilter_ipv6.h | |
+++ b/include/linux/netfilter_ipv6.h | |
@@ -42,7 +42,7 @@ struct nf_ipv6_ops { | |
#if IS_MODULE(CONFIG_IPV6) | |
int (*chk_addr)(struct net *net, const struct in6_addr *addr, | |
const struct net_device *dev, int strict); | |
- int (*route_me_harder)(struct net *net, struct sk_buff *skb); | |
+ int (*route_me_harder)(struct net *net, struct sock *sk, struct sk_buff *skb); | |
int (*dev_get_saddr)(struct net *net, const struct net_device *dev, | |
const struct in6_addr *daddr, unsigned int srcprefs, | |
struct in6_addr *saddr); | |
@@ -143,9 +143,9 @@ static inline int nf_br_ip6_fragment(struct net *net, struct sock *sk, | |
#endif | |
} | |
-int ip6_route_me_harder(struct net *net, struct sk_buff *skb); | |
+int ip6_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb); | |
-static inline int nf_ip6_route_me_harder(struct net *net, struct sk_buff *skb) | |
+static inline int nf_ip6_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb) | |
{ | |
#if IS_MODULE(CONFIG_IPV6) | |
const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); | |
@@ -153,9 +153,9 @@ static inline int nf_ip6_route_me_harder(struct net *net, struct sk_buff *skb) | |
if (!v6_ops) | |
return -EHOSTUNREACH; | |
- return v6_ops->route_me_harder(net, skb); | |
+ return v6_ops->route_me_harder(net, sk, skb); | |
#elif IS_BUILTIN(CONFIG_IPV6) | |
- return ip6_route_me_harder(net, skb); | |
+ return ip6_route_me_harder(net, sk, skb); | |
#else | |
return -EHOSTUNREACH; | |
#endif | |
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h | |
index 65d7dfbbc9cd7..ca2f27b9f919d 100644 | |
--- a/include/trace/events/sunrpc.h | |
+++ b/include/trace/events/sunrpc.h | |
@@ -607,10 +607,10 @@ TRACE_EVENT(rpc_xdr_overflow, | |
__field(size_t, tail_len) | |
__field(unsigned int, page_len) | |
__field(unsigned int, len) | |
- __string(progname, | |
- xdr->rqst->rq_task->tk_client->cl_program->name) | |
- __string(procedure, | |
- xdr->rqst->rq_task->tk_msg.rpc_proc->p_name) | |
+ __string(progname, xdr->rqst ? | |
+ xdr->rqst->rq_task->tk_client->cl_program->name : "unknown") | |
+ __string(procedure, xdr->rqst ? | |
+ xdr->rqst->rq_task->tk_msg.rpc_proc->p_name : "unknown") | |
), | |
TP_fast_assign( | |
diff --git a/init/main.c b/init/main.c | |
index e880b4ecb3147..ddfd6421c70aa 100644 | |
--- a/init/main.c | |
+++ b/init/main.c | |
@@ -267,14 +267,24 @@ static void * __init get_boot_config_from_initrd(u32 *_size, u32 *_csum) | |
u32 size, csum; | |
char *data; | |
u32 *hdr; | |
+ int i; | |
if (!initrd_end) | |
return NULL; | |
data = (char *)initrd_end - BOOTCONFIG_MAGIC_LEN; | |
- if (memcmp(data, BOOTCONFIG_MAGIC, BOOTCONFIG_MAGIC_LEN)) | |
- return NULL; | |
+ /* | |
+ * Since Grub may align the size of initrd to 4, we must | |
+ * check the preceding 3 bytes as well. | |
+ */ | |
+ for (i = 0; i < 4; i++) { | |
+ if (!memcmp(data, BOOTCONFIG_MAGIC, BOOTCONFIG_MAGIC_LEN)) | |
+ goto found; | |
+ data--; | |
+ } | |
+ return NULL; | |
+found: | |
hdr = (u32 *)(data - 8); | |
size = hdr[0]; | |
csum = hdr[1]; | |
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile | |
index e6eb9c0402dab..0cc0de72163dc 100644 | |
--- a/kernel/bpf/Makefile | |
+++ b/kernel/bpf/Makefile | |
@@ -1,6 +1,10 @@ | |
# SPDX-License-Identifier: GPL-2.0 | |
obj-y := core.o | |
-CFLAGS_core.o += $(call cc-disable-warning, override-init) | |
+ifneq ($(CONFIG_BPF_JIT_ALWAYS_ON),y) | |
+# ___bpf_prog_run() needs GCSE disabled on x86; see 3193c0836f203 for details | |
+cflags-nogcse-$(CONFIG_X86)$(CONFIG_CC_IS_GCC) := -fno-gcse | |
+endif | |
+CFLAGS_core.o += $(call cc-disable-warning, override-init) $(cflags-nogcse-yy) | |
obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o bpf_iter.o map_iter.o task_iter.o prog_iter.o | |
obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o | |
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c | |
index ed0b3578867c0..3cb26e82549ac 100644 | |
--- a/kernel/bpf/core.c | |
+++ b/kernel/bpf/core.c | |
@@ -1364,7 +1364,7 @@ u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr) | |
* | |
* Decode and execute eBPF instructions. | |
*/ | |
-static u64 __noreorder __no_fgcse ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack) | |
+static u64 __noreorder ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack) | |
{ | |
#define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y | |
#define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z | |
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c | |
index 7df28a45c66bf..15364543b2c0f 100644 | |
--- a/kernel/bpf/hashtab.c | |
+++ b/kernel/bpf/hashtab.c | |
@@ -821,6 +821,32 @@ static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr, | |
} | |
} | |
+static void pcpu_init_value(struct bpf_htab *htab, void __percpu *pptr, | |
+ void *value, bool onallcpus) | |
+{ | |
+ /* When using prealloc and not setting the initial value on all cpus, | |
+ * zero-fill element values for other cpus (just as what happens when | |
+ * not using prealloc). Otherwise, bpf program has no way to ensure | |
+ * known initial values for cpus other than current one | |
+ * (onallcpus=false always when coming from bpf prog). | |
+ */ | |
+ if (htab_is_prealloc(htab) && !onallcpus) { | |
+ u32 size = round_up(htab->map.value_size, 8); | |
+ int current_cpu = raw_smp_processor_id(); | |
+ int cpu; | |
+ | |
+ for_each_possible_cpu(cpu) { | |
+ if (cpu == current_cpu) | |
+ bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value, | |
+ size); | |
+ else | |
+ memset(per_cpu_ptr(pptr, cpu), 0, size); | |
+ } | |
+ } else { | |
+ pcpu_copy_value(htab, pptr, value, onallcpus); | |
+ } | |
+} | |
+ | |
static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab) | |
{ | |
return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS && | |
@@ -891,7 +917,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, | |
} | |
} | |
- pcpu_copy_value(htab, pptr, value, onallcpus); | |
+ pcpu_init_value(htab, pptr, value, onallcpus); | |
if (!prealloc) | |
htab_elem_set_ptr(l_new, key_size, pptr); | |
@@ -1183,7 +1209,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key, | |
pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size), | |
value, onallcpus); | |
} else { | |
- pcpu_copy_value(htab, htab_elem_get_ptr(l_new, key_size), | |
+ pcpu_init_value(htab, htab_elem_get_ptr(l_new, key_size), | |
value, onallcpus); | |
hlist_nulls_add_head_rcu(&l_new->hash_node, head); | |
l_new = NULL; | |
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c | |
index c19379fabd200..9924214df60aa 100644 | |
--- a/kernel/dma/swiotlb.c | |
+++ b/kernel/dma/swiotlb.c | |
@@ -231,6 +231,7 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) | |
io_tlb_orig_addr[i] = INVALID_PHYS_ADDR; | |
} | |
io_tlb_index = 0; | |
+ no_iotlb_memory = false; | |
if (verbose) | |
swiotlb_print_info(); | |
@@ -262,9 +263,11 @@ swiotlb_init(int verbose) | |
if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose)) | |
return; | |
- if (io_tlb_start) | |
+ if (io_tlb_start) { | |
memblock_free_early(io_tlb_start, | |
PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); | |
+ io_tlb_start = 0; | |
+ } | |
pr_warn("Cannot allocate buffer"); | |
no_iotlb_memory = true; | |
} | |
@@ -362,6 +365,7 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs) | |
io_tlb_orig_addr[i] = INVALID_PHYS_ADDR; | |
} | |
io_tlb_index = 0; | |
+ no_iotlb_memory = false; | |
swiotlb_print_info(); | |
diff --git a/kernel/events/core.c b/kernel/events/core.c | |
index 98a603098f23e..a06ac60d346f1 100644 | |
--- a/kernel/events/core.c | |
+++ b/kernel/events/core.c | |
@@ -2565,11 +2565,8 @@ group_sched_in(struct perf_event *group_event, | |
pmu->start_txn(pmu, PERF_PMU_TXN_ADD); | |
- if (event_sched_in(group_event, cpuctx, ctx)) { | |
- pmu->cancel_txn(pmu); | |
- perf_mux_hrtimer_restart(cpuctx); | |
- return -EAGAIN; | |
- } | |
+ if (event_sched_in(group_event, cpuctx, ctx)) | |
+ goto error; | |
/* | |
* Schedule in siblings as one group (if any): | |
@@ -2598,10 +2595,8 @@ group_error: | |
} | |
event_sched_out(group_event, cpuctx, ctx); | |
+error: | |
pmu->cancel_txn(pmu); | |
- | |
- perf_mux_hrtimer_restart(cpuctx); | |
- | |
return -EAGAIN; | |
} | |
@@ -3657,6 +3652,7 @@ static int merge_sched_in(struct perf_event *event, void *data) | |
*can_add_hw = 0; | |
ctx->rotate_necessary = 1; | |
+ perf_mux_hrtimer_restart(cpuctx); | |
} | |
return 0; | |
diff --git a/kernel/events/internal.h b/kernel/events/internal.h | |
index fcbf5616a4411..402054e755f27 100644 | |
--- a/kernel/events/internal.h | |
+++ b/kernel/events/internal.h | |
@@ -211,7 +211,7 @@ static inline int get_recursion_context(int *recursion) | |
rctx = 3; | |
else if (in_irq()) | |
rctx = 2; | |
- else if (in_softirq()) | |
+ else if (in_serving_softirq()) | |
rctx = 1; | |
else | |
rctx = 0; | |
diff --git a/kernel/exit.c b/kernel/exit.c | |
index 733e80f334e71..f5d2333cb5db1 100644 | |
--- a/kernel/exit.c | |
+++ b/kernel/exit.c | |
@@ -454,7 +454,10 @@ static void exit_mm(void) | |
mmap_read_unlock(mm); | |
self.task = current; | |
- self.next = xchg(&core_state->dumper.next, &self); | |
+ if (self.task->flags & PF_SIGNALED) | |
+ self.next = xchg(&core_state->dumper.next, &self); | |
+ else | |
+ self.task = NULL; | |
/* | |
* Implies mb(), the result of xchg() must be visible | |
* to core_state->dumper. | |
diff --git a/kernel/futex.c b/kernel/futex.c | |
index 6c00c0952313a..139953d456e33 100644 | |
--- a/kernel/futex.c | |
+++ b/kernel/futex.c | |
@@ -788,8 +788,9 @@ static void put_pi_state(struct futex_pi_state *pi_state) | |
*/ | |
if (pi_state->owner) { | |
struct task_struct *owner; | |
+ unsigned long flags; | |
- raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); | |
+ raw_spin_lock_irqsave(&pi_state->pi_mutex.wait_lock, flags); | |
owner = pi_state->owner; | |
if (owner) { | |
raw_spin_lock(&owner->pi_lock); | |
@@ -797,7 +798,7 @@ static void put_pi_state(struct futex_pi_state *pi_state) | |
raw_spin_unlock(&owner->pi_lock); | |
} | |
rt_mutex_proxy_unlock(&pi_state->pi_mutex, owner); | |
- raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); | |
+ raw_spin_unlock_irqrestore(&pi_state->pi_mutex.wait_lock, flags); | |
} | |
if (current->pi_state_cache) { | |
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig | |
index 10a5aff4eecc8..164a031cfdb66 100644 | |
--- a/kernel/irq/Kconfig | |
+++ b/kernel/irq/Kconfig | |
@@ -82,6 +82,7 @@ config IRQ_FASTEOI_HIERARCHY_HANDLERS | |
# Generic IRQ IPI support | |
config GENERIC_IRQ_IPI | |
bool | |
+ select IRQ_DOMAIN_HIERARCHY | |
# Generic MSI interrupt support | |
config GENERIC_MSI_IRQ | |
diff --git a/kernel/reboot.c b/kernel/reboot.c | |
index e7b78d5ae1abf..af6f23d8bea16 100644 | |
--- a/kernel/reboot.c | |
+++ b/kernel/reboot.c | |
@@ -551,22 +551,22 @@ static int __init reboot_setup(char *str) | |
break; | |
case 's': | |
- { | |
- int rc; | |
- | |
- if (isdigit(*(str+1))) { | |
- rc = kstrtoint(str+1, 0, &reboot_cpu); | |
- if (rc) | |
- return rc; | |
- } else if (str[1] == 'm' && str[2] == 'p' && | |
- isdigit(*(str+3))) { | |
- rc = kstrtoint(str+3, 0, &reboot_cpu); | |
- if (rc) | |
- return rc; | |
- } else | |
+ if (isdigit(*(str+1))) | |
+ reboot_cpu = simple_strtoul(str+1, NULL, 0); | |
+ else if (str[1] == 'm' && str[2] == 'p' && | |
+ isdigit(*(str+3))) | |
+ reboot_cpu = simple_strtoul(str+3, NULL, 0); | |
+ else | |
*mode = REBOOT_SOFT; | |
+ if (reboot_cpu >= num_possible_cpus()) { | |
+ pr_err("Ignoring the CPU number in reboot= option. " | |
+ "CPU %d exceeds possible cpu number %d\n", | |
+ reboot_cpu, num_possible_cpus()); | |
+ reboot_cpu = 0; | |
+ break; | |
+ } | |
break; | |
- } | |
+ | |
case 'g': | |
*mode = REBOOT_GPIO; | |
break; | |
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c | |
index 59d511e326730..4e4d052ebaf38 100644 | |
--- a/kernel/sched/cpufreq_schedutil.c | |
+++ b/kernel/sched/cpufreq_schedutil.c | |
@@ -896,7 +896,7 @@ static void sugov_limits(struct cpufreq_policy *policy) | |
struct cpufreq_governor schedutil_gov = { | |
.name = "schedutil", | |
.owner = THIS_MODULE, | |
- .dynamic_switching = true, | |
+ .flags = CPUFREQ_GOV_DYNAMIC_SWITCHING, | |
.init = sugov_init, | |
.exit = sugov_exit, | |
.start = sugov_start, | |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c | |
index 6e2fb7dc41bf3..1c76a0faf3cd1 100644 | |
--- a/kernel/trace/trace.c | |
+++ b/kernel/trace/trace.c | |
@@ -2611,7 +2611,7 @@ trace_event_buffer_lock_reserve(struct trace_buffer **current_rb, | |
/* | |
* If tracing is off, but we have triggers enabled | |
* we still need to look at the event data. Use the temp_buffer | |
- * to store the trace event for the tigger to use. It's recusive | |
+ * to store the trace event for the trigger to use. It's recursive | |
* safe and will not be recorded anywhere. | |
*/ | |
if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) { | |
@@ -2934,7 +2934,7 @@ static void __ftrace_trace_stack(struct trace_buffer *buffer, | |
stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1; | |
/* This should never happen. If it does, yell once and skip */ | |
- if (WARN_ON_ONCE(stackidx > FTRACE_KSTACK_NESTING)) | |
+ if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING)) | |
goto out; | |
/* | |
diff --git a/kernel/watchdog.c b/kernel/watchdog.c | |
index 5abb5b22ad130..71109065bd8eb 100644 | |
--- a/kernel/watchdog.c | |
+++ b/kernel/watchdog.c | |
@@ -44,8 +44,6 @@ int __read_mostly soft_watchdog_user_enabled = 1; | |
int __read_mostly watchdog_thresh = 10; | |
static int __read_mostly nmi_watchdog_available; | |
-static struct cpumask watchdog_allowed_mask __read_mostly; | |
- | |
struct cpumask watchdog_cpumask __read_mostly; | |
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask); | |
@@ -162,6 +160,8 @@ static void lockup_detector_update_enable(void) | |
int __read_mostly sysctl_softlockup_all_cpu_backtrace; | |
#endif | |
+static struct cpumask watchdog_allowed_mask __read_mostly; | |
+ | |
/* Global variables, exported for sysctl */ | |
unsigned int __read_mostly softlockup_panic = | |
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE; | |
diff --git a/mm/compaction.c b/mm/compaction.c | |
index 176dcded298ee..cc1a7f600a865 100644 | |
--- a/mm/compaction.c | |
+++ b/mm/compaction.c | |
@@ -818,6 +818,10 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, | |
* delay for some time until fewer pages are isolated | |
*/ | |
while (unlikely(too_many_isolated(pgdat))) { | |
+ /* stop isolation if there are still pages not migrated */ | |
+ if (cc->nr_migratepages) | |
+ return 0; | |
+ | |
/* async migration should just abort */ | |
if (cc->mode == MIGRATE_ASYNC) | |
return 0; | |
@@ -1013,8 +1017,8 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, | |
isolate_success: | |
list_add(&page->lru, &cc->migratepages); | |
- cc->nr_migratepages++; | |
- nr_isolated++; | |
+ cc->nr_migratepages += compound_nr(page); | |
+ nr_isolated += compound_nr(page); | |
/* | |
* Avoid isolating too much unless this block is being | |
@@ -1022,7 +1026,7 @@ isolate_success: | |
* or a lock is contended. For contention, isolate quickly to | |
* potentially remove one source of contention. | |
*/ | |
- if (cc->nr_migratepages == COMPACT_CLUSTER_MAX && | |
+ if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX && | |
!cc->rescan && !cc->contended) { | |
++low_pfn; | |
break; | |
@@ -1133,7 +1137,7 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn, | |
if (!pfn) | |
break; | |
- if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) | |
+ if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX) | |
break; | |
} | |
diff --git a/mm/gup.c b/mm/gup.c | |
index e869c634cc9a6..094e8ce99acb7 100644 | |
--- a/mm/gup.c | |
+++ b/mm/gup.c | |
@@ -1637,8 +1637,11 @@ check_again: | |
/* | |
* drop the above get_user_pages reference. | |
*/ | |
- for (i = 0; i < nr_pages; i++) | |
- put_page(pages[i]); | |
+ if (gup_flags & FOLL_PIN) | |
+ unpin_user_pages(pages, nr_pages); | |
+ else | |
+ for (i = 0; i < nr_pages; i++) | |
+ put_page(pages[i]); | |
if (migrate_pages(&cma_page_list, alloc_migration_target, NULL, | |
(unsigned long)&mtc, MIGRATE_SYNC, MR_CONTIG_RANGE)) { | |
@@ -1718,8 +1721,11 @@ static long __gup_longterm_locked(struct mm_struct *mm, | |
goto out; | |
if (check_dax_vmas(vmas_tmp, rc)) { | |
- for (i = 0; i < rc; i++) | |
- put_page(pages[i]); | |
+ if (gup_flags & FOLL_PIN) | |
+ unpin_user_pages(pages, rc); | |
+ else | |
+ for (i = 0; i < rc; i++) | |
+ put_page(pages[i]); | |
rc = -EOPNOTSUPP; | |
goto out; | |
} | |
diff --git a/mm/hugetlb.c b/mm/hugetlb.c | |
index b853a11de14f2..4a579b8903290 100644 | |
--- a/mm/hugetlb.c | |
+++ b/mm/hugetlb.c | |
@@ -1578,104 +1578,24 @@ int PageHeadHuge(struct page *page_head) | |
return page_head[1].compound_dtor == HUGETLB_PAGE_DTOR; | |
} | |
-/* | |
- * Find address_space associated with hugetlbfs page. | |
- * Upon entry page is locked and page 'was' mapped although mapped state | |
- * could change. If necessary, use anon_vma to find vma and associated | |
- * address space. The returned mapping may be stale, but it can not be | |
- * invalid as page lock (which is held) is required to destroy mapping. | |
- */ | |
-static struct address_space *_get_hugetlb_page_mapping(struct page *hpage) | |
-{ | |
- struct anon_vma *anon_vma; | |
- pgoff_t pgoff_start, pgoff_end; | |
- struct anon_vma_chain *avc; | |
- struct address_space *mapping = page_mapping(hpage); | |
- | |
- /* Simple file based mapping */ | |
- if (mapping) | |
- return mapping; | |
- | |
- /* | |
- * Even anonymous hugetlbfs mappings are associated with an | |
- * underlying hugetlbfs file (see hugetlb_file_setup in mmap | |
- * code). Find a vma associated with the anonymous vma, and | |
- * use the file pointer to get address_space. | |
- */ | |
- anon_vma = page_lock_anon_vma_read(hpage); | |
- if (!anon_vma) | |
- return mapping; /* NULL */ | |
- | |
- /* Use first found vma */ | |
- pgoff_start = page_to_pgoff(hpage); | |
- pgoff_end = pgoff_start + pages_per_huge_page(page_hstate(hpage)) - 1; | |
- anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, | |
- pgoff_start, pgoff_end) { | |
- struct vm_area_struct *vma = avc->vma; | |
- | |
- mapping = vma->vm_file->f_mapping; | |
- break; | |
- } | |
- | |
- anon_vma_unlock_read(anon_vma); | |
- return mapping; | |
-} | |
- | |
/* | |
* Find and lock address space (mapping) in write mode. | |
* | |
- * Upon entry, the page is locked which allows us to find the mapping | |
- * even in the case of an anon page. However, locking order dictates | |
- * the i_mmap_rwsem be acquired BEFORE the page lock. This is hugetlbfs | |
- * specific. So, we first try to lock the sema while still holding the | |
- * page lock. If this works, great! If not, then we need to drop the | |
- * page lock and then acquire i_mmap_rwsem and reacquire page lock. Of | |
- * course, need to revalidate state along the way. | |
+ * Upon entry, the page is locked which means that page_mapping() is | |
+ * stable. Due to locking order, we can only trylock_write. If we can | |
+ * not get the lock, simply return NULL to caller. | |
*/ | |
struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage) | |
{ | |
- struct address_space *mapping, *mapping2; | |
+ struct address_space *mapping = page_mapping(hpage); | |
- mapping = _get_hugetlb_page_mapping(hpage); | |
-retry: | |
if (!mapping) | |
return mapping; | |
- /* | |
- * If no contention, take lock and return | |
- */ | |
if (i_mmap_trylock_write(mapping)) | |
return mapping; | |
- /* | |
- * Must drop page lock and wait on mapping sema. | |
- * Note: Once page lock is dropped, mapping could become invalid. | |
- * As a hack, increase map count until we lock page again. | |
- */ | |
- atomic_inc(&hpage->_mapcount); | |
- unlock_page(hpage); | |
- i_mmap_lock_write(mapping); | |
- lock_page(hpage); | |
- atomic_add_negative(-1, &hpage->_mapcount); | |
- | |
- /* verify page is still mapped */ | |
- if (!page_mapped(hpage)) { | |
- i_mmap_unlock_write(mapping); | |
- return NULL; | |
- } | |
- | |
- /* | |
- * Get address space again and verify it is the same one | |
- * we locked. If not, drop lock and retry. | |
- */ | |
- mapping2 = _get_hugetlb_page_mapping(hpage); | |
- if (mapping2 != mapping) { | |
- i_mmap_unlock_write(mapping); | |
- mapping = mapping2; | |
- goto retry; | |
- } | |
- | |
- return mapping; | |
+ return NULL; | |
} | |
pgoff_t __basepage_index(struct page *page) | |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c | |
index 9eefdb9cc2303..51ce5d172855a 100644 | |
--- a/mm/memcontrol.c | |
+++ b/mm/memcontrol.c | |
@@ -4068,11 +4068,17 @@ static int memcg_stat_show(struct seq_file *m, void *v) | |
(u64)memsw * PAGE_SIZE); | |
for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { | |
+ unsigned long nr; | |
+ | |
if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) | |
continue; | |
+ nr = memcg_page_state(memcg, memcg1_stats[i]); | |
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE | |
+ if (memcg1_stats[i] == NR_ANON_THPS) | |
+ nr *= HPAGE_PMD_NR; | |
+#endif | |
seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i], | |
- (u64)memcg_page_state(memcg, memcg1_stats[i]) * | |
- PAGE_SIZE); | |
+ (u64)nr * PAGE_SIZE); | |
} | |
for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) | |
@@ -5298,7 +5304,13 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) | |
memcg->swappiness = mem_cgroup_swappiness(parent); | |
memcg->oom_kill_disable = parent->oom_kill_disable; | |
} | |
- if (parent && parent->use_hierarchy) { | |
+ if (!parent) { | |
+ page_counter_init(&memcg->memory, NULL); | |
+ page_counter_init(&memcg->swap, NULL); | |
+ page_counter_init(&memcg->memsw, NULL); | |
+ page_counter_init(&memcg->kmem, NULL); | |
+ page_counter_init(&memcg->tcpmem, NULL); | |
+ } else if (parent->use_hierarchy) { | |
memcg->use_hierarchy = true; | |
page_counter_init(&memcg->memory, &parent->memory); | |
page_counter_init(&memcg->swap, &parent->swap); | |
@@ -5306,11 +5318,11 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) | |
page_counter_init(&memcg->kmem, &parent->kmem); | |
page_counter_init(&memcg->tcpmem, &parent->tcpmem); | |
} else { | |
- page_counter_init(&memcg->memory, NULL); | |
- page_counter_init(&memcg->swap, NULL); | |
- page_counter_init(&memcg->memsw, NULL); | |
- page_counter_init(&memcg->kmem, NULL); | |
- page_counter_init(&memcg->tcpmem, NULL); | |
+ page_counter_init(&memcg->memory, &root_mem_cgroup->memory); | |
+ page_counter_init(&memcg->swap, &root_mem_cgroup->swap); | |
+ page_counter_init(&memcg->memsw, &root_mem_cgroup->memsw); | |
+ page_counter_init(&memcg->kmem, &root_mem_cgroup->kmem); | |
+ page_counter_init(&memcg->tcpmem, &root_mem_cgroup->tcpmem); | |
/* | |
* Deeper hierachy with use_hierarchy == false doesn't make | |
* much sense so let cgroup subsystem know about this | |
diff --git a/mm/memory-failure.c b/mm/memory-failure.c | |
index f1aa6433f4041..665431272de98 100644 | |
--- a/mm/memory-failure.c | |
+++ b/mm/memory-failure.c | |
@@ -1031,27 +1031,25 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn, | |
if (!PageHuge(hpage)) { | |
unmap_success = try_to_unmap(hpage, ttu); | |
} else { | |
- /* | |
- * For hugetlb pages, try_to_unmap could potentially call | |
- * huge_pmd_unshare. Because of this, take semaphore in | |
- * write mode here and set TTU_RMAP_LOCKED to indicate we | |
- * have taken the lock at this higer level. | |
- * | |
- * Note that the call to hugetlb_page_mapping_lock_write | |
- * is necessary even if mapping is already set. It handles | |
- * ugliness of potentially having to drop page lock to obtain | |
- * i_mmap_rwsem. | |
- */ | |
- mapping = hugetlb_page_mapping_lock_write(hpage); | |
- | |
- if (mapping) { | |
- unmap_success = try_to_unmap(hpage, | |
+ if (!PageAnon(hpage)) { | |
+ /* | |
+ * For hugetlb pages in shared mappings, try_to_unmap | |
+ * could potentially call huge_pmd_unshare. Because of | |
+ * this, take semaphore in write mode here and set | |
+ * TTU_RMAP_LOCKED to indicate we have taken the lock | |
+ * at this higer level. | |
+ */ | |
+ mapping = hugetlb_page_mapping_lock_write(hpage); | |
+ if (mapping) { | |
+ unmap_success = try_to_unmap(hpage, | |
ttu|TTU_RMAP_LOCKED); | |
- i_mmap_unlock_write(mapping); | |
+ i_mmap_unlock_write(mapping); | |
+ } else { | |
+ pr_info("Memory failure: %#lx: could not lock mapping for mapped huge page\n", pfn); | |
+ unmap_success = false; | |
+ } | |
} else { | |
- pr_info("Memory failure: %#lx: could not find mapping for mapped huge page\n", | |
- pfn); | |
- unmap_success = false; | |
+ unmap_success = try_to_unmap(hpage, ttu); | |
} | |
} | |
if (!unmap_success) | |
diff --git a/mm/migrate.c b/mm/migrate.c | |
index 04a98bb2f568f..25592b45a8174 100644 | |
--- a/mm/migrate.c | |
+++ b/mm/migrate.c | |
@@ -1333,34 +1333,38 @@ static int unmap_and_move_huge_page(new_page_t get_new_page, | |
goto put_anon; | |
if (page_mapped(hpage)) { | |
- /* | |
- * try_to_unmap could potentially call huge_pmd_unshare. | |
- * Because of this, take semaphore in write mode here and | |
- * set TTU_RMAP_LOCKED to let lower levels know we have | |
- * taken the lock. | |
- */ | |
- mapping = hugetlb_page_mapping_lock_write(hpage); | |
- if (unlikely(!mapping)) | |
- goto unlock_put_anon; | |
+ bool mapping_locked = false; | |
+ enum ttu_flags ttu = TTU_MIGRATION|TTU_IGNORE_MLOCK| | |
+ TTU_IGNORE_ACCESS; | |
+ | |
+ if (!PageAnon(hpage)) { | |
+ /* | |
+ * In shared mappings, try_to_unmap could potentially | |
+ * call huge_pmd_unshare. Because of this, take | |
+ * semaphore in write mode here and set TTU_RMAP_LOCKED | |
+ * to let lower levels know we have taken the lock. | |
+ */ | |
+ mapping = hugetlb_page_mapping_lock_write(hpage); | |
+ if (unlikely(!mapping)) | |
+ goto unlock_put_anon; | |
+ | |
+ mapping_locked = true; | |
+ ttu |= TTU_RMAP_LOCKED; | |
+ } | |
- try_to_unmap(hpage, | |
- TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS| | |
- TTU_RMAP_LOCKED); | |
+ try_to_unmap(hpage, ttu); | |
page_was_mapped = 1; | |
- /* | |
- * Leave mapping locked until after subsequent call to | |
- * remove_migration_ptes() | |
- */ | |
+ | |
+ if (mapping_locked) | |
+ i_mmap_unlock_write(mapping); | |
} | |
if (!page_mapped(hpage)) | |
rc = move_to_new_page(new_hpage, hpage, mode); | |
- if (page_was_mapped) { | |
+ if (page_was_mapped) | |
remove_migration_ptes(hpage, | |
- rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, true); | |
- i_mmap_unlock_write(mapping); | |
- } | |
+ rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false); | |
unlock_put_anon: | |
unlock_page(new_hpage); | |
diff --git a/mm/rmap.c b/mm/rmap.c | |
index 9425260774a1f..5ebf78ae01cbb 100644 | |
--- a/mm/rmap.c | |
+++ b/mm/rmap.c | |
@@ -1413,9 +1413,6 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, | |
/* | |
* If sharing is possible, start and end will be adjusted | |
* accordingly. | |
- * | |
- * If called for a huge page, caller must hold i_mmap_rwsem | |
- * in write mode as it is possible to call huge_pmd_unshare. | |
*/ | |
adjust_range_if_pmd_sharing_possible(vma, &range.start, | |
&range.end); | |
@@ -1462,7 +1459,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, | |
subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte); | |
address = pvmw.address; | |
- if (PageHuge(page)) { | |
+ if (PageHuge(page) && !PageAnon(page)) { | |
/* | |
* To call huge_pmd_unshare, i_mmap_rwsem must be | |
* held in write mode. Caller needs to explicitly | |
diff --git a/mm/slub.c b/mm/slub.c | |
index 0cbe67f13946e..50cab2089067f 100644 | |
--- a/mm/slub.c | |
+++ b/mm/slub.c | |
@@ -2848,7 +2848,7 @@ redo: | |
object = c->freelist; | |
page = c->page; | |
- if (unlikely(!object || !node_match(page, node))) { | |
+ if (unlikely(!object || !page || !node_match(page, node))) { | |
object = __slab_alloc(s, gfpflags, node, addr, c); | |
stat(s, ALLOC_SLOWPATH); | |
} else { | |
diff --git a/mm/vmscan.c b/mm/vmscan.c | |
index 466fc3144fffc..8b11736c4c438 100644 | |
--- a/mm/vmscan.c | |
+++ b/mm/vmscan.c | |
@@ -1514,7 +1514,8 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone, | |
nr_reclaimed = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc, | |
TTU_IGNORE_ACCESS, &stat, true); | |
list_splice(&clean_pages, page_list); | |
- mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, -nr_reclaimed); | |
+ mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, | |
+ -(long)nr_reclaimed); | |
/* | |
* Since lazyfree pages are isolated from file LRU from the beginning, | |
* they will rotate back to anonymous LRU in the end if it failed to | |
@@ -1524,7 +1525,7 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone, | |
mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON, | |
stat.nr_lazyfree_fail); | |
mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, | |
- -stat.nr_lazyfree_fail); | |
+ -(long)stat.nr_lazyfree_fail); | |
return nr_reclaimed; | |
} | |
diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c | |
index 1be4c898b2fa8..f23966526a885 100644 | |
--- a/net/can/j1939/socket.c | |
+++ b/net/can/j1939/socket.c | |
@@ -475,6 +475,12 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len) | |
goto out_release_sock; | |
} | |
+ if (!(ndev->flags & IFF_UP)) { | |
+ dev_put(ndev); | |
+ ret = -ENETDOWN; | |
+ goto out_release_sock; | |
+ } | |
+ | |
priv = j1939_netdev_start(ndev); | |
dev_put(ndev); | |
if (IS_ERR(priv)) { | |
diff --git a/net/core/devlink.c b/net/core/devlink.c | |
index 80ec1cd81c647..9a8abc30659c6 100644 | |
--- a/net/core/devlink.c | |
+++ b/net/core/devlink.c | |
@@ -7675,8 +7675,6 @@ static int __devlink_port_attrs_set(struct devlink_port *devlink_port, | |
{ | |
struct devlink_port_attrs *attrs = &devlink_port->attrs; | |
- if (WARN_ON(devlink_port->registered)) | |
- return -EEXIST; | |
devlink_port->attrs_set = true; | |
attrs->flavour = flavour; | |
if (attrs->switch_id.id_len) { | |
@@ -7700,6 +7698,8 @@ void devlink_port_attrs_set(struct devlink_port *devlink_port, | |
{ | |
int ret; | |
+ if (WARN_ON(devlink_port->registered)) | |
+ return; | |
devlink_port->attrs = *attrs; | |
ret = __devlink_port_attrs_set(devlink_port, attrs->flavour); | |
if (ret) | |
@@ -7719,6 +7719,8 @@ void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, u16 pf) | |
struct devlink_port_attrs *attrs = &devlink_port->attrs; | |
int ret; | |
+ if (WARN_ON(devlink_port->registered)) | |
+ return; | |
ret = __devlink_port_attrs_set(devlink_port, | |
DEVLINK_PORT_FLAVOUR_PCI_PF); | |
if (ret) | |
@@ -7741,6 +7743,8 @@ void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port, | |
struct devlink_port_attrs *attrs = &devlink_port->attrs; | |
int ret; | |
+ if (WARN_ON(devlink_port->registered)) | |
+ return; | |
ret = __devlink_port_attrs_set(devlink_port, | |
DEVLINK_PORT_FLAVOUR_PCI_VF); | |
if (ret) | |
diff --git a/net/ethtool/features.c b/net/ethtool/features.c | |
index 495635f152ba6..1b2a3fb6e7f64 100644 | |
--- a/net/ethtool/features.c | |
+++ b/net/ethtool/features.c | |
@@ -296,7 +296,7 @@ int ethnl_set_features(struct sk_buff *skb, struct genl_info *info) | |
active_diff_mask, compact); | |
} | |
if (mod) | |
- ethtool_notify(dev, ETHTOOL_MSG_FEATURES_NTF, NULL); | |
+ netdev_features_change(dev); | |
out_rtnl: | |
rtnl_unlock(); | |
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c | |
index b2ea1a8c5fd66..22c3e9e23dc9b 100644 | |
--- a/net/ipv4/ip_tunnel_core.c | |
+++ b/net/ipv4/ip_tunnel_core.c | |
@@ -263,7 +263,7 @@ static int iptunnel_pmtud_check_icmp(struct sk_buff *skb, int mtu) | |
const struct icmphdr *icmph = icmp_hdr(skb); | |
const struct iphdr *iph = ip_hdr(skb); | |
- if (mtu <= 576 || iph->frag_off != htons(IP_DF)) | |
+ if (mtu < 576 || iph->frag_off != htons(IP_DF)) | |
return 0; | |
if (ipv4_is_lbcast(iph->daddr) || ipv4_is_multicast(iph->daddr) || | |
@@ -359,7 +359,7 @@ static int iptunnel_pmtud_check_icmpv6(struct sk_buff *skb, int mtu) | |
__be16 frag_off; | |
int offset; | |
- if (mtu <= IPV6_MIN_MTU) | |
+ if (mtu < IPV6_MIN_MTU) | |
return 0; | |
if (stype == IPV6_ADDR_ANY || stype == IPV6_ADDR_MULTICAST || | |
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c | |
index a058213b77a78..7c841037c5334 100644 | |
--- a/net/ipv4/netfilter.c | |
+++ b/net/ipv4/netfilter.c | |
@@ -17,17 +17,19 @@ | |
#include <net/netfilter/nf_queue.h> | |
/* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */ | |
-int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_type) | |
+int ip_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb, unsigned int addr_type) | |
{ | |
const struct iphdr *iph = ip_hdr(skb); | |
struct rtable *rt; | |
struct flowi4 fl4 = {}; | |
__be32 saddr = iph->saddr; | |
- const struct sock *sk = skb_to_full_sk(skb); | |
- __u8 flags = sk ? inet_sk_flowi_flags(sk) : 0; | |
+ __u8 flags; | |
struct net_device *dev = skb_dst(skb)->dev; | |
unsigned int hh_len; | |
+ sk = sk_to_full_sk(sk); | |
+ flags = sk ? inet_sk_flowi_flags(sk) : 0; | |
+ | |
if (addr_type == RTN_UNSPEC) | |
addr_type = inet_addr_type_dev_table(net, dev, saddr); | |
if (addr_type == RTN_LOCAL || addr_type == RTN_UNICAST) | |
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c | |
index f703a717ab1d2..8330795892730 100644 | |
--- a/net/ipv4/netfilter/iptable_mangle.c | |
+++ b/net/ipv4/netfilter/iptable_mangle.c | |
@@ -62,7 +62,7 @@ ipt_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state) | |
iph->daddr != daddr || | |
skb->mark != mark || | |
iph->tos != tos) { | |
- err = ip_route_me_harder(state->net, skb, RTN_UNSPEC); | |
+ err = ip_route_me_harder(state->net, state->sk, skb, RTN_UNSPEC); | |
if (err < 0) | |
ret = NF_DROP_ERR(err); | |
} | |
diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c | |
index 9dcfa4e461b65..93b07739807b2 100644 | |
--- a/net/ipv4/netfilter/nf_reject_ipv4.c | |
+++ b/net/ipv4/netfilter/nf_reject_ipv4.c | |
@@ -145,7 +145,7 @@ void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook) | |
ip4_dst_hoplimit(skb_dst(nskb))); | |
nf_reject_ip_tcphdr_put(nskb, oldskb, oth); | |
- if (ip_route_me_harder(net, nskb, RTN_UNSPEC)) | |
+ if (ip_route_me_harder(net, nskb->sk, nskb, RTN_UNSPEC)) | |
goto free_nskb; | |
niph = ip_hdr(nskb); | |
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c | |
index e037566315412..8ff92f96ccdd6 100644 | |
--- a/net/ipv4/syncookies.c | |
+++ b/net/ipv4/syncookies.c | |
@@ -331,7 +331,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) | |
__u32 cookie = ntohl(th->ack_seq) - 1; | |
struct sock *ret = sk; | |
struct request_sock *req; | |
- int mss; | |
+ int full_space, mss; | |
struct rtable *rt; | |
__u8 rcv_wscale; | |
struct flowi4 fl4; | |
@@ -427,8 +427,13 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) | |
/* Try to redo what tcp_v4_send_synack did. */ | |
req->rsk_window_clamp = tp->window_clamp ? :dst_metric(&rt->dst, RTAX_WINDOW); | |
+ /* limit the window selection if the user enforce a smaller rx buffer */ | |
+ full_space = tcp_full_space(sk); | |
+ if (sk->sk_userlocks & SOCK_RCVBUF_LOCK && | |
+ (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0)) | |
+ req->rsk_window_clamp = full_space; | |
- tcp_select_initial_window(sk, tcp_full_space(sk), req->mss, | |
+ tcp_select_initial_window(sk, full_space, req->mss, | |
&req->rsk_rcv_wnd, &req->rsk_window_clamp, | |
ireq->wscale_ok, &rcv_wscale, | |
dst_metric(&rt->dst, RTAX_INITRWND)); | |
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c | |
index e67a66fbf27b8..c62805cd31319 100644 | |
--- a/net/ipv4/udp_offload.c | |
+++ b/net/ipv4/udp_offload.c | |
@@ -366,7 +366,7 @@ out: | |
static struct sk_buff *udp_gro_receive_segment(struct list_head *head, | |
struct sk_buff *skb) | |
{ | |
- struct udphdr *uh = udp_hdr(skb); | |
+ struct udphdr *uh = udp_gro_udphdr(skb); | |
struct sk_buff *pp = NULL; | |
struct udphdr *uh2; | |
struct sk_buff *p; | |
@@ -500,12 +500,22 @@ out: | |
} | |
EXPORT_SYMBOL(udp_gro_receive); | |
+static struct sock *udp4_gro_lookup_skb(struct sk_buff *skb, __be16 sport, | |
+ __be16 dport) | |
+{ | |
+ const struct iphdr *iph = skb_gro_network_header(skb); | |
+ | |
+ return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport, | |
+ iph->daddr, dport, inet_iif(skb), | |
+ inet_sdif(skb), &udp_table, NULL); | |
+} | |
+ | |
INDIRECT_CALLABLE_SCOPE | |
struct sk_buff *udp4_gro_receive(struct list_head *head, struct sk_buff *skb) | |
{ | |
struct udphdr *uh = udp_gro_udphdr(skb); | |
+ struct sock *sk = NULL; | |
struct sk_buff *pp; | |
- struct sock *sk; | |
if (unlikely(!uh)) | |
goto flush; | |
@@ -523,7 +533,10 @@ struct sk_buff *udp4_gro_receive(struct list_head *head, struct sk_buff *skb) | |
skip: | |
NAPI_GRO_CB(skb)->is_ipv6 = 0; | |
rcu_read_lock(); | |
- sk = static_branch_unlikely(&udp_encap_needed_key) ? udp4_lib_lookup_skb(skb, uh->source, uh->dest) : NULL; | |
+ | |
+ if (static_branch_unlikely(&udp_encap_needed_key)) | |
+ sk = udp4_gro_lookup_skb(skb, uh->source, uh->dest); | |
+ | |
pp = udp_gro_receive(head, skb, uh, sk); | |
rcu_read_unlock(); | |
return pp; | |
diff --git a/net/ipv4/xfrm4_tunnel.c b/net/ipv4/xfrm4_tunnel.c | |
index dc19aff7c2e00..fb0648e7fb32f 100644 | |
--- a/net/ipv4/xfrm4_tunnel.c | |
+++ b/net/ipv4/xfrm4_tunnel.c | |
@@ -64,14 +64,14 @@ static int xfrm_tunnel_err(struct sk_buff *skb, u32 info) | |
static struct xfrm_tunnel xfrm_tunnel_handler __read_mostly = { | |
.handler = xfrm_tunnel_rcv, | |
.err_handler = xfrm_tunnel_err, | |
- .priority = 3, | |
+ .priority = 4, | |
}; | |
#if IS_ENABLED(CONFIG_IPV6) | |
static struct xfrm_tunnel xfrm64_tunnel_handler __read_mostly = { | |
.handler = xfrm_tunnel_rcv, | |
.err_handler = xfrm_tunnel_err, | |
- .priority = 2, | |
+ .priority = 3, | |
}; | |
#endif | |
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c | |
index 6d0e942d082d4..ab9a279dd6d47 100644 | |
--- a/net/ipv6/netfilter.c | |
+++ b/net/ipv6/netfilter.c | |
@@ -20,10 +20,10 @@ | |
#include <net/netfilter/ipv6/nf_defrag_ipv6.h> | |
#include "../bridge/br_private.h" | |
-int ip6_route_me_harder(struct net *net, struct sk_buff *skb) | |
+int ip6_route_me_harder(struct net *net, struct sock *sk_partial, struct sk_buff *skb) | |
{ | |
const struct ipv6hdr *iph = ipv6_hdr(skb); | |
- struct sock *sk = sk_to_full_sk(skb->sk); | |
+ struct sock *sk = sk_to_full_sk(sk_partial); | |
unsigned int hh_len; | |
struct dst_entry *dst; | |
int strict = (ipv6_addr_type(&iph->daddr) & | |
@@ -84,7 +84,7 @@ static int nf_ip6_reroute(struct sk_buff *skb, | |
if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) || | |
!ipv6_addr_equal(&iph->saddr, &rt_info->saddr) || | |
skb->mark != rt_info->mark) | |
- return ip6_route_me_harder(entry->state.net, skb); | |
+ return ip6_route_me_harder(entry->state.net, entry->state.sk, skb); | |
} | |
return 0; | |
} | |
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c | |
index 1a2748611e003..cee74803d7a1c 100644 | |
--- a/net/ipv6/netfilter/ip6table_mangle.c | |
+++ b/net/ipv6/netfilter/ip6table_mangle.c | |
@@ -57,7 +57,7 @@ ip6t_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state) | |
skb->mark != mark || | |
ipv6_hdr(skb)->hop_limit != hop_limit || | |
flowlabel != *((u_int32_t *)ipv6_hdr(skb)))) { | |
- err = ip6_route_me_harder(state->net, skb); | |
+ err = ip6_route_me_harder(state->net, state->sk, skb); | |
if (err < 0) | |
ret = NF_DROP_ERR(err); | |
} | |
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c | |
index 5e2c34c0ac973..5e7983cb61546 100644 | |
--- a/net/ipv6/sit.c | |
+++ b/net/ipv6/sit.c | |
@@ -1128,7 +1128,6 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev) | |
if (tdev && !netif_is_l3_master(tdev)) { | |
int t_hlen = tunnel->hlen + sizeof(struct iphdr); | |
- dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr); | |
dev->mtu = tdev->mtu - t_hlen; | |
if (dev->mtu < IPV6_MIN_MTU) | |
dev->mtu = IPV6_MIN_MTU; | |
@@ -1426,7 +1425,6 @@ static void ipip6_tunnel_setup(struct net_device *dev) | |
dev->priv_destructor = ipip6_dev_free; | |
dev->type = ARPHRD_SIT; | |
- dev->hard_header_len = LL_MAX_HEADER + t_hlen; | |
dev->mtu = ETH_DATA_LEN - t_hlen; | |
dev->min_mtu = IPV6_MIN_MTU; | |
dev->max_mtu = IP6_MAX_MTU - t_hlen; | |
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c | |
index e796a64be308c..9b6cae1e49d91 100644 | |
--- a/net/ipv6/syncookies.c | |
+++ b/net/ipv6/syncookies.c | |
@@ -136,7 +136,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) | |
__u32 cookie = ntohl(th->ack_seq) - 1; | |
struct sock *ret = sk; | |
struct request_sock *req; | |
- int mss; | |
+ int full_space, mss; | |
struct dst_entry *dst; | |
__u8 rcv_wscale; | |
u32 tsoff = 0; | |
@@ -241,7 +241,13 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) | |
} | |
req->rsk_window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW); | |
- tcp_select_initial_window(sk, tcp_full_space(sk), req->mss, | |
+ /* limit the window selection if the user enforce a smaller rx buffer */ | |
+ full_space = tcp_full_space(sk); | |
+ if (sk->sk_userlocks & SOCK_RCVBUF_LOCK && | |
+ (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0)) | |
+ req->rsk_window_clamp = full_space; | |
+ | |
+ tcp_select_initial_window(sk, full_space, req->mss, | |
&req->rsk_rcv_wnd, &req->rsk_window_clamp, | |
ireq->wscale_ok, &rcv_wscale, | |
dst_metric(dst, RTAX_INITRWND)); | |
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c | |
index 584157a077596..f9e888d1b9af8 100644 | |
--- a/net/ipv6/udp_offload.c | |
+++ b/net/ipv6/udp_offload.c | |
@@ -111,12 +111,22 @@ out: | |
return segs; | |
} | |
+static struct sock *udp6_gro_lookup_skb(struct sk_buff *skb, __be16 sport, | |
+ __be16 dport) | |
+{ | |
+ const struct ipv6hdr *iph = skb_gro_network_header(skb); | |
+ | |
+ return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport, | |
+ &iph->daddr, dport, inet6_iif(skb), | |
+ inet6_sdif(skb), &udp_table, NULL); | |
+} | |
+ | |
INDIRECT_CALLABLE_SCOPE | |
struct sk_buff *udp6_gro_receive(struct list_head *head, struct sk_buff *skb) | |
{ | |
struct udphdr *uh = udp_gro_udphdr(skb); | |
+ struct sock *sk = NULL; | |
struct sk_buff *pp; | |
- struct sock *sk; | |
if (unlikely(!uh)) | |
goto flush; | |
@@ -135,7 +145,10 @@ struct sk_buff *udp6_gro_receive(struct list_head *head, struct sk_buff *skb) | |
skip: | |
NAPI_GRO_CB(skb)->is_ipv6 = 1; | |
rcu_read_lock(); | |
- sk = static_branch_unlikely(&udpv6_encap_needed_key) ? udp6_lib_lookup_skb(skb, uh->source, uh->dest) : NULL; | |
+ | |
+ if (static_branch_unlikely(&udpv6_encap_needed_key)) | |
+ sk = udp6_gro_lookup_skb(skb, uh->source, uh->dest); | |
+ | |
pp = udp_gro_receive(head, skb, uh, sk); | |
rcu_read_unlock(); | |
return pp; | |
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c | |
index 25b7ebda2fabf..f696d46e69100 100644 | |
--- a/net/ipv6/xfrm6_tunnel.c | |
+++ b/net/ipv6/xfrm6_tunnel.c | |
@@ -303,13 +303,13 @@ static const struct xfrm_type xfrm6_tunnel_type = { | |
static struct xfrm6_tunnel xfrm6_tunnel_handler __read_mostly = { | |
.handler = xfrm6_tunnel_rcv, | |
.err_handler = xfrm6_tunnel_err, | |
- .priority = 2, | |
+ .priority = 3, | |
}; | |
static struct xfrm6_tunnel xfrm46_tunnel_handler __read_mostly = { | |
.handler = xfrm6_tunnel_rcv, | |
.err_handler = xfrm6_tunnel_err, | |
- .priority = 2, | |
+ .priority = 3, | |
}; | |
static int __net_init xfrm6_tunnel_net_init(struct net *net) | |
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c | |
index a95af62acb529..6214d083279bc 100644 | |
--- a/net/iucv/af_iucv.c | |
+++ b/net/iucv/af_iucv.c | |
@@ -1434,7 +1434,8 @@ static int iucv_sock_shutdown(struct socket *sock, int how) | |
break; | |
} | |
- if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) { | |
+ if ((how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) && | |
+ sk->sk_state == IUCV_CONNECTED) { | |
if (iucv->transport == AF_IUCV_TRANS_IUCV) { | |
txmsg.class = 0; | |
txmsg.tag = 0; | |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c | |
index 2e400b0ff6961..0f30f50c46b1b 100644 | |
--- a/net/mac80211/mlme.c | |
+++ b/net/mac80211/mlme.c | |
@@ -5359,6 +5359,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, | |
struct cfg80211_assoc_request *req) | |
{ | |
bool is_6ghz = req->bss->channel->band == NL80211_BAND_6GHZ; | |
+ bool is_5ghz = req->bss->channel->band == NL80211_BAND_5GHZ; | |
struct ieee80211_local *local = sdata->local; | |
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | |
struct ieee80211_bss *bss = (void *)req->bss->priv; | |
@@ -5507,7 +5508,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, | |
if (vht_ie && vht_ie[1] >= sizeof(struct ieee80211_vht_cap)) | |
memcpy(&assoc_data->ap_vht_cap, vht_ie + 2, | |
sizeof(struct ieee80211_vht_cap)); | |
- else if (!is_6ghz) | |
+ else if (is_5ghz) | |
ifmgd->flags |= IEEE80211_STA_DISABLE_VHT | | |
IEEE80211_STA_DISABLE_HE; | |
rcu_read_unlock(); | |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c | |
index aa486e202a57c..ca1e8cd75b22b 100644 | |
--- a/net/mac80211/tx.c | |
+++ b/net/mac80211/tx.c | |
@@ -1938,19 +1938,24 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata, | |
/* device xmit handlers */ | |
+enum ieee80211_encrypt { | |
+ ENCRYPT_NO, | |
+ ENCRYPT_MGMT, | |
+ ENCRYPT_DATA, | |
+}; | |
+ | |
static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata, | |
struct sk_buff *skb, | |
- int head_need, bool may_encrypt) | |
+ int head_need, | |
+ enum ieee80211_encrypt encrypt) | |
{ | |
struct ieee80211_local *local = sdata->local; | |
- struct ieee80211_hdr *hdr; | |
bool enc_tailroom; | |
int tail_need = 0; | |
- hdr = (struct ieee80211_hdr *) skb->data; | |
- enc_tailroom = may_encrypt && | |
- (sdata->crypto_tx_tailroom_needed_cnt || | |
- ieee80211_is_mgmt(hdr->frame_control)); | |
+ enc_tailroom = encrypt == ENCRYPT_MGMT || | |
+ (encrypt == ENCRYPT_DATA && | |
+ sdata->crypto_tx_tailroom_needed_cnt); | |
if (enc_tailroom) { | |
tail_need = IEEE80211_ENCRYPT_TAILROOM; | |
@@ -1981,23 +1986,29 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, | |
{ | |
struct ieee80211_local *local = sdata->local; | |
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | |
- struct ieee80211_hdr *hdr; | |
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | |
int headroom; | |
- bool may_encrypt; | |
+ enum ieee80211_encrypt encrypt; | |
- may_encrypt = !(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT); | |
+ if (info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT) | |
+ encrypt = ENCRYPT_NO; | |
+ else if (ieee80211_is_mgmt(hdr->frame_control)) | |
+ encrypt = ENCRYPT_MGMT; | |
+ else | |
+ encrypt = ENCRYPT_DATA; | |
headroom = local->tx_headroom; | |
- if (may_encrypt) | |
+ if (encrypt != ENCRYPT_NO) | |
headroom += sdata->encrypt_headroom; | |
headroom -= skb_headroom(skb); | |
headroom = max_t(int, 0, headroom); | |
- if (ieee80211_skb_resize(sdata, skb, headroom, may_encrypt)) { | |
+ if (ieee80211_skb_resize(sdata, skb, headroom, encrypt)) { | |
ieee80211_free_txskb(&local->hw, skb); | |
return; | |
} | |
+ /* reload after potential resize */ | |
hdr = (struct ieee80211_hdr *) skb->data; | |
info->control.vif = &sdata->vif; | |
@@ -2822,7 +2833,7 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata, | |
head_need += sdata->encrypt_headroom; | |
head_need += local->tx_headroom; | |
head_need = max_t(int, 0, head_need); | |
- if (ieee80211_skb_resize(sdata, skb, head_need, true)) { | |
+ if (ieee80211_skb_resize(sdata, skb, head_need, ENCRYPT_DATA)) { | |
ieee80211_free_txskb(&local->hw, skb); | |
skb = NULL; | |
return ERR_PTR(-ENOMEM); | |
@@ -3496,7 +3507,7 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata, | |
if (unlikely(ieee80211_skb_resize(sdata, skb, | |
max_t(int, extra_head + hw_headroom - | |
skb_headroom(skb), 0), | |
- false))) { | |
+ ENCRYPT_NO))) { | |
kfree_skb(skb); | |
return true; | |
} | |
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c | |
index b295eb6e9580b..9adfa8a18579a 100644 | |
--- a/net/mptcp/protocol.c | |
+++ b/net/mptcp/protocol.c | |
@@ -2122,6 +2122,7 @@ static struct proto mptcp_prot = { | |
.memory_pressure = &tcp_memory_pressure, | |
.stream_memory_free = mptcp_memory_free, | |
.sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem), | |
+ .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_tcp_rmem), | |
.sysctl_mem = sysctl_tcp_mem, | |
.obj_size = sizeof(struct mptcp_sock), | |
.slab_flags = SLAB_TYPESAFE_BY_RCU, | |
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c | |
index 920b7c4331f0c..2643dc982eb4e 100644 | |
--- a/net/netfilter/ipset/ip_set_core.c | |
+++ b/net/netfilter/ipset/ip_set_core.c | |
@@ -652,13 +652,14 @@ ip_set_match_extensions(struct ip_set *set, const struct ip_set_ext *ext, | |
if (SET_WITH_COUNTER(set)) { | |
struct ip_set_counter *counter = ext_counter(data, set); | |
+ ip_set_update_counter(counter, ext, flags); | |
+ | |
if (flags & IPSET_FLAG_MATCH_COUNTERS && | |
!(ip_set_match_counter(ip_set_get_packets(counter), | |
mext->packets, mext->packets_op) && | |
ip_set_match_counter(ip_set_get_bytes(counter), | |
mext->bytes, mext->bytes_op))) | |
return false; | |
- ip_set_update_counter(counter, ext, flags); | |
} | |
if (SET_WITH_SKBINFO(set)) | |
ip_set_get_skbinfo(ext_skbinfo(data, set), | |
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c | |
index e3668a6e54e47..570d8ef6fb8b6 100644 | |
--- a/net/netfilter/ipvs/ip_vs_core.c | |
+++ b/net/netfilter/ipvs/ip_vs_core.c | |
@@ -742,12 +742,12 @@ static int ip_vs_route_me_harder(struct netns_ipvs *ipvs, int af, | |
struct dst_entry *dst = skb_dst(skb); | |
if (dst->dev && !(dst->dev->flags & IFF_LOOPBACK) && | |
- ip6_route_me_harder(ipvs->net, skb) != 0) | |
+ ip6_route_me_harder(ipvs->net, skb->sk, skb) != 0) | |
return 1; | |
} else | |
#endif | |
if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL) && | |
- ip_route_me_harder(ipvs->net, skb, RTN_LOCAL) != 0) | |
+ ip_route_me_harder(ipvs->net, skb->sk, skb, RTN_LOCAL) != 0) | |
return 1; | |
return 0; | |
diff --git a/net/netfilter/nf_nat_proto.c b/net/netfilter/nf_nat_proto.c | |
index 59151dc07fdc1..e87b6bd6b3cdb 100644 | |
--- a/net/netfilter/nf_nat_proto.c | |
+++ b/net/netfilter/nf_nat_proto.c | |
@@ -715,7 +715,7 @@ nf_nat_ipv4_local_fn(void *priv, struct sk_buff *skb, | |
if (ct->tuplehash[dir].tuple.dst.u3.ip != | |
ct->tuplehash[!dir].tuple.src.u3.ip) { | |
- err = ip_route_me_harder(state->net, skb, RTN_UNSPEC); | |
+ err = ip_route_me_harder(state->net, state->sk, skb, RTN_UNSPEC); | |
if (err < 0) | |
ret = NF_DROP_ERR(err); | |
} | |
@@ -953,7 +953,7 @@ nf_nat_ipv6_local_fn(void *priv, struct sk_buff *skb, | |
if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3, | |
&ct->tuplehash[!dir].tuple.src.u3)) { | |
- err = nf_ip6_route_me_harder(state->net, skb); | |
+ err = nf_ip6_route_me_harder(state->net, state->sk, skb); | |
if (err < 0) | |
ret = NF_DROP_ERR(err); | |
} | |
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c | |
index 9cca35d229273..d7d34a62d3bf5 100644 | |
--- a/net/netfilter/nf_synproxy_core.c | |
+++ b/net/netfilter/nf_synproxy_core.c | |
@@ -446,7 +446,7 @@ synproxy_send_tcp(struct net *net, | |
skb_dst_set_noref(nskb, skb_dst(skb)); | |
nskb->protocol = htons(ETH_P_IP); | |
- if (ip_route_me_harder(net, nskb, RTN_UNSPEC)) | |
+ if (ip_route_me_harder(net, nskb->sk, nskb, RTN_UNSPEC)) | |
goto free_nskb; | |
if (nfct) { | |
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c | |
index 72f3ee47e478f..4305d96334082 100644 | |
--- a/net/netfilter/nf_tables_api.c | |
+++ b/net/netfilter/nf_tables_api.c | |
@@ -7076,7 +7076,7 @@ static void nf_tables_flowtable_notify(struct nft_ctx *ctx, | |
GFP_KERNEL); | |
kfree(buf); | |
- if (ctx->report && | |
+ if (!ctx->report && | |
!nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES)) | |
return; | |
@@ -7198,7 +7198,7 @@ static void nf_tables_gen_notify(struct net *net, struct sk_buff *skb, | |
audit_log_nfcfg("?:0;?:0", 0, net->nft.base_seq, | |
AUDIT_NFT_OP_GEN_REGISTER, GFP_KERNEL); | |
- if (nlmsg_report(nlh) && | |
+ if (!nlmsg_report(nlh) && | |
!nfnetlink_has_listeners(net, NFNLGRP_NFTABLES)) | |
return; | |
@@ -7992,12 +7992,16 @@ static void nf_tables_abort_release(struct nft_trans *trans) | |
kfree(trans); | |
} | |
-static int __nf_tables_abort(struct net *net, bool autoload) | |
+static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) | |
{ | |
struct nft_trans *trans, *next; | |
struct nft_trans_elem *te; | |
struct nft_hook *hook; | |
+ if (action == NFNL_ABORT_VALIDATE && | |
+ nf_tables_validate(net) < 0) | |
+ return -EAGAIN; | |
+ | |
list_for_each_entry_safe_reverse(trans, next, &net->nft.commit_list, | |
list) { | |
switch (trans->msg_type) { | |
@@ -8129,7 +8133,7 @@ static int __nf_tables_abort(struct net *net, bool autoload) | |
nf_tables_abort_release(trans); | |
} | |
- if (autoload) | |
+ if (action == NFNL_ABORT_AUTOLOAD) | |
nf_tables_module_autoload(net); | |
else | |
nf_tables_module_autoload_cleanup(net); | |
@@ -8142,9 +8146,10 @@ static void nf_tables_cleanup(struct net *net) | |
nft_validate_state_update(net, NFT_VALIDATE_SKIP); | |
} | |
-static int nf_tables_abort(struct net *net, struct sk_buff *skb, bool autoload) | |
+static int nf_tables_abort(struct net *net, struct sk_buff *skb, | |
+ enum nfnl_abort_action action) | |
{ | |
- int ret = __nf_tables_abort(net, autoload); | |
+ int ret = __nf_tables_abort(net, action); | |
mutex_unlock(&net->nft.commit_mutex); | |
@@ -8775,7 +8780,7 @@ static void __net_exit nf_tables_exit_net(struct net *net) | |
{ | |
mutex_lock(&net->nft.commit_mutex); | |
if (!list_empty(&net->nft.commit_list)) | |
- __nf_tables_abort(net, false); | |
+ __nf_tables_abort(net, NFNL_ABORT_NONE); | |
__nft_release_tables(net); | |
mutex_unlock(&net->nft.commit_mutex); | |
WARN_ON_ONCE(!list_empty(&net->nft.tables)); | |
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c | |
index 3a2e64e13b227..212c37f53f5f4 100644 | |
--- a/net/netfilter/nfnetlink.c | |
+++ b/net/netfilter/nfnetlink.c | |
@@ -316,7 +316,7 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh, | |
return netlink_ack(skb, nlh, -EINVAL, NULL); | |
replay: | |
status = 0; | |
- | |
+replay_abort: | |
skb = netlink_skb_clone(oskb, GFP_KERNEL); | |
if (!skb) | |
return netlink_ack(oskb, nlh, -ENOMEM, NULL); | |
@@ -482,7 +482,7 @@ ack: | |
} | |
done: | |
if (status & NFNL_BATCH_REPLAY) { | |
- ss->abort(net, oskb, true); | |
+ ss->abort(net, oskb, NFNL_ABORT_AUTOLOAD); | |
nfnl_err_reset(&err_list); | |
kfree_skb(skb); | |
module_put(ss->owner); | |
@@ -493,11 +493,25 @@ done: | |
status |= NFNL_BATCH_REPLAY; | |
goto done; | |
} else if (err) { | |
- ss->abort(net, oskb, false); | |
+ ss->abort(net, oskb, NFNL_ABORT_NONE); | |
netlink_ack(oskb, nlmsg_hdr(oskb), err, NULL); | |
} | |
} else { | |
- ss->abort(net, oskb, false); | |
+ enum nfnl_abort_action abort_action; | |
+ | |
+ if (status & NFNL_BATCH_FAILURE) | |
+ abort_action = NFNL_ABORT_NONE; | |
+ else | |
+ abort_action = NFNL_ABORT_VALIDATE; | |
+ | |
+ err = ss->abort(net, oskb, abort_action); | |
+ if (err == -EAGAIN) { | |
+ nfnl_err_reset(&err_list); | |
+ kfree_skb(skb); | |
+ module_put(ss->owner); | |
+ status |= NFNL_BATCH_FAILURE; | |
+ goto replay_abort; | |
+ } | |
} | |
if (ss->cleanup) | |
ss->cleanup(net); | |
diff --git a/net/netfilter/nft_chain_route.c b/net/netfilter/nft_chain_route.c | |
index 8826bbe71136c..edd02cda57fca 100644 | |
--- a/net/netfilter/nft_chain_route.c | |
+++ b/net/netfilter/nft_chain_route.c | |
@@ -42,7 +42,7 @@ static unsigned int nf_route_table_hook4(void *priv, | |
iph->daddr != daddr || | |
skb->mark != mark || | |
iph->tos != tos) { | |
- err = ip_route_me_harder(state->net, skb, RTN_UNSPEC); | |
+ err = ip_route_me_harder(state->net, state->sk, skb, RTN_UNSPEC); | |
if (err < 0) | |
ret = NF_DROP_ERR(err); | |
} | |
@@ -92,7 +92,7 @@ static unsigned int nf_route_table_hook6(void *priv, | |
skb->mark != mark || | |
ipv6_hdr(skb)->hop_limit != hop_limit || | |
flowlabel != *((u32 *)ipv6_hdr(skb)))) { | |
- err = nf_ip6_route_me_harder(state->net, skb); | |
+ err = nf_ip6_route_me_harder(state->net, state->sk, skb); | |
if (err < 0) | |
ret = NF_DROP_ERR(err); | |
} | |
diff --git a/net/netfilter/utils.c b/net/netfilter/utils.c | |
index cedf47ab3c6f9..2182d361e273f 100644 | |
--- a/net/netfilter/utils.c | |
+++ b/net/netfilter/utils.c | |
@@ -191,8 +191,8 @@ static int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry | |
skb->mark == rt_info->mark && | |
iph->daddr == rt_info->daddr && | |
iph->saddr == rt_info->saddr)) | |
- return ip_route_me_harder(entry->state.net, skb, | |
- RTN_UNSPEC); | |
+ return ip_route_me_harder(entry->state.net, entry->state.sk, | |
+ skb, RTN_UNSPEC); | |
} | |
#endif | |
return 0; | |
diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c | |
index 1489cfb941d8e..d29b962264c3d 100644 | |
--- a/net/tipc/topsrv.c | |
+++ b/net/tipc/topsrv.c | |
@@ -665,12 +665,18 @@ static int tipc_topsrv_start(struct net *net) | |
ret = tipc_topsrv_work_start(srv); | |
if (ret < 0) | |
- return ret; | |
+ goto err_start; | |
ret = tipc_topsrv_create_listener(srv); | |
if (ret < 0) | |
- tipc_topsrv_work_stop(srv); | |
+ goto err_create; | |
+ return 0; | |
+ | |
+err_create: | |
+ tipc_topsrv_work_stop(srv); | |
+err_start: | |
+ kfree(srv); | |
return ret; | |
} | |
diff --git a/net/wireless/core.c b/net/wireless/core.c | |
index 354b0ccbdc240..e025493171262 100644 | |
--- a/net/wireless/core.c | |
+++ b/net/wireless/core.c | |
@@ -1248,8 +1248,7 @@ void cfg80211_stop_iface(struct wiphy *wiphy, struct wireless_dev *wdev, | |
} | |
EXPORT_SYMBOL(cfg80211_stop_iface); | |
-void cfg80211_init_wdev(struct cfg80211_registered_device *rdev, | |
- struct wireless_dev *wdev) | |
+void cfg80211_init_wdev(struct wireless_dev *wdev) | |
{ | |
mutex_init(&wdev->mtx); | |
INIT_LIST_HEAD(&wdev->event_list); | |
@@ -1260,6 +1259,30 @@ void cfg80211_init_wdev(struct cfg80211_registered_device *rdev, | |
spin_lock_init(&wdev->pmsr_lock); | |
INIT_WORK(&wdev->pmsr_free_wk, cfg80211_pmsr_free_wk); | |
+#ifdef CONFIG_CFG80211_WEXT | |
+ wdev->wext.default_key = -1; | |
+ wdev->wext.default_mgmt_key = -1; | |
+ wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC; | |
+#endif | |
+ | |
+ if (wdev->wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT) | |
+ wdev->ps = true; | |
+ else | |
+ wdev->ps = false; | |
+ /* allow mac80211 to determine the timeout */ | |
+ wdev->ps_timeout = -1; | |
+ | |
+ if ((wdev->iftype == NL80211_IFTYPE_STATION || | |
+ wdev->iftype == NL80211_IFTYPE_P2P_CLIENT || | |
+ wdev->iftype == NL80211_IFTYPE_ADHOC) && !wdev->use_4addr) | |
+ wdev->netdev->priv_flags |= IFF_DONT_BRIDGE; | |
+ | |
+ INIT_WORK(&wdev->disconnect_wk, cfg80211_autodisconnect_wk); | |
+} | |
+ | |
+void cfg80211_register_wdev(struct cfg80211_registered_device *rdev, | |
+ struct wireless_dev *wdev) | |
+{ | |
/* | |
* We get here also when the interface changes network namespaces, | |
* as it's registered into the new one, but we don't want it to | |
@@ -1293,6 +1316,11 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, | |
switch (state) { | |
case NETDEV_POST_INIT: | |
SET_NETDEV_DEVTYPE(dev, &wiphy_type); | |
+ wdev->netdev = dev; | |
+ /* can only change netns with wiphy */ | |
+ dev->features |= NETIF_F_NETNS_LOCAL; | |
+ | |
+ cfg80211_init_wdev(wdev); | |
break; | |
case NETDEV_REGISTER: | |
/* | |
@@ -1300,35 +1328,12 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, | |
* called within code protected by it when interfaces | |
* are added with nl80211. | |
*/ | |
- /* can only change netns with wiphy */ | |
- dev->features |= NETIF_F_NETNS_LOCAL; | |
- | |
if (sysfs_create_link(&dev->dev.kobj, &rdev->wiphy.dev.kobj, | |
"phy80211")) { | |
pr_err("failed to add phy80211 symlink to netdev!\n"); | |
} | |
- wdev->netdev = dev; | |
-#ifdef CONFIG_CFG80211_WEXT | |
- wdev->wext.default_key = -1; | |
- wdev->wext.default_mgmt_key = -1; | |
- wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC; | |
-#endif | |
- | |
- if (wdev->wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT) | |
- wdev->ps = true; | |
- else | |
- wdev->ps = false; | |
- /* allow mac80211 to determine the timeout */ | |
- wdev->ps_timeout = -1; | |
- | |
- if ((wdev->iftype == NL80211_IFTYPE_STATION || | |
- wdev->iftype == NL80211_IFTYPE_P2P_CLIENT || | |
- wdev->iftype == NL80211_IFTYPE_ADHOC) && !wdev->use_4addr) | |
- dev->priv_flags |= IFF_DONT_BRIDGE; | |
- | |
- INIT_WORK(&wdev->disconnect_wk, cfg80211_autodisconnect_wk); | |
- cfg80211_init_wdev(rdev, wdev); | |
+ cfg80211_register_wdev(rdev, wdev); | |
break; | |
case NETDEV_GOING_DOWN: | |
cfg80211_leave(rdev, wdev); | |
diff --git a/net/wireless/core.h b/net/wireless/core.h | |
index 67b0389fca4dc..8cd4a9793298e 100644 | |
--- a/net/wireless/core.h | |
+++ b/net/wireless/core.h | |
@@ -208,8 +208,9 @@ struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx); | |
int cfg80211_switch_netns(struct cfg80211_registered_device *rdev, | |
struct net *net); | |
-void cfg80211_init_wdev(struct cfg80211_registered_device *rdev, | |
- struct wireless_dev *wdev); | |
+void cfg80211_init_wdev(struct wireless_dev *wdev); | |
+void cfg80211_register_wdev(struct cfg80211_registered_device *rdev, | |
+ struct wireless_dev *wdev); | |
static inline void wdev_lock(struct wireless_dev *wdev) | |
__acquires(wdev) | |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c | |
index e14307f2bddcc..8eb43c47e582a 100644 | |
--- a/net/wireless/nl80211.c | |
+++ b/net/wireless/nl80211.c | |
@@ -3801,7 +3801,8 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) | |
* P2P Device and NAN do not have a netdev, so don't go | |
* through the netdev notifier and must be added here | |
*/ | |
- cfg80211_init_wdev(rdev, wdev); | |
+ cfg80211_init_wdev(wdev); | |
+ cfg80211_register_wdev(rdev, wdev); | |
break; | |
default: | |
break; | |
diff --git a/net/wireless/reg.c b/net/wireless/reg.c | |
index d8a90d3974235..763a45655ac21 100644 | |
--- a/net/wireless/reg.c | |
+++ b/net/wireless/reg.c | |
@@ -3411,7 +3411,7 @@ static void print_rd_rules(const struct ieee80211_regdomain *rd) | |
power_rule = ®_rule->power_rule; | |
if (reg_rule->flags & NL80211_RRF_AUTO_BW) | |
- snprintf(bw, sizeof(bw), "%d KHz, %d KHz AUTO", | |
+ snprintf(bw, sizeof(bw), "%d KHz, %u KHz AUTO", | |
freq_range->max_bandwidth_khz, | |
reg_get_max_bandwidth(rd, reg_rule)); | |
else | |
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c | |
index 0bbb283f23c96..046d3fee66a90 100644 | |
--- a/net/x25/af_x25.c | |
+++ b/net/x25/af_x25.c | |
@@ -825,7 +825,7 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr, | |
sock->state = SS_CONNECTED; | |
rc = 0; | |
out_put_neigh: | |
- if (rc) { | |
+ if (rc && x25->neighbour) { | |
read_lock_bh(&x25_list_lock); | |
x25_neigh_put(x25->neighbour); | |
x25->neighbour = NULL; | |
diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c | |
index a8f66112c52b4..0bb7963b9f6bc 100644 | |
--- a/net/xfrm/xfrm_interface.c | |
+++ b/net/xfrm/xfrm_interface.c | |
@@ -830,14 +830,14 @@ static struct xfrm6_tunnel xfrmi_ipv6_handler __read_mostly = { | |
.handler = xfrmi6_rcv_tunnel, | |
.cb_handler = xfrmi_rcv_cb, | |
.err_handler = xfrmi6_err, | |
- .priority = -1, | |
+ .priority = 2, | |
}; | |
static struct xfrm6_tunnel xfrmi_ip6ip_handler __read_mostly = { | |
.handler = xfrmi6_rcv_tunnel, | |
.cb_handler = xfrmi_rcv_cb, | |
.err_handler = xfrmi6_err, | |
- .priority = -1, | |
+ .priority = 2, | |
}; | |
#endif | |
@@ -875,14 +875,14 @@ static struct xfrm_tunnel xfrmi_ipip_handler __read_mostly = { | |
.handler = xfrmi4_rcv_tunnel, | |
.cb_handler = xfrmi_rcv_cb, | |
.err_handler = xfrmi4_err, | |
- .priority = -1, | |
+ .priority = 3, | |
}; | |
static struct xfrm_tunnel xfrmi_ipip6_handler __read_mostly = { | |
.handler = xfrmi4_rcv_tunnel, | |
.cb_handler = xfrmi_rcv_cb, | |
.err_handler = xfrmi4_err, | |
- .priority = -1, | |
+ .priority = 2, | |
}; | |
#endif | |
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c | |
index efc89a92961df..ee6ac32bb06d7 100644 | |
--- a/net/xfrm/xfrm_state.c | |
+++ b/net/xfrm/xfrm_state.c | |
@@ -2004,6 +2004,7 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high) | |
int err = -ENOENT; | |
__be32 minspi = htonl(low); | |
__be32 maxspi = htonl(high); | |
+ __be32 newspi = 0; | |
u32 mark = x->mark.v & x->mark.m; | |
spin_lock_bh(&x->lock); | |
@@ -2022,21 +2023,22 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high) | |
xfrm_state_put(x0); | |
goto unlock; | |
} | |
- x->id.spi = minspi; | |
+ newspi = minspi; | |
} else { | |
u32 spi = 0; | |
for (h = 0; h < high-low+1; h++) { | |
spi = low + prandom_u32()%(high-low+1); | |
x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family); | |
if (x0 == NULL) { | |
- x->id.spi = htonl(spi); | |
+ newspi = htonl(spi); | |
break; | |
} | |
xfrm_state_put(x0); | |
} | |
} | |
- if (x->id.spi) { | |
+ if (newspi) { | |
spin_lock_bh(&net->xfrm.xfrm_state_lock); | |
+ x->id.spi = newspi; | |
h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family); | |
hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h); | |
spin_unlock_bh(&net->xfrm.xfrm_state_lock); | |
diff --git a/security/selinux/ibpkey.c b/security/selinux/ibpkey.c | |
index f68a7617cfb95..3a63a989e55ee 100644 | |
--- a/security/selinux/ibpkey.c | |
+++ b/security/selinux/ibpkey.c | |
@@ -151,8 +151,10 @@ static int sel_ib_pkey_sid_slow(u64 subnet_prefix, u16 pkey_num, u32 *sid) | |
* is valid, it just won't be added to the cache. | |
*/ | |
new = kzalloc(sizeof(*new), GFP_ATOMIC); | |
- if (!new) | |
+ if (!new) { | |
+ ret = -ENOMEM; | |
goto out; | |
+ } | |
new->psec.subnet_prefix = subnet_prefix; | |
new->psec.pkey = pkey_num; | |
diff --git a/sound/hda/ext/hdac_ext_controller.c b/sound/hda/ext/hdac_ext_controller.c | |
index 4d060d5b1db6d..b0c0ef824d7d9 100644 | |
--- a/sound/hda/ext/hdac_ext_controller.c | |
+++ b/sound/hda/ext/hdac_ext_controller.c | |
@@ -148,6 +148,8 @@ struct hdac_ext_link *snd_hdac_ext_bus_get_link(struct hdac_bus *bus, | |
return NULL; | |
if (bus->idx != bus_idx) | |
return NULL; | |
+ if (addr < 0 || addr > 31) | |
+ return NULL; | |
list_for_each_entry(hlink, &bus->hlink_list, list) { | |
for (i = 0; i < HDA_MAX_CODECS; i++) { | |
diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h | |
index be63ead8161f8..68f9668788ea2 100644 | |
--- a/sound/pci/hda/hda_controller.h | |
+++ b/sound/pci/hda/hda_controller.h | |
@@ -41,7 +41,7 @@ | |
/* 24 unused */ | |
#define AZX_DCAPS_COUNT_LPIB_DELAY (1 << 25) /* Take LPIB as delay */ | |
#define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */ | |
-#define AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP (1 << 27) /* Workaround for spurious wakeups after suspend */ | |
+/* 27 unused */ | |
#define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28) /* CORBRP clears itself after reset */ | |
#define AZX_DCAPS_NO_MSI64 (1 << 29) /* Stick to 32-bit MSIs */ | |
#define AZX_DCAPS_SEPARATE_STREAM_TAG (1 << 30) /* capture and playback use separate stream tag */ | |
@@ -143,6 +143,7 @@ struct azx { | |
unsigned int align_buffer_size:1; | |
unsigned int region_requested:1; | |
unsigned int disabled:1; /* disabled by vga_switcheroo */ | |
+ unsigned int pm_prepared:1; | |
/* GTS present */ | |
unsigned int gts_present:1; | |
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c | |
index 476a8b871daa1..0ae0290eb2bfd 100644 | |
--- a/sound/pci/hda/hda_intel.c | |
+++ b/sound/pci/hda/hda_intel.c | |
@@ -297,8 +297,7 @@ enum { | |
/* PCH for HSW/BDW; with runtime PM */ | |
/* no i915 binding for this as HSW/BDW has another controller for HDMI */ | |
#define AZX_DCAPS_INTEL_PCH \ | |
- (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME |\ | |
- AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP) | |
+ (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME) | |
/* HSW HDMI */ | |
#define AZX_DCAPS_INTEL_HASWELL \ | |
@@ -984,7 +983,7 @@ static void __azx_runtime_suspend(struct azx *chip) | |
display_power(chip, false); | |
} | |
-static void __azx_runtime_resume(struct azx *chip, bool from_rt) | |
+static void __azx_runtime_resume(struct azx *chip) | |
{ | |
struct hda_intel *hda = container_of(chip, struct hda_intel, chip); | |
struct hdac_bus *bus = azx_bus(chip); | |
@@ -1001,7 +1000,8 @@ static void __azx_runtime_resume(struct azx *chip, bool from_rt) | |
azx_init_pci(chip); | |
hda_intel_init_chip(chip, true); | |
- if (from_rt) { | |
+ /* Avoid codec resume if runtime resume is for system suspend */ | |
+ if (!chip->pm_prepared) { | |
list_for_each_codec(codec, &chip->bus) { | |
if (codec->relaxed_resume) | |
continue; | |
@@ -1017,6 +1017,29 @@ static void __azx_runtime_resume(struct azx *chip, bool from_rt) | |
} | |
#ifdef CONFIG_PM_SLEEP | |
+static int azx_prepare(struct device *dev) | |
+{ | |
+ struct snd_card *card = dev_get_drvdata(dev); | |
+ struct azx *chip; | |
+ | |
+ chip = card->private_data; | |
+ chip->pm_prepared = 1; | |
+ | |
+ /* HDA controller always requires different WAKEEN for runtime suspend | |
+ * and system suspend, so don't use direct-complete here. | |
+ */ | |
+ return 0; | |
+} | |
+ | |
+static void azx_complete(struct device *dev) | |
+{ | |
+ struct snd_card *card = dev_get_drvdata(dev); | |
+ struct azx *chip; | |
+ | |
+ chip = card->private_data; | |
+ chip->pm_prepared = 0; | |
+} | |
+ | |
static int azx_suspend(struct device *dev) | |
{ | |
struct snd_card *card = dev_get_drvdata(dev); | |
@@ -1028,15 +1051,7 @@ static int azx_suspend(struct device *dev) | |
chip = card->private_data; | |
bus = azx_bus(chip); | |
- snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); | |
- /* An ugly workaround: direct call of __azx_runtime_suspend() and | |
- * __azx_runtime_resume() for old Intel platforms that suffer from | |
- * spurious wakeups after S3 suspend | |
- */ | |
- if (chip->driver_caps & AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP) | |
- __azx_runtime_suspend(chip); | |
- else | |
- pm_runtime_force_suspend(dev); | |
+ __azx_runtime_suspend(chip); | |
if (bus->irq >= 0) { | |
free_irq(bus->irq, chip); | |
bus->irq = -1; | |
@@ -1065,11 +1080,7 @@ static int azx_resume(struct device *dev) | |
if (azx_acquire_irq(chip, 1) < 0) | |
return -EIO; | |
- if (chip->driver_caps & AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP) | |
- __azx_runtime_resume(chip, false); | |
- else | |
- pm_runtime_force_resume(dev); | |
- snd_power_change_state(card, SNDRV_CTL_POWER_D0); | |
+ __azx_runtime_resume(chip); | |
trace_azx_resume(chip); | |
return 0; | |
@@ -1117,10 +1128,7 @@ static int azx_runtime_suspend(struct device *dev) | |
chip = card->private_data; | |
/* enable controller wake up event */ | |
- if (snd_power_get_state(card) == SNDRV_CTL_POWER_D0) { | |
- azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) | | |
- STATESTS_INT_MASK); | |
- } | |
+ azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) | STATESTS_INT_MASK); | |
__azx_runtime_suspend(chip); | |
trace_azx_runtime_suspend(chip); | |
@@ -1131,18 +1139,14 @@ static int azx_runtime_resume(struct device *dev) | |
{ | |
struct snd_card *card = dev_get_drvdata(dev); | |
struct azx *chip; | |
- bool from_rt = snd_power_get_state(card) == SNDRV_CTL_POWER_D0; | |
if (!azx_is_pm_ready(card)) | |
return 0; | |
chip = card->private_data; | |
- __azx_runtime_resume(chip, from_rt); | |
+ __azx_runtime_resume(chip); | |
/* disable controller Wake Up event*/ | |
- if (from_rt) { | |
- azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) & | |
- ~STATESTS_INT_MASK); | |
- } | |
+ azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) & ~STATESTS_INT_MASK); | |
trace_azx_runtime_resume(chip); | |
return 0; | |
@@ -1176,6 +1180,8 @@ static int azx_runtime_idle(struct device *dev) | |
static const struct dev_pm_ops azx_pm = { | |
SET_SYSTEM_SLEEP_PM_OPS(azx_suspend, azx_resume) | |
#ifdef CONFIG_PM_SLEEP | |
+ .prepare = azx_prepare, | |
+ .complete = azx_complete, | |
.freeze_noirq = azx_freeze_noirq, | |
.thaw_noirq = azx_thaw_noirq, | |
#endif | |
@@ -2355,6 +2361,7 @@ static int azx_probe_continue(struct azx *chip) | |
if (azx_has_pm_runtime(chip)) { | |
pm_runtime_use_autosuspend(&pci->dev); | |
+ pm_runtime_allow(&pci->dev); | |
pm_runtime_put_autosuspend(&pci->dev); | |
} | |
diff --git a/sound/soc/codecs/cs42l51.c b/sound/soc/codecs/cs42l51.c | |
index 764f2ef8f59df..2b617993b0adb 100644 | |
--- a/sound/soc/codecs/cs42l51.c | |
+++ b/sound/soc/codecs/cs42l51.c | |
@@ -245,8 +245,28 @@ static const struct snd_soc_dapm_widget cs42l51_dapm_widgets[] = { | |
&cs42l51_adcr_mux_controls), | |
}; | |
+static int mclk_event(struct snd_soc_dapm_widget *w, | |
+ struct snd_kcontrol *kcontrol, int event) | |
+{ | |
+ struct snd_soc_component *comp = snd_soc_dapm_to_component(w->dapm); | |
+ struct cs42l51_private *cs42l51 = snd_soc_component_get_drvdata(comp); | |
+ | |
+ switch (event) { | |
+ case SND_SOC_DAPM_PRE_PMU: | |
+ return clk_prepare_enable(cs42l51->mclk_handle); | |
+ case SND_SOC_DAPM_POST_PMD: | |
+ /* Delay mclk shutdown to fulfill power-down sequence requirements */ | |
+ msleep(20); | |
+ clk_disable_unprepare(cs42l51->mclk_handle); | |
+ break; | |
+ } | |
+ | |
+ return 0; | |
+} | |
+ | |
static const struct snd_soc_dapm_widget cs42l51_dapm_mclk_widgets[] = { | |
- SND_SOC_DAPM_CLOCK_SUPPLY("MCLK") | |
+ SND_SOC_DAPM_SUPPLY("MCLK", SND_SOC_NOPM, 0, 0, mclk_event, | |
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), | |
}; | |
static const struct snd_soc_dapm_route cs42l51_routes[] = { | |
diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c | |
index f2d9d52ee171b..4d2b1ec7c03bb 100644 | |
--- a/sound/soc/codecs/wcd9335.c | |
+++ b/sound/soc/codecs/wcd9335.c | |
@@ -618,7 +618,7 @@ static const char * const sb_tx8_mux_text[] = { | |
"ZERO", "RX_MIX_TX8", "DEC8", "DEC8_192" | |
}; | |
-static const DECLARE_TLV_DB_SCALE(digital_gain, 0, 1, 0); | |
+static const DECLARE_TLV_DB_SCALE(digital_gain, -8400, 100, -8400); | |
static const DECLARE_TLV_DB_SCALE(line_gain, 0, 7, 1); | |
static const DECLARE_TLV_DB_SCALE(analog_gain, 0, 25, 1); | |
static const DECLARE_TLV_DB_SCALE(ear_pa_gain, 0, 150, 0); | |
diff --git a/sound/soc/codecs/wcd934x.c b/sound/soc/codecs/wcd934x.c | |
index 35697b072367a..40f682f5dab8b 100644 | |
--- a/sound/soc/codecs/wcd934x.c | |
+++ b/sound/soc/codecs/wcd934x.c | |
@@ -551,7 +551,7 @@ struct wcd_iir_filter_ctl { | |
struct soc_bytes_ext bytes_ext; | |
}; | |
-static const DECLARE_TLV_DB_SCALE(digital_gain, 0, 1, 0); | |
+static const DECLARE_TLV_DB_SCALE(digital_gain, -8400, 100, -8400); | |
static const DECLARE_TLV_DB_SCALE(line_gain, 0, 7, 1); | |
static const DECLARE_TLV_DB_SCALE(analog_gain, 0, 25, 1); | |
static const DECLARE_TLV_DB_SCALE(ear_pa_gain, 0, 150, 0); | |
diff --git a/sound/soc/codecs/wsa881x.c b/sound/soc/codecs/wsa881x.c | |
index d39d479e23786..5456124457a7c 100644 | |
--- a/sound/soc/codecs/wsa881x.c | |
+++ b/sound/soc/codecs/wsa881x.c | |
@@ -1026,6 +1026,8 @@ static struct snd_soc_dai_driver wsa881x_dais[] = { | |
.id = 0, | |
.playback = { | |
.stream_name = "SPKR Playback", | |
+ .rates = SNDRV_PCM_RATE_48000, | |
+ .formats = SNDRV_PCM_FMTBIT_S16_LE, | |
.rate_max = 48000, | |
.rate_min = 48000, | |
.channels_min = 1, | |
diff --git a/sound/soc/intel/boards/kbl_rt5663_max98927.c b/sound/soc/intel/boards/kbl_rt5663_max98927.c | |
index 3ea4602dfb3ee..9a4b3d0973f65 100644 | |
--- a/sound/soc/intel/boards/kbl_rt5663_max98927.c | |
+++ b/sound/soc/intel/boards/kbl_rt5663_max98927.c | |
@@ -401,17 +401,40 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd, | |
struct snd_interval *chan = hw_param_interval(params, | |
SNDRV_PCM_HW_PARAM_CHANNELS); | |
struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); | |
- struct snd_soc_dpcm *dpcm = container_of( | |
- params, struct snd_soc_dpcm, hw_params); | |
- struct snd_soc_dai_link *fe_dai_link = dpcm->fe->dai_link; | |
- struct snd_soc_dai_link *be_dai_link = dpcm->be->dai_link; | |
+ struct snd_soc_dpcm *dpcm, *rtd_dpcm = NULL; | |
+ | |
+ /* | |
+ * The following loop will be called only for playback stream | |
+ * In this platform, there is only one playback device on every SSP | |
+ */ | |
+ for_each_dpcm_fe(rtd, SNDRV_PCM_STREAM_PLAYBACK, dpcm) { | |
+ rtd_dpcm = dpcm; | |
+ break; | |
+ } | |
+ | |
+ /* | |
+ * This following loop will be called only for capture stream | |
+ * In this platform, there is only one capture device on every SSP | |
+ */ | |
+ for_each_dpcm_fe(rtd, SNDRV_PCM_STREAM_CAPTURE, dpcm) { | |
+ rtd_dpcm = dpcm; | |
+ break; | |
+ } | |
+ | |
+ if (!rtd_dpcm) | |
+ return -EINVAL; | |
+ | |
+ /* | |
+ * The above 2 loops are mutually exclusive based on the stream direction, | |
+ * thus rtd_dpcm variable will never be overwritten | |
+ */ | |
/* | |
* The ADSP will convert the FE rate to 48k, stereo, 24 bit | |
*/ | |
- if (!strcmp(fe_dai_link->name, "Kbl Audio Port") || | |
- !strcmp(fe_dai_link->name, "Kbl Audio Headset Playback") || | |
- !strcmp(fe_dai_link->name, "Kbl Audio Capture Port")) { | |
+ if (!strcmp(rtd_dpcm->fe->dai_link->name, "Kbl Audio Port") || | |
+ !strcmp(rtd_dpcm->fe->dai_link->name, "Kbl Audio Headset Playback") || | |
+ !strcmp(rtd_dpcm->fe->dai_link->name, "Kbl Audio Capture Port")) { | |
rate->min = rate->max = 48000; | |
chan->min = chan->max = 2; | |
snd_mask_none(fmt); | |
@@ -421,7 +444,7 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd, | |
* The speaker on the SSP0 supports S16_LE and not S24_LE. | |
* thus changing the mask here | |
*/ | |
- if (!strcmp(be_dai_link->name, "SSP0-Codec")) | |
+ if (!strcmp(rtd_dpcm->be->dai_link->name, "SSP0-Codec")) | |
snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE); | |
return 0; | |
diff --git a/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c b/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c | |
index a6c690c5308d3..58b76e985f7f3 100644 | |
--- a/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c | |
+++ b/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c | |
@@ -624,15 +624,34 @@ static struct snd_soc_codec_conf mt8183_da7219_rt1015_codec_conf[] = { | |
}, | |
}; | |
+static const struct snd_kcontrol_new mt8183_da7219_rt1015_snd_controls[] = { | |
+ SOC_DAPM_PIN_SWITCH("Left Spk"), | |
+ SOC_DAPM_PIN_SWITCH("Right Spk"), | |
+}; | |
+ | |
+static const | |
+struct snd_soc_dapm_widget mt8183_da7219_rt1015_dapm_widgets[] = { | |
+ SND_SOC_DAPM_SPK("Left Spk", NULL), | |
+ SND_SOC_DAPM_SPK("Right Spk", NULL), | |
+ SND_SOC_DAPM_PINCTRL("TDM_OUT_PINCTRL", | |
+ "aud_tdm_out_on", "aud_tdm_out_off"), | |
+}; | |
+ | |
+static const struct snd_soc_dapm_route mt8183_da7219_rt1015_dapm_routes[] = { | |
+ {"Left Spk", NULL, "Left SPO"}, | |
+ {"Right Spk", NULL, "Right SPO"}, | |
+ {"I2S Playback", NULL, "TDM_OUT_PINCTRL"}, | |
+}; | |
+ | |
static struct snd_soc_card mt8183_da7219_rt1015_card = { | |
.name = "mt8183_da7219_rt1015", | |
.owner = THIS_MODULE, | |
- .controls = mt8183_da7219_max98357_snd_controls, | |
- .num_controls = ARRAY_SIZE(mt8183_da7219_max98357_snd_controls), | |
- .dapm_widgets = mt8183_da7219_max98357_dapm_widgets, | |
- .num_dapm_widgets = ARRAY_SIZE(mt8183_da7219_max98357_dapm_widgets), | |
- .dapm_routes = mt8183_da7219_max98357_dapm_routes, | |
- .num_dapm_routes = ARRAY_SIZE(mt8183_da7219_max98357_dapm_routes), | |
+ .controls = mt8183_da7219_rt1015_snd_controls, | |
+ .num_controls = ARRAY_SIZE(mt8183_da7219_rt1015_snd_controls), | |
+ .dapm_widgets = mt8183_da7219_rt1015_dapm_widgets, | |
+ .num_dapm_widgets = ARRAY_SIZE(mt8183_da7219_rt1015_dapm_widgets), | |
+ .dapm_routes = mt8183_da7219_rt1015_dapm_routes, | |
+ .num_dapm_routes = ARRAY_SIZE(mt8183_da7219_rt1015_dapm_routes), | |
.dai_link = mt8183_da7219_dai_links, | |
.num_links = ARRAY_SIZE(mt8183_da7219_dai_links), | |
.aux_dev = &mt8183_da7219_max98357_headset_dev, | |
diff --git a/sound/soc/qcom/sdm845.c b/sound/soc/qcom/sdm845.c | |
index ab1bf23c21a68..6c2760e27ea6f 100644 | |
--- a/sound/soc/qcom/sdm845.c | |
+++ b/sound/soc/qcom/sdm845.c | |
@@ -17,6 +17,7 @@ | |
#include "qdsp6/q6afe.h" | |
#include "../codecs/rt5663.h" | |
+#define DRIVER_NAME "sdm845" | |
#define DEFAULT_SAMPLE_RATE_48K 48000 | |
#define DEFAULT_MCLK_RATE 24576000 | |
#define TDM_BCLK_RATE 6144000 | |
@@ -552,6 +553,7 @@ static int sdm845_snd_platform_probe(struct platform_device *pdev) | |
if (!data) | |
return -ENOMEM; | |
+ card->driver_name = DRIVER_NAME; | |
card->dapm_widgets = sdm845_snd_widgets; | |
card->num_dapm_widgets = ARRAY_SIZE(sdm845_snd_widgets); | |
card->dev = dev; | |
diff --git a/sound/soc/sof/loader.c b/sound/soc/sof/loader.c | |
index b94fa5f5d4808..c90c3f3a3b3ee 100644 | |
--- a/sound/soc/sof/loader.c | |
+++ b/sound/soc/sof/loader.c | |
@@ -118,6 +118,11 @@ int snd_sof_fw_parse_ext_data(struct snd_sof_dev *sdev, u32 bar, u32 offset) | |
case SOF_IPC_EXT_CC_INFO: | |
ret = get_cc_info(sdev, ext_hdr); | |
break; | |
+ case SOF_IPC_EXT_UNUSED: | |
+ case SOF_IPC_EXT_PROBE_INFO: | |
+ case SOF_IPC_EXT_USER_ABI_INFO: | |
+ /* They are supported but we don't do anything here */ | |
+ break; | |
default: | |
dev_warn(sdev->dev, "warning: unknown ext header type %d size 0x%x\n", | |
ext_hdr->type, ext_hdr->hdr.size); | |
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c | |
index d393eb8263a60..994506540e564 100644 | |
--- a/tools/bpf/bpftool/prog.c | |
+++ b/tools/bpf/bpftool/prog.c | |
@@ -741,7 +741,7 @@ static int parse_attach_detach_args(int argc, char **argv, int *progfd, | |
} | |
if (*attach_type == BPF_FLOW_DISSECTOR) { | |
- *mapfd = -1; | |
+ *mapfd = 0; | |
return 0; | |
} | |
diff --git a/tools/lib/bpf/hashmap.h b/tools/lib/bpf/hashmap.h | |
index e0af36b0e5d83..6a3c3d8bb4ab8 100644 | |
--- a/tools/lib/bpf/hashmap.h | |
+++ b/tools/lib/bpf/hashmap.h | |
@@ -15,6 +15,9 @@ | |
static inline size_t hash_bits(size_t h, int bits) | |
{ | |
/* shuffle bits and return requested number of upper bits */ | |
+ if (bits == 0) | |
+ return 0; | |
+ | |
#if (__SIZEOF_SIZE_T__ == __SIZEOF_LONG_LONG__) | |
/* LP64 case */ | |
return (h * 11400714819323198485llu) >> (__SIZEOF_LONG_LONG__ * 8 - bits); | |
@@ -162,17 +165,17 @@ bool hashmap__find(const struct hashmap *map, const void *key, void **value); | |
* @key: key to iterate entries for | |
*/ | |
#define hashmap__for_each_key_entry(map, cur, _key) \ | |
- for (cur = ({ size_t bkt = hash_bits(map->hash_fn((_key), map->ctx),\ | |
- map->cap_bits); \ | |
- map->buckets ? map->buckets[bkt] : NULL; }); \ | |
+ for (cur = map->buckets \ | |
+ ? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \ | |
+ : NULL; \ | |
cur; \ | |
cur = cur->next) \ | |
if (map->equal_fn(cur->key, (_key), map->ctx)) | |
#define hashmap__for_each_key_entry_safe(map, cur, tmp, _key) \ | |
- for (cur = ({ size_t bkt = hash_bits(map->hash_fn((_key), map->ctx),\ | |
- map->cap_bits); \ | |
- cur = map->buckets ? map->buckets[bkt] : NULL; }); \ | |
+ for (cur = map->buckets \ | |
+ ? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \ | |
+ : NULL; \ | |
cur && ({ tmp = cur->next; true; }); \ | |
cur = tmp) \ | |
if (map->equal_fn(cur->key, (_key), map->ctx)) | |
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c | |
index 44a75f234db17..de80534473afa 100644 | |
--- a/tools/perf/builtin-trace.c | |
+++ b/tools/perf/builtin-trace.c | |
@@ -4639,9 +4639,9 @@ do_concat: | |
err = 0; | |
if (lists[0]) { | |
- struct option o = OPT_CALLBACK('e', "event", &trace->evlist, "event", | |
- "event selector. use 'perf list' to list available events", | |
- parse_events_option); | |
+ struct option o = { | |
+ .value = &trace->evlist, | |
+ }; | |
err = parse_events_option(&o, lists[0], 0); | |
} | |
out: | |
@@ -4655,9 +4655,12 @@ static int trace__parse_cgroups(const struct option *opt, const char *str, int u | |
{ | |
struct trace *trace = opt->value; | |
- if (!list_empty(&trace->evlist->core.entries)) | |
- return parse_cgroups(opt, str, unset); | |
- | |
+ if (!list_empty(&trace->evlist->core.entries)) { | |
+ struct option o = { | |
+ .value = &trace->evlist, | |
+ }; | |
+ return parse_cgroups(&o, str, unset); | |
+ } | |
trace->cgroup = evlist__findnew_cgroup(trace->evlist, str); | |
return 0; | |
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c | |
index 739516fdf6e38..dd5414d4d360f 100644 | |
--- a/tools/perf/util/scripting-engines/trace-event-python.c | |
+++ b/tools/perf/util/scripting-engines/trace-event-python.c | |
@@ -1592,7 +1592,6 @@ static void _free_command_line(wchar_t **command_line, int num) | |
static int python_start_script(const char *script, int argc, const char **argv) | |
{ | |
struct tables *tables = &tables_global; | |
- PyMODINIT_FUNC (*initfunc)(void); | |
#if PY_MAJOR_VERSION < 3 | |
const char **command_line; | |
#else | |
@@ -1607,20 +1606,18 @@ static int python_start_script(const char *script, int argc, const char **argv) | |
FILE *fp; | |
#if PY_MAJOR_VERSION < 3 | |
- initfunc = initperf_trace_context; | |
command_line = malloc((argc + 1) * sizeof(const char *)); | |
command_line[0] = script; | |
for (i = 1; i < argc + 1; i++) | |
command_line[i] = argv[i - 1]; | |
+ PyImport_AppendInittab(name, initperf_trace_context); | |
#else | |
- initfunc = PyInit_perf_trace_context; | |
command_line = malloc((argc + 1) * sizeof(wchar_t *)); | |
command_line[0] = Py_DecodeLocale(script, NULL); | |
for (i = 1; i < argc + 1; i++) | |
command_line[i] = Py_DecodeLocale(argv[i - 1], NULL); | |
+ PyImport_AppendInittab(name, PyInit_perf_trace_context); | |
#endif | |
- | |
- PyImport_AppendInittab(name, initfunc); | |
Py_Initialize(); | |
#if PY_MAJOR_VERSION < 3 | |
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c | |
index 7a5f03764702b..098080287c687 100644 | |
--- a/tools/perf/util/session.c | |
+++ b/tools/perf/util/session.c | |
@@ -595,6 +595,7 @@ static void perf_event__mmap2_swap(union perf_event *event, | |
event->mmap2.maj = bswap_32(event->mmap2.maj); | |
event->mmap2.min = bswap_32(event->mmap2.min); | |
event->mmap2.ino = bswap_64(event->mmap2.ino); | |
+ event->mmap2.ino_generation = bswap_64(event->mmap2.ino_generation); | |
if (sample_id_all) { | |
void *data = &event->mmap2.filename; | |
@@ -710,6 +711,18 @@ static void perf_event__namespaces_swap(union perf_event *event, | |
swap_sample_id_all(event, &event->namespaces.link_info[i]); | |
} | |
+static void perf_event__cgroup_swap(union perf_event *event, bool sample_id_all) | |
+{ | |
+ event->cgroup.id = bswap_64(event->cgroup.id); | |
+ | |
+ if (sample_id_all) { | |
+ void *data = &event->cgroup.path; | |
+ | |
+ data += PERF_ALIGN(strlen(data) + 1, sizeof(u64)); | |
+ swap_sample_id_all(event, data); | |
+ } | |
+} | |
+ | |
static u8 revbyte(u8 b) | |
{ | |
int rev = (b >> 4) | ((b & 0xf) << 4); | |
@@ -952,6 +965,7 @@ static perf_event__swap_op perf_event__swap_ops[] = { | |
[PERF_RECORD_SWITCH] = perf_event__switch_swap, | |
[PERF_RECORD_SWITCH_CPU_WIDE] = perf_event__switch_swap, | |
[PERF_RECORD_NAMESPACES] = perf_event__namespaces_swap, | |
+ [PERF_RECORD_CGROUP] = perf_event__cgroup_swap, | |
[PERF_RECORD_TEXT_POKE] = perf_event__text_poke_swap, | |
[PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap, | |
[PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap, | |
diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py | |
index f13e0c0d66639..91036d5d51cf6 100644 | |
--- a/tools/testing/kunit/kunit_parser.py | |
+++ b/tools/testing/kunit/kunit_parser.py | |
@@ -65,7 +65,6 @@ def isolate_kunit_output(kernel_output): | |
def raw_output(kernel_output): | |
for line in kernel_output: | |
print(line) | |
- yield line | |
DIVIDER = '=' * 60 | |
@@ -233,7 +232,7 @@ def parse_test_suite(lines: List[str]) -> TestSuite: | |
return None | |
test_suite.name = name | |
expected_test_case_num = parse_subtest_plan(lines) | |
- if not expected_test_case_num: | |
+ if expected_test_case_num is None: | |
return None | |
while expected_test_case_num > 0: | |
test_case = parse_test_case(lines) | |
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile | |
index fc946b7ac288d..daf186f88a636 100644 | |
--- a/tools/testing/selftests/bpf/Makefile | |
+++ b/tools/testing/selftests/bpf/Makefile | |
@@ -133,7 +133,7 @@ $(OUTPUT)/%:%.c | |
$(OUTPUT)/urandom_read: urandom_read.c | |
$(call msg,BINARY,,$@) | |
- $(Q)$(CC) $(LDFLAGS) -o $@ $< $(LDLIBS) -Wl,--build-id | |
+ $(Q)$(CC) $(LDFLAGS) -o $@ $< $(LDLIBS) -Wl,--build-id=sha1 | |
$(OUTPUT)/test_stub.o: test_stub.c $(BPFOBJ) | |
$(call msg,CC,,$@) | |
diff --git a/tools/testing/selftests/bpf/prog_tests/map_init.c b/tools/testing/selftests/bpf/prog_tests/map_init.c | |
new file mode 100644 | |
index 0000000000000..14a31109dd0e0 | |
--- /dev/null | |
+++ b/tools/testing/selftests/bpf/prog_tests/map_init.c | |
@@ -0,0 +1,214 @@ | |
+// SPDX-License-Identifier: GPL-2.0-only | |
+/* Copyright (c) 2020 Tessares SA <http://www.tessares.net> */ | |
+ | |
+#include <test_progs.h> | |
+#include "test_map_init.skel.h" | |
+ | |
+#define TEST_VALUE 0x1234 | |
+#define FILL_VALUE 0xdeadbeef | |
+ | |
+static int nr_cpus; | |
+static int duration; | |
+ | |
+typedef unsigned long long map_key_t; | |
+typedef unsigned long long map_value_t; | |
+typedef struct { | |
+ map_value_t v; /* padding */ | |
+} __bpf_percpu_val_align pcpu_map_value_t; | |
+ | |
+ | |
+static int map_populate(int map_fd, int num) | |
+{ | |
+ pcpu_map_value_t value[nr_cpus]; | |
+ int i, err; | |
+ map_key_t key; | |
+ | |
+ for (i = 0; i < nr_cpus; i++) | |
+ bpf_percpu(value, i) = FILL_VALUE; | |
+ | |
+ for (key = 1; key <= num; key++) { | |
+ err = bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST); | |
+ if (!ASSERT_OK(err, "bpf_map_update_elem")) | |
+ return -1; | |
+ } | |
+ | |
+ return 0; | |
+} | |
+ | |
+static struct test_map_init *setup(enum bpf_map_type map_type, int map_sz, | |
+ int *map_fd, int populate) | |
+{ | |
+ struct test_map_init *skel; | |
+ int err; | |
+ | |
+ skel = test_map_init__open(); | |
+ if (!ASSERT_OK_PTR(skel, "skel_open")) | |
+ return NULL; | |
+ | |
+ err = bpf_map__set_type(skel->maps.hashmap1, map_type); | |
+ if (!ASSERT_OK(err, "bpf_map__set_type")) | |
+ goto error; | |
+ | |
+ err = bpf_map__set_max_entries(skel->maps.hashmap1, map_sz); | |
+ if (!ASSERT_OK(err, "bpf_map__set_max_entries")) | |
+ goto error; | |
+ | |
+ err = test_map_init__load(skel); | |
+ if (!ASSERT_OK(err, "skel_load")) | |
+ goto error; | |
+ | |
+ *map_fd = bpf_map__fd(skel->maps.hashmap1); | |
+ if (CHECK(*map_fd < 0, "bpf_map__fd", "failed\n")) | |
+ goto error; | |
+ | |
+ err = map_populate(*map_fd, populate); | |
+ if (!ASSERT_OK(err, "map_populate")) | |
+ goto error_map; | |
+ | |
+ return skel; | |
+ | |
+error_map: | |
+ close(*map_fd); | |
+error: | |
+ test_map_init__destroy(skel); | |
+ return NULL; | |
+} | |
+ | |
+/* executes bpf program that updates map with key, value */ | |
+static int prog_run_insert_elem(struct test_map_init *skel, map_key_t key, | |
+ map_value_t value) | |
+{ | |
+ struct test_map_init__bss *bss; | |
+ | |
+ bss = skel->bss; | |
+ | |
+ bss->inKey = key; | |
+ bss->inValue = value; | |
+ bss->inPid = getpid(); | |
+ | |
+ if (!ASSERT_OK(test_map_init__attach(skel), "skel_attach")) | |
+ return -1; | |
+ | |
+ /* Let tracepoint trigger */ | |
+ syscall(__NR_getpgid); | |
+ | |
+ test_map_init__detach(skel); | |
+ | |
+ return 0; | |
+} | |
+ | |
+static int check_values_one_cpu(pcpu_map_value_t *value, map_value_t expected) | |
+{ | |
+ int i, nzCnt = 0; | |
+ map_value_t val; | |
+ | |
+ for (i = 0; i < nr_cpus; i++) { | |
+ val = bpf_percpu(value, i); | |
+ if (val) { | |
+ if (CHECK(val != expected, "map value", | |
+ "unexpected for cpu %d: 0x%llx\n", i, val)) | |
+ return -1; | |
+ nzCnt++; | |
+ } | |
+ } | |
+ | |
+ if (CHECK(nzCnt != 1, "map value", "set for %d CPUs instead of 1!\n", | |
+ nzCnt)) | |
+ return -1; | |
+ | |
+ return 0; | |
+} | |
+ | |
+/* Add key=1 elem with values set for all CPUs | |
+ * Delete elem key=1 | |
+ * Run bpf prog that inserts new key=1 elem with value=0x1234 | |
+ * (bpf prog can only set value for current CPU) | |
+ * Lookup Key=1 and check value is as expected for all CPUs: | |
+ * value set by bpf prog for one CPU, 0 for all others | |
+ */ | |
+static void test_pcpu_map_init(void) | |
+{ | |
+ pcpu_map_value_t value[nr_cpus]; | |
+ struct test_map_init *skel; | |
+ int map_fd, err; | |
+ map_key_t key; | |
+ | |
+ /* max 1 elem in map so insertion is forced to reuse freed entry */ | |
+ skel = setup(BPF_MAP_TYPE_PERCPU_HASH, 1, &map_fd, 1); | |
+ if (!ASSERT_OK_PTR(skel, "prog_setup")) | |
+ return; | |
+ | |
+ /* delete element so the entry can be re-used*/ | |
+ key = 1; | |
+ err = bpf_map_delete_elem(map_fd, &key); | |
+ if (!ASSERT_OK(err, "bpf_map_delete_elem")) | |
+ goto cleanup; | |
+ | |
+ /* run bpf prog that inserts new elem, re-using the slot just freed */ | |
+ err = prog_run_insert_elem(skel, key, TEST_VALUE); | |
+ if (!ASSERT_OK(err, "prog_run_insert_elem")) | |
+ goto cleanup; | |
+ | |
+ /* check that key=1 was re-created by bpf prog */ | |
+ err = bpf_map_lookup_elem(map_fd, &key, value); | |
+ if (!ASSERT_OK(err, "bpf_map_lookup_elem")) | |
+ goto cleanup; | |
+ | |
+ /* and has expected values */ | |
+ check_values_one_cpu(value, TEST_VALUE); | |
+ | |
+cleanup: | |
+ test_map_init__destroy(skel); | |
+} | |
+ | |
+/* Add key=1 and key=2 elems with values set for all CPUs | |
+ * Run bpf prog that inserts new key=3 elem | |
+ * (only for current cpu; other cpus should have initial value = 0) | |
+ * Lookup Key=1 and check value is as expected for all CPUs | |
+ */ | |
+static void test_pcpu_lru_map_init(void) | |
+{ | |
+ pcpu_map_value_t value[nr_cpus]; | |
+ struct test_map_init *skel; | |
+ int map_fd, err; | |
+ map_key_t key; | |
+ | |
+ /* Set up LRU map with 2 elements, values filled for all CPUs. | |
+ * With these 2 elements, the LRU map is full | |
+ */ | |
+ skel = setup(BPF_MAP_TYPE_LRU_PERCPU_HASH, 2, &map_fd, 2); | |
+ if (!ASSERT_OK_PTR(skel, "prog_setup")) | |
+ return; | |
+ | |
+ /* run bpf prog that inserts new key=3 element, re-using LRU slot */ | |
+ key = 3; | |
+ err = prog_run_insert_elem(skel, key, TEST_VALUE); | |
+ if (!ASSERT_OK(err, "prog_run_insert_elem")) | |
+ goto cleanup; | |
+ | |
+ /* check that key=3 replaced one of earlier elements */ | |
+ err = bpf_map_lookup_elem(map_fd, &key, value); | |
+ if (!ASSERT_OK(err, "bpf_map_lookup_elem")) | |
+ goto cleanup; | |
+ | |
+ /* and has expected values */ | |
+ check_values_one_cpu(value, TEST_VALUE); | |
+ | |
+cleanup: | |
+ test_map_init__destroy(skel); | |
+} | |
+ | |
+void test_map_init(void) | |
+{ | |
+ nr_cpus = bpf_num_possible_cpus(); | |
+ if (nr_cpus <= 1) { | |
+ printf("%s:SKIP: >1 cpu needed for this test\n", __func__); | |
+ test__skip(); | |
+ return; | |
+ } | |
+ | |
+ if (test__start_subtest("pcpu_map_init")) | |
+ test_pcpu_map_init(); | |
+ if (test__start_subtest("pcpu_lru_map_init")) | |
+ test_pcpu_lru_map_init(); | |
+} | |
diff --git a/tools/testing/selftests/bpf/progs/test_map_init.c b/tools/testing/selftests/bpf/progs/test_map_init.c | |
new file mode 100644 | |
index 0000000000000..c89d28ead6737 | |
--- /dev/null | |
+++ b/tools/testing/selftests/bpf/progs/test_map_init.c | |
@@ -0,0 +1,33 @@ | |
+// SPDX-License-Identifier: GPL-2.0 | |
+/* Copyright (c) 2020 Tessares SA <http://www.tessares.net> */ | |
+ | |
+#include "vmlinux.h" | |
+#include <bpf/bpf_helpers.h> | |
+ | |
+__u64 inKey = 0; | |
+__u64 inValue = 0; | |
+__u32 inPid = 0; | |
+ | |
+struct { | |
+ __uint(type, BPF_MAP_TYPE_PERCPU_HASH); | |
+ __uint(max_entries, 2); | |
+ __type(key, __u64); | |
+ __type(value, __u64); | |
+} hashmap1 SEC(".maps"); | |
+ | |
+ | |
+SEC("tp/syscalls/sys_enter_getpgid") | |
+int sysenter_getpgid(const void *ctx) | |
+{ | |
+ /* Just do it for once, when called from our own test prog. This | |
+ * ensures the map value is only updated for a single CPU. | |
+ */ | |
+ int cur_pid = bpf_get_current_pid_tgid() >> 32; | |
+ | |
+ if (cur_pid == inPid) | |
+ bpf_map_update_elem(&hashmap1, &inKey, &inValue, BPF_NOEXIST); | |
+ | |
+ return 0; | |
+} | |
+ | |
+char _license[] SEC("license") = "GPL"; | |
diff --git a/tools/testing/selftests/clone3/clone3_cap_checkpoint_restore.c b/tools/testing/selftests/clone3/clone3_cap_checkpoint_restore.c | |
index 9562425aa0a90..614091de4c545 100644 | |
--- a/tools/testing/selftests/clone3/clone3_cap_checkpoint_restore.c | |
+++ b/tools/testing/selftests/clone3/clone3_cap_checkpoint_restore.c | |
@@ -145,7 +145,7 @@ TEST(clone3_cap_checkpoint_restore) | |
test_clone3_supported(); | |
EXPECT_EQ(getuid(), 0) | |
- XFAIL(return, "Skipping all tests as non-root\n"); | |
+ SKIP(return, "Skipping all tests as non-root"); | |
memset(&set_tid, 0, sizeof(set_tid)); | |
diff --git a/tools/testing/selftests/core/close_range_test.c b/tools/testing/selftests/core/close_range_test.c | |
index c99b98b0d461f..575b391ddc78d 100644 | |
--- a/tools/testing/selftests/core/close_range_test.c | |
+++ b/tools/testing/selftests/core/close_range_test.c | |
@@ -44,7 +44,7 @@ TEST(close_range) | |
fd = open("/dev/null", O_RDONLY | O_CLOEXEC); | |
ASSERT_GE(fd, 0) { | |
if (errno == ENOENT) | |
- XFAIL(return, "Skipping test since /dev/null does not exist"); | |
+ SKIP(return, "Skipping test since /dev/null does not exist"); | |
} | |
open_fds[i] = fd; | |
@@ -52,7 +52,7 @@ TEST(close_range) | |
EXPECT_EQ(-1, sys_close_range(open_fds[0], open_fds[100], -1)) { | |
if (errno == ENOSYS) | |
- XFAIL(return, "close_range() syscall not supported"); | |
+ SKIP(return, "close_range() syscall not supported"); | |
} | |
EXPECT_EQ(0, sys_close_range(open_fds[0], open_fds[50], 0)); | |
@@ -108,7 +108,7 @@ TEST(close_range_unshare) | |
fd = open("/dev/null", O_RDONLY | O_CLOEXEC); | |
ASSERT_GE(fd, 0) { | |
if (errno == ENOENT) | |
- XFAIL(return, "Skipping test since /dev/null does not exist"); | |
+ SKIP(return, "Skipping test since /dev/null does not exist"); | |
} | |
open_fds[i] = fd; | |
@@ -197,7 +197,7 @@ TEST(close_range_unshare_capped) | |
fd = open("/dev/null", O_RDONLY | O_CLOEXEC); | |
ASSERT_GE(fd, 0) { | |
if (errno == ENOENT) | |
- XFAIL(return, "Skipping test since /dev/null does not exist"); | |
+ SKIP(return, "Skipping test since /dev/null does not exist"); | |
} | |
open_fds[i] = fd; | |
diff --git a/tools/testing/selftests/filesystems/binderfs/binderfs_test.c b/tools/testing/selftests/filesystems/binderfs/binderfs_test.c | |
index 1d27f52c61e61..477cbb042f5ba 100644 | |
--- a/tools/testing/selftests/filesystems/binderfs/binderfs_test.c | |
+++ b/tools/testing/selftests/filesystems/binderfs/binderfs_test.c | |
@@ -74,7 +74,7 @@ static int __do_binderfs_test(struct __test_metadata *_metadata) | |
ret = mount(NULL, binderfs_mntpt, "binder", 0, 0); | |
EXPECT_EQ(ret, 0) { | |
if (errno == ENODEV) | |
- XFAIL(goto out, "binderfs missing"); | |
+ SKIP(goto out, "binderfs missing"); | |
TH_LOG("%s - Failed to mount binderfs", strerror(errno)); | |
goto rmdir; | |
} | |
@@ -475,10 +475,10 @@ TEST(binderfs_stress) | |
TEST(binderfs_test_privileged) | |
{ | |
if (geteuid() != 0) | |
- XFAIL(return, "Tests are not run as root. Skipping privileged tests"); | |
+ SKIP(return, "Tests are not run as root. Skipping privileged tests"); | |
if (__do_binderfs_test(_metadata)) | |
- XFAIL(return, "The Android binderfs filesystem is not available"); | |
+ SKIP(return, "The Android binderfs filesystem is not available"); | |
} | |
TEST(binderfs_test_unprivileged) | |
@@ -511,7 +511,7 @@ TEST(binderfs_test_unprivileged) | |
ret = wait_for_pid(pid); | |
if (ret) { | |
if (ret == 2) | |
- XFAIL(return, "The Android binderfs filesystem is not available"); | |
+ SKIP(return, "The Android binderfs filesystem is not available"); | |
ASSERT_EQ(ret, 0) { | |
TH_LOG("wait_for_pid() failed"); | |
} | |
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_user.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_user.tc | |
index a30a9c07290d0..d25d01a197781 100644 | |
--- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_user.tc | |
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_user.tc | |
@@ -9,12 +9,16 @@ grep -A10 "fetcharg:" README | grep -q '\[u\]<offset>' || exit_unsupported | |
:;: "user-memory access syntax and ustring working on user memory";: | |
echo 'p:myevent do_sys_open path=+0($arg2):ustring path2=+u0($arg2):string' \ | |
> kprobe_events | |
+echo 'p:myevent2 do_sys_openat2 path=+0($arg2):ustring path2=+u0($arg2):string' \ | |
+ >> kprobe_events | |
grep myevent kprobe_events | \ | |
grep -q 'path=+0($arg2):ustring path2=+u0($arg2):string' | |
echo 1 > events/kprobes/myevent/enable | |
+echo 1 > events/kprobes/myevent2/enable | |
echo > /dev/null | |
echo 0 > events/kprobes/myevent/enable | |
+echo 0 > events/kprobes/myevent2/enable | |
grep myevent trace | grep -q 'path="/dev/null" path2="/dev/null"' | |
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk | |
index 7a17ea8157367..66f3317dc3654 100644 | |
--- a/tools/testing/selftests/lib.mk | |
+++ b/tools/testing/selftests/lib.mk | |
@@ -137,7 +137,7 @@ endif | |
ifeq ($(OVERRIDE_TARGETS),) | |
LOCAL_HDRS := $(selfdir)/kselftest_harness.h $(selfdir)/kselftest.h | |
$(OUTPUT)/%:%.c $(LOCAL_HDRS) | |
- $(LINK.c) $^ $(LDLIBS) -o $@ | |
+ $(LINK.c) $(filter-out $(LOCAL_HDRS),$^) $(LDLIBS) -o $@ | |
$(OUTPUT)/%.o:%.S | |
$(COMPILE.S) $^ -o $@ | |
diff --git a/tools/testing/selftests/pidfd/pidfd_open_test.c b/tools/testing/selftests/pidfd/pidfd_open_test.c | |
index b9fe75fc3e517..8a59438ccc78b 100644 | |
--- a/tools/testing/selftests/pidfd/pidfd_open_test.c | |
+++ b/tools/testing/selftests/pidfd/pidfd_open_test.c | |
@@ -6,7 +6,6 @@ | |
#include <inttypes.h> | |
#include <limits.h> | |
#include <linux/types.h> | |
-#include <linux/wait.h> | |
#include <sched.h> | |
#include <signal.h> | |
#include <stdbool.h> | |
diff --git a/tools/testing/selftests/pidfd/pidfd_poll_test.c b/tools/testing/selftests/pidfd/pidfd_poll_test.c | |
index 4b115444dfe90..6108112753573 100644 | |
--- a/tools/testing/selftests/pidfd/pidfd_poll_test.c | |
+++ b/tools/testing/selftests/pidfd/pidfd_poll_test.c | |
@@ -3,7 +3,6 @@ | |
#define _GNU_SOURCE | |
#include <errno.h> | |
#include <linux/types.h> | |
-#include <linux/wait.h> | |
#include <poll.h> | |
#include <signal.h> | |
#include <stdbool.h> | |
diff --git a/tools/testing/selftests/proc/proc-loadavg-001.c b/tools/testing/selftests/proc/proc-loadavg-001.c | |
index 471e2aa280776..fb4fe9188806e 100644 | |
--- a/tools/testing/selftests/proc/proc-loadavg-001.c | |
+++ b/tools/testing/selftests/proc/proc-loadavg-001.c | |
@@ -14,7 +14,6 @@ | |
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | |
*/ | |
/* Test that /proc/loadavg correctly reports last pid in pid namespace. */ | |
-#define _GNU_SOURCE | |
#include <errno.h> | |
#include <sched.h> | |
#include <sys/types.h> | |
diff --git a/tools/testing/selftests/proc/proc-self-syscall.c b/tools/testing/selftests/proc/proc-self-syscall.c | |
index 9f6d000c02455..8511dcfe67c75 100644 | |
--- a/tools/testing/selftests/proc/proc-self-syscall.c | |
+++ b/tools/testing/selftests/proc/proc-self-syscall.c | |
@@ -13,7 +13,6 @@ | |
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | |
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | |
*/ | |
-#define _GNU_SOURCE | |
#include <unistd.h> | |
#include <sys/syscall.h> | |
#include <sys/types.h> | |
diff --git a/tools/testing/selftests/proc/proc-uptime-002.c b/tools/testing/selftests/proc/proc-uptime-002.c | |
index 30e2b78490898..e7ceabed7f51f 100644 | |
--- a/tools/testing/selftests/proc/proc-uptime-002.c | |
+++ b/tools/testing/selftests/proc/proc-uptime-002.c | |
@@ -15,7 +15,6 @@ | |
*/ | |
// Test that values in /proc/uptime increment monotonically | |
// while shifting across CPUs. | |
-#define _GNU_SOURCE | |
#undef NDEBUG | |
#include <assert.h> | |
#include <unistd.h> | |
diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json b/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json | |
index bb543bf69d694..361235ad574be 100644 | |
--- a/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json | |
+++ b/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json | |
@@ -100,7 +100,7 @@ | |
], | |
"cmdUnderTest": "$TC filter add dev $DEV2 protocol ip pref 1 ingress flower dst_mac e4:11:22:11:4a:51 action drop", | |
"expExitCode": "0", | |
- "verifyCmd": "$TC filter show terse dev $DEV2 ingress", | |
+ "verifyCmd": "$TC -br filter show dev $DEV2 ingress", | |
"matchPattern": "filter protocol ip pref 1 flower.*handle", | |
"matchCount": "1", | |
"teardown": [ | |
@@ -119,7 +119,7 @@ | |
], | |
"cmdUnderTest": "$TC filter add dev $DEV2 protocol ip pref 1 ingress flower dst_mac e4:11:22:11:4a:51 action drop", | |
"expExitCode": "0", | |
- "verifyCmd": "$TC filter show terse dev $DEV2 ingress", | |
+ "verifyCmd": "$TC -br filter show dev $DEV2 ingress", | |
"matchPattern": " dst_mac e4:11:22:11:4a:51", | |
"matchCount": "0", | |
"teardown": [ | |
diff --git a/tools/testing/selftests/wireguard/netns.sh b/tools/testing/selftests/wireguard/netns.sh | |
index d77f4829f1e07..74c69b75f6f5a 100755 | |
--- a/tools/testing/selftests/wireguard/netns.sh | |
+++ b/tools/testing/selftests/wireguard/netns.sh | |
@@ -316,6 +316,14 @@ pp sleep 3 | |
n2 ping -W 1 -c 1 192.168.241.1 | |
n1 wg set wg0 peer "$pub2" persistent-keepalive 0 | |
+# Test that sk_bound_dev_if works | |
+n1 ping -I wg0 -c 1 -W 1 192.168.241.2 | |
+# What about when the mark changes and the packet must be rerouted? | |
+n1 iptables -t mangle -I OUTPUT -j MARK --set-xmark 1 | |
+n1 ping -c 1 -W 1 192.168.241.2 # First the boring case | |
+n1 ping -I wg0 -c 1 -W 1 192.168.241.2 # Then the sk_bound_dev_if case | |
+n1 iptables -t mangle -D OUTPUT -j MARK --set-xmark 1 | |
+ | |
# Test that onion routing works, even when it loops | |
n1 wg set wg0 peer "$pub3" allowed-ips 192.168.242.2/32 endpoint 192.168.241.2:5 | |
ip1 addr add 192.168.242.1/24 dev wg0 | |
diff --git a/tools/testing/selftests/wireguard/qemu/kernel.config b/tools/testing/selftests/wireguard/qemu/kernel.config | |
index d531de13c95b0..4eecb432a66c1 100644 | |
--- a/tools/testing/selftests/wireguard/qemu/kernel.config | |
+++ b/tools/testing/selftests/wireguard/qemu/kernel.config | |
@@ -18,10 +18,12 @@ CONFIG_NF_NAT=y | |
CONFIG_NETFILTER_XTABLES=y | |
CONFIG_NETFILTER_XT_NAT=y | |
CONFIG_NETFILTER_XT_MATCH_LENGTH=y | |
+CONFIG_NETFILTER_XT_MARK=y | |
CONFIG_NF_CONNTRACK_IPV4=y | |
CONFIG_NF_NAT_IPV4=y | |
CONFIG_IP_NF_IPTABLES=y | |
CONFIG_IP_NF_FILTER=y | |
+CONFIG_IP_NF_MANGLE=y | |
CONFIG_IP_NF_NAT=y | |
CONFIG_IP_ADVANCED_ROUTER=y | |
CONFIG_IP_MULTIPLE_TABLES=y |
--- a/scripts/Makefile.lto | |
+++ b/scripts/Makefile.lto | |
@@ -67,8 +67,8 @@ endif | |
# LTO gcc creates a lot of files in TMPDIR, and with /tmp as tmpfs | |
# it's easy to drive the machine OOM. Use the object directory | |
# instead for temporaries. | |
- TMPDIR ?= $(objtree) | |
- export TMPDIR | |
+# TMPDIR ?= $(objtree) | |
+# export TMPDIR | |
# use plugin aware tools | |
AR = $(CROSS_COMPILE)gcc-ar |
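Note: with the TMPDIR override commented out above, gcc writes its LTO temporaries to the default /tmp again. If /tmp is a small tmpfs, the final LTO link can fill it up (the risk the original comment describes); one workaround is to export TMPDIR by hand before building. The path below is only an example, pick any large filesystem:

# keep gcc's LTO temporaries on real disk instead of a small tmpfs
export TMPDIR=$HOME/linux-build-tmp      # example location
mkdir -p "$TMPDIR"
make -j"$(nproc)" vmlinux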
--- a/include/linux/export.h | |
+++ b/include/linux/export.h | |
@@ -111,7 +111,7 @@ struct kernel_symbol { | |
* discarded in the final link stage. | |
*/ | |
#define __ksym_marker(sym) \ | |
- static int __ksym_marker_##sym[0] __section(".discard.ksym") __used | |
+ int __ksym_marker_##sym[0] __section(".discard.ksym") __used | |
#define __EXPORT_SYMBOL(sym, sec, ns) \ | |
__ksym_marker(sym); \ |
Revert changes that the lto patch made, so that future 5.9.x stable patches apply cleanly. | 
Apply this after the lto patch and before any 5.9.x patches, as sketched below. | 
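A rough sketch of that ordering, assuming the gist's files are saved as lto-5.8-1-for-5.9.patch and lto-revert.patch and the stable update is patch-5.9.1 (all three names are placeholders, not the actual file names):

cd linux-5.9                               # clean 5.9.0 tree (example path)
patch -p1 < ../lto-5.8-1-for-5.9.patch     # 1. the LTO patch itself
patch -p1 < ../lto-revert.patch            # 2. this revert, on top of it
patch -p1 < ../patch-5.9.1                 # 3. then the 5.9.x stable patch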
--- | |
--- a/drivers/ras/cec.c | |
+++ b/drivers/ras/cec.c | |
@@ -553,20 +553,20 @@ static struct notifier_block cec_nb = { | |
.priority = MCE_PRIO_CEC, | |
}; | |
-static int __init cec_init(void) | |
+static void __init cec_init(void) | |
{ | |
if (ce_arr.disabled) | |
- return 0; | |
+ return; | |
ce_arr.array = (void *)get_zeroed_page(GFP_KERNEL); | |
if (!ce_arr.array) { | |
pr_err("Error allocating CE array page!\n"); | |
- return -ENOMEM; | |
+ return; | |
} | |
if (create_debugfs_nodes()) { | |
free_page((unsigned long)ce_arr.array); | |
- return -ENOMEM; | |
+ return; | |
} | |
INIT_DELAYED_WORK(&cec_work, cec_work_fn); | |
@@ -575,7 +575,6 @@ static int __init cec_init(void) | |
mce_register_decode_chain(&cec_nb); | |
pr_info("Correctable Errors collector initialized.\n"); | |
- return 0; | |
} | |
late_initcall(cec_init); | |
--- a/scripts/Makefile.lto | |
+++ b/scripts/Makefile.lto | |
@@ -13,7 +13,7 @@ export KBUILD_MOD_LDFLAGS | |
export KBUILD_MODPOST_LDFLAGS | |
ifdef CONFIG_LTO | |
- LTO_CFLAGS := -flto | |
+ LTO_CFLAGS := -flto -flto-compression-level=9 | |
LTO_FINAL_CFLAGS := -fuse-linker-plugin | |
# gcc 8.x doesn't generate debuginfo if we don't |
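Raising the compression level only shrinks the LTO bytecode stored in the intermediate object files, at the cost of some extra compile time; it should not change the generated code. One way to check that the flag actually reaches gcc is a verbose build (the grep below is just an illustration):

make V=1 vmlinux 2>&1 | grep -m1 -o -- '-flto-compression-level=9'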