 0000_README               |    4 +
 1141_linux-4.19.142.patch | 2088 ++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2092 insertions(+), 0 deletions(-)
diff --git a/0000_README b/0000_README
index 1680f0be..4adb8c68 100644
--- a/0000_README
+++ b/0000_README
@@ -603,6 +603,10 @@ Patch: 1140_linux-4.19.141.patch
From: https://www.kernel.org
Desc: Linux 4.19.141
+Patch: 1141_linux-4.19.142.patch
+From: https://www.kernel.org
+Desc: Linux 4.19.142
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1141_linux-4.19.142.patch b/1141_linux-4.19.142.patch
new file mode 100644
index 00000000..84d26b3a
--- /dev/null
+++ b/1141_linux-4.19.142.patch
@@ -0,0 +1,2088 @@
+diff --git a/Makefile b/Makefile
+index 5b64e11419846..e5e46aecf357f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 141
++SUBLEVEL = 142
+ EXTRAVERSION =
+ NAME = "People's Front"
+
+diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
+index eb09d5aee9106..0bba9e991189d 100644
+--- a/arch/alpha/include/asm/io.h
++++ b/arch/alpha/include/asm/io.h
+@@ -507,10 +507,10 @@ extern inline void writeq(u64 b, volatile void __iomem *addr)
+ }
+ #endif
+
+-#define ioread16be(p) be16_to_cpu(ioread16(p))
+-#define ioread32be(p) be32_to_cpu(ioread32(p))
+-#define iowrite16be(v,p) iowrite16(cpu_to_be16(v), (p))
+-#define iowrite32be(v,p) iowrite32(cpu_to_be32(v), (p))
++#define ioread16be(p) swab16(ioread16(p))
++#define ioread32be(p) swab32(ioread32(p))
++#define iowrite16be(v,p) iowrite16(swab16(v), (p))
++#define iowrite32be(v,p) iowrite32(swab32(v), (p))
+
+ #define inb_p inb
+ #define inw_p inw
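The alpha hunk above replaces be{16,32}_to_cpu() with swab{16,32}() in the big-endian MMIO helpers. Alpha is little-endian, so the two forms are value-equivalent there; the motivation appears to be type cleanliness (be16_to_cpu() expects a __be16, while ioread16() returns a plain u16), though that rationale is inferred, not stated in the patch. A minimal user-space sketch of the equivalence, with demo_swab16() standing in for the kernel's swab16():

```c
#include <assert.h>
#include <stdint.h>

/* Demo helper only; the kernel provides swab16()/swab32(). */
static uint16_t demo_swab16(uint16_t v)
{
    return (uint16_t)((v << 8) | (v >> 8));
}

int main(void)
{
    /* On a little-endian CPU, be16_to_cpu(x) and swab16(x) agree. */
    uint16_t raw = 0x1234;      /* as returned by a hypothetical ioread16() */
    assert(demo_swab16(raw) == 0x3412);
    return 0;
}
```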
+diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
+index c9128bb187f9a..471859cbfe0bb 100644
+--- a/arch/arm/include/asm/kvm_host.h
++++ b/arch/arm/include/asm/kvm_host.h
+@@ -234,7 +234,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
+
+ #define KVM_ARCH_WANT_MMU_NOTIFIER
+ int kvm_unmap_hva_range(struct kvm *kvm,
+- unsigned long start, unsigned long end);
++ unsigned long start, unsigned long end, bool blockable);
+ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+
+ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
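This hunk starts a series repeated for arm64, MIPS, PowerPC and x86 below: kvm_unmap_hva_range() gains a `blockable` flag. The apparent intent (inferred from the MMU-notifier context, not stated in the patch) is to let callers running in non-sleeping paths, such as reclaim, tell the callback it must not block. A rough user-space analogue of that contract, where do_unmap_range() is a made-up stand-in:

```c
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t maps_lock = PTHREAD_MUTEX_INITIALIZER;

/* blockable == false means "fail fast rather than sleep on the lock". */
static int do_unmap_range(unsigned long start, unsigned long end, bool blockable)
{
    if (blockable)
        pthread_mutex_lock(&maps_lock);
    else if (pthread_mutex_trylock(&maps_lock) != 0)
        return -1;                     /* caller must retry later */

    /* ... invalidate mappings in [start, end) ... */
    (void)start; (void)end;

    pthread_mutex_unlock(&maps_lock);
    return 0;
}

int main(void)
{
    return do_unmap_range(0x1000, 0x2000, true);
}
```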
+diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
+index e9afdfcb8403c..5e720742d6479 100644
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -370,7 +370,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
+
+ #define KVM_ARCH_WANT_MMU_NOTIFIER
+ int kvm_unmap_hva_range(struct kvm *kvm,
+- unsigned long start, unsigned long end);
++ unsigned long start, unsigned long end, bool blockable);
+ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+ int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
+ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
+diff --git a/arch/m68k/include/asm/m53xxacr.h b/arch/m68k/include/asm/m53xxacr.h
+index 9138a624c5c81..692f90e7fecc1 100644
+--- a/arch/m68k/include/asm/m53xxacr.h
++++ b/arch/m68k/include/asm/m53xxacr.h
+@@ -89,9 +89,9 @@
+ * coherency though in all cases. And for copyback caches we will need
+ * to push cached data as well.
+ */
+-#define CACHE_INIT CACR_CINVA
+-#define CACHE_INVALIDATE CACR_CINVA
+-#define CACHE_INVALIDATED CACR_CINVA
++#define CACHE_INIT (CACHE_MODE + CACR_CINVA - CACR_EC)
++#define CACHE_INVALIDATE (CACHE_MODE + CACR_CINVA)
++#define CACHE_INVALIDATED (CACHE_MODE + CACR_CINVA)
+
+ #define ACR0_MODE ((CONFIG_RAMBASE & 0xff000000) + \
+ (0x000f0000) + \
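The ColdFire fix reads as follows: writes to the CACR are absolute, not read-modify-write, so an invalidate value consisting only of CACR_CINVA would also clear the enable/mode bits and disable the cache as a side effect. The new macros fold CACHE_MODE back in, and CACHE_INIT subtracts CACR_EC so the initial invalidate runs with the cache still disabled. A sketch of that composition (bit values illustrative, not authoritative; consult m53xxacr.h for the real layout):

```c
#include <stdio.h>

/* Illustrative values only. */
#define CACR_EC    0x80000000u            /* cache enable */
#define CACR_CINVA 0x01000000u            /* invalidate all */
#define CACHE_MODE (CACR_EC | 0x00000020u /* + configured mode bits */)

int main(void)
{
    unsigned int old_inval = CACR_CINVA;              /* disabled the cache */
    unsigned int new_inval = CACHE_MODE + CACR_CINVA; /* keeps it enabled */
    unsigned int new_init  = CACHE_MODE + CACR_CINVA - CACR_EC; /* not yet on */
    printf("%#x %#x %#x\n", old_inval, new_inval, new_init);
    return 0;
}
```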
+diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
+index 2b3fdfc9e0e77..c254761cb8ad9 100644
+--- a/arch/mips/include/asm/kvm_host.h
++++ b/arch/mips/include/asm/kvm_host.h
+@@ -936,7 +936,7 @@ enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
+
+ #define KVM_ARCH_WANT_MMU_NOTIFIER
+ int kvm_unmap_hva_range(struct kvm *kvm,
+- unsigned long start, unsigned long end);
++ unsigned long start, unsigned long end, bool blockable);
+ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+ int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
+ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
+diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
+index d8dcdb3504059..098a7afd4d384 100644
+--- a/arch/mips/kvm/mmu.c
++++ b/arch/mips/kvm/mmu.c
+@@ -512,7 +512,8 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
+ return 1;
+ }
+
+-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
++int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
++ bool blockable)
+ {
+ handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
+
+diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
+index 2f95e38f05491..7b54d8412367e 100644
+--- a/arch/powerpc/include/asm/kvm_host.h
++++ b/arch/powerpc/include/asm/kvm_host.h
+@@ -68,7 +68,8 @@
+ #define KVM_ARCH_WANT_MMU_NOTIFIER
+
+ extern int kvm_unmap_hva_range(struct kvm *kvm,
+- unsigned long start, unsigned long end);
++ unsigned long start, unsigned long end,
++ bool blockable);
+ extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
+ extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
+ extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
+index cc05f346e0421..bc9d1321dc730 100644
+--- a/arch/powerpc/kvm/book3s.c
++++ b/arch/powerpc/kvm/book3s.c
+@@ -812,7 +812,8 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm,
+ kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new);
+ }
+
+-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
++int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
++ bool blockable)
+ {
+ return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
+ }
+diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
+index 8f2985e46f6f1..bbb02195dc530 100644
+--- a/arch/powerpc/kvm/e500_mmu_host.c
++++ b/arch/powerpc/kvm/e500_mmu_host.c
+@@ -737,7 +737,8 @@ static int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+ return 0;
+ }
+
+-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
++int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
++ bool blockable)
+ {
+ /* kvm_unmap_hva flushes everything anyways */
+ kvm_unmap_hva(kvm, start);
+diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
+index e81a285f3a6ce..e827108680f21 100644
+--- a/arch/powerpc/platforms/pseries/ras.c
++++ b/arch/powerpc/platforms/pseries/ras.c
+@@ -118,7 +118,6 @@ static void handle_system_shutdown(char event_modifier)
+ case EPOW_SHUTDOWN_ON_UPS:
+ pr_emerg("Loss of system power detected. System is running on"
+ " UPS/battery. Check RTAS error log for details\n");
+- orderly_poweroff(true);
+ break;
+
+ case EPOW_SHUTDOWN_LOSS_OF_CRITICAL_FUNCTIONS:
+diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
+index 65fefbf61e1ca..3ffa2847c110b 100644
+--- a/arch/s390/kernel/ptrace.c
++++ b/arch/s390/kernel/ptrace.c
+@@ -1286,7 +1286,6 @@ static bool is_ri_cb_valid(struct runtime_instr_cb *cb)
+ cb->pc == 1 &&
+ cb->qc == 0 &&
+ cb->reserved2 == 0 &&
+- cb->key == PAGE_DEFAULT_KEY &&
+ cb->reserved3 == 0 &&
+ cb->reserved4 == 0 &&
+ cb->reserved5 == 0 &&
+@@ -1350,7 +1349,11 @@ static int s390_runtime_instr_set(struct task_struct *target,
+ kfree(data);
+ return -EINVAL;
+ }
+-
++ /*
++ * Override access key in any case, since user space should
++ * not be able to set it, nor should it care about it.
++ */
++ ri_cb.key = PAGE_DEFAULT_KEY >> 4;
+ preempt_disable();
+ if (!target->thread.ri_cb)
+ target->thread.ri_cb = data;
+diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c
+index 125c7f6e87150..1788a5454b6fc 100644
+--- a/arch/s390/kernel/runtime_instr.c
++++ b/arch/s390/kernel/runtime_instr.c
+@@ -57,7 +57,7 @@ static void init_runtime_instr_cb(struct runtime_instr_cb *cb)
+ cb->k = 1;
+ cb->ps = 1;
+ cb->pc = 1;
+- cb->key = PAGE_DEFAULT_KEY;
++ cb->key = PAGE_DEFAULT_KEY >> 4;
+ cb->v = 1;
+ }
+
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index ce7b3b22ae86b..4876411a072a7 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1465,7 +1465,8 @@ asmlinkage void __noreturn kvm_spurious_fault(void);
+ ____kvm_handle_fault_on_reboot(insn, "")
+
+ #define KVM_ARCH_WANT_MMU_NOTIFIER
+-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
++int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
++ bool blockable);
+ int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
+ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
+ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index 92ff656e18101..a2ff5c214738a 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -1956,7 +1956,8 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
+ return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
+ }
+
+-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
++int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
++ bool blockable)
+ {
+ return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
+ }
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 5b2440e591fc1..430a4bc66f604 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -857,7 +857,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+ {
+ unsigned long old_cr4 = kvm_read_cr4(vcpu);
+ unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
+- X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
++ X86_CR4_SMEP;
+
+ if (kvm_valid_cr4(vcpu, cr4))
+ return 1;
+diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
+index 9112d1cb397bb..22da9bfd8a458 100644
+--- a/arch/x86/pci/xen.c
++++ b/arch/x86/pci/xen.c
+@@ -25,6 +25,7 @@
+ #include <asm/xen/pci.h>
+ #include <asm/xen/cpuid.h>
+ #include <asm/apic.h>
++#include <asm/acpi.h>
+ #include <asm/i8259.h>
+
+ static int xen_pcifront_enable_irq(struct pci_dev *dev)
+diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
+index 8353ab9bd31bd..c5cf9e77fe862 100644
+--- a/drivers/clk/clk.c
++++ b/drivers/clk/clk.c
+@@ -40,6 +40,17 @@ static HLIST_HEAD(clk_root_list);
+ static HLIST_HEAD(clk_orphan_list);
+ static LIST_HEAD(clk_notifier_list);
+
++static struct hlist_head *all_lists[] = {
++ &clk_root_list,
++ &clk_orphan_list,
++ NULL,
++};
++
++static struct hlist_head *orphan_list[] = {
++ &clk_orphan_list,
++ NULL,
++};
++
+ /*** private data structures ***/
+
+ struct clk_core {
+@@ -2618,17 +2629,6 @@ static int inited = 0;
+ static DEFINE_MUTEX(clk_debug_lock);
+ static HLIST_HEAD(clk_debug_list);
+
+-static struct hlist_head *all_lists[] = {
+- &clk_root_list,
+- &clk_orphan_list,
+- NULL,
+-};
+-
+-static struct hlist_head *orphan_list[] = {
+- &clk_orphan_list,
+- NULL,
+-};
+-
+ static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
+ int level)
+ {
+@@ -3328,6 +3328,34 @@ static const struct clk_ops clk_nodrv_ops = {
+ .set_parent = clk_nodrv_set_parent,
+ };
+
++static void clk_core_evict_parent_cache_subtree(struct clk_core *root,
++ struct clk_core *target)
++{
++ int i;
++ struct clk_core *child;
++
++ for (i = 0; i < root->num_parents; i++)
++ if (root->parents[i] == target)
++ root->parents[i] = NULL;
++
++ hlist_for_each_entry(child, &root->children, child_node)
++ clk_core_evict_parent_cache_subtree(child, target);
++}
++
++/* Remove this clk from all parent caches */
++static void clk_core_evict_parent_cache(struct clk_core *core)
++{
++ struct hlist_head **lists;
++ struct clk_core *root;
++
++ lockdep_assert_held(&prepare_lock);
++
++ for (lists = all_lists; *lists; lists++)
++ hlist_for_each_entry(root, *lists, child_node)
++ clk_core_evict_parent_cache_subtree(root, core);
++
++}
++
+ /**
+ * clk_unregister - unregister a currently registered clock
+ * @clk: clock to unregister
+@@ -3366,6 +3394,8 @@ void clk_unregister(struct clk *clk)
+ clk_core_set_parent_nolock(child, NULL);
+ }
+
++ clk_core_evict_parent_cache(clk->core);
++
+ hlist_del_init(&clk->core->child_node);
+
+ if (clk->core->prepare_count)
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index e7b3d4ed8eff4..99166000ffb77 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -1431,6 +1431,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
+
+ intel_pstate_get_hwp_max(cpu->cpu, &phy_max, &current_max);
+ cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling;
++ cpu->pstate.turbo_pstate = phy_max;
+ } else {
+ cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+ }
+diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
+index de1bc38ab39fb..a8180f9090fae 100644
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -359,6 +359,7 @@ static int __init efisubsys_init(void)
+ efi_kobj = kobject_create_and_add("efi", firmware_kobj);
+ if (!efi_kobj) {
+ pr_err("efi: Firmware registration failed.\n");
++ destroy_workqueue(efi_rts_wq);
+ return -ENOMEM;
+ }
+
+@@ -395,6 +396,7 @@ err_unregister:
+ generic_ops_unregister();
+ err_put:
+ kobject_put(efi_kobj);
++ destroy_workqueue(efi_rts_wq);
+ return error;
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h
+index 52a73332befb9..343f869c5277d 100644
+--- a/drivers/gpu/drm/amd/display/include/fixed31_32.h
++++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
+@@ -431,6 +431,9 @@ struct fixed31_32 dc_fixpt_log(struct fixed31_32 arg);
+ */
+ static inline struct fixed31_32 dc_fixpt_pow(struct fixed31_32 arg1, struct fixed31_32 arg2)
+ {
++ if (arg1.value == 0)
++ return arg2.value == 0 ? dc_fixpt_one : dc_fixpt_zero;
++
+ return dc_fixpt_exp(
+ dc_fixpt_mul(
+ dc_fixpt_log(arg1),
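The guard added to dc_fixpt_pow() is needed because the function computes arg1^arg2 as exp(arg2 * log(arg1)), and log(0) is undefined; returning one for 0^0 and zero otherwise follows the usual convention. The same shape in plain floating point, pow_via_exp_log() being a demo name:

```c
#include <math.h>
#include <stdio.h>

/* x^y via exp/log, valid for x > 0, with the zero-base guard in front. */
static double pow_via_exp_log(double x, double y)
{
    if (x == 0.0)
        return (y == 0.0) ? 1.0 : 0.0;
    return exp(y * log(x));
}

int main(void)
{
    printf("%g %g %g\n",
           pow_via_exp_log(2.0, 10.0),   /* 1024 */
           pow_via_exp_log(0.0, 0.0),    /* 1 */
           pow_via_exp_log(0.0, 3.0));   /* 0 */
    return 0;
}
```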
+diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
+index 4709f08f39e49..1c1a435d354bc 100644
+--- a/drivers/gpu/drm/vgem/vgem_drv.c
++++ b/drivers/gpu/drm/vgem/vgem_drv.c
+@@ -219,32 +219,6 @@ static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+ return 0;
+ }
+
+-static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
+- uint32_t handle, uint64_t *offset)
+-{
+- struct drm_gem_object *obj;
+- int ret;
+-
+- obj = drm_gem_object_lookup(file, handle);
+- if (!obj)
+- return -ENOENT;
+-
+- if (!obj->filp) {
+- ret = -EINVAL;
+- goto unref;
+- }
+-
+- ret = drm_gem_create_mmap_offset(obj);
+- if (ret)
+- goto unref;
+-
+- *offset = drm_vma_node_offset_addr(&obj->vma_node);
+-unref:
+- drm_gem_object_put_unlocked(obj);
+-
+- return ret;
+-}
+-
+ static struct drm_ioctl_desc vgem_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+@@ -438,7 +412,6 @@ static struct drm_driver vgem_driver = {
+ .fops = &vgem_driver_fops,
+
+ .dumb_create = vgem_gem_dumb_create,
+- .dumb_map_offset = vgem_gem_dumb_map,
+
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
+index 589b0d4677d52..f1b666c80f368 100644
+--- a/drivers/infiniband/hw/bnxt_re/main.c
++++ b/drivers/infiniband/hw/bnxt_re/main.c
+@@ -753,7 +753,8 @@ static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
+ struct ib_event event;
+ unsigned int flags;
+
+- if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
++ if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR &&
++ rdma_is_kernel_res(&qp->ib_qp.res)) {
+ flags = bnxt_re_lock_cqs(qp);
+ bnxt_qplib_add_flush_qp(&qp->qplib_qp);
+ bnxt_re_unlock_cqs(qp, flags);
+diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
+index d3ff1fc09af71..a9040c0fb4c3f 100644
+--- a/drivers/input/mouse/psmouse-base.c
++++ b/drivers/input/mouse/psmouse-base.c
+@@ -2044,7 +2044,7 @@ static int psmouse_get_maxproto(char *buffer, const struct kernel_param *kp)
+ {
+ int type = *((unsigned int *)kp->arg);
+
+- return sprintf(buffer, "%s", psmouse_protocol_by_type(type)->name);
++ return sprintf(buffer, "%s\n", psmouse_protocol_by_type(type)->name);
+ }
+
+ static int __init psmouse_init(void)
+diff --git a/drivers/media/pci/ttpci/budget-core.c b/drivers/media/pci/ttpci/budget-core.c
+index b3dc45b91101d..9b545c7431685 100644
+--- a/drivers/media/pci/ttpci/budget-core.c
++++ b/drivers/media/pci/ttpci/budget-core.c
+@@ -383,20 +383,25 @@ static int budget_register(struct budget *budget)
+ ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &budget->hw_frontend);
+
+ if (ret < 0)
+- return ret;
++ goto err_release_dmx;
+
+ budget->mem_frontend.source = DMX_MEMORY_FE;
+ ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &budget->mem_frontend);
+ if (ret < 0)
+- return ret;
++ goto err_release_dmx;
+
+ ret = dvbdemux->dmx.connect_frontend(&dvbdemux->dmx, &budget->hw_frontend);
+ if (ret < 0)
+- return ret;
++ goto err_release_dmx;
+
+ dvb_net_init(&budget->dvb_adapter, &budget->dvb_net, &dvbdemux->dmx);
+
+ return 0;
++
++err_release_dmx:
++ dvb_dmxdev_release(&budget->dmxdev);
++ dvb_dmx_release(&budget->demux);
++ return ret;
+ }
+
+ static void budget_unregister(struct budget *budget)
+diff --git a/drivers/media/platform/davinci/vpss.c b/drivers/media/platform/davinci/vpss.c
+index 89a86c19579b8..50fc71d0cb9f3 100644
+--- a/drivers/media/platform/davinci/vpss.c
++++ b/drivers/media/platform/davinci/vpss.c
+@@ -514,19 +514,31 @@ static void vpss_exit(void)
+
+ static int __init vpss_init(void)
+ {
++ int ret;
++
+ if (!request_mem_region(VPSS_CLK_CTRL, 4, "vpss_clock_control"))
+ return -EBUSY;
+
+ oper_cfg.vpss_regs_base2 = ioremap(VPSS_CLK_CTRL, 4);
+ if (unlikely(!oper_cfg.vpss_regs_base2)) {
+- release_mem_region(VPSS_CLK_CTRL, 4);
+- return -ENOMEM;
++ ret = -ENOMEM;
++ goto err_ioremap;
+ }
+
+ writel(VPSS_CLK_CTRL_VENCCLKEN |
+- VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2);
++ VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2);
++
++ ret = platform_driver_register(&vpss_driver);
++ if (ret)
++ goto err_pd_register;
++
++ return 0;
+
+- return platform_driver_register(&vpss_driver);
++err_pd_register:
++ iounmap(oper_cfg.vpss_regs_base2);
++err_ioremap:
++ release_mem_region(VPSS_CLK_CTRL, 4);
++ return ret;
+ }
+ subsys_initcall(vpss_init);
+ module_exit(vpss_exit);
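The vpss_init() rework above converts early returns into the kernel's standard goto unwind ladder: acquire resources in order and, on failure, jump to a label that releases everything acquired so far in reverse order. A compact stand-alone illustration of the shape, with malloc/free standing in for request_mem_region()/ioremap() and their release calls:

```c
#include <stdlib.h>

static int fake_register(void) { return 0; }  /* pretend driver registration */

static int init_chain(void)
{
    void *region = malloc(4);        /* stands in for request_mem_region() */
    if (!region)
        return -1;

    void *regs = malloc(4);          /* stands in for ioremap() */
    if (!regs)
        goto err_ioremap;

    if (fake_register() != 0)        /* platform_driver_register() analogue */
        goto err_register;
    return 0;                        /* success: resources deliberately held */

err_register:
    free(regs);                      /* undo the "ioremap" */
err_ioremap:
    free(region);                    /* undo the "request_mem_region" */
    return -1;
}

int main(void) { return init_chain() ? 1 : 0; }
```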
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 11429df743067..d32e32e791741 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -2029,7 +2029,8 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
+ int ret;
+
+ ret = __bond_release_one(bond_dev, slave_dev, false, true);
+- if (ret == 0 && !bond_has_slaves(bond)) {
++ if (ret == 0 && !bond_has_slaves(bond) &&
++ bond_dev->reg_state != NETREG_UNREGISTERING) {
+ bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
+ netdev_info(bond_dev, "Destroying bond %s\n",
+ bond_dev->name);
+@@ -2772,6 +2773,9 @@ static int bond_ab_arp_inspect(struct bonding *bond)
+ if (bond_time_in_interval(bond, last_rx, 1)) {
+ bond_propose_link_state(slave, BOND_LINK_UP);
+ commit++;
++ } else if (slave->link == BOND_LINK_BACK) {
++ bond_propose_link_state(slave, BOND_LINK_FAIL);
++ commit++;
+ }
+ continue;
+ }
+@@ -2882,6 +2886,19 @@ static void bond_ab_arp_commit(struct bonding *bond)
+
+ continue;
+
++ case BOND_LINK_FAIL:
++ bond_set_slave_link_state(slave, BOND_LINK_FAIL,
++ BOND_SLAVE_NOTIFY_NOW);
++ bond_set_slave_inactive_flags(slave,
++ BOND_SLAVE_NOTIFY_NOW);
++
++ /* A slave has just been enslaved and has become
++ * the current active slave.
++ */
++ if (rtnl_dereference(bond->curr_active_slave))
++ RCU_INIT_POINTER(bond->current_arp_slave, NULL);
++ continue;
++
+ default:
+ netdev_err(bond->dev, "impossible: new_link %d on slave %s\n",
+ slave->link_new_state, slave->dev->name);
+@@ -2931,8 +2948,6 @@ static bool bond_ab_arp_probe(struct bonding *bond)
+ return should_notify_rtnl;
+ }
+
+- bond_set_slave_inactive_flags(curr_arp_slave, BOND_SLAVE_NOTIFY_LATER);
+-
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ if (!found && !before && bond_slave_is_up(slave))
+ before = slave;
+@@ -4200,13 +4215,23 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ return ret;
+ }
+
++static u32 bond_mode_bcast_speed(struct slave *slave, u32 speed)
++{
++ if (speed == 0 || speed == SPEED_UNKNOWN)
++ speed = slave->speed;
++ else
++ speed = min(speed, slave->speed);
++
++ return speed;
++}
++
+ static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
+ struct ethtool_link_ksettings *cmd)
+ {
+ struct bonding *bond = netdev_priv(bond_dev);
+- unsigned long speed = 0;
+ struct list_head *iter;
+ struct slave *slave;
++ u32 speed = 0;
+
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ cmd->base.port = PORT_OTHER;
+@@ -4218,8 +4243,13 @@ static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
+ */
+ bond_for_each_slave(bond, slave, iter) {
+ if (bond_slave_can_tx(slave)) {
+- if (slave->speed != SPEED_UNKNOWN)
+- speed += slave->speed;
++ if (slave->speed != SPEED_UNKNOWN) {
++ if (BOND_MODE(bond) == BOND_MODE_BROADCAST)
++ speed = bond_mode_bcast_speed(slave,
++ speed);
++ else
++ speed += slave->speed;
++ }
+ if (cmd->base.duplex == DUPLEX_UNKNOWN &&
+ slave->duplex != DUPLEX_UNKNOWN)
+ cmd->base.duplex = slave->duplex;
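bond_mode_bcast_speed() changes what ethtool reports for a broadcast-mode bond: since broadcast replicates every frame to all slaves, the usable speed is the minimum across slaves rather than their sum. Worked example: slaves at 1000, 100 and 1000 Mb/s now yield 100 Mb/s instead of 2100. A simplified model that ignores the SPEED_UNKNOWN handling (bcast_speed() is a demo name):

```c
#include <stdio.h>

/* Min-accumulate, as in bond_mode_bcast_speed(); 0 means "none seen yet". */
static unsigned int bcast_speed(unsigned int acc, unsigned int slave)
{
    return (acc == 0 || slave < acc) ? slave : acc;
}

int main(void)
{
    unsigned int speeds[] = { 1000, 100, 1000 }, bond = 0;
    for (int i = 0; i < 3; i++)
        bond = bcast_speed(bond, speeds[i]);
    printf("broadcast bond speed: %u Mb/s\n", bond);  /* prints 100 */
    return 0;
}
```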
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index 11f3993ab7f30..294be86420b6d 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -1335,6 +1335,8 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
+ return ret;
+
+ switch (ret) {
++ case -ETIMEDOUT:
++ return ret;
+ case -ENOSPC:
+ dev_dbg(dev->dev, "{%pM,%.4d} no space left in ARL\n",
+ addr, vid);
+diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+index 8736718b17359..55cc70ba5b093 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+@@ -2647,16 +2647,14 @@ static void ena_fw_reset_device(struct work_struct *work)
+ {
+ struct ena_adapter *adapter =
+ container_of(work, struct ena_adapter, reset_task);
+- struct pci_dev *pdev = adapter->pdev;
+
+- if (unlikely(!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
+- dev_err(&pdev->dev,
+- "device reset schedule while reset bit is off\n");
+- return;
+- }
+ rtnl_lock();
+- ena_destroy_device(adapter, false);
+- ena_restore_device(adapter);
++
++ if (likely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
++ ena_destroy_device(adapter, false);
++ ena_restore_device(adapter);
++ }
++
+ rtnl_unlock();
+ }
+
+@@ -3392,8 +3390,11 @@ static void ena_remove(struct pci_dev *pdev)
+ netdev->rx_cpu_rmap = NULL;
+ }
+ #endif /* CONFIG_RFS_ACCEL */
+- del_timer_sync(&adapter->timer_service);
+
++ /* Make sure timer and reset routine won't be called after
++ * freeing device resources.
++ */
++ del_timer_sync(&adapter->timer_service);
+ cancel_work_sync(&adapter->reset_task);
+
+ unregister_netdev(netdev);
+diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
+index f402af39da42a..16de0fa92ab74 100644
+--- a/drivers/net/ethernet/cortina/gemini.c
++++ b/drivers/net/ethernet/cortina/gemini.c
+@@ -2392,7 +2392,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
+
+ dev_info(dev, "probe %s ID %d\n", dev_name(dev), id);
+
+- netdev = alloc_etherdev_mq(sizeof(*port), TX_QUEUE_NUM);
++ netdev = devm_alloc_etherdev_mqs(dev, sizeof(*port), TX_QUEUE_NUM, TX_QUEUE_NUM);
+ if (!netdev) {
+ dev_err(dev, "Can't allocate ethernet device #%d\n", id);
+ return -ENOMEM;
+@@ -2526,7 +2526,6 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
+ }
+
+ port->netdev = NULL;
+- free_netdev(netdev);
+ return ret;
+ }
+
+@@ -2535,7 +2534,6 @@ static int gemini_ethernet_port_remove(struct platform_device *pdev)
+ struct gemini_ethernet_port *port = platform_get_drvdata(pdev);
+
+ gemini_port_remove(port);
+- free_netdev(port->netdev);
+ return 0;
+ }
+
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 48c58f93b124b..3b6da228140e3 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -3659,11 +3659,11 @@ failed_mii_init:
+ failed_irq:
+ failed_init:
+ fec_ptp_stop(pdev);
+- if (fep->reg_phy)
+- regulator_disable(fep->reg_phy);
+ failed_reset:
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
++ if (fep->reg_phy)
++ regulator_disable(fep->reg_phy);
+ failed_regulator:
+ clk_disable_unprepare(fep->clk_ahb);
+ failed_clk_ahb:
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+index 80e3eec6134ee..a5e5e7e14e6c5 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+@@ -1206,7 +1206,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
+ #define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04
+ #define I40E_AQC_SET_VSI_DEFAULT 0x08
+ #define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
+-#define I40E_AQC_SET_VSI_PROMISC_TX 0x8000
++#define I40E_AQC_SET_VSI_PROMISC_RX_ONLY 0x8000
+ __le16 seid;
+ #define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
+ __le16 vlan_tag;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
+index eb0ae6ab01e26..e75b4c4872c09 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
+@@ -1970,6 +1970,21 @@ i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
+ return status;
+ }
+
++/**
++ * i40e_is_aq_api_ver_ge
++ * @aq: pointer to AdminQ info containing HW API version to compare
++ * @maj: API major value
++ * @min: API minor value
++ *
++ * Assert whether current HW API version is greater/equal than provided.
++ **/
++static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj,
++ u16 min)
++{
++ return (aq->api_maj_ver > maj ||
++ (aq->api_maj_ver == maj && aq->api_min_ver >= min));
++}
++
+ /**
+ * i40e_aq_add_vsi
+ * @hw: pointer to the hw struct
+@@ -2095,18 +2110,16 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
+
+ if (set) {
+ flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
+- if (rx_only_promisc &&
+- (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
+- (hw->aq.api_maj_ver > 1)))
+- flags |= I40E_AQC_SET_VSI_PROMISC_TX;
++ if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
++ flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
+ }
+
+ cmd->promiscuous_flags = cpu_to_le16(flags);
+
+ cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+- if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) ||
+- (hw->aq.api_maj_ver > 1))
+- cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_TX);
++ if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
++ cmd->valid_flags |=
++ cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
+
+ cmd->seid = cpu_to_le16(seid);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+@@ -2203,11 +2216,17 @@ enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+- if (enable)
++ if (enable) {
+ flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
++ if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
++ flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
++ }
+
+ cmd->promiscuous_flags = cpu_to_le16(flags);
+ cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
++ if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
++ cmd->valid_flags |=
++ cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
+ cmd->seid = cpu_to_le16(seid);
+ cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index a74b01bf581e9..3200c75b9ed2a 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -14152,6 +14152,9 @@ static void i40e_remove(struct pci_dev *pdev)
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
+
++ while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
++ usleep_range(1000, 2000);
++
+ /* no more scheduling of any task */
+ set_bit(__I40E_SUSPENDED, pf->state);
+ set_bit(__I40E_DOWN, pf->state);
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index e33cbb793b638..4a5d99ecb89d3 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -513,7 +513,7 @@ static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev,
+ int rc;
+
+ skb->dev = vf_netdev;
+- skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;
++ skb_record_rx_queue(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
+
+ rc = dev_queue_xmit(skb);
+ if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) {
+diff --git a/drivers/rtc/rtc-goldfish.c b/drivers/rtc/rtc-goldfish.c
+index a1c44d0c85578..30cbe22c57a8e 100644
+--- a/drivers/rtc/rtc-goldfish.c
++++ b/drivers/rtc/rtc-goldfish.c
+@@ -87,6 +87,7 @@ static int goldfish_rtc_set_alarm(struct device *dev,
+ rtc_alarm64 = rtc_alarm * NSEC_PER_SEC;
+ writel((rtc_alarm64 >> 32), base + TIMER_ALARM_HIGH);
+ writel(rtc_alarm64, base + TIMER_ALARM_LOW);
++ writel(1, base + TIMER_IRQ_ENABLED);
+ } else {
+ /*
+ * if this function was called with enabled=0
+diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
+index 91aa4bfcf8d61..5bb278a604ed2 100644
+--- a/drivers/s390/scsi/zfcp_fsf.c
++++ b/drivers/s390/scsi/zfcp_fsf.c
+@@ -403,7 +403,7 @@ static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
+ return;
+ }
+
+- del_timer(&req->timer);
++ del_timer_sync(&req->timer);
+ zfcp_fsf_protstatus_eval(req);
+ zfcp_fsf_fsfstatus_eval(req);
+ req->handler(req);
+@@ -758,7 +758,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
+ req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
+ req->issued = get_tod_clock();
+ if (zfcp_qdio_send(qdio, &req->qdio_req)) {
+- del_timer(&req->timer);
++ del_timer_sync(&req->timer);
+ /* lookup request again, list might have changed */
+ zfcp_reqlist_find_rm(adapter->req_list, req_id);
+ zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1");
+diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
+index 8839f509b19ab..78cf5b32bca67 100644
+--- a/drivers/scsi/libfc/fc_disc.c
++++ b/drivers/scsi/libfc/fc_disc.c
+@@ -593,8 +593,12 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
+
+ if (PTR_ERR(fp) == -FC_EX_CLOSED)
+ goto out;
+- if (IS_ERR(fp))
+- goto redisc;
++ if (IS_ERR(fp)) {
++ mutex_lock(&disc->disc_mutex);
++ fc_disc_restart(disc);
++ mutex_unlock(&disc->disc_mutex);
++ goto out;
++ }
+
+ cp = fc_frame_payload_get(fp, sizeof(*cp));
+ if (!cp)
+@@ -621,7 +625,7 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
+ new_rdata->disc_id = disc->disc_id;
+ fc_rport_login(new_rdata);
+ }
+- goto out;
++ goto free_fp;
+ }
+ rdata->disc_id = disc->disc_id;
+ mutex_unlock(&rdata->rp_mutex);
+@@ -638,6 +642,8 @@ redisc:
+ fc_disc_restart(disc);
+ mutex_unlock(&disc->disc_mutex);
+ }
++free_fp:
++ fc_frame_free(fp);
+ out:
+ kref_put(&rdata->kref, fc_rport_destroy);
+ if (!IS_ERR(fp))
+diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
+index 5d2dfdb41a6ff..758d3a67047df 100644
+--- a/drivers/scsi/ufs/ufs_quirks.h
++++ b/drivers/scsi/ufs/ufs_quirks.h
+@@ -21,6 +21,7 @@
+ #define UFS_ANY_VENDOR 0xFFFF
+ #define UFS_ANY_MODEL "ANY_MODEL"
+
++#define UFS_VENDOR_MICRON 0x12C
+ #define UFS_VENDOR_TOSHIBA 0x198
+ #define UFS_VENDOR_SAMSUNG 0x1CE
+ #define UFS_VENDOR_SKHYNIX 0x1AD
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index bd21c9cdf8183..ab628fd37e026 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -218,6 +218,8 @@ ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
+
+ static struct ufs_dev_fix ufs_fixups[] = {
+ /* UFS cards deviations table */
++ UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
++ UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
+diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
+index 671d078349cc6..0a7fd56c1ed9d 100644
+--- a/drivers/spi/Kconfig
++++ b/drivers/spi/Kconfig
+@@ -817,4 +817,7 @@ config SPI_SLAVE_SYSTEM_CONTROL
+
+ endif # SPI_SLAVE
+
++config SPI_DYNAMIC
++ def_bool ACPI || OF_DYNAMIC || SPI_SLAVE
++
+ endif # SPI
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index f589d8100e957..92e6b6774d98e 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -432,6 +432,12 @@ static LIST_HEAD(spi_controller_list);
+ */
+ static DEFINE_MUTEX(board_lock);
+
++/*
++ * Prevents addition of devices with same chip select and
++ * addition of devices below an unregistering controller.
++ */
++static DEFINE_MUTEX(spi_add_lock);
++
+ /**
+ * spi_alloc_device - Allocate a new SPI device
+ * @ctlr: Controller to which device is connected
+@@ -510,7 +516,6 @@ static int spi_dev_check(struct device *dev, void *data)
+ */
+ int spi_add_device(struct spi_device *spi)
+ {
+- static DEFINE_MUTEX(spi_add_lock);
+ struct spi_controller *ctlr = spi->controller;
+ struct device *dev = ctlr->dev.parent;
+ int status;
+@@ -538,6 +543,13 @@ int spi_add_device(struct spi_device *spi)
+ goto done;
+ }
+
++ /* Controller may unregister concurrently */
++ if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
++ !device_is_registered(&ctlr->dev)) {
++ status = -ENODEV;
++ goto done;
++ }
++
+ if (ctlr->cs_gpios)
+ spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];
+
+@@ -2306,6 +2318,10 @@ void spi_unregister_controller(struct spi_controller *ctlr)
+ struct spi_controller *found;
+ int id = ctlr->bus_num;
+
++ /* Prevent addition of new devices, unregister existing ones */
++ if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
++ mutex_lock(&spi_add_lock);
++
+ device_for_each_child(&ctlr->dev, NULL, __unregister);
+
+ /* First make sure that this controller was ever added */
+@@ -2326,6 +2342,9 @@ void spi_unregister_controller(struct spi_controller *ctlr)
+ if (found == ctlr)
+ idr_remove(&spi_master_idr, id);
+ mutex_unlock(&board_lock);
++
++ if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
++ mutex_unlock(&spi_add_lock);
+ }
+ EXPORT_SYMBOL_GPL(spi_unregister_controller);
+
+diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
+index 8da89925a874d..9c05e820857aa 100644
+--- a/drivers/target/target_core_user.c
++++ b/drivers/target/target_core_user.c
+@@ -612,7 +612,7 @@ static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
+ size = round_up(size+offset, PAGE_SIZE);
+
+ while (size) {
+- flush_dcache_page(virt_to_page(start));
++ flush_dcache_page(vmalloc_to_page(start));
+ start += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
+index 6dbdadb936a89..52083b710b87e 100644
+--- a/drivers/vfio/vfio_iommu_type1.c
++++ b/drivers/vfio/vfio_iommu_type1.c
+@@ -1193,13 +1193,16 @@ static int vfio_bus_type(struct device *dev, void *data)
+ static int vfio_iommu_replay(struct vfio_iommu *iommu,
+ struct vfio_domain *domain)
+ {
+- struct vfio_domain *d;
++ struct vfio_domain *d = NULL;
+ struct rb_node *n;
+ unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+ int ret;
+
+ /* Arbitrarily pick the first domain in the list for lookups */
+- d = list_first_entry(&iommu->domain_list, struct vfio_domain, next);
++ if (!list_empty(&iommu->domain_list))
++ d = list_first_entry(&iommu->domain_list,
++ struct vfio_domain, next);
++
+ n = rb_first(&iommu->dma_list);
+
+ for (; n; n = rb_next(n)) {
+@@ -1217,6 +1220,11 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
+ phys_addr_t p;
+ dma_addr_t i;
+
++ if (WARN_ON(!d)) { /* mapped w/o a domain?! */
++ ret = -EINVAL;
++ goto unwind;
++ }
++
+ phys = iommu_iova_to_phys(d->domain, iova);
+
+ if (WARN_ON(!phys)) {
+@@ -1246,7 +1254,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
+ if (npage <= 0) {
+ WARN_ON(!npage);
+ ret = (int)npage;
+- return ret;
++ goto unwind;
+ }
+
+ phys = pfn << PAGE_SHIFT;
+@@ -1255,14 +1263,67 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
+
+ ret = iommu_map(domain->domain, iova, phys,
+ size, dma->prot | domain->prot);
+- if (ret)
+- return ret;
++ if (ret) {
++ if (!dma->iommu_mapped)
++ vfio_unpin_pages_remote(dma, iova,
++ phys >> PAGE_SHIFT,
++ size >> PAGE_SHIFT,
++ true);
++ goto unwind;
++ }
+
+ iova += size;
+ }
++ }
++
++ /* All dmas are now mapped, defer to second tree walk for unwind */
++ for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
++ struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
++
+ dma->iommu_mapped = true;
+ }
++
+ return 0;
++
++unwind:
++ for (; n; n = rb_prev(n)) {
++ struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
++ dma_addr_t iova;
++
++ if (dma->iommu_mapped) {
++ iommu_unmap(domain->domain, dma->iova, dma->size);
++ continue;
++ }
++
++ iova = dma->iova;
++ while (iova < dma->iova + dma->size) {
++ phys_addr_t phys, p;
++ size_t size;
++ dma_addr_t i;
++
++ phys = iommu_iova_to_phys(domain->domain, iova);
++ if (!phys) {
++ iova += PAGE_SIZE;
++ continue;
++ }
++
++ size = PAGE_SIZE;
++ p = phys + size;
++ i = iova + size;
++ while (i < dma->iova + dma->size &&
++ p == iommu_iova_to_phys(domain->domain, i)) {
++ size += PAGE_SIZE;
++ p += PAGE_SIZE;
++ i += PAGE_SIZE;
++ }
++
++ iommu_unmap(domain->domain, iova, size);
++ vfio_unpin_pages_remote(dma, iova, phys >> PAGE_SHIFT,
++ size >> PAGE_SHIFT, true);
++ }
++ }
++
++ return ret;
+ }
+
+ /*
+diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
+index cc1006375cacb..f50cc1a7c31a9 100644
+--- a/drivers/video/fbdev/efifb.c
++++ b/drivers/video/fbdev/efifb.c
+@@ -449,7 +449,7 @@ static int efifb_probe(struct platform_device *dev)
+ info->apertures->ranges[0].base = efifb_fix.smem_start;
+ info->apertures->ranges[0].size = size_remap;
+
+- if (efi_enabled(EFI_BOOT) &&
++ if (efi_enabled(EFI_MEMMAP) &&
+ !efi_mem_desc_lookup(efifb_fix.smem_start, &md)) {
+ if ((efifb_fix.smem_start + efifb_fix.smem_len) >
+ (md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT))) {
+diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
+index 6228b48d1e127..df7980aef927a 100644
+--- a/drivers/virtio/virtio_ring.c
++++ b/drivers/virtio/virtio_ring.c
+@@ -828,6 +828,9 @@ bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
+ {
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
++ if (unlikely(vq->broken))
++ return false;
++
+ virtio_mb(vq->weak_barriers);
+ return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
+ }
+diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c
+index 5f6b77ea34fb5..128375ff80b8c 100644
+--- a/drivers/xen/preempt.c
++++ b/drivers/xen/preempt.c
+@@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);
+ asmlinkage __visible void xen_maybe_preempt_hcall(void)
+ {
+ if (unlikely(__this_cpu_read(xen_in_preemptible_hcall)
+- && need_resched())) {
++ && need_resched() && !preempt_count())) {
+ /*
+ * Clear flag as we may be rescheduled on a different
+ * cpu.
+diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
+index 069273a2483f9..fc6c42eeb659c 100644
+--- a/fs/afs/dynroot.c
++++ b/fs/afs/dynroot.c
+@@ -299,15 +299,17 @@ void afs_dynroot_depopulate(struct super_block *sb)
+ net->dynroot_sb = NULL;
+ mutex_unlock(&net->proc_cells_lock);
+
+- inode_lock(root->d_inode);
+-
+- /* Remove all the pins for dirs created for manually added cells */
+- list_for_each_entry_safe(subdir, tmp, &root->d_subdirs, d_child) {
+- if (subdir->d_fsdata) {
+- subdir->d_fsdata = NULL;
+- dput(subdir);
++ if (root) {
++ inode_lock(root->d_inode);
++
++ /* Remove all the pins for dirs created for manually added cells */
++ list_for_each_entry_safe(subdir, tmp, &root->d_subdirs, d_child) {
++ if (subdir->d_fsdata) {
++ subdir->d_fsdata = NULL;
++ dput(subdir);
++ }
+ }
+- }
+
+- inode_unlock(root->d_inode);
++ inode_unlock(root->d_inode);
++ }
+ }
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index 15cb96ad15d8c..554727d82d432 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -3271,6 +3271,8 @@ void btrfs_sysfs_remove_mounted(struct btrfs_fs_info *fs_info);
+ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
+ unsigned long new_flags);
+ int btrfs_sync_fs(struct super_block *sb, int wait);
++char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
++ u64 subvol_objectid);
+
+ static inline __printf(2, 3) __cold
+ void btrfs_no_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
+diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
+index 1f3755b3a37ae..665ec85cb09b8 100644
+--- a/fs/btrfs/export.c
++++ b/fs/btrfs/export.c
+@@ -57,9 +57,9 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
+ return type;
+ }
+
+-static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
+- u64 root_objectid, u32 generation,
+- int check_generation)
++struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
++ u64 root_objectid, u32 generation,
++ int check_generation)
+ {
+ struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+ struct btrfs_root *root;
+@@ -152,7 +152,7 @@ static struct dentry *btrfs_fh_to_dentry(struct super_block *sb, struct fid *fh,
+ return btrfs_get_dentry(sb, objectid, root_objectid, generation, 1);
+ }
+
+-static struct dentry *btrfs_get_parent(struct dentry *child)
++struct dentry *btrfs_get_parent(struct dentry *child)
+ {
+ struct inode *dir = d_inode(child);
+ struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
+diff --git a/fs/btrfs/export.h b/fs/btrfs/export.h
+index 57488ecd7d4ef..f32f4113c976a 100644
+--- a/fs/btrfs/export.h
++++ b/fs/btrfs/export.h
+@@ -18,4 +18,9 @@ struct btrfs_fid {
+ u64 parent_root_objectid;
+ } __attribute__ ((packed));
+
++struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
++ u64 root_objectid, u32 generation,
++ int check_generation);
++struct dentry *btrfs_get_parent(struct dentry *child);
++
+ #endif
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 1656ef0e959f0..bdfe159a60da6 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -628,7 +628,21 @@ cont:
+ PAGE_SET_WRITEBACK |
+ page_error_op |
+ PAGE_END_WRITEBACK);
+- goto free_pages_out;
++
++ /*
++ * Ensure we only free the compressed pages if we have
++ * them allocated, as we can still reach here with
++ * inode_need_compress() == false.
++ */
++ if (pages) {
++ for (i = 0; i < nr_pages; i++) {
++ WARN_ON(pages[i]->mapping);
++ put_page(pages[i]);
++ }
++ kfree(pages);
++ }
++
++ return;
+ }
+ }
+
+@@ -706,13 +720,6 @@ cleanup_and_bail_uncompressed:
+ *num_added += 1;
+
+ return;
+-
+-free_pages_out:
+- for (i = 0; i < nr_pages; i++) {
+- WARN_ON(pages[i]->mapping);
+- put_page(pages[i]);
+- }
+- kfree(pages);
+ }
+
+ static void free_async_extent_pages(struct async_extent *async_extent)
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index ed539496089f1..4d2810a32b4a9 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -1000,8 +1000,8 @@ out:
+ return error;
+ }
+
+-static char *get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
+- u64 subvol_objectid)
++char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
++ u64 subvol_objectid)
+ {
+ struct btrfs_root *root = fs_info->tree_root;
+ struct btrfs_root *fs_root;
+@@ -1282,6 +1282,7 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
+ {
+ struct btrfs_fs_info *info = btrfs_sb(dentry->d_sb);
+ const char *compress_type;
++ const char *subvol_name;
+
+ if (btrfs_test_opt(info, DEGRADED))
+ seq_puts(seq, ",degraded");
+@@ -1366,8 +1367,13 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
+ seq_puts(seq, ",ref_verify");
+ seq_printf(seq, ",subvolid=%llu",
+ BTRFS_I(d_inode(dentry))->root->root_key.objectid);
+- seq_puts(seq, ",subvol=");
+- seq_dentry(seq, dentry, " \t\n\\");
++ subvol_name = btrfs_get_subvol_name_from_objectid(info,
++ BTRFS_I(d_inode(dentry))->root->root_key.objectid);
++ if (!IS_ERR(subvol_name)) {
++ seq_puts(seq, ",subvol=");
++ seq_escape(seq, subvol_name, " \t\n\\");
++ kfree(subvol_name);
++ }
+ return 0;
+ }
+
+@@ -1412,8 +1418,8 @@ static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid,
+ goto out;
+ }
+ }
+- subvol_name = get_subvol_name_from_objectid(btrfs_sb(mnt->mnt_sb),
+- subvol_objectid);
++ subvol_name = btrfs_get_subvol_name_from_objectid(
++ btrfs_sb(mnt->mnt_sb), subvol_objectid);
+ if (IS_ERR(subvol_name)) {
+ root = ERR_CAST(subvol_name);
+ subvol_name = NULL;
+diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
+index aefb0169d46d7..afec808a763b1 100644
+--- a/fs/btrfs/sysfs.c
++++ b/fs/btrfs/sysfs.c
+@@ -10,6 +10,7 @@
+ #include <linux/kobject.h>
+ #include <linux/bug.h>
+ #include <linux/debugfs.h>
++#include <linux/sched/mm.h>
+
+ #include "ctree.h"
+ #include "disk-io.h"
+@@ -766,7 +767,9 @@ int btrfs_sysfs_add_device_link(struct btrfs_fs_devices *fs_devices,
+ {
+ int error = 0;
+ struct btrfs_device *dev;
++ unsigned int nofs_flag;
+
++ nofs_flag = memalloc_nofs_save();
+ list_for_each_entry(dev, &fs_devices->devices, dev_list) {
+ struct hd_struct *disk;
+ struct kobject *disk_kobj;
+@@ -785,6 +788,7 @@ int btrfs_sysfs_add_device_link(struct btrfs_fs_devices *fs_devices,
+ if (error)
+ break;
+ }
++ memalloc_nofs_restore(nofs_flag);
+
+ return error;
+ }
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index a2e903203bf9f..0fa14d8b9c64c 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -3682,7 +3682,6 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
+ return -ENOMEM;
+ }
+
+- fsc->mdsc = mdsc;
+ init_completion(&mdsc->safe_umount_waiters);
+ init_waitqueue_head(&mdsc->session_close_wq);
+ INIT_LIST_HEAD(&mdsc->waiting_for_map);
+@@ -3723,6 +3722,8 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
+
+ strscpy(mdsc->nodename, utsname()->nodename,
+ sizeof(mdsc->nodename));
++
++ fsc->mdsc = mdsc;
+ return 0;
+ }
+
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index 58f48ea0db234..f988ccd064a22 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -1890,9 +1890,11 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
+ * not already there, and calling reverse_path_check()
+ * during ep_insert().
+ */
+- if (list_empty(&epi->ffd.file->f_tfile_llink))
++ if (list_empty(&epi->ffd.file->f_tfile_llink)) {
++ get_file(epi->ffd.file);
+ list_add(&epi->ffd.file->f_tfile_llink,
+ &tfile_check_list);
++ }
+ }
+ }
+ mutex_unlock(&ep->mtx);
+@@ -1936,6 +1938,7 @@ static void clear_tfile_check_list(void)
+ file = list_first_entry(&tfile_check_list, struct file,
+ f_tfile_llink);
+ list_del_init(&file->f_tfile_llink);
++ fput(file);
+ }
+ INIT_LIST_HEAD(&tfile_check_list);
+ }
+@@ -2091,13 +2094,13 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
+ mutex_lock(&epmutex);
+ if (is_file_epoll(tf.file)) {
+ error = -ELOOP;
+- if (ep_loop_check(ep, tf.file) != 0) {
+- clear_tfile_check_list();
++ if (ep_loop_check(ep, tf.file) != 0)
+ goto error_tgt_fput;
+- }
+- } else
++ } else {
++ get_file(tf.file);
+ list_add(&tf.file->f_tfile_llink,
+ &tfile_check_list);
++ }
+ mutex_lock_nested(&ep->mtx, 0);
+ if (is_file_epoll(tf.file)) {
+ tep = tf.file->private_data;
+@@ -2121,8 +2124,6 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
+ error = ep_insert(ep, &epds, tf.file, fd, full_check);
+ } else
+ error = -EEXIST;
+- if (full_check)
+- clear_tfile_check_list();
+ break;
+ case EPOLL_CTL_DEL:
+ if (epi)
+@@ -2145,8 +2146,10 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
+ mutex_unlock(&ep->mtx);
+
+ error_tgt_fput:
+- if (full_check)
++ if (full_check) {
++ clear_tfile_check_list();
+ mutex_unlock(&epmutex);
++ }
+
+ fdput(tf);
+ error_fput:
+diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
+index d203cc935ff83..552164034d340 100644
+--- a/fs/ext4/block_validity.c
++++ b/fs/ext4/block_validity.c
+@@ -68,7 +68,7 @@ static int add_system_zone(struct ext4_system_blocks *system_blks,
+ ext4_fsblk_t start_blk,
+ unsigned int count)
+ {
+- struct ext4_system_zone *new_entry = NULL, *entry;
++ struct ext4_system_zone *new_entry, *entry;
+ struct rb_node **n = &system_blks->root.rb_node, *node;
+ struct rb_node *parent = NULL, *new_node = NULL;
+
+@@ -79,30 +79,20 @@ static int add_system_zone(struct ext4_system_blocks *system_blks,
+ n = &(*n)->rb_left;
+ else if (start_blk >= (entry->start_blk + entry->count))
+ n = &(*n)->rb_right;
+- else {
+- if (start_blk + count > (entry->start_blk +
+- entry->count))
+- entry->count = (start_blk + count -
+- entry->start_blk);
+- new_node = *n;
+- new_entry = rb_entry(new_node, struct ext4_system_zone,
+- node);
+- break;
+- }
++ else /* Unexpected overlap of system zones. */
++ return -EFSCORRUPTED;
+ }
+
+- if (!new_entry) {
+- new_entry = kmem_cache_alloc(ext4_system_zone_cachep,
+- GFP_KERNEL);
+- if (!new_entry)
+- return -ENOMEM;
+- new_entry->start_blk = start_blk;
+- new_entry->count = count;
+- new_node = &new_entry->node;
+-
+- rb_link_node(new_node, parent, n);
+- rb_insert_color(new_node, &system_blks->root);
+- }
++ new_entry = kmem_cache_alloc(ext4_system_zone_cachep,
++ GFP_KERNEL);
++ if (!new_entry)
++ return -ENOMEM;
++ new_entry->start_blk = start_blk;
++ new_entry->count = count;
++ new_node = &new_entry->node;
++
++ rb_link_node(new_node, parent, n);
++ rb_insert_color(new_node, &system_blks->root);
+
+ /* Can we merge to the left? */
+ node = rb_prev(new_node);
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index a8f2e3549bb95..186a2dd05bd87 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -1309,8 +1309,8 @@ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
+ ext4_match(fname, de)) {
+ /* found a match - just to be sure, do
+ * a full check */
+- if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data,
+- bh->b_size, offset))
++ if (ext4_check_dir_entry(dir, NULL, de, bh, search_buf,
++ buf_size, offset))
+ return -1;
+ *res_dir = de;
+ return 1;
+@@ -1732,7 +1732,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
+ blocksize, hinfo, map);
+ map -= count;
+ dx_sort_map(map, count);
+- /* Split the existing block in the middle, size-wise */
++ /* Ensure that neither split block is over half full */
+ size = 0;
+ move = 0;
+ for (i = count-1; i >= 0; i--) {
+@@ -1742,8 +1742,18 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
+ size += map[i].size;
+ move++;
+ }
+- /* map index at which we will split */
+- split = count - move;
++ /*
++ * map index at which we will split
++ *
++ * If the sum of active entries didn't exceed half the block size, just
++ * split it in half by count; each resulting block will have at least
++ * half the space free.
++ */
++ if (i > 0)
++ split = count - move;
++ else
++ split = count/2;
++
+ hash2 = map[split].hash;
+ continued = hash2 == map[split - 1].hash;
+ dxtrace(printk(KERN_INFO "Split block %lu at %x, %i/%i\n",
+@@ -2344,7 +2354,7 @@ int ext4_generic_delete_entry(handle_t *handle,
+ de = (struct ext4_dir_entry_2 *)entry_buf;
+ while (i < buf_size - csum_size) {
+ if (ext4_check_dir_entry(dir, NULL, de, bh,
+- bh->b_data, bh->b_size, i))
++ entry_buf, buf_size, i))
+ return -EFSCORRUPTED;
+ if (de == de_del) {
+ if (pde)
+diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
+index a15a22d209090..8a50722bca29e 100644
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -1370,8 +1370,10 @@ static int jbd2_write_superblock(journal_t *journal, int write_flags)
+ int ret;
+
+ /* Buffer got discarded which means block device got invalidated */
+- if (!buffer_mapped(bh))
++ if (!buffer_mapped(bh)) {
++ unlock_buffer(bh);
+ return -EIO;
++ }
+
+ trace_jbd2_write_superblock(journal, write_flags);
+ if (!(journal->j_flags & JBD2_BARRIER))
+diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
+index f20cff1194bb6..776493713153f 100644
+--- a/fs/jffs2/dir.c
++++ b/fs/jffs2/dir.c
+@@ -590,10 +590,14 @@ static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry)
+ int ret;
+ uint32_t now = JFFS2_NOW();
+
++ mutex_lock(&f->sem);
+ for (fd = f->dents ; fd; fd = fd->next) {
+- if (fd->ino)
++ if (fd->ino) {
++ mutex_unlock(&f->sem);
+ return -ENOTEMPTY;
++ }
+ }
++ mutex_unlock(&f->sem);
+
+ ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name,
+ dentry->d_name.len, f, now);
+diff --git a/fs/romfs/storage.c b/fs/romfs/storage.c
+index f86f51f99aceb..1dcadd22b440d 100644
+--- a/fs/romfs/storage.c
++++ b/fs/romfs/storage.c
+@@ -221,10 +221,8 @@ int romfs_dev_read(struct super_block *sb, unsigned long pos,
+ size_t limit;
+
+ limit = romfs_maxsize(sb);
+- if (pos >= limit)
++ if (pos >= limit || buflen > limit - pos)
+ return -EIO;
+- if (buflen > limit - pos)
+- buflen = limit - pos;
+
+ #ifdef CONFIG_ROMFS_ON_MTD
+ if (sb->s_mtd)
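Two things about the romfs change are worth noting. First, failing with -EIO instead of silently truncating means callers can no longer read past the image and treat stale buffer contents as file data. Second, the combined test `pos >= limit || buflen > limit - pos` is the overflow-safe way to range-check, since it never computes `pos + buflen`, which could wrap. The predicate in isolation (read_in_bounds() is a demo name):

```c
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

/* True iff [pos, pos + buflen) fits inside [0, limit), with no overflow. */
static bool read_in_bounds(size_t pos, size_t buflen, size_t limit)
{
    return pos < limit && buflen <= limit - pos;
}

int main(void)
{
    assert(read_in_bounds(0, 10, 10));
    assert(!read_in_bounds(8, 4, 10));           /* runs past the end */
    assert(!read_in_bounds((size_t)-1, 2, 10));  /* pos + buflen would wrap */
    return 0;
}
```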
+diff --git a/fs/signalfd.c b/fs/signalfd.c
+index 4fcd1498acf52..3c40a3bf772ce 100644
+--- a/fs/signalfd.c
++++ b/fs/signalfd.c
+@@ -313,9 +313,10 @@ SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask,
+ {
+ sigset_t mask;
+
+- if (sizemask != sizeof(sigset_t) ||
+- copy_from_user(&mask, user_mask, sizeof(mask)))
++ if (sizemask != sizeof(sigset_t))
+ return -EINVAL;
++ if (copy_from_user(&mask, user_mask, sizeof(mask)))
++ return -EFAULT;
+ return do_signalfd4(ufd, &mask, flags);
+ }
+
+@@ -324,9 +325,10 @@ SYSCALL_DEFINE3(signalfd, int, ufd, sigset_t __user *, user_mask,
+ {
+ sigset_t mask;
+
+- if (sizemask != sizeof(sigset_t) ||
+- copy_from_user(&mask, user_mask, sizeof(mask)))
++ if (sizemask != sizeof(sigset_t))
+ return -EINVAL;
++ if (copy_from_user(&mask, user_mask, sizeof(mask)))
++ return -EFAULT;
+ return do_signalfd4(ufd, &mask, 0);
+ }
+
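The signalfd hunks split one compound test in two so each failure reports the conventional errno: a wrong sigset size is an invalid argument (-EINVAL), while an unreadable user pointer is a bad address (-EFAULT). A user-space analogue of the split, with get_mask() as a made-up stand-in for the syscall prologue:

```c
#include <errno.h>
#include <string.h>

/* Validate the size first (EINVAL), then the copy (EFAULT). */
static int get_mask(void *dst, const void *src, size_t size, size_t want)
{
    if (size != want)
        return -EINVAL;
    if (src == NULL)             /* stand-in for copy_from_user() faulting */
        return -EFAULT;
    memcpy(dst, src, want);
    return 0;
}

int main(void)
{
    unsigned long mask;
    return get_mask(&mask, NULL, sizeof(mask), sizeof(mask)) == -EFAULT ? 0 : 1;
}
```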
+diff --git a/fs/xfs/xfs_sysfs.h b/fs/xfs/xfs_sysfs.h
+index e9f810fc67317..43585850f1546 100644
+--- a/fs/xfs/xfs_sysfs.h
++++ b/fs/xfs/xfs_sysfs.h
+@@ -32,9 +32,11 @@ xfs_sysfs_init(
+ struct xfs_kobj *parent_kobj,
+ const char *name)
+ {
++ struct kobject *parent;
++
++ parent = parent_kobj ? &parent_kobj->kobject : NULL;
+ init_completion(&kobj->complete);
+- return kobject_init_and_add(&kobj->kobject, ktype,
+- &parent_kobj->kobject, "%s", name);
++ return kobject_init_and_add(&kobj->kobject, ktype, parent, "%s", name);
+ }
+
+ static inline void
+diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c
+index c23257a26c2b8..b8f05d5909b59 100644
+--- a/fs/xfs/xfs_trans_dquot.c
++++ b/fs/xfs/xfs_trans_dquot.c
+@@ -657,7 +657,7 @@ xfs_trans_dqresv(
+ }
+ }
+ if (ninos > 0) {
+- total_count = be64_to_cpu(dqp->q_core.d_icount) + ninos;
++ total_count = dqp->q_res_icount + ninos;
+ timer = be32_to_cpu(dqp->q_core.d_itimer);
+ warns = be16_to_cpu(dqp->q_core.d_iwarns);
+ warnlimit = dqp->q_mount->m_quotainfo->qi_iwarnlimit;
+diff --git a/kernel/kthread.c b/kernel/kthread.c
+index 087d18d771b53..b786eda90bb56 100644
+--- a/kernel/kthread.c
++++ b/kernel/kthread.c
+@@ -190,8 +190,15 @@ static void __kthread_parkme(struct kthread *self)
+ if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
+ break;
+
++ /*
++ * Thread is going to call schedule(), do not preempt it,
++ * or the caller of kthread_park() may spend more time in
++ * wait_task_inactive().
++ */
++ preempt_disable();
+ complete(&self->parked);
+- schedule();
++ schedule_preempt_disabled();
++ preempt_enable();
+ }
+ __set_current_state(TASK_RUNNING);
+ }
+@@ -236,8 +243,14 @@ static int kthread(void *_create)
+ /* OK, tell user we're spawned, wait for stop or wakeup */
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ create->result = current;
++ /*
++ * Thread is going to call schedule(), do not preempt it,
++ * or the creator may spend more time in wait_task_inactive().
++ */
++ preempt_disable();
+ complete(done);
+- schedule();
++ schedule_preempt_disabled();
++ preempt_enable();
+
+ ret = -EINTR;
+ if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
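The two kthread hunks close the same race: complete() wakes the parker or creator, which then calls wait_task_inactive() and spins until this thread is actually off the CPU, so being preempted between complete() and schedule() widens that window. Below is a sketch of the pattern with the kernel primitives replaced by printing stubs (preempt_disable(), schedule_preempt_disabled(), and complete() here are stand-ins, not the real API):

#include <stdio.h>

/* Stand-ins for kernel primitives, stubbed so the shape compiles alone. */
static void preempt_disable(void)           { puts("preemption off"); }
static void preempt_enable(void)            { puts("preemption back on"); }
static void schedule_preempt_disabled(void) { puts("schedule() with preempt off"); }
static void complete(int *done)             { *done = 1; }

/* The fixed handshake: signal the waiter and reach schedule() without a
 * preemption point in between, so wait_task_inactive() returns quickly. */
static void park_self(int *parked)
{
	preempt_disable();
	complete(parked);		/* the waiter may run from here on */
	schedule_preempt_disabled();	/* sleep without re-enabling first */
	preempt_enable();
}

int main(void)
{
	int parked = 0;

	park_self(&parked);
	return parked ? 0 : 1;
}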
+diff --git a/kernel/relay.c b/kernel/relay.c
+index 13c19f39e31e2..735cb208f023b 100644
+--- a/kernel/relay.c
++++ b/kernel/relay.c
+@@ -197,6 +197,7 @@ free_buf:
+ static void relay_destroy_channel(struct kref *kref)
+ {
+ struct rchan *chan = container_of(kref, struct rchan, kref);
++ free_percpu(chan->buf);
+ kfree(chan);
+ }
+
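The relay fix is a pure leak plug: the channel's per-cpu buffer array is allocated at open time but was never released by the kref release function. The invariant, in a plain-malloc analogue (free_percpu() is the real kernel call; free() stands in here):

#include <stdlib.h>

struct rchan_like { int *buf; };

/* Release everything the object owns before freeing the object itself. */
static void destroy_channel(struct rchan_like *chan)
{
	free(chan->buf);	/* was leaked before the fix */
	free(chan);
}

int main(void)
{
	struct rchan_like *chan = malloc(sizeof(*chan));

	if (!chan)
		return 1;
	chan->buf = malloc(64);
	destroy_channel(chan);
	return 0;
}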
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index e068c7f75a849..8a5708f31aa07 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -4650,25 +4650,21 @@ static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
+ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end)
+ {
+- unsigned long check_addr = *start;
++ unsigned long a_start, a_end;
+
+ if (!(vma->vm_flags & VM_MAYSHARE))
+ return;
+
+- for (check_addr = *start; check_addr < *end; check_addr += PUD_SIZE) {
+- unsigned long a_start = check_addr & PUD_MASK;
+- unsigned long a_end = a_start + PUD_SIZE;
++	/* Extend the range to be PUD-aligned for the worst-case scenario */
++ a_start = ALIGN_DOWN(*start, PUD_SIZE);
++ a_end = ALIGN(*end, PUD_SIZE);
+
+- /*
+- * If sharing is possible, adjust start/end if necessary.
+- */
+- if (range_in_vma(vma, a_start, a_end)) {
+- if (a_start < *start)
+- *start = a_start;
+- if (a_end > *end)
+- *end = a_end;
+- }
+- }
++	/*
++	 * Intersect the range with the vma range, since pmd sharing cannot
++	 * cross vma boundaries.
++	 */
++ *start = max(vma->vm_start, a_start);
++ *end = min(vma->vm_end, a_end);
+ }
+
+ /*
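The rewritten helper replaces the per-PUD loop with two alignment operations and a clamp; the loop version could also miss ranges whose PUD-rounded extent fell partly outside the vma. A compilable sketch of the arithmetic (the macros are re-derived here and PUD_SIZE is an illustrative value):

#include <stdio.h>

#define PUD_SIZE	(1UL << 30)		/* illustrative: 1 GiB PUD */
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define max(a, b)	((a) > (b) ? (a) : (b))
#define min(a, b)	((a) < (b) ? (a) : (b))

/* Widen the range to PUD boundaries, then intersect with the vma
 * limits, as in the fixed adjust_range_if_pmd_sharing_possible(). */
static void adjust_range(unsigned long vm_start, unsigned long vm_end,
			 unsigned long *start, unsigned long *end)
{
	unsigned long a_start = ALIGN_DOWN(*start, PUD_SIZE);
	unsigned long a_end   = ALIGN(*end, PUD_SIZE);

	*start = max(vm_start, a_start);
	*end   = min(vm_end, a_end);
}

int main(void)
{
	unsigned long s = 0x40001000, e = 0x40002000;

	adjust_range(0x40000000, 0xc0000000, &s, &e);
	printf("%#lx-%#lx\n", s, e);	/* 0x40000000-0x80000000 */
	return 0;
}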
+diff --git a/mm/khugepaged.c b/mm/khugepaged.c
+index 483c4573695a9..f37be43f8caeb 100644
+--- a/mm/khugepaged.c
++++ b/mm/khugepaged.c
+@@ -394,7 +394,7 @@ static void insert_to_mm_slots_hash(struct mm_struct *mm,
+
+ static inline int khugepaged_test_exit(struct mm_struct *mm)
+ {
+- return atomic_read(&mm->mm_users) == 0;
++ return atomic_read(&mm->mm_users) == 0 || !mmget_still_valid(mm);
+ }
+
+ static bool hugepage_vma_check(struct vm_area_struct *vma,
+@@ -427,7 +427,7 @@ int __khugepaged_enter(struct mm_struct *mm)
+ return -ENOMEM;
+
+ /* __khugepaged_exit() must not run from under us */
+- VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
++ VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm);
+ if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
+ free_mm_slot(mm_slot);
+ return 0;
+@@ -1005,9 +1005,6 @@ static void collapse_huge_page(struct mm_struct *mm,
+ * handled by the anon_vma lock + PG_lock.
+ */
+ down_write(&mm->mmap_sem);
+- result = SCAN_ANY_PROCESS;
+- if (!mmget_still_valid(mm))
+- goto out;
+ result = hugepage_vma_revalidate(mm, address, &vma);
+ if (result)
+ goto out;
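The khugepaged hunks move the mmget_still_valid() condition into khugepaged_test_exit() itself, so every scan path treats an mm in exec or coredump teardown as already exiting; only the VM_BUG_ON in __khugepaged_enter keeps the bare mm_users test, since a false mmget_still_valid() is legal there. A reduced sketch of folding two liveness conditions into one predicate (field and function names are illustrative):

#include <stdbool.h>

struct mm { int mm_users; bool core_dumping; };

/* Stand-in for mmget_still_valid(). */
static bool mm_still_valid(const struct mm *mm)
{
	return !mm->core_dumping;
}

/* Every caller of the exit test now also sees teardown in progress. */
static bool test_exit(const struct mm *mm)
{
	return mm->mm_users == 0 || !mm_still_valid(mm);
}

int main(void)
{
	struct mm mm = { .mm_users = 1, .core_dumping = true };

	return test_exit(&mm) ? 0 : 1;	/* dumping counts as exiting now */
}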
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 7181dfe764405..5717ee66c8b38 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1115,6 +1115,11 @@ static void free_pcppages_bulk(struct zone *zone, int count,
+ struct page *page, *tmp;
+ LIST_HEAD(head);
+
++	/*
++	 * Ensure a proper count is passed; otherwise the
++	 * while (list_empty(list)) loop below could get stuck.
++	 */
++ count = min(pcp->count, count);
+ while (count) {
+ struct list_head *list;
+
+@@ -7395,7 +7400,7 @@ int __meminit init_per_zone_wmark_min(void)
+
+ return 0;
+ }
+-core_initcall(init_per_zone_wmark_min)
++postcore_initcall(init_per_zone_wmark_min)
+
+ /*
+ * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
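The clamp added to free_pcppages_bulk() guards the loop's termination condition: count is the number of pages to free, and if it exceeds what the per-cpu lists actually hold, the inner search for a non-empty list spins forever. A reduced model:

#define min(a, b) ((a) < (b) ? (a) : (b))

/* Never try to drain more pages than are present, otherwise the
 * search for a non-empty list below never terminates. */
static int drain(int pcp_count, int count)
{
	int freed = 0;

	count = min(pcp_count, count);	/* the added guard */
	while (count) {
		/* ... find a non-empty list, free one page ... */
		pcp_count--;
		count--;
		freed++;
	}
	return freed;
}

int main(void)
{
	return drain(4, 100) == 4 ? 0 : 1;	/* over-large request is capped */
}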
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+index 16c8174658fd1..252495ff9010d 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+@@ -268,6 +268,8 @@ static int svc_rdma_post_recv(struct svcxprt_rdma *rdma)
+ {
+ struct svc_rdma_recv_ctxt *ctxt;
+
++ if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
++ return 0;
+ ctxt = svc_rdma_recv_ctxt_get(rdma);
+ if (!ctxt)
+ return -ENOMEM;
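The svc_rdma hunk adds a shutdown guard: once XPT_CLOSE is set, posting another receive would touch resources that are draining or already torn down, so the function returns success and does nothing. A minimal model of the idiom:

#include <stdbool.h>

struct xprt { bool closing; };

/* Once the transport is marked closed, posting a receive is a
 * harmless no-op rather than an error or a use-after-free risk. */
static int post_recv(struct xprt *x)
{
	if (x->closing)
		return 0;
	/* ... allocate a receive context and post it ... */
	return 0;
}

int main(void)
{
	struct xprt x = { .closing = true };

	return post_recv(&x);
}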
+diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc
+index 8f004db6f6034..1ee33d2e15bf8 100644
+--- a/scripts/kconfig/qconf.cc
++++ b/scripts/kconfig/qconf.cc
+@@ -869,40 +869,40 @@ void ConfigList::focusInEvent(QFocusEvent *e)
+
+ void ConfigList::contextMenuEvent(QContextMenuEvent *e)
+ {
+- if (e->y() <= header()->geometry().bottom()) {
+- if (!headerPopup) {
+- QAction *action;
+-
+- headerPopup = new QMenu(this);
+- action = new QAction("Show Name", this);
+- action->setCheckable(true);
+- connect(action, SIGNAL(toggled(bool)),
+- parent(), SLOT(setShowName(bool)));
+- connect(parent(), SIGNAL(showNameChanged(bool)),
+- action, SLOT(setOn(bool)));
+- action->setChecked(showName);
+- headerPopup->addAction(action);
+- action = new QAction("Show Range", this);
+- action->setCheckable(true);
+- connect(action, SIGNAL(toggled(bool)),
+- parent(), SLOT(setShowRange(bool)));
+- connect(parent(), SIGNAL(showRangeChanged(bool)),
+- action, SLOT(setOn(bool)));
+- action->setChecked(showRange);
+- headerPopup->addAction(action);
+- action = new QAction("Show Data", this);
+- action->setCheckable(true);
+- connect(action, SIGNAL(toggled(bool)),
+- parent(), SLOT(setShowData(bool)));
+- connect(parent(), SIGNAL(showDataChanged(bool)),
+- action, SLOT(setOn(bool)));
+- action->setChecked(showData);
+- headerPopup->addAction(action);
+- }
+- headerPopup->exec(e->globalPos());
+- e->accept();
+- } else
+- e->ignore();
++ if (!headerPopup) {
++ QAction *action;
++
++ headerPopup = new QMenu(this);
++ action = new QAction("Show Name", this);
++ action->setCheckable(true);
++ connect(action, SIGNAL(toggled(bool)),
++ parent(), SLOT(setShowName(bool)));
++ connect(parent(), SIGNAL(showNameChanged(bool)),
++ action, SLOT(setChecked(bool)));
++ action->setChecked(showName);
++ headerPopup->addAction(action);
++
++ action = new QAction("Show Range", this);
++ action->setCheckable(true);
++ connect(action, SIGNAL(toggled(bool)),
++ parent(), SLOT(setShowRange(bool)));
++ connect(parent(), SIGNAL(showRangeChanged(bool)),
++ action, SLOT(setChecked(bool)));
++ action->setChecked(showRange);
++ headerPopup->addAction(action);
++
++ action = new QAction("Show Data", this);
++ action->setCheckable(true);
++ connect(action, SIGNAL(toggled(bool)),
++ parent(), SLOT(setShowData(bool)));
++ connect(parent(), SIGNAL(showDataChanged(bool)),
++ action, SLOT(setChecked(bool)));
++ action->setChecked(showData);
++ headerPopup->addAction(action);
++ }
++
++ headerPopup->exec(e->globalPos());
++ e->accept();
+ }
+
+ ConfigView*ConfigView::viewList;
+@@ -1228,7 +1228,7 @@ QMenu* ConfigInfoView::createStandardContextMenu(const QPoint & pos)
+
+ action->setCheckable(true);
+ connect(action, SIGNAL(toggled(bool)), SLOT(setShowDebug(bool)));
+- connect(this, SIGNAL(showDebugChanged(bool)), action, SLOT(setOn(bool)));
++ connect(this, SIGNAL(showDebugChanged(bool)), action, SLOT(setChecked(bool)));
+ action->setChecked(showDebug());
+ popup->addSeparator();
+ popup->addAction(action);
+diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c
+index cbdb6d4bb91ef..f4aba065c9257 100644
+--- a/sound/soc/codecs/msm8916-wcd-analog.c
++++ b/sound/soc/codecs/msm8916-wcd-analog.c
+@@ -16,8 +16,8 @@
+
+ #define CDC_D_REVISION1 (0xf000)
+ #define CDC_D_PERPH_SUBTYPE (0xf005)
+-#define CDC_D_INT_EN_SET (0x015)
+-#define CDC_D_INT_EN_CLR (0x016)
++#define CDC_D_INT_EN_SET (0xf015)
++#define CDC_D_INT_EN_CLR (0xf016)
+ #define MBHC_SWITCH_INT BIT(7)
+ #define MBHC_MIC_ELECTRICAL_INS_REM_DET BIT(6)
+ #define MBHC_BUTTON_PRESS_DET BIT(5)
+diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+index 6868e71e3a3f0..0572c3c964506 100644
+--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
++++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+@@ -339,7 +339,7 @@ static int sst_media_open(struct snd_pcm_substream *substream,
+
+ ret_val = power_up_sst(stream);
+ if (ret_val < 0)
+- return ret_val;
++ goto out_power_up;
+
+ /* Make sure, that the period size is always even */
+ snd_pcm_hw_constraint_step(substream->runtime, 0,
+@@ -348,8 +348,9 @@ static int sst_media_open(struct snd_pcm_substream *substream,
+ return snd_pcm_hw_constraint_integer(runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ out_ops:
+- kfree(stream);
+ mutex_unlock(&sst_lock);
++out_power_up:
++ kfree(stream);
+ return ret_val;
+ }
+
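The sst_media_open() fix is about cleanup ordering: the old out_ops path freed stream and then unlocked (backwards relative to acquisition), and an early power_up_sst() failure returned without freeing stream at all. With the labels reordered, each exit point unwinds exactly what it acquired. A userspace analogue (power_up() and do_setup() are hypothetical stand-ins):

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t sst_lock = PTHREAD_MUTEX_INITIALIZER;

static int power_up(void) { return -EIO; }	/* pretend power-up fails */
static int do_setup(void) { return 0; }

/* Cleanup labels run last-acquired-first: a failure before the lock
 * skips the unlock, a failure under the lock unlocks and then frees. */
static int media_open(void **out)
{
	void *stream = malloc(64);
	int ret;

	if (!stream)
		return -ENOMEM;

	ret = power_up();
	if (ret < 0)
		goto out_power_up;	/* lock not taken yet */

	pthread_mutex_lock(&sst_lock);
	ret = do_setup();
	if (ret < 0)
		goto out_ops;
	pthread_mutex_unlock(&sst_lock);

	*out = stream;			/* success: caller owns stream */
	return 0;

out_ops:
	pthread_mutex_unlock(&sst_lock);
out_power_up:
	free(stream);
	return ret;
}

int main(void)
{
	void *s = NULL;

	return media_open(&s) == -EIO ? 0 : 1;
}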
+diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c
+index c6b51571be945..44eee18c658ae 100644
+--- a/sound/soc/qcom/qdsp6/q6routing.c
++++ b/sound/soc/qcom/qdsp6/q6routing.c
+@@ -968,6 +968,20 @@ static int msm_routing_probe(struct snd_soc_component *c)
+ return 0;
+ }
+
++static unsigned int q6routing_reg_read(struct snd_soc_component *component,
++ unsigned int reg)
++{
++ /* default value */
++ return 0;
++}
++
++static int q6routing_reg_write(struct snd_soc_component *component,
++ unsigned int reg, unsigned int val)
++{
++ /* dummy */
++ return 0;
++}
++
+ static const struct snd_soc_component_driver msm_soc_routing_component = {
+ .ops = &q6pcm_routing_ops,
+ .probe = msm_routing_probe,
+@@ -976,6 +990,8 @@ static const struct snd_soc_component_driver msm_soc_routing_component = {
+ .num_dapm_widgets = ARRAY_SIZE(msm_qdsp6_widgets),
+ .dapm_routes = intercon,
+ .num_dapm_routes = ARRAY_SIZE(intercon),
++ .read = q6routing_reg_read,
++ .write = q6routing_reg_write,
+ };
+
+ static int q6pcm_routing_probe(struct platform_device *pdev)
+diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
+index 60169196b9481..4da4ec2552463 100644
+--- a/tools/perf/util/probe-finder.c
++++ b/tools/perf/util/probe-finder.c
+@@ -1351,7 +1351,7 @@ int debuginfo__find_trace_events(struct debuginfo *dbg,
+ tf.ntevs = 0;
+
+ ret = debuginfo__find_probes(dbg, &tf.pf);
+- if (ret < 0) {
++ if (ret < 0 || tf.ntevs == 0) {
+ for (i = 0; i < tf.ntevs; i++)
+ clear_probe_trace_event(&tf.tevs[i]);
+ zfree(tevs);
+diff --git a/tools/testing/selftests/cgroup/cgroup_util.c b/tools/testing/selftests/cgroup/cgroup_util.c
+index 075cb0c730149..90418d79ef676 100644
+--- a/tools/testing/selftests/cgroup/cgroup_util.c
++++ b/tools/testing/selftests/cgroup/cgroup_util.c
+@@ -95,7 +95,7 @@ int cg_read_strcmp(const char *cgroup, const char *control,
+
+ /* Handle the case of comparing against empty string */
+ if (!expected)
+- size = 32;
++ return -1;
+ else
+ size = strlen(expected) + 1;
+
+diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
+index a5bc10d30618f..41d6285c3da99 100644
+--- a/virt/kvm/arm/mmu.c
++++ b/virt/kvm/arm/mmu.c
+@@ -323,7 +323,8 @@ static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
+ * destroying the VM), otherwise another faulting VCPU may come in and mess
+ * with things behind our backs.
+ */
+-static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
++static void __unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size,
++ bool may_block)
+ {
+ pgd_t *pgd;
+ phys_addr_t addr = start, end = start + size;
+@@ -348,11 +349,16 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
+ * If the range is too large, release the kvm->mmu_lock
+ * to prevent starvation and lockup detector warnings.
+ */
+- if (next != end)
++ if (may_block && next != end)
+ cond_resched_lock(&kvm->mmu_lock);
+ } while (pgd++, addr = next, addr != end);
+ }
+
++static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
++{
++ __unmap_stage2_range(kvm, start, size, true);
++}
++
+ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
+ phys_addr_t addr, phys_addr_t end)
+ {
+@@ -1820,18 +1826,20 @@ static int handle_hva_to_gpa(struct kvm *kvm,
+
+ static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
+ {
+- unmap_stage2_range(kvm, gpa, size);
++ bool may_block = *(bool *)data;
++
++ __unmap_stage2_range(kvm, gpa, size, may_block);
+ return 0;
+ }
+
+ int kvm_unmap_hva_range(struct kvm *kvm,
+- unsigned long start, unsigned long end)
++ unsigned long start, unsigned long end, bool blockable)
+ {
+ if (!kvm->arch.pgd)
+ return 0;
+
+ trace_kvm_unmap_hva_range(start, end);
+- handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
++ handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &blockable);
+ return 0;
+ }
+
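The mmu.c changes thread the new blockable flag from kvm_unmap_hva_range() down to the per-range handler through the opaque data pointer that handle_hva_to_gpa() already passes along, so the walker's signature stays generic while the handler decides whether it may drop the lock. The shape of that idiom, reduced to standalone C:

#include <stdbool.h>
#include <stdio.h>

/* A generic walker threads an opaque pointer to its callback, and the
 * caller uses it to smuggle a per-invocation bool, as in the fix. */
typedef int (*range_handler)(unsigned long start, unsigned long end, void *data);

static int walk_range(unsigned long start, unsigned long end,
		      range_handler fn, void *data)
{
	return fn(start, end, data);
}

static int unmap_handler(unsigned long start, unsigned long end, void *data)
{
	bool may_block = *(bool *)data;

	printf("unmap %#lx-%#lx, may_block=%d\n", start, end, may_block);
	return 0;
}

int main(void)
{
	bool blockable = false;	/* e.g. caller holds locks it cannot drop */

	return walk_range(0x1000, 0x2000, unmap_handler, &blockable);
}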
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 1218ea663c6d2..2155b52b17eca 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -410,7 +410,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
+ * count is also read inside the mmu_lock critical section.
+ */
+ kvm->mmu_notifier_count++;
+- need_tlb_flush = kvm_unmap_hva_range(kvm, start, end);
++ need_tlb_flush = kvm_unmap_hva_range(kvm, start, end, blockable);
+ need_tlb_flush |= kvm->tlbs_dirty;
+ /* we've to flush the tlb before the pages can be freed */
+ if (need_tlb_flush)