Linux 6.8.8
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Sat, 27 Apr 2024 15:13:05 +0000 (17:13 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Sat, 27 Apr 2024 15:13:05 +0000 (17:13 +0200)
Link: https://lore.kernel.org/r/20240423213855.824778126@linuxfoundation.org
Tested-by: SeongJae Park <sj@kernel.org>
Tested-by: Bagas Sanjaya <bagasdotme@gmail.com>
Tested-by: Pavel Machek (CIP) <pavel@denx.de>
Tested-by: Ron Economos <re@w6rz.net>
Tested-by: Ronald Warsow <rwarsow@gmx.de>
Tested-by: Florian Fainelli <florian.fainelli@broadcom.com>
Tested-by: Jon Hunter <jonathanh@nvidia.com>
Tested-by: Linux Kernel Functional Testing <lkft@linaro.org>
Tested-by: kernelci.org bot <bot@kernelci.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
178 files changed:
Makefile
arch/arm64/kernel/head.S
arch/arm64/mm/pageattr.c
arch/x86/include/asm/barrier.h
arch/x86/include/asm/kvm_host.h
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/cpuid-deps.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/cpuid.h
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/tdp_mmu.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
block/bdev.c
block/ioctl.c
drivers/accessibility/speakup/main.c
drivers/android/binder.c
drivers/char/random.c
drivers/clk/clk.c
drivers/clk/mediatek/clk-mt7988-infracfg.c
drivers/clk/mediatek/clk-mtk.c
drivers/comedi/drivers/vmk80xx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdkfd/kfd_process.c
drivers/gpu/drm/i915/display/intel_cdclk.c
drivers/gpu/drm/nouveau/nouveau_bios.c
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
drivers/gpu/drm/panel/panel-visionox-rm69299.c
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/ttm/ttm_pool.c
drivers/gpu/drm/v3d/v3d_irq.c
drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
drivers/gpu/drm/xe/display/intel_fb_bo.c
drivers/infiniband/core/cm.c
drivers/infiniband/hw/mlx5/mad.c
drivers/infiniband/sw/rxe/rxe.c
drivers/interconnect/core.c
drivers/interconnect/qcom/x1e80100.c
drivers/iommu/iommufd/Kconfig
drivers/misc/cardreader/rtsx_pcr.c
drivers/misc/mei/pci-me.c
drivers/misc/mei/platform-vsc.c
drivers/misc/mei/vsc-tp.c
drivers/misc/mei/vsc-tp.h
drivers/net/dsa/mt7530.c
drivers/net/dsa/mt7530.h
drivers/net/ethernet/intel/ice/ice_tc_lib.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
drivers/net/ethernet/mediatek/mtk_wed.c
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
drivers/net/ethernet/realtek/r8169.h
drivers/net/ethernet/realtek/r8169_leds.c
drivers/net/ethernet/realtek/r8169_main.c
drivers/net/ethernet/renesas/ravb.h
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/stmicro/stmmac/common.h
drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/ti/am65-cpsw-nuss.c
drivers/net/tun.c
drivers/net/usb/ax88179_178a.c
drivers/platform/x86/amd/pmc/pmc-quirks.c
drivers/s390/cio/device.c
drivers/s390/cio/qdio_main.c
drivers/s390/net/ism_drv.c
drivers/scsi/scsi_lib.c
drivers/thermal/thermal_debugfs.c
drivers/thunderbolt/domain.c
drivers/thunderbolt/icm.c
drivers/thunderbolt/lc.c
drivers/thunderbolt/nhi.c
drivers/thunderbolt/path.c
drivers/thunderbolt/switch.c
drivers/thunderbolt/tb.c
drivers/thunderbolt/tb.h
drivers/thunderbolt/tb_regs.h
drivers/thunderbolt/usb4.c
drivers/tty/serial/8250/8250_dw.c
drivers/tty/serial/mxs-auart.c
drivers/tty/serial/pmac_zilog.c
drivers/tty/serial/serial_base.h
drivers/tty/serial/serial_core.c
drivers/tty/serial/serial_port.c
drivers/tty/serial/stm32-usart.c
drivers/ufs/host/ufs-qcom.c
drivers/usb/class/cdc-wdm.c
drivers/usb/core/port.c
drivers/usb/dwc2/hcd_ddma.c
drivers/usb/gadget/function/f_ncm.c
drivers/usb/misc/onboard_usb_hub.c
drivers/usb/serial/option.c
drivers/usb/typec/tcpm/tcpm.c
drivers/virt/vmgenid.c
fs/btrfs/extent_io.c
fs/fuse/dir.c
fs/nfsd/nfs4xdr.c
fs/nilfs2/dir.c
fs/smb/common/smb2pdu.h
fs/smb/server/server.c
fs/smb/server/smb2pdu.c
fs/smb/server/vfs.c
fs/squashfs/inode.c
fs/sysfs/file.c
include/asm-generic/barrier.h
include/linux/blkdev.h
include/linux/bootconfig.h
include/linux/gpio/property.h
include/linux/shmem_fs.h
include/linux/swapops.h
include/linux/udp.h
include/net/netfilter/nf_flow_table.h
include/net/netfilter/nf_tables.h
include/net/sch_generic.h
include/trace/events/rpcgss.h
init/main.c
io_uring/io_uring.c
kernel/fork.c
kernel/sched/sched.h
lib/bootconfig.c
mm/gup.c
mm/huge_memory.c
mm/hugetlb.c
mm/internal.h
mm/madvise.c
mm/memory-failure.c
mm/shmem.c
net/bridge/br_input.c
net/bridge/br_netfilter_hooks.c
net/bridge/br_private.h
net/bridge/netfilter/nf_conntrack_bridge.c
net/core/dev.c
net/netfilter/nf_flow_table_inet.c
net/netfilter/nf_flow_table_ip.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_lookup.c
net/netfilter/nft_set_bitmap.c
net/netfilter/nft_set_hash.c
net/netfilter/nft_set_pipapo.c
net/netfilter/nft_set_pipapo.h
net/netfilter/nft_set_pipapo_avx2.c
net/netfilter/nft_set_rbtree.c
net/sched/sch_generic.c
net/unix/af_unix.c
sound/core/seq/seq_ump_convert.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/tas2781_hda_i2c.c
tools/perf/ui/browsers/annotate.c
tools/perf/util/annotate.c
tools/perf/util/bpf_skel/lock_contention.bpf.c
tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc
tools/testing/selftests/iommu/config
tools/testing/selftests/net/tcp_ao/lib/proc.c
tools/testing/selftests/net/tcp_ao/lib/setup.c
tools/testing/selftests/net/tcp_ao/rst.c
tools/testing/selftests/net/tcp_ao/setsockopt-closed.c
tools/testing/selftests/net/udpgso.c
tools/testing/selftests/powerpc/papr_vpd/papr_vpd.c

index c426d47f4b7bf1d1eff69513762332e988bf60bb..ffa43a6d042411979a2e26ea272777086ad4d30f 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 6
 PATCHLEVEL = 8
-SUBLEVEL = 6
+SUBLEVEL = 8
 EXTRAVERSION =
 NAME = Hurr durr I'ma ninja sloth
 
index cab7f91949d8f58e9565dae545e1f0a1056ea14b..a92905e6d43ad147627e6d641c23d0fc6b85fc28 100644 (file)
@@ -569,6 +569,11 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
        adr_l   x1, __hyp_text_end
        adr_l   x2, dcache_clean_poc
        blr     x2
+
+       mov_q   x0, INIT_SCTLR_EL2_MMU_OFF
+       pre_disable_mmu_workaround
+       msr     sctlr_el2, x0
+       isb
 0:
        mov_q   x0, HCR_HOST_NVHE_FLAGS
        msr     hcr_el2, x0
index 924843f1f661bfe1ff5c6b8f9eff753872416040..0a62f458c5cb023b01c894c555d120ce74941fea 100644 (file)
@@ -219,9 +219,6 @@ bool kernel_page_present(struct page *page)
        pte_t *ptep;
        unsigned long addr = (unsigned long)page_address(page);
 
-       if (!can_set_direct_map())
-               return true;
-
        pgdp = pgd_offset_k(addr);
        if (pgd_none(READ_ONCE(*pgdp)))
                return false;
index 0216f63a366b54db7239f5689e3fa77b54d7f7a8..d0795b5fab46adc4b12e429aa6fb4eb2ba60965c 100644 (file)
@@ -79,6 +79,9 @@ do {                                                                  \
 #define __smp_mb__before_atomic()      do { } while (0)
 #define __smp_mb__after_atomic()       do { } while (0)
 
+/* Writing to CR3 provides a full memory barrier in switch_mm(). */
+#define smp_mb__after_switch_mm()      do { } while (0)
+
 #include <asm-generic/barrier.h>
 
 #endif /* _ASM_X86_BARRIER_H */
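The new x86 stub pairs with a generic fallback in include/asm-generic/barrier.h (also touched by this release). A minimal sketch of that override pattern, assuming the generic header's usual #ifndef guard:

    /*
     * Generic fallback: a full barrier unless the architecture overrides it.
     * x86 can make it a no-op because switch_mm() writes CR3, which is
     * already fully serializing.
     */
    #ifndef smp_mb__after_switch_mm
    #define smp_mb__after_switch_mm()	smp_mb()
    #endif
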
index d271ba20a0b214104a1f11832a1007f5bb35190e..b7a539ea2986ff5cce0d84501803a02ab7b520f2 100644 (file)
@@ -854,6 +854,7 @@ struct kvm_vcpu_arch {
        int cpuid_nent;
        struct kvm_cpuid_entry2 *cpuid_entries;
        struct kvm_hypervisor_cpuid kvm_cpuid;
+       bool is_amd_compatible;
 
        /*
         * FIXME: Drop this macro and use KVM_NR_GOVERNED_FEATURES directly
index cbc8c88144e4706407a6f3d868fdc366aa3cf619..e7ceee008bd09f63fc548ac844b423243cd51503 100644 (file)
@@ -1651,7 +1651,8 @@ static void __init bhi_select_mitigation(void)
                return;
 
        /* Retpoline mitigates against BHI unless the CPU has RRSBA behavior */
-       if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
+       if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
+           !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) {
                spec_ctrl_disable_kernel_rrsba();
                if (rrsba_disabled)
                        return;
@@ -2803,11 +2804,13 @@ static const char *spectre_bhi_state(void)
 {
        if (!boot_cpu_has_bug(X86_BUG_BHI))
                return "; BHI: Not affected";
-       else if  (boot_cpu_has(X86_FEATURE_CLEAR_BHB_HW))
+       else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_HW))
                return "; BHI: BHI_DIS_S";
-       else if  (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
+       else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
                return "; BHI: SW loop, KVM: SW loop";
-       else if (boot_cpu_has(X86_FEATURE_RETPOLINE) && rrsba_disabled)
+       else if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
+                !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE) &&
+                rrsba_disabled)
                return "; BHI: Retpoline";
        else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT))
                return "; BHI: Vulnerable, KVM: SW loop";
index e462c1d3800a6cb47c7d486a2a684278fd4cca1e..6fb6d8a57cecafe2d0438222b2b9cb8dc73a2c05 100644 (file)
@@ -44,7 +44,10 @@ static const struct cpuid_dep cpuid_deps[] = {
        { X86_FEATURE_F16C,                     X86_FEATURE_XMM2,     },
        { X86_FEATURE_AES,                      X86_FEATURE_XMM2      },
        { X86_FEATURE_SHA_NI,                   X86_FEATURE_XMM2      },
+       { X86_FEATURE_GFNI,                     X86_FEATURE_XMM2      },
        { X86_FEATURE_FMA,                      X86_FEATURE_AVX       },
+       { X86_FEATURE_VAES,                     X86_FEATURE_AVX       },
+       { X86_FEATURE_VPCLMULQDQ,               X86_FEATURE_AVX       },
        { X86_FEATURE_AVX2,                     X86_FEATURE_AVX,      },
        { X86_FEATURE_AVX512F,                  X86_FEATURE_AVX,      },
        { X86_FEATURE_AVX512IFMA,               X86_FEATURE_AVX512F   },
@@ -56,9 +59,6 @@ static const struct cpuid_dep cpuid_deps[] = {
        { X86_FEATURE_AVX512VL,                 X86_FEATURE_AVX512F   },
        { X86_FEATURE_AVX512VBMI,               X86_FEATURE_AVX512F   },
        { X86_FEATURE_AVX512_VBMI2,             X86_FEATURE_AVX512VL  },
-       { X86_FEATURE_GFNI,                     X86_FEATURE_AVX512VL  },
-       { X86_FEATURE_VAES,                     X86_FEATURE_AVX512VL  },
-       { X86_FEATURE_VPCLMULQDQ,               X86_FEATURE_AVX512VL  },
        { X86_FEATURE_AVX512_VNNI,              X86_FEATURE_AVX512VL  },
        { X86_FEATURE_AVX512_BITALG,            X86_FEATURE_AVX512VL  },
        { X86_FEATURE_AVX512_4VNNIW,            X86_FEATURE_AVX512F   },
index adba49afb5fe63b1de9345579615284593e00468..3a02276899db8855b129eb1b0df77b1fa2fccd4e 100644 (file)
@@ -366,6 +366,7 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 
        kvm_update_pv_runtime(vcpu);
 
+       vcpu->arch.is_amd_compatible = guest_cpuid_is_amd_or_hygon(vcpu);
        vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
        vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);
 
index 856e3037e74f3ffc7fdeb72f2067812080d71910..23dbb9eb277c7465f19bc5af137b79d5a2b894d1 100644 (file)
@@ -120,6 +120,16 @@ static inline bool guest_cpuid_is_intel(struct kvm_vcpu *vcpu)
        return best && is_guest_vendor_intel(best->ebx, best->ecx, best->edx);
 }
 
+static inline bool guest_cpuid_is_amd_compatible(struct kvm_vcpu *vcpu)
+{
+       return vcpu->arch.is_amd_compatible;
+}
+
+static inline bool guest_cpuid_is_intel_compatible(struct kvm_vcpu *vcpu)
+{
+       return !guest_cpuid_is_amd_compatible(vcpu);
+}
+
 static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
 {
        struct kvm_cpuid_entry2 *best;
index 75bc7d3f0022d988236724ba205bd2012f7c8fef..76fcee92b0706c8ade331ea235b7fc18873e075f 100644 (file)
@@ -2771,7 +2771,8 @@ int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
                trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
 
                r = __apic_accept_irq(apic, mode, vector, 1, trig_mode, NULL);
-               if (r && lvt_type == APIC_LVTPC)
+               if (r && lvt_type == APIC_LVTPC &&
+                   guest_cpuid_is_intel_compatible(apic->vcpu))
                        kvm_lapic_set_reg(apic, APIC_LVTPC, reg | APIC_LVT_MASKED);
                return r;
        }
index e2c3573f53e4306841aa70170d5961c301e50701..982cf41e14924b3e0ae21105944881eb538491e8 100644 (file)
@@ -4922,7 +4922,7 @@ static void reset_guest_rsvds_bits_mask(struct kvm_vcpu *vcpu,
                                context->cpu_role.base.level, is_efer_nx(context),
                                guest_can_use(vcpu, X86_FEATURE_GBPAGES),
                                is_cr4_pse(context),
-                               guest_cpuid_is_amd_or_hygon(vcpu));
+                               guest_cpuid_is_amd_compatible(vcpu));
 }
 
 static void __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
@@ -7388,7 +7388,8 @@ bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
                         * by the memslot, KVM can't use a hugepage due to the
                         * misaligned address regardless of memory attributes.
                         */
-                       if (gfn >= slot->base_gfn) {
+                       if (gfn >= slot->base_gfn &&
+                           gfn + nr_pages <= slot->base_gfn + slot->npages) {
                                if (hugepage_has_attrs(kvm, slot, gfn, level, attrs))
                                        hugepage_clear_mixed(slot, gfn, level);
                                else
index 6ae19b4ee5b1cb17d4ddda85197379cde425b03e..953082bf96e27825bdad6d311fd139aa39822f7a 100644 (file)
@@ -1498,6 +1498,16 @@ void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
        }
 }
 
+static bool tdp_mmu_need_write_protect(struct kvm_mmu_page *sp)
+{
+       /*
+        * All TDP MMU shadow pages share the same role as their root, aside
+        * from level, so it is valid to key off any shadow page to determine if
+        * write protection is needed for an entire tree.
+        */
+       return kvm_mmu_page_ad_need_write_protect(sp) || !kvm_ad_enabled();
+}
+
 /*
  * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
  * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
@@ -1508,7 +1518,8 @@ void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
 static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
                           gfn_t start, gfn_t end)
 {
-       u64 dbit = kvm_ad_enabled() ? shadow_dirty_mask : PT_WRITABLE_MASK;
+       const u64 dbit = tdp_mmu_need_write_protect(root) ? PT_WRITABLE_MASK :
+                                                           shadow_dirty_mask;
        struct tdp_iter iter;
        bool spte_set = false;
 
@@ -1523,7 +1534,7 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
                if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
                        continue;
 
-               KVM_MMU_WARN_ON(kvm_ad_enabled() &&
+               KVM_MMU_WARN_ON(dbit == shadow_dirty_mask &&
                                spte_ad_need_write_protect(iter.old_spte));
 
                if (!(iter.old_spte & dbit))
@@ -1570,8 +1581,8 @@ bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
 static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
                                  gfn_t gfn, unsigned long mask, bool wrprot)
 {
-       u64 dbit = (wrprot || !kvm_ad_enabled()) ? PT_WRITABLE_MASK :
-                                                  shadow_dirty_mask;
+       const u64 dbit = (wrprot || tdp_mmu_need_write_protect(root)) ? PT_WRITABLE_MASK :
+                                                                       shadow_dirty_mask;
        struct tdp_iter iter;
 
        lockdep_assert_held_write(&kvm->mmu_lock);
@@ -1583,7 +1594,7 @@ static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
                if (!mask)
                        break;
 
-               KVM_MMU_WARN_ON(kvm_ad_enabled() &&
+               KVM_MMU_WARN_ON(dbit == shadow_dirty_mask &&
                                spte_ad_need_write_protect(iter.old_spte));
 
                if (iter.level > PG_LEVEL_4K ||
index 88a4ff200d04bf2ae6c9c2953608c3d34b2ac592..784f2ecca5d2c3204aef773e601815f92501e0e7 100644 (file)
@@ -7857,8 +7857,28 @@ static u64 vmx_get_perf_capabilities(void)
 
        if (vmx_pebs_supported()) {
                perf_cap |= host_perf_cap & PERF_CAP_PEBS_MASK;
-               if ((perf_cap & PERF_CAP_PEBS_FORMAT) < 4)
-                       perf_cap &= ~PERF_CAP_PEBS_BASELINE;
+
+               /*
+                * Disallow adaptive PEBS as it is functionally broken, can be
+                * used by the guest to read *host* LBRs, and can be used to
+                * bypass userspace event filters.  To correctly and safely
+                * support adaptive PEBS, KVM needs to:
+                *
+                * 1. Account for the ADAPTIVE flag when (re)programming fixed
+                *    counters.
+                *
+                * 2. Gain support from perf (or take direct control of counter
+                *    programming) to support events without adaptive PEBS
+                *    enabled for the hardware counter.
+                *
+                * 3. Ensure LBR MSRs cannot hold host data on VM-Entry with
+                *    adaptive PEBS enabled and MSR_PEBS_DATA_CFG.LBRS=1.
+                *
+                * 4. Document which PMU events are effectively exposed to the
+                *    guest via adaptive PEBS, and make adaptive PEBS mutually
+                *    exclusive with KVM_SET_PMU_EVENT_FILTER if necessary.
+                */
+               perf_cap &= ~PERF_CAP_PEBS_BASELINE;
        }
 
        return perf_cap;
index 8e4e48840290e0f0b62ef1535a33535a515fe769..c84927216fad4ea306118b764d67d7bda5136273 100644 (file)
@@ -3422,7 +3422,7 @@ static bool is_mci_status_msr(u32 msr)
 static bool can_set_mci_status(struct kvm_vcpu *vcpu)
 {
        /* McStatusWrEn enabled? */
-       if (guest_cpuid_is_amd_or_hygon(vcpu))
+       if (guest_cpuid_is_amd_compatible(vcpu))
                return !!(vcpu->arch.msr_hwcr & BIT_ULL(18));
 
        return false;
index 678807bcd0034c6f2fbcff234396bb5f23334fa6..2b0f97651a0a7fb19dfa4de56fe3b9eda6b23d56 100644 (file)
@@ -639,6 +639,14 @@ static void blkdev_flush_mapping(struct block_device *bdev)
        bdev_write_inode(bdev);
 }
 
+static void blkdev_put_whole(struct block_device *bdev)
+{
+       if (atomic_dec_and_test(&bdev->bd_openers))
+               blkdev_flush_mapping(bdev);
+       if (bdev->bd_disk->fops->release)
+               bdev->bd_disk->fops->release(bdev->bd_disk);
+}
+
 static int blkdev_get_whole(struct block_device *bdev, blk_mode_t mode)
 {
        struct gendisk *disk = bdev->bd_disk;
@@ -657,20 +665,21 @@ static int blkdev_get_whole(struct block_device *bdev, blk_mode_t mode)
 
        if (!atomic_read(&bdev->bd_openers))
                set_init_blocksize(bdev);
-       if (test_bit(GD_NEED_PART_SCAN, &disk->state))
-               bdev_disk_changed(disk, false);
        atomic_inc(&bdev->bd_openers);
+       if (test_bit(GD_NEED_PART_SCAN, &disk->state)) {
+               /*
+                * Only return scanning errors if we are called from contexts
+                * that explicitly want them, e.g. the BLKRRPART ioctl.
+                */
+               ret = bdev_disk_changed(disk, false);
+               if (ret && (mode & BLK_OPEN_STRICT_SCAN)) {
+                       blkdev_put_whole(bdev);
+                       return ret;
+               }
+       }
        return 0;
 }
 
-static void blkdev_put_whole(struct block_device *bdev)
-{
-       if (atomic_dec_and_test(&bdev->bd_openers))
-               blkdev_flush_mapping(bdev);
-       if (bdev->bd_disk->fops->release)
-               bdev->bd_disk->fops->release(bdev->bd_disk);
-}
-
 static int blkdev_get_part(struct block_device *part, blk_mode_t mode)
 {
        struct gendisk *disk = part->bd_disk;
index 438f79c564cfc05d6f525550417eeee93c7b82bb..5f8c988239c68fd2feb6adba16c699925eb4d01f 100644 (file)
@@ -556,7 +556,8 @@ static int blkdev_common_ioctl(struct block_device *bdev, blk_mode_t mode,
                        return -EACCES;
                if (bdev_is_partition(bdev))
                        return -EINVAL;
-               return disk_scan_partitions(bdev->bd_disk, mode);
+               return disk_scan_partitions(bdev->bd_disk,
+                               mode | BLK_OPEN_STRICT_SCAN);
        case BLKTRACESTART:
        case BLKTRACESTOP:
        case BLKTRACETEARDOWN:
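With BLK_OPEN_STRICT_SCAN set, a failed partition rescan now propagates out of the BLKRRPART ioctl instead of being silently dropped. A hedged userspace sketch of what a caller would observe (device path hypothetical):

    #include <fcntl.h>
    #include <linux/fs.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/dev/sda", O_RDONLY);	/* hypothetical device */

            if (fd < 0)
                    return 1;
            if (ioctl(fd, BLKRRPART) < 0)
                    perror("BLKRRPART");	/* scan errors now surface here */
            close(fd);
            return 0;
    }
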
index 1fbc9b921c4fccbff6bb64981ce678efd1841890..736c2eb8c0f37d58529ea500c33b8167ad9d248e 100644 (file)
@@ -574,7 +574,7 @@ static u_long get_word(struct vc_data *vc)
        }
        attr_ch = get_char(vc, (u_short *)tmp_pos, &spk_attr);
        buf[cnt++] = attr_ch;
-       while (tmpx < vc->vc_cols - 1) {
+       while (tmpx < vc->vc_cols - 1 && cnt < sizeof(buf) - 1) {
                tmp_pos += 2;
                tmpx++;
                ch = get_char(vc, (u_short *)tmp_pos, &temp);
index eca24f41556df04ac61747e05aace9622fbcc580..d6f14c8e20be3b8ccb8000192237735ba714c59e 100644 (file)
@@ -1708,8 +1708,10 @@ static size_t binder_get_object(struct binder_proc *proc,
        size_t object_size = 0;
 
        read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
-       if (offset > buffer->data_size || read_size < sizeof(*hdr))
+       if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
+           !IS_ALIGNED(offset, sizeof(u32)))
                return 0;
+
        if (u) {
                if (copy_from_user(object, u + offset, read_size))
                        return 0;
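The added IS_ALIGNED() test closes a hole where a misaligned offset could still pass the existing size checks. A minimal sketch of the validation pattern (helper name hypothetical):

    #include <linux/align.h>
    #include <linux/types.h>

    /* Reject offsets that are out of range or not 32-bit aligned before
     * any object header is read at buffer + offset. */
    static bool binder_offset_valid(size_t offset, size_t data_size)
    {
            return offset <= data_size && IS_ALIGNED(offset, sizeof(u32));
    }
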
index 456be28ba67cb476846c83c532e7bd04e521463f..2597cb43f43871dc0dc629c13b0b0ee3acf1398a 100644 (file)
@@ -702,7 +702,7 @@ static void extract_entropy(void *buf, size_t len)
 
 static void __cold _credit_init_bits(size_t bits)
 {
-       static struct execute_work set_ready;
+       static DECLARE_WORK(set_ready, crng_set_ready);
        unsigned int new, orig, add;
        unsigned long flags;
 
@@ -718,8 +718,8 @@ static void __cold _credit_init_bits(size_t bits)
 
        if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
                crng_reseed(NULL); /* Sets crng_init to CRNG_READY under base_crng.lock. */
-               if (static_key_initialized)
-                       execute_in_process_context(crng_set_ready, &set_ready);
+               if (static_key_initialized && system_unbound_wq)
+                       queue_work(system_unbound_wq, &set_ready);
                atomic_notifier_call_chain(&random_ready_notifier, 0, NULL);
                wake_up_interruptible(&crng_init_wait);
                kill_fasync(&fasync, SIGIO, POLL_IN);
@@ -890,8 +890,8 @@ void __init random_init(void)
 
        /*
         * If we were initialized by the cpu or bootloader before jump labels
-        * are initialized, then we should enable the static branch here, where
-        * it's guaranteed that jump labels have been initialized.
+        * or workqueues are initialized, then we should enable the static
+        * branch here, where it's guaranteed that these have been initialized.
         */
        if (!static_branch_likely(&crng_is_ready) && crng_init >= CRNG_READY)
                crng_set_ready(NULL);
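The switch from execute_in_process_context() to a statically declared work item removes any runtime setup at credit time; the handler is bound at compile time. A sketch of the DECLARE_WORK() pattern with a hypothetical handler:

    #include <linux/workqueue.h>

    static void my_ready_fn(struct work_struct *work)	/* hypothetical */
    {
            /* runs later, in process context, on system_unbound_wq */
    }
    static DECLARE_WORK(my_ready_work, my_ready_fn);

    static void credit(void)
    {
            /* safe to queue only once workqueues are initialized */
            if (system_unbound_wq)
                    queue_work(system_unbound_wq, &my_ready_work);
    }
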
index 20c4b28fed0616fe52dd533c21c91ca031266600..cf1fc0edfdbcae03584cb5194ce102fbc79a5bee 100644 (file)
@@ -37,6 +37,10 @@ static HLIST_HEAD(clk_root_list);
 static HLIST_HEAD(clk_orphan_list);
 static LIST_HEAD(clk_notifier_list);
 
+/* List of registered clks that use runtime PM */
+static HLIST_HEAD(clk_rpm_list);
+static DEFINE_MUTEX(clk_rpm_list_lock);
+
 static const struct hlist_head *all_lists[] = {
        &clk_root_list,
        &clk_orphan_list,
@@ -59,6 +63,7 @@ struct clk_core {
        struct clk_hw           *hw;
        struct module           *owner;
        struct device           *dev;
+       struct hlist_node       rpm_node;
        struct device_node      *of_node;
        struct clk_core         *parent;
        struct clk_parent_map   *parents;
@@ -122,6 +127,89 @@ static void clk_pm_runtime_put(struct clk_core *core)
        pm_runtime_put_sync(core->dev);
 }
 
+/**
+ * clk_pm_runtime_get_all() - Runtime "get" all clk provider devices
+ *
+ * Call clk_pm_runtime_get() on all runtime PM enabled clks in the clk tree so
+ * that disabling unused clks avoids a deadlock where a device is runtime PM
+ * resuming/suspending and the runtime PM callback is trying to grab the
+ * prepare_lock for something like clk_prepare_enable() while
+ * clk_disable_unused_subtree() holds the prepare_lock and is trying to runtime
+ * PM resume/suspend the device as well.
+ *
+ * Context: Acquires the 'clk_rpm_list_lock' and returns with the lock held on
+ * success. Otherwise the lock is released on failure.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int clk_pm_runtime_get_all(void)
+{
+       int ret;
+       struct clk_core *core, *failed;
+
+       /*
+        * Grab the list lock to prevent any new clks from being registered
+        * or unregistered until clk_pm_runtime_put_all().
+        */
+       mutex_lock(&clk_rpm_list_lock);
+
+       /*
+        * Runtime PM "get" all the devices that are needed for the clks
+        * currently registered. Do this without holding the prepare_lock, to
+        * avoid the deadlock.
+        */
+       hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
+               ret = clk_pm_runtime_get(core);
+               if (ret) {
+                       failed = core;
+                       pr_err("clk: Failed to runtime PM get '%s' for clk '%s'\n",
+                              dev_name(failed->dev), failed->name);
+                       goto err;
+               }
+       }
+
+       return 0;
+
+err:
+       hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
+               if (core == failed)
+                       break;
+
+               clk_pm_runtime_put(core);
+       }
+       mutex_unlock(&clk_rpm_list_lock);
+
+       return ret;
+}
+
+/**
+ * clk_pm_runtime_put_all() - Runtime "put" all clk provider devices
+ *
+ * Put the runtime PM references taken in clk_pm_runtime_get_all() and release
+ * the 'clk_rpm_list_lock'.
+ */
+static void clk_pm_runtime_put_all(void)
+{
+       struct clk_core *core;
+
+       hlist_for_each_entry(core, &clk_rpm_list, rpm_node)
+               clk_pm_runtime_put(core);
+       mutex_unlock(&clk_rpm_list_lock);
+}
+
+static void clk_pm_runtime_init(struct clk_core *core)
+{
+       struct device *dev = core->dev;
+
+       if (dev && pm_runtime_enabled(dev)) {
+               core->rpm_enabled = true;
+
+               mutex_lock(&clk_rpm_list_lock);
+               hlist_add_head(&core->rpm_node, &clk_rpm_list);
+               mutex_unlock(&clk_rpm_list_lock);
+       }
+}
+
 /***           locking             ***/
 static void clk_prepare_lock(void)
 {
@@ -1362,9 +1450,6 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
        if (core->flags & CLK_IGNORE_UNUSED)
                return;
 
-       if (clk_pm_runtime_get(core))
-               return;
-
        if (clk_core_is_prepared(core)) {
                trace_clk_unprepare(core);
                if (core->ops->unprepare_unused)
@@ -1373,8 +1458,6 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
                        core->ops->unprepare(core->hw);
                trace_clk_unprepare_complete(core);
        }
-
-       clk_pm_runtime_put(core);
 }
 
 static void __init clk_disable_unused_subtree(struct clk_core *core)
@@ -1390,9 +1473,6 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
        if (core->flags & CLK_OPS_PARENT_ENABLE)
                clk_core_prepare_enable(core->parent);
 
-       if (clk_pm_runtime_get(core))
-               goto unprepare_out;
-
        flags = clk_enable_lock();
 
        if (core->enable_count)
@@ -1417,8 +1497,6 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
 
 unlock_out:
        clk_enable_unlock(flags);
-       clk_pm_runtime_put(core);
-unprepare_out:
        if (core->flags & CLK_OPS_PARENT_ENABLE)
                clk_core_disable_unprepare(core->parent);
 }
@@ -1434,6 +1512,7 @@ __setup("clk_ignore_unused", clk_ignore_unused_setup);
 static int __init clk_disable_unused(void)
 {
        struct clk_core *core;
+       int ret;
 
        if (clk_ignore_unused) {
                pr_warn("clk: Not disabling unused clocks\n");
@@ -1442,6 +1521,13 @@ static int __init clk_disable_unused(void)
 
        pr_info("clk: Disabling unused clocks\n");
 
+       ret = clk_pm_runtime_get_all();
+       if (ret)
+               return ret;
+       /*
+        * Grab the prepare lock to keep the clk topology stable while iterating
+        * over clks.
+        */
        clk_prepare_lock();
 
        hlist_for_each_entry(core, &clk_root_list, child_node)
@@ -1458,6 +1544,8 @@ static int __init clk_disable_unused(void)
 
        clk_prepare_unlock();
 
+       clk_pm_runtime_put_all();
+
        return 0;
 }
 late_initcall_sync(clk_disable_unused);
@@ -3233,9 +3321,7 @@ static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
 {
        struct clk_core *child;
 
-       clk_pm_runtime_get(c);
        clk_summary_show_one(s, c, level);
-       clk_pm_runtime_put(c);
 
        hlist_for_each_entry(child, &c->children, child_node)
                clk_summary_show_subtree(s, child, level + 1);
@@ -3245,11 +3331,15 @@ static int clk_summary_show(struct seq_file *s, void *data)
 {
        struct clk_core *c;
        struct hlist_head **lists = s->private;
+       int ret;
 
        seq_puts(s, "                                 enable  prepare  protect                                duty  hardware                            connection\n");
        seq_puts(s, "   clock                          count    count    count        rate   accuracy phase  cycle    enable   consumer                         id\n");
        seq_puts(s, "---------------------------------------------------------------------------------------------------------------------------------------------\n");
 
+       ret = clk_pm_runtime_get_all();
+       if (ret)
+               return ret;
 
        clk_prepare_lock();
 
@@ -3258,6 +3348,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
                        clk_summary_show_subtree(s, c, 0);
 
        clk_prepare_unlock();
+       clk_pm_runtime_put_all();
 
        return 0;
 }
@@ -3305,8 +3396,14 @@ static int clk_dump_show(struct seq_file *s, void *data)
        struct clk_core *c;
        bool first_node = true;
        struct hlist_head **lists = s->private;
+       int ret;
+
+       ret = clk_pm_runtime_get_all();
+       if (ret)
+               return ret;
 
        seq_putc(s, '{');
+
        clk_prepare_lock();
 
        for (; *lists; lists++) {
@@ -3319,6 +3416,7 @@ static int clk_dump_show(struct seq_file *s, void *data)
        }
 
        clk_prepare_unlock();
+       clk_pm_runtime_put_all();
 
        seq_puts(s, "}\n");
        return 0;
@@ -3962,8 +4060,6 @@ static int __clk_core_init(struct clk_core *core)
        }
 
        clk_core_reparent_orphans_nolock();
-
-       kref_init(&core->ref);
 out:
        clk_pm_runtime_put(core);
 unlock:
@@ -4192,6 +4288,22 @@ static void clk_core_free_parent_map(struct clk_core *core)
        kfree(core->parents);
 }
 
+/* Free memory allocated for a struct clk_core */
+static void __clk_release(struct kref *ref)
+{
+       struct clk_core *core = container_of(ref, struct clk_core, ref);
+
+       if (core->rpm_enabled) {
+               mutex_lock(&clk_rpm_list_lock);
+               hlist_del(&core->rpm_node);
+               mutex_unlock(&clk_rpm_list_lock);
+       }
+
+       clk_core_free_parent_map(core);
+       kfree_const(core->name);
+       kfree(core);
+}
+
 static struct clk *
 __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
 {
@@ -4212,6 +4324,8 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
                goto fail_out;
        }
 
+       kref_init(&core->ref);
+
        core->name = kstrdup_const(init->name, GFP_KERNEL);
        if (!core->name) {
                ret = -ENOMEM;
@@ -4224,9 +4338,8 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
        }
        core->ops = init->ops;
 
-       if (dev && pm_runtime_enabled(dev))
-               core->rpm_enabled = true;
        core->dev = dev;
+       clk_pm_runtime_init(core);
        core->of_node = np;
        if (dev && dev->driver)
                core->owner = dev->driver->owner;
@@ -4266,12 +4379,10 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
        hw->clk = NULL;
 
 fail_create_clk:
-       clk_core_free_parent_map(core);
 fail_parents:
 fail_ops:
-       kfree_const(core->name);
 fail_name:
-       kfree(core);
+       kref_put(&core->ref, __clk_release);
 fail_out:
        return ERR_PTR(ret);
 }
@@ -4351,18 +4462,6 @@ int of_clk_hw_register(struct device_node *node, struct clk_hw *hw)
 }
 EXPORT_SYMBOL_GPL(of_clk_hw_register);
 
-/* Free memory allocated for a clock. */
-static void __clk_release(struct kref *ref)
-{
-       struct clk_core *core = container_of(ref, struct clk_core, ref);
-
-       lockdep_assert_held(&prepare_lock);
-
-       clk_core_free_parent_map(core);
-       kfree_const(core->name);
-       kfree(core);
-}
-
 /*
  * Empty clk_ops for unregistered clocks. These are used temporarily
  * after clk_unregister() was called on a clock and until last clock
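Moving kref_init() ahead of the first allocation and funneling every error path through kref_put() gives __clk_register() a single teardown path. A minimal sketch of that lifetime pattern, with hypothetical types:

    #include <linux/kref.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    struct obj {
            struct kref ref;
            const char *name;
    };

    static void obj_release(struct kref *ref)
    {
            struct obj *o = container_of(ref, struct obj, ref);

            kfree_const(o->name);
            kfree(o);
    }

    static struct obj *obj_create(const char *name)
    {
            struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

            if (!o)
                    return NULL;
            kref_init(&o->ref);		/* before any failure point */
            o->name = kstrdup_const(name, GFP_KERNEL);
            if (!o->name) {
                    /* one release function frees whatever was set up */
                    kref_put(&o->ref, obj_release);
                    return NULL;
            }
            return o;
    }
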
index 8011ef278bea3ecc7e2afdb2baf155e1033315e9..df02997c6b7c940a45317e2b0d7767e6fb7ac327 100644 (file)
@@ -152,7 +152,7 @@ static const struct mtk_gate infra_clks[] = {
        GATE_INFRA0(CLK_INFRA_PCIE_PERI_26M_CK_P1, "infra_pcie_peri_ck_26m_ck_p1",
                    "csw_infra_f26m_sel", 8),
        GATE_INFRA0(CLK_INFRA_PCIE_PERI_26M_CK_P2, "infra_pcie_peri_ck_26m_ck_p2",
-                   "csw_infra_f26m_sel", 9),
+                   "infra_pcie_peri_ck_26m_ck_p3", 9),
        GATE_INFRA0(CLK_INFRA_PCIE_PERI_26M_CK_P3, "infra_pcie_peri_ck_26m_ck_p3",
                    "csw_infra_f26m_sel", 10),
        /* INFRA1 */
index 2e55368dc4d82095b5baceb7b744a9ed4b1350b4..bd37ab4d1a9bb3252ae54a2f3a3d3d241b1daa3a 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/slab.h>
 
 #include "clk-mtk.h"
@@ -494,6 +495,16 @@ static int __mtk_clk_simple_probe(struct platform_device *pdev,
                        return IS_ERR(base) ? PTR_ERR(base) : -ENOMEM;
        }
 
+
+       devm_pm_runtime_enable(&pdev->dev);
+       /*
+        * Do a pm_runtime_resume_and_get() to workaround a possible
+        * deadlock between clk_register() and the genpd framework.
+        */
+       r = pm_runtime_resume_and_get(&pdev->dev);
+       if (r)
+               return r;
+
        /* Calculate how many clk_hw_onecell_data entries to allocate */
        num_clks = mcd->num_clks + mcd->num_composite_clks;
        num_clks += mcd->num_fixed_clks + mcd->num_factor_clks;
@@ -574,6 +585,8 @@ static int __mtk_clk_simple_probe(struct platform_device *pdev,
                        goto unregister_clks;
        }
 
+       pm_runtime_put(&pdev->dev);
+
        return r;
 
 unregister_clks:
@@ -604,6 +617,8 @@ static int __mtk_clk_simple_probe(struct platform_device *pdev,
 free_base:
        if (mcd->shared_io && base)
                iounmap(base);
+
+       pm_runtime_put(&pdev->dev);
        return r;
 }
 
index 4536ed43f65b2763ec4612a000eb4dae70875c17..84dce5184a77ae7903035c8905dbff30cea390ca 100644 (file)
@@ -641,33 +641,22 @@ static int vmk80xx_find_usb_endpoints(struct comedi_device *dev)
        struct vmk80xx_private *devpriv = dev->private;
        struct usb_interface *intf = comedi_to_usb_interface(dev);
        struct usb_host_interface *iface_desc = intf->cur_altsetting;
-       struct usb_endpoint_descriptor *ep_desc;
-       int i;
-
-       if (iface_desc->desc.bNumEndpoints != 2)
-               return -ENODEV;
-
-       for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
-               ep_desc = &iface_desc->endpoint[i].desc;
-
-               if (usb_endpoint_is_int_in(ep_desc) ||
-                   usb_endpoint_is_bulk_in(ep_desc)) {
-                       if (!devpriv->ep_rx)
-                               devpriv->ep_rx = ep_desc;
-                       continue;
-               }
+       struct usb_endpoint_descriptor *ep_rx_desc, *ep_tx_desc;
+       int ret;
 
-               if (usb_endpoint_is_int_out(ep_desc) ||
-                   usb_endpoint_is_bulk_out(ep_desc)) {
-                       if (!devpriv->ep_tx)
-                               devpriv->ep_tx = ep_desc;
-                       continue;
-               }
-       }
+       if (devpriv->model == VMK8061_MODEL)
+               ret = usb_find_common_endpoints(iface_desc, &ep_rx_desc,
+                                               &ep_tx_desc, NULL, NULL);
+       else
+               ret = usb_find_common_endpoints(iface_desc, NULL, NULL,
+                                               &ep_rx_desc, &ep_tx_desc);
 
-       if (!devpriv->ep_rx || !devpriv->ep_tx)
+       if (ret)
                return -ENODEV;
 
+       devpriv->ep_rx = ep_rx_desc;
+       devpriv->ep_tx = ep_tx_desc;
+
        if (!usb_endpoint_maxp(devpriv->ep_rx) || !usb_endpoint_maxp(devpriv->ep_tx))
                return -EINVAL;
 
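usb_find_common_endpoints() centralizes the endpoint discovery the old open-coded loop did by hand; the patch selects bulk or interrupt endpoints by device model, as shown above. A sketch of one way to combine the two lookups into a fallback (wrapper name hypothetical):

    #include <linux/usb.h>

    static int find_rx_tx(struct usb_host_interface *iface,
                          struct usb_endpoint_descriptor **rx,
                          struct usb_endpoint_descriptor **tx)
    {
            /* try bulk in/out first; fall back to interrupt in/out */
            if (!usb_find_common_endpoints(iface, rx, tx, NULL, NULL))
                    return 0;
            return usb_find_common_endpoints(iface, NULL, NULL, rx, tx);
    }
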
index b0ed10f4de609d97335c61134c7d6421339cb5b9..a5ceec7820cfab565d0a6eef0cb7f6a4204f7d10 100644 (file)
@@ -562,7 +562,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
                                     struct ttm_resource *mem)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
-       size_t bus_size = (size_t)mem->size;
 
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
@@ -573,9 +572,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
                break;
        case TTM_PL_VRAM:
                mem->bus.offset = mem->start << PAGE_SHIFT;
-               /* check if it's visible */
-               if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size)
-                       return -EINVAL;
 
                if (adev->mman.aper_base_kaddr &&
                    mem->placement & TTM_PL_FLAG_CONTIGUOUS)
index b8fcb6c55698934549c6696a337e749f0e268217..5b50f7359199d4851ecace9c5dd8e0151a264ff8 100644 (file)
@@ -1559,6 +1559,37 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
        trace_amdgpu_vm_bo_map(bo_va, mapping);
 }
 
+/* Validate operation parameters to prevent potential abuse */
+static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
+                                         struct amdgpu_bo *bo,
+                                         uint64_t saddr,
+                                         uint64_t offset,
+                                         uint64_t size)
+{
+       uint64_t tmp, lpfn;
+
+       if (saddr & AMDGPU_GPU_PAGE_MASK
+           || offset & AMDGPU_GPU_PAGE_MASK
+           || size & AMDGPU_GPU_PAGE_MASK)
+               return -EINVAL;
+
+       if (check_add_overflow(saddr, size, &tmp)
+           || check_add_overflow(offset, size, &tmp)
+           || size == 0 /* which also leads to end < begin */)
+               return -EINVAL;
+
+       /* make sure object fit at this offset */
+       if (bo && offset + size > amdgpu_bo_size(bo))
+               return -EINVAL;
+
+       /* Ensure last pfn not exceed max_pfn */
+       lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
+       if (lpfn >= adev->vm_manager.max_pfn)
+               return -EINVAL;
+
+       return 0;
+}
+
 /**
  * amdgpu_vm_bo_map - map bo inside a vm
  *
@@ -1585,21 +1616,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
        struct amdgpu_bo *bo = bo_va->base.bo;
        struct amdgpu_vm *vm = bo_va->base.vm;
        uint64_t eaddr;
+       int r;
 
-       /* validate the parameters */
-       if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
-               return -EINVAL;
-       if (saddr + size <= saddr || offset + size <= offset)
-               return -EINVAL;
-
-       /* make sure object fit at this offset */
-       eaddr = saddr + size - 1;
-       if ((bo && offset + size > amdgpu_bo_size(bo)) ||
-           (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
-               return -EINVAL;
+       r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
+       if (r)
+               return r;
 
        saddr /= AMDGPU_GPU_PAGE_SIZE;
-       eaddr /= AMDGPU_GPU_PAGE_SIZE;
+       eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
 
        tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
        if (tmp) {
@@ -1652,17 +1676,9 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
        uint64_t eaddr;
        int r;
 
-       /* validate the parameters */
-       if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
-               return -EINVAL;
-       if (saddr + size <= saddr || offset + size <= offset)
-               return -EINVAL;
-
-       /* make sure object fit at this offset */
-       eaddr = saddr + size - 1;
-       if ((bo && offset + size > amdgpu_bo_size(bo)) ||
-           (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
-               return -EINVAL;
+       r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
+       if (r)
+               return r;
 
        /* Allocate all the needed memory */
        mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
@@ -1676,7 +1692,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
        }
 
        saddr /= AMDGPU_GPU_PAGE_SIZE;
-       eaddr /= AMDGPU_GPU_PAGE_SIZE;
+       eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
 
        mapping->start = saddr;
        mapping->last = eaddr;
@@ -1763,10 +1779,14 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
        struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
        LIST_HEAD(removed);
        uint64_t eaddr;
+       int r;
+
+       r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
+       if (r)
+               return r;
 
-       eaddr = saddr + size - 1;
        saddr /= AMDGPU_GPU_PAGE_SIZE;
-       eaddr /= AMDGPU_GPU_PAGE_SIZE;
+       eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
 
        /* Allocate all the needed memory */
        before = kzalloc(sizeof(*before), GFP_KERNEL);
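check_add_overflow() replaces the old "saddr + size <= saddr" idiom with an explicit wraparound test. A minimal sketch of the range check, assuming a caller-supplied limit:

    #include <linux/overflow.h>
    #include <linux/types.h>

    static bool range_valid(u64 start, u64 size, u64 limit)
    {
            u64 end;

            /* check_add_overflow() returns true if start + size wraps */
            if (size == 0 || check_add_overflow(start, size, &end))
                    return false;
            return end <= limit;
    }
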
index 717a60d7a4ea953b8dfc369b09d855ad74b49659..b79986412cd839bc89741a0b3bc1986daa2b10e4 100644 (file)
@@ -819,9 +819,9 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
        mutex_lock(&kfd_processes_mutex);
 
        if (kfd_is_locked()) {
-               mutex_unlock(&kfd_processes_mutex);
                pr_debug("KFD is locked! Cannot create process");
-               return ERR_PTR(-EINVAL);
+               process = ERR_PTR(-EINVAL);
+               goto out;
        }
 
        /* A prior open of /dev/kfd could have already created the process. */
index 6e36a15284537e1019671a852ba72033e4a4d651..7ba30d26f620c251ee46cc1cdd44d57574388e1c 100644 (file)
@@ -2512,7 +2512,8 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
                intel_atomic_get_old_cdclk_state(state);
        const struct intel_cdclk_state *new_cdclk_state =
                intel_atomic_get_new_cdclk_state(state);
-       enum pipe pipe = new_cdclk_state->pipe;
+       struct intel_cdclk_config cdclk_config;
+       enum pipe pipe;
 
        if (!intel_cdclk_changed(&old_cdclk_state->actual,
                                 &new_cdclk_state->actual))
@@ -2521,12 +2522,25 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
        if (IS_DG2(i915))
                intel_cdclk_pcode_pre_notify(state);
 
-       if (new_cdclk_state->disable_pipes ||
-           old_cdclk_state->actual.cdclk <= new_cdclk_state->actual.cdclk) {
-               drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
+       if (new_cdclk_state->disable_pipes) {
+               cdclk_config = new_cdclk_state->actual;
+               pipe = INVALID_PIPE;
+       } else {
+               if (new_cdclk_state->actual.cdclk >= old_cdclk_state->actual.cdclk) {
+                       cdclk_config = new_cdclk_state->actual;
+                       pipe = new_cdclk_state->pipe;
+               } else {
+                       cdclk_config = old_cdclk_state->actual;
+                       pipe = INVALID_PIPE;
+               }
 
-               intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
+               cdclk_config.voltage_level = max(new_cdclk_state->actual.voltage_level,
+                                                old_cdclk_state->actual.voltage_level);
        }
+
+       drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
+
+       intel_set_cdclk(i915, &cdclk_config, pipe);
 }
 
 /**
@@ -2544,7 +2558,7 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
                intel_atomic_get_old_cdclk_state(state);
        const struct intel_cdclk_state *new_cdclk_state =
                intel_atomic_get_new_cdclk_state(state);
-       enum pipe pipe = new_cdclk_state->pipe;
+       enum pipe pipe;
 
        if (!intel_cdclk_changed(&old_cdclk_state->actual,
                                 &new_cdclk_state->actual))
@@ -2554,11 +2568,14 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
                intel_cdclk_pcode_post_notify(state);
 
        if (!new_cdclk_state->disable_pipes &&
-           old_cdclk_state->actual.cdclk > new_cdclk_state->actual.cdclk) {
-               drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
+           new_cdclk_state->actual.cdclk < old_cdclk_state->actual.cdclk)
+               pipe = new_cdclk_state->pipe;
+       else
+               pipe = INVALID_PIPE;
 
-               intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
-       }
+       drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
+
+       intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
 }
 
 static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
index 479effcf607e261fac73361958a0a855cf90d315..79cfab53f80e259093b7ae0f04310f6470a3c930 100644 (file)
@@ -23,6 +23,7 @@
  */
 
 #include "nouveau_drv.h"
+#include "nouveau_bios.h"
 #include "nouveau_reg.h"
 #include "dispnv04/hw.h"
 #include "nouveau_encoder.h"
@@ -1677,7 +1678,7 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
         */
        if (nv_match_device(dev, 0x0201, 0x1462, 0x8851)) {
                if (*conn == 0xf2005014 && *conf == 0xffffffff) {
-                       fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, 1);
+                       fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, DCB_OUTPUT_B);
                        return false;
                }
        }
@@ -1763,26 +1764,26 @@ fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
 #ifdef __powerpc__
        /* Apple iMac G4 NV17 */
        if (of_machine_is_compatible("PowerMac4,5")) {
-               fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, 1);
-               fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, 2);
+               fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, DCB_OUTPUT_B);
+               fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, DCB_OUTPUT_C);
                return;
        }
 #endif
 
        /* Make up some sane defaults */
        fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG,
-                            bios->legacy.i2c_indices.crt, 1, 1);
+                            bios->legacy.i2c_indices.crt, 1, DCB_OUTPUT_B);
 
        if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
                fabricate_dcb_output(dcb, DCB_OUTPUT_TV,
                                     bios->legacy.i2c_indices.tv,
-                                    all_heads, 0);
+                                    all_heads, DCB_OUTPUT_A);
 
        else if (bios->tmds.output0_script_ptr ||
                 bios->tmds.output1_script_ptr)
                fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS,
                                     bios->legacy.i2c_indices.panel,
-                                    all_heads, 1);
+                                    all_heads, DCB_OUTPUT_B);
 }
 
 static int
index a7f3fc342d87e03b031b5008d939c2eb46f49404..dd5b5a17ece0beed225888888d6c01a0afcf67c9 100644 (file)
@@ -222,8 +222,11 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
        void __iomem *map = NULL;
 
        /* Already mapped? */
-       if (refcount_inc_not_zero(&iobj->maps))
+       if (refcount_inc_not_zero(&iobj->maps)) {
+               /* read barrier match the wmb on refcount set */
+               smp_rmb();
                return iobj->map;
+       }
 
        /* Take the lock, and re-check that another thread hasn't
         * already mapped the object in the meantime.
@@ -250,6 +253,8 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
                        iobj->base.memory.ptrs = &nv50_instobj_fast;
                else
                        iobj->base.memory.ptrs = &nv50_instobj_slow;
+               /* barrier to ensure the ptrs are written before refcount is set */
+               smp_wmb();
                refcount_set(&iobj->maps, 1);
        }
 
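The smp_wmb()/smp_rmb() pair implements the classic publish pattern: the writer makes the data visible before setting the flag, and the reader orders the flag check before consuming the data. A condensed sketch with hypothetical types:

    #include <asm/barrier.h>
    #include <linux/refcount.h>

    struct mapping {
            void *ptr;
            refcount_t maps;
    };

    static void publish(struct mapping *m, void *ptr)
    {
            m->ptr = ptr;
            smp_wmb();		/* ptr visible before maps != 0 */
            refcount_set(&m->maps, 1);
    }

    static void *acquire(struct mapping *m)
    {
            if (!refcount_inc_not_zero(&m->maps))
                    return NULL;	/* not mapped yet */
            smp_rmb();		/* pairs with smp_wmb() in publish() */
            return m->ptr;
    }
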
index 775144695283f54dcb1c527e58c9604cfd6da207..b15ca56a09a74a06f8bfcd0b4053d554ced9b58d 100644 (file)
@@ -253,8 +253,6 @@ static void visionox_rm69299_remove(struct mipi_dsi_device *dsi)
        struct visionox_rm69299 *ctx = mipi_dsi_get_drvdata(dsi);
 
        mipi_dsi_detach(ctx->dsi);
-       mipi_dsi_device_unregister(ctx->dsi);
-
        drm_panel_remove(&ctx->panel);
 }
 
index 3596ea4a8b60f463a231682292bdc570e79b9c33..fc7b0e8f1ca15af385ac15f381a6528372e4878e 100644 (file)
@@ -923,8 +923,12 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
                max_device = ATOM_MAX_SUPPORTED_DEVICE_INFO;
 
        for (i = 0; i < max_device; i++) {
-               ATOM_CONNECTOR_INFO_I2C ci =
-                   supported_devices->info.asConnInfo[i];
+               ATOM_CONNECTOR_INFO_I2C ci;
+
+               if (frev > 1)
+                       ci = supported_devices->info_2d1.asConnInfo[i];
+               else
+                       ci = supported_devices->info.asConnInfo[i];
 
                bios_connectors[i].valid = false;
 
index 112438d965ffbefd4fa2cce5f246cc03a63759f9..6e1fd6985ffcb730eb7057c4509aec971dfa8266 100644 (file)
@@ -288,17 +288,23 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
                                                  enum ttm_caching caching,
                                                  unsigned int order)
 {
-       if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE)
+       if (pool->use_dma_alloc)
                return &pool->caching[caching].orders[order];
 
 #ifdef CONFIG_X86
        switch (caching) {
        case ttm_write_combined:
+               if (pool->nid != NUMA_NO_NODE)
+                       return &pool->caching[caching].orders[order];
+
                if (pool->use_dma32)
                        return &global_dma32_write_combined[order];
 
                return &global_write_combined[order];
        case ttm_uncached:
+               if (pool->nid != NUMA_NO_NODE)
+                       return &pool->caching[caching].orders[order];
+
                if (pool->use_dma32)
                        return &global_dma32_uncached[order];
 
@@ -566,11 +572,17 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
        pool->use_dma_alloc = use_dma_alloc;
        pool->use_dma32 = use_dma32;
 
-       if (use_dma_alloc || nid != NUMA_NO_NODE) {
-               for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
-                       for (j = 0; j < NR_PAGE_ORDERS; ++j)
-                               ttm_pool_type_init(&pool->caching[i].orders[j],
-                                                  pool, i, j);
+       for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
+               for (j = 0; j < NR_PAGE_ORDERS; ++j) {
+                       struct ttm_pool_type *pt;
+
+                       /* Initialize only pool types which are actually used */
+                       pt = ttm_pool_select_type(pool, i, j);
+                       if (pt != &pool->caching[i].orders[j])
+                               continue;
+
+                       ttm_pool_type_init(pt, pool, i, j);
+               }
        }
 }
 EXPORT_SYMBOL(ttm_pool_init);
@@ -599,10 +611,16 @@ void ttm_pool_fini(struct ttm_pool *pool)
 {
        unsigned int i, j;
 
-       if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE) {
-               for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
-                       for (j = 0; j < NR_PAGE_ORDERS; ++j)
-                               ttm_pool_type_fini(&pool->caching[i].orders[j]);
+       for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
+               for (j = 0; j < NR_PAGE_ORDERS; ++j) {
+                       struct ttm_pool_type *pt;
+
+                       pt = ttm_pool_select_type(pool, i, j);
+                       if (pt != &pool->caching[i].orders[j])
+                               continue;
+
+                       ttm_pool_type_fini(pt);
+               }
        }
 
        /* We removed the pool types from the LRU, but we need to also make sure
index afc76390a197a03fcfdd7804241eb60c6de1c9e6..b8f9f2a3d2fb3b0f201118b47d4503f11f896fcb 100644 (file)
@@ -105,7 +105,6 @@ v3d_irq(int irq, void *arg)
                struct v3d_file_priv *file = v3d->bin_job->base.file->driver_priv;
                u64 runtime = local_clock() - file->start_ns[V3D_BIN];
 
-               file->enabled_ns[V3D_BIN] += local_clock() - file->start_ns[V3D_BIN];
                file->jobs_sent[V3D_BIN]++;
                v3d->queue[V3D_BIN].jobs_sent++;
 
@@ -126,7 +125,6 @@ v3d_irq(int irq, void *arg)
                struct v3d_file_priv *file = v3d->render_job->base.file->driver_priv;
                u64 runtime = local_clock() - file->start_ns[V3D_RENDER];
 
-               file->enabled_ns[V3D_RENDER] += local_clock() - file->start_ns[V3D_RENDER];
                file->jobs_sent[V3D_RENDER]++;
                v3d->queue[V3D_RENDER].jobs_sent++;
 
@@ -147,7 +145,6 @@ v3d_irq(int irq, void *arg)
                struct v3d_file_priv *file = v3d->csd_job->base.file->driver_priv;
                u64 runtime = local_clock() - file->start_ns[V3D_CSD];
 
-               file->enabled_ns[V3D_CSD] += local_clock() - file->start_ns[V3D_CSD];
                file->jobs_sent[V3D_CSD]++;
                v3d->queue[V3D_CSD].jobs_sent++;
 
@@ -195,7 +192,6 @@ v3d_hub_irq(int irq, void *arg)
                struct v3d_file_priv *file = v3d->tfu_job->base.file->driver_priv;
                u64 runtime = local_clock() - file->start_ns[V3D_TFU];
 
-               file->enabled_ns[V3D_TFU] += local_clock() - file->start_ns[V3D_TFU];
                file->jobs_sent[V3D_TFU]++;
                v3d->queue[V3D_TFU].jobs_sent++;
 
index c52c7bf1485b1fa95b1e9ca3f1e05135167a8c5c..717d624e9a052298d5d5070551e909cc65ee0cc5 100644 (file)
@@ -456,8 +456,10 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
                .no_wait_gpu = false
        };
        u32 j, initial_line = dst_offset / dst_stride;
-       struct vmw_bo_blit_line_data d;
+       struct vmw_bo_blit_line_data d = {0};
        int ret = 0;
+       struct page **dst_pages = NULL;
+       struct page **src_pages = NULL;
 
        /* Buffer objects need to be either pinned or reserved: */
        if (!(dst->pin_count))
@@ -477,12 +479,35 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
                        return ret;
        }
 
+       if (!src->ttm->pages && src->ttm->sg) {
+               src_pages = kvmalloc_array(src->ttm->num_pages,
+                                          sizeof(struct page *), GFP_KERNEL);
+               if (!src_pages)
+                       return -ENOMEM;
+               ret = drm_prime_sg_to_page_array(src->ttm->sg, src_pages,
+                                                src->ttm->num_pages);
+               if (ret)
+                       goto out;
+       }
+       if (!dst->ttm->pages && dst->ttm->sg) {
+               dst_pages = kvmalloc_array(dst->ttm->num_pages,
+                                          sizeof(struct page *), GFP_KERNEL);
+               if (!dst_pages) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+               ret = drm_prime_sg_to_page_array(dst->ttm->sg, dst_pages,
+                                                dst->ttm->num_pages);
+               if (ret)
+                       goto out;
+       }
+
        d.mapped_dst = 0;
        d.mapped_src = 0;
        d.dst_addr = NULL;
        d.src_addr = NULL;
-       d.dst_pages = dst->ttm->pages;
-       d.src_pages = src->ttm->pages;
+       d.dst_pages = dst->ttm->pages ? dst->ttm->pages : dst_pages;
+       d.src_pages = src->ttm->pages ? src->ttm->pages : src_pages;
        d.dst_num_pages = PFN_UP(dst->resource->size);
        d.src_num_pages = PFN_UP(src->resource->size);
        d.dst_prot = ttm_io_prot(dst, dst->resource, PAGE_KERNEL);
@@ -504,6 +529,10 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
                kunmap_atomic(d.src_addr);
        if (d.dst_addr)
                kunmap_atomic(d.dst_addr);
+       if (src_pages)
+               kvfree(src_pages);
+       if (dst_pages)
+               kvfree(dst_pages);
 
        return ret;
 }
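
This hunk handles dma-buf imports, whose ttm_tt carries an sg_table but no pages[] array, by materializing a temporary page array before the CPU blit and freeing it afterwards. A condensed, hypothetical helper showing the same kvmalloc_array() + drm_prime_sg_to_page_array() shape (the helper name and the ERR_PTR-style return are this sketch's, not the driver's):

#include <linux/err.h>
#include <linux/slab.h>
#include <drm/drm_prime.h>
#include <drm/ttm/ttm_tt.h>

/*
 * Return the page array backing @ttm.  For imports that only carry an
 * sg_table, build a temporary array; the caller must kvfree(*tmp)
 * once the CPU access is done.
 */
static struct page **pages_for_cpu_access(struct ttm_tt *ttm,
                                          struct page ***tmp)
{
        struct page **pages;
        int ret;

        *tmp = NULL;
        if (ttm->pages)
                return ttm->pages;

        pages = kvmalloc_array(ttm->num_pages, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return ERR_PTR(-ENOMEM);

        ret = drm_prime_sg_to_page_array(ttm->sg, pages, ttm->num_pages);
        if (ret) {
                kvfree(pages);
                return ERR_PTR(ret);
        }

        *tmp = pages;
        return pages;
}
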
index 2bfac3aad7b7d6327b552b550f656973fd8758eb..4aac88cc5f913cb0b39264f74e1df7c7a8e4c366 100644 (file)
@@ -377,7 +377,8 @@ static int vmw_bo_init(struct vmw_private *dev_priv,
 {
        struct ttm_operation_ctx ctx = {
                .interruptible = params->bo_type != ttm_bo_type_kernel,
-               .no_wait_gpu = false
+               .no_wait_gpu = false,
+               .resv = params->resv,
        };
        struct ttm_device *bdev = &dev_priv->bdev;
        struct drm_device *vdev = &dev_priv->drm;
@@ -394,8 +395,8 @@ static int vmw_bo_init(struct vmw_private *dev_priv,
 
        vmw_bo_placement_set(vmw_bo, params->domain, params->busy_domain);
        ret = ttm_bo_init_reserved(bdev, &vmw_bo->tbo, params->bo_type,
-                                  &vmw_bo->placement, 0, &ctx, NULL,
-                                  NULL, destroy);
+                                  &vmw_bo->placement, 0, &ctx,
+                                  params->sg, params->resv, destroy);
        if (unlikely(ret))
                return ret;
 
index 0d496dc9c6af7a352c0432f50f4dd9be37448b5e..f349642e6190d6933031d08ccd7f353231f0f1da 100644 (file)
@@ -55,6 +55,8 @@ struct vmw_bo_params {
        enum ttm_bo_type bo_type;
        size_t size;
        bool pin;
+       struct dma_resv *resv;
+       struct sg_table *sg;
 };
 
 /**
index 0a304706e01322a6372727a27eb3fd0330471b31..58fb40c93100a84ec8b1dd769f35ab31c00bd0dc 100644 (file)
@@ -1628,6 +1628,7 @@ static const struct drm_driver driver = {
 
        .prime_fd_to_handle = vmw_prime_fd_to_handle,
        .prime_handle_to_fd = vmw_prime_handle_to_fd,
+       .gem_prime_import_sg_table = vmw_prime_import_sg_table,
 
        .fops = &vmwgfx_driver_fops,
        .name = VMWGFX_DRIVER_NAME,
index 3cd5090dedfc5b0571b4f4f1a1a1ac6a0d5b0638..6acc7ad0e9eb8761311a7fa1dc5a193c7c916baa 100644 (file)
@@ -1131,6 +1131,9 @@ extern int vmw_prime_handle_to_fd(struct drm_device *dev,
                                  struct drm_file *file_priv,
                                  uint32_t handle, uint32_t flags,
                                  int *prime_fd);
+struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
+                                                struct dma_buf_attachment *attach,
+                                                struct sg_table *table);
 
 /*
  * Memory object management - vmwgfx_mob.c
index 12787bb9c111d10db997b9db650c6bb1069c26ef..d6bcaf078b1f40bbf75bdfb63fd1e00b7901e20f 100644 (file)
@@ -149,6 +149,38 @@ int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
        return ret;
 }
 
+struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
+                                                struct dma_buf_attachment *attach,
+                                                struct sg_table *table)
+{
+       int ret;
+       struct vmw_private *dev_priv = vmw_priv(dev);
+       struct drm_gem_object *gem = NULL;
+       struct vmw_bo *vbo;
+       struct vmw_bo_params params = {
+               .domain = (dev_priv->has_mob) ? VMW_BO_DOMAIN_SYS : VMW_BO_DOMAIN_VRAM,
+               .busy_domain = VMW_BO_DOMAIN_SYS,
+               .bo_type = ttm_bo_type_sg,
+               .size = attach->dmabuf->size,
+               .pin = false,
+               .resv = attach->dmabuf->resv,
+               .sg = table,
+
+       };
+
+       dma_resv_lock(params.resv, NULL);
+
+       ret = vmw_bo_create(dev_priv, &params, &vbo);
+       if (ret != 0)
+               goto out_no_bo;
+
+       vbo->tbo.base.funcs = &vmw_gem_object_funcs;
+
+       gem = &vbo->tbo.base;
+out_no_bo:
+       dma_resv_unlock(params.resv);
+       return gem;
+}
 
 int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
index 65be9e4a8992ae9e3859617204352e6885348976..f27afc6e39ee26b5c925d108aef0ff400deba55c 100644 (file)
@@ -932,6 +932,7 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
 int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
                             struct drm_atomic_state *state)
 {
+       struct vmw_private *vmw = vmw_priv(crtc->dev);
        struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
                                                                         crtc);
        struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
@@ -939,9 +940,13 @@ int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
        bool has_primary = new_state->plane_mask &
                           drm_plane_mask(crtc->primary);
 
-       /* We always want to have an active plane with an active CRTC */
-       if (has_primary != new_state->enable)
-               return -EINVAL;
+       /*
+        * This is fine in general, but broken userspace might expect
+        * some actual rendering, so give a clue as to why it's blank.
+        */
+       if (new_state->enable && !has_primary)
+               drm_dbg_driver(&vmw->drm,
+                              "CRTC without a primary plane will be blank.\n");
 
 
        if (new_state->connector_mask != connector_mask &&
index db81e635dc061f758b60d3989462b02bf25ad551..9fda4f4ec7a9727db50694c2abaa62765d6a8166 100644 (file)
@@ -243,10 +243,10 @@ struct vmw_framebuffer_bo {
 
 
 static const uint32_t __maybe_unused vmw_primary_plane_formats[] = {
-       DRM_FORMAT_XRGB1555,
-       DRM_FORMAT_RGB565,
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_ARGB8888,
+       DRM_FORMAT_RGB565,
+       DRM_FORMAT_XRGB1555,
 };
 
 static const uint32_t __maybe_unused vmw_cursor_plane_formats[] = {
index 2d72a5ee7c0c710339d5d25c0a9376745a90f7af..c99cad444991579f6e665453b74f56cb35de2e15 100644 (file)
@@ -75,8 +75,12 @@ int vmw_prime_fd_to_handle(struct drm_device *dev,
                           int fd, u32 *handle)
 {
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+       int ret = ttm_prime_fd_to_handle(tfile, fd, handle);
 
-       return ttm_prime_fd_to_handle(tfile, fd, handle);
+       if (ret)
+               ret = drm_gem_prime_fd_to_handle(dev, file_priv, fd, handle);
+
+       return ret;
 }
 
 int vmw_prime_handle_to_fd(struct drm_device *dev,
@@ -85,5 +89,12 @@ int vmw_prime_handle_to_fd(struct drm_device *dev,
                           int *prime_fd)
 {
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-       return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
+       int ret;
+
+       if (handle > VMWGFX_NUM_MOB)
+               ret = ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
+       else
+               ret = drm_gem_prime_handle_to_fd(dev, file_priv, handle, flags, prime_fd);
+
+       return ret;
 }
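
Both hooks now bridge two object namespaces: import tries the legacy TTM prime lookup first and falls back to the GEM path when it fails, while export routes by handle value, sending handles above VMWGFX_NUM_MOB down the TTM prime path and the rest down the GEM path. A toy reduction of the export-side routing, with all names hypothetical:

#include <errno.h>
#include <stdio.h>

#define NUM_MOB 1024u   /* stand-in for VMWGFX_NUM_MOB */

static int legacy_handle_to_fd(unsigned int handle, int *fd)
{
        (void)handle;
        *fd = -1;
        return -ENOENT;         /* pretend no legacy object exists */
}

static int gem_handle_to_fd(unsigned int handle, int *fd)
{
        (void)handle;
        *fd = 3;                /* pretend a GEM export succeeded */
        return 0;
}

/* Route by handle range, as the export hook above does. */
static int handle_to_fd(unsigned int handle, int *fd)
{
        if (handle > NUM_MOB)
                return legacy_handle_to_fd(handle, fd);
        return gem_handle_to_fd(handle, fd);
}

int main(void)
{
        int fd;

        if (!handle_to_fd(7, &fd))
                printf("exported as fd %d\n", fd);
        return 0;
}
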
index af8562c95cc35b85048f2b9c64bcde55ed374f73..fcb87d83760ef664ebf3b4a2d4971a9ac5fc9edb 100644 (file)
@@ -220,13 +220,18 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
        switch (dev_priv->map_mode) {
        case vmw_dma_map_bind:
        case vmw_dma_map_populate:
-               vsgt->sgt = &vmw_tt->sgt;
-               ret = sg_alloc_table_from_pages_segment(
-                       &vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
-                       (unsigned long)vsgt->num_pages << PAGE_SHIFT,
-                       dma_get_max_seg_size(dev_priv->drm.dev), GFP_KERNEL);
-               if (ret)
-                       goto out_sg_alloc_fail;
+               if (vmw_tt->dma_ttm.page_flags & TTM_TT_FLAG_EXTERNAL) {
+                       vsgt->sgt = vmw_tt->dma_ttm.sg;
+               } else {
+                       vsgt->sgt = &vmw_tt->sgt;
+                       ret = sg_alloc_table_from_pages_segment(&vmw_tt->sgt,
+                               vsgt->pages, vsgt->num_pages, 0,
+                               (unsigned long)vsgt->num_pages << PAGE_SHIFT,
+                               dma_get_max_seg_size(dev_priv->drm.dev),
+                               GFP_KERNEL);
+                       if (ret)
+                               goto out_sg_alloc_fail;
+               }
 
                ret = vmw_ttm_map_for_dma(vmw_tt);
                if (unlikely(ret != 0))
@@ -241,8 +246,9 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
        return 0;
 
 out_map_fail:
-       sg_free_table(vmw_tt->vsgt.sgt);
-       vmw_tt->vsgt.sgt = NULL;
+       drm_warn(&dev_priv->drm, "VSG table map failed!");
+       sg_free_table(vsgt->sgt);
+       vsgt->sgt = NULL;
 out_sg_alloc_fail:
        return ret;
 }
@@ -388,15 +394,17 @@ static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
 static int vmw_ttm_populate(struct ttm_device *bdev,
                            struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
-       int ret;
+       bool external = (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
 
-       /* TODO: maybe completely drop this ? */
        if (ttm_tt_is_populated(ttm))
                return 0;
 
-       ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
+       if (external && ttm->sg)
+               return drm_prime_sg_to_dma_addr_array(ttm->sg,
+                                                     ttm->dma_address,
+                                                     ttm->num_pages);
 
-       return ret;
+       return ttm_pool_alloc(&bdev->pool, ttm, ctx);
 }
 
 static void vmw_ttm_unpopulate(struct ttm_device *bdev,
@@ -404,6 +412,10 @@ static void vmw_ttm_unpopulate(struct ttm_device *bdev,
 {
        struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
                                                 dma_ttm);
+       bool external = (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
+
+       if (external)
+               return;
 
        vmw_ttm_unbind(bdev, ttm);
 
@@ -422,6 +434,7 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
 {
        struct vmw_ttm_tt *vmw_be;
        int ret;
+       bool external = bo->type == ttm_bo_type_sg;
 
        vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
        if (!vmw_be)
@@ -430,7 +443,10 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
        vmw_be->dev_priv = vmw_priv_from_ttm(bo->bdev);
        vmw_be->mob = NULL;
 
-       if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
+       if (external)
+               page_flags |= TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE;
+
+       if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent || external)
                ret = ttm_sg_tt_init(&vmw_be->dma_ttm, bo, page_flags,
                                     ttm_cached);
        else
index b21da7b745a5e7cd6b3e34e4fb8d42a45b2b6466..a9c1f9885c6bb4d2727cbce81d5be93cb9458a38 100644 (file)
@@ -31,7 +31,7 @@ int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
 
        ret = ttm_bo_reserve(&bo->ttm, true, false, NULL);
        if (ret)
-               return ret;
+               goto err;
 
        if (!(bo->flags & XE_BO_SCANOUT_BIT)) {
                /*
@@ -42,12 +42,16 @@ int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
                 */
                if (XE_IOCTL_DBG(i915, !list_empty(&bo->ttm.base.gpuva.list))) {
                        ttm_bo_unreserve(&bo->ttm);
-                       return -EINVAL;
+                       ret = -EINVAL;
+                       goto err;
                }
                bo->flags |= XE_BO_SCANOUT_BIT;
        }
        ttm_bo_unreserve(&bo->ttm);
+       return 0;
 
+err:
+       xe_bo_put(bo);
        return ret;
 }
 
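The fix here is the classic single-exit cleanup: once the function owns a reference to the buffer object, every failure path must drop it, so the early returns are rewritten to funnel through one error label. A small standalone illustration of the shape, with reduced, hypothetical types:

#include <errno.h>

struct obj {
        int refs;
};

static void obj_get(struct obj *o) { o->refs++; }
static void obj_put(struct obj *o) { o->refs--; }

static int do_reserve(struct obj *o)
{
        (void)o;
        return 0;       /* pretend the reservation succeeded */
}

static int framebuffer_init(struct obj *bo)
{
        int ret;

        obj_get(bo);    /* on success the framebuffer keeps this ref */

        ret = do_reserve(bo);
        if (ret)
                goto err;

        /* ... validation that may also fail goes through err too ... */
        return 0;

err:
        obj_put(bo);    /* on failure, never return with the ref held */
        return ret;
}

int main(void)
{
        struct obj bo = { .refs = 0 };

        return framebuffer_init(&bo);
}
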
index bf0df6ee4f7857b4ac8d9ca1c9789b7ef3e4afa9..07fb8d3c037f004ccd34c5393a95c48bffdd8298 100644 (file)
@@ -1026,23 +1026,26 @@ static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
        }
 }
 
-static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id)
+static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id,
+                                               enum ib_cm_state old_state)
 {
        struct cm_id_private *cm_id_priv;
 
        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
-       pr_err("%s: cm_id=%p timed out. state=%d refcnt=%d\n", __func__,
-              cm_id, cm_id->state, refcount_read(&cm_id_priv->refcount));
+       pr_err("%s: cm_id=%p timed out. state %d -> %d, refcnt=%d\n", __func__,
+              cm_id, old_state, cm_id->state, refcount_read(&cm_id_priv->refcount));
 }
 
 static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
 {
        struct cm_id_private *cm_id_priv;
+       enum ib_cm_state old_state;
        struct cm_work *work;
        int ret;
 
        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irq(&cm_id_priv->lock);
+       old_state = cm_id->state;
 retest:
        switch (cm_id->state) {
        case IB_CM_LISTEN:
@@ -1151,7 +1154,7 @@ static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
                                                  msecs_to_jiffies(
                                                  CM_DESTROY_ID_WAIT_TIMEOUT));
                if (!ret) /* timeout happened */
-                       cm_destroy_id_wait_timeout(cm_id);
+                       cm_destroy_id_wait_timeout(cm_id, old_state);
        } while (!ret);
 
        while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
index 0c3c4e64812c58cf8457dbaffeb7a28e31cebad1..3e43687a7f6f7313f4031955b1ad03fbc22b8bf9 100644 (file)
@@ -188,7 +188,8 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u32 port_num,
                mdev = dev->mdev;
                mdev_port_num = 1;
        }
-       if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1) {
+       if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1 &&
+           !mlx5_core_mp_enabled(mdev)) {
                /* set local port to one for Function-Per-Port HCA. */
                mdev = dev->mdev;
                mdev_port_num = 1;
index 54c723a6eddacef625cccbc47184a46cb93fe9a3..6f9ec8db014c79714a109a46b5d2b9c98544eeec 100644 (file)
@@ -33,6 +33,8 @@ void rxe_dealloc(struct ib_device *ib_dev)
 
        if (rxe->tfm)
                crypto_free_shash(rxe->tfm);
+
+       mutex_destroy(&rxe->usdev_lock);
 }
 
 /* initialize rxe device parameters */
index 50bac2d79d9b5e4bea6cf76a8ffa32658ebb70ad..68edb07d4443e95b985f6a754a9efef83f06af66 100644 (file)
@@ -176,6 +176,8 @@ static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
 
        path->num_nodes = num_nodes;
 
+       mutex_lock(&icc_bw_lock);
+
        for (i = num_nodes - 1; i >= 0; i--) {
                node->provider->users++;
                hlist_add_head(&path->reqs[i].req_node, &node->req_list);
@@ -186,6 +188,8 @@ static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
                node = node->reverse;
        }
 
+       mutex_unlock(&icc_bw_lock);
+
        return path;
 }
 
@@ -792,12 +796,16 @@ void icc_put(struct icc_path *path)
                pr_err("%s: error (%d)\n", __func__, ret);
 
        mutex_lock(&icc_lock);
+       mutex_lock(&icc_bw_lock);
+
        for (i = 0; i < path->num_nodes; i++) {
                node = path->reqs[i].node;
                hlist_del(&path->reqs[i].req_node);
                if (!WARN_ON(!node->provider->users))
                        node->provider->users--;
        }
+
+       mutex_unlock(&icc_bw_lock);
        mutex_unlock(&icc_lock);
 
        kfree_const(path->name);
index cbaf4f9c41be656212b50dce683273911e1e1cd6..06f0a6d6cbbc035d49a7d66df1e7d6caede87f83 100644 (file)
@@ -116,15 +116,6 @@ static struct qcom_icc_node xm_sdc2 = {
        .links = { X1E80100_SLAVE_A2NOC_SNOC },
 };
 
-static struct qcom_icc_node ddr_perf_mode_master = {
-       .name = "ddr_perf_mode_master",
-       .id = X1E80100_MASTER_DDR_PERF_MODE,
-       .channels = 1,
-       .buswidth = 4,
-       .num_links = 1,
-       .links = { X1E80100_SLAVE_DDR_PERF_MODE },
-};
-
 static struct qcom_icc_node qup0_core_master = {
        .name = "qup0_core_master",
        .id = X1E80100_MASTER_QUP_CORE_0,
@@ -832,14 +823,6 @@ static struct qcom_icc_node qns_a2noc_snoc = {
        .links = { X1E80100_MASTER_A2NOC_SNOC },
 };
 
-static struct qcom_icc_node ddr_perf_mode_slave = {
-       .name = "ddr_perf_mode_slave",
-       .id = X1E80100_SLAVE_DDR_PERF_MODE,
-       .channels = 1,
-       .buswidth = 4,
-       .num_links = 0,
-};
-
 static struct qcom_icc_node qup0_core_slave = {
        .name = "qup0_core_slave",
        .id = X1E80100_SLAVE_QUP_CORE_0,
@@ -1591,12 +1574,6 @@ static struct qcom_icc_bcm bcm_acv = {
        .nodes = { &ebi },
 };
 
-static struct qcom_icc_bcm bcm_acv_perf = {
-       .name = "ACV_PERF",
-       .num_nodes = 1,
-       .nodes = { &ddr_perf_mode_slave },
-};
-
 static struct qcom_icc_bcm bcm_ce0 = {
        .name = "CE0",
        .num_nodes = 1,
@@ -1863,18 +1840,15 @@ static const struct qcom_icc_desc x1e80100_aggre2_noc = {
 };
 
 static struct qcom_icc_bcm * const clk_virt_bcms[] = {
-       &bcm_acv_perf,
        &bcm_qup0,
        &bcm_qup1,
        &bcm_qup2,
 };
 
 static struct qcom_icc_node * const clk_virt_nodes[] = {
-       [MASTER_DDR_PERF_MODE] = &ddr_perf_mode_master,
        [MASTER_QUP_CORE_0] = &qup0_core_master,
        [MASTER_QUP_CORE_1] = &qup1_core_master,
        [MASTER_QUP_CORE_2] = &qup2_core_master,
-       [SLAVE_DDR_PERF_MODE] = &ddr_perf_mode_slave,
        [SLAVE_QUP_CORE_0] = &qup0_core_slave,
        [SLAVE_QUP_CORE_1] = &qup1_core_slave,
        [SLAVE_QUP_CORE_2] = &qup2_core_slave,
index 99d4b075df49e446ef04295ab73020ae7b8f74c5..76656fe0470d7dce8aa49b201f59562e189da148 100644 (file)
@@ -37,6 +37,7 @@ config IOMMUFD_TEST
        depends on DEBUG_KERNEL
        depends on FAULT_INJECTION
        depends on RUNTIME_TESTING_MENU
+       select IOMMUFD_DRIVER
        default n
        help
          This is dangerous, do not enable unless running
index 1a64364700eb0f3d3b93197f04c154c03fd3601a..0ad2ff9065aad0d31ca2be16e32629b5612ecfea 100644 (file)
@@ -1002,7 +1002,7 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
                } else {
                        pcr->card_removed |= SD_EXIST;
                        pcr->card_inserted &= ~SD_EXIST;
-                       if (PCI_PID(pcr) == PID_5261) {
+                       if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5264)) {
                                rtsx_pci_write_register(pcr, RTS5261_FW_STATUS,
                                        RTS5261_EXPRESS_LINK_FAIL_MASK, 0);
                                pcr->extra_caps |= EXTRA_CAPS_SD_EXPRESS;
index 8cf636c5403225f7588a2428318cec1ff7fd2700..bd4e3df44865e1c0a7d1190fcc90c19d98bd858d 100644 (file)
@@ -116,7 +116,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
        {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_N, MEI_ME_PCH15_CFG)},
 
-       {MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_SPS_CFG)},
 
        {MEI_PCI_DEVICE(MEI_DEV_ID_MTL_M, MEI_ME_PCH15_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ARL_S, MEI_ME_PCH15_CFG)},
index 8d303c6c000062ee2d7280f6df4d95f022e80bf8..8db0fcf24e709754f742b42dc47a635eeaeb9d2a 100644 (file)
@@ -402,25 +402,40 @@ static int mei_vsc_remove(struct platform_device *pdev)
 static int mei_vsc_suspend(struct device *dev)
 {
        struct mei_device *mei_dev = dev_get_drvdata(dev);
+       struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
 
        mei_stop(mei_dev);
 
+       mei_disable_interrupts(mei_dev);
+
+       vsc_tp_free_irq(hw->tp);
+
        return 0;
 }
 
 static int mei_vsc_resume(struct device *dev)
 {
        struct mei_device *mei_dev = dev_get_drvdata(dev);
+       struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
        int ret;
 
-       ret = mei_restart(mei_dev);
+       ret = vsc_tp_request_irq(hw->tp);
        if (ret)
                return ret;
 
+       ret = mei_restart(mei_dev);
+       if (ret)
+               goto err_free;
+
        /* start timer if stopped in suspend */
        schedule_delayed_work(&mei_dev->timer_work, HZ);
 
        return 0;
+
+err_free:
+       vsc_tp_free_irq(hw->tp);
+
+       return ret;
 }
 
 static DEFINE_SIMPLE_DEV_PM_OPS(mei_vsc_pm_ops, mei_vsc_suspend, mei_vsc_resume);
index 682c62c635b636275ce020e066a40506e5645841..870c70ef3bb8bc99d259e58a88e0a95bc052bffb 100644 (file)
@@ -94,6 +94,27 @@ static const struct acpi_gpio_mapping vsc_tp_acpi_gpios[] = {
        {}
 };
 
+static irqreturn_t vsc_tp_isr(int irq, void *data)
+{
+       struct vsc_tp *tp = data;
+
+       atomic_inc(&tp->assert_cnt);
+
+       wake_up(&tp->xfer_wait);
+
+       return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t vsc_tp_thread_isr(int irq, void *data)
+{
+       struct vsc_tp *tp = data;
+
+       if (tp->event_notify)
+               tp->event_notify(tp->event_notify_context);
+
+       return IRQ_HANDLED;
+}
+
 /* wake up firmware and wait for response */
 static int vsc_tp_wakeup_request(struct vsc_tp *tp)
 {
@@ -383,6 +404,37 @@ int vsc_tp_register_event_cb(struct vsc_tp *tp, vsc_tp_event_cb_t event_cb,
 }
 EXPORT_SYMBOL_NS_GPL(vsc_tp_register_event_cb, VSC_TP);
 
+/**
+ * vsc_tp_request_irq - request irq for vsc_tp device
+ * @tp: vsc_tp device handle
+ */
+int vsc_tp_request_irq(struct vsc_tp *tp)
+{
+       struct spi_device *spi = tp->spi;
+       struct device *dev = &spi->dev;
+       int ret;
+
+       irq_set_status_flags(spi->irq, IRQ_DISABLE_UNLAZY);
+       ret = request_threaded_irq(spi->irq, vsc_tp_isr, vsc_tp_thread_isr,
+                                  IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+                                  dev_name(dev), tp);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+EXPORT_SYMBOL_NS_GPL(vsc_tp_request_irq, VSC_TP);
+
+/**
+ * vsc_tp_free_irq - free irq for vsc_tp device
+ * @tp: vsc_tp device handle
+ */
+void vsc_tp_free_irq(struct vsc_tp *tp)
+{
+       free_irq(tp->spi->irq, tp);
+}
+EXPORT_SYMBOL_NS_GPL(vsc_tp_free_irq, VSC_TP);
+
 /**
  * vsc_tp_intr_synchronize - synchronize vsc_tp interrupt
  * @tp: vsc_tp device handle
@@ -413,27 +465,6 @@ void vsc_tp_intr_disable(struct vsc_tp *tp)
 }
 EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_disable, VSC_TP);
 
-static irqreturn_t vsc_tp_isr(int irq, void *data)
-{
-       struct vsc_tp *tp = data;
-
-       atomic_inc(&tp->assert_cnt);
-
-       return IRQ_WAKE_THREAD;
-}
-
-static irqreturn_t vsc_tp_thread_isr(int irq, void *data)
-{
-       struct vsc_tp *tp = data;
-
-       wake_up(&tp->xfer_wait);
-
-       if (tp->event_notify)
-               tp->event_notify(tp->event_notify_context);
-
-       return IRQ_HANDLED;
-}
-
 static int vsc_tp_match_any(struct acpi_device *adev, void *data)
 {
        struct acpi_device **__adev = data;
@@ -485,10 +516,9 @@ static int vsc_tp_probe(struct spi_device *spi)
        tp->spi = spi;
 
        irq_set_status_flags(spi->irq, IRQ_DISABLE_UNLAZY);
-       ret = devm_request_threaded_irq(dev, spi->irq, vsc_tp_isr,
-                                       vsc_tp_thread_isr,
-                                       IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
-                                       dev_name(dev), tp);
+       ret = request_threaded_irq(spi->irq, vsc_tp_isr, vsc_tp_thread_isr,
+                                  IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+                                  dev_name(dev), tp);
        if (ret)
                return ret;
 
@@ -522,6 +552,8 @@ static int vsc_tp_probe(struct spi_device *spi)
 err_destroy_lock:
        mutex_destroy(&tp->mutex);
 
+       free_irq(spi->irq, tp);
+
        return ret;
 }
 
@@ -532,6 +564,8 @@ static void vsc_tp_remove(struct spi_device *spi)
        platform_device_unregister(tp->pdev);
 
        mutex_destroy(&tp->mutex);
+
+       free_irq(spi->irq, tp);
 }
 
 static const struct acpi_device_id vsc_tp_acpi_ids[] = {
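
Two related changes land in this file: the hard/threaded handler pair is rebalanced (the wake_up() moves into the hard handler so waiters unblock before the threaded half runs), and the IRQ switches from devm management to explicit request/free so that suspend can release it and resume can re-request it. A reduced kernel-style sketch of the resulting split; the struct and function names are this sketch's own:

#include <linux/atomic.h>
#include <linux/interrupt.h>
#include <linux/wait.h>

struct tp {
        atomic_t assert_cnt;
        wait_queue_head_t xfer_wait;
        void (*event_notify)(void *ctx);
        void *event_notify_context;
};

static irqreturn_t tp_hard_isr(int irq, void *data)
{
        struct tp *tp = data;

        atomic_inc(&tp->assert_cnt);
        wake_up(&tp->xfer_wait);        /* unblock waiters immediately */
        return IRQ_WAKE_THREAD;         /* then run tp_thread_isr() */
}

static irqreturn_t tp_thread_isr(int irq, void *data)
{
        struct tp *tp = data;

        if (tp->event_notify)           /* may sleep: thread context */
                tp->event_notify(tp->event_notify_context);
        return IRQ_HANDLED;
}

/* Requested manually (not devm) so suspend can free it and resume can
 * re-request it, mirroring vsc_tp_request_irq()/vsc_tp_free_irq(). */
static int tp_request_irq(struct tp *tp, int irq, const char *name)
{
        return request_threaded_irq(irq, tp_hard_isr, tp_thread_isr,
                                    IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                                    name, tp);
}
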
index f9513ddc3e409350ffe871af1ad30268226e6225..14ca195cbddccf23b15c03556411dbef95a18715 100644 (file)
@@ -37,6 +37,9 @@ int vsc_tp_xfer(struct vsc_tp *tp, u8 cmd, const void *obuf, size_t olen,
 int vsc_tp_register_event_cb(struct vsc_tp *tp, vsc_tp_event_cb_t event_cb,
                             void *context);
 
+int vsc_tp_request_irq(struct vsc_tp *tp);
+void vsc_tp_free_irq(struct vsc_tp *tp);
+
 void vsc_tp_intr_enable(struct vsc_tp *tp);
 void vsc_tp_intr_disable(struct vsc_tp *tp);
 void vsc_tp_intr_synchronize(struct vsc_tp *tp);
index 22b97505fa53651f2e7c7040bebaaf2dd61296ae..e48ef9823c8ead560d8fe8601c5a1883919a9b6d 100644 (file)
@@ -1947,14 +1947,16 @@ mt7530_port_vlan_del(struct dsa_switch *ds, int port,
 
 static int mt753x_mirror_port_get(unsigned int id, u32 val)
 {
-       return (id == ID_MT7531) ? MT7531_MIRROR_PORT_GET(val) :
-                                  MIRROR_PORT(val);
+       return (id == ID_MT7531 || id == ID_MT7988) ?
+                      MT7531_MIRROR_PORT_GET(val) :
+                      MIRROR_PORT(val);
 }
 
 static int mt753x_mirror_port_set(unsigned int id, u32 val)
 {
-       return (id == ID_MT7531) ? MT7531_MIRROR_PORT_SET(val) :
-                                  MIRROR_PORT(val);
+       return (id == ID_MT7531 || id == ID_MT7988) ?
+                      MT7531_MIRROR_PORT_SET(val) :
+                      MIRROR_PORT(val);
 }
 
 static int mt753x_port_mirror_add(struct dsa_switch *ds, int port,
@@ -2469,8 +2471,6 @@ mt7530_setup(struct dsa_switch *ds)
                     SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
                     SYS_CTRL_REG_RST);
 
-       mt7530_pll_setup(priv);
-
        /* Lower Tx driving for TRGMII path */
        for (i = 0; i < NUM_TRGMII_CTRL; i++)
                mt7530_write(priv, MT7530_TRGMII_TD_ODT(i),
@@ -2488,6 +2488,9 @@ mt7530_setup(struct dsa_switch *ds)
 
        priv->p6_interface = PHY_INTERFACE_MODE_NA;
 
+       if ((val & HWTRAP_XTAL_MASK) == HWTRAP_XTAL_40MHZ)
+               mt7530_pll_setup(priv);
+
        mt753x_trap_frames(priv);
 
        /* Enable and reset MIB counters */
@@ -2517,6 +2520,9 @@ mt7530_setup(struct dsa_switch *ds)
                           PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
        }
 
+       /* Allow mirroring frames received on the local port (monitor port). */
+       mt7530_set(priv, MT753X_AGC, LOCAL_EN);
+
        /* Setup VLAN ID 0 for VLAN-unaware bridges */
        ret = mt7530_setup_vlan0(priv);
        if (ret)
@@ -2625,6 +2631,9 @@ mt7531_setup_common(struct dsa_switch *ds)
                           PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
        }
 
+       /* Allow mirroring frames received on the local port (monitor port). */
+       mt7530_set(priv, MT753X_AGC, LOCAL_EN);
+
        /* Flush the FDB table */
        ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
        if (ret < 0)
@@ -2703,18 +2712,25 @@ mt7531_setup(struct dsa_switch *ds)
        priv->p5_interface = PHY_INTERFACE_MODE_NA;
        priv->p6_interface = PHY_INTERFACE_MODE_NA;
 
-       /* Enable PHY core PLL, since phy_device has not yet been created
-        * provided for phy_[read,write]_mmd_indirect is called, we provide
-        * our own mt7531_ind_mmd_phy_[read,write] to complete this
-        * function.
+       /* Enable Energy-Efficient Ethernet (EEE) and the PHY core PLL. Since
+        * no phy_device has been created yet by the time
+        * phy_[read,write]_mmd_indirect would be called, we provide our own
+        * mt7531_ind_mmd_phy_[read,write] helpers to complete this function.
         */
        val = mt7531_ind_c45_phy_read(priv, MT753X_CTRL_PHY_ADDR,
                                      MDIO_MMD_VEND2, CORE_PLL_GROUP4);
-       val |= MT7531_PHY_PLL_BYPASS_MODE;
+       val |= MT7531_RG_SYSPLL_DMY2 | MT7531_PHY_PLL_BYPASS_MODE;
        val &= ~MT7531_PHY_PLL_OFF;
        mt7531_ind_c45_phy_write(priv, MT753X_CTRL_PHY_ADDR, MDIO_MMD_VEND2,
                                 CORE_PLL_GROUP4, val);
 
+       /* Disable EEE advertisement on the switch PHYs. */
+       for (i = MT753X_CTRL_PHY_ADDR;
+            i < MT753X_CTRL_PHY_ADDR + MT7530_NUM_PHYS; i++) {
+               mt7531_ind_c45_phy_write(priv, i, MDIO_MMD_AN, MDIO_AN_EEE_ADV,
+                                        0);
+       }
+
        mt7531_setup_common(ds);
 
        /* Setup VLAN ID 0 for VLAN-unaware bridges */
index ddefeb69afda102dbc09608c08c447185dc4c9c5..0ad52d3cbfebb974ee70515da6770052acddc210 100644 (file)
@@ -32,6 +32,10 @@ enum mt753x_id {
 #define SYSC_REG_RSTCTRL               0x34
 #define  RESET_MCM                     BIT(2)
 
+/* Register for ARL global control */
+#define MT753X_AGC                     0xc
+#define  LOCAL_EN                      BIT(7)
+
 /* Registers to mac forward control for unknown frames */
 #define MT7530_MFC                     0x10
 #define  BC_FFP(x)                     (((x) & 0xff) << 24)
@@ -630,6 +634,7 @@ enum mt7531_clk_skew {
 #define  RG_SYSPLL_DDSFBK_EN           BIT(12)
 #define  RG_SYSPLL_BIAS_EN             BIT(11)
 #define  RG_SYSPLL_BIAS_LPF_EN         BIT(10)
+#define  MT7531_RG_SYSPLL_DMY2         BIT(6)
 #define  MT7531_PHY_PLL_OFF            BIT(5)
 #define  MT7531_PHY_PLL_BYPASS_MODE    BIT(4)
 
index b890410a2bc0bacd27eab5acd6f87cd8b0110939..688ccb0615ab9f87e7caf9e6fa522444613b2bf3 100644 (file)
@@ -28,6 +28,8 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
         * - ICE_TC_FLWR_FIELD_VLAN_TPID (present if specified)
         * - Tunnel flag (present if tunnel)
         */
+       if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS)
+               lkups_cnt++;
 
        if (flags & ICE_TC_FLWR_FIELD_TENANT_ID)
                lkups_cnt++;
@@ -363,6 +365,11 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
        /* Always add direction metadata */
        ice_rule_add_direction_metadata(&list[ICE_TC_METADATA_LKUP_IDX]);
 
+       if (tc_fltr->direction == ICE_ESWITCH_FLTR_EGRESS) {
+               ice_rule_add_src_vsi_metadata(&list[i]);
+               i++;
+       }
+
        rule_info->tun_type = ice_sw_type_from_tunnel(tc_fltr->tunnel_type);
        if (tc_fltr->tunnel_type != TNL_LAST) {
                i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list, i);
@@ -772,7 +779,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
        int ret;
        int i;
 
-       if (!flags || (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT)) {
+       if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT) {
                NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported encap field(s)");
                return -EOPNOTSUPP;
        }
@@ -820,6 +827,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
 
        /* specify the cookie as filter_rule_id */
        rule_info.fltr_rule_id = fltr->cookie;
+       rule_info.src_vsi = vsi->idx;
 
        ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
        if (ret == -EEXIST) {
@@ -1481,7 +1489,10 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
                  (BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
                   BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
                   BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) |
-                  BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS))) {
+                  BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) |
+                  BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) |
+                  BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS) |
+                  BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
                NL_SET_ERR_MSG_MOD(fltr->extack, "Tunnel key used, but device isn't a tunnel");
                return -EOPNOTSUPP;
        } else {
index 4fd44b6eecea63c5b6f5f5ab39d04b43945b01a8..60ee7ae2c409793f500c05ce7b2cd25092d6f2b1 100644 (file)
@@ -688,6 +688,7 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
 
        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
                struct flow_match_control match;
+               u32 val;
 
                flow_rule_match_control(rule, &match);
                if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
@@ -696,12 +697,14 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
                }
 
                if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
+                       val = match.key->flags & FLOW_DIS_IS_FRAGMENT;
                        if (ntohs(flow_spec->etype) == ETH_P_IP) {
-                               flow_spec->ip_flag = IPV4_FLAG_MORE;
+                               flow_spec->ip_flag = val ? IPV4_FLAG_MORE : 0;
                                flow_mask->ip_flag = IPV4_FLAG_MORE;
                                req->features |= BIT_ULL(NPC_IPFRAG_IPV4);
                        } else if (ntohs(flow_spec->etype) == ETH_P_IPV6) {
-                               flow_spec->next_header = IPPROTO_FRAGMENT;
+                               flow_spec->next_header = val ?
+                                                        IPPROTO_FRAGMENT : 0;
                                flow_mask->next_header = 0xff;
                                req->features |= BIT_ULL(NPC_IPFRAG_IPV6);
                        } else {
index c895e265ae0ebcde930acf3785ba9ab1b63b65e5..61334a71058c7594a61ca768ce041f92ab238d24 100644 (file)
@@ -1074,13 +1074,13 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev)
 static void
 mtk_wed_stop(struct mtk_wed_device *dev)
 {
+       mtk_wed_dma_disable(dev);
        mtk_wed_set_ext_int(dev, false);
 
        wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
        wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
        wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
        wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
-       wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
 
        if (!mtk_wed_get_rx_capa(dev))
                return;
@@ -1093,7 +1093,6 @@ static void
 mtk_wed_deinit(struct mtk_wed_device *dev)
 {
        mtk_wed_stop(dev);
-       mtk_wed_dma_disable(dev);
 
        wed_clr(dev, MTK_WED_CTRL,
                MTK_WED_CTRL_WDMA_INT_AGENT_EN |
@@ -2605,9 +2604,6 @@ mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
 static void
 mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask)
 {
-       if (!dev->running)
-               return;
-
        mtk_wed_set_ext_int(dev, !!mask);
        wed_w32(dev, MTK_WED_INT_MASK, mask);
 }
index e66f486faafe1a6b0cfc75f0f11b2e957b040842..415fec7763bd26934afbfd537492ee2ea94ee4fa 100644 (file)
@@ -45,6 +45,10 @@ struct arfs_table {
        struct hlist_head        rules_hash[ARFS_HASH_SIZE];
 };
 
+enum {
+       MLX5E_ARFS_STATE_ENABLED,
+};
+
 enum arfs_type {
        ARFS_IPV4_TCP,
        ARFS_IPV6_TCP,
@@ -59,6 +63,7 @@ struct mlx5e_arfs_tables {
        spinlock_t                     arfs_lock;
        int                            last_filter_id;
        struct workqueue_struct        *wq;
+       unsigned long                  state;
 };
 
 struct arfs_tuple {
@@ -169,6 +174,8 @@ int mlx5e_arfs_enable(struct mlx5e_flow_steering *fs)
                        return err;
                }
        }
+       set_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state);
+
        return 0;
 }
 
@@ -454,6 +461,8 @@ static void arfs_del_rules(struct mlx5e_flow_steering *fs)
        int i;
        int j;
 
+       clear_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state);
+
        spin_lock_bh(&arfs->arfs_lock);
        mlx5e_for_each_arfs_rule(rule, htmp, arfs->arfs_tables, i, j) {
                hlist_del_init(&rule->hlist);
@@ -626,17 +635,8 @@ static void arfs_handle_work(struct work_struct *work)
        struct mlx5_flow_handle *rule;
 
        arfs = mlx5e_fs_get_arfs(priv->fs);
-       mutex_lock(&priv->state_lock);
-       if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
-               spin_lock_bh(&arfs->arfs_lock);
-               hlist_del(&arfs_rule->hlist);
-               spin_unlock_bh(&arfs->arfs_lock);
-
-               mutex_unlock(&priv->state_lock);
-               kfree(arfs_rule);
-               goto out;
-       }
-       mutex_unlock(&priv->state_lock);
+       if (!test_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state))
+               return;
 
        if (!arfs_rule->rule) {
                rule = arfs_add_rule(priv, arfs_rule);
@@ -752,6 +752,11 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
                return -EPROTONOSUPPORT;
 
        spin_lock_bh(&arfs->arfs_lock);
+       if (!test_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state)) {
+               spin_unlock_bh(&arfs->arfs_lock);
+               return -EPERM;
+       }
+
        arfs_rule = arfs_find_rule(arfs_t, &fk);
        if (arfs_rule) {
                if (arfs_rule->rxq == rxq_index || work_busy(&arfs_rule->arfs_work)) {
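
The new MLX5E_ARFS_STATE_ENABLED bit replaces peeking at the device's MLX5E_STATE_OPENED under the heavyweight state_lock: enable sets it once the tables exist, teardown clears it before deleting rules, and both the work handler and the steering entry point bail out when it is clear. A reduced sketch of that guard pattern, with all names hypothetical:

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

enum {
        TABLES_READY,   /* lifecycle bit, set while rules may be added */
};

struct flow_tables {
        spinlock_t lock;
        unsigned long state;
};

static void tables_enable(struct flow_tables *ft)
{
        set_bit(TABLES_READY, &ft->state);      /* after tables built */
}

static void tables_disable(struct flow_tables *ft)
{
        clear_bit(TABLES_READY, &ft->state);    /* before rules freed */
}

static int add_rule(struct flow_tables *ft)
{
        spin_lock_bh(&ft->lock);
        if (!test_bit(TABLES_READY, &ft->state)) {
                spin_unlock_bh(&ft->lock);
                return -EPERM;  /* teardown in progress, refuse work */
        }
        /* ... insert the rule while still holding the lock ... */
        spin_unlock_bh(&ft->lock);
        return 0;
}
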
index 3047d7015c5256726338904432ce56845c59c39c..1789800faaeb62841387ed69b0a82aab3283bf46 100644 (file)
@@ -1868,6 +1868,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
        if (err)
                goto abort;
 
+       dev->priv.eswitch = esw;
        err = esw_offloads_init(esw);
        if (err)
                goto reps_err;
@@ -1892,11 +1893,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
                esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
        else
                esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
-       if (MLX5_ESWITCH_MANAGER(dev) &&
-           mlx5_esw_vport_match_metadata_supported(esw))
-               esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
-
-       dev->priv.eswitch = esw;
        BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head);
 
        esw_info(dev,
@@ -1908,6 +1904,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 
 reps_err:
        mlx5_esw_vports_cleanup(esw);
+       dev->priv.eswitch = NULL;
 abort:
        if (esw->work_queue)
                destroy_workqueue(esw->work_queue);
@@ -1926,7 +1923,6 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
 
        esw_info(esw->dev, "cleanup\n");
 
-       esw->dev->priv.eswitch = NULL;
        destroy_workqueue(esw->work_queue);
        WARN_ON(refcount_read(&esw->qos.refcnt));
        mutex_destroy(&esw->state_lock);
@@ -1937,6 +1933,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
        mutex_destroy(&esw->offloads.encap_tbl_lock);
        mutex_destroy(&esw->offloads.decap_tbl_lock);
        esw_offloads_cleanup(esw);
+       esw->dev->priv.eswitch = NULL;
        mlx5_esw_vports_cleanup(esw);
        debugfs_remove_recursive(esw->debugfs_root);
        devl_params_unregister(priv_to_devlink(esw->dev), mlx5_eswitch_params,
index baaae628b0a0f6510e2c350cbab0b6309b32da52..e3cce110e52fdb9207e417538d69089eb52139c4 100644 (file)
@@ -2476,6 +2476,10 @@ int esw_offloads_init(struct mlx5_eswitch *esw)
        if (err)
                return err;
 
+       if (MLX5_ESWITCH_MANAGER(esw->dev) &&
+           mlx5_esw_vport_match_metadata_supported(esw))
+               esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
+
        err = devl_params_register(priv_to_devlink(esw->dev),
                                   esw_devlink_params,
                                   ARRAY_SIZE(esw_devlink_params));
index d14459e5c04fc515ad682e11ee322aa3891e382f..69d482f7c5a29916688ac0d79d324df5f2596586 100644 (file)
@@ -703,8 +703,10 @@ int mlx5_deactivate_lag(struct mlx5_lag *ldev)
                return err;
        }
 
-       if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags))
+       if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) {
                mlx5_lag_port_sel_destroy(ldev);
+               ldev->buckets = 1;
+       }
        if (mlx5_lag_has_drop_rule(ldev))
                mlx5_lag_drop_rule_cleanup(ldev);
 
index 131a836c127e300c5a175efde74fdcc41650ceff..e285823bd08f0ba64aa028d8069204b587debd0a 100644 (file)
@@ -1699,12 +1699,15 @@ int mlx5_init_one_light(struct mlx5_core_dev *dev)
        err = mlx5_devlink_params_register(priv_to_devlink(dev));
        if (err) {
                mlx5_core_warn(dev, "mlx5_devlink_param_reg err = %d\n", err);
-               goto query_hca_caps_err;
+               goto params_reg_err;
        }
 
        devl_unlock(devlink);
        return 0;
 
+params_reg_err:
+       devl_unregister(devlink);
+       devl_unlock(devlink);
 query_hca_caps_err:
        devl_unregister(devlink);
        devl_unlock(devlink);
index e3bf8c7e4baa62e336415e495a8a385e1edb0654..7ebe712808275a7a1db290040d86c2cd5983c9d7 100644 (file)
@@ -75,7 +75,6 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia
                goto peer_devlink_set_err;
        }
 
-       devlink_register(devlink);
        return 0;
 
 peer_devlink_set_err:
index 523e0c470894f7fdcf8a995fb821ff146f08fcd9..55f255a3c9db69b92d5743bd42c34cbaba46a0a8 100644 (file)
@@ -36,6 +36,27 @@ struct sparx5_tc_flower_template {
        u16 l3_proto; /* protocol specified in the template */
 };
 
+/* SparX-5 VCAP fragment types:
+ * 0 = no fragment, 1 = initial fragment,
+ * 2 = suspicious fragment, 3 = valid follow-up fragment
+ */
+enum {                   /* key / mask */
+       FRAG_NOT   = 0x03, /* 0 / 3 */
+       FRAG_SOME  = 0x11, /* 1 / 1 */
+       FRAG_FIRST = 0x13, /* 1 / 3 */
+       FRAG_LATER = 0x33, /* 3 / 3 */
+       FRAG_INVAL = 0xff, /* invalid */
+};
+
+/* Flower fragment flag to VCAP fragment type mapping */
+static const u8 sparx5_vcap_frag_map[4][4] = {           /* is_frag */
+       { FRAG_INVAL, FRAG_INVAL, FRAG_INVAL, FRAG_FIRST }, /* 0/0 */
+       { FRAG_NOT,   FRAG_NOT,   FRAG_INVAL, FRAG_INVAL }, /* 0/1 */
+       { FRAG_INVAL, FRAG_INVAL, FRAG_INVAL, FRAG_INVAL }, /* 1/0 */
+       { FRAG_SOME,  FRAG_LATER, FRAG_INVAL, FRAG_FIRST }  /* 1/1 */
+       /* 0/0        0/1         1/0         1/1 <-- first_frag */
+};
+
 static int
 sparx5_tc_flower_es0_tpid(struct vcap_tc_flower_parse_usage *st)
 {
@@ -145,29 +166,27 @@ sparx5_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st)
        flow_rule_match_control(st->frule, &mt);
 
        if (mt.mask->flags) {
-               if (mt.mask->flags & FLOW_DIS_FIRST_FRAG) {
-                       if (mt.key->flags & FLOW_DIS_FIRST_FRAG) {
-                               value = 1; /* initial fragment */
-                               mask = 0x3;
-                       } else {
-                               if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
-                                       value = 3; /* follow up fragment */
-                                       mask = 0x3;
-                               } else {
-                                       value = 0; /* no fragment */
-                                       mask = 0x3;
-                               }
-                       }
-               } else {
-                       if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
-                               value = 3; /* follow up fragment */
-                               mask = 0x3;
-                       } else {
-                               value = 0; /* no fragment */
-                               mask = 0x3;
-                       }
+               u8 is_frag_key = !!(mt.key->flags & FLOW_DIS_IS_FRAGMENT);
+               u8 is_frag_mask = !!(mt.mask->flags & FLOW_DIS_IS_FRAGMENT);
+               u8 is_frag_idx = (is_frag_key << 1) | is_frag_mask;
+
+               u8 first_frag_key = !!(mt.key->flags & FLOW_DIS_FIRST_FRAG);
+               u8 first_frag_mask = !!(mt.mask->flags & FLOW_DIS_FIRST_FRAG);
+               u8 first_frag_idx = (first_frag_key << 1) | first_frag_mask;
+
+               /* Lookup verdict based on the 2 + 2 input bits */
+               u8 vdt = sparx5_vcap_frag_map[is_frag_idx][first_frag_idx];
+
+               if (vdt == FRAG_INVAL) {
+                       NL_SET_ERR_MSG_MOD(st->fco->common.extack,
+                                          "Match on invalid fragment flag combination");
+                       return -EINVAL;
                }
 
+               /* Extract VCAP fragment key and mask from verdict */
+               value = (vdt >> 4) & 0x3;
+               mask = vdt & 0x3;
+
                err = vcap_rule_add_key_u32(st->vrule,
                                            VCAP_KF_L3_FRAGMENT_TYPE,
                                            value, mask);
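
The nested if/else ladder collapses into a 4x4 lookup table indexed by (key bit << 1 | mask bit) for each of the two flower flags; each verdict byte packs the VCAP key in its high nibble and the mask in its low nibble. A standalone program exercising the same table and decode:

#include <stdint.h>
#include <stdio.h>

/* Verdict bytes pack key (high nibble) and mask (low nibble),
 * e.g. FRAG_FIRST = 0x13 -> key 1, mask 3. */
enum {
        FRAG_NOT   = 0x03,
        FRAG_SOME  = 0x11,
        FRAG_FIRST = 0x13,
        FRAG_LATER = 0x33,
        FRAG_INVAL = 0xff,
};

/* Rows: (is_frag key << 1) | is_frag mask.
 * Columns: (first_frag key << 1) | first_frag mask. */
static const uint8_t frag_map[4][4] = {
        { FRAG_INVAL, FRAG_INVAL, FRAG_INVAL, FRAG_FIRST },
        { FRAG_NOT,   FRAG_NOT,   FRAG_INVAL, FRAG_INVAL },
        { FRAG_INVAL, FRAG_INVAL, FRAG_INVAL, FRAG_INVAL },
        { FRAG_SOME,  FRAG_LATER, FRAG_INVAL, FRAG_FIRST },
};

int main(void)
{
        /* Example: is_fragment key=1/mask=1 -> row 3,
         * first_frag key=0/mask=1 -> column 1 -> FRAG_LATER. */
        uint8_t vdt = frag_map[3][1];

        if (vdt != FRAG_INVAL)
                printf("key=%u mask=%u\n", (vdt >> 4) & 0x3, vdt & 0x3);
        return 0;
}
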
index 81567fcf395799701ce3ba597374daa8342b94e9..1ef39928757bd4ad43dd4303871c0eee6b59629f 100644 (file)
@@ -72,6 +72,7 @@ enum mac_version {
 };
 
 struct rtl8169_private;
+struct r8169_led_classdev;
 
 void r8169_apply_firmware(struct rtl8169_private *tp);
 u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp);
@@ -83,4 +84,5 @@ void r8169_get_led_name(struct rtl8169_private *tp, int idx,
                        char *buf, int buf_len);
 int rtl8168_get_led_mode(struct rtl8169_private *tp);
 int rtl8168_led_mod_ctrl(struct rtl8169_private *tp, u16 mask, u16 val);
-void rtl8168_init_leds(struct net_device *ndev);
+struct r8169_led_classdev *rtl8168_init_leds(struct net_device *ndev);
+void r8169_remove_leds(struct r8169_led_classdev *leds);
index 007d077edcad797be596d846d0806d20972adfc0..1c97f3cca22a112f7d9521deeb616e8aac5e51fa 100644 (file)
@@ -138,20 +138,31 @@ static void rtl8168_setup_ldev(struct r8169_led_classdev *ldev,
        led_cdev->hw_control_get_device = r8169_led_hw_control_get_device;
 
        /* ignore errors */
-       devm_led_classdev_register(&ndev->dev, led_cdev);
+       led_classdev_register(&ndev->dev, led_cdev);
 }
 
-void rtl8168_init_leds(struct net_device *ndev)
+struct r8169_led_classdev *rtl8168_init_leds(struct net_device *ndev)
 {
-       /* bind resource mgmt to netdev */
-       struct device *dev = &ndev->dev;
        struct r8169_led_classdev *leds;
        int i;
 
-       leds = devm_kcalloc(dev, RTL8168_NUM_LEDS, sizeof(*leds), GFP_KERNEL);
+       leds = kcalloc(RTL8168_NUM_LEDS + 1, sizeof(*leds), GFP_KERNEL);
        if (!leds)
-               return;
+               return NULL;
 
        for (i = 0; i < RTL8168_NUM_LEDS; i++)
                rtl8168_setup_ldev(leds + i, ndev, i);
+
+       return leds;
+}
+
+void r8169_remove_leds(struct r8169_led_classdev *leds)
+{
+       if (!leds)
+               return;
+
+       for (struct r8169_led_classdev *l = leds; l->ndev; l++)
+               led_classdev_unregister(&l->led);
+
+       kfree(leds);
 }
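
Moving from devm to manual registration means the driver must be able to walk the LED array at remove time without storing a count; allocating one extra zeroed element gives the teardown loop a natural sentinel (an entry whose ndev is NULL). A userspace reduction of that sentinel pattern:

#include <stdio.h>
#include <stdlib.h>

struct led {
        void *ndev;
        int id;
};

/* calloc(n + 1, ...) leaves a zeroed sentinel entry at the end, so
 * remove_leds() below needs no separate element count. */
static struct led *init_leds(void *ndev, int n)
{
        struct led *leds = calloc(n + 1, sizeof(*leds));

        if (!leds)
                return NULL;
        for (int i = 0; i < n; i++) {
                leds[i].ndev = ndev;
                leds[i].id = i;
        }
        return leds;
}

static void remove_leds(struct led *leds)
{
        if (!leds)
                return;
        for (struct led *l = leds; l->ndev; l++)
                printf("unregister led %d\n", l->id);
        free(leds);
}

int main(void)
{
        int dummy;
        struct led *leds = init_leds(&dummy, 2);

        remove_leds(leds);
        return 0;
}
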
index 4b6c28576a5187cf952428cd8707e274aa054235..32b73f3988e8c90e4bd6f33798f7ff9f64569eeb 100644 (file)
@@ -634,6 +634,8 @@ struct rtl8169_private {
        const char *fw_name;
        struct rtl_fw *rtl_fw;
 
+       struct r8169_led_classdev *leds;
+
        u32 ocp_base;
 };
 
@@ -4930,6 +4932,9 @@ static void rtl_remove_one(struct pci_dev *pdev)
 
        cancel_work_sync(&tp->wk.work);
 
+       if (IS_ENABLED(CONFIG_R8169_LEDS))
+               r8169_remove_leds(tp->leds);
+
        unregister_netdev(tp->dev);
 
        if (tp->dash_type != RTL_DASH_NONE)
@@ -5391,7 +5396,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (IS_ENABLED(CONFIG_R8169_LEDS) &&
            tp->mac_version > RTL_GIGA_MAC_VER_06 &&
            tp->mac_version < RTL_GIGA_MAC_VER_61)
-               rtl8168_init_leds(dev);
+               tp->leds = rtl8168_init_leds(dev);
 
        netdev_info(dev, "%s, %pM, XID %03x, IRQ %d\n",
                    rtl_chip_infos[chipset].name, dev->dev_addr, xid, tp->irq);
index e0f8276cffeddeb242217f3111e1211115e5ff98..fd59155a70e1f476ae39fbc968f7996aaecd0f50 100644 (file)
@@ -1060,8 +1060,10 @@ struct ravb_private {
        struct ravb_desc *desc_bat;
        dma_addr_t rx_desc_dma[NUM_RX_QUEUE];
        dma_addr_t tx_desc_dma[NUM_TX_QUEUE];
-       struct ravb_rx_desc *gbeth_rx_ring;
-       struct ravb_ex_rx_desc *rx_ring[NUM_RX_QUEUE];
+       union {
+               struct ravb_rx_desc *desc;
+               struct ravb_ex_rx_desc *ex_desc;
+       } rx_ring[NUM_RX_QUEUE];
        struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE];
        void *tx_align[NUM_TX_QUEUE];
        struct sk_buff *rx_1st_skb;
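
Folding the two ring pointers into a per-queue union keeps one field name across both hardware variants; only one member is ever live for a given device, and the ring code picks it by driver variant. A toy illustration:

#include <stdio.h>

struct small_desc { unsigned int dptr; };
struct ext_desc   { unsigned int dptr; unsigned long long ts; };

/* One slot, two descriptor layouts: the hardware variant decides
 * which member is live, so ring code indexes a single field name. */
union rx_ring {
        struct small_desc *desc;
        struct ext_desc   *ex_desc;
};

int main(void)
{
        struct ext_desc d = { .dptr = 0x1000, .ts = 42 };
        union rx_ring ring = { .ex_desc = &d };

        printf("dptr=%#x\n", ring.ex_desc->dptr);
        return 0;
}
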
index 1bdf0abb256cff6f381b9cfe5edf8ab13c552efc..853c2a0d4e259110a2fb34f491f099e4124eedda 100644 (file)
@@ -250,11 +250,11 @@ static void ravb_rx_ring_free_gbeth(struct net_device *ndev, int q)
        unsigned int ring_size;
        unsigned int i;
 
-       if (!priv->gbeth_rx_ring)
+       if (!priv->rx_ring[q].desc)
                return;
 
        for (i = 0; i < priv->num_rx_ring[q]; i++) {
-               struct ravb_rx_desc *desc = &priv->gbeth_rx_ring[i];
+               struct ravb_rx_desc *desc = &priv->rx_ring[q].desc[i];
 
                if (!dma_mapping_error(ndev->dev.parent,
                                       le32_to_cpu(desc->dptr)))
@@ -264,9 +264,9 @@ static void ravb_rx_ring_free_gbeth(struct net_device *ndev, int q)
                                         DMA_FROM_DEVICE);
        }
        ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
-       dma_free_coherent(ndev->dev.parent, ring_size, priv->gbeth_rx_ring,
+       dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q].desc,
                          priv->rx_desc_dma[q]);
-       priv->gbeth_rx_ring = NULL;
+       priv->rx_ring[q].desc = NULL;
 }
 
 static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
@@ -275,11 +275,11 @@ static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
        unsigned int ring_size;
        unsigned int i;
 
-       if (!priv->rx_ring[q])
+       if (!priv->rx_ring[q].ex_desc)
                return;
 
        for (i = 0; i < priv->num_rx_ring[q]; i++) {
-               struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
+               struct ravb_ex_rx_desc *desc = &priv->rx_ring[q].ex_desc[i];
 
                if (!dma_mapping_error(ndev->dev.parent,
                                       le32_to_cpu(desc->dptr)))
@@ -290,9 +290,9 @@ static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
        }
        ring_size = sizeof(struct ravb_ex_rx_desc) *
                    (priv->num_rx_ring[q] + 1);
-       dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
+       dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q].ex_desc,
                          priv->rx_desc_dma[q]);
-       priv->rx_ring[q] = NULL;
+       priv->rx_ring[q].ex_desc = NULL;
 }
 
 /* Free skb's and DMA buffers for Ethernet AVB */
@@ -344,11 +344,11 @@ static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q)
        unsigned int i;
 
        rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
-       memset(priv->gbeth_rx_ring, 0, rx_ring_size);
+       memset(priv->rx_ring[q].desc, 0, rx_ring_size);
        /* Build RX ring buffer */
        for (i = 0; i < priv->num_rx_ring[q]; i++) {
                /* RX descriptor */
-               rx_desc = &priv->gbeth_rx_ring[i];
+               rx_desc = &priv->rx_ring[q].desc[i];
                rx_desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
                dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
                                          GBETH_RX_BUFF_MAX,
@@ -361,7 +361,7 @@ static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q)
                rx_desc->dptr = cpu_to_le32(dma_addr);
                rx_desc->die_dt = DT_FEMPTY;
        }
-       rx_desc = &priv->gbeth_rx_ring[i];
+       rx_desc = &priv->rx_ring[q].desc[i];
        rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
        rx_desc->die_dt = DT_LINKFIX; /* type */
 }
@@ -374,11 +374,11 @@ static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
        dma_addr_t dma_addr;
        unsigned int i;
 
-       memset(priv->rx_ring[q], 0, rx_ring_size);
+       memset(priv->rx_ring[q].ex_desc, 0, rx_ring_size);
        /* Build RX ring buffer */
        for (i = 0; i < priv->num_rx_ring[q]; i++) {
                /* RX descriptor */
-               rx_desc = &priv->rx_ring[q][i];
+               rx_desc = &priv->rx_ring[q].ex_desc[i];
                rx_desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
                dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
                                          RX_BUF_SZ,
@@ -391,7 +391,7 @@ static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
                rx_desc->dptr = cpu_to_le32(dma_addr);
                rx_desc->die_dt = DT_FEMPTY;
        }
-       rx_desc = &priv->rx_ring[q][i];
+       rx_desc = &priv->rx_ring[q].ex_desc[i];
        rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
        rx_desc->die_dt = DT_LINKFIX; /* type */
 }
@@ -446,10 +446,10 @@ static void *ravb_alloc_rx_desc_gbeth(struct net_device *ndev, int q)
 
        ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
 
-       priv->gbeth_rx_ring = dma_alloc_coherent(ndev->dev.parent, ring_size,
-                                                &priv->rx_desc_dma[q],
-                                                GFP_KERNEL);
-       return priv->gbeth_rx_ring;
+       priv->rx_ring[q].desc = dma_alloc_coherent(ndev->dev.parent, ring_size,
+                                                  &priv->rx_desc_dma[q],
+                                                  GFP_KERNEL);
+       return priv->rx_ring[q].desc;
 }
 
 static void *ravb_alloc_rx_desc_rcar(struct net_device *ndev, int q)
@@ -459,10 +459,11 @@ static void *ravb_alloc_rx_desc_rcar(struct net_device *ndev, int q)
 
        ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
 
-       priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
-                                             &priv->rx_desc_dma[q],
-                                             GFP_KERNEL);
-       return priv->rx_ring[q];
+       priv->rx_ring[q].ex_desc = dma_alloc_coherent(ndev->dev.parent,
+                                                     ring_size,
+                                                     &priv->rx_desc_dma[q],
+                                                     GFP_KERNEL);
+       return priv->rx_ring[q].ex_desc;
 }
 
 /* Init skb and descriptor buffer for Ethernet AVB */
@@ -780,12 +781,15 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
        int limit;
        int i;
 
-       entry = priv->cur_rx[q] % priv->num_rx_ring[q];
        limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
        stats = &priv->stats[q];
 
-       desc = &priv->gbeth_rx_ring[entry];
-       for (i = 0; i < limit && rx_packets < *quota && desc->die_dt != DT_FEMPTY; i++) {
+       for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
+               entry = priv->cur_rx[q] % priv->num_rx_ring[q];
+               desc = &priv->rx_ring[q].desc[entry];
+               if (rx_packets == *quota || desc->die_dt == DT_FEMPTY)
+                       break;
+
                /* Descriptor type must be checked before all other reads */
                dma_rmb();
                desc_status = desc->msc;
@@ -849,15 +853,12 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
                                break;
                        }
                }
-
-               entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
-               desc = &priv->gbeth_rx_ring[entry];
        }
 
        /* Refill the RX ring buffers. */
        for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
                entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
-               desc = &priv->gbeth_rx_ring[entry];
+               desc = &priv->rx_ring[q].desc[entry];
                desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
 
                if (!priv->rx_skb[q][entry]) {
@@ -893,30 +894,29 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
 {
        struct ravb_private *priv = netdev_priv(ndev);
        const struct ravb_hw_info *info = priv->info;
-       int entry = priv->cur_rx[q] % priv->num_rx_ring[q];
-       int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) -
-                       priv->cur_rx[q];
        struct net_device_stats *stats = &priv->stats[q];
        struct ravb_ex_rx_desc *desc;
+       unsigned int limit, i;
        struct sk_buff *skb;
        dma_addr_t dma_addr;
        struct timespec64 ts;
+       int rx_packets = 0;
        u8  desc_status;
        u16 pkt_len;
-       int limit;
+       int entry;
+
+       limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
+       for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
+               entry = priv->cur_rx[q] % priv->num_rx_ring[q];
+               desc = &priv->rx_ring[q].ex_desc[entry];
+               if (rx_packets == *quota || desc->die_dt == DT_FEMPTY)
+                       break;
 
-       boguscnt = min(boguscnt, *quota);
-       limit = boguscnt;
-       desc = &priv->rx_ring[q][entry];
-       while (desc->die_dt != DT_FEMPTY) {
                /* Descriptor type must be checked before all other reads */
                dma_rmb();
                desc_status = desc->msc;
                pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
 
-               if (--boguscnt < 0)
-                       break;
-
                /* We use 0-byte descriptors to mark the DMA mapping errors */
                if (!pkt_len)
                        continue;
@@ -962,18 +962,15 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
                        if (ndev->features & NETIF_F_RXCSUM)
                                ravb_rx_csum(skb);
                        napi_gro_receive(&priv->napi[q], skb);
-                       stats->rx_packets++;
+                       rx_packets++;
                        stats->rx_bytes += pkt_len;
                }
-
-               entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
-               desc = &priv->rx_ring[q][entry];
        }
 
        /* Refill the RX ring buffers. */
        for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
                entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
-               desc = &priv->rx_ring[q][entry];
+               desc = &priv->rx_ring[q].ex_desc[entry];
                desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
 
                if (!priv->rx_skb[q][entry]) {
@@ -998,9 +995,9 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
                desc->die_dt = DT_FEMPTY;
        }
 
-       *quota -= limit - (++boguscnt);
-
-       return boguscnt <= 0;
+       stats->rx_packets += rx_packets;
+       *quota -= rx_packets;
+       return *quota == 0;
 }
 
 /* Packet receive function for Ethernet AVB */
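ravb_rx_gbeth() and ravb_rx_rcar() above now share one bounded receive-loop shape: the pass count is capped by the number of outstanding descriptors, and the quota and DT_FEMPTY checks sit at the top of each iteration instead of being tracked through the old boguscnt counter. Reduced to a sketch (hypothetical ring variables, not the driver's real fields):

        /* Bounded NAPI Rx loop, per the hunks above. */
        limit = dirty + num_desc - cur;
        for (i = 0, rx_packets = 0; i < limit; i++, cur++) {
                desc = &ring[cur % num_desc];
                if (rx_packets == *quota || desc->die_dt == DT_FEMPTY)
                        break;          /* budget spent or ring drained */
                dma_rmb();              /* descriptor type before payload reads */
                /* ... hand one packet to the stack, rx_packets++ ... */
        }
        *quota -= rx_packets;
        return *quota == 0;             /* true: NAPI should poll again */

Counting delivered packets (rx_packets) instead of visited descriptors also keeps the quota accounting honest when an entry is skipped, such as the 0-byte descriptors used to mark DMA mapping errors.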
index 5ba606a596e779bc17081c55e5bf5c52555ada0b..5a1d46dcd5de0d92be9711b9dbd2142af58751d1 100644
@@ -550,6 +550,7 @@ extern const struct stmmac_hwtimestamp stmmac_ptp;
 extern const struct stmmac_mode_ops dwmac4_ring_mode_ops;
 
 struct mac_link {
+       u32 caps;
        u32 speed_mask;
        u32 speed10;
        u32 speed100;
index b21d99faa2d04c985427af61724dd073e3a2fe79..e1b761dcfa1dd56f2e5218312933eb1ea6bc06b1 100644
@@ -1096,6 +1096,8 @@ static struct mac_device_info *sun8i_dwmac_setup(void *ppriv)
 
        priv->dev->priv_flags |= IFF_UNICAST_FLT;
 
+       mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+                        MAC_10 | MAC_100 | MAC_1000;
        /* The loopback bit seems to be re-set when the link changes.
         * Simply mask it each time
         * Speed 10/100/1000 are set in BIT(2)/BIT(3)
index 3927609abc44110be97903aee12e25084473b80c..8555299443f4edf2475b95c1785544a1c3b73251 100644
@@ -539,6 +539,8 @@ int dwmac1000_setup(struct stmmac_priv *priv)
        if (mac->multicast_filter_bins)
                mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
 
+       mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+                        MAC_10 | MAC_100 | MAC_1000;
        mac->link.duplex = GMAC_CONTROL_DM;
        mac->link.speed10 = GMAC_CONTROL_PS;
        mac->link.speed100 = GMAC_CONTROL_PS | GMAC_CONTROL_FES;
index a6e8d7bd95886fc277c7e22c896ddf618e0fca97..7667d103cd0ebd9670a42360a095cfd322c8ebac 100644
@@ -175,6 +175,8 @@ int dwmac100_setup(struct stmmac_priv *priv)
        dev_info(priv->device, "\tDWMAC100\n");
 
        mac->pcsr = priv->ioaddr;
+       mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+                        MAC_10 | MAC_100;
        mac->link.duplex = MAC_CONTROL_F;
        mac->link.speed10 = 0;
        mac->link.speed100 = 0;
index cef25efbdff99fdc07a313ab678869e83c85f79e..a38226d7cc6a99e45c39f62c81c56d8dc87a921a 100644
@@ -70,7 +70,10 @@ static void dwmac4_core_init(struct mac_device_info *hw,
 
 static void dwmac4_phylink_get_caps(struct stmmac_priv *priv)
 {
-       priv->phylink_config.mac_capabilities |= MAC_2500FD;
+       if (priv->plat->tx_queues_to_use > 1)
+               priv->hw->link.caps &= ~(MAC_10HD | MAC_100HD | MAC_1000HD);
+       else
+               priv->hw->link.caps |= (MAC_10HD | MAC_100HD | MAC_1000HD);
 }
 
 static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
@@ -1378,6 +1381,8 @@ int dwmac4_setup(struct stmmac_priv *priv)
        if (mac->multicast_filter_bins)
                mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
 
+       mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+                        MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
        mac->link.duplex = GMAC_CONFIG_DM;
        mac->link.speed10 = GMAC_CONFIG_PS;
        mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
index e841e312077ef0604c5b17a5473069cc4affadff..f8e7775bb63364c589da99cb4c954a38f4411567 100644
@@ -47,14 +47,6 @@ static void dwxgmac2_core_init(struct mac_device_info *hw,
        writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN);
 }
 
-static void xgmac_phylink_get_caps(struct stmmac_priv *priv)
-{
-       priv->phylink_config.mac_capabilities |= MAC_2500FD | MAC_5000FD |
-                                                MAC_10000FD | MAC_25000FD |
-                                                MAC_40000FD | MAC_50000FD |
-                                                MAC_100000FD;
-}
-
 static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable)
 {
        u32 tx = readl(ioaddr + XGMAC_TX_CONFIG);
@@ -1540,7 +1532,6 @@ static void dwxgmac3_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *
 
 const struct stmmac_ops dwxgmac210_ops = {
        .core_init = dwxgmac2_core_init,
-       .phylink_get_caps = xgmac_phylink_get_caps,
        .set_mac = dwxgmac2_set_mac,
        .rx_ipc = dwxgmac2_rx_ipc,
        .rx_queue_enable = dwxgmac2_rx_queue_enable,
@@ -1601,7 +1592,6 @@ static void dwxlgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
 
 const struct stmmac_ops dwxlgmac2_ops = {
        .core_init = dwxgmac2_core_init,
-       .phylink_get_caps = xgmac_phylink_get_caps,
        .set_mac = dwxgmac2_set_mac,
        .rx_ipc = dwxgmac2_rx_ipc,
        .rx_queue_enable = dwxlgmac2_rx_queue_enable,
@@ -1661,6 +1651,9 @@ int dwxgmac2_setup(struct stmmac_priv *priv)
        if (mac->multicast_filter_bins)
                mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
 
+       mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+                        MAC_1000FD | MAC_2500FD | MAC_5000FD |
+                        MAC_10000FD;
        mac->link.duplex = 0;
        mac->link.speed10 = XGMAC_CONFIG_SS_10_MII;
        mac->link.speed100 = XGMAC_CONFIG_SS_100_MII;
@@ -1698,6 +1691,11 @@ int dwxlgmac2_setup(struct stmmac_priv *priv)
        if (mac->multicast_filter_bins)
                mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
 
+       mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+                        MAC_1000FD | MAC_2500FD | MAC_5000FD |
+                        MAC_10000FD | MAC_25000FD |
+                        MAC_40000FD | MAC_50000FD |
+                        MAC_100000FD;
        mac->link.duplex = 0;
        mac->link.speed1000 = XLGMAC_CONFIG_SS_1000;
        mac->link.speed2500 = XLGMAC_CONFIG_SS_2500;
index 7c6aef033a456455e4334466bf276755f33dbd47..83b732c30c1bbe04436b3f2f25eb2f1089fd4e0f 100644
@@ -1198,17 +1198,6 @@ static int stmmac_init_phy(struct net_device *dev)
        return ret;
 }
 
-static void stmmac_set_half_duplex(struct stmmac_priv *priv)
-{
-       /* Half-Duplex can only work with single tx queue */
-       if (priv->plat->tx_queues_to_use > 1)
-               priv->phylink_config.mac_capabilities &=
-                       ~(MAC_10HD | MAC_100HD | MAC_1000HD);
-       else
-               priv->phylink_config.mac_capabilities |=
-                       (MAC_10HD | MAC_100HD | MAC_1000HD);
-}
-
 static int stmmac_phy_setup(struct stmmac_priv *priv)
 {
        struct stmmac_mdio_bus_data *mdio_bus_data;
@@ -1236,15 +1225,11 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
                xpcs_get_interfaces(priv->hw->xpcs,
                                    priv->phylink_config.supported_interfaces);
 
-       priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
-                                               MAC_10FD | MAC_100FD |
-                                               MAC_1000FD;
-
-       stmmac_set_half_duplex(priv);
-
        /* Get the MAC specific capabilities */
        stmmac_mac_phylink_get_caps(priv);
 
+       priv->phylink_config.mac_capabilities = priv->hw->link.caps;
+
        max_speed = priv->plat->max_speed;
        if (max_speed)
                phylink_limit_mac_speed(&priv->phylink_config, max_speed);
@@ -7286,6 +7271,7 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
        int ret = 0, i;
+       int max_speed;
 
        if (netif_running(dev))
                stmmac_release(dev);
@@ -7299,7 +7285,14 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
                        priv->rss.table[i] = ethtool_rxfh_indir_default(i,
                                                                        rx_cnt);
 
-       stmmac_set_half_duplex(priv);
+       stmmac_mac_phylink_get_caps(priv);
+
+       priv->phylink_config.mac_capabilities = priv->hw->link.caps;
+
+       max_speed = priv->plat->max_speed;
+       if (max_speed)
+               phylink_limit_mac_speed(&priv->phylink_config, max_speed);
+
        stmmac_napi_add(dev);
 
        if (netif_running(dev))
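The stmmac hunks above move the capability mask out of the per-core phylink_get_caps callbacks into mac->link.caps, initialized once in each dwmac*_setup(); dwmac4's callback shrinks to toggling the half-duplex bits, since half duplex only works with a single TX queue. stmmac_phy_setup() and stmmac_reinit_queues() then run the same sequence, sketched here from the hunks:

        /* Shared capability flow after the refactor. */
        stmmac_mac_phylink_get_caps(priv);      /* core-specific adjustments */
        priv->phylink_config.mac_capabilities = priv->hw->link.caps;
        if (priv->plat->max_speed)
                phylink_limit_mac_speed(&priv->phylink_config,
                                        priv->plat->max_speed);

Re-applying the max_speed clamp in stmmac_reinit_queues() is what makes the queue-count path correct: refreshing mac_capabilities from link.caps would otherwise silently drop a platform speed limit applied at probe time.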
index 2939a21ca74f3cf0f627981df74a949e9c61011e..1d00e21808c1c36dde2fcd4e6a864ca1ecf72a0b 100644
@@ -2793,6 +2793,8 @@ static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
 
 static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
 {
+       struct am65_cpsw_rx_chn *rx_chan = &common->rx_chns;
+       struct am65_cpsw_tx_chn *tx_chan = common->tx_chns;
        struct device *dev = common->dev;
        struct am65_cpsw_port *port;
        int ret = 0, i;
@@ -2805,6 +2807,22 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
        if (ret)
                return ret;
 
+       /* The DMA channels are not guaranteed to be in a clean state.
+        * Reset and disable them to ensure that they are back to a
+        * clean state and ready to be used.
+        */
+       for (i = 0; i < common->tx_ch_num; i++) {
+               k3_udma_glue_reset_tx_chn(tx_chan[i].tx_chn, &tx_chan[i],
+                                         am65_cpsw_nuss_tx_cleanup);
+               k3_udma_glue_disable_tx_chn(tx_chan[i].tx_chn);
+       }
+
+       for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
+               k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i, rx_chan,
+                                         am65_cpsw_nuss_rx_cleanup, !!i);
+
+       k3_udma_glue_disable_rx_chn(rx_chan->rx_chn);
+
        ret = am65_cpsw_nuss_register_devlink(common);
        if (ret)
                return ret;
index 8f95a562b8d0c471c44591629e04809f7faef9b2..86515f0c2b6c1dd4de66b362e31aa59494bfb554 100644
@@ -2132,14 +2132,16 @@ static ssize_t tun_put_user(struct tun_struct *tun,
                                            tun_is_little_endian(tun), true,
                                            vlan_hlen)) {
                        struct skb_shared_info *sinfo = skb_shinfo(skb);
-                       pr_err("unexpected GSO type: "
-                              "0x%x, gso_size %d, hdr_len %d\n",
-                              sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
-                              tun16_to_cpu(tun, gso.hdr_len));
-                       print_hex_dump(KERN_ERR, "tun: ",
-                                      DUMP_PREFIX_NONE,
-                                      16, 1, skb->head,
-                                      min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
+
+                       if (net_ratelimit()) {
+                               netdev_err(tun->dev, "unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
+                                          sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
+                                          tun16_to_cpu(tun, gso.hdr_len));
+                               print_hex_dump(KERN_ERR, "tun: ",
+                                              DUMP_PREFIX_NONE,
+                                              16, 1, skb->head,
+                                              min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
+                       }
                        WARN_ON_ONCE(1);
                        return -EINVAL;
                }
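Guarding the diagnostic with net_ratelimit() keeps a guest that repeatedly submits bad GSO metadata from flooding the kernel log, while WARN_ON_ONCE() still emits a single backtrace. The pattern in isolation (hypothetical names):

        if (net_ratelimit())            /* global printk rate limiter */
                netdev_err(dev, "unexpected GSO type: 0x%x\n", gso_type);
        WARN_ON_ONCE(1);                /* one-shot backtrace regardless */
        return -EINVAL;

Switching from pr_err() to netdev_err() also tags each message with the interface name, which matters on hosts running many tun devices.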
index e0e9b4c53cb025b369c05bf77ec932286a18cb4c..3078511f76083340e5d3762e45b7a8c4187058be 100644
@@ -1317,6 +1317,8 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
 
        netif_set_tso_max_size(dev->net, 16384);
 
+       ax88179_reset(dev);
+
        return 0;
 }
 
@@ -1695,7 +1697,6 @@ static const struct driver_info ax88179_info = {
        .unbind = ax88179_unbind,
        .status = ax88179_status,
        .link_reset = ax88179_link_reset,
-       .reset = ax88179_reset,
        .stop = ax88179_stop,
        .flags = FLAG_ETHER | FLAG_FRAMING_AX,
        .rx_fixup = ax88179_rx_fixup,
@@ -1708,7 +1709,6 @@ static const struct driver_info ax88178a_info = {
        .unbind = ax88179_unbind,
        .status = ax88179_status,
        .link_reset = ax88179_link_reset,
-       .reset = ax88179_reset,
        .stop = ax88179_stop,
        .flags = FLAG_ETHER | FLAG_FRAMING_AX,
        .rx_fixup = ax88179_rx_fixup,
index b456370166b6bb2158ca0916e0eb9e106f9fd9d7..b4f49720c87f62aa6e8349af12797382f740c2b7 100644
@@ -208,6 +208,15 @@ static const struct dmi_system_id fwbug_list[] = {
                        DMI_MATCH(DMI_BIOS_VERSION, "03.03"),
                }
        },
+       {
+               .ident = "Framework Laptop 13 (Phoenix)",
+               .driver_data = &quirk_spurious_8042,
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Framework"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Laptop 13 (AMD Ryzen 7040Series)"),
+                       DMI_MATCH(DMI_BIOS_VERSION, "03.05"),
+               }
+       },
        {}
 };
 
index 0cfb179e1bcb61d59be433b7a0e47c7bf7cabd9d..34b2567b8df497391a2970ba65a7c4c846b635f8 100644
@@ -363,10 +363,8 @@ int ccw_device_set_online(struct ccw_device *cdev)
 
        spin_lock_irq(cdev->ccwlock);
        ret = ccw_device_online(cdev);
-       spin_unlock_irq(cdev->ccwlock);
-       if (ret == 0)
-               wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
-       else {
+       if (ret) {
+               spin_unlock_irq(cdev->ccwlock);
                CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
                              "device 0.%x.%04x\n",
                              ret, cdev->private->dev_id.ssid,
@@ -375,7 +373,12 @@ int ccw_device_set_online(struct ccw_device *cdev)
                put_device(&cdev->dev);
                return ret;
        }
-       spin_lock_irq(cdev->ccwlock);
+       /* Wait until a final state is reached */
+       while (!dev_fsm_final_state(cdev)) {
+               spin_unlock_irq(cdev->ccwlock);
+               wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
+               spin_lock_irq(cdev->ccwlock);
+       }
        /* Check if online processing was successful */
        if ((cdev->private->state != DEV_STATE_ONLINE) &&
            (cdev->private->state != DEV_STATE_W4SENSE)) {
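The locking fix above follows the standard drop-lock-and-wait shape: the final-state check must be made under cdev->ccwlock, but wait_event() can sleep, so the lock is released around every wait and re-taken before the condition is re-tested. Generic sketch (hypothetical names):

        spin_lock_irq(lock);
        while (!final_state(dev)) {
                spin_unlock_irq(lock);  /* never sleep under a spinlock */
                wait_event(dev->wait_q, final_state(dev));
                spin_lock_irq(lock);    /* re-test under the lock */
        }
        /* ... inspect the state, still holding the lock ... */

The loop, rather than a single wait, covers the window in which the state can change again between wait_event() returning and the lock being re-acquired.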
index 9cde55730b65a733b5d1cb9977c83750392e6312..ebcb535809882f464a5eed2015e4dc14940b7beb 100644
@@ -722,8 +722,8 @@ static void qdio_handle_activate_check(struct qdio_irq *irq_ptr,
        lgr_info_log();
 }
 
-static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
-                                     int dstat)
+static int qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
+                                    int dstat, int dcc)
 {
        DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
 
@@ -731,15 +731,18 @@ static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
                goto error;
        if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
                goto error;
+       if (dcc == 1)
+               return -EAGAIN;
        if (!(dstat & DEV_STAT_DEV_END))
                goto error;
        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
-       return;
+       return 0;
 
 error:
        DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
        DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
+       return -EIO;
 }
 
 /* qdio interrupt handler */
@@ -748,7 +751,7 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
 {
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
        struct subchannel_id schid;
-       int cstat, dstat;
+       int cstat, dstat, rc, dcc;
 
        if (!intparm || !irq_ptr) {
                ccw_device_get_schid(cdev, &schid);
@@ -768,10 +771,12 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
        qdio_irq_check_sense(irq_ptr, irb);
        cstat = irb->scsw.cmd.cstat;
        dstat = irb->scsw.cmd.dstat;
+       dcc   = scsw_cmd_is_valid_cc(&irb->scsw) ? irb->scsw.cmd.cc : 0;
+       rc    = 0;
 
        switch (irq_ptr->state) {
        case QDIO_IRQ_STATE_INACTIVE:
-               qdio_establish_handle_irq(irq_ptr, cstat, dstat);
+               rc = qdio_establish_handle_irq(irq_ptr, cstat, dstat, dcc);
                break;
        case QDIO_IRQ_STATE_CLEANUP:
                qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
@@ -785,12 +790,25 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
                if (cstat || dstat)
                        qdio_handle_activate_check(irq_ptr, intparm, cstat,
                                                   dstat);
+               else if (dcc == 1)
+                       rc = -EAGAIN;
                break;
        case QDIO_IRQ_STATE_STOPPED:
                break;
        default:
                WARN_ON_ONCE(1);
        }
+
+       if (rc == -EAGAIN) {
+               DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qint retry");
+               rc = ccw_device_start(cdev, irq_ptr->ccw, intparm, 0, 0);
+               if (!rc)
+                       return;
+               DBF_ERROR("%4x RETRY ERR", irq_ptr->schid.sch_no);
+               DBF_ERROR("rc:%4x", rc);
+               qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
+       }
+
        wake_up(&cdev->private->wait_q);
 }
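A deferred condition code (cc 1) means the channel program was not started at all, so both the establish and activate paths now report -EAGAIN and the handler re-issues the request once via ccw_device_start(), exactly as in the tail of the hunk above:

        if (rc == -EAGAIN) {
                rc = ccw_device_start(cdev, irq_ptr->ccw, intparm, 0, 0);
                if (rc)                 /* retry failed: surface the error */
                        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
        }

Note the scsw_cmd_is_valid_cc() check before irb->scsw.cmd.cc is trusted; a stale condition code can therefore never trigger a spurious retry.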
 
index 2c8e964425dc38ca80fa5009b17b4e9dc29bbf10..43778b088ffac54c4a8911f3e41e187f0ae3f364 100644
@@ -292,13 +292,16 @@ static int ism_read_local_gid(struct ism_dev *ism)
 static void ism_free_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
 {
        clear_bit(dmb->sba_idx, ism->sba_bitmap);
-       dma_free_coherent(&ism->pdev->dev, dmb->dmb_len,
-                         dmb->cpu_addr, dmb->dma_addr);
+       dma_unmap_page(&ism->pdev->dev, dmb->dma_addr, dmb->dmb_len,
+                      DMA_FROM_DEVICE);
+       folio_put(virt_to_folio(dmb->cpu_addr));
 }
 
 static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
 {
+       struct folio *folio;
        unsigned long bit;
+       int rc;
 
        if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
                return -EINVAL;
@@ -315,14 +318,30 @@ static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
            test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
                return -EINVAL;
 
-       dmb->cpu_addr = dma_alloc_coherent(&ism->pdev->dev, dmb->dmb_len,
-                                          &dmb->dma_addr,
-                                          GFP_KERNEL | __GFP_NOWARN |
-                                          __GFP_NOMEMALLOC | __GFP_NORETRY);
-       if (!dmb->cpu_addr)
-               clear_bit(dmb->sba_idx, ism->sba_bitmap);
+       folio = folio_alloc(GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC |
+                           __GFP_NORETRY, get_order(dmb->dmb_len));
 
-       return dmb->cpu_addr ? 0 : -ENOMEM;
+       if (!folio) {
+               rc = -ENOMEM;
+               goto out_bit;
+       }
+
+       dmb->cpu_addr = folio_address(folio);
+       dmb->dma_addr = dma_map_page(&ism->pdev->dev,
+                                    virt_to_page(dmb->cpu_addr), 0,
+                                    dmb->dmb_len, DMA_FROM_DEVICE);
+       if (dma_mapping_error(&ism->pdev->dev, dmb->dma_addr)) {
+               rc = -ENOMEM;
+               goto out_free;
+       }
+
+       return 0;
+
+out_free:
+       kfree(dmb->cpu_addr);
+out_bit:
+       clear_bit(dmb->sba_idx, ism->sba_bitmap);
+       return rc;
 }
 
 int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,
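ism_alloc_dmb() above swaps the coherent allocation for a plain folio allocation plus a streaming DMA mapping, with ism_free_dmb() unwinding in the reverse order. A minimal sketch of the pattern, assuming folio_put() for the unwind (folio_put() is the canonical release for folio_alloc() memory):

        struct folio *folio = folio_alloc(GFP_KERNEL, get_order(len));
        dma_addr_t dma;

        if (!folio)
                return -ENOMEM;
        dma = dma_map_page(dev, folio_page(folio, 0), 0, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, dma)) {
                folio_put(folio);       /* undo the page allocation */
                return -ENOMEM;
        }
        /* teardown: dma_unmap_page(dev, dma, len, DMA_FROM_DEVICE);
         *           folio_put(folio);
         */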
index df5ac03d5d6c2eb5233ad7fcfdad37a1e487b4e6..189dfeb3782027edbc1a216abf32b1f0b4c8b179 100644
@@ -543,10 +543,9 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
        if (blk_queue_add_random(q))
                add_disk_randomness(req->q->disk);
 
-       if (!blk_rq_is_passthrough(req)) {
-               WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
-               cmd->flags &= ~SCMD_INITIALIZED;
-       }
+       WARN_ON_ONCE(!blk_rq_is_passthrough(req) &&
+                    !(cmd->flags & SCMD_INITIALIZED));
+       cmd->flags = 0;
 
        /*
         * Calling rcu_barrier() is not necessary here because the
index c617e8b9f0ddfe18bcb34155e156af47e4006837..d78d54ae2605e8ab3050dd7a1e68fb13688a78c5 100644
@@ -616,6 +616,7 @@ void thermal_debug_tz_trip_up(struct thermal_zone_device *tz,
        tze->trip_stats[trip_id].timestamp = now;
        tze->trip_stats[trip_id].max = max(tze->trip_stats[trip_id].max, temperature);
        tze->trip_stats[trip_id].min = min(tze->trip_stats[trip_id].min, temperature);
+       tze->trip_stats[trip_id].count++;
        tze->trip_stats[trip_id].avg = tze->trip_stats[trip_id].avg +
                (temperature - tze->trip_stats[trip_id].avg) /
                tze->trip_stats[trip_id].count;
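The added increment makes the incremental mean well defined: the sample count must grow before it is used as the divisor, otherwise the first sample divides by zero and every later average behaves as if one sample were missing. In isolation:

        /* Incremental mean: avg_n = avg_(n-1) + (x_n - avg_(n-1)) / n */
        count++;
        avg += (temperature - avg) / count;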
index 9fb1a64f3300b8fcf7ebc1f7dafa6a68778da122..df0d845e069aca41643e850b84673b7a73330396 100644
@@ -423,6 +423,7 @@ struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize
 /**
  * tb_domain_add() - Add domain to the system
  * @tb: Domain to add
+ * @reset: Issue reset to the host router
  *
  * Starts the domain and adds it to the system. Hotplugging devices will
 * work after this function has returned successfully. In order to remove
@@ -431,7 +432,7 @@ struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize
  *
  * Return: %0 in case of success and negative errno in case of error
  */
-int tb_domain_add(struct tb *tb)
+int tb_domain_add(struct tb *tb, bool reset)
 {
        int ret;
 
@@ -460,7 +461,7 @@ int tb_domain_add(struct tb *tb)
 
        /* Start the domain */
        if (tb->cm_ops->start) {
-               ret = tb->cm_ops->start(tb);
+               ret = tb->cm_ops->start(tb, reset);
                if (ret)
                        goto err_domain_del;
        }
index 56790d50f9e3296bd3607162f6b2030fe28dc77a..baf10d099c7780ee9c83846f391ab1cf796f6f84 100644
@@ -2144,7 +2144,7 @@ static int icm_runtime_resume(struct tb *tb)
        return 0;
 }
 
-static int icm_start(struct tb *tb)
+static int icm_start(struct tb *tb, bool not_used)
 {
        struct icm *icm = tb_priv(tb);
        int ret;
index 633970fbe9b05904ca4f46876a3561265ed83318..63cb4b6afb718aca9689a01695d9625d83d29d03 100644
@@ -6,6 +6,8 @@
  * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
  */
 
+#include <linux/delay.h>
+
 #include "tb.h"
 
 /**
@@ -45,6 +47,49 @@ static int find_port_lc_cap(struct tb_port *port)
        return sw->cap_lc + start + phys * size;
 }
 
+/**
+ * tb_lc_reset_port() - Trigger downstream port reset through LC
+ * @port: Port that is reset
+ *
+ * Triggers downstream port reset through link controller registers.
+ * Returns %0 in case of success, negative errno otherwise. Only supports
+ * non-USB4 routers with link controller (that's Thunderbolt 2 and
+ * Thunderbolt 3).
+ */
+int tb_lc_reset_port(struct tb_port *port)
+{
+       struct tb_switch *sw = port->sw;
+       int cap, ret;
+       u32 mode;
+
+       if (sw->generation < 2)
+               return -EINVAL;
+
+       cap = find_port_lc_cap(port);
+       if (cap < 0)
+               return cap;
+
+       ret = tb_sw_read(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
+       if (ret)
+               return ret;
+
+       mode |= TB_LC_PORT_MODE_DPR;
+
+       ret = tb_sw_write(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
+       if (ret)
+               return ret;
+
+       fsleep(10000);
+
+       ret = tb_sw_read(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
+       if (ret)
+               return ret;
+
+       mode &= ~TB_LC_PORT_MODE_DPR;
+
+       return tb_sw_write(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
+}
+
 static int tb_lc_set_port_configured(struct tb_port *port, bool configured)
 {
        bool upstream = tb_is_upstream_port(port);
index fb4f46e51753ab8f3952c7093489571d6a40a1b7..b22023fae60de43d8e456f24a9bf94770461a97d 100644
@@ -1221,7 +1221,7 @@ static void nhi_check_iommu(struct tb_nhi *nhi)
                str_enabled_disabled(port_ok));
 }
 
-static void nhi_reset(struct tb_nhi *nhi)
+static bool nhi_reset(struct tb_nhi *nhi)
 {
        ktime_t timeout;
        u32 val;
@@ -1229,11 +1229,11 @@ static void nhi_reset(struct tb_nhi *nhi)
        val = ioread32(nhi->iobase + REG_CAPS);
        /* Reset only v2 and later routers */
        if (FIELD_GET(REG_CAPS_VERSION_MASK, val) < REG_CAPS_VERSION_2)
-               return;
+               return false;
 
        if (!host_reset) {
                dev_dbg(&nhi->pdev->dev, "skipping host router reset\n");
-               return;
+               return false;
        }
 
        iowrite32(REG_RESET_HRR, nhi->iobase + REG_RESET);
@@ -1244,12 +1244,14 @@ static void nhi_reset(struct tb_nhi *nhi)
                val = ioread32(nhi->iobase + REG_RESET);
                if (!(val & REG_RESET_HRR)) {
                        dev_warn(&nhi->pdev->dev, "host router reset successful\n");
-                       return;
+                       return true;
                }
                usleep_range(10, 20);
        } while (ktime_before(ktime_get(), timeout));
 
        dev_warn(&nhi->pdev->dev, "timeout resetting host router\n");
+
+       return false;
 }
 
 static int nhi_init_msi(struct tb_nhi *nhi)
@@ -1331,6 +1333,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        struct device *dev = &pdev->dev;
        struct tb_nhi *nhi;
        struct tb *tb;
+       bool reset;
        int res;
 
        if (!nhi_imr_valid(pdev))
@@ -1365,7 +1368,11 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        nhi_check_quirks(nhi);
        nhi_check_iommu(nhi);
 
-       nhi_reset(nhi);
+       /*
+        * Only USB4 v2 hosts support host reset, so if we already did
+        * that then don't do it again when the domain is initialized.
+        */
+       reset = nhi_reset(nhi) ? false : host_reset;
 
        res = nhi_init_msi(nhi);
        if (res)
@@ -1392,7 +1399,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        dev_dbg(dev, "NHI initialized, starting thunderbolt\n");
 
-       res = tb_domain_add(tb);
+       res = tb_domain_add(tb, reset);
        if (res) {
                /*
                 * At this point the RX/TX rings might already have been
index 091a81bbdbdc94623b1fda9a059c1b6396595c15..f760e54cd9bd1f9cc8caf8ad43990e1d32996437 100644
@@ -446,6 +446,19 @@ static int __tb_path_deactivate_hop(struct tb_port *port, int hop_index,
        return -ETIMEDOUT;
 }
 
+/**
+ * tb_path_deactivate_hop() - Deactivate one path in path config space
+ * @port: Lane or protocol adapter
+ * @hop_index: HopID of the path to be cleared
+ *
+ * This deactivates or clears a single path config space entry at
+ * @hop_index. Returns %0 on success and negative errno otherwise.
+ */
+int tb_path_deactivate_hop(struct tb_port *port, int hop_index)
+{
+       return __tb_path_deactivate_hop(port, hop_index, true);
+}
+
 static void __tb_path_deactivate_hops(struct tb_path *path, int first_hop)
 {
        int i, res;
index fad40c4bc710341f27b7f98f6d4317aef6627ae0..7b086923cec6b2591eb5e9998f5986cf01df07ca 100644
@@ -676,6 +676,13 @@ int tb_port_disable(struct tb_port *port)
        return __tb_port_enable(port, false);
 }
 
+static int tb_port_reset(struct tb_port *port)
+{
+       if (tb_switch_is_usb4(port->sw))
+               return port->cap_usb4 ? usb4_port_reset(port) : 0;
+       return tb_lc_reset_port(port);
+}
+
 /*
  * tb_init_port() - initialize a port
  *
@@ -1534,29 +1541,124 @@ static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
               regs->__unknown1, regs->__unknown4);
 }
 
+static int tb_switch_reset_host(struct tb_switch *sw)
+{
+       if (sw->generation > 1) {
+               struct tb_port *port;
+
+               tb_switch_for_each_port(sw, port) {
+                       int i, ret;
+
+                       /*
+                        * For lane adapters we issue downstream port
+                        * reset and clean up the path config spaces.
+                        *
+                        * For protocol adapters we disable the path and
+                        * clear path config space one by one (from 8 to
+                        * Max Input HopID of the adapter).
+                        */
+                       if (tb_port_is_null(port) && !tb_is_upstream_port(port)) {
+                               ret = tb_port_reset(port);
+                               if (ret)
+                                       return ret;
+                       } else if (tb_port_is_usb3_down(port) ||
+                                  tb_port_is_usb3_up(port)) {
+                               tb_usb3_port_enable(port, false);
+                       } else if (tb_port_is_dpin(port) ||
+                                  tb_port_is_dpout(port)) {
+                               tb_dp_port_enable(port, false);
+                       } else if (tb_port_is_pcie_down(port) ||
+                                  tb_port_is_pcie_up(port)) {
+                               tb_pci_port_enable(port, false);
+                       } else {
+                               continue;
+                       }
+
+                       /* Cleanup path config space of protocol adapter */
+                       for (i = TB_PATH_MIN_HOPID;
+                            i <= port->config.max_in_hop_id; i++) {
+                               ret = tb_path_deactivate_hop(port, i);
+                               if (ret)
+                                       return ret;
+                       }
+               }
+       } else {
+               struct tb_cfg_result res;
+
+               /* Thunderbolt 1 uses the "reset" config space packet */
+               res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
+                                     TB_CFG_SWITCH, 2, 2);
+               if (res.err)
+                       return res.err;
+               res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
+               if (res.err > 0)
+                       return -EIO;
+               else if (res.err < 0)
+                       return res.err;
+       }
+
+       return 0;
+}
+
+static int tb_switch_reset_device(struct tb_switch *sw)
+{
+       return tb_port_reset(tb_switch_downstream_port(sw));
+}
+
+static bool tb_switch_enumerated(struct tb_switch *sw)
+{
+       u32 val;
+       int ret;
+
+       /*
+        * Read directly from the hardware because we use this also
+        * during system sleep where sw->config.enabled is already set
+        * by us.
+        */
+       ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_3, 1);
+       if (ret)
+               return false;
+
+       return !!(val & ROUTER_CS_3_V);
+}
+
 /**
- * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
- * @sw: Switch to reset
+ * tb_switch_reset() - Perform reset to the router
+ * @sw: Router to reset
  *
- * Return: Returns 0 on success or an error code on failure.
+ * Issues a reset to the router @sw. Can be used for any router. For host
+ * routers, resets all the downstream ports and cleans up path config
+ * spaces accordingly. For device routers, issues a downstream port reset
+ * through the parent router, so as a side effect there will be an unplug
+ * soon after this is finished.
+ *
+ * If the router is not enumerated, this does nothing.
+ *
+ * Returns %0 on success or negative errno in case of failure.
  */
 int tb_switch_reset(struct tb_switch *sw)
 {
-       struct tb_cfg_result res;
+       int ret;
 
-       if (sw->generation > 1)
+       /*
+        * We cannot access the port config spaces unless the router is
+        * already enumerated. If the router is not enumerated it is
+        * equivalent to being reset, so we can skip that here.
+        */
+       if (!tb_switch_enumerated(sw))
                return 0;
 
-       tb_sw_dbg(sw, "resetting switch\n");
+       tb_sw_dbg(sw, "resetting\n");
+
+       if (tb_route(sw))
+               ret = tb_switch_reset_device(sw);
+       else
+               ret = tb_switch_reset_host(sw);
 
-       res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
-                             TB_CFG_SWITCH, 2, 2);
-       if (res.err)
-               return res.err;
-       res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
-       if (res.err > 0)
-               return -EIO;
-       return res.err;
+       if (ret)
+               tb_sw_warn(sw, "failed to reset\n");
+
+       return ret;
 }
 
 /**
@@ -3078,22 +3180,29 @@ void tb_switch_unconfigure_link(struct tb_switch *sw)
 {
        struct tb_port *up, *down;
 
-       if (sw->is_unplugged)
-               return;
        if (!tb_route(sw) || tb_switch_is_icm(sw))
                return;
 
+       /*
+        * Unconfigure downstream port so that wake-on-connect can be
+        * configured after router unplug. No need to unconfigure upstream port
+        * since its router is unplugged.
+        */
        up = tb_upstream_port(sw);
-       if (tb_switch_is_usb4(up->sw))
-               usb4_port_unconfigure(up);
-       else
-               tb_lc_unconfigure_port(up);
-
        down = up->remote;
        if (tb_switch_is_usb4(down->sw))
                usb4_port_unconfigure(down);
        else
                tb_lc_unconfigure_port(down);
+
+       if (sw->is_unplugged)
+               return;
+
+       up = tb_upstream_port(sw);
+       if (tb_switch_is_usb4(up->sw))
+               usb4_port_unconfigure(up);
+       else
+               tb_lc_unconfigure_port(up);
 }
 
 static void tb_switch_credits_init(struct tb_switch *sw)
@@ -3339,7 +3448,26 @@ static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
        return tb_lc_set_wake(sw, flags);
 }
 
-int tb_switch_resume(struct tb_switch *sw)
+static void tb_switch_check_wakes(struct tb_switch *sw)
+{
+       if (device_may_wakeup(&sw->dev)) {
+               if (tb_switch_is_usb4(sw))
+                       usb4_switch_check_wakes(sw);
+       }
+}
+
+/**
+ * tb_switch_resume() - Resume a switch after sleep
+ * @sw: Switch to resume
+ * @runtime: Is this resume from runtime suspend or system sleep
+ *
+ * Resumes and re-enumerates the router (and all its children), if it is
+ * still plugged in after suspend. Does not enumerate a device router whose
+ * UID changed during suspend. If this is a resume from system sleep,
+ * notifies the PM core about the wakes that occurred during suspend.
+ * Disables all wakes, except the USB4 wake of the upstream port, which for
+ * USB4 routers shall always stay enabled.
+ */
+int tb_switch_resume(struct tb_switch *sw, bool runtime)
 {
        struct tb_port *port;
        int err;
@@ -3388,6 +3516,9 @@ int tb_switch_resume(struct tb_switch *sw)
        if (err)
                return err;
 
+       if (!runtime)
+               tb_switch_check_wakes(sw);
+
        /* Disable wakes */
        tb_switch_set_wake(sw, 0);
 
@@ -3417,7 +3548,8 @@ int tb_switch_resume(struct tb_switch *sw)
                         */
                        if (tb_port_unlock(port))
                                tb_port_warn(port, "failed to unlock port\n");
-                       if (port->remote && tb_switch_resume(port->remote->sw)) {
+                       if (port->remote &&
+                           tb_switch_resume(port->remote->sw, runtime)) {
                                tb_port_warn(port,
                                             "lost during suspend, disconnecting\n");
                                tb_sw_set_unplugged(port->remote->sw);
index e6681f153c69d86de31e332509f5d51252ddbe7a..525f515e8b48bacae308d21396ac568ba5e24900 100644
@@ -1717,6 +1717,12 @@ static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
                        continue;
                }
 
+               /* Needs to be on different routers */
+               if (in->sw == port->sw) {
+                       tb_port_dbg(port, "skipping DP OUT on same router\n");
+                       continue;
+               }
+
                tb_port_dbg(port, "DP OUT available\n");
 
                /*
@@ -2628,7 +2634,7 @@ static int tb_scan_finalize_switch(struct device *dev, void *data)
        return 0;
 }
 
-static int tb_start(struct tb *tb)
+static int tb_start(struct tb *tb, bool reset)
 {
        struct tb_cm *tcm = tb_priv(tb);
        int ret;
@@ -2669,12 +2675,24 @@ static int tb_start(struct tb *tb)
        tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_MODE_LOWRES);
        /* Enable TMU if it is off */
        tb_switch_tmu_enable(tb->root_switch);
-       /* Full scan to discover devices added before the driver was loaded. */
-       tb_scan_switch(tb->root_switch);
-       /* Find out tunnels created by the boot firmware */
-       tb_discover_tunnels(tb);
-       /* Add DP resources from the DP tunnels created by the boot firmware */
-       tb_discover_dp_resources(tb);
+
+       /*
+        * Boot firmware might have created tunnels of its own. Since we
+        * cannot be sure they are usable for us, tear them down and
+        * reset the ports so this is handled as a new hotplug for USB4 v1
+        * routers (for USB4 v2 and beyond we already do host reset).
+        */
+       if (reset && usb4_switch_version(tb->root_switch) == 1) {
+               tb_switch_reset(tb->root_switch);
+       } else {
+               /* Full scan to discover devices added before the driver was loaded. */
+               tb_scan_switch(tb->root_switch);
+               /* Find out tunnels created by the boot firmware */
+               tb_discover_tunnels(tb);
+               /* Add DP resources from the DP tunnels created by the boot firmware */
+               tb_discover_dp_resources(tb);
+       }
+
        /*
         * If the boot firmware did not create USB 3.x tunnels create them
         * now for the whole topology.
@@ -2745,10 +2763,14 @@ static int tb_resume_noirq(struct tb *tb)
 
        tb_dbg(tb, "resuming...\n");
 
-       /* remove any pci devices the firmware might have setup */
-       tb_switch_reset(tb->root_switch);
+       /*
+        * For non-USB4 hosts (Apple systems) remove any PCIe devices
+        * the firmware might have set up.
+        */
+       if (!tb_switch_is_usb4(tb->root_switch))
+               tb_switch_reset(tb->root_switch);
 
-       tb_switch_resume(tb->root_switch);
+       tb_switch_resume(tb->root_switch, false);
        tb_free_invalid_tunnels(tb);
        tb_free_unplugged_children(tb->root_switch);
        tb_restore_children(tb->root_switch);
@@ -2874,7 +2896,7 @@ static int tb_runtime_resume(struct tb *tb)
        struct tb_tunnel *tunnel, *n;
 
        mutex_lock(&tb->lock);
-       tb_switch_resume(tb->root_switch);
+       tb_switch_resume(tb->root_switch, true);
        tb_free_invalid_tunnels(tb);
        tb_restore_children(tb->root_switch);
        list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
index e4d4effb944470844a0e1bf9104096f60ee1acfe..7706f8e08c8d68cb7f98ac31e6d3d31769047d29 100644
@@ -487,7 +487,7 @@ struct tb_path {
  */
 struct tb_cm_ops {
        int (*driver_ready)(struct tb *tb);
-       int (*start)(struct tb *tb);
+       int (*start)(struct tb *tb, bool reset);
        void (*stop)(struct tb *tb);
        int (*suspend_noirq)(struct tb *tb);
        int (*resume_noirq)(struct tb *tb);
@@ -750,7 +750,7 @@ int tb_xdomain_init(void);
 void tb_xdomain_exit(void);
 
 struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize);
-int tb_domain_add(struct tb *tb);
+int tb_domain_add(struct tb *tb, bool reset);
 void tb_domain_remove(struct tb *tb);
 int tb_domain_suspend_noirq(struct tb *tb);
 int tb_domain_resume_noirq(struct tb *tb);
@@ -817,7 +817,7 @@ int tb_switch_configuration_valid(struct tb_switch *sw);
 int tb_switch_add(struct tb_switch *sw);
 void tb_switch_remove(struct tb_switch *sw);
 void tb_switch_suspend(struct tb_switch *sw, bool runtime);
-int tb_switch_resume(struct tb_switch *sw);
+int tb_switch_resume(struct tb_switch *sw, bool runtime);
 int tb_switch_reset(struct tb_switch *sw);
 int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
                           u32 value, int timeout_msec);
@@ -1154,6 +1154,7 @@ struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
 void tb_path_free(struct tb_path *path);
 int tb_path_activate(struct tb_path *path);
 void tb_path_deactivate(struct tb_path *path);
+int tb_path_deactivate_hop(struct tb_port *port, int hop_index);
 bool tb_path_is_invalid(struct tb_path *path);
 bool tb_path_port_on_path(const struct tb_path *path,
                          const struct tb_port *port);
@@ -1173,6 +1174,7 @@ int tb_drom_read(struct tb_switch *sw);
 int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid);
 
 int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid);
+int tb_lc_reset_port(struct tb_port *port);
 int tb_lc_configure_port(struct tb_port *port);
 void tb_lc_unconfigure_port(struct tb_port *port);
 int tb_lc_configure_xdomain(struct tb_port *port);
@@ -1276,6 +1278,7 @@ static inline bool tb_switch_is_usb4(const struct tb_switch *sw)
        return usb4_switch_version(sw) > 0;
 }
 
+void usb4_switch_check_wakes(struct tb_switch *sw);
 int usb4_switch_setup(struct tb_switch *sw);
 int usb4_switch_configuration_valid(struct tb_switch *sw);
 int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid);
@@ -1305,6 +1308,7 @@ void usb4_switch_remove_ports(struct tb_switch *sw);
 
 int usb4_port_unlock(struct tb_port *port);
 int usb4_port_hotplug_enable(struct tb_port *port);
+int usb4_port_reset(struct tb_port *port);
 int usb4_port_configure(struct tb_port *port);
 void usb4_port_unconfigure(struct tb_port *port);
 int usb4_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd);
index 6f798f6a2b8488ca5011fe813733ffc8b5942f48..4e43b47f9f1195077b3f38a0dbb7cc8264aa0b36 100644
@@ -194,6 +194,8 @@ struct tb_regs_switch_header {
 #define USB4_VERSION_MAJOR_MASK                        GENMASK(7, 5)
 
 #define ROUTER_CS_1                            0x01
+#define ROUTER_CS_3                            0x03
+#define ROUTER_CS_3_V                          BIT(31)
 #define ROUTER_CS_4                            0x04
 /* Used with the router cmuv field */
 #define ROUTER_CS_4_CMUV_V1                    0x10
@@ -389,6 +391,7 @@ struct tb_regs_port_header {
 #define PORT_CS_18_CSA                         BIT(22)
 #define PORT_CS_18_TIP                         BIT(24)
 #define PORT_CS_19                             0x13
+#define PORT_CS_19_DPR                         BIT(0)
 #define PORT_CS_19_PC                          BIT(3)
 #define PORT_CS_19_PID                         BIT(4)
 #define PORT_CS_19_WOC                         BIT(16)
@@ -584,6 +587,9 @@ struct tb_regs_hop {
 #define TB_LC_POWER                            0x740
 
 /* Link controller registers */
+#define TB_LC_PORT_MODE                                0x26
+#define TB_LC_PORT_MODE_DPR                    BIT(0)
+
 #define TB_LC_CS_42                            0x2a
 #define TB_LC_CS_42_USB_PLUGGED                        BIT(31)
 
index 1515eff8cc3e23434202fead2a9aa038080111d6..a74c9ea67bf5403d95757db936245be6ad908894 100644
@@ -155,7 +155,13 @@ static inline int usb4_switch_op_data(struct tb_switch *sw, u16 opcode,
                                tx_dwords, rx_data, rx_dwords);
 }
 
-static void usb4_switch_check_wakes(struct tb_switch *sw)
+/**
+ * usb4_switch_check_wakes() - Check for wakes and notify PM core about them
+ * @sw: Router whose wakes to check
+ *
+ * Checks for wakes that occurred during suspend and notifies the PM core
+ * about them.
+ */
+void usb4_switch_check_wakes(struct tb_switch *sw)
 {
        bool wakeup_usb4 = false;
        struct usb4_port *usb4;
@@ -163,9 +169,6 @@ static void usb4_switch_check_wakes(struct tb_switch *sw)
        bool wakeup = false;
        u32 val;
 
-       if (!device_may_wakeup(&sw->dev))
-               return;
-
        if (tb_route(sw)) {
                if (tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1))
                        return;
@@ -244,8 +247,6 @@ int usb4_switch_setup(struct tb_switch *sw)
        u32 val = 0;
        int ret;
 
-       usb4_switch_check_wakes(sw);
-
        if (!tb_route(sw))
                return 0;
 
@@ -1113,6 +1114,45 @@ int usb4_port_hotplug_enable(struct tb_port *port)
        return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
 }
 
+/**
+ * usb4_port_reset() - Issue downstream port reset
+ * @port: USB4 port to reset
+ *
+ * Issues downstream port reset to @port.
+ */
+int usb4_port_reset(struct tb_port *port)
+{
+       int ret;
+       u32 val;
+
+       if (!port->cap_usb4)
+               return -EINVAL;
+
+       ret = tb_port_read(port, &val, TB_CFG_PORT,
+                          port->cap_usb4 + PORT_CS_19, 1);
+       if (ret)
+               return ret;
+
+       val |= PORT_CS_19_DPR;
+
+       ret = tb_port_write(port, &val, TB_CFG_PORT,
+                           port->cap_usb4 + PORT_CS_19, 1);
+       if (ret)
+               return ret;
+
+       fsleep(10000);
+
+       ret = tb_port_read(port, &val, TB_CFG_PORT,
+                          port->cap_usb4 + PORT_CS_19, 1);
+       if (ret)
+               return ret;
+
+       val &= ~PORT_CS_19_DPR;
+
+       return tb_port_write(port, &val, TB_CFG_PORT,
+                            port->cap_usb4 + PORT_CS_19, 1);
+}
+
 static int usb4_port_set_configured(struct tb_port *port, bool configured)
 {
        int ret;
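usb4_port_reset() here and tb_lc_reset_port() earlier share the same downstream-port-reset choreography: read the register, set the DPR bit, hold it for 10 ms, then read back and clear it. As a schematic read-modify-write pulse (hypothetical register helpers):

        val = reg_read(port);
        reg_write(port, val | DPR);     /* assert downstream port reset */
        fsleep(10000);                  /* 10 ms hold time, as above */
        val = reg_read(port);
        reg_write(port, val & ~DPR);    /* deassert */

Re-reading before the clear matters: other bits may have changed while the reset was asserted, and writing back the stale value would clobber them.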
index c1d43f040c43abc517c4ef6b48a5423e936e97f2..2d1f350a4bea2a86103d707cc322ded0f5941abb 100644
@@ -357,9 +357,9 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
        long rate;
        int ret;
 
+       clk_disable_unprepare(d->clk);
        rate = clk_round_rate(d->clk, newrate);
-       if (rate > 0 && p->uartclk != rate) {
-               clk_disable_unprepare(d->clk);
+       if (rate > 0) {
                /*
                 * Note that any clock-notifier worker will block in
                 * serial8250_update_uartclk() until we are done.
@@ -367,8 +367,8 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
                ret = clk_set_rate(d->clk, newrate);
                if (!ret)
                        p->uartclk = rate;
-               clk_prepare_enable(d->clk);
        }
+       clk_prepare_enable(d->clk);
 
        dw8250_do_set_termios(p, termios, old);
 }
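The dw8250 reorder drops the p->uartclk != rate shortcut and cycles the clock around the rate query and change unconditionally, so the prepare/enable count stays balanced on every path. Schematically (hypothetical clock handle):

        clk_disable_unprepare(clk);     /* quiesce before touching the rate */
        rate = clk_round_rate(clk, newrate);
        if (rate > 0 && !clk_set_rate(clk, newrate))
                uartclk = rate;
        clk_prepare_enable(clk);        /* matches the disable on all paths */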
index 4749331fe618cad7c0af98630f90021b8244bd07..1e8853eae5042b3eb16a1ec191d6ae2970ae30dd 100644
@@ -1086,11 +1086,13 @@ static void mxs_auart_set_ldisc(struct uart_port *port,
 
 static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
 {
-       u32 istat;
+       u32 istat, stat;
        struct mxs_auart_port *s = context;
        u32 mctrl_temp = s->mctrl_prev;
-       u32 stat = mxs_read(s, REG_STAT);
 
+       uart_port_lock(&s->port);
+
+       stat = mxs_read(s, REG_STAT);
        istat = mxs_read(s, REG_INTR);
 
        /* ack irq */
@@ -1126,6 +1128,8 @@ static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
                istat &= ~AUART_INTR_TXIS;
        }
 
+       uart_port_unlock(&s->port);
+
        return IRQ_HANDLED;
 }
 
index c8bf08c19c647a761e8c4b99fb2453c726e96c18..77691fbbf779ae253c21794fc429894604f7011d 100644
@@ -210,7 +210,6 @@ static bool pmz_receive_chars(struct uart_pmac_port *uap)
 {
        struct tty_port *port;
        unsigned char ch, r1, drop, flag;
-       int loops = 0;
 
        /* Sanity check, make sure the old bug is no longer happening */
        if (uap->port.state == NULL) {
@@ -291,24 +290,11 @@ static bool pmz_receive_chars(struct uart_pmac_port *uap)
                if (r1 & Rx_OVR)
                        tty_insert_flip_char(port, 0, TTY_OVERRUN);
        next_char:
-               /* We can get stuck in an infinite loop getting char 0 when the
-                * line is in a wrong HW state, we break that here.
-                * When that happens, I disable the receive side of the driver.
-                * Note that what I've been experiencing is a real irq loop where
-                * I'm getting flooded regardless of the actual port speed.
-                * Something strange is going on with the HW
-                */
-               if ((++loops) > 1000)
-                       goto flood;
                ch = read_zsreg(uap, R0);
                if (!(ch & Rx_CH_AV))
                        break;
        }
 
-       return true;
- flood:
-       pmz_interrupt_control(uap, 0);
-       pmz_error("pmz: rx irq flood !\n");
        return true;
 }
 
index c74c548f0db62ae97ffdfbe33390bac50c412862..b6c38d2edfd401a79a937c0d149cbbb77ee3b342 100644
@@ -22,6 +22,7 @@ struct serial_ctrl_device {
 struct serial_port_device {
        struct device dev;
        struct uart_port *port;
+       unsigned int tx_enabled:1;
 };
 
 int serial_base_ctrl_init(void);
@@ -30,6 +31,9 @@ void serial_base_ctrl_exit(void);
 int serial_base_port_init(void);
 void serial_base_port_exit(void);
 
+void serial_base_port_startup(struct uart_port *port);
+void serial_base_port_shutdown(struct uart_port *port);
+
 int serial_base_driver_register(struct device_driver *driver);
 void serial_base_driver_unregister(struct device_driver *driver);
 
index ff85ebd3a007dba0105e38d732efc0f6a6f8ab5b..c476d884356dbda13ad2183a2d7218aa88ae3325 100644
@@ -156,7 +156,7 @@ static void __uart_start(struct uart_state *state)
         * enabled, serial_port_runtime_resume() calls start_tx() again
         * after enabling the device.
         */
-       if (pm_runtime_active(&port_dev->dev))
+       if (!pm_runtime_enabled(port->dev) || pm_runtime_active(&port_dev->dev))
                port->ops->start_tx(port);
        pm_runtime_mark_last_busy(&port_dev->dev);
        pm_runtime_put_autosuspend(&port_dev->dev);
@@ -323,16 +323,26 @@ static int uart_startup(struct tty_struct *tty, struct uart_state *state,
                        bool init_hw)
 {
        struct tty_port *port = &state->port;
+       struct uart_port *uport;
        int retval;
 
        if (tty_port_initialized(port))
-               return 0;
+               goto out_base_port_startup;
 
        retval = uart_port_startup(tty, state, init_hw);
-       if (retval)
+       if (retval) {
                set_bit(TTY_IO_ERROR, &tty->flags);
+               return retval;
+       }
 
-       return retval;
+out_base_port_startup:
+       uport = uart_port_check(state);
+       if (!uport)
+               return -EIO;
+
+       serial_base_port_startup(uport);
+
+       return 0;
 }
 
 /*
@@ -355,6 +365,9 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
        if (tty)
                set_bit(TTY_IO_ERROR, &tty->flags);
 
+       if (uport)
+               serial_base_port_shutdown(uport);
+
        if (tty_port_initialized(port)) {
                tty_port_set_initialized(port, false);
 
@@ -1775,6 +1788,7 @@ static void uart_tty_port_shutdown(struct tty_port *port)
        uport->ops->stop_rx(uport);
        uart_port_unlock_irq(uport);
 
+       serial_base_port_shutdown(uport);
        uart_port_shutdown(port);
 
        /*
@@ -1788,6 +1802,7 @@ static void uart_tty_port_shutdown(struct tty_port *port)
         * Free the transmit buffer.
         */
        uart_port_lock_irq(uport);
+       uart_circ_clear(&state->xmit);
        buf = state->xmit.buf;
        state->xmit.buf = NULL;
        uart_port_unlock_irq(uport);
index 72b6f4f326e2b04953875062f8ebc6e56f324e45..7d51e66ec88b9d0f1c787cf19ad3fc03135a778f 100644
@@ -36,8 +36,12 @@ static int serial_port_runtime_resume(struct device *dev)
 
        /* Flush any pending TX for the port */
        uart_port_lock_irqsave(port, &flags);
+       if (!port_dev->tx_enabled)
+               goto unlock;
        if (__serial_port_busy(port))
                port->ops->start_tx(port);
+
+unlock:
        uart_port_unlock_irqrestore(port, flags);
 
 out:
@@ -57,6 +61,11 @@ static int serial_port_runtime_suspend(struct device *dev)
                return 0;
 
        uart_port_lock_irqsave(port, &flags);
+       if (!port_dev->tx_enabled) {
+               uart_port_unlock_irqrestore(port, flags);
+               return 0;
+       }
+
        busy = __serial_port_busy(port);
        if (busy)
                port->ops->start_tx(port);
@@ -68,6 +77,31 @@ static int serial_port_runtime_suspend(struct device *dev)
        return busy ? -EBUSY : 0;
 }
 
+static void serial_base_port_set_tx(struct uart_port *port,
+                                   struct serial_port_device *port_dev,
+                                   bool enabled)
+{
+       unsigned long flags;
+
+       uart_port_lock_irqsave(port, &flags);
+       port_dev->tx_enabled = enabled;
+       uart_port_unlock_irqrestore(port, flags);
+}
+
+void serial_base_port_startup(struct uart_port *port)
+{
+       struct serial_port_device *port_dev = port->port_dev;
+
+       serial_base_port_set_tx(port, port_dev, true);
+}
+
+void serial_base_port_shutdown(struct uart_port *port)
+{
+       struct serial_port_device *port_dev = port->port_dev;
+
+       serial_base_port_set_tx(port, port_dev, false);
+}
+
 static DEFINE_RUNTIME_DEV_PM_OPS(serial_port_pm,
                                 serial_port_runtime_suspend,
                                 serial_port_runtime_resume, NULL);
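
The two serial hunks above cooperate: the core calls serial_base_port_startup()/serial_base_port_shutdown() around a port's life cycle, and the runtime-PM callbacks consult the resulting tx_enabled flag, written only under the port lock, before kicking TX. That stops a runtime resume from restarting TX on a port that has already been shut down. A minimal sketch of the lock-guarded-flag pattern (hypothetical names, not the driver's exact code):

    struct port_state {
            spinlock_t lock;
            bool tx_enabled;        /* true between startup and shutdown */
    };

    static void port_kick_tx(struct port_state *p)
    {
            unsigned long flags;

            spin_lock_irqsave(&p->lock, flags);
            if (p->tx_enabled)      /* startup ran, shutdown has not */
                    start_tx(p);    /* hypothetical TX-start helper */
            spin_unlock_irqrestore(&p->lock, flags);
    }
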
index 693e932d6feb5842467d1408e04c8d574342cb1f..d103b07d10ee5e08450388651f6aaeed3f896327 100644 (file)
@@ -857,6 +857,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
        const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
        u32 sr;
        unsigned int size;
+       irqreturn_t ret = IRQ_NONE;
 
        sr = readl_relaxed(port->membase + ofs->isr);
 
@@ -865,11 +866,14 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
            (sr & USART_SR_TC)) {
                stm32_usart_tc_interrupt_disable(port);
                stm32_usart_rs485_rts_disable(port);
+               ret = IRQ_HANDLED;
        }
 
-       if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG)
+       if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG) {
                writel_relaxed(USART_ICR_RTOCF,
                               port->membase + ofs->icr);
+               ret = IRQ_HANDLED;
+       }
 
        if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG) {
                /* Clear wake up flag and disable wake up interrupt */
@@ -878,6 +882,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
                stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
                if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
                        pm_wakeup_event(tport->tty->dev, 0);
+               ret = IRQ_HANDLED;
        }
 
        /*
@@ -892,6 +897,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
                        uart_unlock_and_check_sysrq(port);
                        if (size)
                                tty_flip_buffer_push(tport);
+                       ret = IRQ_HANDLED;
                }
        }
 
@@ -899,6 +905,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
                uart_port_lock(port);
                stm32_usart_transmit_chars(port);
                uart_port_unlock(port);
+               ret = IRQ_HANDLED;
        }
 
        /* Receiver timeout irq for DMA RX */
@@ -908,9 +915,10 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
                uart_unlock_and_check_sysrq(port);
                if (size)
                        tty_flip_buffer_push(tport);
+               ret = IRQ_HANDLED;
        }
 
-       return IRQ_HANDLED;
+       return ret;
 }
 
 static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl)
@@ -1080,6 +1088,7 @@ static int stm32_usart_startup(struct uart_port *port)
                val |= USART_CR2_SWAP;
                writel_relaxed(val, port->membase + ofs->cr2);
        }
+       stm32_port->throttled = false;
 
        /* RX FIFO Flush */
        if (ofs->rqr != UNDEF_REG)
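
The interrupt-handler hunks above implement the standard genirq contract: return IRQ_HANDLED only when the device actually raised a condition the handler serviced, and IRQ_NONE otherwise, so the core's spurious-interrupt detection keeps working on shared lines. A hedged distillation of the shape:

    static irqreturn_t my_isr(int irq, void *dev_id)    /* hypothetical driver */
    {
            struct my_dev *dev = dev_id;
            irqreturn_t ret = IRQ_NONE;
            u32 sr = read_status(dev);                  /* assumed register read */

            if (sr & EVT_RX) {
                    handle_rx(dev);
                    ret = IRQ_HANDLED;
            }
            if (sr & EVT_TX) {
                    handle_tx(dev);
                    ret = IRQ_HANDLED;
            }
            return ret;     /* IRQ_NONE lets the core flag a stuck line */
    }
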
index f532e2c004a25e030754ce347472e92ca7d45869..bcbcf758925be7889a1976a584536e39b12453d0 100644 (file)
@@ -47,7 +47,7 @@ enum {
        TSTBUS_MAX,
 };
 
-#define QCOM_UFS_MAX_GEAR 4
+#define QCOM_UFS_MAX_GEAR 5
 #define QCOM_UFS_MAX_LANE 2
 
 enum {
@@ -67,26 +67,32 @@ static const struct __ufs_qcom_bw_table {
        [MODE_PWM][UFS_PWM_G2][UFS_LANE_1] = { 1844,            1000 },
        [MODE_PWM][UFS_PWM_G3][UFS_LANE_1] = { 3688,            1000 },
        [MODE_PWM][UFS_PWM_G4][UFS_LANE_1] = { 7376,            1000 },
+       [MODE_PWM][UFS_PWM_G5][UFS_LANE_1] = { 14752,           1000 },
        [MODE_PWM][UFS_PWM_G1][UFS_LANE_2] = { 1844,            1000 },
        [MODE_PWM][UFS_PWM_G2][UFS_LANE_2] = { 3688,            1000 },
        [MODE_PWM][UFS_PWM_G3][UFS_LANE_2] = { 7376,            1000 },
        [MODE_PWM][UFS_PWM_G4][UFS_LANE_2] = { 14752,           1000 },
+       [MODE_PWM][UFS_PWM_G5][UFS_LANE_2] = { 29504,           1000 },
        [MODE_HS_RA][UFS_HS_G1][UFS_LANE_1] = { 127796,         1000 },
        [MODE_HS_RA][UFS_HS_G2][UFS_LANE_1] = { 255591,         1000 },
        [MODE_HS_RA][UFS_HS_G3][UFS_LANE_1] = { 1492582,        102400 },
        [MODE_HS_RA][UFS_HS_G4][UFS_LANE_1] = { 2915200,        204800 },
+       [MODE_HS_RA][UFS_HS_G5][UFS_LANE_1] = { 5836800,        409600 },
        [MODE_HS_RA][UFS_HS_G1][UFS_LANE_2] = { 255591,         1000 },
        [MODE_HS_RA][UFS_HS_G2][UFS_LANE_2] = { 511181,         1000 },
        [MODE_HS_RA][UFS_HS_G3][UFS_LANE_2] = { 1492582,        204800 },
        [MODE_HS_RA][UFS_HS_G4][UFS_LANE_2] = { 2915200,        409600 },
+       [MODE_HS_RA][UFS_HS_G5][UFS_LANE_2] = { 5836800,        819200 },
        [MODE_HS_RB][UFS_HS_G1][UFS_LANE_1] = { 149422,         1000 },
        [MODE_HS_RB][UFS_HS_G2][UFS_LANE_1] = { 298189,         1000 },
        [MODE_HS_RB][UFS_HS_G3][UFS_LANE_1] = { 1492582,        102400 },
        [MODE_HS_RB][UFS_HS_G4][UFS_LANE_1] = { 2915200,        204800 },
+       [MODE_HS_RB][UFS_HS_G5][UFS_LANE_1] = { 5836800,        409600 },
        [MODE_HS_RB][UFS_HS_G1][UFS_LANE_2] = { 298189,         1000 },
        [MODE_HS_RB][UFS_HS_G2][UFS_LANE_2] = { 596378,         1000 },
        [MODE_HS_RB][UFS_HS_G3][UFS_LANE_2] = { 1492582,        204800 },
        [MODE_HS_RB][UFS_HS_G4][UFS_LANE_2] = { 2915200,        409600 },
+       [MODE_HS_RB][UFS_HS_G5][UFS_LANE_2] = { 5836800,        819200 },
        [MODE_MAX][0][0]                    = { 7643136,        307200 },
 };
 
index c8262e2f291778c785193b4311ab9613d0c18697..c553decb5461078280b5f566a6ccd4035727c4a6 100644 (file)
@@ -485,7 +485,6 @@ static ssize_t wdm_write
 static int service_outstanding_interrupt(struct wdm_device *desc)
 {
        int rv = 0;
-       int used;
 
        /* submit read urb only if the device is waiting for it */
        if (!desc->resp_count || !--desc->resp_count)
@@ -500,10 +499,7 @@ static int service_outstanding_interrupt(struct wdm_device *desc)
                goto out;
        }
 
-       used = test_and_set_bit(WDM_RESPONDING, &desc->flags);
-       if (used)
-               goto out;
-
+       set_bit(WDM_RESPONDING, &desc->flags);
        spin_unlock_irq(&desc->iuspin);
        rv = usb_submit_urb(desc->response, GFP_KERNEL);
        spin_lock_irq(&desc->iuspin);
index a5776531ba4d38dcc990c0994fde90f48aea40cf..97ff2073cf1e27c2cef83660ec3465043bfd10be 100644 (file)
@@ -448,8 +448,10 @@ static void usb_port_shutdown(struct device *dev)
 {
        struct usb_port *port_dev = to_usb_port(dev);
 
-       if (port_dev->child)
+       if (port_dev->child) {
                usb_disable_usb2_hardware_lpm(port_dev->child);
+               usb_unlocked_disable_lpm(port_dev->child);
+       }
 }
 
 static const struct dev_pm_ops usb_port_pm_ops = {
index 79582b102c7eda2eb72f6da3c293615cdad50b56..994a78ad084b1c485673ca3c88819d4e752ef2ee 100644 (file)
@@ -867,13 +867,15 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
        struct dwc2_dma_desc *dma_desc;
        struct dwc2_hcd_iso_packet_desc *frame_desc;
        u16 frame_desc_idx;
-       struct urb *usb_urb = qtd->urb->priv;
+       struct urb *usb_urb;
        u16 remain = 0;
        int rc = 0;
 
        if (!qtd->urb)
                return -EINVAL;
 
+       usb_urb = qtd->urb->priv;
+
        dma_sync_single_for_cpu(hsotg->dev, qh->desc_list_dma + (idx *
                                sizeof(struct dwc2_dma_desc)),
                                sizeof(struct dwc2_dma_desc),
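
The dwc2 hunk fixes a classic C pitfall: a declaration's initializer runs before any statement below it, so the original "struct urb *usb_urb = qtd->urb->priv;" dereferenced qtd->urb before the "if (!qtd->urb)" guard could reject it. Before and after, distilled:

    /* buggy: the dereference happens in the initializer */
    struct urb *usb_urb = qtd->urb->priv;
    if (!qtd->urb)
            return -EINVAL;         /* too late, already dereferenced */

    /* fixed: validate first, then dereference */
    struct urb *usb_urb;
    if (!qtd->urb)
            return -EINVAL;
    usb_urb = qtd->urb->priv;
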
index 28f4e6552e84592566d261ec3174773650c5d444..0acc32ed99609f8166877bd221087211e90b119d 100644 (file)
@@ -878,7 +878,7 @@ static int ncm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
                if (alt > 1)
                        goto fail;
 
-               if (ncm->port.in_ep->enabled) {
+               if (ncm->netdev) {
                        DBG(cdev, "reset ncm\n");
                        ncm->netdev = NULL;
                        gether_disconnect(&ncm->port);
@@ -1367,7 +1367,7 @@ static void ncm_disable(struct usb_function *f)
 
        DBG(cdev, "ncm deactivated\n");
 
-       if (ncm->port.in_ep->enabled) {
+       if (ncm->netdev) {
                ncm->netdev = NULL;
                gether_disconnect(&ncm->port);
        }
index 0dd2b032c90b9bc6196ab7a3752e327c18a61fb0..e162838bec3d9259a12951952c32d82a12a96ae9 100644 (file)
@@ -78,7 +78,7 @@ static int onboard_hub_power_on(struct onboard_hub *hub)
        err = regulator_bulk_enable(hub->pdata->num_supplies, hub->supplies);
        if (err) {
                dev_err(hub->dev, "failed to enable supplies: %pe\n", ERR_PTR(err));
-               return err;
+               goto disable_clk;
        }
 
        fsleep(hub->pdata->reset_us);
@@ -87,6 +87,10 @@ static int onboard_hub_power_on(struct onboard_hub *hub)
        hub->is_powered_on = true;
 
        return 0;
+
+disable_clk:
+       clk_disable_unprepare(hub->clk);
+       return err;
 }
 
 static int onboard_hub_power_off(struct onboard_hub *hub)
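
The onboard-hub hunk is the kernel's usual goto-based unwind: when a later step fails, jump to a label that releases what earlier steps acquired, in reverse order, instead of returning directly. The shape, sketched with hypothetical context:

    static int power_on(struct ctx *c)
    {
            int err;

            err = clk_prepare_enable(c->clk);
            if (err)
                    return err;             /* nothing to undo yet */

            err = regulator_bulk_enable(c->num_supplies, c->supplies);
            if (err)
                    goto disable_clk;       /* undo the clock step */

            return 0;

    disable_clk:
            clk_disable_unprepare(c->clk);
            return err;
    }
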
index 55a65d941ccbfb1161d363ac72881ce0f490a8df..8a5846d4adf67e1de41ff8fa1b5ed28c1efcc960 100644 (file)
@@ -255,6 +255,10 @@ static void option_instat_callback(struct urb *urb);
 #define QUECTEL_PRODUCT_EM061K_LMS             0x0124
 #define QUECTEL_PRODUCT_EC25                   0x0125
 #define QUECTEL_PRODUCT_EM060K_128             0x0128
+#define QUECTEL_PRODUCT_EM060K_129             0x0129
+#define QUECTEL_PRODUCT_EM060K_12a             0x012a
+#define QUECTEL_PRODUCT_EM060K_12b             0x012b
+#define QUECTEL_PRODUCT_EM060K_12c             0x012c
 #define QUECTEL_PRODUCT_EG91                   0x0191
 #define QUECTEL_PRODUCT_EG95                   0x0195
 #define QUECTEL_PRODUCT_BG96                   0x0296
@@ -1218,6 +1222,18 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0xff, 0x30) },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0x00, 0x40) },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0xff, 0x40) },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_129, 0xff, 0xff, 0x30) },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_129, 0xff, 0x00, 0x40) },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_129, 0xff, 0xff, 0x40) },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12a, 0xff, 0xff, 0x30) },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12a, 0xff, 0x00, 0x40) },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12a, 0xff, 0xff, 0x40) },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12b, 0xff, 0xff, 0x30) },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12b, 0xff, 0x00, 0x40) },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12b, 0xff, 0xff, 0x40) },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12c, 0xff, 0xff, 0x30) },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12c, 0xff, 0x00, 0x40) },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12c, 0xff, 0xff, 0x40) },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x30) },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0x00, 0x40) },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x40) },
@@ -1360,6 +1376,12 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = NCTRL(2) | RSVD(3) },
        { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1083, 0xff),    /* Telit FE990 (ECM) */
          .driver_info = NCTRL(0) | RSVD(1) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a0, 0xff),    /* Telit FN20C04 (rmnet) */
+         .driver_info = RSVD(0) | NCTRL(3) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a4, 0xff),    /* Telit FN20C04 (rmnet) */
+         .driver_info = RSVD(0) | NCTRL(3) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a9, 0xff),    /* Telit FN20C04 (rmnet) */
+         .driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
          .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
@@ -2052,6 +2074,10 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = RSVD(3) },
        { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9803, 0xff),
          .driver_info = RSVD(4) },
+       { USB_DEVICE(LONGCHEER_VENDOR_ID, 0x9b05),      /* Longsung U8300 */
+         .driver_info = RSVD(4) | RSVD(5) },
+       { USB_DEVICE(LONGCHEER_VENDOR_ID, 0x9b3c),      /* Longsung U9300 */
+         .driver_info = RSVD(0) | RSVD(4) },
        { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
        { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
        { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
@@ -2272,15 +2298,29 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) },    /* Fibocom FG150 Diag */
        { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) },          /* Fibocom FG150 AT */
        { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0111, 0xff) },                   /* Fibocom FM160 (MBIM mode) */
+       { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0115, 0xff),                     /* Fibocom FM135 (laptop MBIM) */
+         .driver_info = RSVD(5) },
        { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) },                   /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
        { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) },                   /* Fibocom FM101-GL (laptop MBIM) */
        { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a3, 0xff) },                   /* Fibocom FM101-GL (laptop MBIM) */
        { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff),                     /* Fibocom FM101-GL (laptop MBIM) */
          .driver_info = RSVD(4) },
+       { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a04, 0xff) },                   /* Fibocom FM650-CN (ECM mode) */
+       { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a05, 0xff) },                   /* Fibocom FM650-CN (NCM mode) */
+       { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a06, 0xff) },                   /* Fibocom FM650-CN (RNDIS mode) */
+       { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a07, 0xff) },                   /* Fibocom FM650-CN (MBIM mode) */
        { USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) },                   /* LongSung M5710 */
        { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) },                   /* GosunCn GM500 RNDIS */
        { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) },                   /* GosunCn GM500 MBIM */
        { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) },                   /* GosunCn GM500 ECM/NCM */
+       { USB_DEVICE(0x33f8, 0x0104),                                           /* Rolling RW101-GL (laptop RMNET) */
+         .driver_info = RSVD(4) | RSVD(5) },
+       { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x01a2, 0xff) },                   /* Rolling RW101-GL (laptop MBIM) */
+       { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x01a3, 0xff) },                   /* Rolling RW101-GL (laptop MBIM) */
+       { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x01a4, 0xff),                     /* Rolling RW101-GL (laptop MBIM) */
+         .driver_info = RSVD(4) },
+       { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x0115, 0xff),                     /* Rolling RW135-GL (laptop MBIM) */
+         .driver_info = RSVD(5) },
        { USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
        { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) },
        { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) },
index f81699537312aa38043d820b6ffc4d0a77d6b154..df9a5d6760b455c53c69c16633286c6ff29ae5c7 100644 (file)
@@ -6111,14 +6111,14 @@ static int tcpm_pd_set(struct typec_port *p, struct usb_power_delivery *pd)
        if (data->sink_desc.pdo[0]) {
                for (i = 0; i < PDO_MAX_OBJECTS && data->sink_desc.pdo[i]; i++)
                        port->snk_pdo[i] = data->sink_desc.pdo[i];
-               port->nr_snk_pdo = i + 1;
+               port->nr_snk_pdo = i;
                port->operating_snk_mw = data->operating_snk_mw;
        }
 
        if (data->source_desc.pdo[0]) {
                for (i = 0; i < PDO_MAX_OBJECTS && data->source_desc.pdo[i]; i++)
                        port->src_pdo[i] = data->source_desc.pdo[i];
-               port->nr_src_pdo = i + 1;
+               port->nr_src_pdo = i;
        }
 
        switch (port->state) {
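
The tcpm hunk removes an off-by-one: the copy loop "for (i = 0; i < PDO_MAX_OBJECTS && pdo[i]; i++)" exits with i already equal to the number of entries copied, so storing i + 1 overcounted by one. Worked through on a tiny example (hypothetical PDO values):

    u32 pdo[7] = { 0x2601912c, 0x0002d12c, 0x0003c0e1, 0 };    /* 3 real entries */
    int i;

    for (i = 0; i < 7 && pdo[i]; i++)
            ;
    /* loop stops at pdo[3] == 0, so i == 3, the correct count; i + 1 == 4 was wrong */
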
index b67a28da47026d0299b8a1f8c22a40fc36b1c4a2..a1c467a0e9f719665fc02fa559d5c94545e5725f 100644 (file)
@@ -68,7 +68,6 @@ static int vmgenid_add(struct acpi_device *device)
 static void vmgenid_notify(struct acpi_device *device, u32 event)
 {
        struct vmgenid_state *state = acpi_driver_data(device);
-       char *envp[] = { "NEW_VMGENID=1", NULL };
        u8 old_id[VMGENID_SIZE];
 
        memcpy(old_id, state->this_id, sizeof(old_id));
@@ -76,7 +75,6 @@ static void vmgenid_notify(struct acpi_device *device, u32 event)
        if (!memcmp(old_id, state->this_id, sizeof(old_id)))
                return;
        add_vmfork_randomness(state->this_id, sizeof(state->this_id));
-       kobject_uevent_env(&device->dev.kobj, KOBJ_CHANGE, envp);
 }
 
 static const struct acpi_device_id vmgenid_ids[] = {
index 77260cb9d0b2b0097fce1ee231b277e1237ede40..b5175b9929f879ff3bd69ff8b3138b52db08e4f1 100644 (file)
@@ -692,31 +692,21 @@ static void end_bbio_data_read(struct btrfs_bio *bbio)
 int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array,
                           gfp_t extra_gfp)
 {
+       const gfp_t gfp = GFP_NOFS | extra_gfp;
        unsigned int allocated;
 
        for (allocated = 0; allocated < nr_pages;) {
                unsigned int last = allocated;
 
-               allocated = alloc_pages_bulk_array(GFP_NOFS | extra_gfp,
-                                                  nr_pages, page_array);
-
-               if (allocated == nr_pages)
-                       return 0;
-
-               /*
-                * During this iteration, no page could be allocated, even
-                * though alloc_pages_bulk_array() falls back to alloc_page()
-                * if  it could not bulk-allocate. So we must be out of memory.
-                */
-               if (allocated == last) {
+               allocated = alloc_pages_bulk_array(gfp, nr_pages, page_array);
+               if (unlikely(allocated == last)) {
+                       /* No progress, fail and do cleanup. */
                        for (int i = 0; i < allocated; i++) {
                                __free_page(page_array[i]);
                                page_array[i] = NULL;
                        }
                        return -ENOMEM;
                }
-
-               memalloc_retry_wait(GFP_NOFS);
        }
        return 0;
 }
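
The rewritten loop leans on the contract the deleted comment described: alloc_pages_bulk_array() fills only the NULL slots of the array, returns the total number of populated entries, and already falls back to single-page allocation internally, so an iteration that makes no progress really means out of memory. The resulting shape, hedged:

    unsigned int filled = 0;

    while (filled < nr_pages) {
            unsigned int last = filled;

            filled = alloc_pages_bulk_array(gfp, nr_pages, page_array);
            if (filled == last) {                   /* no progress: clean up, fail */
                    for (unsigned int i = 0; i < filled; i++) {
                            __free_page(page_array[i]);
                            page_array[i] = NULL;
                    }
                    return -ENOMEM;
            }
    }
    return 0;
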
@@ -4140,7 +4130,7 @@ void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
         * The actual zeroout of the buffer will happen later in
         * btree_csum_one_bio.
         */
-       if (btrfs_is_zoned(fs_info)) {
+       if (btrfs_is_zoned(fs_info) && test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
                set_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags);
                return;
        }
index 9307bb4393b8f629bcbdfd6789d0f3de497dd8ce..bd3b21d4850af5c5e5c613f550486ac6f7edba60 100644 (file)
@@ -1317,6 +1317,7 @@ static int fuse_update_get_attr(struct inode *inode, struct file *file,
                        err = fuse_do_statx(inode, file, stat);
                        if (err == -ENOSYS) {
                                fc->no_statx = 1;
+                               err = 0;
                                goto retry;
                        }
                } else {
index c719c475a068efa4cb569720a36ffa9aabfc3c2f..c17bdf973c18daacd63c6736cbe7fe9a3be3a6af 100644 (file)
@@ -3490,11 +3490,13 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
                    struct dentry *dentry, const u32 *bmval,
                    int ignore_crossmnt)
 {
+       DECLARE_BITMAP(attr_bitmap, ARRAY_SIZE(nfsd4_enc_fattr4_encode_ops));
        struct nfsd4_fattr_args args;
        struct svc_fh *tempfh = NULL;
        int starting_len = xdr->buf->len;
        __be32 *attrlen_p, status;
        int attrlen_offset;
+       u32 attrmask[3];
        int err;
        struct nfsd4_compoundres *resp = rqstp->rq_resp;
        u32 minorversion = resp->cstate.minorversion;
@@ -3502,10 +3504,6 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
                .mnt    = exp->ex_path.mnt,
                .dentry = dentry,
        };
-       union {
-               u32             attrmask[3];
-               unsigned long   mask[2];
-       } u;
        unsigned long bit;
 
        WARN_ON_ONCE(bmval[1] & NFSD_WRITEONLY_ATTRS_WORD1);
@@ -3519,20 +3517,19 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
        /*
         * Make a local copy of the attribute bitmap that can be modified.
         */
-       memset(&u, 0, sizeof(u));
-       u.attrmask[0] = bmval[0];
-       u.attrmask[1] = bmval[1];
-       u.attrmask[2] = bmval[2];
+       attrmask[0] = bmval[0];
+       attrmask[1] = bmval[1];
+       attrmask[2] = bmval[2];
 
        args.rdattr_err = 0;
        if (exp->ex_fslocs.migrated) {
-               status = fattr_handle_absent_fs(&u.attrmask[0], &u.attrmask[1],
-                                               &u.attrmask[2], &args.rdattr_err);
+               status = fattr_handle_absent_fs(&attrmask[0], &attrmask[1],
+                                               &attrmask[2], &args.rdattr_err);
                if (status)
                        goto out;
        }
        args.size = 0;
-       if (u.attrmask[0] & (FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE)) {
+       if (attrmask[0] & (FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE)) {
                status = nfsd4_deleg_getattr_conflict(rqstp, d_inode(dentry));
                if (status)
                        goto out;
@@ -3547,16 +3544,16 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
 
        if (!(args.stat.result_mask & STATX_BTIME))
                /* underlying FS does not offer btime so we can't share it */
-               u.attrmask[1] &= ~FATTR4_WORD1_TIME_CREATE;
-       if ((u.attrmask[0] & (FATTR4_WORD0_FILES_AVAIL | FATTR4_WORD0_FILES_FREE |
+               attrmask[1] &= ~FATTR4_WORD1_TIME_CREATE;
+       if ((attrmask[0] & (FATTR4_WORD0_FILES_AVAIL | FATTR4_WORD0_FILES_FREE |
                        FATTR4_WORD0_FILES_TOTAL | FATTR4_WORD0_MAXNAME)) ||
-           (u.attrmask[1] & (FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE |
+           (attrmask[1] & (FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE |
                       FATTR4_WORD1_SPACE_TOTAL))) {
                err = vfs_statfs(&path, &args.statfs);
                if (err)
                        goto out_nfserr;
        }
-       if ((u.attrmask[0] & (FATTR4_WORD0_FILEHANDLE | FATTR4_WORD0_FSID)) &&
+       if ((attrmask[0] & (FATTR4_WORD0_FILEHANDLE | FATTR4_WORD0_FSID)) &&
            !fhp) {
                tempfh = kmalloc(sizeof(struct svc_fh), GFP_KERNEL);
                status = nfserr_jukebox;
@@ -3571,10 +3568,10 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
                args.fhp = fhp;
 
        args.acl = NULL;
-       if (u.attrmask[0] & FATTR4_WORD0_ACL) {
+       if (attrmask[0] & FATTR4_WORD0_ACL) {
                err = nfsd4_get_nfs4_acl(rqstp, dentry, &args.acl);
                if (err == -EOPNOTSUPP)
-                       u.attrmask[0] &= ~FATTR4_WORD0_ACL;
+                       attrmask[0] &= ~FATTR4_WORD0_ACL;
                else if (err == -EINVAL) {
                        status = nfserr_attrnotsupp;
                        goto out;
@@ -3586,17 +3583,17 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
 
 #ifdef CONFIG_NFSD_V4_SECURITY_LABEL
        args.context = NULL;
-       if ((u.attrmask[2] & FATTR4_WORD2_SECURITY_LABEL) ||
-            u.attrmask[0] & FATTR4_WORD0_SUPPORTED_ATTRS) {
+       if ((attrmask[2] & FATTR4_WORD2_SECURITY_LABEL) ||
+            attrmask[0] & FATTR4_WORD0_SUPPORTED_ATTRS) {
                if (exp->ex_flags & NFSEXP_SECURITY_LABEL)
                        err = security_inode_getsecctx(d_inode(dentry),
                                                &args.context, &args.contextlen);
                else
                        err = -EOPNOTSUPP;
                args.contextsupport = (err == 0);
-               if (u.attrmask[2] & FATTR4_WORD2_SECURITY_LABEL) {
+               if (attrmask[2] & FATTR4_WORD2_SECURITY_LABEL) {
                        if (err == -EOPNOTSUPP)
-                               u.attrmask[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
+                               attrmask[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
                        else if (err)
                                goto out_nfserr;
                }
@@ -3604,8 +3601,8 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
 #endif /* CONFIG_NFSD_V4_SECURITY_LABEL */
 
        /* attrmask */
-       status = nfsd4_encode_bitmap4(xdr, u.attrmask[0],
-                                     u.attrmask[1], u.attrmask[2]);
+       status = nfsd4_encode_bitmap4(xdr, attrmask[0], attrmask[1],
+                                     attrmask[2]);
        if (status)
                goto out;
 
@@ -3614,7 +3611,9 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
        attrlen_p = xdr_reserve_space(xdr, XDR_UNIT);
        if (!attrlen_p)
                goto out_resource;
-       for_each_set_bit(bit, (const unsigned long *)&u.mask,
+       bitmap_from_arr32(attr_bitmap, attrmask,
+                         ARRAY_SIZE(nfsd4_enc_fattr4_encode_ops));
+       for_each_set_bit(bit, attr_bitmap,
                         ARRAY_SIZE(nfsd4_enc_fattr4_encode_ops)) {
                status = nfsd4_enc_fattr4_encode_ops[bit](xdr, &args);
                if (status != nfs_ok)
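
The nfsd rework replaces a union that type-punned three u32 words as an unsigned long bitmap. That pun only happens to work on 32-bit and little-endian 64-bit machines; on a big-endian 64-bit machine, bit 0 of attrmask[0] is not bit 0 of the long it shares storage with, so for_each_set_bit() walked the wrong bits. bitmap_from_arr32() does the conversion portably. The safe idiom, sketched:

    u32 words[3] = { 0x5, 0x0, 0x1 };   /* hypothetical attribute words */
    DECLARE_BITMAP(bits, 96);
    unsigned int bit;

    bitmap_from_arr32(bits, words, 96); /* endian-safe: bit i of words[] == bit i of bits */
    for_each_set_bit(bit, bits, 96)
            handle(bit);                /* hypothetical per-attribute encoder */
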
index bc846b904b68d43816c48c69c3ae83152cadabf1..aee40db7a036fb9f7d34e2e456fb6d61ae3bbf2d 100644 (file)
@@ -240,7 +240,7 @@ nilfs_filetype_table[NILFS_FT_MAX] = {
 
 #define S_SHIFT 12
 static unsigned char
-nilfs_type_by_mode[S_IFMT >> S_SHIFT] = {
+nilfs_type_by_mode[(S_IFMT >> S_SHIFT) + 1] = {
        [S_IFREG >> S_SHIFT]    = NILFS_FT_REG_FILE,
        [S_IFDIR >> S_SHIFT]    = NILFS_FT_DIR,
        [S_IFCHR >> S_SHIFT]    = NILFS_FT_CHRDEV,
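
The nilfs fix is index arithmetic: with S_SHIFT == 12 and S_IFMT == 0xF000, the index (mode & S_IFMT) >> S_SHIFT ranges from 0 to 15, so the table needs (S_IFMT >> S_SHIFT) + 1 == 16 slots; sizing it S_IFMT >> S_SHIFT == 15 put the worst case (a corrupted mode with all four type bits set) one past the end.

    unsigned char table[(S_IFMT >> S_SHIFT) + 1];   /* indices 0..15 all valid */
    int idx = (mode & S_IFMT) >> S_SHIFT;           /* 0..15 even for bogus modes */
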
index 57f2343164a34d81ad9816c0dec7c8994b66beb0..a233a24352b1fa2f0d4f62df656200200eb012f2 100644 (file)
@@ -702,7 +702,7 @@ struct smb2_close_rsp {
        __le16 StructureSize; /* 60 */
        __le16 Flags;
        __le32 Reserved;
-       struct_group(network_open_info,
+       struct_group_attr(network_open_info, __packed,
                __le64 CreationTime;
                __le64 LastAccessTime;
                __le64 LastWriteTime;
index 3079e607c5fe6db71e3cc70a76bcb26d90f813c3..2bbc3c3316f0ff3a25b2d3979034cfd917a70ccc 100644 (file)
@@ -167,20 +167,17 @@ static void __handle_ksmbd_work(struct ksmbd_work *work,
        int rc;
        bool is_chained = false;
 
-       if (conn->ops->allocate_rsp_buf(work))
-               return;
-
        if (conn->ops->is_transform_hdr &&
            conn->ops->is_transform_hdr(work->request_buf)) {
                rc = conn->ops->decrypt_req(work);
-               if (rc < 0) {
-                       conn->ops->set_rsp_status(work, STATUS_DATA_ERROR);
-                       goto send;
-               }
-
+               if (rc < 0)
+                       return;
                work->encrypted = true;
        }
 
+       if (conn->ops->allocate_rsp_buf(work))
+               return;
+
        rc = conn->ops->init_rsp_hdr(work);
        if (rc) {
                /* either uid or tid is not correct */
index cd2ed345c3d395a7d3398000fb496c7e9e824392..8ae0c4d5ab96c9e15a1e67ed055cedcc22c90c9e 100644 (file)
@@ -535,6 +535,10 @@ int smb2_allocate_rsp_buf(struct ksmbd_work *work)
        if (cmd == SMB2_QUERY_INFO_HE) {
                struct smb2_query_info_req *req;
 
+               if (get_rfc1002_len(work->request_buf) <
+                   offsetof(struct smb2_query_info_req, OutputBufferLength))
+                       return -EINVAL;
+
                req = smb2_get_msg(work->request_buf);
                if ((req->InfoType == SMB2_O_INFO_FILE &&
                     (req->FileInfoClass == FILE_FULL_EA_INFORMATION ||
index 2e992fadeaa7df3ee439c381ff09c5fecae3e326..d7906efaa96b158c27f941ca04e477cedd8eb70d 100644 (file)
@@ -754,10 +754,15 @@ int ksmbd_vfs_rename(struct ksmbd_work *work, const struct path *old_path,
                goto out4;
        }
 
+       /*
+        * Explicitly handle the file overwrite case, for compatibility
+        * with filesystems that may not support rename flags (e.g. fuse).
+        */
        if ((flags & RENAME_NOREPLACE) && d_is_positive(new_dentry)) {
                err = -EEXIST;
                goto out4;
        }
+       flags &= ~(RENAME_NOREPLACE);
 
        if (old_child == trap) {
                err = -EINVAL;
index aa3411354e66d00c48db30adf73b34eedb84cb6d..16bd693d0b3aa23ce87af9cc1540e113a4c2a286 100644 (file)
@@ -48,6 +48,10 @@ static int squashfs_new_inode(struct super_block *sb, struct inode *inode,
        gid_t i_gid;
        int err;
 
+       inode->i_ino = le32_to_cpu(sqsh_ino->inode_number);
+       if (inode->i_ino == 0)
+               return -EINVAL;
+
        err = squashfs_get_id(sb, le16_to_cpu(sqsh_ino->uid), &i_uid);
        if (err)
                return err;
@@ -58,7 +62,6 @@ static int squashfs_new_inode(struct super_block *sb, struct inode *inode,
 
        i_uid_write(inode, i_uid);
        i_gid_write(inode, i_gid);
-       inode->i_ino = le32_to_cpu(sqsh_ino->inode_number);
        inode_set_mtime(inode, le32_to_cpu(sqsh_ino->mtime), 0);
        inode_set_atime(inode, inode_get_mtime_sec(inode), 0);
        inode_set_ctime(inode, inode_get_mtime_sec(inode), 0);
index 6b7652fb805057b8a155a0c94a3746fcb72eb5c7..7cd64021d453de7494f974ee21b77bc9a0838a35 100644 (file)
@@ -463,6 +463,8 @@ struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj,
        kn = kernfs_find_and_get(kobj->sd, attr->name);
        if (kn)
                kernfs_break_active_protection(kn);
+       else
+               kobject_put(kobj);
        return kn;
 }
 EXPORT_SYMBOL_GPL(sysfs_break_active_protection);
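
The sysfs hunk restores reference balance: this function takes a kobject reference on entry (above the hunk, not shown) that sysfs_unbreak_active_protection() normally drops later; when kernfs_find_and_get() fails, that later path never runs, so the failure branch must drop the reference itself. The general shape, with hypothetical names:

    obj_get(obj);                   /* ref to be dropped by the unbreak step */
    handle = lookup(obj);
    if (!handle) {
            obj_put(obj);           /* error path: nobody else will drop it */
            return NULL;
    }
    return handle;                  /* success: unbreak() drops the ref later */
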
index 961f4d88f9ef784c3c8fbafd6925579698a93d5f..1985c22d90ca47f63d9816899ed4870fccbf3176 100644 (file)
@@ -296,5 +296,13 @@ do {                                                                       \
 #define io_stop_wc() do { } while (0)
 #endif
 
+/*
+ * Architectures that guarantee an implicit smp_mb() in switch_mm()
+ * can override smp_mb__after_switch_mm.
+ */
+#ifndef smp_mb__after_switch_mm
+# define smp_mb__after_switch_mm()     smp_mb()
+#endif
+
 #endif /* !__ASSEMBLY__ */
 #endif /* __ASM_GENERIC_BARRIER_H */
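
smp_mb__after_switch_mm() follows the asm-generic override convention: the generic header supplies the conservative default (a full smp_mb()) only when the architecture has not already defined the symbol. An architecture whose switch_mm() already implies a full barrier could then make it free in its own asm/barrier.h, e.g. (hypothetical arch, sketched):

    /* arch/foo/include/asm/barrier.h -- hypothetical */
    #define smp_mb__after_switch_mm()      do { } while (0)

    /* include/asm-generic/barrier.h then skips its smp_mb() default */
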
index 99e4f5e722132c2c4f301816bbab7871f2f2ccb0..b43ca3b9d2a2649dc6a1ac6f1403132fadf44025 100644 (file)
@@ -126,6 +126,8 @@ typedef unsigned int __bitwise blk_mode_t;
 #define BLK_OPEN_WRITE_IOCTL   ((__force blk_mode_t)(1 << 4))
 /* open is exclusive wrt all other BLK_OPEN_WRITE opens to the device */
 #define BLK_OPEN_RESTRICT_WRITES       ((__force blk_mode_t)(1 << 5))
+/* return partition scanning errors */
+#define BLK_OPEN_STRICT_SCAN   ((__force blk_mode_t)(1 << 6))
 
 struct gendisk {
        /*
index e5ee2c694401e0972d4a644f01ca523f63f83475..3f4b4ac527ca28c66119cf00fc13083633ec80a3 100644 (file)
@@ -288,7 +288,12 @@ int __init xbc_init(const char *buf, size_t size, const char **emsg, int *epos);
 int __init xbc_get_info(int *node_size, size_t *data_size);
 
 /* XBC cleanup data structures */
-void __init xbc_exit(void);
+void __init _xbc_exit(bool early);
+
+static inline void xbc_exit(void)
+{
+       _xbc_exit(false);
+}
 
 /* XBC embedded bootconfig data in kernel */
 #ifdef CONFIG_BOOT_CONFIG_EMBED
index 6c75c8bd44a0bb627020ba267c3d0debb2379ba4..1a14e239221f7e9aec06b510d450c9f30b34fc2c 100644 (file)
@@ -2,7 +2,6 @@
 #ifndef __LINUX_GPIO_PROPERTY_H
 #define __LINUX_GPIO_PROPERTY_H
 
-#include <dt-bindings/gpio/gpio.h> /* for GPIO_* flags */
 #include <linux/property.h>
 
 #define PROPERTY_ENTRY_GPIO(_name_, _chip_node_, _idx_, _flags_) \
index 66828dfc6e74e887bdcf302b651affff9bd40c49..f0c6bf98283279c1aa7460df5edb82a4eededdec 100644 (file)
@@ -114,8 +114,17 @@ extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
 extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
 int shmem_unuse(unsigned int type);
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 extern bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
                          struct mm_struct *mm, unsigned long vm_flags);
+#else
+static __always_inline bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
+                                         struct mm_struct *mm, unsigned long vm_flags)
+{
+       return false;
+}
+#endif
+
 #ifdef CONFIG_SHMEM
 extern unsigned long shmem_swap_usage(struct vm_area_struct *vma);
 #else
index bff1e8d97de0e089a70ed3f9aa872bf050955087..925c84653af5e9a8f0a97b4f1b2a1bf732d98112 100644 (file)
@@ -390,6 +390,35 @@ static inline bool is_migration_entry_dirty(swp_entry_t entry)
 }
 #endif /* CONFIG_MIGRATION */
 
+#ifdef CONFIG_MEMORY_FAILURE
+
+/*
+ * Support for hardware poisoned pages
+ */
+static inline swp_entry_t make_hwpoison_entry(struct page *page)
+{
+       BUG_ON(!PageLocked(page));
+       return swp_entry(SWP_HWPOISON, page_to_pfn(page));
+}
+
+static inline int is_hwpoison_entry(swp_entry_t entry)
+{
+       return swp_type(entry) == SWP_HWPOISON;
+}
+
+#else
+
+static inline swp_entry_t make_hwpoison_entry(struct page *page)
+{
+       return swp_entry(0, 0);
+}
+
+static inline int is_hwpoison_entry(swp_entry_t swp)
+{
+       return 0;
+}
+#endif
+
 typedef unsigned long pte_marker;
 
 #define  PTE_MARKER_UFFD_WP                    BIT(0)
@@ -470,8 +499,9 @@ static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
 
 /*
  * A pfn swap entry is a special type of swap entry that always has a pfn stored
- * in the swap offset. They are used to represent unaddressable device memory
- * and to restrict access to a page undergoing migration.
+ * in the swap offset. They are used to represent unaddressable device
+ * memory, to restrict access to a page undergoing migration, or to
+ * represent a pfn which has been hwpoisoned and unmapped.
  */
 static inline bool is_pfn_swap_entry(swp_entry_t entry)
 {
@@ -479,7 +509,7 @@ static inline bool is_pfn_swap_entry(swp_entry_t entry)
        BUILD_BUG_ON(SWP_TYPE_SHIFT < SWP_PFN_BITS);
 
        return is_migration_entry(entry) || is_device_private_entry(entry) ||
-              is_device_exclusive_entry(entry);
+              is_device_exclusive_entry(entry) || is_hwpoison_entry(entry);
 }
 
 struct page_vma_mapped_walk;
@@ -548,35 +578,6 @@ static inline int is_pmd_migration_entry(pmd_t pmd)
 }
 #endif  /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
 
-#ifdef CONFIG_MEMORY_FAILURE
-
-/*
- * Support for hardware poisoned pages
- */
-static inline swp_entry_t make_hwpoison_entry(struct page *page)
-{
-       BUG_ON(!PageLocked(page));
-       return swp_entry(SWP_HWPOISON, page_to_pfn(page));
-}
-
-static inline int is_hwpoison_entry(swp_entry_t entry)
-{
-       return swp_type(entry) == SWP_HWPOISON;
-}
-
-#else
-
-static inline swp_entry_t make_hwpoison_entry(struct page *page)
-{
-       return swp_entry(0, 0);
-}
-
-static inline int is_hwpoison_entry(swp_entry_t swp)
-{
-       return 0;
-}
-#endif
-
 static inline int non_swap_entry(swp_entry_t entry)
 {
        return swp_type(entry) >= MAX_SWAPFILES;
index 94e63b2695406f96333e9851bc32742d932d859a..00790bb5cbde666a2a72180d94beae8eb647905d 100644 (file)
@@ -105,7 +105,7 @@ struct udp_sock {
 #define udp_assign_bit(nr, sk, val)            \
        assign_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags, val)
 
-#define UDP_MAX_SEGMENTS       (1 << 6UL)
+#define UDP_MAX_SEGMENTS       (1 << 7UL)
 
 #define udp_sk(ptr) container_of_const(ptr, struct udp_sock, inet.sk)
 
index a763dd327c6ea95d6b94fda1ea2efd8f1784335f..9abb7ee40d72fc2e7d2ef0ec86ef18df939ddd9c 100644 (file)
@@ -336,7 +336,7 @@ int nf_flow_rule_route_ipv6(struct net *net, struct flow_offload *flow,
 int nf_flow_table_offload_init(void);
 void nf_flow_table_offload_exit(void);
 
-static inline __be16 nf_flow_pppoe_proto(const struct sk_buff *skb)
+static inline __be16 __nf_flow_pppoe_proto(const struct sk_buff *skb)
 {
        __be16 proto;
 
@@ -352,6 +352,16 @@ static inline __be16 nf_flow_pppoe_proto(const struct sk_buff *skb)
        return 0;
 }
 
+static inline bool nf_flow_pppoe_proto(struct sk_buff *skb, __be16 *inner_proto)
+{
+       if (!pskb_may_pull(skb, PPPOE_SES_HLEN))
+               return false;
+
+       *inner_proto = __nf_flow_pppoe_proto(skb);
+
+       return true;
+}
+
 #define NF_FLOW_TABLE_STAT_INC(net, count) __this_cpu_inc((net)->ft.stat->count)
 #define NF_FLOW_TABLE_STAT_DEC(net, count) __this_cpu_dec((net)->ft.stat->count)
 #define NF_FLOW_TABLE_STAT_INC_ATOMIC(net, count)      \
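
The new nf_flow_pppoe_proto() wrapper encodes the rule that skb header bytes may live outside the linear area: pskb_may_pull() must succeed before a parser reads PPPOE_SES_HLEN bytes, otherwise the old helper could read past the end of the linear data. The idiom, sketched with a hypothetical header:

    static bool parse_hdr(struct sk_buff *skb, __be16 *proto)
    {
            if (!pskb_may_pull(skb, MY_HDR_LEN))    /* bytes the parser will read */
                    return false;                   /* truncated packet: don't parse */
            *proto = extract_proto(skb);            /* hypothetical, now safe */
            return true;
    }
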
index 510244cc0f8f0e479f252598ba2aaf43b8918978..1cf9cb0f0a975ae96010775ebd00a745cd936cf6 100644 (file)
@@ -307,9 +307,23 @@ static inline void *nft_elem_priv_cast(const struct nft_elem_priv *priv)
        return (void *)priv;
 }
 
+
+/**
+ * enum nft_iter_type - nftables set iterator type
+ *
+ * @NFT_ITER_READ: read-only iteration over set elements
+ * @NFT_ITER_UPDATE: iteration under mutex to update set element state
+ */
+enum nft_iter_type {
+       NFT_ITER_UNSPEC,
+       NFT_ITER_READ,
+       NFT_ITER_UPDATE,
+};
+
 struct nft_set;
 struct nft_set_iter {
        u8              genmask;
+       enum nft_iter_type type:8;
        unsigned int    count;
        unsigned int    skip;
        int             err;
index cefe0c4bdae34c91868c22731a3b666f8e16e996..41ca14e81d55f926dc4002e820d7e027f4729021 100644 (file)
@@ -117,6 +117,7 @@ struct Qdisc {
        struct qdisc_skb_head   q;
        struct gnet_stats_basic_sync bstats;
        struct gnet_stats_queue qstats;
+       int                     owner;
        unsigned long           state;
        unsigned long           state2; /* must be written under qdisc spinlock */
        struct Qdisc            *next_sched;
index ba2d96a1bc2f94703945c5f79294a66af1fe8fd4..f50fcafc69de20b8b20a53a45f29b23f4259a65e 100644 (file)
@@ -609,7 +609,7 @@ TRACE_EVENT(rpcgss_context,
                __field(unsigned int, timeout)
                __field(u32, window_size)
                __field(int, len)
-               __string(acceptor, data)
+               __string_len(acceptor, data, len)
        ),
 
        TP_fast_assign(
@@ -618,7 +618,7 @@ TRACE_EVENT(rpcgss_context,
                __entry->timeout = timeout;
                __entry->window_size = window_size;
                __entry->len = len;
-               strncpy(__get_str(acceptor), data, len);
+               __assign_str(acceptor, data);
        ),
 
        TP_printk("win_size=%u expiry=%lu now=%lu timeout=%u acceptor=%.*s",
index 98fdd93d79a5c5eab16bc7eba1a942160ed7d2a0..b06bb72a288a9b46e8a5315560a5ecc94818287e 100644 (file)
@@ -635,6 +635,8 @@ static void __init setup_command_line(char *command_line)
        if (!saved_command_line)
                panic("%s: Failed to allocate %zu bytes\n", __func__, len + ilen);
 
+       len = xlen + strlen(command_line) + 1;
+
        static_command_line = memblock_alloc(len, SMP_CACHE_BYTES);
        if (!static_command_line)
                panic("%s: Failed to allocate %zu bytes\n", __func__, len);
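
The init/main.c hunk recomputes len so the second buffer is sized for the string it will actually hold: saved_command_line and static_command_line store different strings, and reusing the first length let the longer copy overflow its allocation. The bug class in miniature, hedged:

    size_t len = strlen(a) + extra + 1;
    char *buf_a = alloc(len);           /* sized for a: fine */

    len = strlen(b) + extra + 1;        /* recompute before the next buffer */
    char *buf_b = alloc(len);           /* sized for b, not reused from a */
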
index 3fc792dfc6ae724aef7289ea940bc8aed5acd9ee..dc0235ff472d34817f9050e850642e66652a5f87 100644 (file)
@@ -2610,19 +2610,6 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
        if (__io_cqring_events_user(ctx) >= min_events)
                return 0;
 
-       if (sig) {
-#ifdef CONFIG_COMPAT
-               if (in_compat_syscall())
-                       ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
-                                                     sigsz);
-               else
-#endif
-                       ret = set_user_sigmask(sig, sigsz);
-
-               if (ret)
-                       return ret;
-       }
-
        init_waitqueue_func_entry(&iowq.wq, io_wake_function);
        iowq.wq.private = current;
        INIT_LIST_HEAD(&iowq.wq.entry);
@@ -2639,6 +2626,19 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
                iowq.timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
        }
 
+       if (sig) {
+#ifdef CONFIG_COMPAT
+               if (in_compat_syscall())
+                       ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
+                                                     sigsz);
+               else
+#endif
+                       ret = set_user_sigmask(sig, sigsz);
+
+               if (ret)
+                       return ret;
+       }
+
        trace_io_uring_cqring_wait(ctx, min_events);
        do {
                int nr_wait = (int) iowq.cq_tail - READ_ONCE(ctx->rings->cq.tail);
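
Moving the set_user_sigmask() block below the timeout setup matters because replacing the task's signal mask is a side effect that every later exit path must undo; with the old order, a failing timespec copy could return while the temporary mask was still installed. The rule, sketched: do fallible, side-effect-free preparation first and start side effects last.

    err = copy_timeout_from_user(uts, &deadline);   /* hypothetical helper */
    if (err)
            return err;                             /* nothing to undo */

    err = set_user_sigmask(sig, sigsz);             /* side effect: mask replaced */
    if (err)
            return err;
    /* all exit paths below must run the mask-restoring teardown */
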
index 0d944e92a43ffa13bdbcce6c6a28c44bab29ca19..3b9cdb42e757cb26fb71cc957ccb47cda2c2b77a 100644 (file)
@@ -712,6 +712,23 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
                } else if (anon_vma_fork(tmp, mpnt))
                        goto fail_nomem_anon_vma_fork;
                vm_flags_clear(tmp, VM_LOCKED_MASK);
+               /*
+                * Copy/update hugetlb private vma information.
+                */
+               if (is_vm_hugetlb_page(tmp))
+                       hugetlb_dup_vma_private(tmp);
+
+               /*
+                * Link the vma into the MT. After using __mt_dup(), memory
+                * allocation is not necessary here, so it cannot fail.
+                */
+               vma_iter_bulk_store(&vmi, tmp);
+
+               mm->map_count++;
+
+               if (tmp->vm_ops && tmp->vm_ops->open)
+                       tmp->vm_ops->open(tmp);
+
                file = tmp->vm_file;
                if (file) {
                        struct address_space *mapping = file->f_mapping;
@@ -728,25 +745,9 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
                        i_mmap_unlock_write(mapping);
                }
 
-               /*
-                * Copy/update hugetlb private vma information.
-                */
-               if (is_vm_hugetlb_page(tmp))
-                       hugetlb_dup_vma_private(tmp);
-
-               /*
-                * Link the vma into the MT. After using __mt_dup(), memory
-                * allocation is not necessary here, so it cannot fail.
-                */
-               vma_iter_bulk_store(&vmi, tmp);
-
-               mm->map_count++;
                if (!(tmp->vm_flags & VM_WIPEONFORK))
                        retval = copy_page_range(tmp, mpnt);
 
-               if (tmp->vm_ops && tmp->vm_ops->open)
-                       tmp->vm_ops->open(tmp);
-
                if (retval) {
                        mpnt = vma_next(&vmi);
                        goto loop_out;
index 001fe047bd5d80b841776719b5fa73f4401288a4..8c817d0a92f3ef5c8f03a9aa1ef148bdcd9135ea 100644 (file)
@@ -79,6 +79,8 @@
 # include <asm/paravirt_api_clock.h>
 #endif
 
+#include <asm/barrier.h>
+
 #include "cpupri.h"
 #include "cpudeadline.h"
 
@@ -3445,13 +3447,19 @@ static inline void switch_mm_cid(struct rq *rq,
                 * between rq->curr store and load of {prev,next}->mm->pcpu_cid[cpu].
                 * Provide it here.
                 */
-               if (!prev->mm)                          // from kernel
+               if (!prev->mm) {                        // from kernel
                        smp_mb();
-               /*
-                * user -> user transition guarantees a memory barrier through
-                * switch_mm() when current->mm changes. If current->mm is
-                * unchanged, no barrier is needed.
-                */
+               } else {                                // from user
+                       /*
+                        * user->user transition relies on an implicit
+                        * A user->user transition relies on an implicit
+                        * memory barrier in switch_mm() when
+                        * current->mm changes. If the architecture's
+                        * switch_mm() does not imply a memory barrier,
+                        * one is emitted here.  If current->mm is
+                        * unchanged, no barrier is needed.
+                       smp_mb__after_switch_mm();
+               }
        }
        if (prev->mm_cid_active) {
                mm_cid_snapshot_time(rq, prev->mm);
index c59d26068a6401990343e26c03002fcc4022ef98..8841554432d5b4770c19a948eb849af71386271f 100644 (file)
@@ -61,9 +61,12 @@ static inline void * __init xbc_alloc_mem(size_t size)
        return memblock_alloc(size, SMP_CACHE_BYTES);
 }
 
-static inline void __init xbc_free_mem(void *addr, size_t size)
+static inline void __init xbc_free_mem(void *addr, size_t size, bool early)
 {
-       memblock_free(addr, size);
+       if (early)
+               memblock_free(addr, size);
+       else if (addr)
+               memblock_free_late(__pa(addr), size);
 }
 
 #else /* !__KERNEL__ */
@@ -73,7 +76,7 @@ static inline void *xbc_alloc_mem(size_t size)
        return malloc(size);
 }
 
-static inline void xbc_free_mem(void *addr, size_t size)
+static inline void xbc_free_mem(void *addr, size_t size, bool early)
 {
        free(addr);
 }
@@ -904,13 +907,13 @@ static int __init xbc_parse_tree(void)
  * If you need to reuse xbc_init() with new boot config, you can
  * use this.
  */
-void __init xbc_exit(void)
+void __init _xbc_exit(bool early)
 {
-       xbc_free_mem(xbc_data, xbc_data_size);
+       xbc_free_mem(xbc_data, xbc_data_size, early);
        xbc_data = NULL;
        xbc_data_size = 0;
        xbc_node_num = 0;
-       xbc_free_mem(xbc_nodes, sizeof(struct xbc_node) * XBC_NODE_MAX);
+       xbc_free_mem(xbc_nodes, sizeof(struct xbc_node) * XBC_NODE_MAX, early);
        xbc_nodes = NULL;
        brace_index = 0;
 }
@@ -963,7 +966,7 @@ int __init xbc_init(const char *data, size_t size, const char **emsg, int *epos)
        if (!xbc_nodes) {
                if (emsg)
                        *emsg = "Failed to allocate bootconfig nodes";
-               xbc_exit();
+               _xbc_exit(true);
                return -ENOMEM;
        }
        memset(xbc_nodes, 0, sizeof(struct xbc_node) * XBC_NODE_MAX);
@@ -977,7 +980,7 @@ int __init xbc_init(const char *data, size_t size, const char **emsg, int *epos)
                        *epos = xbc_err_pos;
                if (emsg)
                        *emsg = xbc_err_msg;
-               xbc_exit();
+               _xbc_exit(true);
        } else
                ret = xbc_node_num;
 
index df83182ec72d5d77bb86b6571814a23fe244945a..f6d55635742f54a42778e5edebdc6bf9bddf98b8 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1206,6 +1206,22 @@ static long __get_user_pages(struct mm_struct *mm,
 
                /* first iteration or cross vma bound */
                if (!vma || start >= vma->vm_end) {
+                       /*
+                        * MADV_POPULATE_(READ|WRITE) wants to handle VMA
+                        * lookups+error reporting differently.
+                        */
+                       if (gup_flags & FOLL_MADV_POPULATE) {
+                               vma = vma_lookup(mm, start);
+                               if (!vma) {
+                                       ret = -ENOMEM;
+                                       goto out;
+                               }
+                               if (check_vma_flags(vma, gup_flags)) {
+                                       ret = -EINVAL;
+                                       goto out;
+                               }
+                               goto retry;
+                       }
                        vma = gup_vma_lookup(mm, start);
                        if (!vma && in_gate_area(mm, start)) {
                                ret = get_gate_page(mm, start & PAGE_MASK,
@@ -1683,35 +1699,35 @@ long populate_vma_page_range(struct vm_area_struct *vma,
 }
 
 /*
- * faultin_vma_page_range() - populate (prefault) page tables inside the
- *                           given VMA range readable/writable
+ * faultin_page_range() - populate (prefault) page tables inside the
+ *                       given range readable/writable
  *
  * This takes care of mlocking the pages, too, if VM_LOCKED is set.
  *
- * @vma: target vma
+ * @mm: the mm to populate page tables in
  * @start: start address
  * @end: end address
  * @write: whether to prefault readable or writable
  * @locked: whether the mmap_lock is still held
  *
- * Returns either number of processed pages in the vma, or a negative error
- * code on error (see __get_user_pages()).
+ * Returns either the number of processed pages in the MM, or a negative error
+ * code on error (see __get_user_pages()). Note that this function reports
+ * errors related to VMAs, such as incompatible mappings, as expected by
+ * MADV_POPULATE_(READ|WRITE).
  *
- * vma->vm_mm->mmap_lock must be held. The range must be page-aligned and
- * covered by the VMA. If it's released, *@locked will be set to 0.
+ * The range must be page-aligned.
+ *
+ * mm->mmap_lock must be held. If it's released, *@locked will be set to 0.
  */
-long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
-                           unsigned long end, bool write, int *locked)
+long faultin_page_range(struct mm_struct *mm, unsigned long start,
+                       unsigned long end, bool write, int *locked)
 {
-       struct mm_struct *mm = vma->vm_mm;
        unsigned long nr_pages = (end - start) / PAGE_SIZE;
        int gup_flags;
        long ret;
 
        VM_BUG_ON(!PAGE_ALIGNED(start));
        VM_BUG_ON(!PAGE_ALIGNED(end));
-       VM_BUG_ON_VMA(start < vma->vm_start, vma);
-       VM_BUG_ON_VMA(end > vma->vm_end, vma);
        mmap_assert_locked(mm);
 
        /*
@@ -1723,19 +1739,13 @@ long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
         *                a poisoned page.
         * !FOLL_FORCE: Require proper access permissions.
         */
-       gup_flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_UNLOCKABLE;
+       gup_flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_UNLOCKABLE |
+                   FOLL_MADV_POPULATE;
        if (write)
                gup_flags |= FOLL_WRITE;
 
-       /*
-        * We want to report -EINVAL instead of -EFAULT for any permission
-        * problems or incompatible mappings.
-        */
-       if (check_vma_flags(vma, gup_flags))
-               return -EINVAL;
-
-       ret = __get_user_pages(mm, start, nr_pages, gup_flags,
-                              NULL, locked);
+       ret = __get_user_pages_locked(mm, start, nr_pages, NULL, locked,
+                                     gup_flags);
        lru_add_drain();
        return ret;
 }
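
The gup/madvise rework above moves the VMA walk into GUP (via FOLL_MADV_POPULATE) without changing the user-visible contract: MADV_POPULATE_READ/WRITE still prefault a whole range, returning ENOMEM for unmapped holes and EINVAL for incompatible mappings. A small runnable userspace illustration (assumes a kernel and libc exposing MADV_POPULATE_WRITE, v5.14+):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 1 << 20;
            char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (p == MAP_FAILED)
                    return 1;
            /* prefault every page for writing; fails if the range has holes */
            if (madvise(p, len, MADV_POPULATE_WRITE))
                    perror("madvise");
            return 0;
    }
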
index 94c958f7ebb50dd925070157c0d0b2432dfc0483..6790f93fda45bd3415581bcb5322bd7bef77a65e 100644 (file)
@@ -2244,9 +2244,6 @@ int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pm
                goto unlock_ptls;
        }
 
-       folio_move_anon_rmap(src_folio, dst_vma);
-       WRITE_ONCE(src_folio->index, linear_page_index(dst_vma, dst_addr));
-
        src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
        /* Folio got pinned from under us. Put it back and fail the move. */
        if (folio_maybe_dma_pinned(src_folio)) {
@@ -2255,6 +2252,9 @@ int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pm
                goto unlock_ptls;
        }
 
+       folio_move_anon_rmap(src_folio, dst_vma);
+       WRITE_ONCE(src_folio->index, linear_page_index(dst_vma, dst_addr));
+
        _dst_pmd = mk_huge_pmd(&src_folio->page, dst_vma->vm_page_prot);
        /* Follow mremap() behavior and treat the entry dirty after the move */
        _dst_pmd = pmd_mkwrite(pmd_mkdirty(_dst_pmd), dst_vma);
index ed1581b670d42e3e5cc1bd3ea4c01390f10e1016..e5e3df1d3b8747294f5912b2c21145ca427c3d34 100644 (file)
@@ -6943,9 +6943,13 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
                        if (!pte_same(pte, newpte))
                                set_huge_pte_at(mm, address, ptep, newpte, psize);
                } else if (unlikely(is_pte_marker(pte))) {
-                       /* No other markers apply for now. */
-                       WARN_ON_ONCE(!pte_marker_uffd_wp(pte));
-                       if (uffd_wp_resolve)
+                       /*
+                        * Do nothing on a poison marker; the page is
+                        * corrupted and permissions do not apply.  Here
+                        * pte_marker_uffd_wp()==true implies !poison
+                        * because the two are mutually exclusive.
+                        */
+                       if (pte_marker_uffd_wp(pte) && uffd_wp_resolve)
                                /* Safe to modify directly (non-present->none). */
                                huge_pte_clear(mm, address, ptep, psize);
                } else if (!huge_pte_none(pte)) {
index f309a010d50fb6de06e6d613b7f23357a99164ef..c3f3e0f1911513b458cbcdba01d55ccd2b453071 100644 (file)
@@ -590,9 +590,8 @@ struct anon_vma *folio_anon_vma(struct folio *folio);
 void unmap_mapping_folio(struct folio *folio);
 extern long populate_vma_page_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end, int *locked);
-extern long faultin_vma_page_range(struct vm_area_struct *vma,
-                                  unsigned long start, unsigned long end,
-                                  bool write, int *locked);
+extern long faultin_page_range(struct mm_struct *mm, unsigned long start,
+               unsigned long end, bool write, int *locked);
 extern bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
                               unsigned long bytes);
 
@@ -1031,10 +1030,13 @@ enum {
        FOLL_FAST_ONLY = 1 << 20,
        /* allow unlocking the mmap lock */
        FOLL_UNLOCKABLE = 1 << 21,
+       /* VMA lookup+checks compatible with MADV_POPULATE_(READ|WRITE) */
+       FOLL_MADV_POPULATE = 1 << 22,
 };
 
 #define INTERNAL_GUP_FLAGS (FOLL_TOUCH | FOLL_TRIED | FOLL_REMOTE | FOLL_PIN | \
-                           FOLL_FAST_ONLY | FOLL_UNLOCKABLE)
+                           FOLL_FAST_ONLY | FOLL_UNLOCKABLE | \
+                           FOLL_MADV_POPULATE)
 
 /*
  * Indicates for which pages that are write-protected in the page table,
index cfa5e7288261189cb8242e5a0367fe6ffeebca12..f2c818af0b66e8f8e67d332a0d89d15502d765fc 100644 (file)
@@ -908,27 +908,14 @@ static long madvise_populate(struct vm_area_struct *vma,
 {
        const bool write = behavior == MADV_POPULATE_WRITE;
        struct mm_struct *mm = vma->vm_mm;
-       unsigned long tmp_end;
        int locked = 1;
        long pages;
 
        *prev = vma;
 
        while (start < end) {
-               /*
-                * We might have temporarily dropped the lock. For example,
-                * our VMA might have been split.
-                */
-               if (!vma || start >= vma->vm_end) {
-                       vma = vma_lookup(mm, start);
-                       if (!vma)
-                               return -ENOMEM;
-               }
-
-               tmp_end = min_t(unsigned long, end, vma->vm_end);
                /* Populate (prefault) page tables readable/writable. */
-               pages = faultin_vma_page_range(vma, start, tmp_end, write,
-                                              &locked);
+               pages = faultin_page_range(mm, start, end, write, &locked);
                if (!locked) {
                        mmap_read_lock(mm);
                        locked = 1;
@@ -949,7 +936,7 @@ static long madvise_populate(struct vm_area_struct *vma,
                                pr_warn_once("%s: unhandled return value: %ld\n",
                                             __func__, pages);
                                fallthrough;
-                       case -ENOMEM:
+                       case -ENOMEM: /* No VMA or out of memory. */
                                return -ENOMEM;
                        }
                }
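The user-visible behavior is unchanged: MADV_POPULATE_READ/WRITE still prefault a range, now via a single mm-wide helper that does its own VMA lookup under FOLL_MADV_POPULATE instead of the caller re-looking-up the VMA after every lock drop. A minimal, self-contained sketch of the syscall usage (the constant value is from uapi/asm-generic/mman-common.h, guarded in case older headers lack it):

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    #ifndef MADV_POPULATE_WRITE
    #define MADV_POPULATE_WRITE 23	/* uapi value, for older libc headers */
    #endif

    int main(void)
    {
    	size_t len = 1 << 20;
    	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
    		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    	if (p == MAP_FAILED)
    		return 1;
    	/* Prefault writable page tables up front so later stores take
    	 * no minor faults; fails with ENOMEM if part of the range has
    	 * no VMA, matching the -ENOMEM case in the hunk above. */
    	if (madvise(p, len, MADV_POPULATE_WRITE))
    		perror("madvise");
    	memset(p, 0x5a, len);
    	return 0;
    }
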
index 9349948f1abfd120977706bbda23456999f057bc..9e62a00b46ddee5899f85cfc252dabd7c0d04121 100644 (file)
@@ -154,11 +154,23 @@ static int __page_handle_poison(struct page *page)
 {
        int ret;
 
-       zone_pcp_disable(page_zone(page));
+       /*
+        * zone_pcp_disable() can't be used here. It will
+        * hold pcp_batch_high_lock and dissolve_free_huge_page() might hold
+        * cpu_hotplug_lock via static_key_slow_dec() when hugetlb vmemmap
+        * optimization is enabled. This would break the current lock
+        * dependency chain and lead to deadlock.
+        * Disabling pcp before dissolving the page was a deterministic
+        * approach because we made sure that those pages cannot end up in any
+        * PCP list. Draining PCP lists expels those pages to the buddy system,
+        * but nothing guarantees that those pages do not get back to a PCP
+        * queue if we need to refill those.
+        */
        ret = dissolve_free_huge_page(page);
-       if (!ret)
+       if (!ret) {
+               drain_all_pages(page_zone(page));
                ret = take_page_off_buddy(page);
-       zone_pcp_enable(page_zone(page));
+       }
 
        return ret;
 }
index 791a6dc163244be15ac799c16ce7ac9ad2e0baee..5853f3ae36e535371679a978c7b66aa14f0d2e0e 100644 (file)
@@ -742,12 +742,6 @@ static long shmem_unused_huge_count(struct super_block *sb,
 
 #define shmem_huge SHMEM_HUGE_DENY
 
-bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
-                  struct mm_struct *mm, unsigned long vm_flags)
-{
-       return false;
-}
-
 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
                struct shrink_control *sc, unsigned long nr_to_split)
 {
index f21097e734827891f87adb9d0a1f7cebf9f15380..ceaa5a89b947fc574ee2a05003db3de7cc9797b1 100644 (file)
@@ -30,7 +30,7 @@ br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb)
        return netif_receive_skb(skb);
 }
 
-static int br_pass_frame_up(struct sk_buff *skb)
+static int br_pass_frame_up(struct sk_buff *skb, bool promisc)
 {
        struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
        struct net_bridge *br = netdev_priv(brdev);
@@ -65,6 +65,8 @@ static int br_pass_frame_up(struct sk_buff *skb)
        br_multicast_count(br, NULL, skb, br_multicast_igmp_type(skb),
                           BR_MCAST_DIR_TX);
 
+       BR_INPUT_SKB_CB(skb)->promisc = promisc;
+
        return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN,
                       dev_net(indev), NULL, skb, indev, NULL,
                       br_netif_receive_skb);
@@ -82,6 +84,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
        struct net_bridge_mcast *brmctx;
        struct net_bridge_vlan *vlan;
        struct net_bridge *br;
+       bool promisc;
        u16 vid = 0;
        u8 state;
 
@@ -137,7 +140,9 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
        if (p->flags & BR_LEARNING)
                br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, 0);
 
-       local_rcv = !!(br->dev->flags & IFF_PROMISC);
+       promisc = !!(br->dev->flags & IFF_PROMISC);
+       local_rcv = promisc;
+
        if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) {
                /* by definition the broadcast is also a multicast address */
                if (is_broadcast_ether_addr(eth_hdr(skb)->h_dest)) {
@@ -200,7 +205,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
                unsigned long now = jiffies;
 
                if (test_bit(BR_FDB_LOCAL, &dst->flags))
-                       return br_pass_frame_up(skb);
+                       return br_pass_frame_up(skb, false);
 
                if (now != dst->used)
                        dst->used = now;
@@ -213,7 +218,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
        }
 
        if (local_rcv)
-               return br_pass_frame_up(skb);
+               return br_pass_frame_up(skb, promisc);
 
 out:
        return 0;
@@ -386,6 +391,8 @@ static rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
                                goto forward;
                }
 
+               BR_INPUT_SKB_CB(skb)->promisc = false;
+
                /* The else clause should be hit when nf_hook():
                 *   - returns < 0 (drop/error)
                 *   - returns = 0 (stolen/nf_queue)
index 35e10c5a766d550e0c5cb85cf5a0c4835b52a89d..22e35623c148ac41056d7c24e3996227726ec1a6 100644 (file)
@@ -600,11 +600,17 @@ static unsigned int br_nf_local_in(void *priv,
                                   struct sk_buff *skb,
                                   const struct nf_hook_state *state)
 {
+       bool promisc = BR_INPUT_SKB_CB(skb)->promisc;
        struct nf_conntrack *nfct = skb_nfct(skb);
        const struct nf_ct_hook *ct_hook;
        struct nf_conn *ct;
        int ret;
 
+       if (promisc) {
+               nf_reset_ct(skb);
+               return NF_ACCEPT;
+       }
+
        if (!nfct || skb->pkt_type == PACKET_HOST)
                return NF_ACCEPT;
 
index 86ea5e6689b5ce49a4b71b383893d2ef5b53d110..d4bedc87b1d8f1bcf96c714fc80078227470550a 100644 (file)
@@ -589,6 +589,7 @@ struct br_input_skb_cb {
 #endif
        u8 proxyarp_replied:1;
        u8 src_port_isolated:1;
+       u8 promisc:1;
 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
        u8 vlan_filtered:1;
 #endif
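Context for the new bit: the bridge overlays its private per-packet state on the 48-byte skb->cb scratch area, so recording "this frame was delivered up only because of promiscuous mode" costs one bit and travels with the skb into the NF_BR_LOCAL_IN hook, where br_nf_local_in() above and nf_ct_bridge_in() below use it to skip conntrack confirmation. The accessor is the existing one from net/bridge/br_private.h, shown here only for orientation:

    #define BR_INPUT_SKB_CB(__skb)	((struct br_input_skb_cb *)(__skb)->cb)

    /* producer (br_input.c):    BR_INPUT_SKB_CB(skb)->promisc = promisc;   */
    /* consumers (nf hooks):     if (BR_INPUT_SKB_CB(skb)->promisc) ...     */
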
index 6f877e31709bad3646ea15bf3a96999ed275bdc1..c3c51b9a68265b443326432274e7fd75675e0e28 100644 (file)
@@ -294,18 +294,24 @@ static unsigned int nf_ct_bridge_pre(void *priv, struct sk_buff *skb,
 static unsigned int nf_ct_bridge_in(void *priv, struct sk_buff *skb,
                                    const struct nf_hook_state *state)
 {
-       enum ip_conntrack_info ctinfo;
+       bool promisc = BR_INPUT_SKB_CB(skb)->promisc;
+       struct nf_conntrack *nfct = skb_nfct(skb);
        struct nf_conn *ct;
 
-       if (skb->pkt_type == PACKET_HOST)
+       if (promisc) {
+               nf_reset_ct(skb);
+               return NF_ACCEPT;
+       }
+
+       if (!nfct || skb->pkt_type == PACKET_HOST)
                return NF_ACCEPT;
 
        /* nf_conntrack_confirm() cannot handle concurrent clones,
         * this happens for broad/multicast frames with e.g. macvlan on top
         * of the bridge device.
         */
-       ct = nf_ct_get(skb, &ctinfo);
-       if (!ct || nf_ct_is_confirmed(ct) || nf_ct_is_template(ct))
+       ct = container_of(nfct, struct nf_conn, ct_general);
+       if (nf_ct_is_confirmed(ct) || nf_ct_is_template(ct))
                return NF_ACCEPT;
 
        /* let inet prerouting call conntrack again */
index c9b8412f1c9d3a5f581861fb0ed1e95186d2c876..c365aa06f886f7edc70dd92612afa84d849763ef 100644 (file)
@@ -3791,6 +3791,10 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
                return rc;
        }
 
+       if (unlikely(READ_ONCE(q->owner) == smp_processor_id())) {
+               kfree_skb_reason(skb, SKB_DROP_REASON_TC_RECLASSIFY_LOOP);
+               return NET_XMIT_DROP;
+       }
        /*
         * Heuristic to force contended enqueues to serialize on a
         * separate lock before trying to get qdisc main lock.
@@ -3830,7 +3834,9 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
                qdisc_run_end(q);
                rc = NET_XMIT_SUCCESS;
        } else {
+               WRITE_ONCE(q->owner, smp_processor_id());
                rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
+               WRITE_ONCE(q->owner, -1);
                if (qdisc_run_begin(q)) {
                        if (unlikely(contended)) {
                                spin_unlock(&q->busylock);
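The owner field (initialized to -1 in qdisc_alloc() further down) implements a lockless reentrancy guard: a CPU records its id around the enqueue, and if the same CPU re-enters __dev_xmit_skb() for the same qdisc — e.g. a TC filter/action reinjects the packet toward the same device — the packet is dropped instead of deadlocking on the qdisc locks. Distilled into a pattern sketch (not the verbatim kernel code; drop_recursive() is a placeholder), relying on the fact that only the owning CPU ever writes its own id:

    if (READ_ONCE(q->owner) == smp_processor_id())
    	return drop_recursive(skb);	/* same CPU re-entered: bail out */

    WRITE_ONCE(q->owner, smp_processor_id());
    rc = enqueue(skb, q);			/* may recurse via TC actions */
    WRITE_ONCE(q->owner, -1);
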
index 9505f9d188ff257a8ca35f30ee111c2f19805a5a..6eef15648b7b0853fb249288bf4545dca3a2cf85 100644 (file)
@@ -21,7 +21,8 @@ nf_flow_offload_inet_hook(void *priv, struct sk_buff *skb,
                proto = veth->h_vlan_encapsulated_proto;
                break;
        case htons(ETH_P_PPP_SES):
-               proto = nf_flow_pppoe_proto(skb);
+               if (!nf_flow_pppoe_proto(skb, &proto))
+                       return NF_ACCEPT;
                break;
        default:
                proto = skb->protocol;
index e45fade764096182443814e8dcd70700e7956742..5383bed3d3e002661f01468e1a8bef8425e229b4 100644 (file)
@@ -157,7 +157,7 @@ static void nf_flow_tuple_encap(struct sk_buff *skb,
                tuple->encap[i].proto = skb->protocol;
                break;
        case htons(ETH_P_PPP_SES):
-               phdr = (struct pppoe_hdr *)skb_mac_header(skb);
+               phdr = (struct pppoe_hdr *)skb_network_header(skb);
                tuple->encap[i].id = ntohs(phdr->sid);
                tuple->encap[i].proto = skb->protocol;
                break;
@@ -273,10 +273,11 @@ static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
        return NF_STOLEN;
 }
 
-static bool nf_flow_skb_encap_protocol(const struct sk_buff *skb, __be16 proto,
+static bool nf_flow_skb_encap_protocol(struct sk_buff *skb, __be16 proto,
                                       u32 *offset)
 {
        struct vlan_ethhdr *veth;
+       __be16 inner_proto;
 
        switch (skb->protocol) {
        case htons(ETH_P_8021Q):
@@ -287,7 +288,8 @@ static bool nf_flow_skb_encap_protocol(const struct sk_buff *skb, __be16 proto,
                }
                break;
        case htons(ETH_P_PPP_SES):
-               if (nf_flow_pppoe_proto(skb) == proto) {
+               if (nf_flow_pppoe_proto(skb, &inner_proto) &&
+                   inner_proto == proto) {
                        *offset += PPPOE_SES_HLEN;
                        return true;
                }
@@ -316,7 +318,7 @@ static void nf_flow_encap_pop(struct sk_buff *skb,
                        skb_reset_network_header(skb);
                        break;
                case htons(ETH_P_PPP_SES):
-                       skb->protocol = nf_flow_pppoe_proto(skb);
+                       skb->protocol = __nf_flow_pppoe_proto(skb);
                        skb_pull(skb, PPPOE_SES_HLEN);
                        skb_reset_network_header(skb);
                        break;
index 21581bae700c4a3454a1143bdc561d7711f3a833..0e697e53a7902b176ca5654799f2f389b6950cc0 100644 (file)
@@ -594,6 +594,12 @@ static int nft_mapelem_deactivate(const struct nft_ctx *ctx,
                                  const struct nft_set_iter *iter,
                                  struct nft_elem_priv *elem_priv)
 {
+       struct nft_set_ext *ext = nft_set_elem_ext(set, elem_priv);
+
+       if (!nft_set_elem_active(ext, iter->genmask))
+               return 0;
+
+       nft_set_elem_change_active(ctx->net, set, ext);
        nft_setelem_data_deactivate(ctx->net, set, elem_priv);
 
        return 0;
@@ -617,6 +623,7 @@ static void nft_map_catchall_deactivate(const struct nft_ctx *ctx,
                if (!nft_set_elem_active(ext, genmask))
                        continue;
 
+               nft_set_elem_change_active(ctx->net, set, ext);
                nft_setelem_data_deactivate(ctx->net, set, catchall->elem);
                break;
        }
@@ -626,6 +633,7 @@ static void nft_map_deactivate(const struct nft_ctx *ctx, struct nft_set *set)
 {
        struct nft_set_iter iter = {
                .genmask        = nft_genmask_next(ctx->net),
+               .type           = NFT_ITER_UPDATE,
                .fn             = nft_mapelem_deactivate,
        };
 
@@ -3048,7 +3056,7 @@ static const struct nft_expr_type *__nft_expr_type_get(u8 family,
 {
        const struct nft_expr_type *type, *candidate = NULL;
 
-       list_for_each_entry(type, &nf_tables_expressions, list) {
+       list_for_each_entry_rcu(type, &nf_tables_expressions, list) {
                if (!nla_strcmp(nla, type->name)) {
                        if (!type->family && !candidate)
                                candidate = type;
@@ -3080,9 +3088,13 @@ static const struct nft_expr_type *nft_expr_type_get(struct net *net,
        if (nla == NULL)
                return ERR_PTR(-EINVAL);
 
+       rcu_read_lock();
        type = __nft_expr_type_get(family, nla);
-       if (type != NULL && try_module_get(type->owner))
+       if (type != NULL && try_module_get(type->owner)) {
+               rcu_read_unlock();
                return type;
+       }
+       rcu_read_unlock();
 
        lockdep_nfnl_nft_mutex_not_held();
 #ifdef CONFIG_MODULES
@@ -3863,6 +3875,9 @@ int nft_setelem_validate(const struct nft_ctx *ctx, struct nft_set *set,
        const struct nft_data *data;
        int err;
 
+       if (!nft_set_elem_active(ext, iter->genmask))
+               return 0;
+
        if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) &&
            *nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END)
                return 0;
@@ -3886,17 +3901,20 @@ int nft_setelem_validate(const struct nft_ctx *ctx, struct nft_set *set,
 
 int nft_set_catchall_validate(const struct nft_ctx *ctx, struct nft_set *set)
 {
-       u8 genmask = nft_genmask_next(ctx->net);
+       struct nft_set_iter dummy_iter = {
+               .genmask        = nft_genmask_next(ctx->net),
+       };
        struct nft_set_elem_catchall *catchall;
+
        struct nft_set_ext *ext;
        int ret = 0;
 
        list_for_each_entry_rcu(catchall, &set->catchall_list, list) {
                ext = nft_set_elem_ext(set, catchall->elem);
-               if (!nft_set_elem_active(ext, genmask))
+               if (!nft_set_elem_active(ext, dummy_iter.genmask))
                        continue;
 
-               ret = nft_setelem_validate(ctx, set, NULL, catchall->elem);
+               ret = nft_setelem_validate(ctx, set, &dummy_iter, catchall->elem);
                if (ret < 0)
                        return ret;
        }
@@ -5393,6 +5411,11 @@ static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx,
                                        const struct nft_set_iter *iter,
                                        struct nft_elem_priv *elem_priv)
 {
+       const struct nft_set_ext *ext = nft_set_elem_ext(set, elem_priv);
+
+       if (!nft_set_elem_active(ext, iter->genmask))
+               return 0;
+
        return nft_setelem_data_validate(ctx, set, elem_priv);
 }
 
@@ -5437,6 +5460,7 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
                }
 
                iter.genmask    = nft_genmask_next(ctx->net);
+               iter.type       = NFT_ITER_UPDATE;
                iter.skip       = 0;
                iter.count      = 0;
                iter.err        = 0;
@@ -5484,6 +5508,13 @@ static int nft_mapelem_activate(const struct nft_ctx *ctx,
                                const struct nft_set_iter *iter,
                                struct nft_elem_priv *elem_priv)
 {
+       struct nft_set_ext *ext = nft_set_elem_ext(set, elem_priv);
+
+       /* called from abort path, reverse check to undo changes. */
+       if (nft_set_elem_active(ext, iter->genmask))
+               return 0;
+
+       nft_clear(ctx->net, ext);
        nft_setelem_data_activate(ctx->net, set, elem_priv);
 
        return 0;
@@ -5501,6 +5532,7 @@ static void nft_map_catchall_activate(const struct nft_ctx *ctx,
                if (!nft_set_elem_active(ext, genmask))
                        continue;
 
+               nft_clear(ctx->net, ext);
                nft_setelem_data_activate(ctx->net, set, catchall->elem);
                break;
        }
@@ -5510,6 +5542,7 @@ static void nft_map_activate(const struct nft_ctx *ctx, struct nft_set *set)
 {
        struct nft_set_iter iter = {
                .genmask        = nft_genmask_next(ctx->net),
+               .type           = NFT_ITER_UPDATE,
                .fn             = nft_mapelem_activate,
        };
 
@@ -5774,6 +5807,9 @@ static int nf_tables_dump_setelem(const struct nft_ctx *ctx,
        const struct nft_set_ext *ext = nft_set_elem_ext(set, elem_priv);
        struct nft_set_dump_args *args;
 
+       if (!nft_set_elem_active(ext, iter->genmask))
+               return 0;
+
        if (nft_set_elem_expired(ext) || nft_set_elem_is_dead(ext))
                return 0;
 
@@ -5884,6 +5920,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
        args.skb                = skb;
        args.reset              = dump_ctx->reset;
        args.iter.genmask       = nft_genmask_cur(net);
+       args.iter.type          = NFT_ITER_READ;
        args.iter.skip          = cb->args[0];
        args.iter.count         = 0;
        args.iter.err           = 0;
@@ -6623,7 +6660,7 @@ static void nft_setelem_activate(struct net *net, struct nft_set *set,
        struct nft_set_ext *ext = nft_set_elem_ext(set, elem_priv);
 
        if (nft_setelem_is_catchall(set, elem_priv)) {
-               nft_set_elem_change_active(net, set, ext);
+               nft_clear(net, ext);
        } else {
                set->ops->activate(net, set, elem_priv);
        }
@@ -7182,6 +7219,16 @@ void nft_data_hold(const struct nft_data *data, enum nft_data_types type)
        }
 }
 
+static int nft_setelem_active_next(const struct net *net,
+                                  const struct nft_set *set,
+                                  struct nft_elem_priv *elem_priv)
+{
+       const struct nft_set_ext *ext = nft_set_elem_ext(set, elem_priv);
+       u8 genmask = nft_genmask_next(net);
+
+       return nft_set_elem_active(ext, genmask);
+}
+
 static void nft_setelem_data_activate(const struct net *net,
                                      const struct nft_set *set,
                                      struct nft_elem_priv *elem_priv)
@@ -7305,8 +7352,12 @@ static int nft_setelem_flush(const struct nft_ctx *ctx,
                             const struct nft_set_iter *iter,
                             struct nft_elem_priv *elem_priv)
 {
+       const struct nft_set_ext *ext = nft_set_elem_ext(set, elem_priv);
        struct nft_trans *trans;
 
+       if (!nft_set_elem_active(ext, iter->genmask))
+               return 0;
+
        trans = nft_trans_alloc_gfp(ctx, NFT_MSG_DELSETELEM,
                                    sizeof(struct nft_trans_elem), GFP_ATOMIC);
        if (!trans)
@@ -7368,6 +7419,7 @@ static int nft_set_flush(struct nft_ctx *ctx, struct nft_set *set, u8 genmask)
 {
        struct nft_set_iter iter = {
                .genmask        = genmask,
+               .type           = NFT_ITER_UPDATE,
                .fn             = nft_setelem_flush,
        };
 
@@ -7603,7 +7655,7 @@ static const struct nft_object_type *__nft_obj_type_get(u32 objtype, u8 family)
 {
        const struct nft_object_type *type;
 
-       list_for_each_entry(type, &nf_tables_objects, list) {
+       list_for_each_entry_rcu(type, &nf_tables_objects, list) {
                if (type->family != NFPROTO_UNSPEC &&
                    type->family != family)
                        continue;
@@ -7619,9 +7671,13 @@ nft_obj_type_get(struct net *net, u32 objtype, u8 family)
 {
        const struct nft_object_type *type;
 
+       rcu_read_lock();
        type = __nft_obj_type_get(objtype, family);
-       if (type != NULL && try_module_get(type->owner))
+       if (type != NULL && try_module_get(type->owner)) {
+               rcu_read_unlock();
                return type;
+       }
+       rcu_read_unlock();
 
        lockdep_nfnl_nft_mutex_not_held();
 #ifdef CONFIG_MODULES
@@ -10590,8 +10646,10 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
                case NFT_MSG_DESTROYSETELEM:
                        te = (struct nft_trans_elem *)trans->data;
 
-                       nft_setelem_data_activate(net, te->set, te->elem_priv);
-                       nft_setelem_activate(net, te->set, te->elem_priv);
+                       if (!nft_setelem_active_next(net, te->set, te->elem_priv)) {
+                               nft_setelem_data_activate(net, te->set, te->elem_priv);
+                               nft_setelem_activate(net, te->set, te->elem_priv);
+                       }
                        if (!nft_setelem_is_catchall(te->set, te->elem_priv))
                                te->set->ndeact--;
 
@@ -10779,6 +10837,9 @@ static int nf_tables_loop_check_setelem(const struct nft_ctx *ctx,
 {
        const struct nft_set_ext *ext = nft_set_elem_ext(set, elem_priv);
 
+       if (!nft_set_elem_active(ext, iter->genmask))
+               return 0;
+
        if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) &&
            *nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END)
                return 0;
@@ -10863,6 +10924,7 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
                                continue;
 
                        iter.genmask    = nft_genmask_next(ctx->net);
+                       iter.type       = NFT_ITER_UPDATE;
                        iter.skip       = 0;
                        iter.count      = 0;
                        iter.err        = 0;
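The recurring pattern in this file: set walks no longer filter elements by generation mask themselves (the backend hunks below delete those checks), so every walk callback must do its own nft_set_elem_active() test, and iter.type tells backends such as pipapo whether to walk the committed match (NFT_ITER_READ, netlink dumps) or the uncommitted clone (NFT_ITER_UPDATE, transaction paths). The callback shape, sketched from the definitions assumed in include/net/netfilter/nf_tables.h:

    static int example_set_walk_cb(const struct nft_ctx *ctx, struct nft_set *set,
    			       const struct nft_set_iter *iter,
    			       struct nft_elem_priv *elem_priv)
    {
    	struct nft_set_ext *ext = nft_set_elem_ext(set, elem_priv);

    	/* Backends now report every element; filter per generation here. */
    	if (!nft_set_elem_active(ext, iter->genmask))
    		return 0;

    	/* ... act on the element ... */
    	return 0;
    }
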
index 870e5b113d13ec903b00a6f7921d7c162e57418f..87c18eddb06894e72bbe02ad065fc0c579df785d 100644 (file)
@@ -216,6 +216,7 @@ static int nft_lookup_validate(const struct nft_ctx *ctx,
                return 0;
 
        iter.genmask    = nft_genmask_next(ctx->net);
+       iter.type       = NFT_ITER_UPDATE;
        iter.skip       = 0;
        iter.count      = 0;
        iter.err        = 0;
index 32df7a16835da3e1d850d34a8236e0a45f06f026..1caa04619dc6da37f845acc65c8ca86c173096de 100644 (file)
@@ -172,7 +172,7 @@ static void nft_bitmap_activate(const struct net *net,
        nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off);
        /* Enter 11 state. */
        priv->bitmap[idx] |= (genmask << off);
-       nft_set_elem_change_active(net, set, &be->ext);
+       nft_clear(net, &be->ext);
 }
 
 static void nft_bitmap_flush(const struct net *net,
@@ -222,8 +222,6 @@ static void nft_bitmap_walk(const struct nft_ctx *ctx,
        list_for_each_entry_rcu(be, &priv->list, head) {
                if (iter->count < iter->skip)
                        goto cont;
-               if (!nft_set_elem_active(&be->ext, iter->genmask))
-                       goto cont;
 
                iter->err = iter->fn(ctx, set, iter, &be->priv);
 
index 6968a3b342367c6c0cb0df7523fdfd5864038802..daa56dda737ae2e6b4727c2d3930d68e58a33efb 100644 (file)
@@ -199,7 +199,7 @@ static void nft_rhash_activate(const struct net *net, const struct nft_set *set,
 {
        struct nft_rhash_elem *he = nft_elem_priv_cast(elem_priv);
 
-       nft_set_elem_change_active(net, set, &he->ext);
+       nft_clear(net, &he->ext);
 }
 
 static void nft_rhash_flush(const struct net *net,
@@ -286,8 +286,6 @@ static void nft_rhash_walk(const struct nft_ctx *ctx, struct nft_set *set,
 
                if (iter->count < iter->skip)
                        goto cont;
-               if (!nft_set_elem_active(&he->ext, iter->genmask))
-                       goto cont;
 
                iter->err = iter->fn(ctx, set, iter, &he->priv);
                if (iter->err < 0)
@@ -599,7 +597,7 @@ static void nft_hash_activate(const struct net *net, const struct nft_set *set,
 {
        struct nft_hash_elem *he = nft_elem_priv_cast(elem_priv);
 
-       nft_set_elem_change_active(net, set, &he->ext);
+       nft_clear(net, &he->ext);
 }
 
 static void nft_hash_flush(const struct net *net,
@@ -652,8 +650,6 @@ static void nft_hash_walk(const struct nft_ctx *ctx, struct nft_set *set,
                hlist_for_each_entry_rcu(he, &priv->table[i], node) {
                        if (iter->count < iter->skip)
                                goto cont;
-                       if (!nft_set_elem_active(&he->ext, iter->genmask))
-                               goto cont;
 
                        iter->err = iter->fn(ctx, set, iter, &he->priv);
                        if (iter->err < 0)
index b3b282de802def7a01316e5e8ed809ba92d0fd8d..b42a34087e8076d82a820b334e869fa5fa5d71ba 100644 (file)
  * Return: -1 on no match, bit position on 'match_only', 0 otherwise.
  */
 int pipapo_refill(unsigned long *map, int len, int rules, unsigned long *dst,
-                 union nft_pipapo_map_bucket *mt, bool match_only)
+                 const union nft_pipapo_map_bucket *mt, bool match_only)
 {
        unsigned long bitset;
        int k, ret = -1;
@@ -412,9 +412,9 @@ bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
        struct nft_pipapo_scratch *scratch;
        unsigned long *res_map, *fill_map;
        u8 genmask = nft_genmask_cur(net);
+       const struct nft_pipapo_match *m;
+       const struct nft_pipapo_field *f;
        const u8 *rp = (const u8 *)key;
-       struct nft_pipapo_match *m;
-       struct nft_pipapo_field *f;
        bool map_index;
        int i;
 
@@ -519,11 +519,13 @@ static struct nft_pipapo_elem *pipapo_get(const struct net *net,
 {
        struct nft_pipapo_elem *ret = ERR_PTR(-ENOENT);
        struct nft_pipapo *priv = nft_set_priv(set);
-       struct nft_pipapo_match *m = priv->clone;
        unsigned long *res_map, *fill_map = NULL;
-       struct nft_pipapo_field *f;
+       const struct nft_pipapo_match *m;
+       const struct nft_pipapo_field *f;
        int i;
 
+       m = priv->clone;
+
        res_map = kmalloc_array(m->bsize_max, sizeof(*res_map), GFP_ATOMIC);
        if (!res_map) {
                ret = ERR_PTR(-ENOMEM);
@@ -1597,7 +1599,7 @@ static void pipapo_gc(struct nft_set *set, struct nft_pipapo_match *m)
 
        while ((rules_f0 = pipapo_rules_same_key(m->f, first_rule))) {
                union nft_pipapo_map_bucket rulemap[NFT_PIPAPO_MAX_FIELDS];
-               struct nft_pipapo_field *f;
+               const struct nft_pipapo_field *f;
                int i, start, rules_fx;
 
                start = first_rule;
@@ -1771,7 +1773,7 @@ static void nft_pipapo_activate(const struct net *net,
 {
        struct nft_pipapo_elem *e = nft_elem_priv_cast(elem_priv);
 
-       nft_set_elem_change_active(net, set, &e->ext);
+       nft_clear(net, &e->ext);
 }
 
 /**
@@ -2000,6 +2002,8 @@ static void nft_pipapo_remove(const struct net *net, const struct nft_set *set,
                rules_fx = rules_f0;
 
                nft_pipapo_for_each_field(f, i, m) {
+                       bool last = i == m->field_count - 1;
+
                        if (!pipapo_match_field(f, start, rules_fx,
                                                match_start, match_end))
                                break;
@@ -2012,16 +2016,18 @@ static void nft_pipapo_remove(const struct net *net, const struct nft_set *set,
 
                        match_start += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
                        match_end += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
-               }
 
-               if (i == m->field_count) {
-                       priv->dirty = true;
-                       pipapo_drop(m, rulemap);
-                       return;
+                       if (last && f->mt[rulemap[i].to].e == e) {
+                               priv->dirty = true;
+                               pipapo_drop(m, rulemap);
+                               return;
+                       }
                }
 
                first_rule += rules_f0;
        }
+
+       WARN_ON_ONCE(1); /* elem_priv not found */
 }
 
 /**
@@ -2038,13 +2044,15 @@ static void nft_pipapo_walk(const struct nft_ctx *ctx, struct nft_set *set,
                            struct nft_set_iter *iter)
 {
        struct nft_pipapo *priv = nft_set_priv(set);
-       struct net *net = read_pnet(&set->net);
-       struct nft_pipapo_match *m;
-       struct nft_pipapo_field *f;
+       const struct nft_pipapo_match *m;
+       const struct nft_pipapo_field *f;
        int i, r;
 
+       WARN_ON_ONCE(iter->type != NFT_ITER_READ &&
+                    iter->type != NFT_ITER_UPDATE);
+
        rcu_read_lock();
-       if (iter->genmask == nft_genmask_cur(net))
+       if (iter->type == NFT_ITER_READ)
                m = rcu_dereference(priv->match);
        else
                m = priv->clone;
@@ -2066,9 +2074,6 @@ static void nft_pipapo_walk(const struct nft_ctx *ctx, struct nft_set *set,
 
                e = f->mt[r].e;
 
-               if (!nft_set_elem_active(&e->ext, iter->genmask))
-                       goto cont;
-
                iter->err = iter->fn(ctx, set, iter, &e->priv);
                if (iter->err < 0)
                        goto out;
index 3842c7341a9f40a088d78532c4b610f3a99d7d23..42464e7c24ac04fbecbc305ff13460ca5be45d2d 100644 (file)
@@ -187,7 +187,7 @@ struct nft_pipapo_elem {
 };
 
 int pipapo_refill(unsigned long *map, int len, int rules, unsigned long *dst,
-                 union nft_pipapo_map_bucket *mt, bool match_only);
+                 const union nft_pipapo_map_bucket *mt, bool match_only);
 
 /**
  * pipapo_and_field_buckets_4bit() - Intersect 4-bit buckets
@@ -195,7 +195,7 @@ int pipapo_refill(unsigned long *map, int len, int rules, unsigned long *dst,
  * @dst:       Area to store result
  * @data:      Input data selecting table buckets
  */
-static inline void pipapo_and_field_buckets_4bit(struct nft_pipapo_field *f,
+static inline void pipapo_and_field_buckets_4bit(const struct nft_pipapo_field *f,
                                                 unsigned long *dst,
                                                 const u8 *data)
 {
@@ -223,7 +223,7 @@ static inline void pipapo_and_field_buckets_4bit(struct nft_pipapo_field *f,
  * @dst:       Area to store result
  * @data:      Input data selecting table buckets
  */
-static inline void pipapo_and_field_buckets_8bit(struct nft_pipapo_field *f,
+static inline void pipapo_and_field_buckets_8bit(const struct nft_pipapo_field *f,
                                                 unsigned long *dst,
                                                 const u8 *data)
 {
index a3a8ddca991894b28aa1a1cd7c84ba0380366b5f..d08407d589eac54b10e4b4cc107e25adc875e9e2 100644 (file)
@@ -212,8 +212,9 @@ static int nft_pipapo_avx2_refill(int offset, unsigned long *map,
  * word index to be checked next (i.e. first filled word).
  */
 static int nft_pipapo_avx2_lookup_4b_2(unsigned long *map, unsigned long *fill,
-                                      struct nft_pipapo_field *f, int offset,
-                                      const u8 *pkt, bool first, bool last)
+                                      const struct nft_pipapo_field *f,
+                                      int offset, const u8 *pkt,
+                                      bool first, bool last)
 {
        int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
        u8 pg[2] = { pkt[0] >> 4, pkt[0] & 0xf };
@@ -274,8 +275,9 @@ static int nft_pipapo_avx2_lookup_4b_2(unsigned long *map, unsigned long *fill,
  * word index to be checked next (i.e. first filled word).
  */
 static int nft_pipapo_avx2_lookup_4b_4(unsigned long *map, unsigned long *fill,
-                                      struct nft_pipapo_field *f, int offset,
-                                      const u8 *pkt, bool first, bool last)
+                                      const struct nft_pipapo_field *f,
+                                      int offset, const u8 *pkt,
+                                      bool first, bool last)
 {
        int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
        u8 pg[4] = { pkt[0] >> 4, pkt[0] & 0xf, pkt[1] >> 4, pkt[1] & 0xf };
@@ -350,8 +352,9 @@ static int nft_pipapo_avx2_lookup_4b_4(unsigned long *map, unsigned long *fill,
  * word index to be checked next (i.e. first filled word).
  */
 static int nft_pipapo_avx2_lookup_4b_8(unsigned long *map, unsigned long *fill,
-                                      struct nft_pipapo_field *f, int offset,
-                                      const u8 *pkt, bool first, bool last)
+                                      const struct nft_pipapo_field *f,
+                                      int offset, const u8 *pkt,
+                                      bool first, bool last)
 {
        u8 pg[8] = {  pkt[0] >> 4,  pkt[0] & 0xf,  pkt[1] >> 4,  pkt[1] & 0xf,
                      pkt[2] >> 4,  pkt[2] & 0xf,  pkt[3] >> 4,  pkt[3] & 0xf,
@@ -445,8 +448,9 @@ static int nft_pipapo_avx2_lookup_4b_8(unsigned long *map, unsigned long *fill,
  * word index to be checked next (i.e. first filled word).
  */
 static int nft_pipapo_avx2_lookup_4b_12(unsigned long *map, unsigned long *fill,
-                                       struct nft_pipapo_field *f, int offset,
-                                       const u8 *pkt, bool first, bool last)
+                                       const struct nft_pipapo_field *f,
+                                       int offset, const u8 *pkt,
+                                       bool first, bool last)
 {
        u8 pg[12] = {  pkt[0] >> 4,  pkt[0] & 0xf,  pkt[1] >> 4,  pkt[1] & 0xf,
                       pkt[2] >> 4,  pkt[2] & 0xf,  pkt[3] >> 4,  pkt[3] & 0xf,
@@ -534,8 +538,9 @@ static int nft_pipapo_avx2_lookup_4b_12(unsigned long *map, unsigned long *fill,
  * word index to be checked next (i.e. first filled word).
  */
 static int nft_pipapo_avx2_lookup_4b_32(unsigned long *map, unsigned long *fill,
-                                       struct nft_pipapo_field *f, int offset,
-                                       const u8 *pkt, bool first, bool last)
+                                       const struct nft_pipapo_field *f,
+                                       int offset, const u8 *pkt,
+                                       bool first, bool last)
 {
        u8 pg[32] = {  pkt[0] >> 4,  pkt[0] & 0xf,  pkt[1] >> 4,  pkt[1] & 0xf,
                       pkt[2] >> 4,  pkt[2] & 0xf,  pkt[3] >> 4,  pkt[3] & 0xf,
@@ -669,8 +674,9 @@ static int nft_pipapo_avx2_lookup_4b_32(unsigned long *map, unsigned long *fill,
  * word index to be checked next (i.e. first filled word).
  */
 static int nft_pipapo_avx2_lookup_8b_1(unsigned long *map, unsigned long *fill,
-                                      struct nft_pipapo_field *f, int offset,
-                                      const u8 *pkt, bool first, bool last)
+                                      const struct nft_pipapo_field *f,
+                                      int offset, const u8 *pkt,
+                                      bool first, bool last)
 {
        int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
        unsigned long *lt = f->lt, bsize = f->bsize;
@@ -726,8 +732,9 @@ static int nft_pipapo_avx2_lookup_8b_1(unsigned long *map, unsigned long *fill,
  * word index to be checked next (i.e. first filled word).
  */
 static int nft_pipapo_avx2_lookup_8b_2(unsigned long *map, unsigned long *fill,
-                                      struct nft_pipapo_field *f, int offset,
-                                      const u8 *pkt, bool first, bool last)
+                                      const struct nft_pipapo_field *f,
+                                      int offset, const u8 *pkt,
+                                      bool first, bool last)
 {
        int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
        unsigned long *lt = f->lt, bsize = f->bsize;
@@ -790,8 +797,9 @@ static int nft_pipapo_avx2_lookup_8b_2(unsigned long *map, unsigned long *fill,
  * word index to be checked next (i.e. first filled word).
  */
 static int nft_pipapo_avx2_lookup_8b_4(unsigned long *map, unsigned long *fill,
-                                      struct nft_pipapo_field *f, int offset,
-                                      const u8 *pkt, bool first, bool last)
+                                      const struct nft_pipapo_field *f,
+                                      int offset, const u8 *pkt,
+                                      bool first, bool last)
 {
        int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
        unsigned long *lt = f->lt, bsize = f->bsize;
@@ -865,8 +873,9 @@ static int nft_pipapo_avx2_lookup_8b_4(unsigned long *map, unsigned long *fill,
  * word index to be checked next (i.e. first filled word).
  */
 static int nft_pipapo_avx2_lookup_8b_6(unsigned long *map, unsigned long *fill,
-                                      struct nft_pipapo_field *f, int offset,
-                                      const u8 *pkt, bool first, bool last)
+                                      const struct nft_pipapo_field *f,
+                                      int offset, const u8 *pkt,
+                                      bool first, bool last)
 {
        int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
        unsigned long *lt = f->lt, bsize = f->bsize;
@@ -950,8 +959,9 @@ static int nft_pipapo_avx2_lookup_8b_6(unsigned long *map, unsigned long *fill,
  * word index to be checked next (i.e. first filled word).
  */
 static int nft_pipapo_avx2_lookup_8b_16(unsigned long *map, unsigned long *fill,
-                                       struct nft_pipapo_field *f, int offset,
-                                       const u8 *pkt, bool first, bool last)
+                                       const struct nft_pipapo_field *f,
+                                       int offset, const u8 *pkt,
+                                       bool first, bool last)
 {
        int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
        unsigned long *lt = f->lt, bsize = f->bsize;
@@ -1042,8 +1052,9 @@ static int nft_pipapo_avx2_lookup_8b_16(unsigned long *map, unsigned long *fill,
  * word index to be checked next (i.e. first filled word).
  */
 static int nft_pipapo_avx2_lookup_slow(unsigned long *map, unsigned long *fill,
-                                       struct nft_pipapo_field *f, int offset,
-                                       const u8 *pkt, bool first, bool last)
+                                       const struct nft_pipapo_field *f,
+                                       int offset, const u8 *pkt,
+                                       bool first, bool last)
 {
        unsigned long bsize = f->bsize;
        int i, ret = -1, b;
@@ -1119,9 +1130,9 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
        struct nft_pipapo *priv = nft_set_priv(set);
        struct nft_pipapo_scratch *scratch;
        u8 genmask = nft_genmask_cur(net);
+       const struct nft_pipapo_match *m;
+       const struct nft_pipapo_field *f;
        const u8 *rp = (const u8 *)key;
-       struct nft_pipapo_match *m;
-       struct nft_pipapo_field *f;
        unsigned long *res, *fill;
        bool map_index;
        int i, ret = 0;
index 9944fe479e5361dc140f75be8b90bf3c5deb40f6..b7ea21327549b353c087b3e607e722f391ea94c1 100644 (file)
@@ -532,7 +532,7 @@ static void nft_rbtree_activate(const struct net *net,
 {
        struct nft_rbtree_elem *rbe = nft_elem_priv_cast(elem_priv);
 
-       nft_set_elem_change_active(net, set, &rbe->ext);
+       nft_clear(net, &rbe->ext);
 }
 
 static void nft_rbtree_flush(const struct net *net,
@@ -600,8 +600,6 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
 
                if (iter->count < iter->skip)
                        goto cont;
-               if (!nft_set_elem_active(&rbe->ext, iter->genmask))
-                       goto cont;
 
                iter->err = iter->fn(ctx, set, iter, &rbe->priv);
                if (iter->err < 0) {
index 9b3e9262040b6ef6516752c558c8997bf4054123..a498b5d7c5d60f97c058ff6649cdb7102e3df72b 100644 (file)
@@ -973,6 +973,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
        sch->enqueue = ops->enqueue;
        sch->dequeue = ops->dequeue;
        sch->dev_queue = dev_queue;
+       sch->owner = -1;
        netdev_hold(dev, &sch->dev_tracker, GFP_KERNEL);
        refcount_set(&sch->refcnt, 1);
 
index e37cf913818a1d431ef27c073ab74d55ed7ff732..9df15a7bc2569acb134d37e8a6524114cef68705 100644 (file)
@@ -2602,7 +2602,9 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
                                        WRITE_ONCE(u->oob_skb, NULL);
                                        consume_skb(skb);
                                }
-                       } else if (!(flags & MSG_PEEK)) {
+                       } else if (flags & MSG_PEEK) {
+                               skb = NULL;
+                       } else {
                                skb_unlink(skb, &sk->sk_receive_queue);
                                WRITE_ONCE(u->oob_skb, NULL);
                                if (!WARN_ON_ONCE(skb_unref(skb)))
@@ -2680,18 +2682,16 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state,
                last = skb = skb_peek(&sk->sk_receive_queue);
                last_len = last ? last->len : 0;
 
+again:
 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
                if (skb) {
                        skb = manage_oob(skb, sk, flags, copied);
-                       if (!skb) {
+                       if (!skb && copied) {
                                unix_state_unlock(sk);
-                               if (copied)
-                                       break;
-                               goto redo;
+                               break;
                        }
                }
 #endif
-again:
                if (skb == NULL) {
                        if (copied >= target)
                                goto unlock;
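The net effect of the new MSG_PEEK branch: peeking at a stream whose next byte is a consumed out-of-band byte now yields no in-band data instead of replaying the OOB byte. A minimal userspace sketch (assumes a kernel built with CONFIG_AF_UNIX_OOB, v5.15+):

    #include <stdio.h>
    #include <sys/socket.h>

    int main(void)
    {
    	int sk[2];
    	char buf[16];
    	ssize_t n;

    	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sk))
    		return 1;
    	send(sk[0], "hello", 5, 0);
    	send(sk[0], "!", 1, MSG_OOB);	/* one out-of-band byte */

    	/* Peek should return only the 5 in-band bytes; the OOB byte
    	 * must never be replayed into the normal stream. */
    	n = recv(sk[1], buf, sizeof(buf), MSG_PEEK | MSG_DONTWAIT);
    	printf("peeked %zd bytes: %.*s\n", n, (int)n, buf);
    	return 0;
    }
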
index b141024830ecc831430c697858ce409a56c711af..ee6ac649df836d695133f93da9cfd0518cc1ae23 100644 (file)
@@ -428,7 +428,7 @@ static int cvt_ump_midi2_to_midi1(struct snd_seq_client *dest,
        midi1->note.group = midi2->note.group;
        midi1->note.status = midi2->note.status;
        midi1->note.channel = midi2->note.channel;
-       switch (midi2->note.status << 4) {
+       switch (midi2->note.status) {
        case UMP_MSG_STATUS_NOTE_ON:
        case UMP_MSG_STATUS_NOTE_OFF:
                midi1->note.note = midi2->note.note;
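The fix works because midi2->note.status already holds the bare 4-bit UMP status code — UMP_MSG_STATUS_NOTE_ON is 0x9, not the MIDI 1.0 status byte 0x90 — so the former << 4 compared 0x90 against 0x9-style constants and no case could ever match. Illustrated (constant value as we read it from include/sound/ump_msg.h):

    #define UMP_MSG_STATUS_NOTE_ON	0x9	/* 4-bit status nibble */

    unsigned int status = 0x9;		/* what midi2->note.status holds */

    switch (status << 4) {			/* old code: evaluates to 0x90 */
    case UMP_MSG_STATUS_NOTE_ON:		/* 0x9 — unreachable */
    	break;
    }
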
index 8c2467ed127ee5d27ca6a576132f117d518110dc..bf0618ab4fda005eda0145b0082799e68f7a4e90 100644 (file)
@@ -10215,6 +10215,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
        SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
        SND_PCI_QUIRK(0x152d, 0x1082, "Quanta NL3", ALC269_FIXUP_LIFEBOOK),
+       SND_PCI_QUIRK(0x152d, 0x1262, "Huawei NBLB-WAX9N", ALC2XX_FIXUP_HEADSET_MIC),
        SND_PCI_QUIRK(0x1558, 0x0353, "Clevo V35[05]SN[CDE]Q", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x1323, "Clevo N130ZU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x1325, "Clevo N15[01][CW]U", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
@@ -10320,6 +10321,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x222e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2231, "Thinkpad T560", ALC292_FIXUP_TPT460),
        SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460),
+       SND_PCI_QUIRK(0x17aa, 0x2234, "Thinkpad ICE-1", ALC287_FIXUP_TAS2781_I2C),
        SND_PCI_QUIRK(0x17aa, 0x2245, "Thinkpad T470", ALC298_FIXUP_TPT470_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2246, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2247, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
@@ -10381,8 +10383,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x3886, "Y780 VECO DUAL", ALC287_FIXUP_TAS2781_I2C),
        SND_PCI_QUIRK(0x17aa, 0x38a7, "Y780P AMD YG dual", ALC287_FIXUP_TAS2781_I2C),
        SND_PCI_QUIRK(0x17aa, 0x38a8, "Y780P AMD VECO dual", ALC287_FIXUP_TAS2781_I2C),
-       SND_PCI_QUIRK(0x17aa, 0x38a9, "Thinkbook 16P", ALC287_FIXUP_CS35L41_I2C_2),
-       SND_PCI_QUIRK(0x17aa, 0x38ab, "Thinkbook 16P", ALC287_FIXUP_CS35L41_I2C_2),
+       SND_PCI_QUIRK(0x17aa, 0x38a9, "Thinkbook 16P", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
+       SND_PCI_QUIRK(0x17aa, 0x38ab, "Thinkbook 16P", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
        SND_PCI_QUIRK(0x17aa, 0x38b4, "Legion Slim 7 16IRH8", ALC287_FIXUP_CS35L41_I2C_2),
        SND_PCI_QUIRK(0x17aa, 0x38b5, "Legion Slim 7 16IRH8", ALC287_FIXUP_CS35L41_I2C_2),
        SND_PCI_QUIRK(0x17aa, 0x38b6, "Legion Slim 7 16APH8", ALC287_FIXUP_CS35L41_I2C_2),
@@ -10442,6 +10444,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1d05, 0x115c, "TongFang GMxTGxx", ALC269_FIXUP_NO_SHUTUP),
        SND_PCI_QUIRK(0x1d05, 0x121b, "TongFang GMxAGxx", ALC269_FIXUP_NO_SHUTUP),
        SND_PCI_QUIRK(0x1d05, 0x1387, "TongFang GMxIXxx", ALC2XX_FIXUP_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1d17, 0x3288, "Haier Boyue G42", ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS),
        SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
        SND_PCI_QUIRK(0x1d72, 0x1701, "XiaomiNotebook Pro", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
index 693c1b81a23c22d35a1d93a5a0ff92f31588ff95..e19f2fa1a528fe472d021b1d474d0bee75c1f916 100644 (file)
@@ -501,10 +501,10 @@ static int tas2563_save_calibration(struct tasdevice_priv *tas_priv)
 static void tas2781_apply_calib(struct tasdevice_priv *tas_priv)
 {
        static const unsigned char page_array[CALIB_MAX] = {
-               0x17, 0x18, 0x18, 0x0d, 0x18
+               0x17, 0x18, 0x18, 0x13, 0x18,
        };
        static const unsigned char rgno_array[CALIB_MAX] = {
-               0x74, 0x0c, 0x14, 0x3c, 0x7c
+               0x74, 0x0c, 0x14, 0x70, 0x7c,
        };
        unsigned char *data;
        int i, j, rc;
index ec5e21932876038b99afbfa30560d856efd8afd5..4790c735599bdd97b62a39edd861230464abc5d8 100644 (file)
@@ -970,7 +970,7 @@ int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
        if (dso->annotate_warned)
                return -1;
 
-       if (not_annotated) {
+       if (not_annotated || !sym->annotate2) {
                err = symbol__annotate2(ms, evsel, &browser.arch);
                if (err) {
                        char msg[BUFSIZ];
index 9b70ab110ce79f24da580611f1a2098726f1ae12..86a996290e9ab576066051c2349f57041362c007 100644 (file)
@@ -2435,6 +2435,9 @@ int symbol__annotate(struct map_symbol *ms, struct evsel *evsel,
        if (parch)
                *parch = arch;
 
+       if (!list_empty(&notes->src->source))
+               return 0;
+
        args.arch = arch;
        args.ms = *ms;
        if (annotate_opts.full_addr)
index 95cd8414f6ef85042885f9404097685dc940dc61..e5d78565f479feabbcaeae29693931e5907696bf 100644 (file)
@@ -289,6 +289,7 @@ static inline __u32 check_lock_type(__u64 lock, __u32 flags)
        struct task_struct *curr;
        struct mm_struct___old *mm_old;
        struct mm_struct___new *mm_new;
+       struct sighand_struct *sighand;
 
        switch (flags) {
        case LCB_F_READ:  /* rwsem */
@@ -310,7 +311,9 @@ static inline __u32 check_lock_type(__u64 lock, __u32 flags)
                break;
        case LCB_F_SPIN:  /* spinlock */
                curr = bpf_get_current_task_btf();
-               if (&curr->sighand->siglock == (void *)lock)
+               sighand = curr->sighand;
+
+               if (sighand && &sighand->siglock == (void *)lock)
                        return LCD_F_SIGHAND_LOCK;
                break;
        default:
index b1ede624986676a554514105936698fdd2b0a915..b7c8f29c09a978895c1176e1a39aeda8c97e8416 100644 (file)
@@ -18,7 +18,7 @@ echo 'sched:*' > set_event
 
 yield
 
-count=`cat trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
+count=`head -n 100 trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
 if [ $count -lt 3 ]; then
     fail "at least fork, exec and exit events should be recorded"
 fi
@@ -29,7 +29,7 @@ echo 1 > events/sched/enable
 
 yield
 
-count=`cat trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
+count=`head -n 100 trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
 if [ $count -lt 3 ]; then
     fail "at least fork, exec and exit events should be recorded"
 fi
@@ -40,7 +40,7 @@ echo 0 > events/sched/enable
 
 yield
 
-count=`cat trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
+count=`head -n 100 trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
 if [ $count -ne 0 ]; then
     fail "any of scheduler events should not be recorded"
 fi
index 110d73917615d177d5d7a891f08d523619c404f3..02a2a1b267c1eae76f24011c64f9a0759d55032f 100644 (file)
@@ -1,3 +1,5 @@
 CONFIG_IOMMUFD=y
+CONFIG_FAULT_INJECTION_DEBUG_FS=y
 CONFIG_FAULT_INJECTION=y
 CONFIG_IOMMUFD_TEST=y
+CONFIG_FAILSLAB=y
index 2fb6dd8adba6945d0000c19fe90da1002d8c0dd4..8b984fa042869e595507368541504f0b04d42014 100644 (file)
@@ -86,7 +86,7 @@ static void netstat_read_type(FILE *fnetstat, struct netstat **dest, char *line)
 
        pos = strchr(line, ' ') + 1;
 
-       if (fscanf(fnetstat, type->header_name) == EOF)
+       if (fscanf(fnetstat, "%[^ :]", type->header_name) == EOF)
                test_error("fscanf(%s)", type->header_name);
        if (fread(&tmp, 1, 1, fnetstat) != 1 || tmp != ':')
                test_error("Unexpected netstat format (%c)", tmp);
index 92276f916f2f30d080ba3e1f5521c492192f8e98..e408b9243b2c5a5cf66785518fbfc16f2682b169 100644 (file)
@@ -17,37 +17,37 @@ static pthread_mutex_t ksft_print_lock = PTHREAD_MUTEX_INITIALIZER;
 void __test_msg(const char *buf)
 {
        pthread_mutex_lock(&ksft_print_lock);
-       ksft_print_msg(buf);
+       ksft_print_msg("%s", buf);
        pthread_mutex_unlock(&ksft_print_lock);
 }
 void __test_ok(const char *buf)
 {
        pthread_mutex_lock(&ksft_print_lock);
-       ksft_test_result_pass(buf);
+       ksft_test_result_pass("%s", buf);
        pthread_mutex_unlock(&ksft_print_lock);
 }
 void __test_fail(const char *buf)
 {
        pthread_mutex_lock(&ksft_print_lock);
-       ksft_test_result_fail(buf);
+       ksft_test_result_fail("%s", buf);
        pthread_mutex_unlock(&ksft_print_lock);
 }
 void __test_xfail(const char *buf)
 {
        pthread_mutex_lock(&ksft_print_lock);
-       ksft_test_result_xfail(buf);
+       ksft_test_result_xfail("%s", buf);
        pthread_mutex_unlock(&ksft_print_lock);
 }
 void __test_error(const char *buf)
 {
        pthread_mutex_lock(&ksft_print_lock);
-       ksft_test_result_error(buf);
+       ksft_test_result_error("%s", buf);
        pthread_mutex_unlock(&ksft_print_lock);
 }
 void __test_skip(const char *buf)
 {
        pthread_mutex_lock(&ksft_print_lock);
-       ksft_test_result_skip(buf);
+       ksft_test_result_skip("%s", buf);
        pthread_mutex_unlock(&ksft_print_lock);
 }
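Both this file and the netstat fscanf hunk above fix the same bug class: a buffer that may contain externally derived text was passed as the format argument of a printf/scanf-family function, so any '%' in it would be parsed as a conversion specifier. The safe idiom, as a standalone sketch:

    #include <stdio.h>

    static void log_msg(const char *buf)
    {
    	/* printf(buf) is wrong: buf == "100% done" would consume a
    	 * nonexistent vararg (a classic format-string bug). */
    	printf("%s", buf);	/* treat buf strictly as data */
    }

    int main(void)
    {
    	log_msg("100% done\n");
    	return 0;
    }
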
 
index 7df8b8700e39e96292f8eafdf105ee0314a65497..a2fe88d35ac06e4f534bd4d452670528d9f77219 100644 (file)
@@ -256,8 +256,6 @@ static int test_wait_fds(int sk[], size_t nr, bool is_writable[],
 
 static void test_client_active_rst(unsigned int port)
 {
-       /* one in queue, another accept()ed */
-       unsigned int wait_for = backlog + 2;
        int i, sk[3], err;
        bool is_writable[ARRAY_SIZE(sk)] = {false};
        unsigned int last = ARRAY_SIZE(sk) - 1;
@@ -275,16 +273,20 @@ static void test_client_active_rst(unsigned int port)
        for (i = 0; i < last; i++) {
                err = _test_connect_socket(sk[i], this_ip_dest, port,
                                               (i == 0) ? TEST_TIMEOUT_SEC : -1);
-
                if (err < 0)
                        test_error("failed to connect()");
        }
 
-       synchronize_threads(); /* 2: connection accept()ed, another queued */
-       err = test_wait_fds(sk, last, is_writable, wait_for, TEST_TIMEOUT_SEC);
+       synchronize_threads(); /* 2: two connections: one accept()ed, another queued */
+       err = test_wait_fds(sk, last, is_writable, last, TEST_TIMEOUT_SEC);
        if (err < 0)
                test_error("test_wait_fds(): %d", err);
 
+       /* async connect() with third sk to get into request_sock_queue */
+       err = _test_connect_socket(sk[last], this_ip_dest, port, -1);
+       if (err < 0)
+               test_error("failed to connect()");
+
        synchronize_threads(); /* 3: close listen socket */
        if (test_client_verify(sk[0], packet_sz, quota / packet_sz, TEST_TIMEOUT_SEC))
                test_fail("Failed to send data on connected socket");
@@ -292,13 +294,14 @@ static void test_client_active_rst(unsigned int port)
                test_ok("Verified established tcp connection");
 
        synchronize_threads(); /* 4: finishing up */
-       err = _test_connect_socket(sk[last], this_ip_dest, port, -1);
-       if (err < 0)
-               test_error("failed to connect()");
 
        synchronize_threads(); /* 5: closed active sk */
-       err = test_wait_fds(sk, ARRAY_SIZE(sk), NULL,
-                           wait_for, TEST_TIMEOUT_SEC);
+       /*
+        * Wait for 2 connections: one accepted, another in the accept
+        * queue. The one in the request_sock_queue won't get fully
+        * established, so it doesn't receive an active RST; see
+        * inet_csk_listen_stop().
+        */
+       err = test_wait_fds(sk, last, NULL, last, TEST_TIMEOUT_SEC);
        if (err < 0)
                test_error("select(): %d", err);
 
index 452de131fa3a9c720cd1fc4b9dc24438fd01d15d..517930f9721bd9b062d178def9fb296c17353119 100644 (file)
@@ -21,7 +21,7 @@ static void make_listen(int sk)
 static void test_vefify_ao_info(int sk, struct tcp_ao_info_opt *info,
                                const char *tst)
 {
-       struct tcp_ao_info_opt tmp;
+       struct tcp_ao_info_opt tmp = {};
        socklen_t len = sizeof(tmp);
 
        if (getsockopt(sk, IPPROTO_TCP, TCP_AO_INFO, &tmp, &len))
index 7badaf215de288fca3a1ad935b829c7425b32552..b02080d09fbc057e2e7e136a1de345e51ae52042 100644 (file)
@@ -34,7 +34,7 @@
 #endif
 
 #ifndef UDP_MAX_SEGMENTS
-#define UDP_MAX_SEGMENTS       (1 << 6UL)
+#define UDP_MAX_SEGMENTS       (1 << 7UL)
 #endif
 
 #define CONST_MTU_TEST 1500
index 505294da1b9fb5e7bd07aac4a119164900c8f2e6..d6f99eb9be659d3c9bb60b922e4f81102ac7f918 100644 (file)
@@ -154,7 +154,7 @@ static int dev_papr_vpd_null_handle(void)
 static int papr_vpd_close_handle_without_reading(void)
 {
        const int devfd = open(DEVPATH, O_RDONLY);
-       struct papr_location_code lc;
+       struct papr_location_code lc = { .str = "", };
        int fd;
 
        SKIP_IF_MSG(devfd < 0 && errno == ENOENT,